text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from os.path import exists
from os.path import getmtime
from . import old_statusfile
# These outcomes can occur in a TestCase's outcomes list:
SKIP = "SKIP"
FAIL = "FAIL"
PASS = "PASS"
OKAY = "OKAY"
TIMEOUT = "TIMEOUT"
CRASH = "CRASH"
SLOW = "SLOW"
# These are just for the status files and are mapped below in DEFS:
FAIL_OK = "FAIL_OK"
PASS_OR_FAIL = "PASS_OR_FAIL"
ALWAYS = "ALWAYS"
# Namespace handed to eval() when reading a status file: each keyword
# evaluates to its own name.
KEYWORDS = {}
for key in [SKIP, FAIL, PASS, OKAY, TIMEOUT, CRASH, SLOW, FAIL_OK,
            PASS_OR_FAIL, ALWAYS]:
    KEYWORDS[key] = key
# Composite status-file keywords expand to lists of primitive outcomes.
DEFS = {FAIL_OK: [FAIL, OKAY],
        PASS_OR_FAIL: [PASS, FAIL]}
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
            "mipsel", "x64", "nacl_ia32", "nacl_x64"]:
    VARIABLES[var] = var
def DoSkip(outcomes):
    """Return True when a test with these outcomes should not be run."""
    return any(status in outcomes for status in (SKIP, SLOW))
def IsFlaky(outcomes):
    """A test is flaky when it both passes and fails, without crashing
    and without failure being explicitly OK."""
    passes_and_fails = PASS in outcomes and FAIL in outcomes
    return passes_and_fails and CRASH not in outcomes and OKAY not in outcomes
def IsFailOk(outcomes):
    """True when failing is an accepted result (both FAIL and OKAY listed)."""
    return FAIL in outcomes and OKAY in outcomes
def _AddOutcome(result, new):
    """Add an outcome to the set |result|, recursively expanding any
    composite keyword (e.g. FAIL_OK) through the DEFS mapping.

    Fixes: replaces the `type(x) == list/str` comparisons with idiomatic
    isinstance() checks and drops the needless `global DEFS` statement
    (DEFS is only read, never rebound).
    """
    mapped = DEFS.get(new)
    if mapped is None:
        # Not a composite keyword: record the primitive outcome itself.
        result.add(new)
    elif isinstance(mapped, list):
        for member in mapped:
            _AddOutcome(result, member)
    elif isinstance(mapped, str):
        _AddOutcome(result, mapped)
def _ParseOutcomeList(rule, outcomes, target_dict, variables):
    """Parse one rule's outcome list into target_dict[rule].

    |outcomes| is either a single outcome string or a list whose items
    are outcome strings or conditional sub-lists of the form
    [condition_expr, outcome, ...]; the condition is eval()'d against
    |variables| and its outcomes only apply when it evaluates true.
    """
    result = set([])
    if type(outcomes) == str:
        outcomes = [outcomes]
    for item in outcomes:
        if type(item) == str:
            _AddOutcome(result, item)
        elif type(item) == list:
            # item[0] is a python expression over |variables| (arch/mode
            # names etc.); skip the attached outcomes when it is false.
            if not eval(item[0], variables): continue
            for outcome in item[1:]:
                assert type(outcome) == str
                _AddOutcome(result, outcome)
        else:
            assert False
    if len(result) == 0: return
    # Merge with outcomes already collected for the same rule.
    if rule in target_dict:
        target_dict[rule] |= result
    else:
        target_dict[rule] = result
def ReadStatusFile(path, variables):
    """Read and parse a test status file.

    Returns a pair (rules, wildcards): dicts mapping test-name rules
    (wildcard rules end in '*') to sets of expected outcomes.
    NOTE(review): both the file contents and the embedded condition
    expressions go through eval(); status files must be trusted input.
    """
    # As long as the old-format .status files are authoritative, just
    # create the converted version on demand and cache it to speed up
    # subsequent runs.
    if path.endswith(".status"):
        newpath = path + "2"
        # Regenerate the cached ".status2" file when missing or stale.
        if not exists(newpath) or getmtime(newpath) < getmtime(path):
            print "Converting status file."
            converted = old_statusfile.ConvertNotation(path).GetOutput()
            with open(newpath, 'w') as f:
                f.write(converted)
        path = newpath
    with open(path) as f:
        global KEYWORDS
        # Evaluate the file with outcome keywords bound to themselves.
        contents = eval(f.read(), KEYWORDS)
    rules = {}
    wildcards = {}
    variables.update(VARIABLES)
    for section in contents:
        assert type(section) == list
        assert len(section) == 2
        # section[0] is a guard expression over |variables|.
        if not eval(section[0], variables): continue
        section = section[1]
        assert type(section) == dict
        for rule in section:
            assert type(rule) == str
            if rule[-1] == '*':
                _ParseOutcomeList(rule, section[rule], wildcards, variables)
            else:
                _ParseOutcomeList(rule, section[rule], rules, variables)
    return rules, wildcards
|
{
"content_hash": "b97409a3126137edaf6a2c007b733253",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 77,
"avg_line_length": 26.617391304347827,
"alnum_prop": 0.6481541979745181,
"repo_name": "windyuuy/opera",
"id": "634fe6a08a82ac639f87a4dd14161d1699d10836",
"size": "4730",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chromium/src/v8/tools/testrunner/local/statusfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Assembly",
"bytes": "51642"
},
{
"name": "Batchfile",
"bytes": "35942"
},
{
"name": "C",
"bytes": "4303018"
},
{
"name": "C#",
"bytes": "35203"
},
{
"name": "C++",
"bytes": "207333360"
},
{
"name": "CMake",
"bytes": "25089"
},
{
"name": "CSS",
"bytes": "681256"
},
{
"name": "Dart",
"bytes": "24294"
},
{
"name": "Emacs Lisp",
"bytes": "25534"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "10400943"
},
{
"name": "IDL",
"bytes": "836"
},
{
"name": "Java",
"bytes": "2821184"
},
{
"name": "JavaScript",
"bytes": "14563996"
},
{
"name": "Lua",
"bytes": "13749"
},
{
"name": "Makefile",
"bytes": "55521"
},
{
"name": "Objective-C",
"bytes": "1211523"
},
{
"name": "Objective-C++",
"bytes": "6221908"
},
{
"name": "PHP",
"bytes": "61320"
},
{
"name": "Perl",
"bytes": "82949"
},
{
"name": "Protocol Buffer",
"bytes": "280464"
},
{
"name": "Python",
"bytes": "12627773"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "894814"
},
{
"name": "VimL",
"bytes": "4953"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
}
|
import logging
import socket
import gevent
import haigha.connection
# Name of the AMQP fanout exchange this source listens on.
EXCHANGE = "sutro"
LOG = logging.getLogger(__name__)
class MessageSource(object):
    """An AMQP based message source.

    This will monitor a fanout exchange on AMQP and signal on receipt of any
    messages.
    """

    def __init__(self, host, port, vhost, username, password, message_handler):
        self.host = host
        self.port = port
        self.vhost = vhost
        self.username = username
        self.password = password
        # Called as message_handler(namespace=..., message=...) per message.
        self.message_handler = message_handler
        # Fix: start in the "disconnected" state so `connected` can be
        # queried before the first connect() without AttributeError
        # (previously these attributes only existed after connect()).
        self.connection = None
        self.channel = None

    def connect(self):
        """Open the AMQP connection, declare the exchange and a private queue."""
        self.connection = haigha.connection.Connection(
            host=self.host,
            port=self.port,
            vhost=self.vhost,
            user=self.username,
            password=self.password,
            transport="gevent",
            logger=LOG,
            close_cb=self._on_close,
        )
        self.channel = self.connection.channel()
        self.channel.exchange.declare(exchange=EXCHANGE, type="fanout")
        # Exclusive auto-delete queue: each consumer gets a fresh private
        # queue that vanishes with the connection.
        self.channel.queue.declare(
            exclusive=True,
            auto_delete=True,
            durable=False,
            cb=self._on_queue_created,
        )

    @property
    def connected(self):
        # True only while an open connection object is held.
        return bool(self.connection)

    def _on_queue_created(self, queue_name, *ignored):
        # Bind our private queue to the fanout exchange and start consuming.
        self.channel.queue.bind(queue=queue_name, exchange=EXCHANGE)
        self.channel.basic.consume(
            queue=queue_name,
            consumer=self._on_message,
        )

    def _on_message(self, message):
        # The routing key carries the namespace the message belongs to.
        decoded = message.body.decode("utf-8")
        namespace = message.delivery_info["routing_key"]
        self.message_handler(namespace=namespace, message=decoded)

    def _on_close(self):
        LOG.warning("lost connection")
        self.connection = None
        self.channel = None

    def pump_messages(self):
        """Reconnect-forever loop: read frames while connected and retry
        with a short pause on socket errors."""
        while True:
            try:
                self.connect()
                LOG.info("connected")
                while self.connected:
                    LOG.debug("pumping")
                    self.connection.read_frames()
                    gevent.sleep()
            except socket.error as exception:
                LOG.warning("connection failed: %s", exception)
                gevent.sleep(1)
|
{
"content_hash": "6c95b128a6fc33bface3dfc89b023778",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 27.865853658536587,
"alnum_prop": 0.5754923413566739,
"repo_name": "spladug/sutro",
"id": "2fe0f1d2d6a768dda2ab8fff1985a2e060376e6f",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sutro/source.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12543"
},
{
"name": "Shell",
"bytes": "41"
}
],
"symlink_target": ""
}
|
import os
import logging
import json
import datetime
import psycopg2
import time
import threading
import secrets
from pyramid.config import Configurator
from pyramid.session import SignedCookieSessionFactory
from pyramid.view import view_config
from pyramid.events import NewRequest, subscriber
from waitress import serve
from contextlib import closing
from tweepy_inter import authorize
from tweepy_inter import fetch_user_statuses
from write_json import add_venue
here = os.path.dirname(os.path.abspath(__file__))
# DB_SCHEMA = """
# CREATE TABLE IF NOT EXISTS entries (
# id serial PRIMARY KEY,
# title VARCHAR (127) NOT NULL,
# tweet TEXT NOT NULL,
# venue VARCHAR (127) NOT NULL,
# created TIMESTAMP NOT NULL
# )
# """
DB_LOCALS_SCHEMA = """
CREATE TABLE IF NOT EXISTS locals (
id serial PRIMARY KEY,
venue VARCHAR(127) NOT NULL,
screen_name VARCHAR(127) NOT NULL,
address TEXT NOT NULL
)
"""
READ_LOCALS_ENTRY = """
SELECT "id", "venue", "screen_name", "address" FROM "locals"
"""
WRITE_LOCALS_ENTRY = """
INSERT INTO "locals" ("venue", "screen_name", "address") VALUES(%s, %s, %s)
"""
FETCH_LOCALS_ID = """
SELECT "id" FROM "locals" WHERE screen_name = %s
"""
DB_TWEETS_SCHEMA = """
CREATE TABLE IF NOT EXISTS "tweets" (
"id" serial PRIMARY KEY,
"parent_id" INTEGER REFERENCES locals ON UPDATE NO ACTION ON DELETE CASCADE,
"author_handle" TEXT NOT NULL,
"content" TEXT NOT NULL,
"time" TIMESTAMP NOT NULL,
"count" INTEGER NOT NULL,
"status_id" TEXT NOT NULL
)
"""
# TODO: edit tweets schema
# add _ profile_image_url
# status id
# TODO: update tweepy_inter.fetch_user_statuses() to match
GET_VENUE_INFO = """
SELECT id, venue, screen_name FROM locals WHERE address = %s
"""
# {table from} {id to associate with}
READ_TWEET = """
SELECT id, parent_id, author_handle, content, time, count, status_id FROM tweets WHERE parent_id = %s ORDER BY time DESC
"""
# {table name} {data from one tweet}
WRITE_TWEET = """
INSERT INTO tweets (parent_id, author_handle, content, time, count, status_id) VALUES (%s, %s, %s, %s, %s, %s)
"""
# {table name} {content to match}
UPDATE_TWEET = """
UPDATE tweets SET count = count + 1 WHERE content = %s
"""
FILTER_SAME_TWEET = """
SELECT status_id FROM tweets where parent_id = %s
"""
logging.basicConfig()
log = logging.getLogger(__file__)
@subscriber(NewRequest)
def open_connection(event):
    """Open a DB connection at the start of each request and arrange
    for close_connection() to run when the request finishes."""
    request = event.request
    settings = request.registry.settings
    request.db = connect_db(settings)
    request.add_finished_callback(close_connection)
# @view_config(route_name='home', renderer='string')
# def home(request):
# return "Wazzapp v1.0"
def connect_db(settings):
    """Open and return a psycopg2 connection for the configured DSN."""
    dsn = settings['db']
    return psycopg2.connect(dsn)
def close_connection(request):
    """close the database connection for this request

    If there has been an error in the processing of the request, abort any
    open transactions.
    """
    db = getattr(request, 'db', None)
    if db is not None:
        if request.exception is not None:
            # A view raised: discard any partial writes.
            db.rollback()
        else:
            db.commit()
        request.db.close()
def init_db():
    """Create database tables defined by DB_SCHEMA

    Warning: This function will not update existing table definitions
    """
    settings = {}
    # Credentials come from the local secrets module, not the environment.
    settings['db'] = secrets.get_credentials()
    with closing(connect_db(settings)) as db:
        # db.cursor().execute(DB_SCHEMA)
        db.cursor().execute(DB_LOCALS_SCHEMA)
        db.cursor().execute(DB_TWEETS_SCHEMA)
        db.commit()
def setup_data_snapshot():
    """
    Set up database for interaction.

    Seeds the locals table with a fixed list of Seattle venues and then
    caches each venue's recent tweets into the tweets table.
    """
    settings = {}
    settings['db'] = secrets.get_credentials()
    with closing(connect_db(settings)) as db:
        # cursor = db.cursor()
        # Write venues to locals table
        # venue, screen_name, address, lat, long
        # json.loads(response.content, response.encoding)['results'][0]['geometry']['location']['lat']
        # Seed data: (venue name, twitter screen_name, street address).
        venue_list = []
        venue_list.append(('Key Arena', 'KeyArenaSeattle',
                           '305 Harrison Street, Seattle, WA 98109'))
        venue_list.append(('Neumos', 'Neumos',
                           '925 East Pike Street, Seattle, WA 98122'))
        venue_list.append(('Paramount Theatre', 'BroadwaySeattle',
                           '911 Pine Street, Seattle, WA 98101'))
        venue_list.append(('Fremont Brewing', 'fremontbrewing',
                           '1050 North 34th Street, Seattle, WA 98103'))
        venue_list.append(('Tractor Tavern', 'tractortavern',
                           '5213 Ballard Avenue Northwest, Seattle, WA 98107'))
        venue_list.append(('Nectar Lounge', 'NectarLounge',
                           '412 North 36th Street, Seattle, WA 98103'))
        venue_list.append(('The Triple Door', 'TheTripleDoor',
                           '216 Union Street, Seattle, WA 98101'))
        venue_list.append(('The Showbox', 'ShowboxPresents',
                           '1426 1st Avenue, Seattle, WA 98101'))
        venue_list.append(('The Crocodile', 'thecrocodile',
                           '2200 2nd Avenue, Seattle, WA 98121'))
        venue_list.append(('Central Cinema', 'CentralCinema',
                           '1411 21st Avenue, Seattle, WA 98122'))
        for venue in venue_list:
            write_local(venue, db)
        # Write tweets to tweets table
        # parent_id, author_handle, content, time, count
        for venue in venue_list:
            pull_tweets(venue[1], db)
def write_local(local_info_tuple, connection):
    """Insert one (venue, screen_name, address) row into the locals table."""
    cur = connection.cursor()
    cur.execute(WRITE_LOCALS_ENTRY, local_info_tuple)
    connection.commit()
def pull_tweets(target_twitter_handle, connection):
    """Fetch recent statuses for a twitter handle and store them as rows
    in the tweets table, linked to the matching locals row."""
    cursor = connection.cursor()
    cursor.execute(FETCH_LOCALS_ID, (target_twitter_handle,))
    # NOTE(review): fetchone() returns None when the handle is not in
    # locals, making the subscript below raise TypeError -- confirm the
    # venue is always inserted first by callers.
    refer = cursor.fetchone()[0]
    # Filter out tweets that are already in the database
    # cursor.execute(FILTER_SAME_TWEET, refer)
    # tweet_ids = cursor.fetchall()
    results = fetch_user_statuses(
        authorize(), target_twitter_handle, reference=refer)
    # edited_list = results
    # for item in edited_list:
    #     if tweet_ids == item[-1]:
    #         results.remove(item)
    cursor.executemany(WRITE_TWEET, results)
    connection.commit()
@view_config(route_name='home', renderer='templates/base.jinja2')
def geo_json(request):
    """renders home page (template needs no context, so return empty dict)"""
    return {}
@view_config(route_name='writelocation', request_method='POST', renderer='json')
def write_input_location(request):
    """Resolve a submitted venue to a twitter handle, store the venue if
    it is new, and cache its tweets.  Returns the guesses as JSON."""
    # get twitter handle
    api = authorize()
    # Get the handle of the first-most result from twitter's user search
    try:
        handle_guess = api.search_users(
            '{}, {}'.format(
                request.params.get('venue'), 'Seattle'))[0].screen_name
        # pull_tweets(handle_guess, request.db)
    except IndexError:
        # No twitter match at all; proceed with an empty handle.
        handle_guess = ''
    # Write/pull tweets regardless of correctness of twitter handle/address for now
    # TODO: have user verification
    cursor = request.db.cursor()
    cursor.execute(GET_VENUE_INFO, (request.params.get('address', None), ))
    if not cursor.fetchone():
        # Unknown address: record the venue and its tweets.
        write_local((request.params.get('venue'),
                     handle_guess,
                     request.params.get('address')),
                    request.db)
        add_venue(request.params.get('address'))
        if handle_guess:
            pull_tweets(handle_guess, request.db)
    return {'venue_guess': request.params.get('venue'),
            'handle_guess': handle_guess,
            'address_guess': request.params.get('address')}
@view_config(route_name='gettweets', renderer='json')
def get_tweets_from_db(request):
    """Return the venue's cached tweets with humanized relative times."""
    cursor = request.db.cursor()
    cursor.execute(GET_VENUE_INFO, (request.params.get('address', None), ))
    venue_info = cursor.fetchone()
    # NOTE(review): venue_info is None for an unknown address, making the
    # subscripts below raise TypeError -- confirm the client only requests
    # addresses already stored via writelocation.
    cursor.execute(READ_TWEET, [venue_info[0]])
    tweets = cursor.fetchall()
    if tweets:
        keys = ('id', 'parent_id', 'author_handle',
                'content', 'time', 'count', 'status_id')
        tweets = [dict(zip(keys, row)) for row in tweets]
        for tweet in tweets:
            # Age of the tweet in whole hours.
            time_since = int((
                datetime.datetime.utcnow() - tweet['time']).total_seconds() // 3600)
            if time_since > 23:
                # Older than a day: report in days instead.
                time_since = int(time_since // 24)
                if time_since == 1:
                    tweet['time'] = "{} day ago".format(time_since)
                else:
                    tweet['time'] = "{} days ago".format(time_since)
            else:
                tweet['time'] = "{} hours ago".format(time_since)
    else:
        tweets = None
    return {'venue': venue_info[1], 'tweets': tweets, 'venue_handle': venue_info[2]}
def main():
    """Create a configured wsgi app"""
    settings = {}
    settings['reload_all'] = os.environ.get('DEBUG', True)
    settings['debug_all'] = os.environ.get('DEBUG', True)
    settings['db'] = secrets.get_credentials()
    # secret value for session signing:
    # NOTE(review): the fallback secret is hard-coded; ensure
    # JOURNAL_SESSION_SECRET is set in any real deployment.
    secret = os.environ.get('JOURNAL_SESSION_SECRET', 'itsaseekrit')
    session_factory = SignedCookieSessionFactory(secret)
    # configuration setup
    config = Configurator(
        settings=settings,
        session_factory=session_factory
    )
    config.include('pyramid_jinja2')
    config.add_route('home', '/')
    config.add_route('gettweets', '/gettweets')
    config.add_route('writelocation', '/writelocation')
    config.add_static_view('static', os.path.join(here, 'static'))
    # Pick up the @view_config decorated views in this module.
    config.scan()
    app = config.make_wsgi_app()
    return app
# Maintenance queries used by the helpers below (not by the web views).
DELETE_TWEETS = """
DELETE FROM tweets
"""
PULL_HANDLE = """
SELECT author_handle FROM tweets
"""
def clear_database(connection):
    """Delete every row from the tweets table."""
    cur = connection.cursor()
    cur.execute(DELETE_TWEETS)
    connection.commit()
def pull_handle(connection):
    """Return every author_handle row from the tweets table."""
    cur = connection.cursor()
    cur.execute(PULL_HANDLE)
    handles = cur.fetchall()
    connection.commit()
    return handles
if __name__ == '__main__':
    app = main()
    # Fix: PORT arrives from the environment as a string; coerce it so
    # waitress always receives an integer port.
    port = int(os.environ.get('PORT', 8000))
    serve(app, host='127.0.0.1', port=port)
|
{
"content_hash": "d0e055c7e37275e70c0f5ed4da7eeabb",
"timestamp": "",
"source": "github",
"line_count": 339,
"max_line_length": 120,
"avg_line_length": 30.04424778761062,
"alnum_prop": 0.6252331860579283,
"repo_name": "efrainc/Wazzap",
"id": "d047e170d298b5f1e67a854904a4c562a0a1dc26",
"size": "10209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webapp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8671"
},
{
"name": "CSS",
"bytes": "2064"
},
{
"name": "JavaScript",
"bytes": "6826"
},
{
"name": "Python",
"bytes": "17854"
}
],
"symlink_target": ""
}
|
import unittest
from satellite import tree_from_traversals
# Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0
class SatelliteTest(unittest.TestCase):
    """Tests for tree_from_traversals: rebuild a binary tree (dicts with
    keys "v"/"l"/"r") from its pre-order and in-order traversals."""

    def test_empty_tree(self):
        preorder = []
        inorder = []
        expected = {}
        self.assertEqual(tree_from_traversals(preorder, inorder), expected)

    def test_tree_with_one_item(self):
        preorder = ["a"]
        inorder = ["a"]
        expected = {"v": "a", "l": {}, "r": {}}
        self.assertEqual(tree_from_traversals(preorder, inorder), expected)

    def test_tree_with_many_items(self):
        preorder = ["a", "i", "x", "f", "r"]
        inorder = ["i", "a", "f", "x", "r"]
        expected = {
            "v": "a",
            "l": {"v": "i", "l": {}, "r": {}},
            "r": {
                "v": "x",
                "l": {"v": "f", "l": {}, "r": {}},
                "r": {"v": "r", "l": {}, "r": {}},
            },
        }
        self.assertEqual(tree_from_traversals(preorder, inorder), expected)

    def test_reject_traversals_of_different_length(self):
        preorder = ["a", "b"]
        inorder = ["b", "a", "r"]
        with self.assertRaisesWithMessage(ValueError):
            tree_from_traversals(preorder, inorder)

    def test_reject_inconsistent_traversals_of_same_length(self):
        preorder = ["x", "y", "z"]
        inorder = ["a", "b", "c"]
        with self.assertRaisesWithMessage(ValueError):
            tree_from_traversals(preorder, inorder)

    def test_reject_traversals_with_repeated_items(self):
        preorder = ["a", "b", "a"]
        inorder = ["b", "a", "a"]
        with self.assertRaisesWithMessage(ValueError):
            tree_from_traversals(preorder, inorder)

    # Utility functions
    def setUp(self):
        # Python 2 compatibility: alias assertRaisesRegexp when the
        # Python 3 name is missing.
        try:
            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

    def assertRaisesWithMessage(self, exception):
        # Require the exception to carry a non-empty message.
        return self.assertRaisesRegex(exception, r".+")
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "66d74b43480dff3dc95e39a46949d592",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 29.422535211267604,
"alnum_prop": 0.5390138822403063,
"repo_name": "smalley/python",
"id": "c5d312dbebad0e3b30a847beedfb1357980b1fda",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/satellite/satellite_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "583569"
},
{
"name": "Shell",
"bytes": "1255"
}
],
"symlink_target": ""
}
|
"""
Tests for warning suppression features of Trial.
"""
from __future__ import division, absolute_import
import unittest as pyunit
from twisted.trial import unittest
from twisted.trial.test import suppression
class SuppressionMixin(object):
    """
    Tests for the warning suppression features of
    L{twisted.trial.unittest.SynchronousTestCase}.
    """
    def runTests(self, suite):
        # Run the suite, discarding the result; we only inspect warnings.
        suite.run(pyunit.TestResult())

    def _load(self, cls, methodName):
        """
        Return a new L{unittest.TestSuite} with a single test method in it.

        @param cls: A L{TestCase} subclass defining a test method.

        @param methodName: The name of the test method from C{cls}.
        """
        return pyunit.TestSuite([cls(methodName)])

    def _assertWarnings(self, warnings, which):
        """
        Assert that a certain number of warnings with certain messages were
        emitted in a certain order.

        @param warnings: A list of emitted warnings, as returned by
            C{flushWarnings}.

        @param which: A list of strings giving warning messages that should
            appear in C{warnings}.

        @raise self.failureException: If the warning messages given by C{which}
            do not match the messages in the warning information in C{warnings},
            or if they do not appear in the same order.
        """
        self.assertEqual(
            [warning['message'] for warning in warnings],
            which)

    def test_setUpSuppression(self):
        """
        Suppressions defined by the test method being run are applied to any
        warnings emitted while running the C{setUp} fixture.
        """
        self.runTests(
            self._load(self.TestSetUpSuppression, "testSuppressMethod"))
        warningsShown = self.flushWarnings([
            self.TestSetUpSuppression._emit])
        self._assertWarnings(
            warningsShown,
            [suppression.CLASS_WARNING_MSG, suppression.MODULE_WARNING_MSG,
             suppression.CLASS_WARNING_MSG, suppression.MODULE_WARNING_MSG])

    def test_tearDownSuppression(self):
        """
        Suppressions defined by the test method being run are applied to any
        warnings emitted while running the C{tearDown} fixture.
        """
        self.runTests(
            self._load(self.TestTearDownSuppression, "testSuppressMethod"))
        warningsShown = self.flushWarnings([
            self.TestTearDownSuppression._emit])
        self._assertWarnings(
            warningsShown,
            [suppression.CLASS_WARNING_MSG, suppression.MODULE_WARNING_MSG,
             suppression.CLASS_WARNING_MSG, suppression.MODULE_WARNING_MSG])

    def test_suppressMethod(self):
        """
        A suppression set on a test method prevents warnings emitted by that
        test method which the suppression matches from being emitted.
        """
        self.runTests(
            self._load(self.TestSuppression, "testSuppressMethod"))
        warningsShown = self.flushWarnings([
            self.TestSuppression._emit])
        self._assertWarnings(
            warningsShown,
            [suppression.CLASS_WARNING_MSG, suppression.MODULE_WARNING_MSG])

    def test_suppressClass(self):
        """
        A suppression set on a L{SynchronousTestCase} subclass prevents warnings
        emitted by any test methods defined on that class which match the
        suppression from being emitted.
        """
        self.runTests(
            self._load(self.TestSuppression, "testSuppressClass"))
        warningsShown = self.flushWarnings([
            self.TestSuppression._emit])
        self.assertEqual(
            warningsShown[0]['message'], suppression.METHOD_WARNING_MSG)
        self.assertEqual(
            warningsShown[1]['message'], suppression.MODULE_WARNING_MSG)
        self.assertEqual(len(warningsShown), 2)

    def test_suppressModule(self):
        """
        A suppression set on a module prevents warnings emitted by any test
        methods defined in that module which match the suppression from being
        emitted.
        """
        self.runTests(
            self._load(self.TestSuppression2, "testSuppressModule"))
        warningsShown = self.flushWarnings([
            self.TestSuppression._emit])
        self.assertEqual(
            warningsShown[0]['message'], suppression.METHOD_WARNING_MSG)
        self.assertEqual(
            warningsShown[1]['message'], suppression.CLASS_WARNING_MSG)
        self.assertEqual(len(warningsShown), 2)

    def test_overrideSuppressClass(self):
        """
        The suppression set on a test method completely overrides a suppression
        with wider scope; if it does not match a warning emitted by that test
        method, the warning is emitted, even if a wider suppression matches.
        """
        self.runTests(
            self._load(self.TestSuppression, "testOverrideSuppressClass"))
        warningsShown = self.flushWarnings([
            self.TestSuppression._emit])
        self.assertEqual(
            warningsShown[0]['message'], suppression.METHOD_WARNING_MSG)
        self.assertEqual(
            warningsShown[1]['message'], suppression.CLASS_WARNING_MSG)
        self.assertEqual(
            warningsShown[2]['message'], suppression.MODULE_WARNING_MSG)
        self.assertEqual(len(warningsShown), 3)
class SynchronousSuppressionTest(SuppressionMixin, unittest.SynchronousTestCase):
    """
    @see: L{twisted.trial.test.test_tests}
    """
    # Bind the synchronous fixture classes under the attribute names the
    # mixin's tests expect.
    from twisted.trial.test.suppression import (
        SynchronousTestSetUpSuppression as TestSetUpSuppression,
        SynchronousTestTearDownSuppression as TestTearDownSuppression,
        SynchronousTestSuppression as TestSuppression,
        SynchronousTestSuppression2 as TestSuppression2)
|
{
"content_hash": "c09f7a71728fe0b134aeff6fecc46ac3",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 81,
"avg_line_length": 37.48125,
"alnum_prop": 0.6341504085376022,
"repo_name": "timkrentz/SunTracker",
"id": "7ea2e386aa3fe4f69088f90103ddeb986a908846",
"size": "6071",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/trial/test/test_suppression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe, json
import frappe.utils
import frappe.defaults
import frappe.widgets.form.meta
from frappe import _
@frappe.whitelist()
def getdoc(doctype, name, user=None):
    """
    Loads a doclist for a given document. This method is called directly from the client.
    Requires "doctype", "name" as form variables.
    Will also call the "onload" method on the document.
    """
    if not (doctype and name):
        raise Exception, 'doctype and name required!'
    # NOTE(review): unreachable -- the check above already guarantees a
    # truthy name.
    if not name:
        name = doctype
    if not frappe.db.exists(doctype, name):
        return []
    try:
        doc = frappe.get_doc(doctype, name)
        doc.run_method("onload")
        if not doc.has_permission("read"):
            raise frappe.PermissionError, "read"
        # add file list
        get_docinfo(doctype, name)
    except Exception:
        frappe.errprint(frappe.utils.get_traceback())
        frappe.msgprint(_('Did not load'))
        raise
    # Track recently viewed documents (skip internal names).
    if doc and not name.startswith('_'):
        frappe.user.update_recent(doctype, name)
    frappe.response.docs.append(doc)
@frappe.whitelist()
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
    """load doctype

    Appends the doctype meta (plus child-table metas, and optionally the
    parent doctype's) to the response; returns "use_cache" when the
    client's cached copy is still current.
    """
    docs = []
    # with parent (called from report builder)
    if with_parent:
        parent_dt = frappe.model.meta.get_parent_dt(doctype)
        if parent_dt:
            docs = get_meta_bundle(parent_dt)
            frappe.response['parent_dt'] = parent_dt
    if not docs:
        docs = get_meta_bundle(doctype)
    frappe.response['restrictions'] = get_restrictions(docs[0])
    # Client already has an up-to-date copy: tell it to reuse its cache.
    if cached_timestamp and docs[0].modified == cached_timestamp:
        return "use_cache"
    frappe.response.docs.extend(docs)
def get_meta_bundle(doctype):
    """Return the meta of `doctype` followed by the meta of every child
    (Table) doctype it embeds."""
    bundle = [frappe.widgets.form.meta.get_meta(doctype)]
    for field in bundle[0].fields:
        if field.fieldtype == "Table":
            bundle.append(frappe.widgets.form.meta.get_meta(field.options))
    return bundle
def get_docinfo(doctype, name):
    """Attach the document's attachments, comments and assignments to the
    response under "docinfo"."""
    docinfo = {
        "attachments": add_attachments(doctype, name),
        "comments": add_comments(doctype, name),
        "assignments": add_assignments(doctype, name)
    }
    frappe.response["docinfo"] = docinfo
def get_restrictions(meta):
    """Map each restricted field's options to the user's restriction values."""
    all_restrictions = frappe.defaults.get_restrictions()
    restricted = meta.get_restricted_fields(all_restrictions)
    return dict((df.options, all_restrictions[df.options]) for df in restricted)
def add_attachments(dt, dn):
    """Return [{name, file_url, file_name}] for files attached to dt/dn."""
    attachments = []
    for f in frappe.db.sql("""select name, file_name, file_url from
        `tabFile Data` where attached_to_name=%s and attached_to_doctype=%s""",
            (dn, dt), as_dict=True):
        attachments.append({
            'name': f.name,
            'file_url': f.file_url,
            'file_name': f.file_name
        })
    return attachments
def add_comments(dt, dn, limit=20):
    """Return up to `limit` most recent comments on document dt/dn.

    Fix: `limit` is now passed as a bound query parameter instead of being
    %-interpolated into the SQL text, so it can never leak into the query
    string unescaped.
    """
    cl = frappe.db.sql("""select name, comment, comment_by, creation from `tabComment`
        where comment_doctype=%s and comment_docname=%s
        order by creation desc limit %s""", (dt, dn, limit), as_dict=1)
    return cl
def add_assignments(dt, dn):
    """Return owners of up to 5 open ToDos referencing document dt/dn."""
    cl = frappe.db.sql_list("""select owner from `tabToDo`
        where reference_type=%(doctype)s and reference_name=%(name)s and status="Open"
        order by modified desc limit 5""", {
            "doctype": dt,
            "name": dn
        })
    return cl
@frappe.whitelist()
def get_badge_info(doctypes, filters):
    """Return the count of matching documents per doctype.

    `doctypes` and `filters` arrive as JSON strings from the client.
    """
    filters = json.loads(filters)
    doctypes = json.loads(doctypes)
    # docstatus 2 == cancelled; exclude those from the counts.
    filters["docstatus"] = ["!=", 2]
    out = {}
    for doctype in doctypes:
        out[doctype] = frappe.db.get_value(doctype, filters, "count(*)")
    return out
|
{
"content_hash": "a502c8272f0a6bd3609d9acb874f8d26",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 86,
"avg_line_length": 26.283464566929133,
"alnum_prop": 0.7028160575194727,
"repo_name": "rkawale/Internalhr-frappe",
"id": "b81e2a4bd42b9a10426d2dab7c282623e51a5259",
"size": "3442",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "frappe/widgets/form/load.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "78301"
},
{
"name": "JavaScript",
"bytes": "1458963"
},
{
"name": "Python",
"bytes": "714957"
}
],
"symlink_target": ""
}
|
"""
Copyright 2010 Sami Dalouche
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import threading
import logging
import traceback
import sqlalchemy
from sqlalchemy import create_engine, Table, Column, Integer, String, MetaData, ForeignKey, DateTime #, UniqueConstraint
from sqlalchemy.orm import mapper, relation, sessionmaker, scoped_session, backref #, eagerload
_sessionmaker = None # should be initialized by bootstrap
# Per-thread bookkeeping for nested transactional scopes.
_threadlocal = threading.local()
logger = logging.getLogger("persistence.transactional")
def init(sessionmaker):
    """Install the module-wide SQLAlchemy sessionmaker (called at bootstrap)."""
    global _sessionmaker
    _sessionmaker = sessionmaker
def transactional(f):
    """Decorator: run f inside a (possibly nested) session scope.

    The outermost scope commits on success and rolls back on exception.
    NOTE(review): the wrapper does not use functools.wraps, so the
    decorated function loses its __name__/__doc__.
    """
    def do(*args, **kwargs):
        def callback(session):
            return f(*args, **kwargs)
        return SessionTemplate(_sessionmaker).do_with_session(callback)
    return do
class SessionTemplate(object):
    """ Simple helper class akin to Spring-JDBC/Hibernate/ORM Template.
    It doesn't commit nor releases resources if other do_with_session() calls are pending
    See http://www.sqlalchemy.org/trac/ticket/1084#comment:3 for suggestions on how to improve this
    without using a custom threadlocal variable
    """
    def __init__(self, sessionmaker):
        assert sessionmaker is not None
        self._sessionmaker = sessionmaker

    def do_with_session(self, session_callback):
        """Run session_callback(session) inside the current scope.

        On exception the scope is marked for rollback and the exception is
        re-raised; end_scope() always runs and commits/rolls back only at
        the outermost nesting level.
        """
        try:
            session = begin_scope(self._sessionmaker)
            result = session_callback(session)
        except Exception as e1:
            _mark_for_rollback(self._sessionmaker)
            raise
        finally:
            end_scope(self._sessionmaker)
        return result
class BoundSession(object):
    """Reference-counted wrapper pairing a session with scope bookkeeping.

    `count` tracks how many nested do_with_session() scopes currently share
    the session; `should_commit` / `should_renew` record whether a rollback
    occurred and whether the session must be replaced.
    """

    def __init__(self, session, count=0):
        assert count >= 0
        self.session = session
        self.count = count
        self.should_commit = True
        self.should_renew = False

    def increment(self):
        self.count += 1

    def decrement(self):
        self.count -= 1

    def mark_for_rollback(self):
        self.should_commit = False

    def mark_for_renewal(self):
        self.should_renew = True
def begin_scope(session_maker):
    """Enter a (possibly nested) session scope and return the session."""
    # Reuse the thread's existing bound session, or create a fresh one.
    bound_session = _threadlocal.current_session if _session_exists() else BoundSession(session_maker())
    bound_session.increment()
    _threadlocal.current_session = bound_session
    # An inner scope rolled back earlier: the old session is unusable, so
    # replace it before handing it out.
    if _threadlocal.current_session.should_renew:
        _threadlocal.current_session.session = session_maker()
    return bound_session.session
def end_scope(session_maker, force_rollback=False):
    """Leave one session scope; commit/rollback happens only when the
    outermost scope exits."""
    if _current_count() == 1:  # top level, we either commit or rollback
        try:
            if _should_commit() and (not force_rollback):
                _session().commit()
            else:
                _rollback(session_maker)
        finally:
            _cleanup(session_maker)
    else:
        # Nested scope: rolling back kills the shared session, so flag it
        # for renewal by the next begin_scope().
        if not _should_commit() or force_rollback:
            _rollback_and_mark_for_renewal(session_maker)
        _threadlocal.current_session.decrement()
def _rollback_and_mark_for_renewal(session_maker):
    # Roll back now and flag the shared session to be replaced the next
    # time a scope is entered (it is unusable after the rollback).
    _rollback(session_maker)
    _threadlocal.current_session.mark_for_renewal()
def _rollback(session_maker):
    """Invalidate the underlying connection, then roll the session back."""
    #if not _session_exists():
    #    return
    try:
        conn = _session().connection().invalidate()
    except sqlalchemy.exc.InvalidRequestError:
        # ignore the following exception that happens on windows...
        # InvalidRequestError("The transaction is inactive
        # due to a rollback in a subtransaction and should be closed")
        #
        pass
    except Exception:
        # NOTE(review): deliberate broad swallow -- any invalidation
        # failure is ignored so that the rollback below always runs.
        pass
    _session().rollback()
def _cleanup(session_maker):
    """Close the session and drop the thread-local bookkeeping.

    The thread-local is removed even when close()/remove() raise, so the
    next begin_scope() starts from a clean state.
    """
    try:
        _session().close()
        # session_maker.remove() — presumably a scoped_session registry
        # discarding this thread's session; confirm against callers.
        session_maker.remove()
    finally:
        del _threadlocal.current_session
def _session():
    # The current thread's session, or None outside any scope.
    return _threadlocal.current_session.session if _session_exists() else None
def _current_count():
    # Nesting depth of the current scope (0 outside any scope).
    return _threadlocal.current_session.count if _session_exists() else 0
def _should_commit():
    # NOTE(review): unlike the helpers above, this assumes a scope exists
    # and raises AttributeError when called outside one.
    return _threadlocal.current_session.should_commit
def _session_exists():
    # True while the current thread is inside at least one scope.
    return hasattr(_threadlocal, 'current_session')
def _mark_for_rollback(session_maker):
    # session_maker is unused; kept for signature symmetry with the
    # other scope helpers.
    _threadlocal.current_session.mark_for_rollback()
|
{
"content_hash": "c61da33ca87ccdb2220005f6229c9db0",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 120,
"avg_line_length": 31.899328859060404,
"alnum_prop": 0.6686303387334315,
"repo_name": "pymager/pymager",
"id": "f27b08f91fb5f6c2db243f3cb04bff924c02cffd",
"size": "4753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymager/persistence/_transactional.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "154279"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
}
|
from kivy.uix.screenmanager import Screen
from kivy.uix.togglebutton import ToggleButton
from kivy.uix.popup import Popup
from kivy.lang import Builder
from kivy.utils import get_color_from_hex
from kivy.properties import NumericProperty
from screens import screenmanager
import random
__author__ = 'ohaz'
# Load this screen's kv layout definition at import time.
Builder.load_file('./screens/ingamescreen.kv')
# ----------------
# Global Variables
# ----------------
# This one should probably not be a global variable. Bad Design?
# Holds the first button of a pending colour swap (None when no swap is
# in progress); see ColourToggleButton.colour_press.
last_button = None
# A list of colours to use. Each element is a list of 2 colours - the lit one and a darker version of it.
colours = [['#FF0000', '#CC0000'], ['#FF00FF', '#CC00CC'], ['#0000FF', '#0000CC'], ['#00FF00', '#00CC00'],
           ['#FFFF00', '#CCCC00']]
class GameOverPopup(Popup):
    """
    The popup that appears if you either leave the game, or lose
    """
    def __init__(self, screen):
        """
        Initialise the popup
        :param screen: the screen the popup belongs to
        :return:
        """
        # NOTE(review): screen is assigned before Popup.__init__ runs —
        # presumably so kv rules can reference it during construction;
        # confirm before reordering.
        self.screen = screen
        super(GameOverPopup, self).__init__()
    def close(self):
        """
        Closes the popup. Also switches to the main menu
        :return:
        """
        self.dismiss()
        screenmanager.change_to('main_menu')
class ColourToggleButton(ToggleButton):
    """
    Use a toggle button as a way to display the coloured blocks.
    """
    def __init__(self, screen):
        """
        Initialise the Button
        :param screen: the screen this button belongs to
        :return:
        """
        super(ColourToggleButton, self).__init__()
        self.screen = screen
        # Visit marker used by IngameScreen.recursive_check's group search.
        self.visited = False
    def colour_press(self):
        """
        Event that gets fired when a button is pressed.

        The first press selects a button; the second press swaps the two
        buttons' colours and scores the resulting board.
        :return:
        """
        global last_button
        if last_button is None:
            # If there is no "last button press", set this as the latest one
            last_button = self
        else:
            # Another button has been pressed before. Switch the colours of the two
            # NOTE(review): any two buttons may be swapped — there is no
            # adjacency check here.
            last_button.background_color, self.background_color = self.background_color, last_button.background_color
            # Set their states back to normal and reset the last button pressed
            last_button.state = 'normal'
            self.state = 'normal'
            last_button = None
            # Check if the switch removed any blocks
            points = self.screen.check_removal()
            if points == 0:
                # If nothing has been removed, the player gets one step closer to losing
                self.screen.misses += 1
            else:
                # Give the player the points
                self.screen.points += points
            if self.screen.misses > 3:
                # Player has lost, leave the game
                self.screen.leave()
class IngameScreen(Screen):
    """
    Class for the Ingame Screen.
    Contains a few labels for the points and the misses, the game itself and a leave button.
    """
    # Properties for the wrong moves the player did and the points.
    misses = NumericProperty(0)
    points = NumericProperty(0)
    def on_enter(self, *args):
        """
        Event that gets fired when the player enters this screen.
        Does cleanup from previous games and prepares the new game
        :param args: arguments passed
        :return:
        """
        # Set the wrong moves and the points to 0
        self.misses = 0
        self.points = 0
        # Create a new random seed for colour generation
        random.seed()
        # Clear the grid from previous games
        grid = self.ids.grid
        grid.clear_widgets()
        for i in range(0, grid.rows * grid.cols):
            # Create new Coloured Buttons with random colours from the colours list
            c_button = ColourToggleButton(self)
            colour = random.randint(0, len(colours)-1)
            c_button.background_color = get_color_from_hex(colours[colour][0])
            grid.add_widget(c_button)
    def check_removal(self, multiplier=1):
        """
        Recursive function that checks if enough blocks with the same colour are next to each other
        :param multiplier: The point multiplier
        :return: Points the player gets for this step
        """
        # NOTE(review): all index arithmetic below relies on the ordering
        # of grid.children (Kivy keeps children in reverse add order) —
        # confirm before changing.
        children = self.ids.grid.children
        groups = []
        points = 0
        # Recursively check all children and creates groups with them
        for i, child in enumerate(children):
            if not child.visited:
                groups.append(self.recursive_check(i))
        # Reset visit status for the next pass
        for child in children:
            child.visited = False
        # Get the groups that contain more than 3 blocks of the same colour, calculate points and let new blocks fall
        high_groups = [x for x in groups if len(x) > 3]
        for g in high_groups:
            # I sort the blocks by reversed id, this helps in the implementation of how blocks fall
            # If this was unsorted, a block might get the colour of the block above that actually should get removed
            g.sort(reverse=True)
            points += multiplier * len(g)
            multiplier += 1
            for button_id in g:
                self.fall(button_id)
        if len(high_groups) > 0:
            # Removing blocks may create new groups: score the cascade too.
            return self.check_removal(multiplier) + points
        else:
            # No group was removed, so points is necessarily 0 here.
            return 0
    def fall(self, current):
        """
        Gravity implementation. Blocks fall down (also spawns new ones)
        :param current: the block we are currently visiting
        :return:
        """
        grid = self.ids.grid
        children = grid.children
        # The block above the current one is the one with the higher id
        child_above = current + grid.cols
        # the top row of blocks starts with this id
        topmost = len(grid.children) - grid.cols
        if child_above > topmost:
            # We are in the top row, generate new coloured block
            colour = random.randint(0, len(colours)-1)
            children[current].background_color = get_color_from_hex(colours[colour][0])
        else:
            # Let the block on top of us fall down and do the same for the block above
            # (recursion walks one column upward until the top row is reached).
            children[current].background_color = children[child_above].background_color
            self.fall(child_above)
    def recursive_check(self, current):
        """
        Recursively check the blocks for groups
        :param current: the block currently visited
        :return: a list of blocks
        """
        grid = self.ids.grid
        children = grid.children
        own_color = children[current].background_color
        children[current].visited = True
        own_list = [current]
        # Get all children next to the current one
        # (None means the neighbour would be outside the grid).
        child_top = current - grid.cols
        if child_top < 0:
            child_top = None
        child_bot = current + grid.cols
        if child_bot >= grid.rows * grid.cols:
            child_bot = None
        child_left = None
        child_right = None
        if current % grid.cols > 0:
            child_left = current - 1
        if current % grid.cols < grid.cols - 1:
            child_right = current + 1
        children_next = [child_top, child_bot, child_left, child_right]
        # Check if children need to get added to the list
        for child in children_next:
            if child is not None:
                if children[child].background_color == own_color and not children[child].visited:
                    own_list.extend(self.recursive_check(child))
        return own_list
    def leave(self):
        """
        Leave the game. Creates a popup that shows the score
        :return:
        """
        p = GameOverPopup(self)
        p.open()
|
{
"content_hash": "962ddef10d6e45e4b87a717ce22c834a",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 117,
"avg_line_length": 34.839285714285715,
"alnum_prop": 0.5934136340338289,
"repo_name": "ohaz/Colours",
"id": "12534806fe2941e8ec58e2bc1e352fcdf5f1d4c9",
"size": "7804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "screens/ingamescreen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10966"
}
],
"symlink_target": ""
}
|
"""Put user defined tasks in the plugins folder. You can start with
some customizations in this file which is included by default."""
from invoke import task
|
{
"content_hash": "e6fc51bdfb19fca43f78385b3e63f52f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 67,
"avg_line_length": 26.833333333333332,
"alnum_prop": 0.7701863354037267,
"repo_name": "ADicksonLab/wepy",
"id": "8bbc62ffdfb19d1bfd927254c21c8b382c40331e",
"size": "161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/plugins/custom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "51"
},
{
"name": "CSS",
"bytes": "920"
},
{
"name": "Dockerfile",
"bytes": "421"
},
{
"name": "HTML",
"bytes": "5283"
},
{
"name": "Makefile",
"bytes": "581"
},
{
"name": "Python",
"bytes": "1512860"
},
{
"name": "Shell",
"bytes": "7263"
},
{
"name": "TeX",
"bytes": "9643"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, print_function, division)
import os
import select
import socket
import sys
import threading
import time
import traceback
import binascii
from six.moves import range
import certifi
from backports import ssl_match_hostname
import six
import OpenSSL
from OpenSSL import SSL
from . import certutils, version_check, utils
# This is a rather hackish way to make sure that
# the latest version of pyOpenSSL is actually installed.
from netlib.exceptions import InvalidCertificateException, TcpReadIncomplete, TlsException, \
TcpTimeout, TcpDisconnect, TcpException
version_check.check_pyopenssl_version()
# The raw socket file-object wrapper differs between Python 2 and 3.
if six.PY2:
    socket_fileobject = socket._fileobject
else:
    socket_fileobject = socket.SocketIO
# errno of an interrupted system call; select() can fail with this.
EINTR = 4
# The NO_ALPN environment variable force-disables ALPN support.
if os.environ.get("NO_ALPN"):
    HAS_ALPN = False
else:
    HAS_ALPN = OpenSSL._util.lib.Cryptography_HAS_ALPN
# To enable all SSL methods use: SSLv23
# then add options to disable certain methods
# https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
SSL_BASIC_OPTIONS = (
    SSL.OP_CIPHER_SERVER_PREFERENCE
)
if hasattr(SSL, "OP_NO_COMPRESSION"):
    SSL_BASIC_OPTIONS |= SSL.OP_NO_COMPRESSION
SSL_DEFAULT_METHOD = SSL.SSLv23_METHOD
SSL_DEFAULT_OPTIONS = (
    SSL.OP_NO_SSLv2 |
    SSL.OP_NO_SSLv3 |
    SSL_BASIC_OPTIONS
)
# NOTE(review): redundant — OP_NO_COMPRESSION was already OR-ed into
# SSL_BASIC_OPTIONS above, which is part of SSL_DEFAULT_OPTIONS.
if hasattr(SSL, "OP_NO_COMPRESSION"):
    SSL_DEFAULT_OPTIONS |= SSL.OP_NO_COMPRESSION
"""
Map a reasonable SSL version specification into the format OpenSSL expects.
Don't ask...
https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
"""
sslversion_choices = {
    "all": (SSL.SSLv23_METHOD, SSL_BASIC_OPTIONS),
    # SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
    # TLSv1_METHOD would be TLS 1.0 only
    "secure": (SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3 | SSL_BASIC_OPTIONS)),
    "SSLv2": (SSL.SSLv2_METHOD, SSL_BASIC_OPTIONS),
    "SSLv3": (SSL.SSLv3_METHOD, SSL_BASIC_OPTIONS),
    "TLSv1": (SSL.TLSv1_METHOD, SSL_BASIC_OPTIONS),
    "TLSv1_1": (SSL.TLSv1_1_METHOD, SSL_BASIC_OPTIONS),
    "TLSv1_2": (SSL.TLSv1_2_METHOD, SSL_BASIC_OPTIONS),
}
class SSLKeyLogger(object):
    """pyOpenSSL info callback that appends TLS master secrets to a file
    in CLIENT_RANDOM key-log format (consumable by e.g. Wireshark)."""
    def __init__(self, filename):
        self.filename = filename
        # File handle, opened lazily on the first completed handshake.
        self.f = None
        # Serializes lazy file creation and writes across connections.
        self.lock = threading.Lock()
    # required for functools.wraps, which pyOpenSSL uses.
    __name__ = "SSLKeyLogger"
    def __call__(self, connection, where, ret):
        # Only log after a successfully completed handshake.
        if where == SSL.SSL_CB_HANDSHAKE_DONE and ret == 1:
            with self.lock:
                if not self.f:
                    d = os.path.dirname(self.filename)
                    if not os.path.isdir(d):
                        os.makedirs(d)
                    self.f = open(self.filename, "ab")
                    self.f.write(b"\r\n")
                client_random = binascii.hexlify(connection.client_random())
                masterkey = binascii.hexlify(connection.master_key())
                self.f.write(b"CLIENT_RANDOM %s %s\r\n" % (client_random, masterkey))
                self.f.flush()
    def close(self):
        with self.lock:
            if self.f:
                self.f.close()
    @staticmethod
    def create_logfun(filename):
        # Returns a logger instance when a filename is configured,
        # otherwise False (so the callback is simply not installed).
        if filename:
            return SSLKeyLogger(filename)
        return False
# Module-level key-log callback: active when MITMPROXY_SSLKEYLOGFILE or
# SSLKEYLOGFILE is set in the environment, otherwise False (disabled).
log_ssl_key = SSLKeyLogger.create_logfun(
    os.getenv("MITMPROXY_SSLKEYLOGFILE") or os.getenv("SSLKEYLOGFILE"))
class _FileLike(object):
BLOCKSIZE = 1024 * 32
def __init__(self, o):
self.o = o
self._log = None
self.first_byte_timestamp = None
def set_descriptor(self, o):
self.o = o
def __getattr__(self, attr):
return getattr(self.o, attr)
def start_log(self):
"""
Starts or resets the log.
This will store all bytes read or written.
"""
self._log = []
def stop_log(self):
"""
Stops the log.
"""
self._log = None
def is_logging(self):
return self._log is not None
def get_log(self):
"""
Returns the log as a string.
"""
if not self.is_logging():
raise ValueError("Not logging!")
return b"".join(self._log)
def add_log(self, v):
if self.is_logging():
self._log.append(v)
def reset_timestamps(self):
self.first_byte_timestamp = None
class Writer(_FileLike):
    """Write side of a connection: low-level errors become TcpDisconnect."""

    def flush(self):
        """
        May raise TcpDisconnect
        """
        if not hasattr(self.o, "flush"):
            return
        try:
            self.o.flush()
        except (socket.error, IOError) as v:
            raise TcpDisconnect(str(v))

    def write(self, v):
        """
        May raise TcpDisconnect
        """
        if not v:
            return
        # Remember when the first byte went out.
        self.first_byte_timestamp = self.first_byte_timestamp or time.time()
        try:
            if hasattr(self.o, "sendall"):
                # Socket-like object: sendall transmits everything.
                self.add_log(v)
                return self.o.sendall(v)
            # Plain file-like object: may perform a partial write.
            r = self.o.write(v)
            self.add_log(v[:r])
            return r
        except (SSL.Error, socket.error) as e:
            raise TcpDisconnect(str(e))
class Reader(_FileLike):
    """Read side of a connection: translates socket/SSL errors into the
    netlib exception hierarchy and supports peeking without consuming."""
    def read(self, length):
        """
        If length is -1, we read until connection closes.
        """
        result = b''
        start = time.time()
        while length == -1 or length > 0:
            # Read in BLOCKSIZE chunks, never more than requested.
            if length == -1 or length > self.BLOCKSIZE:
                rlen = self.BLOCKSIZE
            else:
                rlen = length
            try:
                data = self.o.read(rlen)
            except SSL.ZeroReturnError:
                # TLS connection was shut down cleanly
                break
            except (SSL.WantWriteError, SSL.WantReadError):
                # From the OpenSSL docs:
                # If the underlying BIO is non-blocking, SSL_read() will also return when the
                # underlying BIO could not satisfy the needs of SSL_read() to continue the
                # operation. In this case a call to SSL_get_error with the return value of
                # SSL_read() will yield SSL_ERROR_WANT_READ or SSL_ERROR_WANT_WRITE.
                # Retry with a short sleep until the wrapped object's
                # timeout budget is exhausted.
                if (time.time() - start) < self.o.gettimeout():
                    time.sleep(0.1)
                    continue
                else:
                    raise TcpTimeout()
            except socket.timeout:
                raise TcpTimeout()
            except socket.error as e:
                raise TcpDisconnect(str(e))
            except SSL.SysCallError as e:
                # An abrupt EOF during TLS is treated as end-of-stream.
                if e.args == (-1, 'Unexpected EOF'):
                    break
                raise TlsException(str(e))
            except SSL.Error as e:
                raise TlsException(str(e))
            self.first_byte_timestamp = self.first_byte_timestamp or time.time()
            if not data:
                break
            result += data
            if length != -1:
                length -= len(data)
        self.add_log(result)
        return result
    def readline(self, size=None):
        # Read up to *size* bytes, stopping after the first b'\n'
        # (included in the result) or at EOF.
        result = b''
        bytes_read = 0
        while True:
            if size is not None and bytes_read >= size:
                break
            ch = self.read(1)
            bytes_read += 1
            if not ch:
                break
            else:
                result += ch
                if ch == b'\n':
                    break
        return result
    def safe_read(self, length):
        """
        Like .read, but is guaranteed to either return length bytes, or
        raise an exception.
        """
        result = self.read(length)
        if length != -1 and len(result) != length:
            if not result:
                raise TcpDisconnect()
            else:
                raise TcpReadIncomplete(
                    "Expected %s bytes, got %s" % (length, len(result))
                )
        return result
    def peek(self, length):
        """
        Tries to peek into the underlying file object.
        Returns:
            Up to the next N bytes if peeking is successful.
        Raises:
            TcpException if there was an error with the socket
            TlsException if there was an error with pyOpenSSL.
            NotImplementedError if the underlying file object is not a [pyOpenSSL] socket
        """
        if isinstance(self.o, socket_fileobject):
            try:
                # MSG_PEEK reads without consuming from the socket buffer.
                return self.o._sock.recv(length, socket.MSG_PEEK)
            except socket.error as e:
                raise TcpException(repr(e))
        elif isinstance(self.o, SSL.Connection):
            try:
                if tuple(int(x) for x in OpenSSL.__version__.split(".")[:2]) > (0, 15):
                    return self.o.recv(length, socket.MSG_PEEK)
                else:
                    # TODO: remove once a new version is released
                    # Polyfill for pyOpenSSL <= 0.15.1
                    # Taken from https://github.com/pyca/pyopenssl/commit/1d95dea7fea03c7c0df345a5ea30c12d8a0378d2
                    buf = SSL._ffi.new("char[]", length)
                    result = SSL._lib.SSL_peek(self.o._ssl, buf, length)
                    self.o._raise_ssl_error(self.o._ssl, result)
                    return SSL._ffi.buffer(buf, result)[:]
            except SSL.Error as e:
                six.reraise(TlsException, TlsException(str(e)), sys.exc_info()[2])
        else:
            raise NotImplementedError("Can only peek into (pyOpenSSL) sockets")
class Address(utils.Serializable):
    """
    This class wraps an IPv4/IPv6 tuple to provide named attributes and
    ipv6 information.
    """

    def __init__(self, address, use_ipv6=False):
        self.address = tuple(address)
        self.use_ipv6 = use_ipv6

    def get_state(self):
        return dict(address=self.address, use_ipv6=self.use_ipv6)

    def set_state(self, state):
        self.address = state["address"]
        self.use_ipv6 = state["use_ipv6"]

    @classmethod
    def from_state(cls, state):
        return Address(**state)

    @classmethod
    def wrap(cls, t):
        # Idempotent: already-wrapped addresses pass through unchanged.
        if isinstance(t, cls):
            return t
        return cls(t)

    def __call__(self):
        # The raw (host, port) tuple, as expected by the socket API.
        return self.address

    @property
    def host(self):
        return self.address[0]

    @property
    def port(self):
        return self.address[1]

    @property
    def use_ipv6(self):
        return self.family == socket.AF_INET6

    @use_ipv6.setter
    def use_ipv6(self, b):
        if b:
            self.family = socket.AF_INET6
        else:
            self.family = socket.AF_INET

    def __repr__(self):
        return "%s:%s" % (self.host, self.port)

    def __eq__(self, other):
        if not other:
            return False
        other = Address.wrap(other)
        return (self.address, self.family) == (other.address, other.family)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        # Deliberately different from hash(self.address) alone.
        return hash(self.address) ^ 42
def ssl_read_select(rlist, timeout):
    """
    This is a wrapper around select.select() which also works for SSL.Connections
    by taking ssl_connection.pending() into account.
    Caveats:
        If .pending() > 0 for any of the connections in rlist, we avoid the select syscall
        and **will not include any other connections which may or may not be ready**.
    Args:
        rlist: wait until ready for reading
    Returns:
        subset of rlist which is ready for reading.
    """
    # TLS connections may have already-decrypted bytes buffered; those
    # would never show up on the raw socket, so check pending() first.
    with_pending = [
        conn for conn in rlist
        if isinstance(conn, SSL.Connection) and conn.pending() > 0
    ]
    if with_pending:
        return with_pending
    return select.select(rlist, (), (), timeout)[0]
def close_socket(sock):
    """
    Does a hard close of a socket, without emitting a RST.
    """
    try:
        # Signal that we are done sending.
        # May raise "Transport endpoint is not connected" on Linux.
        sock.shutdown(socket.SHUT_WR)
        # RFC 1122 section 4.2.2.13: close() with pending readable data can
        # trigger an immediate RST (observed on Windows), which may then
        # reach the client before our final payload (e.g. an error page).
        # To avoid that, drain the read buffer on Windows before closing
        # the read side. See
        # https://github.com/mitmproxy/mitmproxy/issues/527#issuecomment-93782988
        if os.name == "nt":  # pragma: no cover
            # Some peers never send a TCP FIN, so an un-timed recv() could
            # block forever; force a timeout even in blocking mode.
            sock.settimeout(sock.gettimeout() or 20)
            # Cap the drain (256Ki reads of 4 KiB each) so we never loop forever.
            for _ in range(1024 ** 3 // 4096):
                if not sock.recv(4096):
                    break
        # Now shut down the receiving half as well.
        sock.shutdown(socket.SHUT_RD)
    except socket.error:
        pass
    sock.close()
class _Connection(object):
    """Common base for client/server connections: wraps a raw socket or an
    SSL.Connection and exposes Reader/Writer file objects (.rfile/.wfile)."""
    # Buffer sizes passed to makefile() on Python 2 (-1 = default buffering).
    rbufsize = -1
    wbufsize = -1
    def _makefile(self):
        """
        Set up .rfile and .wfile attributes from .connection
        """
        # Ideally, we would use the Buffered IO in Python 3 by default.
        # Unfortunately, the implementation of .peek() is broken for n>1 bytes,
        # as it may just return what's left in the buffer and not all the bytes we want.
        # As a workaround, we just use unbuffered sockets directly.
        # https://mail.python.org/pipermail/python-dev/2009-June/089986.html
        if six.PY2:
            self.rfile = Reader(self.connection.makefile('rb', self.rbufsize))
            self.wfile = Writer(self.connection.makefile('wb', self.wbufsize))
        else:
            self.rfile = Reader(socket.SocketIO(self.connection, "rb"))
            self.wfile = Writer(socket.SocketIO(self.connection, "wb"))
    def __init__(self, connection):
        if connection:
            self.connection = connection
            self._makefile()
        else:
            # Placeholder state; subclasses (e.g. TCPClient) connect later.
            self.connection = None
            self.rfile = None
            self.wfile = None
        self.ssl_established = False
        self.finished = False
    def get_current_cipher(self):
        # (name, bits, version) of the negotiated cipher, or None when no
        # TLS session is established.
        if not self.ssl_established:
            return None
        name = self.connection.get_cipher_name()
        bits = self.connection.get_cipher_bits()
        version = self.connection.get_cipher_version()
        return name, bits, version
    def finish(self):
        self.finished = True
        # If we have an SSL connection, wfile.close == connection.close
        # (We call _FileLike.set_descriptor(conn))
        # Closing the socket is not our task, therefore we don't call close
        # then.
        if not isinstance(self.connection, SSL.Connection):
            if not getattr(self.wfile, "closed", False):
                try:
                    self.wfile.flush()
                    self.wfile.close()
                except TcpDisconnect:
                    pass
            self.rfile.close()
        else:
            try:
                self.connection.shutdown()
            except SSL.Error:
                pass
    def _create_ssl_context(self,
                            method=SSL_DEFAULT_METHOD,
                            options=SSL_DEFAULT_OPTIONS,
                            verify_options=SSL.VERIFY_NONE,
                            ca_path=None,
                            ca_pemfile=None,
                            cipher_list=None,
                            alpn_protos=None,
                            alpn_select=None,
                            alpn_select_callback=None,
                            ):
        """
        Creates an SSL Context.
        :param method: One of SSLv2_METHOD, SSLv3_METHOD, SSLv23_METHOD, TLSv1_METHOD, TLSv1_1_METHOD, or TLSv1_2_METHOD
        :param options: A bit field consisting of OpenSSL.SSL.OP_* values
        :param verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
        :param ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
        :param ca_pemfile: Path to a PEM formatted trusted CA certificate
        :param cipher_list: A textual OpenSSL cipher list, see https://www.openssl.org/docs/apps/ciphers.html
        :rtype : SSL.Context
        """
        context = SSL.Context(method)
        # Options (NO_SSLv2/3)
        if options is not None:
            context.set_options(options)
        # Verify Options (NONE/PEER and trusted CAs)
        if verify_options is not None:
            def verify_cert(conn, x509, errno, err_depth, is_cert_verified):
                # Remember the verification failure so callers can raise a
                # precise InvalidCertificateException after the handshake.
                if not is_cert_verified:
                    self.ssl_verification_error = dict(errno=errno,
                                                       depth=err_depth)
                return is_cert_verified
            context.set_verify(verify_options, verify_cert)
            if ca_path is None and ca_pemfile is None:
                # Fall back to certifi's bundled Mozilla CA store.
                ca_pemfile = certifi.where()
            context.load_verify_locations(ca_pemfile, ca_path)
        # Workaround for
        # https://github.com/pyca/pyopenssl/issues/190
        # https://github.com/mitmproxy/mitmproxy/issues/472
        # Options already set before are not cleared.
        context.set_mode(SSL._lib.SSL_MODE_AUTO_RETRY)
        # Cipher List
        if cipher_list:
            try:
                context.set_cipher_list(cipher_list)
                # TODO: maybe change this to with newer pyOpenSSL APIs
                context.set_tmp_ecdh(OpenSSL.crypto.get_elliptic_curve('prime256v1'))
            except SSL.Error as v:
                raise TlsException("SSL cipher specification error: %s" % str(v))
        # SSLKEYLOGFILE
        if log_ssl_key:
            context.set_info_callback(log_ssl_key)
        if HAS_ALPN:
            if alpn_protos is not None:
                # advertise application layer protocols
                context.set_alpn_protos(alpn_protos)
            elif alpn_select is not None and alpn_select_callback is None:
                # select application layer protocol
                def alpn_select_callback(conn_, options):
                    if alpn_select in options:
                        return bytes(alpn_select)
                    else:  # pragma no cover
                        return options[0]
                context.set_alpn_select_callback(alpn_select_callback)
            elif alpn_select_callback is not None and alpn_select is None:
                context.set_alpn_select_callback(alpn_select_callback)
            elif alpn_select_callback is not None and alpn_select is not None:
                raise TlsException("ALPN error: only define alpn_select (string) OR alpn_select_callback (method).")
        return context
class TCPClient(_Connection):
    """An outgoing TCP connection, optionally upgradable to SSL/TLS via
    convert_to_ssl()."""
    def __init__(self, address, source_address=None):
        super(TCPClient, self).__init__(None)
        self.address = address
        self.source_address = source_address
        # Peer certificate and chain, populated by convert_to_ssl().
        self.cert = None
        self.server_certs = []
        self.ssl_verification_error = None
        self.sni = None
    @property
    def address(self):
        return self.__address
    @address.setter
    def address(self, address):
        # Normalize to an Address instance (or None).
        if address:
            self.__address = Address.wrap(address)
        else:
            self.__address = None
    @property
    def source_address(self):
        return self.__source_address
    @source_address.setter
    def source_address(self, source_address):
        if source_address:
            self.__source_address = Address.wrap(source_address)
        else:
            self.__source_address = None
    def close(self):
        # Make sure to close the real socket, not the SSL proxy.
        # OpenSSL is really good at screwing up, i.e. when trying to recv from a failed connection,
        # it tries to renegotiate...
        if isinstance(self.connection, SSL.Connection):
            close_socket(self.connection._socket)
        else:
            close_socket(self.connection)
    def create_ssl_context(self, cert=None, alpn_protos=None, **sslctx_kwargs):
        context = self._create_ssl_context(
            alpn_protos=alpn_protos,
            **sslctx_kwargs)
        # Client Certs
        if cert:
            try:
                # cert must contain both the private key and certificate.
                context.use_privatekey_file(cert)
                context.use_certificate_file(cert)
            except SSL.Error as v:
                raise TlsException("SSL client certificate error: %s" % str(v))
        return context
    def convert_to_ssl(self, sni=None, alpn_protos=None, **sslctx_kwargs):
        """
        cert: Path to a file containing both client cert and private key.
        options: A bit field consisting of OpenSSL.SSL.OP_* values
        verify_options: A bit field consisting of OpenSSL.SSL.VERIFY_* values
        ca_path: Path to a directory of trusted CA certificates prepared using the c_rehash tool
        ca_pemfile: Path to a PEM formatted trusted CA certificate
        """
        verification_mode = sslctx_kwargs.get('verify_options', None)
        if verification_mode == SSL.VERIFY_PEER and not sni:
            raise TlsException("Cannot validate certificate hostname without SNI")
        context = self.create_ssl_context(
            alpn_protos=alpn_protos,
            **sslctx_kwargs
        )
        # Wrap the existing plain socket in an SSL.Connection.
        self.connection = SSL.Connection(context, self.connection)
        if sni:
            self.sni = sni
            self.connection.set_tlsext_host_name(sni)
        self.connection.set_connect_state()
        try:
            self.connection.do_handshake()
        except SSL.Error as v:
            if self.ssl_verification_error:
                raise InvalidCertificateException("SSL handshake error: %s" % repr(v))
            else:
                raise TlsException("SSL handshake error: %s" % repr(v))
        else:
            # Fix for pre v1.0 OpenSSL, which doesn't throw an exception on
            # certificate validation failure
            if verification_mode == SSL.VERIFY_PEER and self.ssl_verification_error is not None:
                raise InvalidCertificateException("SSL handshake error: certificate verify failed")
        self.cert = certutils.SSLCert(self.connection.get_peer_certificate())
        # Keep all server certificates in a list
        for i in self.connection.get_peer_cert_chain():
            self.server_certs.append(certutils.SSLCert(i))
        # Validate TLS Hostname
        try:
            # Build the cert dict shape expected by match_hostname().
            crt = dict(
                subjectAltName=[("DNS", x.decode("ascii", "strict")) for x in self.cert.altnames]
            )
            if self.cert.cn:
                crt["subject"] = [[["commonName", self.cert.cn.decode("ascii", "strict")]]]
            if sni:
                hostname = sni.decode("ascii", "strict")
            else:
                hostname = "no-hostname"
            ssl_match_hostname.match_hostname(crt, hostname)
        except (ValueError, ssl_match_hostname.CertificateError) as e:
            self.ssl_verification_error = dict(depth=0, errno="Invalid Hostname")
            # Only fatal in strict verification mode; otherwise recorded.
            if verification_mode == SSL.VERIFY_PEER:
                raise InvalidCertificateException("Presented certificate for {} is not valid: {}".format(sni, str(e)))
        self.ssl_established = True
        # Route subsequent reads/writes through the TLS connection.
        self.rfile.set_descriptor(self.connection)
        self.wfile.set_descriptor(self.connection)
    def connect(self):
        try:
            connection = socket.socket(self.address.family, socket.SOCK_STREAM)
            if self.source_address:
                connection.bind(self.source_address())
            connection.connect(self.address())
            # Record the OS-assigned local endpoint.
            self.source_address = Address(connection.getsockname())
        except (socket.error, IOError) as err:
            raise TcpException(
                'Error connecting to "%s": %s' %
                (self.address.host, err))
        self.connection = connection
        self._makefile()
    def settimeout(self, n):
        self.connection.settimeout(n)
    def gettimeout(self):
        return self.connection.gettimeout()
    def get_alpn_proto_negotiated(self):
        # Empty bytes when ALPN is unsupported or TLS is not established.
        if HAS_ALPN and self.ssl_established:
            return self.connection.get_alpn_proto_negotiated()
        else:
            return b""
class BaseHandler(_Connection):
    """
    The instantiator is expected to call the handle() and finish() methods.
    """
    def __init__(self, connection, address, server):
        super(BaseHandler, self).__init__(connection)
        self.address = Address.wrap(address)
        self.server = server
        # Populated by the save_cert verify callback when the peer
        # presents a client certificate (see create_ssl_context).
        self.clientcert = None
    def create_ssl_context(self,
                           cert, key,
                           handle_sni=None,
                           request_client_cert=None,
                           chain_file=None,
                           dhparams=None,
                           extra_chain_certs=None,
                           **sslctx_kwargs):
        """
        cert: A certutils.SSLCert object or the path to a certificate
        chain file.
        handle_sni: SNI handler, should take a connection object. Server
        name can be retrieved like this:
                connection.get_servername()
        And you can specify the connection keys as follows:
                new_context = Context(TLSv1_METHOD)
                new_context.use_privatekey(key)
                new_context.use_certificate(cert)
                connection.set_context(new_context)
        The request_client_cert argument requires some explanation. We're
        supposed to be able to do this with no negative effects - if the
        client has no cert to present, we're notified and proceed as usual.
        Unfortunately, Android seems to have a bug (tested on 4.2.2) - when
        an Android client is asked to present a certificate it does not
        have, it hangs up, which is frankly bogus. Some time down the track
        we may be able to make the proper behaviour the default again, but
        until then we're conservative.
        """
        context = self._create_ssl_context(**sslctx_kwargs)
        context.use_privatekey(key)
        if isinstance(cert, certutils.SSLCert):
            context.use_certificate(cert.x509)
        else:
            context.use_certificate_chain_file(cert)
        if extra_chain_certs:
            for i in extra_chain_certs:
                context.add_extra_chain_cert(i.x509)
        if handle_sni:
            # SNI callback happens during do_handshake()
            context.set_tlsext_servername_callback(handle_sni)
        if request_client_cert:
            def save_cert(conn_, cert, errno_, depth_, preverify_ok_):
                self.clientcert = certutils.SSLCert(cert)
                # Return true to prevent cert verification error
                return True
            context.set_verify(SSL.VERIFY_PEER, save_cert)
        # Cert Verify
        if chain_file:
            context.load_verify_locations(chain_file)
        if dhparams:
            SSL._lib.SSL_CTX_set_tmp_dh(context._context, dhparams)
        return context
    def convert_to_ssl(self, cert, key, **sslctx_kwargs):
        """
        Convert connection to SSL.
        For a list of parameters, see BaseHandler._create_ssl_context(...)
        """
        context = self.create_ssl_context(
            cert,
            key,
            **sslctx_kwargs)
        self.connection = SSL.Connection(context, self.connection)
        # Server side of the handshake.
        self.connection.set_accept_state()
        try:
            self.connection.do_handshake()
        except SSL.Error as v:
            raise TlsException("SSL handshake error: %s" % repr(v))
        self.ssl_established = True
        # Route subsequent reads/writes through the TLS connection.
        self.rfile.set_descriptor(self.connection)
        self.wfile.set_descriptor(self.connection)
    def handle(self):  # pragma: no cover
        # Subclasses implement the per-connection protocol logic here.
        raise NotImplementedError
    def settimeout(self, n):
        self.connection.settimeout(n)
    def get_alpn_proto_negotiated(self):
        # Empty bytes when ALPN is unsupported or TLS is not established.
        if HAS_ALPN and self.ssl_established:
            return self.connection.get_alpn_proto_negotiated()
        else:
            return b""
class TCPServer(object):
    """A threaded TCP server: serve_forever() accepts connections and
    dispatches each one to handle_client_connection() on its own thread."""
    # listen() backlog size.
    request_queue_size = 20
    def __init__(self, address):
        self.address = Address.wrap(address)
        # Event used to signal that serve_forever() has fully stopped.
        self.__is_shut_down = threading.Event()
        self.__shutdown_request = False
        self.socket = socket.socket(self.address.family, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(self.address())
        # Re-read the address: the OS may have assigned an ephemeral port.
        self.address = Address.wrap(self.socket.getsockname())
        self.socket.listen(self.request_queue_size)
    def connection_thread(self, connection, client_address):
        """Per-connection thread body: handle, report errors, always close."""
        client_address = Address(client_address)
        try:
            self.handle_client_connection(connection, client_address)
        except:
            self.handle_error(connection, client_address)
        finally:
            close_socket(connection)
    def serve_forever(self, poll_interval=0.1):
        """Accept connections until shutdown() is called, polling every
        *poll_interval* seconds so the shutdown flag is noticed promptly."""
        self.__is_shut_down.clear()
        try:
            while not self.__shutdown_request:
                try:
                    r, w_, e_ = select.select(
                        [self.socket], [], [], poll_interval)
                except select.error as ex:  # pragma: no cover
                    # Fix: ex[0] only works on Python 2; on Python 3
                    # select.error is OSError, so read args explicitly.
                    if ex.args[0] == EINTR:
                        continue
                    else:
                        raise
                if self.socket in r:
                    connection, client_address = self.socket.accept()
                    t = threading.Thread(
                        target=self.connection_thread,
                        args=(connection, client_address),
                        name="ConnectionThread (%s:%s -> %s:%s)" %
                             (client_address[0], client_address[1],
                              self.address.host, self.address.port)
                    )
                    # Daemon thread: don't block interpreter exit.
                    t.daemon = True
                    try:
                        t.start()
                    except threading.ThreadError:
                        self.handle_error(connection, Address(client_address))
                        connection.close()
        finally:
            self.__shutdown_request = False
            self.__is_shut_down.set()
    def shutdown(self):
        """Stop serve_forever(), wait for it to exit, and close the socket."""
        self.__shutdown_request = True
        self.__is_shut_down.wait()
        self.socket.close()
        self.handle_shutdown()
    def handle_error(self, connection_, client_address, fp=sys.stderr):
        """
        Called when handle_client_connection raises an exception.
        """
        # If a thread has persisted after interpreter exit, the module might be
        # none.
        if traceback:
            exc = six.text_type(traceback.format_exc())
            print(u'-' * 40, file=fp)
            print(
                u"Error in processing of request from %s" % repr(client_address), file=fp)
            print(exc, file=fp)
            print(u'-' * 40, file=fp)
    def handle_client_connection(self, conn, client_address):  # pragma: no cover
        """
        Called after client connection.
        """
        raise NotImplementedError
    def handle_shutdown(self):
        """
        Called after server shutdown.
        """
|
{
"content_hash": "843954694f5c8ed51a0e101f9cfd0ff8",
"timestamp": "",
"source": "github",
"line_count": 917,
"max_line_length": 160,
"avg_line_length": 35.0567066521265,
"alnum_prop": 0.5670513578249915,
"repo_name": "ikoz/mitmproxy",
"id": "68a7127023d33b8764540ac967452e81cbb6517d",
"size": "32147",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netlib/tcp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "405"
},
{
"name": "CSS",
"bytes": "210214"
},
{
"name": "HTML",
"bytes": "57526"
},
{
"name": "JavaScript",
"bytes": "1945184"
},
{
"name": "Python",
"bytes": "1199268"
},
{
"name": "Shell",
"bytes": "3966"
}
],
"symlink_target": ""
}
|
import sys
import Queue
import threading
import pykeyvi
import argparse
import os
import gzip
import unicodedata, re
# All ASCII control characters 0x00-0x1f (built with unichr: Python 2 only).
control_chars = ''.join(map(unichr, range(0,32)))
# Pre-compiled character class matching any single control character.
control_char_re = re.compile('[%s]' % re.escape(control_chars))
def remove_control_chars(s):
    # Strip every ASCII control character from s; used to validate keys.
    return control_char_re.sub('', s)
def print_progress(a,b):
    # Progress callback handed to pykeyvi's Compile(): a of b units done.
    print "Progress {}/{}".format(a,b)
def compile_worker():
    # Daemon-thread worker: drains (compiler, output_path) jobs from
    # compile_queue, compiles each dictionary and writes it to disk.
    # Runs forever; the process exits when the main thread finishes.
    while True:
        compiler, output = compile_queue.get()
        compiler.Compile(print_progress)
        compiler.WriteToFile(output)
        compile_queue.task_done()
# Shared FIFO of (compiler, output_path) jobs consumed by compile_worker threads.
compile_queue = Queue.Queue()
def compile_file(input, output, jobs, shards):
    """Shard TSV (key<TAB>value) input into `shards` keyvi dictionaries.

    `input` may be a single file, a gzip file, or a directory of files.
    Keys are routed to shards by consistent hashing; `jobs` worker threads
    compile the shards, written as <output>-<shard_index>.
    """
    skipped_keys = 0
    compilers = {}
    for i in range (0, shards):
        compilers[i] = pykeyvi.JsonDictionaryCompiler()
    if os.path.isdir(input):
        input_files = [os.path.join(input,d) for d in os.listdir(input)]
    else:
        input_files = [input]
    for input_file in input_files:
        if input_file.endswith(".gz"):
            input_fd = gzip.open(input_file)
        else:
            input_fd = open(input_file)
        for line in input_fd:
            try:
                parts = line.split("\t")
                key = parts[0]
                # Keys containing control characters are reported (hex-dumped)
                # but note: they are still added below — presumably intended
                # to skip; TODO confirm.
                if key != remove_control_chars(key):
                    print "skip key: " + ":".join("{:02x}".format(ord(c)) for c in key) + " due to containing control characters"
                    skipped_keys +=1
                value = parts[1]
                shard = pykeyvi.JumpConsistentHashString(key, shards)
                compilers[shard].Add(key, value)
            except:
                # Malformed lines (e.g. missing value column) are logged and
                # dropped; compilation continues.
                print "failed to add: " + line
    print "Skipped keys " + str(skipped_keys)
    for i in range(jobs):
        t = threading.Thread(target=compile_worker)
        t.daemon = True
        t.start()
    for i in range (0, shards):
        compile_queue.put((compilers[i], output + "-" + str(i)))
    # Block until every shard has been compiled and written.
    compile_queue.join()
# Command-line options as (short flag, long flag, type, default, help) tuples.
ARGV = [
    ('-i', '--input', str, None, 'input file'),
    ('-o', '--output', str, None, 'output'),
    ('-b', '--bucket', str, None, 's3 bucket to read from'),
    ('-k', '--s3key', str, None, 's3 key/folder to read from'),
    ('-j', '--jobs', int, 1, 'number of parallel jobs'),
    ('-s', '--shards', int, 1, 'number of shards'),
]
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Compile json keyvi dictionary')
    for arg in ARGV:
        # arg = (short flag, long flag, type, default, help)
        parser.add_argument(*arg[0:2], type=arg[2], default=arg[3], help=arg[4])
    args = parser.parse_args()
    # Only the local-file path is wired up; the s3 options are parsed but
    # unused here.
    if args.input:
        compile_file(args.input, args.output, args.jobs, args.shards)
|
{
"content_hash": "9cbe206ec0d07f973db77661ce6b4e9d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 129,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.5526509572901326,
"repo_name": "DavidNemeskey/keyvi",
"id": "582501e265f09ea57ae0130e97f487cc0a485891",
"size": "2762",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pykeyvi/scripts/compile_json.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "7256"
},
{
"name": "C++",
"bytes": "455845"
},
{
"name": "Python",
"bytes": "73622"
},
{
"name": "Shell",
"bytes": "484"
}
],
"symlink_target": ""
}
|
import datetime
import time
from pysqlite2._sqlite import *
# DB-API 2.0 (PEP 249) module globals: '?' parameter placeholders,
# thread-safety level 1 (threads may share the module, not connections).
paramstyle = "qmark"
threadsafety = 1
apilevel = "2.0"
# DB-API 2.0 (PEP 249) type constructors.
Date = datetime.date
Time = datetime.time
Timestamp = datetime.datetime

def DateFromTicks(ticks):
    """Return the local date of Unix timestamp `ticks` as a Date object."""
    # apply() is deprecated (and removed in Python 3); *-unpacking is the
    # exact equivalent and works on both Python 2 and 3.
    return Date(*time.localtime(ticks)[:3])

def TimeFromTicks(ticks):
    """Return the local wall-clock time of Unix timestamp `ticks` as a Time."""
    return Time(*time.localtime(ticks)[3:6])

def TimestampFromTicks(ticks):
    """Return the local date and time of Unix timestamp `ticks` as a Timestamp."""
    return Timestamp(*time.localtime(ticks)[:6])
# Version tuples parsed from the strings exported by the C extension
# (pulled in via the star import above).
version_info = tuple([int(x) for x in version.split(".")])
sqlite_version_info = tuple([int(x) for x in sqlite_version.split(".")])
# DB-API BLOB wrapper; Python 2's built-in buffer type.
Binary = buffer
def register_adapters_and_converters():
    """Install the default date/datetime adapters and converters."""
    def adapt_date(val):
        # ISO 8601 "YYYY-MM-DD"
        return val.isoformat()

    def adapt_datetime(val):
        # ISO 8601 with a space separator: "YYYY-MM-DD HH:MM:SS[.ffffff]"
        return val.isoformat(" ")

    def convert_date(val):
        year, month, day = [int(piece) for piece in val.split("-")]
        return datetime.date(year, month, day)

    def convert_timestamp(val):
        datepart, timepart = val.split(" ")
        year, month, day = [int(piece) for piece in datepart.split("-")]
        time_pieces = timepart.split(".")
        hours, minutes, seconds = [int(piece)
                                   for piece in time_pieces[0].split(":")]
        microseconds = 0
        if len(time_pieces) == 2:
            # Fractional part may have fewer than six digits; scale it.
            microseconds = int(float("0." + time_pieces[1]) * 1000000)
        return datetime.datetime(year, month, day, hours, minutes, seconds,
                                 microseconds)

    register_adapter(datetime.date, adapt_date)
    register_adapter(datetime.datetime, adapt_datetime)
    register_converter("date", convert_date)
    register_converter("timestamp", convert_timestamp)

register_adapters_and_converters()

# Clean up namespace
del(register_adapters_and_converters)
|
{
"content_hash": "6a57aeee08811eee061505189c7eb0cf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 25.323076923076922,
"alnum_prop": 0.6646415552855407,
"repo_name": "cigamit/boxeehack",
"id": "78f29a4fe1e6f179c58d795e1a6f8471e3c87173",
"size": "2668",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "hack/boxee/skin/boxee/720p/scripts/external/Linux/pysqlite2/dbapi2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "217624"
},
{
"name": "Shell",
"bytes": "37304"
}
],
"symlink_target": ""
}
|
"""Preprocess tags per video and create their node description"""
import csv
import pandas as pd
import json
if __name__ == '__main__':
    # Load the tag->cluster assignment and re-key clusters by integer index.
    print("Load json file...")
    json_data = json.load(open('data_clusters_v6.json'))
    cluster = dict()
    g_index = 0
    for key, g in json_data.items():
        cluster[g_index] = g
        g_index+=1
    print(g_index)
    print("Load csv file...")
    csvfile = pd.read_csv('TEDTalks_byID.csv').to_dict("records")
    print("check all tags...")
    # Tags too generic to be informative; excluded from the graph.
    removeTag = ["science","technology","global issues"]
    MAX = 0
    tagset = set()
    # tagmap: tag -> {co-occurring tag -> co-occurrence count}
    tagmap = dict()
    nodelist = list();
    for row in csvfile:
        s = row['tags'].split(",")
        for i in range(len(s)):
            key = s[i].strip().lower()
            if key.startswith('ted') or key=="" or key in removeTag:
                continue
            # First sighting of a tag: attach it to its cluster group node.
            if key not in tagset:
                for g, glist in cluster.items():
                    if key in glist:
                        nodelist.append({'tag':key, 'group':g})
                        break
                tagset.add(key)
            if key in tagmap:
                tempmap = tagmap[key]
            else:
                tempmap = dict()
                tagmap[key] = tempmap
            #print (tempmap)
            # Count every other tag of the same talk as a co-occurrence.
            for j in range(len(s)):
                child = s[j].strip().lower()
                if j==i or child.startswith('ted') or child=="" or child in removeTag:
                    continue
                if child in tempmap:
                    tempmap[child] += 1
                    # NOTE(review): MAX is only updated on increments, so a
                    # count that never exceeds 1 leaves MAX at 0 — confirm
                    # this is intended.
                    if tempmap[child] > MAX:
                        MAX = tempmap[child]
                else:
                    tempmap[child] = 1
    #tagmapJSON = json.dumps(tagmap['cars'],indent = 4,separators=(',', ': '))
    #with open('data_WO_TEDtag_v3.json', 'w') as outfile:
    #    json.dump(tagmap, outfile)
    print("MAX",MAX);
    print(len(nodelist))
    # Build undirected links: usedmap tracks already-emitted pairs so that
    # (a, b) and (b, a) produce a single link.
    usedmap = dict()
    linklist = list();
    for key, con_dict in tagmap.items():
        if key in usedmap:
            usedset = usedmap[key]
        else:
            usedset = set()
            usedmap[key] = usedset
        for child, value in con_dict.items():
            if child in usedset:
                continue
            if child in usedmap and key in usedmap[child]:
                continue
            usedset.add(child)
            linklist.append(
                {
                    "source": key,
                    "target": child,
                    "value": value
                }
            )
    # Sort links by ascending co-occurrence weight.
    linklist.sort(key = lambda x: x["value"])
    print(len(linklist))
    network = (
        {
            "nodes": nodelist,
            "links": linklist
        }
    )
    #with open('network_WO_TEDtag_v5.json', 'w') as outfile:
    #    json.dump(network, outfile)
    #nodelist = list();
|
{
"content_hash": "e292a7ebad67e0ebd4a164c8bee6956c",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 78,
"avg_line_length": 19.74561403508772,
"alnum_prop": 0.6023989338071968,
"repo_name": "cwkenwaysun/TEDmap",
"id": "608be620f06920ead4a74c675b0be1914c424ee7",
"size": "2251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/tagprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4393"
},
{
"name": "HTML",
"bytes": "8680"
},
{
"name": "JavaScript",
"bytes": "68650"
},
{
"name": "Python",
"bytes": "21674"
}
],
"symlink_target": ""
}
|
import mock
import testtools
import webob.exc
from nova.api.openstack import api_version_request as api_version
from nova.api.openstack.compute import hosts as os_hosts_v21
from nova.compute import power_state
from nova.compute import vm_states
from nova import context as context_maker
from nova import db
from nova import exception
from nova import test
from nova.tests import fixtures
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_hosts
from nova.tests import uuidsentinel
def stub_service_get_all(context, disabled=None):
    # Pretend the DB contains exactly the canned fake service list.
    return fake_hosts.SERVICES_LIST
def stub_service_get_by_host_and_binary(context, host_name, binary):
    """Return the first fake service matching host and binary, else None."""
    matching = (svc for svc in stub_service_get_all(context)
                if svc['host'] == host_name and svc['binary'] == binary)
    return next(matching, None)
def stub_set_host_enabled(context, host_name, enabled):
    """Simulates three possible behaviours for VM drivers or compute
    drivers when enabling or disabling a host.

    'enabled' means new instances can go to this host
    'disabled' means they can't
    """
    results = {True: "enabled", False: "disabled"}
    # Guard clauses for the special host names.
    if host_name == "notimplemented":
        # The vm driver for this host doesn't support this feature
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist
        raise exception.ComputeHostNotFound(host=host_name)
    if host_name == "service_not_available":
        # The service is not available
        raise exception.ComputeServiceUnavailable(host=host_name)
    # "host_c2" simulates a failure by reporting the opposite state.
    effective = not enabled if host_name == "host_c2" else enabled
    return results[effective]
def stub_set_host_maintenance(context, host_name, mode):
    """Simulate maintenance toggling: 'host_c1' succeeds, 'host_c2' fails."""
    results = {True: "on_maintenance", False: "off_maintenance"}
    if host_name == "notimplemented":
        # The vm driver for this host doesn't support this feature
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist
        raise exception.ComputeHostNotFound(host=host_name)
    if host_name == "service_not_available":
        # The service is not available
        raise exception.ComputeServiceUnavailable(host=host_name)
    # "host_c2" simulates a failure by reporting the opposite mode.
    effective = not mode if host_name == "host_c2" else mode
    return results[effective]
def stub_host_power_action(context, host_name, action):
    """Echo the requested power action, raising for the special host names."""
    if host_name == "notimplemented":
        raise NotImplementedError()
    if host_name == "dummydest":
        # The host does not exist
        raise exception.ComputeHostNotFound(host=host_name)
    if host_name == "service_not_available":
        # The service is not available
        raise exception.ComputeServiceUnavailable(host=host_name)
    return action
def _create_instance(**kwargs):
    """Create a test instance."""
    ctxt = context_maker.get_admin_context()
    # Persist through the real DB API so controller queries can see it.
    return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
    """Create a dictionary for a test instance."""
    get = kwargs.get
    inst = {
        'image_ref': 'cedef40a-ed67-4d10-800e-17455edce175',
        'reservation_id': 'r-fakeres',
        'user_id': get('user_id', 'admin'),
        'project_id': get('project_id', 'fake'),
        'instance_type_id': '1',
        'vcpus': get('vcpus', 1),
        'memory_mb': get('memory_mb', 20),
        'root_gb': get('root_gb', 30),
        'ephemeral_gb': get('ephemeral_gb', 30),
        'vm_state': get('vm_state', vm_states.ACTIVE),
        'power_state': get('power_state', power_state.RUNNING),
        'task_state': get('task_state', None),
        'availability_zone': get('availability_zone', None),
        'ami_launch_index': 0,
        'launched_on': get('launched_on', 'dummy'),
    }
    # 'host' is only set when the caller explicitly provides one.
    if 'host' in kwargs:
        inst['host'] = kwargs['host']
    return inst
class FakeRequestWithNovaZone(object):
    # Minimal request double: admin context, a ?zone=nova query string and a
    # fixed 2.1 microversion.
    environ = {"nova.context": context_maker.get_admin_context()}
    GET = {"zone": "nova"}
    api_version_request = api_version.APIVersionRequest('2.1')
class HostTestCaseV21(test.TestCase):
    """Test Case for hosts."""
    validation_ex = exception.ValidationError
    Controller = os_hosts_v21.HostController
    policy_ex = exception.PolicyNotAuthorized
    def _setup_stubs(self):
        # Pretend we have fake_hosts.HOST_LIST in the DB
        self.stub_out('nova.db.service_get_all',
                      stub_service_get_all)
        # Only hosts in our fake DB exist
        self.stub_out('nova.db.service_get_by_host_and_binary',
                      stub_service_get_by_host_and_binary)
        # 'host_c1' always succeeds, and 'host_c2'
        self.stubs.Set(self.hosts_api, 'set_host_enabled',
                       stub_set_host_enabled)
        # 'host_c1' always succeeds, and 'host_c2'
        self.stubs.Set(self.hosts_api, 'set_host_maintenance',
                       stub_set_host_maintenance)
        self.stubs.Set(self.hosts_api, 'host_power_action',
                       stub_host_power_action)
    def setUp(self):
        super(HostTestCaseV21, self).setUp()
        self.controller = self.Controller()
        self.hosts_api = self.controller.api
        self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
        self.useFixture(fixtures.SingleCellSimple())
        self._setup_stubs()
    def _test_host_update(self, host, key, val, expected_value):
        # Helper: PUT {key: val} for `host` and check the echoed value.
        body = {key: val}
        result = self.controller.update(self.req, host, body=body)
        self.assertEqual(result[key], expected_value)
    def test_list_hosts(self):
        """Verify that the compute hosts are returned."""
        result = self.controller.index(self.req)
        self.assertIn('hosts', result)
        hosts = result['hosts']
        self.assertEqual(fake_hosts.HOST_LIST, hosts)
    def test_disable_host(self):
        # host_c2's stub simulates failure by reporting the opposite state.
        self._test_host_update('host_c1', 'status', 'disable', 'disabled')
        self._test_host_update('host_c2', 'status', 'disable', 'enabled')
    def test_enable_host(self):
        self._test_host_update('host_c1', 'status', 'enable', 'enabled')
        self._test_host_update('host_c2', 'status', 'enable', 'disabled')
    def test_enable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'enable', 'on_maintenance')
    def test_disable_maintenance(self):
        self._test_host_update('host_c1', 'maintenance_mode',
                               'disable', 'off_maintenance')
    def _test_host_update_notimpl(self, key, val):
        # "notimplemented" makes the stub raise NotImplementedError, which the
        # controller must translate to HTTP 501.
        def stub_service_get_all_notimpl(self, req):
            return [{'host': 'notimplemented', 'topic': None,
                     'availability_zone': None}]
        self.stub_out('nova.db.service_get_all',
                      stub_service_get_all_notimpl)
        body = {key: val}
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          self.controller.update,
                          self.req, 'notimplemented', body=body)
    def test_disable_host_notimpl(self):
        self._test_host_update_notimpl('status', 'disable')
    def test_enable_maintenance_notimpl(self):
        self._test_host_update_notimpl('maintenance_mode', 'enable')
    def test_host_startup(self):
        result = self.controller.startup(self.req, "host_c1")
        self.assertEqual(result["power_action"], "startup")
    def test_host_shutdown(self):
        result = self.controller.shutdown(self.req, "host_c1")
        self.assertEqual(result["power_action"], "shutdown")
    def test_host_reboot(self):
        result = self.controller.reboot(self.req, "host_c1")
        self.assertEqual(result["power_action"], "reboot")
    def _test_host_power_action_notimpl(self, method):
        self.assertRaises(webob.exc.HTTPNotImplemented,
                          method, self.req, "notimplemented")
    def test_host_startup_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.startup)
    def test_host_shutdown_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.shutdown)
    def test_host_reboot_notimpl(self):
        self._test_host_power_action_notimpl(self.controller.reboot)
    def test_host_status_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest, body={'status': 'enable'})
    def test_host_maintenance_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest,
                                   body={'maintenance_mode': 'enable'})
    def test_host_power_action_bad_host(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.reboot(self.req, dest)
    def test_host_status_bad_status(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'service_not_available'
        with testtools.ExpectedException(webob.exc.HTTPBadRequest,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest, body={'status': 'enable'})
    def test_host_maintenance_bad_status(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'service_not_available'
        with testtools.ExpectedException(webob.exc.HTTPBadRequest,
                                         ".*%s.*" % dest):
            self.controller.update(self.req, dest,
                                   body={'maintenance_mode': 'enable'})
    def test_host_power_action_bad_status(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'service_not_available'
        with testtools.ExpectedException(webob.exc.HTTPBadRequest,
                                         ".*%s.*" % dest):
            self.controller.reboot(self.req, dest)
    def test_bad_status_value(self):
        # Schema validation must reject unknown status values.
        bad_body = {"status": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)
        bad_body2 = {"status": "disablabc"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body2)
    def test_bad_update_key(self):
        bad_body = {"crazy": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)
    def test_bad_update_key_and_correct_update_key(self):
        bad_body = {"status": "disable", "crazy": "bad"}
        self.assertRaises(self.validation_ex, self.controller.update,
                          self.req, "host_c1", body=bad_body)
    def test_good_update_keys(self):
        body = {"status": "disable", "maintenance_mode": "enable"}
        result = self.controller.update(self.req, 'host_c1', body=body)
        self.assertEqual(result["host"], "host_c1")
        self.assertEqual(result["status"], "disabled")
        self.assertEqual(result["maintenance_mode"], "on_maintenance")
    def test_show_host_not_exist(self):
        # A host given as an argument does not exist.
        self.req.environ["nova.context"].is_admin = True
        dest = 'dummydest'
        with testtools.ExpectedException(webob.exc.HTTPNotFound,
                                         ".*%s.*" % dest):
            self.controller.show(self.req, dest)
    def _create_compute_service(self):
        """Create compute-manager(ComputeNode and Service record)."""
        ctxt = self.req.environ["nova.context"]
        dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
               'report_count': 0}
        s_ref = db.service_create(ctxt, dic)
        dic = {'service_id': s_ref['id'],
               'host': s_ref['host'],
               'uuid': uuidsentinel.compute_node,
               'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
               'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
               'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
               'cpu_info': '', 'stats': ''}
        db.compute_node_create(ctxt, dic)
        return db.service_get(ctxt, s_ref['id'])
    def test_show_no_project(self):
        """No instances are running on the given host."""
        ctxt = context_maker.get_admin_context()
        s_ref = self._create_compute_service()
        result = self.controller.show(self.req, s_ref['host'])
        # Only the three aggregate rows are expected when no project has
        # instances on the host.
        proj = ['(total)', '(used_now)', '(used_max)']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 3)
        for resource in result['host']:
            self.assertIn(resource['resource']['project'], proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertEqual(set(column), set(resource['resource'].keys()))
        db.service_destroy(ctxt, s_ref['id'])
    def test_show_works_correctly(self):
        """show() works correctly as expected."""
        ctxt = context_maker.get_admin_context()
        s_ref = self._create_compute_service()
        i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
        i_ref2 = _create_instance(project_id='p-02', vcpus=3,
                                  host=s_ref['host'])
        result = self.controller.show(self.req, s_ref['host'])
        # Three aggregate rows plus one row per project with instances.
        proj = ['(total)', '(used_now)', '(used_max)', 'p-01', 'p-02']
        column = ['host', 'project', 'cpu', 'memory_mb', 'disk_gb']
        self.assertEqual(len(result['host']), 5)
        for resource in result['host']:
            self.assertIn(resource['resource']['project'], proj)
            self.assertEqual(len(resource['resource']), 5)
            self.assertEqual(set(column), set(resource['resource'].keys()))
        db.service_destroy(ctxt, s_ref['id'])
        db.instance_destroy(ctxt, i_ref1['uuid'])
        db.instance_destroy(ctxt, i_ref2['uuid'])
    def test_show_late_host_mapping_gone(self):
        s_ref = self._create_compute_service()
        with mock.patch.object(self.controller.api,
                               'instance_get_all_by_host') as m:
            m.side_effect = exception.HostMappingNotFound(name='something')
            self.assertRaises(webob.exc.HTTPNotFound,
                              self.controller.show, self.req, s_ref['host'])
    def test_list_hosts_with_zone(self):
        result = self.controller.index(FakeRequestWithNovaZone())
        self.assertIn('hosts', result)
        hosts = result['hosts']
        self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
class HostsPolicyEnforcementV21(test.NoDBTestCase):
    # Verifies that index/show are rejected when policy denies os-hosts.
    def setUp(self):
        super(HostsPolicyEnforcementV21, self).setUp()
        self.controller = os_hosts_v21.HostController()
        self.req = fakes.HTTPRequest.blank('')
    def test_index_policy_failed(self):
        rule_name = "os_compute_api:os-hosts"
        # Force the rule to never match the request's project.
        self.policy.set_rules({rule_name: "project_id:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.index, self.req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
    def test_show_policy_failed(self):
        rule_name = "os_compute_api:os-hosts"
        self.policy.set_rules({rule_name: "project_id:non_fake"})
        exc = self.assertRaises(
            exception.PolicyNotAuthorized,
            self.controller.show, self.req, 1)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." % rule_name,
            exc.format_message())
class HostControllerDeprecationTest(test.NoDBTestCase):
    # The os-hosts API is removed from microversion 2.43 on; every action
    # must raise VersionNotFoundForAPIMethod for a 2.43 request.
    def setUp(self):
        super(HostControllerDeprecationTest, self).setUp()
        self.controller = os_hosts_v21.HostController()
        self.req = fakes.HTTPRequest.blank('', version='2.43')
    def test_not_found_for_all_host_api(self):
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.startup, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.shutdown, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.reboot, self.req, fakes.FAKE_UUID)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.index, self.req)
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.update, self.req, fakes.FAKE_UUID,
                          body={})
|
{
"content_hash": "7ea54af958c49d53c9b6946958222e8b",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 78,
"avg_line_length": 41.974117647058826,
"alnum_prop": 0.6120298222994562,
"repo_name": "jianghuaw/nova",
"id": "308202e8b1d9136fbc4e84c6f0cf12e3eb33003b",
"size": "18479",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/tests/unit/api/openstack/compute/test_hosts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
"""
A compiler from a Relay expression to TVM's graph runtime.
The compiler is built from a few pieces.
First we define a compiler from a single Relay expression to the
graph language. We require the expression to be a function.
The function's parameters correspond to the placeholder/inputs
and model parameters found in the computation graph representation.
The body of the function represents the computation graph.
The compiler's output is a program in the graph language, which is composed
of Node, NodeRef, InputNode, OpNode.
This "little language" represents programs in TVM's graph format.
To connect to the graph runtime, we use a printer that converts our graph format
into TVM's JSON format. The resulting string can be loaded by
contrib.graph_runtime or any other TVM runtime compatible systems.
"""
from __future__ import absolute_import
from tvm.ndarray import empty
from tvm.relay import build_module
from tvm import target as _target
from tvm import expr as _expr
class GraphRuntimeCodegen(object):
    """The compiler from Relay to the TVM runtime system."""
    def __init__(self, mod, target):
        # Bind the C++ codegen module's packed functions once up front.
        self._mod = build_module._GraphRuntimeCodegen()
        self._init = self._mod["init"]
        self._codegen = self._mod["codegen"]
        self._get_graph_json = self._mod["get_graph_json"]
        self._list_params_name = self._mod["list_params_name"]
        self._get_param_by_name = self._mod["get_param_by_name"]
        self._get_lowered_funcs = self._mod["get_lowered_funcs"]
        self._setup(mod, target)
    def _setup(self, mod, target):
        # Normalize `target` into a {device: tvm.target.Target} mapping
        # before handing it to the C++ side.
        tgts = {}
        if isinstance(target, dict):
            for dev, tgt in target.items():
                if not isinstance(tgt, (str, _target.Target)):
                    raise Exception("Unknown target type")
                tgts[dev] = _target.create(tgt)
        elif isinstance(target, (str, _target.Target)):
            # Single target: key it by device id 0.
            tgts[_expr.IntImm("int32", 0)] = _target.create(target)
        # NOTE(review): a target that is neither dict nor str/Target falls
        # through silently with an empty mapping — confirm this is intended.
        self._init(mod, tgts)
    def codegen(self, func):
        """Compile a single function into a graph.
        Parameters
        ----------
        func: tvm.relay.Expr
            The function to compile.
        Returns
        -------
        graph_json : str
            The graph json that can be consumed by runtime.
        lowered_funcs : List[tvm.LoweredFunc] or Dict[str, List[tvm.LoweredFunc]]
            The lowered functions.
        params : Dict[str, tvm.nd.NDArray]
            Additional constant parameters.
        """
        self._codegen(func)
        graph_json = self._get_graph_json()
        lowered_func = self._get_lowered_funcs()
        param_names = self._list_params_name()
        params = {}
        for name in param_names:
            key = name.value
            arr = self._get_param_by_name(key)
            # Copy each parameter out of the codegen module into a fresh
            # NDArray on the same context.
            param = empty(arr.shape, dtype=arr.dtype, ctx=arr.ctx)
            arr.copyto(param)
            params[key] = param
        return graph_json, lowered_func, params
|
{
"content_hash": "cd84701ca64f6e6f0448feaef1ddeb28",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 81,
"avg_line_length": 38.32911392405063,
"alnum_prop": 0.6406869220607662,
"repo_name": "mlperf/training_results_v0.7",
"id": "cf31e9cff833c412623610887e5e505defefcb94",
"size": "3813",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/python/tvm/relay/backend/graph_runtime_codegen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
}
|
"""Utilities and helper functions."""
import contextlib
import os
import shutil
import tempfile
from oslo.config import cfg
from brick.initiator import connector
from brick.openstack.common import lockutils
from brick.openstack.common import log as logging
from brick.openstack.common import processutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Timestamp formats: second precision vs. microsecond precision.
ISO_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S"
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
# Decorator factory for interprocess locks using a "brick-" lock-file prefix.
synchronized = lockutils.synchronized_with_prefix('brick-')
def execute(*cmd, **kwargs):
    """Convenience wrapper around oslo's execute() method.

    Fills in the rootwrap helper whenever run_as_root is requested and the
    caller did not supply a root_helper of its own.
    """
    needs_helper = 'run_as_root' in kwargs and 'root_helper' not in kwargs
    if needs_helper:
        kwargs['root_helper'] = get_root_helper()
    return processutils.execute(*cmd, **kwargs)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
    """Temporarily chown a path.
    :params owner_uid: UID of temporary owner (defaults to current user)
    """
    if owner_uid is None:
        owner_uid = os.getuid()
    orig_uid = os.stat(path).st_uid
    # Only chown when ownership actually differs, to avoid needless sudo.
    if orig_uid != owner_uid:
        execute('chown', owner_uid, path, run_as_root=True)
    try:
        yield
    finally:
        # Restore the original owner even if the caller's block raised.
        if orig_uid != owner_uid:
            execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Yield a freshly created temporary directory, removing it on exit."""
    path = tempfile.mkdtemp(**kwargs)
    try:
        yield path
    finally:
        try:
            shutil.rmtree(path)
        except OSError as exc:
            # Best effort only: leaving the tmpdir behind is not fatal.
            LOG.debug('Could not remove tmpdir: %s', exc)
def get_root_helper():
    # Command prefix used for privileged operations via oslo rootwrap.
    return 'sudo brick-rootwrap %s' % CONF.rootwrap_config
def brick_get_connector_properties():
    """wrapper for the brick calls to automatically set
    the root_helper needed for brick.
    """
    root_helper = get_root_helper()
    # CONF.my_ip identifies this host to the storage backend.
    return connector.get_connector_properties(root_helper,
                                              CONF.my_ip)
def brick_get_connector(protocol, driver=None,
                        execute=processutils.execute,
                        use_multipath=False,
                        device_scan_attempts=3,
                        *args, **kwargs):
    """Wrapper to get a brick connector object.
    This automatically populates the required protocol as well
    as the root_helper needed to execute commands.
    """
    root_helper = get_root_helper()
    return connector.InitiatorConnector.factory(protocol, root_helper,
                                                driver=driver,
                                                execute=execute,
                                                use_multipath=use_multipath,
                                                device_scan_attempts=
                                                device_scan_attempts,
                                                *args, **kwargs)
|
{
"content_hash": "6aa844cbae846a7e46f5d4d88ca67e2d",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 76,
"avg_line_length": 29.88421052631579,
"alnum_prop": 0.5833039802747446,
"repo_name": "hemna/cinder-brick",
"id": "4d64fbb5955a49ee02d972dae2ff64158476bc2f",
"size": "3412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brick/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "290964"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import re
# Rebuild README.md from scratch, starting with a fixed preamble.
out = open('README.md', 'w')
out.write("""### Repository for open source packages
Open source packages mirrored here for convenience.
""")
def add_name(row):
res = {'home': row[1]}
m = re.match(r'.*[=/](.+)', row[0])
res['name'] = m.group(1) if m else row[0]
try:
res['repo'] = row[2]
except:
res['repo'] = row[0]
return res
originals = sorted(
map(
add_name,
[
['http://download.virtualbox.org/virtualbox/4.3.26/VirtualBox-4.3.26-98988-OSX.dmg', 'http://virtualbox.org'],
['http://xquartz.macosforge.org/downloads/SL/XQuartz-2.7.7.dmg', 'http://xquartz.macosforge.org'],
['https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2.dmg', 'http://vagrantup.com'],
['http://download.qt.io/official_releases/qt/4.8/4.8.6/qt-everywhere-opensource-src-4.8.6.tar.gz', 'http://www.qt.io/'],
['http://sourceforge.net/projects/pyqt/files/sip/sip-4.16.6/sip-4.16.6.tar.gz', 'http://www.riverbankcomputing.com/software/sip/intro'],
['http://sourceforge.net/projects/pyqt/files/PyQt4/PyQt-4.11.3/PyQt-x11-gpl-4.11.3.tar.gz', 'http://www.riverbankcomputing.com/software/pyqt/intro'],
['http://www.open-mpi.org/software/ompi/v1.8/downloads/openmpi-1.8.4.tar.bz2', 'http://www.open-mpi.org/'],
['openmpi-1.8.4-1.x86_64.rpm', 'http://www.open-mpi.org/', 'http://www.open-mpi.org/software/ompi/v1.8/downloads/openmpi-1.8.4-1.src.rpm'],
['http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/cgi-bin/oagLog4.cgi?name=elegant.26.0.2.tar.gz', 'http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/software.shtml#elegant'],
['http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/cgi-bin/oagLog4.cgi?name=Elegant.msi', 'http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/software.shtml#elegant'],
['http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/cgi-bin/oagLog4.cgi?name=SDDS_ToolKit.msi', 'http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/software.shtml#SDDSbinaries'],
['http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/cgi-bin/oagLog4.cgi?name=elegant-26.0.2-1.rhel.6.5.openmpi.x86_64.rpm', 'http://www.aps.anl.gov/Accelerator_Systems_Division/Accelerator_Operations_Physics/software.shtml#elegant'],
['http://09c8d0b2229f813c1b93-c95ac804525aac4b6dba79b00b39d1d3.r79.cf1.rackcdn.com/Anaconda-2.1.0-Windows-x86.exe', 'http://continuum.io/downloads'],
['http://mirror.ctan.org/systems/texlive/tlnet/install-tl-windows.exe', 'http://www.tug.org/texlive/acquire-netinstall.html'],
['http://genesis.web.psi.ch/Genesis-Beta/genesis-3.2.1-beta.tar.gz', 'http://genesis.web.psi.ch']]),
key=lambda x: x['name'])
for row in originals:
out.write("* [{name}](https://raw.githubusercontent.com/radiasoft/foss-mirror/master/{name}) [original]({repo}) [home]({home})\n".format(**row))
out.close()
|
{
"content_hash": "3626614f69091559228ee506e5b8209c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 278,
"avg_line_length": 72.47727272727273,
"alnum_prop": 0.6782690498588899,
"repo_name": "radiasoft/foss-mirror",
"id": "39f434377a6c8fb40156353506a6f2edd8811ccc",
"size": "3189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gen-readme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3189"
},
{
"name": "Shell",
"bytes": "755"
}
],
"symlink_target": ""
}
|
"""PipelineRunner, an abstract base runner object."""
# pytype: skip-file
import importlib
import logging
import os
import shelve
import shutil
import tempfile
from typing import TYPE_CHECKING
from typing import Optional
from apache_beam.options.pipeline_options import StandardOptions
if TYPE_CHECKING:
from apache_beam import pvalue
from apache_beam import PTransform
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import AppliedPTransform
from apache_beam.pipeline import Pipeline
from apache_beam.pipeline import PipelineVisitor
__all__ = ['PipelineRunner', 'PipelineState', 'PipelineResult']

# Map lower-cased short runner names (the last component of the dotted path,
# e.g. 'directrunner') to their fully qualified class paths.
_RUNNER_MAP = {
    path.rsplit('.', maxsplit=1)[-1].lower(): path
    for path in StandardOptions.ALL_KNOWN_RUNNERS
}

# Allow this alias, but don't make public.
_RUNNER_MAP['pythonrpcdirectrunner'] = (
    'apache_beam.runners.experimental'
    '.python_rpc_direct.python_rpc_direct_runner.PythonRPCDirectRunner')

_LOGGER = logging.getLogger(__name__)
def create_runner(runner_name):
  # type: (str) -> PipelineRunner

  """For internal use only; no backwards-compatibility guarantees.

  Creates a runner instance from a runner class name.

  Args:
    runner_name: Name of the pipeline runner. Possible values are listed in
      _RUNNER_MAP above.

  Returns:
    A runner object.

  Raises:
    RuntimeError: if an invalid runner name is used.
  """
  # Resolve the short name (case-insensitively), then the short name with a
  # 'runner' suffix; if neither matches, treat the input as a qualified path.
  lower_name = runner_name.lower()
  qualified = _RUNNER_MAP.get(
      lower_name, _RUNNER_MAP.get(lower_name + 'runner', runner_name))

  if '.' not in qualified:
    raise ValueError(
        'Unexpected pipeline runner: %s. Valid values are %s '
        'or the fully qualified name of a PipelineRunner subclass.' %
        (qualified, ', '.join(StandardOptions.KNOWN_RUNNER_NAMES)))

  module_name, class_name = qualified.rsplit('.', 1)
  try:
    return getattr(importlib.import_module(module_name), class_name)()
  except ImportError:
    # Point the user at the extra they are missing, when recognizable.
    if 'dataflow' in qualified.lower():
      raise ImportError(
          'Google Cloud Dataflow runner not available, '
          'please install apache_beam[gcp]')
    if 'interactive' in qualified.lower():
      raise ImportError(
          'Interactive runner not available, '
          'please install apache_beam[interactive]')
    raise
class PipelineRunner(object):
  """A runner of a pipeline object.

  The base runner provides a run() method for visiting every node in the
  pipeline's DAG and executing the transforms computing the PValue in the
  node.

  A custom runner will typically provide implementations for some of the
  transform methods (ParDo, GroupByKey, Create, etc.). It may also
  provide a new implementation for clear_pvalue(), which is used to wipe out
  materialized values in order to reduce footprint.
  """

  def run(self,
          transform,  # type: PTransform
          options=None  # type: Optional[PipelineOptions]
         ):
    # type: (...) -> PipelineResult

    """Run the given transform or callable with this runner.

    Blocks until the pipeline is complete. See also `PipelineRunner.run_async`.
    """
    result = self.run_async(transform, options)
    result.wait_until_finish()
    return result

  def run_async(self,
                transform,  # type: PTransform
                options=None  # type: Optional[PipelineOptions]
               ):
    # type: (...) -> PipelineResult

    """Run the given transform or callable with this runner.

    May return immediately, executing the pipeline in the background.

    The returned result object can be queried for progress, and
    `wait_until_finish` may be called to block until completion.
    """
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam import PTransform
    from apache_beam.pvalue import PBegin
    from apache_beam.pipeline import Pipeline
    p = Pipeline(runner=self, options=options)
    if isinstance(transform, PTransform):
      # Apply the transform at the pipeline root via the | operator.
      p | transform
    else:
      # A callable: invoke it on the pipeline's begin marker.
      transform(PBegin(p))
    return p.run()

  def run_pipeline(
      self,
      pipeline,  # type: Pipeline
      options  # type: PipelineOptions
  ):
    # type: (...) -> PipelineResult

    """Execute the entire pipeline or the sub-DAG reachable from a node.

    Runners should override this method.
    """
    raise NotImplementedError

  def apply(self,
            transform,  # type: PTransform
            input,  # type: Optional[pvalue.PValue]
            options  # type: PipelineOptions
           ):
    """Runner callback for a pipeline.apply call.

    Args:
      transform: the transform to apply.
      input: transform's input (typically a PCollection).

    A concrete implementation of the Runner class may want to do custom
    pipeline construction for a given transform.  To override the behavior
    for a transform class Xyz, implement an apply_Xyz method with this same
    signature.
    """
    # Walk the transform's MRO so a runner can override behavior for a whole
    # subtree of transform types with a single apply_<BaseClass> method.
    for cls in transform.__class__.mro():
      m = getattr(self, 'apply_%s' % cls.__name__, None)
      if m:
        return m(transform, input, options)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' % (transform, self))

  def visit_transforms(
      self,
      pipeline,  # type: Pipeline
      options  # type: PipelineOptions
  ):
    # type: (...) -> None
    # Imported here to avoid circular dependencies.
    # pylint: disable=wrong-import-order, wrong-import-position
    from apache_beam.pipeline import PipelineVisitor

    class RunVisitor(PipelineVisitor):
      # Visits each transform node and executes it via the owning runner.

      def __init__(self, runner):
        # type: (PipelineRunner) -> None
        self.runner = runner

      def visit_transform(self, transform_node):
        try:
          self.runner.run_transform(transform_node, options)
        except:
          # Record which node failed, then re-raise unchanged.
          _LOGGER.error('Error while visiting %s', transform_node.full_label)
          raise

    pipeline.visit(RunVisitor(self))

  def apply_PTransform(self, transform, input, options):
    # The base case of apply is to call the transform's expand.
    return transform.expand(input)

  def run_transform(self,
                    transform_node,  # type: AppliedPTransform
                    options  # type: PipelineOptions
                   ):
    """Runner callback for a pipeline.run call.

    Args:
      transform_node: transform node for the transform to run.

    A concrete implementation of the Runner class must implement run_Abc for
    some class Abc in the method resolution order for every non-composite
    transform Xyz in the pipeline.
    """
    # Same MRO-based dispatch as apply(), but for execution (run_Xyz).
    for cls in transform_node.transform.__class__.mro():
      m = getattr(self, 'run_%s' % cls.__name__, None)
      if m:
        return m(transform_node, options)
    raise NotImplementedError(
        'Execution of [%s] not implemented in runner %s.' %
        (transform_node.transform, self))

  def is_fnapi_compatible(self):
    """Whether to enable the beam_fn_api experiment by default."""
    return True
class PValueCache(object):
  """For internal use only; no backwards-compatibility guarantees.

  Local cache for arbitrary information computed for PValue objects."""

  def __init__(self, use_disk_backed_cache=False):
    # Values computed while a runner executes a pipeline, keyed by
    # (transform full_label, tag) pairs from to_cache_key().  Labels are
    # unique within a pipeline, so values from several pipelines can coexist
    # without clashing.  Optionally backed by a shelve file on disk.
    self._use_disk_backed_cache = use_disk_backed_cache
    if not use_disk_backed_cache:
      self._cache = {}
    else:
      self._tempdir = tempfile.mkdtemp()
      self._cache = shelve.open(os.path.join(self._tempdir, 'shelve'))

  def __del__(self):
    # Only the disk-backed variant owns resources needing explicit cleanup.
    if self._use_disk_backed_cache:
      self._cache.close()
      shutil.rmtree(self._tempdir)

  def __len__(self):
    return len(self._cache)

  def to_cache_key(self, transform, tag):
    return transform.full_label, tag

  def _ensure_pvalue_has_real_producer(self, pvalue):
    """Attach a real_producer attribute to *pvalue* if it lacks one.

    Only results of primitive transforms are cached, so for the output of a
    composite transform we descend to the rightmost leaf part.
    """
    if hasattr(pvalue, 'real_producer'):
      return
    producer = pvalue.producer
    while producer.parts:
      producer = producer.parts[-1]
    pvalue.real_producer = producer

  def is_cached(self, pobj):
    from apache_beam.pipeline import AppliedPTransform
    if isinstance(pobj, AppliedPTransform):
      transform, tag = pobj, None
    else:
      self._ensure_pvalue_has_real_producer(pobj)
      transform, tag = pobj.real_producer, pobj.tag
    return self.to_cache_key(transform, tag) in self._cache

  def cache_output(self, transform, tag_or_value, value=None):
    # Two calling conventions: (transform, value) or (transform, tag, value).
    if value is not None:
      tag = tag_or_value
    else:
      tag, value = None, tag_or_value
    self._cache[self.to_cache_key(transform, tag)] = value

  def get_pvalue(self, pvalue):
    """Gets the value associated with a PValue from the cache."""
    self._ensure_pvalue_has_real_producer(pvalue)
    try:
      return self._cache[self.key(pvalue)]
    except KeyError:
      untagged = self.to_cache_key(pvalue.real_producer, None)
      if pvalue.tag is not None and untagged in self._cache:
        # An undeclared, empty output of a DoFn executed in the local runner
        # before this output was referenced.
        return []
      raise

  def get_unwindowed_pvalue(self, pvalue):
    return [windowed.value for windowed in self.get_pvalue(pvalue)]

  def clear_pvalue(self, pvalue):
    """Removes a PValue from the cache."""
    if self.is_cached(pvalue):
      del self._cache[self.key(pvalue)]

  def key(self, pobj):
    self._ensure_pvalue_has_real_producer(pobj)
    return self.to_cache_key(pobj.real_producer, pobj.tag)
# FIXME: replace with PipelineState(str, enum.Enum)
class PipelineState(object):
  """State of the Pipeline, as returned by :attr:`PipelineResult.state`.

  This is meant to be the union of all the states any runner can put a
  pipeline in. Currently, it represents the values of the dataflow
  API JobState enum.
  """
  UNKNOWN = 'UNKNOWN'  # not specified by a runner, or unknown to a runner
  STARTING = 'STARTING'  # not yet started
  STOPPED = 'STOPPED'  # paused or not yet started
  RUNNING = 'RUNNING'  # currently running
  DONE = 'DONE'  # successfully completed (terminal state)
  FAILED = 'FAILED'  # failed (terminal state)
  CANCELLED = 'CANCELLED'  # explicitly cancelled (terminal state)
  UPDATED = 'UPDATED'  # replaced by another job (terminal state)
  DRAINING = 'DRAINING'  # still processing, no longer reading data
  DRAINED = 'DRAINED'  # draining completed (terminal state)
  PENDING = 'PENDING'  # the job has been created but is not yet running
  CANCELLING = 'CANCELLING'  # explicitly cancelled, in the process of stopping
  RESOURCE_CLEANING_UP = 'RESOURCE_CLEANING_UP'  # resources being cleaned up
  UNRECOGNIZED = 'UNRECOGNIZED'  # runner-reported state the SDK cannot
  # interpret

  @classmethod
  def is_terminal(cls, state):
    """Return True iff *state* ends the pipeline's lifecycle."""
    return state in (cls.DONE, cls.FAILED, cls.CANCELLED, cls.UPDATED,
                     cls.DRAINED)
class PipelineResult(object):
  """A :class:`PipelineResult` provides access to info about a pipeline."""

  def __init__(self, state):
    self._state = state

  @property
  def state(self):
    """Return the current state of the pipeline execution."""
    return self._state

  def wait_until_finish(self, duration=None):
    """Waits until the pipeline finishes and returns the final status.

    Args:
      duration (int): The time to wait (in milliseconds) for job to finish.
        If it is set to :data:`None`, it will wait indefinitely until the job
        is finished.

    Raises:
      IOError: If there is a persistent problem getting job
        information.
      NotImplementedError: If the runner does not support this
        operation.

    Returns:
      The final state of the pipeline, or :data:`None` on timeout.
    """
    raise NotImplementedError

  def cancel(self):
    """Cancels the pipeline execution.

    Raises:
      IOError: If there is a persistent problem getting job
        information.
      NotImplementedError: If the runner does not support this
        operation.

    Returns:
      The final state of the pipeline.
    """
    raise NotImplementedError

  def metrics(self):
    """Returns :class:`~apache_beam.metrics.metric.MetricResults` object to
    query metrics from the runner.

    Raises:
      NotImplementedError: If the runner does not support this
        operation.
    """
    raise NotImplementedError

  # pylint: disable=unused-argument
  def aggregated_values(self, aggregator_or_name):
    """Return a dict of step names to values of the Aggregator."""
    _LOGGER.warning(
        '%s does not implement aggregated_values', self.__class__.__name__)
    return {}
|
{
"content_hash": "17a7074473a088096001a890d311271e",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 80,
"avg_line_length": 33.30731707317073,
"alnum_prop": 0.6775043936731108,
"repo_name": "apache/beam",
"id": "f2e09edb28eec392fe6d2b3217898ab26637001c",
"size": "14441",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/runners/runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "C",
"bytes": "3869"
},
{
"name": "CSS",
"bytes": "4957"
},
{
"name": "Cython",
"bytes": "70760"
},
{
"name": "Dart",
"bytes": "912687"
},
{
"name": "Dockerfile",
"bytes": "59805"
},
{
"name": "FreeMarker",
"bytes": "7933"
},
{
"name": "Go",
"bytes": "5508697"
},
{
"name": "Groovy",
"bytes": "936956"
},
{
"name": "HCL",
"bytes": "103872"
},
{
"name": "HTML",
"bytes": "184151"
},
{
"name": "Java",
"bytes": "41223435"
},
{
"name": "JavaScript",
"bytes": "119576"
},
{
"name": "Jupyter Notebook",
"bytes": "55818"
},
{
"name": "Kotlin",
"bytes": "220768"
},
{
"name": "Lua",
"bytes": "3620"
},
{
"name": "Python",
"bytes": "10728612"
},
{
"name": "Rust",
"bytes": "5168"
},
{
"name": "SCSS",
"bytes": "318364"
},
{
"name": "Sass",
"bytes": "25954"
},
{
"name": "Scala",
"bytes": "1429"
},
{
"name": "Shell",
"bytes": "375834"
},
{
"name": "Smarty",
"bytes": "2618"
},
{
"name": "Thrift",
"bytes": "3260"
},
{
"name": "TypeScript",
"bytes": "1997829"
}
],
"symlink_target": ""
}
|
import sys
from ptnode import *
from color import rgb_tuple_to_hex_str,hex_str_to_rgb_tuple
#-----------------------------------------------------------------------------
#
# Constants
#
#-----------------------------------------------------------------------------
# Sizing and port-flag constants used when emitting Dunnart SVG.
# NOTE(review): port values look like Dunnart's bit-flag encodings — confirm
# against the Dunnart input XML format before changing.
DUNNART_SPAN_LENGTH_FACTOR = 21 # multiply number of residues by this
DUNNART_HELIX_SPAN_FACTOR = 10 # for helices (above if for strands)
                               # NB may need to adjust value for x/y offset
                               # for SVG/firefox shape hack
                               # for hover text residue index in
                               # webserver/javascript/pohovertext.js if
                               # this value is changed.
DUNNART_HELIX_WIDTH = 30 # width of helices
DUNNART_STRAND_WIDTH = 40 # width of strands
DUNNART_SHEET_GAP_SIZE = 90 # space to leave between sheets
DUNNART_TERMINUS_SIZE = 32 # width and height of terminus nodes
DUNNART_DOMAIN_GAP_SIZE = 50 # space to leave between domain bounding boxes
DUNNART_TOP_PORT = 49 # Dunnart input XML SVG code on flags for port
DUNNART_BOTTOM_PORT = 50
DUNNART_LEFT_PORT = 52
DUNNART_RIGHT_PORT = 56
DUNNART_DEFAULT_PORT = 544 # default flags for no specified port
SHAPE_ALPHA = 1.0 # alpha channel (opacity) value for shapes
PTGRAPH_NS = "proorigami" # Namespace prefix for pro-origami properties
                          # TODO: should have DUNNART_NS instead of hardcoded
#-----------------------------------------------------------------------------
#
# Class definitions
#
#-----------------------------------------------------------------------------
class PTSVGNode:
    """
    Layout and drawing information (position, size, color, ports, labels)
    needed to write a node shape as SVG (XML) for the diagram editor.

    Derived classes inherit both from this class and from the relevant
    PTNode subclass (PTNodeHelix etc.), combining protein structural
    information with graph layout information.
    """

    def __init__(self):
        """Construct a PTSVGNode with every field initially unset."""
        self.color = None       # (r, g, b) tuple; kept in sync with color_hex
        self.color_hex = None   # 'rrggbb' hex string; kept in sync with color
        # The fields below are set via set_svginfo() in derived classes and
        # read directly (no accessor methods).
        self.xmlid = None       # XML identifier for this shape
        self.xpos = None        # initial x co-ordinate for diagram layout
        self.ypos = None        # initial y co-ordinate for diagram layout
        self.width = None       # width of shape on diagram
        self.height = None      # height of shape on diagram
        self.label = None       # label to put on shape
        self.indguide = None    # PTSVGIndGuide this node is aligned on
        self.nterm_port = None  # DUNNART_x_PORT for incoming connector
        self.cterm_port = None  # DUNNART_x_PORT for outgoing connector
        self.sseseqnum = None   # SSE sequential number
        self.headLabel = ''     # label for 'head' (C-terminal end) of shape
        self.tailLabel = ''     # label for 'tail' (N-terminal end) of shape

    def set_color(self, color):
        """Set the (r,g,b) color tuple, updating the hex form as well."""
        self.color = color
        self.color_hex = rgb_tuple_to_hex_str(color)

    def get_color(self):
        """Return the (r,g,b) color tuple (None if unset)."""
        return self.color

    def set_color_hex(self, hexstr):
        """Set the 'rrggbb' hex color string, updating the tuple form too."""
        self.color_hex = hexstr
        self.color = hex_str_to_rgb_tuple(hexstr)

    def get_color_hex(self):
        """Return the 'rrggbb' hex color string (None if unset)."""
        return self.color_hex

    def get_empty_port(self):
        """
        Return the DUNNART_x_PORT flags for the port opposite the single
        port already in use (top<->bottom, left<->right), for attaching the
        second connector.

        Raises:
            ValueError if no port or both ports are set, or the used port
            is not one of the four directional ports (e.g.
            DUNNART_DEFAULT_PORT).
        """
        if self.nterm_port is not None and self.cterm_port is not None:
            raise ValueError('both ports set')
        if self.nterm_port is not None:
            used_port = self.nterm_port
        elif self.cterm_port is not None:
            used_port = self.cterm_port
        else:
            raise ValueError('no port set')
        opposite = {
            DUNNART_TOP_PORT: DUNNART_BOTTOM_PORT,
            DUNNART_BOTTOM_PORT: DUNNART_TOP_PORT,
            DUNNART_LEFT_PORT: DUNNART_RIGHT_PORT,
            DUNNART_RIGHT_PORT: DUNNART_LEFT_PORT,
        }
        try:
            return opposite[used_port]
        except KeyError:
            raise ValueError('bad port value ' + str(used_port))
class PTSVGNodeHelix(PTNodeHelix, PTSVGNode):
    """
    A Helix PTNode augmented with SVG information
    """
    def __init__(self, *args):
        """
        Construct PTSVGNodeHelix with supplied nodeid and type.
        Parameters:
             Variable parameter list: straight to PTNodeHelix constructor (q.v.).
        Raises exceptions:
            TypeError if helixtype argument is invalid.
        """
        PTNodeHelix.__init__(self, *args)
        PTSVGNode.__init__(self)

    def set_svginfo(self, xmlid, xpos, ypos, label, sseseqnum):
        """
        Set the SVG information in this node.
        Parameters:
            xmlid - the current XML id for this node
            xpos - x coordinate to write helix
            ypos - y coordinate to write helix
            label - label to write on this node
            sseseqnum - SSE sequential number (from 1 to n, same as for
                        'sequential' labelling scheme).
        Return value:
            None
        Writes data members:
            xmlid, xpos, ypos, label, height, width, sseseqnum
        """
        self.xmlid = xmlid
        self.xpos = xpos
        self.ypos = ypos
        self.label = label
        self.height = self.get_span() * DUNNART_HELIX_SPAN_FACTOR
        # make sure we don't get helices drawn wrong way due to helix width
        # being greater than height
        if self.height < DUNNART_HELIX_WIDTH + 2:
            self.height = DUNNART_HELIX_WIDTH + 2
        self.width = DUNNART_HELIX_WIDTH
        if self.get_sideways():
            # swap width and height so the shape is laid out horizontally
            temp = self.height
            self.height = self.width
            self.width = temp
        self.sseseqnum = sseseqnum

    def write_svg(self, fh):
        """
        Write this helix to the SVG file.
        Mostly there are Dunnart namespace attributes for Dunnart to use
        in layout out diagram, but also some proorigami namespace attributes
        that are ignored by Dunnart but retained in its finished diagram
        SVG output to be used by interactive SVG for hover text showing
        residues and so on.
        Parameters:
            fh - open filehandle to write SVG XML text to
        Return value:
            None
        """
        shape = 'bioHelix'
        # NB reversed used by Dunnart only for head and tail labels
        # but is also needed in interactive
        # SVG in webserver (pohovertext.js) for getting residue position
        # for hover text; so we use the proorigami namespace and the
        # dunnart namespace
        if self.get_reversed():
            reversed_str = "1"
        else:
            reversed_str = "0"
        clr = rgb_tuple_to_hex_str(self.get_color(), SHAPE_ALPHA)
        style = 'fill:#' + rgb_tuple_to_hex_str(self.get_color()) + ';'
        (residue_names, residue_ids) = get_residue_strings(self.resname_list,
                                                           self.resid_list)
        if self.get_cluster_id():
            clusterid = str(self.get_cluster_id())
        else:
            clusterid = ''
        if self.domain_id:
            domainid = self.domain_id
        else:
            domainid = ''
        # NOTE(review): near the end of this expression the tailLabel literal
        # has no trailing '+' — it joins the ' />' literal by implicit string
        # concatenation, which is valid but easy to misread.
        fh.write(' <dunnart:node id="'+str(self.xmlid)+'" ' +
                 'dunnart:label="' + self.label + '" ' +
                 'dunnart:width="' + str(self.width) + '" ' +
                 'dunnart:height="' + str(self.height) +'" ' +
                 'dunnart:xPos="' + str(self.xpos) + '" ' +
                 'dunnart:yPos="' + str(self.ypos) + '" ' +
                 PTGRAPH_NS + ':reversed="' + reversed_str + '" ' +
                 'dunnart:reversed="' + reversed_str + '" ' +
                 'dunnart:fillColour="' + clr + '" ' + # color
                 'style="' + style + '" ' + # color
                 'dunnart:type="' + shape + '" ' +
                 PTGRAPH_NS + ':' + 'residueNames="' +
                 residue_names + '" ' +
                 PTGRAPH_NS + ':' + 'residueSeqNums="' +
                 residue_ids +
                 '" ' +
                 PTGRAPH_NS + ':' + 'helixType="' +
                 self.get_type().lower() + '" ' +
                 PTGRAPH_NS + ':' + 'sseseqnum="' +
                 self.sseseqnum + '" ' +
                 PTGRAPH_NS + ':' + 'chainId="' +
                 self.get_chainid() + '" ' +
                 PTGRAPH_NS + ':' + 'clusterId="' +
                 clusterid + '" ' +
                 PTGRAPH_NS + ':' + 'domainId="' +
                 domainid + '" ' +
                 # hovertext is updated by Javascript in interactive SVG
                 # as is selected flag
                 PTGRAPH_NS + ':' + 'hovertext="' + self.nodeid +'" ' +
                 PTGRAPH_NS + ':' + 'selected="0" ' +
                 'onmousemove="updateHoverText(evt)" ' +
                 'onclick="handleClickEvent(evt)"' + ' ' +
                 'dunnart:headLabel="' + self.headLabel + '" ' +
                 'dunnart:tailLabel="' + self.tailLabel + '" '
                 ' />\n')
class PTSVGNodeTerminus(PTNodeTerminus, PTSVGNode):
    """
    A Terminus PTNode augmented with SVG information
    """
    def __init__(self, *args):
        """
        Construct PTSVGNodeTerminus with supplied nodeid and type
        Parameters:
            variable parameter list: straight to PTNodeTerminus constructor (q.v.)
        """
        PTNodeTerminus.__init__(self, *args)
        PTSVGNode.__init__(self)

    def set_svginfo(self, xmlid, xpos, ypos, label):
        """
        Set the SVG information in this node.
        Parameters:
            xmlid - the current XML id for this node
            xpos - x coordinate to write terminus
            ypos - y coordinate to write terminus
            label - label to write on this node
        Return value:
            None
        Writes data members:
            xmlid, xpos, ypos, label, height, width
        """
        self.xmlid = xmlid
        self.xpos = xpos
        self.ypos = ypos
        self.label = label
        # termini are drawn as fixed-size square shapes
        self.height = DUNNART_TERMINUS_SIZE
        self.width = DUNNART_TERMINUS_SIZE
        # NOTE(review): duplicate assignment — label was already set above.
        self.label = label

    def write_svg(self, fh):
        """
        Write this termimus to the SVG file.
        Parameters:
            fh - open filehandle to write SVG XML text to
        Return value:
            None
        """
        if self.domain_id:
            domainid = self.domain_id
        else:
            domainid = ''
        #shape='flowEndOfProc'
        shape='rect' # FIXME errors with flowEndOfProc, even without ports
        clr = rgb_tuple_to_hex_str(self.get_color(), SHAPE_ALPHA)
        style = 'fill:#' + rgb_tuple_to_hex_str(self.get_color()) + ';'
        fh.write(' <dunnart:node id="'+str(self.xmlid)+'" ' +
                 'dunnart:label="' + self.label + '" ' +
                 'dunnart:width="' + str(self.width) + '" ' +
                 'dunnart:height="' + str(self.height) +'" ' +
                 'dunnart:xPos="' + str(self.xpos) + '" ' +
                 'dunnart:yPos="' + str(self.ypos) + '" ' +
                 'dunnart:fillColour="' + clr + '" ' + # color
                 'style="' + style + '" ' + # color
                 'dunnart:type="' + shape + '" ' +
                 PTGRAPH_NS + ':' + 'chainId="' +
                 self.get_chainid() + '" ' +
                 PTGRAPH_NS + ':' + 'domainId="' +
                 domainid + '" ' +
                 PTGRAPH_NS + ':' + 'hovertext="chain ' + self.chainid + '" ' +
                 'onclick="handleClickEvent(evt)"' +
                 '/>\n')
class PTSVGNodeStrand(PTNodeStrand, PTSVGNode):
    """
    A Strand PTNode augmented with SVG information
    """
    def __init__(self, *args):
        """
        Construct PTSVGNodeStrand with supplied noeid.
        Parameters:
            Variable parameter list: straight to PTNodeStrand constructor (q.v.).
        """
        PTNodeStrand.__init__(self, *args)
        PTSVGNode.__init__(self)

    def set_svginfo(self, xmlid, xpos, ypos, label, indguide, sseseqnum):
        """
        Set the SVG information in this node.
        Parameters:
            xmlid - the current XML id for this node
            xpos - x coordinate to write strand
            ypos - y coordinate to write strand
            label - label to write on this node
            indguide - PTSVGIndGuide this strand is aligned on
            sseseqnum - SSE sequential number (from 1 to n, same as for
                        'sequential' labelling scheme).
        Return value:
            None
        Writes data members:
            xmlid, xpos, ypos, label, height, width, indguide, sseseqnum
        """
        self.xmlid = xmlid
        self.xpos = xpos
        self.ypos = ypos
        self.label = label
        self.height = self.get_span() * DUNNART_SPAN_LENGTH_FACTOR
        # make sure we don't get strands drawn wrong way due to strand width
        # being greater than height
        if self.height < DUNNART_STRAND_WIDTH + 2:
            self.height = DUNNART_STRAND_WIDTH + 2
        self.width = DUNNART_STRAND_WIDTH
        if self.get_sideways():
            # swap width and height so the shape is laid out horizontally
            temp = self.height
            self.height = self.width
            self.width = temp
        self.indguide = indguide
        self.sseseqnum = sseseqnum

    def write_svg(self, fh):
        """
        Write this strand to the SVG file.
        Mostly there are Dunnart namespace attributes for Dunnart to use
        in layout out diagram, but also some proorigami namespace attributes
        that are ignored by Dunnart but retained in its finished diagram
        SVG output to be used by interactive SVG for hover text showing
        residues and so on.
        Parameters:
            fh - open filehandle to write SVG XML text to
        Return value:
            None
        """
        shape = 'bioStrand'
        if self.get_reversed():
            reversed_str = "1"
        else:
            reversed_str = "0"
        clr = rgb_tuple_to_hex_str(self.get_color(), SHAPE_ALPHA)
        style = 'fill:#' + rgb_tuple_to_hex_str(self.get_color()) + ';'
        (residue_names, residue_ids) = get_residue_strings(self.resname_list,
                                                           self.resid_list)
        if self.get_sheet_id():
            sheetid = self.get_sheet_id()
        else:
            sheetid = ''
        if self.domain_id:
            domainid = self.domain_id
        else:
            domainid = ''
        # NOTE(review): the tailLabel literal below has no trailing '+'; it
        # joins the following 'onmousemove' literal via implicit string
        # concatenation, which is valid but easy to misread.
        fh.write(' <dunnart:node id="'+str(self.xmlid)+'" ' +
                 'dunnart:label="' + self.label + '" ' +
                 'dunnart:width="' + str(self.width) + '" ' +
                 'dunnart:height="' + str(self.height) +'" ' +
                 'dunnart:xPos="' + str(self.xpos) + '" ' +
                 'dunnart:yPos="' + str(self.ypos) + '" ' +
                 'dunnart:reversed="' + reversed_str + '" ' +
                 'dunnart:fillColour="' + clr + '" ' + # color
                 'style="' + style + '" ' + # color
                 'dunnart:type="' + shape + '" ' +
                 PTGRAPH_NS + ':' + 'residueNames="' +
                 residue_names + '" ' +
                 PTGRAPH_NS + ':' + 'residueSeqNums="' +
                 residue_ids +
                 '" ' +
                 PTGRAPH_NS + ':' + 'sseseqnum="' +
                 self.sseseqnum + '" ' +
                 PTGRAPH_NS + ':' + 'chainId="' +
                 self.get_chainid() + '" ' +
                 PTGRAPH_NS + ':' + 'sheetId="' +
                 sheetid + '" ' +
                 PTGRAPH_NS + ':' + 'domainId="' +
                 domainid + '" ' +
                 # hovertext is updated by Javascript in interactive SVG
                 PTGRAPH_NS + ':' + 'hovertext="' + self.nodeid + '" ' +
                 PTGRAPH_NS + ':' + 'selected="0" ' +
                 'dunnart:headLabel="' + self.headLabel + '" ' +
                 'dunnart:tailLabel="' + self.tailLabel + '" '
                 'onmousemove="updateHoverText(evt)" ' +
                 'onclick="handleClickEvent(evt)"' +
                 ' />\n')
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def get_residue_strings(resname_list, resid_list):
    """
    Given a list of residue names and a list of residue PDB sequence
    identifiers, return string representations of the lists to put in SVG
    for use e.g. in hovertext on interactive SVG.

    Parameters:
       resname_list - list of 3-letter residue names
       resid_list - list of residue PDB sequence numbers

    Return value:
       tuple (residue_names, residue_ids) where
        residue_names is string with all residue names space-separated
        residue_ids is string with all sequence numbers space-separated
    """
    # str.join handles the empty, single-element and multi-element cases
    # uniformly (an empty list yields ""), replacing the previous
    # reduce()-based concatenation which needed three separate branches.
    # Lists can legitimately be empty, e.g. for connectors between a
    # terminus and an SSE node when there is no coil region in between.
    residue_names = ' '.join(str(name) for name in resname_list)
    residue_ids = ' '.join(str(resid) for resid in resid_list)
    return (residue_names, residue_ids)
|
{
"content_hash": "2741753ebd93cd51980fbd3d1cfcde62",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 80,
"avg_line_length": 38.17439703153989,
"alnum_prop": 0.5115182737169518,
"repo_name": "NirBenTalLab/proorigami-cde-package",
"id": "4144e068f832e9297d74ad57c4f6a4e46c88fb19",
"size": "21101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cde-root/local/munk/data/phd/phd/ptgraph/ptsvgnode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "16762"
},
{
"name": "Python",
"bytes": "4730244"
},
{
"name": "Shell",
"bytes": "9915"
}
],
"symlink_target": ""
}
|
import sys
import argparse
from socks1 import * #import all the socks from the file
from twisted.internet import reactor
#from twisted.protocols import socks
from twisted.python import log
sys.path.append('.')
import marionette_tg.conf
def startthestuff(port, address):
    """
    Create and start a SOCKS5 proxy server.

    Parameters:
        port    - TCP port the SOCKS5 server listens on
        address - interface address to bind to (e.g. '0.0.0.0')

    Return value:
        human-readable status string (currently unused by the caller)
    """
    # Fix: removed the unused local binding `ip = address`.
    s5 = Socks5(address, port)
    s5.start()
    return 'started socks5 on ' + str(address) + ' with port ' + str(port) #outputs? atm no..
# Command-line interface: parse the listen port, then (when run as a
# script) start the SOCKS5 server inside the twisted reactor.
parser = argparse.ArgumentParser(
    description='SOCKS5 proxy server.')
parser.add_argument('--local_port', '-lport', dest='local_port', required=True,
                    help='local port to listen on for HTTP requests')
# NOTE(review): argument parsing runs at import time, not only under
# __main__, so importing this module without --local_port exits the process.
args = parser.parse_args()
SOCKS_PORT = int(args.local_port)
port = SOCKS_PORT
address = '0.0.0.0'  # bind to all interfaces
if '__main__' == __name__:
    # Enable twisted logging to stdout when debug is set in the config.
    if marionette_tg.conf.get("general.debug"):
        log.startLogging(sys.stdout)
    #reactor.listenTCP(SOCKS_PORT, socks.SOCKSv4Factory(None))
    startthestuff(port, address)
    reactor.run()
    #<---
#<---
|
{
"content_hash": "8882ec5c143bd4201ffce21889ae8880",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 93,
"avg_line_length": 24.725,
"alnum_prop": 0.6855409504550051,
"repo_name": "flipchan/LayerProx",
"id": "3502b24791e61a978412b3cd4a44b7d5635c662e",
"size": "1145",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "versions/offthewire_version/bin/sock5-server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22156"
},
{
"name": "HTML",
"bytes": "15466"
},
{
"name": "JavaScript",
"bytes": "48"
},
{
"name": "Python",
"bytes": "1335880"
},
{
"name": "Shell",
"bytes": "12455"
}
],
"symlink_target": ""
}
|
import copy
from nova.api.validation import parameter_types
# JSON-Schema body for the createBackup server action: requires name,
# backup_type and rotation; rotation must be a non-negative integer and
# metadata, when supplied, must match the common metadata schema.
create_backup = {
    'type': 'object',
    'properties': {
        'createBackup': {
            'type': 'object',
            'properties': {
                'name': parameter_types.name,
                'backup_type': {
                    'type': 'string',
                },
                'rotation': parameter_types.non_negative_integer,
                'metadata': parameter_types.metadata,
            },
            'required': ['name', 'backup_type', 'rotation'],
            'additionalProperties': False,
        },
    },
    'required': ['createBackup'],
    'additionalProperties': False,
}
# v2.0 variant of the schema: identical except the backup name may carry
# leading/trailing spaces (legacy behaviour for the older API version).
create_backup_v20 = copy.deepcopy(create_backup)
create_backup_v20['properties'][
    'createBackup']['properties']['name'] = (parameter_types.
                                             name_with_leading_trailing_spaces)
|
{
"content_hash": "3d77f4aa965a8d614b41c6f9187b5d7b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 65,
"avg_line_length": 27.741935483870968,
"alnum_prop": 0.5348837209302325,
"repo_name": "mahak/nova",
"id": "29401c853b427550feb8fe69ad87d1ec8dd383a7",
"size": "1491",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/schemas/create_backup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import re
from OpenSSL import SSL
from netlib import http_auth, certutils, tcp
from .. import utils, platform, version
from .primitives import RegularProxyMode, TransparentProxyMode, UpstreamProxyMode, ReverseProxyMode, Socks5ProxyMode
TRANSPARENT_SSL_PORTS = [443, 8443]
CONF_BASENAME = "mitmproxy"
CA_DIR = "~/.mitmproxy"
class HostMatcher(object):
    """
    Matches "host:port" strings against a list of case-insensitive
    regular-expression patterns.  Falsy when no patterns are configured.
    """

    def __init__(self, patterns=None):
        # Fix: avoid a mutable default argument ([]); None means "no patterns".
        self.patterns = list(patterns) if patterns is not None else []
        self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns]

    def __call__(self, address):
        """Return True if the wrapped address matches any configured pattern."""
        address = tcp.Address.wrap(address)
        host = "%s:%s" % (address.host, address.port)
        return any(rex.search(host) for rex in self.regexes)

    def __nonzero__(self):  # Python 2 truthiness
        return bool(self.patterns)

    # Python 3 compatibility alias (backward-compatible addition).
    __bool__ = __nonzero__
class ProxyConfig:
    """
    Aggregates proxy-server configuration: listen address, operating mode,
    SSL/TLS versions and ciphers, certificate store, and host filters.
    """

    def __init__(
        self,
        host='',
        port=8080,
        server_version=version.NAMEVERSION,
        cadir=CA_DIR,
        clientcerts=None,
        no_upstream_cert=False,
        body_size_limit=None,
        mode=None,
        upstream_server=None,
        http_form_in=None,
        http_form_out=None,
        authenticator=None,
        ignore_hosts=None,
        tcp_hosts=None,
        ciphers_client=None,
        ciphers_server=None,
        certs=None,
        certforward=False,
        ssl_version_client="secure",
        ssl_version_server="secure",
        ssl_ports=TRANSPARENT_SSL_PORTS
    ):
        # Fix: the list parameters previously used mutable default
        # arguments ([]), which are shared across all calls; None is now
        # the sentinel and falls back to a fresh empty list.
        ignore_hosts = [] if ignore_hosts is None else ignore_hosts
        tcp_hosts = [] if tcp_hosts is None else tcp_hosts
        certs = [] if certs is None else certs
        self.host = host
        self.port = port
        self.server_version = server_version
        self.ciphers_client = ciphers_client
        self.ciphers_server = ciphers_server
        self.clientcerts = clientcerts
        self.no_upstream_cert = no_upstream_cert
        self.body_size_limit = body_size_limit
        # Select the proxy-mode implementation; default is a regular proxy.
        if mode == "transparent":
            self.mode = TransparentProxyMode(platform.resolver(), ssl_ports)
        elif mode == "socks5":
            self.mode = Socks5ProxyMode(ssl_ports)
        elif mode == "reverse":
            self.mode = ReverseProxyMode(upstream_server)
        elif mode == "upstream":
            self.mode = UpstreamProxyMode(upstream_server)
        else:
            self.mode = RegularProxyMode()
        # Handle manual overrides of the http forms
        self.mode.http_form_in = http_form_in or self.mode.http_form_in
        self.mode.http_form_out = http_form_out or self.mode.http_form_out
        self.check_ignore = HostMatcher(ignore_hosts)
        self.check_tcp = HostMatcher(tcp_hosts)
        self.authenticator = authenticator
        self.cadir = os.path.expanduser(cadir)
        self.certstore = certutils.CertStore.from_store(
            self.cadir,
            CONF_BASENAME)
        for spec, cert in certs:
            self.certstore.add_cert_file(spec, cert)
        self.certforward = certforward
        # Resolve symbolic SSL version names to OpenSSL method/options pairs.
        self.openssl_method_client, self.openssl_options_client = version_to_openssl(
            ssl_version_client)
        self.openssl_method_server, self.openssl_options_server = version_to_openssl(
            ssl_version_server)
        self.ssl_ports = ssl_ports
# SSL/TLS version names accepted on the command line.  "all" maps to
# SSLv23; "secure" maps to SSLv23 with SSLv2/SSLv3 disabled (see
# version_to_openssl below); the rest name a specific *_METHOD.
sslversion_choices = (
    "all",
    "secure",
    "SSLv2",
    "SSLv3",
    "TLSv1",
    "TLSv1_1",
    "TLSv1_2")
def version_to_openssl(version):
    """
    Convert a reasonable SSL version specification into the format OpenSSL
    expects.

    Returns an (SSL method, SSL options) pair; options is None unless the
    method needs restricting.  Raises ValueError on unknown names.

    Don't ask...
    https://bugs.launchpad.net/pyopenssl/+bug/1020632/comments/3
    """
    # Guard-clause style: each known name returns immediately.
    if version == "all":
        return SSL.SSLv23_METHOD, None
    if version == "secure":
        # SSLv23_METHOD + NO_SSLv2 + NO_SSLv3 == TLS 1.0+
        # TLSv1_METHOD would be TLS 1.0 only
        return SSL.SSLv23_METHOD, (SSL.OP_NO_SSLv2 | SSL.OP_NO_SSLv3)
    if version in sslversion_choices:
        return getattr(SSL, "%s_METHOD" % version), None
    raise ValueError("Invalid SSL version: %s" % version)
def process_proxy_options(parser, options):
    """
    Build a ProxyConfig from parsed command-line options.

    parser is the argparse parser, used to report problems via
    parser.error (which normally exits the process); options is its
    parsed-arguments namespace.  Returns a ProxyConfig on success.
    """
    body_size_limit = utils.parse_size(options.body_size_limit)
    # c counts how many of the mutually exclusive mode flags were given.
    c = 0
    mode, upstream_server = None, None
    if options.transparent_proxy:
        c += 1
        if not platform.resolver:
            return parser.error(
                "Transparent mode not supported on this platform.")
        mode = "transparent"
    if options.socks_proxy:
        c += 1
        mode = "socks5"
    if options.reverse_proxy:
        c += 1
        mode = "reverse"
        upstream_server = options.reverse_proxy
    if options.upstream_proxy:
        c += 1
        mode = "upstream"
        upstream_server = options.upstream_proxy
    if c > 1:
        return parser.error(
            "Transparent, SOCKS5, reverse and upstream proxy mode "
            "are mutually exclusive.")
    # Client certificates must point at an existing directory.
    if options.clientcerts:
        options.clientcerts = os.path.expanduser(options.clientcerts)
        if not os.path.exists(
                options.clientcerts) or not os.path.isdir(
                options.clientcerts):
            return parser.error(
                "Client certificate directory does not exist or is not a directory: %s" %
                options.clientcerts)
    # Pick the authentication backend: single user, any non-anonymous
    # user, or an htpasswd file; otherwise authentication is disabled.
    if (options.auth_nonanonymous or options.auth_singleuser or options.auth_htpasswd):
        if options.auth_singleuser:
            if len(options.auth_singleuser.split(':')) != 2:
                return parser.error(
                    "Invalid single-user specification. Please use the format username:password")
            username, password = options.auth_singleuser.split(':')
            password_manager = http_auth.PassManSingleUser(username, password)
        elif options.auth_nonanonymous:
            password_manager = http_auth.PassManNonAnon()
        elif options.auth_htpasswd:
            try:
                password_manager = http_auth.PassManHtpasswd(
                    options.auth_htpasswd)
            except ValueError as v:
                return parser.error(v.message)
        authenticator = http_auth.BasicProxyAuth(password_manager, "mitmproxy")
    else:
        authenticator = http_auth.NullProxyAuth(None)
    # Normalize --cert SPECs into (domain, path) pairs; bare paths match "*".
    certs = []
    for i in options.certs:
        parts = i.split("=", 1)
        if len(parts) == 1:
            parts = ["*", parts[0]]
        parts[1] = os.path.expanduser(parts[1])
        if not os.path.exists(parts[1]):
            parser.error("Certificate file does not exist: %s" % parts[1])
        certs.append(parts)
    ssl_ports = options.ssl_ports
    if options.ssl_ports != TRANSPARENT_SSL_PORTS:
        # arparse appends to default value by default, strip that off.
        # see http://bugs.python.org/issue16399
        ssl_ports = ssl_ports[len(TRANSPARENT_SSL_PORTS):]
    return ProxyConfig(
        host=options.addr,
        port=options.port,
        cadir=options.cadir,
        clientcerts=options.clientcerts,
        no_upstream_cert=options.no_upstream_cert,
        body_size_limit=body_size_limit,
        mode=mode,
        upstream_server=upstream_server,
        http_form_in=options.http_form_in,
        http_form_out=options.http_form_out,
        ignore_hosts=options.ignore_hosts,
        tcp_hosts=options.tcp_hosts,
        authenticator=authenticator,
        ciphers_client=options.ciphers_client,
        ciphers_server=options.ciphers_server,
        certs=certs,
        certforward=options.certforward,
        ssl_version_client=options.ssl_version_client,
        ssl_version_server=options.ssl_version_server,
        ssl_ports=ssl_ports
    )
def ssl_option_group(parser):
    """Attach the SSL-related command-line options to *parser* as an argument group."""
    group = parser.add_argument_group("SSL")
    group.add_argument(
        "--cert",
        dest='certs',
        default=[],
        type=str,
        metavar="SPEC",
        action="append",
        help='Add an SSL certificate. SPEC is of the form "[domain=]path". '
        'The domain may include a wildcard, and is equal to "*" if not specified. '
        'The file at path is a certificate in PEM format. If a private key is included in the PEM, '
        'it is used, else the default key in the conf dir is used. '
        'The PEM file should contain the full certificate chain, with the leaf certificate as the first entry. '
        'Can be passed multiple times.')
    group.add_argument(
        "--cert-forward", action="store_true",
        dest="certforward", default=False,
        help="Simply forward SSL certificates from upstream."
    )
    group.add_argument(
        "--ciphers-client", action="store",
        type=str, dest="ciphers_client", default=None,
        help="Set supported ciphers for client connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--ciphers-server", action="store",
        type=str, dest="ciphers_server", default=None,
        help="Set supported ciphers for server connections. (OpenSSL Syntax)"
    )
    group.add_argument(
        "--client-certs", action="store",
        type=str, dest="clientcerts", default=None,
        help="Client certificate directory."
    )
    group.add_argument(
        "--no-upstream-cert", default=False,
        action="store_true", dest="no_upstream_cert",
        help="Don't connect to upstream server to look up certificate details."
    )
    # NOTE: action="append" extends the default list, so user-supplied
    # ports pile onto TRANSPARENT_SSL_PORTS; process_proxy_options strips
    # the defaults back off when any port was given explicitly.
    group.add_argument(
        "--ssl-port",
        action="append",
        type=int,
        dest="ssl_ports",
        default=list(TRANSPARENT_SSL_PORTS),
        metavar="PORT",
        help="Can be passed multiple times. Specify destination ports which are assumed to be SSL. "
        "Defaults to %s." %
        str(TRANSPARENT_SSL_PORTS))
    group.add_argument(
        "--ssl-version-client", dest="ssl_version_client",
        default="secure", action="store",
        choices=sslversion_choices,
        help="Set supported SSL/TLS version for client connections. "
        "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure."
    )
    group.add_argument(
        "--ssl-version-server", dest="ssl_version_server",
        default="secure", action="store",
        choices=sslversion_choices,
        help="Set supported SSL/TLS version for server connections. "
        "SSLv2, SSLv3 and 'all' are INSECURE. Defaults to secure."
    )
|
{
"content_hash": "7c98c7b9710458ccc691bea31f194035",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 116,
"avg_line_length": 36.2226148409894,
"alnum_prop": 0.6089162032972393,
"repo_name": "ryoqun/mitmproxy",
"id": "3f5796694da4803ded4055593baf09e255bce7a6",
"size": "10251",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "libmproxy/proxy/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "340150"
},
{
"name": "HTML",
"bytes": "97993"
},
{
"name": "JavaScript",
"bytes": "1728505"
},
{
"name": "Python",
"bytes": "863436"
},
{
"name": "Shell",
"bytes": "5474"
}
],
"symlink_target": ""
}
|
import logging
import datetime
from decimal import Decimal
from django.conf import settings
from django.db import models, connection
from django.contrib.auth.models import User
class Overdraft(Exception):
    """Raised when a withdrawal would take a wallet balance below zero."""
# Module-level logger for the wallet app, writing to stderr via a
# StreamHandler with a timestamped format.
logger = logging.getLogger('wallet')
handler = logging.StreamHandler()
formatter = logging.Formatter(
    "%(asctime)s %(levelname)-8s --- %(message)s"
)
handler.setFormatter(formatter)
# When wallet logging is disabled in settings, only emit ERROR and above.
# Fix: logging.Handler has no setHandler() method -- the original call
# raised AttributeError; setLevel() is the correct API.
if not getattr(settings, 'WALLET_LOGGER', True):
    handler.setLevel(logging.ERROR)
logger.addHandler(handler)
class Wallet(models.Model):
    """
    A per-user money wallet.  The balance is not stored directly; it is
    the sum of the values of the wallet's Transaction rows.
    """
    user = models.ForeignKey(User, unique=True, related_name='wallets')

    class Meta:
        permissions = (('can_view_wallet_report', 'Can view wallet report'),)

    def __unicode__(self):
        return "%s's wallet" % self.user.username

    def get_balance(self):
        """Return the current balance (Decimal('0.0') when there are no transactions)."""
        # Raw aggregate query; parameterized, so not injectable.
        cursor = connection.cursor()
        cursor.execute(
            "SELECT SUM(value) FROM wallet_transaction WHERE wallet_id = %s",
            (self.id,)
        )
        value = cursor.fetchone()[0]
        # SUM over zero rows yields NULL -> None.
        if value is None:
            value = Decimal('0.0')
        return value

    def withdraw(self, value, allow_overdraft=False):
        """
        Record a withdrawal of *value* (non-negative int or Decimal) as a
        negative Transaction and return it.  Raises Overdraft when the
        withdrawal would take the balance below zero and allow_overdraft
        is False; raises ValueError for bad types or negative amounts.
        """
        if not isinstance(value, int) and not isinstance(value, Decimal):
            raise ValueError("Value must be a Python int or Decimal")
        if value < 0:
            raise ValueError("You can't withdraw a negative amount")
        if not allow_overdraft and (self.get_balance() - value) < 0:
            raise Overdraft
        return self.transactions.create(
            date=datetime.datetime.now(),
            value=value * Decimal('-1.0'),
        )

    def deposit(self, value):
        """
        Record a deposit of *value* (non-negative int or Decimal) as a
        positive Transaction and return it.  Raises ValueError for bad
        types or negative amounts.
        """
        if not isinstance(value, int) and not isinstance(value, Decimal):
            raise ValueError("Value must be a Python int or Decimal")
        if value < 0:
            raise ValueError("You can't deposit a negative amount")
        return self.transactions.create(
            date=datetime.datetime.now(),
            value=value,
        )
class Transaction(models.Model):
    """A single signed balance change on a Wallet (negative value = withdrawal)."""
    wallet = models.ForeignKey(Wallet, related_name='transactions')
    date = models.DateTimeField()
    value = models.DecimalField(max_digits=20, decimal_places=2)
    notes = models.TextField(null=True, blank=True)

    def __unicode__(self):
        return 'Transaction #%d (%.2f)' % (self.id, self.value)
class PaymentOption(models.Model):
    """A purchasable top-up: pay dollar_amount to receive wallet_amount in the wallet."""
    name = models.CharField(max_length=255)
    dollar_amount = models.DecimalField(max_digits=20, decimal_places=2)
    wallet_amount = models.DecimalField(max_digits=20, decimal_places=2)
    enabled = models.BooleanField(default=True)

    def __unicode__(self):
        return '%s ($%.2f)' % (self.name, self.dollar_amount)
class Invoice(models.Model):
    """
    Records that a user was billed for a PaymentOption.
    """
    user = models.ForeignKey(User, related_name='wallet_invoices')
    option = models.ForeignKey(PaymentOption, related_name='invoices')
    date_billed = models.DateTimeField()
    # Nullable one-to-one-style link to the wallet Transaction; presumably
    # set once the wallet has been credited -- TODO confirm against callers.
    transaction = models.ForeignKey(
        Transaction,
        related_name='invoices',
        null=True,
        blank=True,
        unique=True,
    )
|
{
"content_hash": "0b92544dee772d5ebac0c7b12b96970e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 77,
"avg_line_length": 31.887755102040817,
"alnum_prop": 0.6432,
"repo_name": "wengcq/django-wallet",
"id": "a7944b051002ebde0c7f41e0933efc64b10f89da",
"size": "3125",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "wallet/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1624"
},
{
"name": "Python",
"bytes": "21245"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from os.path import join
import tempfile
import shutil
from io import BytesIO
try:
from subprocess import STDOUT, CalledProcessError
from sympy.core.compatibility import check_output
except ImportError:
pass
from sympy.utilities.exceptions import SymPyDeprecationWarning
from sympy.utilities.misc import find_executable
from .latex import latex
from sympy.utilities.decorator import doctest_depends_on
@doctest_depends_on(exe=('latex', 'dvipng'), modules=('pyglet',),
                    disable_viewers=('evince', 'gimp', 'superior-dvi-viewer'))
def preview(expr, output='png', viewer=None, euler=True, packages=(),
            filename=None, outputbuffer=None, preamble=None, dvioptions=None,
            outputTexFile=None, **latex_settings):
    r"""
    View expression or LaTeX markup in PNG, DVI, PostScript or PDF form.

    If the expr argument is an expression, it will be exported to LaTeX and
    then compiled using the available TeX distribution. The first argument,
    'expr', may also be a LaTeX string. The function will then run the
    appropriate viewer for the given output format or use the user defined
    one. By default png output is generated.

    By default pretty Euler fonts are used for typesetting (they were used to
    typeset the well known "Concrete Mathematics" book). For that to work, you
    need the 'eulervm.sty' LaTeX style (in Debian/Ubuntu, install the
    texlive-fonts-extra package). If you prefer default AMS fonts or your
    system lacks 'eulervm' LaTeX package then unset the 'euler' keyword
    argument.

    To use viewer auto-detection, lets say for 'png' output, issue

    >>> from sympy import symbols, preview, Symbol
    >>> x, y = symbols("x,y")
    >>> preview(x + y, output='png')

    This will choose 'pyglet' by default. To select a different one, do

    >>> preview(x + y, output='png', viewer='gimp')

    The 'png' format is considered special. For all other formats the rules
    are slightly different. As an example we will take 'dvi' output format. If
    you would run

    >>> preview(x + y, output='dvi')

    then 'view' will look for available 'dvi' viewers on your system
    (predefined in the function, so it will try evince, first, then kdvi and
    xdvi). If nothing is found you will need to set the viewer explicitly.

    >>> preview(x + y, output='dvi', viewer='superior-dvi-viewer')

    This will skip auto-detection and will run user specified
    'superior-dvi-viewer'. If 'view' fails to find it on your system it will
    gracefully raise an exception.

    You may also enter 'file' for the viewer argument. Doing so will cause
    this function to return a file object in read-only mode, if 'filename'
    is unset. However, if it was set, then 'preview' writes the generated
    file to this filename instead.

    There is also support for writing to a BytesIO like object, which needs
    to be passed to the 'outputbuffer' argument.

    >>> from io import BytesIO
    >>> obj = BytesIO()
    >>> preview(x + y, output='png', viewer='BytesIO',
    ...         outputbuffer=obj)

    The LaTeX preamble can be customized by setting the 'preamble' keyword
    argument. This can be used, e.g., to set a different font size, use a
    custom documentclass or import certain set of LaTeX packages.

    >>> preamble = "\\documentclass[10pt]{article}\n" \
    ...            "\\usepackage{amsmath,amsfonts}\\begin{document}"
    >>> preview(x + y, output='png', preamble=preamble)

    If the value of 'output' is different from 'dvi' then command line
    options can be set ('dvioptions' argument) for the execution of the
    'dvi'+output conversion tool. These options have to be in the form of a
    list of strings (see subprocess.Popen).

    Additional keyword args will be passed to the latex call, e.g., the
    symbol_names flag.

    >>> phidd = Symbol('phidd')
    >>> preview(phidd, symbol_names={phidd:r'\ddot{\varphi}'})

    For post-processing the generated TeX File can be written to a file by
    passing the desired filename to the 'outputTexFile' keyword
    argument. To write the TeX code to a file named
    "sample.tex" and run the default png viewer to display the resulting
    bitmap, do

    >>> preview(x + y, outputTexFile="sample.tex")
    """
    # Pseudo-viewers handled in-process rather than as external programs.
    special = [ 'pyglet' ]
    # Resolve the viewer: auto-detect for known output formats unless the
    # caller supplied one explicitly.
    if viewer is None:
        if output == "png":
            viewer = "pyglet"
        else:
            # sorted in order from most pretty to most ugly
            # very discussable, but indeed 'gv' looks awful :)
            # TODO add candidates for windows to list
            candidates = {
                "dvi": [ "evince", "okular", "kdvi", "xdvi" ],
                "ps": [ "evince", "okular", "gsview", "gv" ],
                "pdf": [ "evince", "okular", "kpdf", "acroread", "xpdf", "gv" ],
            }
            try:
                for candidate in candidates[output]:
                    path = find_executable(candidate)
                    if path is not None:
                        viewer = path
                        break
                else:
                    raise SystemError(
                        "No viewers found for '%s' output format." % output)
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
    else:
        # Validate the user-supplied viewer (pseudo-viewers aside).
        if viewer == "file":
            if filename is None:
                SymPyDeprecationWarning(feature="Using viewer=\"file\" without a "
                    "specified filename", deprecated_since_version="0.7.3",
                    useinstead="viewer=\"file\" and filename=\"desiredname\"",
                    issue=7018).warn()
        elif viewer == "StringIO":
            SymPyDeprecationWarning(feature="The preview() viewer StringIO",
                useinstead="BytesIO", deprecated_since_version="0.7.4",
                issue=7083).warn()
            viewer = "BytesIO"
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"StringIO\"")
        elif viewer == "BytesIO":
            if outputbuffer is None:
                raise ValueError("outputbuffer has to be a BytesIO "
                                 "compatible object if viewer=\"BytesIO\"")
        elif viewer not in special and not find_executable(viewer):
            raise SystemError("Unrecognized viewer: %s" % viewer)
    # Build the LaTeX preamble unless the caller supplied a custom one.
    if preamble is None:
        actual_packages = packages + ("amsmath", "amsfonts")
        if euler:
            actual_packages += ("euler",)
        package_includes = "\n" + "\n".join(["\\usepackage{%s}" % p
                                             for p in actual_packages])
        preamble = r"""\documentclass[12pt]{article}
\pagestyle{empty}
%s
\begin{document}
""" % (package_includes)
    else:
        if len(packages) > 0:
            raise ValueError("The \"packages\" keyword must not be set if a "
                             "custom LaTeX preamble was specified")
    latex_main = preamble + '\n%s\n\n' + r"\end{document}"
    # Either use the given LaTeX markup directly or render the expression.
    if isinstance(expr, str):
        latex_string = expr
    else:
        latex_string = latex(expr, mode='inline', **latex_settings)
    try:
        # All intermediate files live in a temp dir removed in the finally.
        workdir = tempfile.mkdtemp()
        with open(join(workdir, 'texput.tex'), 'w') as fh:
            fh.write(latex_main % latex_string)
        if outputTexFile is not None:
            shutil.copyfile(join(workdir, 'texput.tex'), outputTexFile)
        if not find_executable('latex'):
            raise RuntimeError("latex program is not installed")
        # Compile the .tex to a .dvi in the working directory.
        try:
            check_output(['latex', '-halt-on-error', '-interaction=nonstopmode',
                          'texput.tex'], cwd=workdir, stderr=STDOUT)
        except CalledProcessError as e:
            raise RuntimeError(
                "'latex' exited abnormally with the following output:\n%s" %
                e.output)
        # Convert the DVI to the requested format (dvips/dvipdf/dvipng/dvisvgm).
        if output != "dvi":
            defaultoptions = {
                "ps": [],
                "pdf": [],
                "png": ["-T", "tight", "-z", "9", "--truecolor"],
                "svg": ["--no-fonts"],
            }
            commandend = {
                "ps": ["-o", "texput.ps", "texput.dvi"],
                "pdf": ["texput.dvi", "texput.pdf"],
                "png": ["-o", "texput.png", "texput.dvi"],
                "svg": ["-o", "texput.svg", "texput.dvi"],
            }
            if output == "svg":
                cmd = ["dvisvgm"]
            else:
                cmd = ["dvi" + output]
            if not find_executable(cmd[0]):
                raise RuntimeError("%s is not installed" % cmd[0])
            try:
                if dvioptions is not None:
                    cmd.extend(dvioptions)
                else:
                    cmd.extend(defaultoptions[output])
                cmd.extend(commandend[output])
            except KeyError:
                raise SystemError("Invalid output format: %s" % output)
            try:
                check_output(cmd, cwd=workdir, stderr=STDOUT)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s' exited abnormally with the following output:\n%s" %
                    (' '.join(cmd), e.output))
        src = "texput.%s" % (output)
        # Deliver the result: return/write a file, fill a buffer, show it
        # in a pyglet window, or hand it to an external viewer program.
        if viewer == "file":
            if filename is None:
                buffer = BytesIO()
                with open(join(workdir, src), 'rb') as fh:
                    buffer.write(fh.read())
                return buffer
            else:
                shutil.move(join(workdir,src), filename)
        elif viewer == "BytesIO":
            with open(join(workdir, src), 'rb') as fh:
                outputbuffer.write(fh.read())
        elif viewer == "pyglet":
            try:
                from pyglet import window, image, gl
                from pyglet.window import key
            except ImportError:
                raise ImportError("pyglet is required for preview.\n visit http://www.pyglet.org/")
            if output == "png":
                from pyglet.image.codecs.png import PNGImageDecoder
                img = image.load(join(workdir, src), decoder=PNGImageDecoder())
            else:
                raise SystemError("pyglet preview works only for 'png' files.")
            # Window slightly larger than the image so it sits centered.
            offset = 25
            win = window.Window(
                width=img.width + 2*offset,
                height=img.height + 2*offset,
                caption="sympy",
                resizable=False
            )
            win.set_vsync(False)
            try:
                def on_close():
                    win.has_exit = True
                win.on_close = on_close
                def on_key_press(symbol, modifiers):
                    if symbol in [key.Q, key.ESCAPE]:
                        on_close()
                win.on_key_press = on_key_press
                def on_expose():
                    gl.glClearColor(1.0, 1.0, 1.0, 1.0)
                    gl.glClear(gl.GL_COLOR_BUFFER_BIT)
                    img.blit(
                        (win.width - img.width) / 2,
                        (win.height - img.height) / 2
                    )
                win.on_expose = on_expose
                # Simple manual event loop; exits on Q/Escape or window close.
                while not win.has_exit:
                    win.dispatch_events()
                    win.flip()
            except KeyboardInterrupt:
                pass
            win.close()
        else:
            # External viewer executable, run on the generated file.
            try:
                check_output([viewer, src], cwd=workdir, stderr=STDOUT)
            except CalledProcessError as e:
                raise RuntimeError(
                    "'%s %s' exited abnormally with the following output:\n%s" %
                    (viewer, src, e.output))
    finally:
        try:
            shutil.rmtree(workdir) # delete directory
        except OSError as e:
            if e.errno != 2: # code 2 - no such file or directory
                raise
|
{
"content_hash": "1a034243d1303fe7ea26135c762db941",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 99,
"avg_line_length": 37.66457680250784,
"alnum_prop": 0.5558884727424054,
"repo_name": "wolfram74/numerical_methods_iserles_notes",
"id": "9a2f3a206d7cd8cb21425b5ea561ef760334d857",
"size": "12015",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/sympy/printing/preview.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "282435"
},
{
"name": "C++",
"bytes": "59801"
},
{
"name": "CSS",
"bytes": "2038"
},
{
"name": "FORTRAN",
"bytes": "3707"
},
{
"name": "Groff",
"bytes": "6753"
},
{
"name": "HTML",
"bytes": "37522"
},
{
"name": "JavaScript",
"bytes": "1368241"
},
{
"name": "Python",
"bytes": "31296026"
},
{
"name": "Shell",
"bytes": "3869"
},
{
"name": "Smarty",
"bytes": "21425"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
Sample script that monitors smartcard readers.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from sys import stdin, exc_info
from time import sleep
from smartcard.ReaderMonitoring import ReaderMonitor, ReaderObserver
class printobserver(ReaderObserver):
    """A simple reader observer that is notified
    when readers are added/removed from the system and
    prints the list of readers
    """
    # Python 2 tuple-parameter syntax: the monitor passes the second
    # argument as an (addedreaders, removedreaders) pair.
    def update(self, observable, (addedreaders, removedreaders)):
        print "Added readers", addedreaders
        print "Removed readers", removedreaders
try:
    print "Add or remove a smartcard reader to the system."
    print "This program will exit in 10 seconds"
    print ""
    # Register our observer; the monitor calls printobserver.update()
    # whenever the set of readers changes during the 10-second window.
    readermonitor = ReaderMonitor()
    readerobserver = printobserver()
    readermonitor.addObserver(readerobserver)
    sleep(10)
    # don't forget to remove observer, or the
    # monitor will poll forever...
    readermonitor.deleteObserver(readerobserver)
    import sys
    if 'win32' == sys.platform:
        print 'press Enter to continue'
        sys.stdin.read(1)
except:
    # NOTE(review): bare except swallows everything (even KeyboardInterrupt);
    # tolerable in a demo script, but a narrower type would be safer.
    print exc_info()[0], ':', exc_info()[1]
|
{
"content_hash": "ef96a62677ee595b42d16620757a2dc2",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 75,
"avg_line_length": 31.177419354838708,
"alnum_prop": 0.7387480600103467,
"repo_name": "mixja/eap-sim-lab",
"id": "178b6a87bfdced34cebec70a123b5e12e9d633bf",
"size": "1956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pyscard-1.6.16/smartcard/Examples/framework/sample_MonitorReaders.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "296205"
},
{
"name": "CSS",
"bytes": "16483"
},
{
"name": "JavaScript",
"bytes": "102146"
},
{
"name": "Makefile",
"bytes": "9775"
},
{
"name": "Python",
"bytes": "1465805"
},
{
"name": "Shell",
"bytes": "7763"
}
],
"symlink_target": ""
}
|
from rest_framework import serializers
from .models import Message
from .permissions import IsOwnerOrReadOnly
from rest_framework.permissions import IsAuthenticated
class MessageReadSerializer(serializers.ModelSerializer):
    """Read-only representation of a Message; every field is read_only."""
    pk = serializers.IntegerField(read_only=True)
    author = serializers.CharField(read_only=True)
    message = serializers.CharField(read_only=True)
    date = serializers.DateTimeField(read_only=True)
    image = serializers.ImageField(read_only=True)

    class Meta:
        model = Message
        fields = ('pk', 'author', 'message', 'date', 'image')
class MessageWriteSerializer(serializers.ModelSerializer):
    """Write serializer: clients supply the message text and an optional image."""
    message = serializers.CharField()
    image = serializers.ImageField(required=False, allow_null=True)

    class Meta:
        model = Message
        fields = ('message', 'image')
|
{
"content_hash": "14d612875d82e54f1de67d50005ed399",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 33.24,
"alnum_prop": 0.7340553549939831,
"repo_name": "fmarco/message-board",
"id": "422d8bcd61b286263495c81866c0f00be187ce89",
"size": "831",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "messageboard/serializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "345"
},
{
"name": "HTML",
"bytes": "1690"
},
{
"name": "JavaScript",
"bytes": "1806"
},
{
"name": "Python",
"bytes": "15716"
}
],
"symlink_target": ""
}
|
import os
__author__ = 'brucewootton'
"""
In memory blob store with file lock and appending support.
"""
# Module-level blob store: maps uuid -> blob payload.
BLOBS = {}


def startup():
    """No-op initialisation hook (nothing to set up for the in-memory store)."""


def file_name(uuid):
    """No-op: an in-memory store has no backing file for *uuid*."""


def get_blob(uuid):
    """Return the blob stored under *uuid* (raises KeyError if absent)."""
    return BLOBS[uuid]


def write_blob(uuid, blob):
    """Store *blob* under *uuid*, replacing any existing entry."""
    BLOBS[uuid] = blob


def append_data(uuid, value):
    """Append *value* to the blob under *uuid*, creating the entry if missing."""
    # EAFP form of the original membership test; += preserves the original
    # in-place-augmentation semantics for mutable blob types.
    try:
        BLOBS[uuid] += value
    except KeyError:
        BLOBS[uuid] = value


def clear(uuid):
    """Remove the blob stored under *uuid* (raises KeyError if absent)."""
    del BLOBS[uuid]
|
{
"content_hash": "7baf74f55ab699499ba41b9f5daf1c0a",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 58,
"avg_line_length": 12.083333333333334,
"alnum_prop": 0.6091954022988506,
"repo_name": "MPC-Berkeley/barc",
"id": "5b989074dc5bac3aec25f637a26c28a403d4ec2c",
"size": "435",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Dator/data_api/in_memory_provider.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "37857"
},
{
"name": "C++",
"bytes": "34556"
},
{
"name": "CMake",
"bytes": "25703"
},
{
"name": "CSS",
"bytes": "143"
},
{
"name": "HTML",
"bytes": "27848"
},
{
"name": "JavaScript",
"bytes": "10764902"
},
{
"name": "Julia",
"bytes": "117617"
},
{
"name": "Less",
"bytes": "69047"
},
{
"name": "MATLAB",
"bytes": "9115"
},
{
"name": "Python",
"bytes": "343196"
},
{
"name": "SCSS",
"bytes": "69934"
},
{
"name": "Shell",
"bytes": "13578"
},
{
"name": "Vim script",
"bytes": "370"
}
],
"symlink_target": ""
}
|
import numpy as np
from skeleton import Skeleton
class MocapDataset:
    """Base container for motion-capture data.

    Stores a skeleton and a capture frame rate; the per-subject motion data
    and camera parameters are left as None and must be populated by a
    concrete subclass.
    """

    def __init__(self, fps, skeleton):
        self._skeleton = skeleton
        self._fps = fps
        # Subclasses are responsible for filling these two fields.
        self._data = None
        self._cameras = None

    def remove_joints(self, joints_to_remove):
        """Remove the given joints from the skeleton and slice every stored
        'positions' array down to the surviving joints."""
        kept = self._skeleton.remove_joints(joints_to_remove)
        for actions in self._data.values():
            for clip in actions.values():
                clip['positions'] = clip['positions'][:, kept]

    def __getitem__(self, key):
        return self._data[key]

    def subjects(self):
        """View of the subject identifiers present in the dataset."""
        return self._data.keys()

    def fps(self):
        """Capture frame rate (frames per second)."""
        return self._fps

    def skeleton(self):
        """The skeleton object this dataset was constructed with."""
        return self._skeleton

    def cameras(self):
        """Camera parameters, or None if the subclass provides none."""
        return self._cameras

    def supports_semi_supervised(self):
        # Subclasses may override this to opt in to semi-supervised training.
        return False
|
{
"content_hash": "1826b536befde9165e0b17b92cd14466",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 64,
"avg_line_length": 24.216216216216218,
"alnum_prop": 0.6551339285714286,
"repo_name": "MTASZTAKI/ApertusVR",
"id": "c297220dbfb4df239b5d46121027246179a5e806",
"size": "896",
"binary": false,
"copies": "3",
"ref": "refs/heads/0.9",
"path": "plugins/physics/bulletPhysics/3rdParty/bullet3/examples/pybullet/gym/pybullet_envs/deep_mimic/mocap/mocap_dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7599"
},
{
"name": "C++",
"bytes": "1207412"
},
{
"name": "CMake",
"bytes": "165066"
},
{
"name": "CSS",
"bytes": "1816"
},
{
"name": "GLSL",
"bytes": "223507"
},
{
"name": "HLSL",
"bytes": "141879"
},
{
"name": "HTML",
"bytes": "34827"
},
{
"name": "JavaScript",
"bytes": "140550"
},
{
"name": "Python",
"bytes": "1370"
}
],
"symlink_target": ""
}
|
def initialize(context):
    """One-time algorithm setup: choose the traded security and reset the
    day counter used to age the position."""
    # Trade Apple; `symbol` is provided by the backtesting environment.
    context.security = symbol('AAPL')
    # Days elapsed since the last buy; lets the sell rule require a
    # minimum holding period before liquidating.
    context.daycounter = 0
# The handle_data function is where the real work is done.
# It runs every minute (in live trading and minute backtesting mode)
# or once per day (in daily backtesting mode).
def handle_data(context, data):
    """Per-bar trading logic.

    Buys with all available cash when the price breaks 3% above the 5-day
    moving average, and liquidates the position once the price has sat
    below the average for more than 30 bars since the last buy.
    """
    # One more bar has elapsed since the last purchase.
    context.daycounter += 1

    # 5-day moving average vs. the latest price for the tracked security.
    average_price = data[context.security].mavg(5)
    current_price = data[context.security].price

    # Cash currently available in the simulated portfolio.
    cash = context.portfolio.cash

    breakout = current_price > 1.03 * average_price
    if breakout and cash > current_price:
        # Spend as much of the cash as whole shares allow.
        number_of_shares = int(cash / current_price)
        order(context.security, +number_of_shares)
        log.info("Buying %s" % (context.security.symbol))
        # Restart the holding-age clock after a purchase.
        context.daycounter = 0
    elif current_price < average_price and context.daycounter > 30:
        # Price has been weak for a long stretch: close the whole position.
        order_target(context.security, 0)
        log.info("Selling %s" % (context.security.symbol))

    # Chart the raw price alongside the backtest results (up to five
    # custom series are supported by record()).
    record(stock_price=data[context.security].price)
|
{
"content_hash": "6c4b0fc794eed811867fa2ee1c73725f",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 83,
"avg_line_length": 47.76923076923077,
"alnum_prop": 0.6856682769726248,
"repo_name": "mmilutinovic1313/zipline-with-algorithms",
"id": "49d938dab5f3f4d4da303a500ead7bb2757c0ffc",
"size": "3399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ALGORITHMS/ThirtyDayThreePercent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1047562"
},
{
"name": "Shell",
"bytes": "3962"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
# Generic payload type for the optional `cls` response-transformer callback.
T = TypeVar('T')
# Signature of that callback: (pipeline response, deserialized body, headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Shared msrest serializer; client-side validation is disabled so the
# service performs all constraint checking.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_all_request(
    subscription_id: str,
    *,
    running: Optional[bool] = None,
    continuation_token_parameter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing Experiment resources across a subscription.

    Optional filters are added as query parameters only when supplied;
    remaining ``kwargs`` are forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Chaos/experiments")
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Optional filters: only serialized when the caller provided them.
    if running is not None:
        _params['running'] = _SERIALIZER.query("running", running, 'bool')
    if continuation_token_parameter is not None:
        _params['continuationToken'] = _SERIALIZER.query("continuation_token_parameter", continuation_token_parameter, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    *,
    running: Optional[bool] = None,
    continuation_token_parameter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing Experiment resources in one resource group."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Optional filters: only serialized when the caller provided them.
    if running is not None:
        _params['running'] = _SERIALIZER.query("running", running, 'bool')
    if continuation_token_parameter is not None:
        _params['continuationToken'] = _SERIALIZER.query("continuation_token_parameter", continuation_token_parameter, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_delete_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a single Experiment resource."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a single Experiment resource."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = headers.pop('Accept', "application/json")

    # Validate and substitute the path arguments into the URL template.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    )

    # Query string and headers.
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_or_update_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    *,
    json: Optional[_models.Experiment] = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or replaces an Experiment resource.

    The body is supplied either as ``json`` (a model to serialize) or as
    pre-encoded ``content``; Content-Type is emitted only when provided.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', None))  # type: Optional[str]
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_cancel_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that cancels a running Experiment."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/cancel")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_start_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the POST request that starts a Chaos Experiment run."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = headers.pop('Accept', "application/json")

    # Validate and substitute the path arguments into the URL template.
    template = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/start")  # pylint: disable=line-too-long
    url = _format_url_section(
        template,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        experimentName=_SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    )

    # Query string and headers.
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(method="POST", url=url, params=params, headers=headers, **kwargs)
def build_list_all_statuses_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing all statuses of an Experiment."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/statuses")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_status_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    status_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for one status record (by GUID) of an Experiment."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/statuses/{statusId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
        "statusId": _SERIALIZER.url("status_id", status_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_execution_details_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request listing execution details of an Experiment."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/executionDetails")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_get_execution_details_request(
    subscription_id: str,
    resource_group_name: str,
    experiment_name: str,
    execution_details_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for one execution-details record (by GUID)."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    accept = _headers.pop('Accept', "application/json")
    # Construct URL
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/executionDetails/{executionDetailsId}")  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', pattern=r'^[a-zA-Z0-9_\-\.\(\)]*[a-zA-Z0-9_\-\(\)]$'),
        "experimentName": _SERIALIZER.url("experiment_name", experiment_name, 'str', min_length=1, pattern=r'^[^<>%&:?#/\\]+$'),
        "executionDetailsId": _SERIALIZER.url("execution_details_id", execution_details_id, 'str', pattern=r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$'),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
class ExperimentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.chaos.ChaosManagementClient`'s
:attr:`experiments` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_all(
        self,
        running: Optional[bool] = None,
        continuation_token_parameter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable[_models.ExperimentListResult]:
        """Get a list of Experiment resources in a subscription.
        :param running: Optional value that indicates whether to filter results based on if the
         Experiment is currently running. If null, then the results will not be filtered. Default value
         is None.
        :type running: bool
        :param continuation_token_parameter: String that sets the continuation token. Default value is
         None.
        :type continuation_token_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExperimentListResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.chaos.models.ExperimentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentListResult]
        # Map well-known auth/ARM status codes to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})
        def prepare_request(next_link=None):
            # First page is built from the URL template; subsequent pages reuse
            # the service-provided next_link verbatim.
            if not next_link:
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    running=running,
                    continuation_token_parameter=continuation_token_parameter,
                    template_url=self.list_all.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    running=running,
                    continuation_token_parameter=continuation_token_parameter,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize("ExperimentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, raising a typed error on a non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Chaos/experiments"}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        running: Optional[bool] = None,
        continuation_token_parameter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable[_models.ExperimentListResult]:
        """Get a list of Experiment resources in a resource group.
        :param resource_group_name: String that represents an Azure resource group.
        :type resource_group_name: str
        :param running: Optional value that indicates whether to filter results based on if the
         Experiment is currently running. If null, then the results will not be filtered. Default value
         is None.
        :type running: bool
        :param continuation_token_parameter: String that sets the continuation token. Default value is
         None.
        :type continuation_token_parameter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExperimentListResult or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.chaos.models.ExperimentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentListResult]
        # Map well-known auth/ARM status codes to typed exceptions; callers may
        # extend or override the mapping via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})
        def prepare_request(next_link=None):
            # First page is built from the URL template; subsequent pages reuse
            # the service-provided next_link verbatim.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    api_version=api_version,
                    running=running,
                    continuation_token_parameter=continuation_token_parameter,
                    template_url=self.list.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    api_version=api_version,
                    running=running,
                    continuation_token_parameter=continuation_token_parameter,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation link, item iterator).
            deserialized = self._deserialize("ExperimentListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page, raising a typed error on a non-200 response.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments"}  # type: ignore
@distributed_trace
def delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
experiment_name: str,
**kwargs: Any
) -> None:
"""Delete a Experiment resource.
:param resource_group_name: String that represents an Azure resource group.
:type resource_group_name: str
:param experiment_name: String that represents a Experiment resource name.
:type experiment_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
experiment_name=experiment_name,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}"} # type: ignore
@distributed_trace
def get(
    self,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> _models.Experiment:
    """Get an Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Experiment, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.Experiment
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Standard ARM status-code mapping, extendable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.Experiment]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str

    # Build the GET request from the operation's URL template and normalise it.
    request = build_get_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        api_version=api_version,
        template_url=self.get.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Anything other than 200 is surfaced as an ARM error.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Experiment', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}"}  # type: ignore
@distributed_trace
def create_or_update(
    self,
    resource_group_name: str,
    experiment_name: str,
    experiment: _models.Experiment,
    **kwargs: Any
) -> _models.Experiment:
    """Create or update an Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :param experiment: Experiment resource to be created or updated.
    :type experiment: ~azure.mgmt.chaos.models.Experiment
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Experiment, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.Experiment
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    # Headers are case-insensitive here because Content-Type is popped below.
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.Experiment]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json"))  # type: Optional[str]

    # Serialise the payload before building the request.
    _json = self._serialize.body(experiment, 'Experiment')

    request = build_create_or_update_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        template_url=self.create_or_update.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Only 200 is a success for this operation.
    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Experiment', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}"}  # type: ignore
@distributed_trace
def cancel(
    self,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> _models.ExperimentCancelOperationResult:
    """Cancel a running Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExperimentCancelOperationResult, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.ExperimentCancelOperationResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentCancelOperationResult]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str

    request = build_cancel_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        api_version=api_version,
        template_url=self.cancel.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # Cancellation is accepted asynchronously: only 202 is a success.
    if response.status_code != 202:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExperimentCancelOperationResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

cancel.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/cancel"}  # type: ignore
@distributed_trace
def start(
    self,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> _models.ExperimentStartOperationResult:
    """Start an Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExperimentStartOperationResult, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.ExperimentStartOperationResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentStartOperationResult]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str

    request = build_start_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        api_version=api_version,
        template_url=self.start.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    # The start operation is accepted asynchronously: only 202 is a success.
    if response.status_code != 202:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExperimentStartOperationResult', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

start.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/start"}  # type: ignore
@distributed_trace
def list_all_statuses(
    self,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> Iterable[_models.ExperimentStatusListResult]:
    """Get a list of statuses of a Experiment resource.

    Returns a lazy :class:`~azure.core.paging.ItemPaged` — no request is made
    until the iterator is consumed.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents a Experiment resource name.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExperimentStatusListResult or the result of
     cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.chaos.models.ExperimentStatusListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentStatusListResult]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template.  Subsequent
        # pages: reuse the service-provided next_link as the template.
        if not next_link:
            request = build_list_all_statuses_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                experiment_name=experiment_name,
                api_version=api_version,
                template_url=self.list_all_statuses.metadata['url'],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            request = build_list_all_statuses_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                experiment_name=experiment_name,
                api_version=api_version,
                template_url=next_link,
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            # Paging always re-fetches with GET regardless of the template verb.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialise one page and hand (next_link, items) back to ItemPaged.
        deserialized = self._deserialize("ExperimentStatusListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping non-200 responses to ARM errors.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_all_statuses.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/statuses"}  # type: ignore
@distributed_trace
def get_status(
    self,
    resource_group_name: str,
    experiment_name: str,
    status_id: str,
    **kwargs: Any
) -> _models.ExperimentStatus:
    """Get a status of an Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :param status_id: GUID that represents an Experiment status.
    :type status_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExperimentStatus, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.ExperimentStatus
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentStatus]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str

    request = build_get_status_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        status_id=status_id,
        api_version=api_version,
        template_url=self.get_status.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExperimentStatus', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_status.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/statuses/{statusId}"}  # type: ignore
@distributed_trace
def list_execution_details(
    self,
    resource_group_name: str,
    experiment_name: str,
    **kwargs: Any
) -> Iterable[_models.ExperimentExecutionDetailsListResult]:
    """Get a list of execution details of a Experiment resource.

    Returns a lazy :class:`~azure.core.paging.ItemPaged` — no request is made
    until the iterator is consumed.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents a Experiment resource name.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ExperimentExecutionDetailsListResult or the result
     of cls(response)
    :rtype:
     ~azure.core.paging.ItemPaged[~azure.mgmt.chaos.models.ExperimentExecutionDetailsListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str
    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentExecutionDetailsListResult]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    def prepare_request(next_link=None):
        # First page: build from the operation's URL template.  Subsequent
        # pages: reuse the service-provided next_link as the template.
        if not next_link:
            request = build_list_execution_details_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                experiment_name=experiment_name,
                api_version=api_version,
                template_url=self.list_execution_details.metadata['url'],
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
        else:
            request = build_list_execution_details_request(
                subscription_id=self._config.subscription_id,
                resource_group_name=resource_group_name,
                experiment_name=experiment_name,
                api_version=api_version,
                template_url=next_link,
                headers=_headers,
                params=_params,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)  # type: ignore
            # Paging always re-fetches with GET regardless of the template verb.
            request.method = "GET"
        return request

    def extract_data(pipeline_response):
        # Deserialise one page and hand (next_link, items) back to ItemPaged.
        deserialized = self._deserialize("ExperimentExecutionDetailsListResult", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, iter(list_of_elem)

    def get_next(next_link=None):
        # Fetch a single page, mapping non-200 responses to ARM errors.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response

    return ItemPaged(
        get_next, extract_data
    )
list_execution_details.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/executionDetails"}  # type: ignore
@distributed_trace
def get_execution_details(
    self,
    resource_group_name: str,
    experiment_name: str,
    execution_details_id: str,
    **kwargs: Any
) -> _models.ExperimentExecutionDetails:
    """Get an execution detail of an Experiment resource.

    :param resource_group_name: String that represents an Azure resource group.
    :type resource_group_name: str
    :param experiment_name: String that represents an Experiment resource name.
    :type experiment_name: str
    :param execution_details_id: GUID that represents an Experiment execution detail.
    :type execution_details_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ExperimentExecutionDetails, or the result of cls(response)
    :rtype: ~azure.mgmt.chaos.models.ExperimentExecutionDetails
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    cls = kwargs.pop('cls', None)  # type: ClsType[_models.ExperimentExecutionDetails]
    api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-07-01-preview"))  # type: str

    request = build_get_execution_details_request(
        subscription_id=self._config.subscription_id,
        resource_group_name=resource_group_name,
        experiment_name=experiment_name,
        execution_details_id=execution_details_id,
        api_version=api_version,
        template_url=self.get_execution_details.metadata['url'],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)  # type: ignore

    pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ExperimentExecutionDetails', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized

get_execution_details.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Chaos/experiments/{experimentName}/executionDetails/{executionDetailsId}"}  # type: ignore
|
{
"content_hash": "5e01db02e43b970a77ba55a2030507fd",
"timestamp": "",
"source": "github",
"line_count": 1297,
"max_line_length": 233,
"avg_line_length": 43.46723207401696,
"alnum_prop": 0.624509995210813,
"repo_name": "Azure/azure-sdk-for-python",
"id": "a13fae65a3c455b584b1412113da31403bf0bb61",
"size": "56877",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/chaos/azure-mgmt-chaos/azure/mgmt/chaos/operations/_experiments_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
from SCons.Script import *
# Helper for creating source lists with certain files only enabled by config
# settings.
def FeatureSources(config, files):
    """Build a list of SCons File nodes, filtering conditional entries.

    Each entry in *files* is either a plain path string, which is always
    included, or a ``(config_key, path)`` tuple, which is included only when
    ``config[config_key]`` is truthy.

    :param config: mapping of feature flags (e.g. an SCons config dict).
    :param files: iterable of paths and/or (flag, path) tuples.
    :return: list of ``File`` nodes for the enabled sources.
    """
    output = []
    for f in files:
        # isinstance() instead of type() == tuple: idiomatic and also
        # accepts tuple subclasses (e.g. namedtuples).
        if isinstance(f, tuple):
            if config[f[0]]:
                output.append(File(f[1]))
        else:
            output.append(File(f))
    return output
# Raise an error if a certain target is not specified.
def RequireTarget(target, error):
    """Abort the build with *error* unless *target* was requested.

    The check is skipped when SCons is only printing help output.
    """
    requested = GetOption('help') or target in COMMAND_LINE_TARGETS
    if not requested:
        raise SCons.Errors.StopError(error)
|
{
"content_hash": "b71177f2a71261af308f0dbfad6bffe6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 26.105263157894736,
"alnum_prop": 0.7217741935483871,
"repo_name": "gil0mendes/Infinity-OS",
"id": "ec8a3374bad5b0ad3427314a7df0a2e21933b511",
"size": "1591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utilities/build/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "26549"
},
{
"name": "C",
"bytes": "2115027"
},
{
"name": "C++",
"bytes": "183805"
},
{
"name": "Objective-C",
"bytes": "2108"
},
{
"name": "Python",
"bytes": "97191"
},
{
"name": "Shell",
"bytes": "769"
}
],
"symlink_target": ""
}
|
import sys
from setuptools import setup, find_packages
# To install the library, open a Terminal shell, then run this
# file by typing:
#
# python setup.py install
#
# You need to have the setuptools module installed.
# Try reading the setuptools documentation:
# http://pypi.python.org/pypi/setuptools
REQUIRES = []
setup(
name="SwaggerPetstore",
version="1.0",
description="default Title",
author_email="",
url="Contact Email",
keywords=["Swagger", "default Title"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Api Description
"""
)
|
{
"content_hash": "0c1cccf05eb3a6546227c87a1023432c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 62,
"avg_line_length": 20.25,
"alnum_prop": 0.6867283950617284,
"repo_name": "jfiala/swagger-spring-demo",
"id": "066a6b125299865afacbd1d475c4d8fb24a8878e",
"size": "648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "user-rest-service-1.0.2/generated-code/python/SwaggerPetstore-python/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "27237"
},
{
"name": "C++",
"bytes": "79766"
},
{
"name": "CSS",
"bytes": "360027"
},
{
"name": "HTML",
"bytes": "34412"
},
{
"name": "Java",
"bytes": "782614"
},
{
"name": "JavaScript",
"bytes": "3621677"
},
{
"name": "Objective-C",
"bytes": "56983"
},
{
"name": "PHP",
"bytes": "98153"
},
{
"name": "Python",
"bytes": "36004"
},
{
"name": "Ruby",
"bytes": "29848"
},
{
"name": "Scala",
"bytes": "37823"
},
{
"name": "Shell",
"bytes": "18947"
}
],
"symlink_target": ""
}
|
# Import guard: report a clear failure if the standard modules are missing.
try:
    import sys
    import codecs
    import inspect
except ImportError:
    # Fixes over the original: catch only ImportError instead of a bare
    # ``except`` (which also swallowed unrelated errors), and avoid
    # sys.exit() — ``sys`` may be unbound if its own import failed.  Also
    # exit with status 1 rather than the original misleading 0.
    print('Library Error')
    raise SystemExit(1)
def main():
    """Entry point: load the source file named on the command line.

    NOTE(review): ``openSourceCode`` is not defined anywhere in this file and
    nothing else is imported that could provide it, so the call below raises
    NameError at runtime, which the bare ``except`` then masks as a
    missing-file message — confirm where this helper was meant to come from.
    """
    try:
        # sys.argv[2] is the <filename> argument (argv[1] is <option>).
        input_code = openSourceCode(sys.argv[2])  # noqa: F821 — see note above
    except:
        # NOTE(review): bare except also hides NameError and KeyboardInterrupt.
        print('File is not exist')
# Script entry point: require exactly two arguments (<option> <filename>).
if __name__ == '__main__':
    if len(sys.argv) == 3:
        main()
    else:
        print('usage : python fuzzer.py <option> <filename>')
        sys.exit(0)
|
{
"content_hash": "b733951eb9aef728fa938b6ebbb60118",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 55,
"avg_line_length": 14.521739130434783,
"alnum_prop": 0.6317365269461078,
"repo_name": "ied206/adv_ku_fuzz",
"id": "0ec4838b9d771614be479e0d0a0e10de7198da62",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/fuzzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "334"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from models import FilmSearch, Films, Developers, Notes
class FilmSearchAdmin(admin.ModelAdmin):
    """Admin list view for FilmSearch: show the search fields plus audit
    timestamps, filterable by film."""
    list_display = ("Film", "Developer", "Dilution", "ASA_ISO",
                    "create_timestamp", "last_update_timestamp")
    list_filter = ("Film",)
class FilmsAdmin(admin.ModelAdmin):
    """Admin list view for Films, filterable by film name."""
    list_display = ("Film", "create_timestamp", "last_update_timestamp")
    list_filter = ("Film",)
class DevelopersAdmin(admin.ModelAdmin):
    """Admin list view for Developers, filterable by developer name."""
    list_display = ("Developer", "create_timestamp", "last_update_timestamp")
    list_filter = ("Developer",)
class NotesAdmin(admin.ModelAdmin):
    """Admin list view for Notes, filterable by note text."""
    list_display = ("Notes", "Remark", "create_timestamp", "last_update_timestamp")
    list_filter = ("Notes",)
# Wire each model to its ModelAdmin on the default admin site.
admin.site.register(FilmSearch, FilmSearchAdmin)
admin.site.register(Films, FilmsAdmin)
admin.site.register(Developers, DevelopersAdmin)
admin.site.register(Notes, NotesAdmin)
|
{
"content_hash": "69769208aad133226640e55b4373bb48",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 96,
"avg_line_length": 20.653061224489797,
"alnum_prop": 0.6373517786561265,
"repo_name": "summychou/GGFilm",
"id": "495ddc3e6a5c62fe9493d0ffdbabbee9b981e7d9",
"size": "1012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WeRoBot/myrobot/admin.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44518"
},
{
"name": "HTML",
"bytes": "15886"
},
{
"name": "JavaScript",
"bytes": "78187"
},
{
"name": "Python",
"bytes": "53156"
}
],
"symlink_target": ""
}
|
import database
import math
class Rectangle(object):
    """Axis-aligned rectangle defined by two opposite corner points.

    The corners may be given in any order; the constructor normalises them
    into left/right/top/bottom edges.
    """

    def __init__(self, pt1, pt2):
        (x1, y1), (x2, y2) = pt1, pt2
        self.left, self.right = min(x1, x2), max(x1, x2)
        self.bottom, self.top = min(y1, y2), max(y1, y2)

    def contains(self, pt3):
        """Return True if *pt3* lies inside or on the border of the rectangle."""
        x, y = pt3
        return (self.left <= x <= self.right) and (self.bottom <= y <= self.top)
class Search(object):
    """Path-finding over the world graph stored in the Database.

    NOTE(review): find_path is A*-like, but g-scores are recomputed as the
    straight-line distance from the start rather than accumulated along the
    discovered path, so returned paths are not guaranteed shortest — confirm
    this approximation is acceptable for the simulator.
    """

    def __init__(self):
        self.graph = database.Database.GRAPH #graph of world (dict)
        self.points = database.Database.POINTS #co-ordinates of all nodes (dict)

    def get_distance(self,start_node,end_node):
        # Euclidean distance between the two nodes' stored (x, y) co-ordinates.
        # Note: unknown node names make .get() return None and the indexing
        # below raise TypeError.
        start_points = self.points.get(start_node)
        end_points = self.points.get(end_node)
        x_dif = end_points[0] - start_points[0]
        y_dif = end_points[1] - start_points[1]
        distance = math.hypot(x_dif,y_dif)
        return distance

    def get_list(self,string):
        # Split the space-separated path string from create_path into a list
        # of node names (split() also discards the padding spaces).
        return string.split()

    # Gets the neighbour with the lowest f_score
    def get_lowest_f_score(self,open_set,goal,g_score):
        lowest_f_cost = 100000000000000000  # sentinel "infinity"; any real cost is smaller
        for node in open_set:
            current_f_cost = g_score + self.get_distance(node,goal)
            # Single-candidate shortcut: no comparison needed.
            if len(open_set) == 1:
                lowest_node = node
                break
            elif current_f_cost < lowest_f_cost:
                lowest_node = node
                lowest_f_cost = current_f_cost
        return lowest_node

    def create_path(self,came_from,current_node):
        # Recursively walk the came_from chain back to the start, building a
        # space-separated node string; get_list() strips the extra spaces.
        if current_node in came_from:
            path = self.create_path(came_from, came_from[current_node])
            return (path + ' ' + current_node + ' ')
        else:
            return ' ' + current_node + ' '

    def find_path(self,start,goal):
        """Return the list of node names from *start* to *goal*, or None."""
        closed_set = [] #evaluated nodes
        open_set = [start] #nodes waiting to be evaluated
        came_from = {}  # best-known predecessor for each discovered node
        g_score = 0  # NOTE(review): never updated; see class docstring
        f_score = g_score + self.get_distance(start,goal)  # NOTE(review): unused
        while len(open_set) > 0:
            #find the lowest f_score
            #itereate through open set and grab lowest f_score
            current_node = self.get_lowest_f_score(open_set,goal,g_score)
            #check if current is the goal
            if current_node == goal:
                return self.get_list(self.create_path(came_from,goal))
            # Mark current node as visited or expanded
            open_set.remove(current_node)
            closed_set.append(current_node)
            #iterate through all neighbours of the current node
            neighbours = self.graph.get(current_node)
            for neighbour in neighbours:
                # if already evaluated, check the next neighbour
                if neighbour in closed_set:
                    continue
                neighbour_to_current_dist = self.get_distance(current_node,neighbour)
                # NOTE(review): g-scores below are straight-line distances
                # from start, not path costs — see class docstring.
                current_g_score = self.get_distance(start,current_node)
                neighbour_g_score = self.get_distance(start,neighbour)
                tentative_g_score = current_g_score + neighbour_to_current_dist
                # if neighbour hasn't been visited yet or has less cost than neighbour
                if neighbour not in open_set or tentative_g_score < neighbour_g_score:
                    came_from[neighbour] = current_node
                    neighbour_g_score = tentative_g_score
                    neighbour_f_score = neighbour_g_score + self.get_distance(neighbour,goal)  # NOTE(review): unused
                    #add neighbour to the unvisited list
                    if neighbour not in open_set:
                        open_set.append(neighbour)
        return None #returns None if failed to find path
class Angle(object):
    """Decides the shortest rotation direction between two headings.

    Both constructor inputs are in radians; they are converted to integer
    degrees normalised into [0, 360) on construction.
    """

    def __init__(self, current, target):
        """ Convert input rads to degrees """
        current = math.degrees(current)
        target = math.degrees(target)
        self.angle_from = self.normalize(current)
        self.angle_to = self.normalize(target)

    def check(self):
        """ Check which direction is best to rotate based on current heading. """
        # -1 is anti-clockwise
        # 1 is clockwise
        move_angle = self.angle_to - self.angle_from
        if (move_angle > 0):
            if (abs(move_angle) > 180):
                return -1
            else:
                return 1
        else:
            # Note: a zero difference falls through to -1, preserving the
            # original behaviour (callers are expected to stop on their own).
            if (abs(move_angle) > 180):
                return 1
            else:
                return -1

    def normalize(self, input_angle):
        """Normalise *input_angle* (degrees) to an int in [0, 360).

        Fix over the original: use modulo, so values outside (-360, 360)
        — e.g. 370 or -370, which the single ``+= 360`` correction left out
        of range — are also mapped correctly.  In-range inputs behave
        exactly as before.
        """
        return int(input_angle) % 360
|
{
"content_hash": "a15d9a74dc47666611ea3217f73cea0e",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 27.840579710144926,
"alnum_prop": 0.6775117126496616,
"repo_name": "swanndri/ROS-Healthcare-Simulator",
"id": "a3bc64b1eec56b7e4c5eea1ce2f37f4909c4db32",
"size": "3842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "se306/src/package1/scripts/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "92222"
},
{
"name": "Shell",
"bytes": "10229"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
from pants.binaries.binary_tool import NativeTool
logger = logging.getLogger(__name__)
class YarnpkgDistribution(NativeTool):
    """Represents a self-bootstrapping Yarnpkg distribution."""
    # Scope under which this subsystem's options are registered.
    options_scope = 'yarnpkg-distribution'
    # Tool name used by NativeTool to locate/fetch the binary archive.
    name = 'yarnpkg'
    # Version fetched when no override is configured.
    default_version = 'v0.19.1'
    # The distribution is downloaded as a gzipped tarball.
    archive_type = 'tgz'
|
{
"content_hash": "1f1abfe698afb12af18075770be95fa5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 93,
"avg_line_length": 26.38888888888889,
"alnum_prop": 0.7136842105263158,
"repo_name": "UnrememberMe/pants",
"id": "4e381740fe0896858d083dd5f09f5a6a25389ff6",
"size": "622",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/node/src/python/pants/contrib/node/subsystems/yarnpkg_distribution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1805"
},
{
"name": "HTML",
"bytes": "48321"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5767085"
},
{
"name": "Rust",
"bytes": "427157"
},
{
"name": "Scala",
"bytes": "75938"
},
{
"name": "Shell",
"bytes": "75470"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from resource_management.libraries.script.script import Script
# default value of log4j version is 1 for hive
log4j_version = '1'

config = Script.get_config()
hadoop_user = config["configurations"]["cluster-env"]["hadoop.user.name"]
# On Windows the AMS processes run under the same account as Hadoop.
ams_user = hadoop_user

# Collector layout comes from environment variables that only exist on hosts
# where the Metrics Collector is installed; a missing variable raises
# KeyError.  The original bare `except:` also hid unrelated errors
# (including SystemExit/KeyboardInterrupt), so catch only KeyError.
try:
    ams_collector_conf_dir = os.environ["COLLECTOR_CONF_DIR"]
    ams_collector_home_dir = os.environ["COLLECTOR_HOME"]
    hbase_cmd = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "bin", "hbase.cmd")
    hbase_conf_dir = os.path.join(os.environ["COLLECTOR_HOME"], "hbase", "conf")
except KeyError:
    ams_collector_conf_dir = None
    ams_collector_home_dir = None
    hbase_cmd = None
    hbase_conf_dir = None

# Monitor layout, likewise optional per host.
try:
    ams_monitor_conf_dir = os.environ["MONITOR_CONF_DIR"]
    ams_monitor_home_dir = os.environ["MONITOR_HOME"]
except KeyError:
    ams_monitor_conf_dir = None
    ams_monitor_home_dir = None

# Hadoop paths stay None when HADOOP_HOME is not set.
hadoop_native_lib = None
hadoop_bin_dir = None
hadoop_conf_dir = None
try:
    # NOTE(review): native lib points at "bin", same as hadoop_bin_dir —
    # preserved as-is; confirm against the Linux params file.
    hadoop_native_lib = os.path.join(os.environ["HADOOP_HOME"], "bin")
    hadoop_bin_dir = os.path.join(os.environ["HADOOP_HOME"], "bin")
    hadoop_conf_dir = os.path.join(os.environ["HADOOP_HOME"], "conf")
except KeyError:
    pass
from service_mapping import *
|
{
"content_hash": "8e44aefef46443a7e9ed3c4a745637d1",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 85,
"avg_line_length": 31.901639344262296,
"alnum_prop": 0.7410071942446043,
"repo_name": "arenadata/ambari",
"id": "64589ec49683f534a2403ae036a2fbc767feaedd",
"size": "1968",
"binary": false,
"copies": "3",
"ref": "refs/heads/branch-adh-1.6",
"path": "ambari-server/src/main/resources/common-services/AMBARI_METRICS/0.1.0/package/scripts/params_windows.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "46700"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "343739"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "Dockerfile",
"bytes": "6387"
},
{
"name": "EJS",
"bytes": "777"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Gherkin",
"bytes": "990"
},
{
"name": "Groovy",
"bytes": "15882"
},
{
"name": "HTML",
"bytes": "717983"
},
{
"name": "Handlebars",
"bytes": "1819641"
},
{
"name": "Java",
"bytes": "29172298"
},
{
"name": "JavaScript",
"bytes": "18571926"
},
{
"name": "Jinja",
"bytes": "1490416"
},
{
"name": "Less",
"bytes": "412933"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "287501"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "18507704"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Ruby",
"bytes": "38590"
},
{
"name": "SCSS",
"bytes": "40072"
},
{
"name": "Shell",
"bytes": "924115"
},
{
"name": "Stylus",
"bytes": "820"
},
{
"name": "TSQL",
"bytes": "42351"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
import pytest
from fastapi.testclient import TestClient
from docs_src.first_steps.tutorial001 import app
# In-process HTTP client wired to the tutorial app under test.
client = TestClient(app)
# The exact OpenAPI document the app is expected to serve at /openapi.json;
# compared verbatim in the parametrized test below.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/": {
            "get": {
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    }
                },
                "summary": "Root",
                "operationId": "root__get",
            }
        }
    },
}
@pytest.mark.parametrize(
    "path,expected_status,expected_response",
    [
        ("/", 200, {"message": "Hello World"}),
        ("/nonexistent", 404, {"detail": "Not Found"}),
        ("/openapi.json", 200, openapi_schema),
    ],
)
def test_get_path(path, expected_status, expected_response):
    """Each path must yield the expected HTTP status and JSON payload."""
    response = client.get(path)
    status, payload = response.status_code, response.json()
    assert status == expected_status
    assert payload == expected_response
|
{
"content_hash": "8dc5f8c3fca833b0ac331434ee6ad7db",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 27.23076923076923,
"alnum_prop": 0.5037664783427496,
"repo_name": "tiangolo/fastapi",
"id": "48d42285c8e704c7870ab756d52f3849809e6312",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_tutorial/test_first_steps/test_tutorial001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25"
},
{
"name": "HTML",
"bytes": "187"
},
{
"name": "Python",
"bytes": "1928986"
},
{
"name": "Shell",
"bytes": "1383"
}
],
"symlink_target": ""
}
|
from unittest import mock
from airflow.providers.google.cloud.operators.facebook_ads_to_gcs import FacebookAdsReportToGcsOperator
# Destination GCS bucket and object for the exported report.
GCS_BUCKET = "airflow_bucket_fb"
GCS_OBJ_PATH = "Temp/this_is_my_report_json.json"
# Connection IDs the operator is expected to resolve.
GCS_CONN_ID = "google_cloud_default"
FACEBOOK_ADS_CONN_ID = "facebook_default"
# Facebook Marketing API version the reporting hook should be built with.
API_VERSION = "v6.0"
# Report columns requested from the Facebook Ads API.
FIELDS = [
    "campaign_name",
    "campaign_id",
    "ad_id",
    "clicks",
    "impressions",
]
# Report query parameters (aggregation level and date window).
PARAMS = {
    "level": "ad",
    "date_preset": "yesterday"
}
# Canned payload returned by the mocked Facebook hook in the test below.
FACEBOOK_RETURN_VALUE = [
    {
        "campaign_name": "abcd",
        "campaign_id": "abcd",
        "ad_id": "abcd",
        "clicks": "2",
        "impressions": "2",
    }
]
class TestFacebookAdsReportToGcsOperator:
    """Unit tests for FacebookAdsReportToGcsOperator with both hooks mocked."""

    @mock.patch("airflow.providers.google.cloud.operators.facebook_ads_to_gcs.FacebookAdsReportingHook")
    @mock.patch("airflow.providers.google.cloud.operators.facebook_ads_to_gcs.GCSHook")
    def test_execute(self, mock_gcs_hook, mock_ads_hook):
        """execute() must pull the report from Facebook and upload it to GCS."""
        # The Facebook hook hands back a canned report payload.
        mock_ads_hook.return_value.bulk_facebook_report.return_value = FACEBOOK_RETURN_VALUE
        operator = FacebookAdsReportToGcsOperator(
            facebook_conn_id=FACEBOOK_ADS_CONN_ID,
            fields=FIELDS,
            params=PARAMS,
            object_name=GCS_OBJ_PATH,
            bucket_name=GCS_BUCKET,
            task_id="run_operator",
        )
        operator.execute({})
        # Facebook side: hook constructed for the right connection/API version
        # and queried with the configured fields/params.
        mock_ads_hook.assert_called_once_with(
            facebook_conn_id=FACEBOOK_ADS_CONN_ID, api_version=API_VERSION
        )
        mock_ads_hook.return_value.bulk_facebook_report.assert_called_once_with(
            params=PARAMS, fields=FIELDS
        )
        # GCS side: uploaded (uncompressed) to the configured bucket/object
        # from a temporary local file.
        mock_gcs_hook.assert_called_once_with(gcp_conn_id=GCS_CONN_ID)
        mock_gcs_hook.return_value.upload.assert_called_once_with(
            bucket_name=GCS_BUCKET, object_name=GCS_OBJ_PATH, filename=mock.ANY, gzip=False
        )
|
{
"content_hash": "0f29d83b20153b8b1db821ab6bb01eec",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 104,
"avg_line_length": 42.22641509433962,
"alnum_prop": 0.5285969615728329,
"repo_name": "wooga/airflow",
"id": "a2b8b6b9dade566bc948e824b82d8043dd3112e0",
"size": "3023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_facebook_ads_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
}
|
"""Convert Gettext PO localization files to Comma-Separated Value (.csv) files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/csv2po.html
for examples and usage instructions.
"""
from translate.storage import po
from translate.storage import csvl10n
class po2csv:
    """Converts a PO store into a CSV store, one row per translation unit."""

    def convertcomments(self, inputunit):
        """Join the unit's location comments into one space-separated string."""
        return " ".join(inputunit.getlocations())

    def convertunit(self, inputunit):
        """Convert one PO unit to a csvunit; header and blank units yield None."""
        csvunit = csvl10n.csvunit()
        if inputunit.isheader() or inputunit.isblank():
            return None
        csvunit.location = self.convertcomments(inputunit)
        csvunit.source = inputunit.source
        csvunit.target = inputunit.target
        return csvunit

    def convertplurals(self, inputunit):
        """Convert PO plural units
        We only convert the first plural form. So languages with multiple
        plurals are not handled. For single plural languages we simply
        skip this plural extraction.
        """
        if len(inputunit.target.strings) == 1:  # No plural forms
            return None
        pluralunit = csvl10n.csvunit()
        pluralunit.location = self.convertcomments(inputunit)
        pluralunit.source = inputunit.source.strings[1]
        pluralunit.target = inputunit.target.strings[1]
        return pluralunit

    def convertstore(self, inputstore, columnorder=None):
        """Convert a whole PO store to a csvfile with the given column order."""
        fieldnames = ['location', 'source', 'target'] if columnorder is None else columnorder
        outputstore = csvl10n.csvfile(fieldnames=fieldnames)
        for unit in inputstore.units:
            converted = self.convertunit(unit)
            if converted is not None:
                outputstore.addunit(converted)
            if unit.hasplural():
                plural = self.convertplurals(unit)
                if plural is not None:
                    outputstore.addunit(plural)
        return outputstore
def convertcsv(inputfile, outputfile, templatefile, columnorder=None):
    """reads in inputfile using po, converts using po2csv, writes to outputfile"""
    # note that templatefile is not used, but it is required by the converter...
    inputstore = po.pofile(inputfile)
    # An empty PO file produces no CSV output; signal that with 0.
    if inputstore.isempty():
        return 0
    outputstore = po2csv().convertstore(inputstore, columnorder)
    outputfile.write(str(outputstore))
    return 1
def main(argv=None):
    """Command-line entry point: wires the po->csv conversion into the
    translate-toolkit option parser."""
    from translate.convert import convert
    parser = convert.ConvertOptionParser({"po": ("csv", convertcsv)}, description=__doc__)
    parser.add_option("", "--columnorder", dest="columnorder", default=None,
                      help="specify the order and position of columns (location,source,target)")
    parser.passthrough.append("columnorder")
    parser.run(argv)


if __name__ == '__main__':
    main()
|
{
"content_hash": "bd5da42c0c5eaaf6f4eb26f1a7a00a85",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 93,
"avg_line_length": 35.845238095238095,
"alnum_prop": 0.6552640318830953,
"repo_name": "staranjeet/fjord",
"id": "57ca47aa77a2d3ff33c52e517f301cc0636e8b9f",
"size": "3772",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vendor/packages/translate-toolkit/translate/convert/po2csv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "158619"
},
{
"name": "HTML",
"bytes": "127302"
},
{
"name": "JavaScript",
"bytes": "296754"
},
{
"name": "Python",
"bytes": "853569"
},
{
"name": "Shell",
"bytes": "11673"
},
{
"name": "Smarty",
"bytes": "780"
}
],
"symlink_target": ""
}
|
"""
TFLite testcases
================
This file is a test script exercising TFLite operators through the Relay frontend.
"""
from __future__ import print_function
from functools import partial
import pytest
import numpy as np
import tvm
import tempfile
from tvm import te
from tvm import relay
try:
import tensorflow.compat.v1 as tf
# tensorflow.python.framework.ops module itself is not part of
# TensorFlow's public API: the precise contents of that module
# may vary from one version to the next
import tensorflow.compat.v1 as ops
except ImportError:
import tensorflow as tf
import tensorflow as ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import variables
from distutils.version import LooseVersion
try:
from tensorflow import lite as interpreter_wrapper
except ImportError:
from tensorflow.contrib import lite as interpreter_wrapper
from tvm.contrib.download import download_testdata
import tvm.relay.testing.tf as tf_testing
from packaging import version as package_version
from PIL import Image
import os
#######################################################################
# Generic run functions for TVM & TFLite
# --------------------------------------
def convert_to_list(x):
    """Wrap *x* in a single-element list unless it already is a list."""
    return x if isinstance(x, list) else [x]
#######################################################################
# Get a real image for e2e testing
# --------------------------------
def get_real_image(im_height, im_width, quantized=True):
    """Download the elephant test image, resize it, and return a
    (1, im_height, im_width, 3) array — uint8 when quantized, else float32."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    img_path = download_testdata(os.path.join(repo_base, img_name), img_name, module="data")
    resized = Image.open(img_path).resize((im_height, im_width))
    pixel_dtype = "uint8" if quantized else "float32"
    return np.asarray(resized, dtype=pixel_dtype).reshape((1, im_height, im_width, 3))
def pre_processed_image(height, width):
    """Download the elephant test image and apply Inception-style eval
    preprocessing (float conversion, 87.5% central crop, resize, batch dim),
    returning a 4-D TF tensor."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/tensorflow/models/InceptionV1/"
    img_name = "elephant-299.jpg"
    image_url = os.path.join(repo_base, img_name)
    img_path = download_testdata(image_url, img_name, module="data")
    image = tf.io.read_file(img_path)
    image = tf.image.decode_jpeg(image, channels=3)
    with tf.name_scope("eval_image"):
        if image.dtype != tf.float32:
            # convert_image_dtype also rescales integer pixel data into [0, 1).
            image = tf.image.convert_image_dtype(image, dtype=tf.float32)
        image = tf.image.central_crop(image, central_fraction=0.875)
        # Resize the image to the specified height and width.
        # NOTE(review): align_corners is a TF1-style resize argument — confirm
        # it is accepted by the tf.compat.v1 alias used in this file.
        image = tf.image.resize(image, [height, width], align_corners=False)
        # Add a leading batch dimension: (H, W, 3) -> (1, H, W, 3).
        image = tf.expand_dims(image, axis=0)
    return image
def get_real_image_object_detection(im_height, im_width):
    """Download a small street-scene image and return it as a
    (1, im_height, im_width, 3) uint8 array for detection tests."""
    repo_base = "https://github.com/dmlc/web-data/raw/main/gluoncv/detection/"
    img_name = "street_small.jpg"
    img_path = download_testdata(os.path.join(repo_base, img_name), img_name, module="data")
    resized = Image.open(img_path).resize((im_height, im_width))
    return np.asarray(resized, dtype="uint8").reshape((1, im_height, im_width, 3))
def vmobj_to_list(o):
    """Recursively flatten a TVM runtime object (NDArray, ADT container, or
    Relay list ConstructorValue) into a flat Python list."""
    if isinstance(o, tvm.nd.NDArray):
        return [o.numpy().tolist()]
    if isinstance(o, tvm.runtime.container.ADT):
        flattened = []
        for field in o:
            flattened.extend(vmobj_to_list(field))
        return flattened
    if isinstance(o, tvm.relay.backend.interpreter.ConstructorValue):
        ctor_name = o.constructor.name_hint
        if ctor_name == "Cons":
            head = vmobj_to_list(o.fields[0])
            head.extend(vmobj_to_list(o.fields[1]))
            return head
        if ctor_name == "Nil":
            return []
        # "tensor_nil" must be checked before the broader "tensor" match.
        if "tensor_nil" in ctor_name:
            return [0]
        if "tensor" in ctor_name:
            return [o.fields[0].numpy()]
        raise RuntimeError("Unknown object type: %s" % ctor_name)
    raise RuntimeError("Unknown object type: %s" % type(o))
def _quantize_keras_model(
    keras_model, representative_data_gen, is_float_input=False, is_float_output=False
):
    """Utility function to quantize a Keras model using TFLite converter."""
    tfl_converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
    tfl_converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tfl_converter.representative_dataset = representative_data_gen
    tfl_converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    # NOTE: If representative dataset is provided, and inference input type is not set,
    # then converter will self add quant & dequant Op accordingly.
    if not is_float_input:
        tfl_converter.inference_input_type = tf.uint8
    if not is_float_output:
        tfl_converter.inference_output_type = tf.uint8
    return tfl_converter.convert()
def run_tvm_graph(
    tflite_model_buf,
    input_data,
    input_node,
    num_output=1,
    target="llvm",
    out_names=None,
    mode="graph_executor",
    op_converter=relay.frontend.tflite.OperatorConverter,
):
    """Compile a TFLite flatbuffer with Relay and execute it on TVM.

    Returns the executor result flattened via vmobj_to_list for the
    debug/vm modes, otherwise a list of numpy output arrays.
    """
    # TFLite.Model.Model has changed to TFLite.Model from 1.14 to 2.1
    try:
        import tflite.Model
        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite
        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except ImportError:
        raise ImportError("The tflite package must be installed")
    # Normalize to parallel lists so single-input graphs need no special case.
    input_data = convert_to_list(input_data)
    input_node = convert_to_list(input_node)
    # Shape/dtype maps keyed by input node name, as the Relay frontend expects.
    shape_dict = {}
    dtype_dict = {}
    for i, e in enumerate(input_node):
        shape_dict[e] = input_data[i].shape
        dtype_dict[e] = input_data[i].dtype.name
    mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
    )
    if mode in ["debug", "vm"]:
        # Debug/VM executors take positional args in Relay-param order, so
        # match each function parameter to its input tensor by name.
        inputs = []
        for param in mod["main"].params:
            found = False
            for i, n in enumerate(input_node):
                if n == param.name_hint:
                    found = True
                    inputs.append(tvm.nd.array(input_data[i]))
                    break
            # Interpreter doesn't bind constants, so still need to find in params
            if not found:
                inputs.append(tvm.nd.array(params[param.name_hint]))
        result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
            *inputs
        )
        return vmobj_to_list(result)
    else:
        with tvm.transform.PassContext(opt_level=3):
            lib = relay.build(mod, target, params=params)
        dev = tvm.device(target, 0)
        from tvm.contrib import graph_executor
        m = graph_executor.GraphModule(lib["default"](dev))
        # set inputs
        for i, e in enumerate(input_node):
            m.set_input(e, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
        # execute
        m.run()
        # get outputs
        assert out_names is None or num_output == len(
            out_names
        ), "out_names: {} num_output: {}".format(out_names, num_output)
        tvm_output_list = []
        for i in range(0, num_output):
            tvm_output = m.get_output(i)
            tvm_output_list.append(tvm_output.numpy())
        return tvm_output_list
def run_tflite_graph(tflite_model_buf, input_data):
    """Generic function to execute TFLite"""
    input_data = convert_to_list(input_data)
    interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    # Resize every model input to match the tensors actually provided.
    for idx, detail in enumerate(input_details):
        interpreter.resize_tensor_input(detail["index"], input_data[idx].shape)
    interpreter.allocate_tensors()
    # Feed inputs.
    assert len(input_data) == len(input_details)
    for idx, detail in enumerate(input_details):
        interpreter.set_tensor(detail["index"], input_data[idx])
    # Run inference.
    interpreter.invoke()
    # Collect every output tensor.
    return [interpreter.get_tensor(detail["index"]) for detail in output_details]
def compare_tflite_with_tvm(
    in_data,
    in_name,
    input_tensors,
    output_tensors,
    init_global_variables=False,
    out_names=None,
    quantized=False,
    input_range=None,
    mode="graph_executor",
    experimental_new_converter=False,
    fp16_quantized=False,
):
    """Generic function to generate and compare TFLite and TVM output"""
    in_data = convert_to_list(in_data)
    in_name = convert_to_list(in_name)
    out_names = convert_to_list(out_names)
    # Strip the ":0" tensor suffix to get the plain node names TVM expects.
    in_node = [0] * len(in_name)
    for i in range(len(in_name)):
        in_node[i] = in_name[i].split(":")[0] if ":" in in_name[i] else in_name[i]
    with tf.Session() as sess:
        if init_global_variables:
            sess.run(variables.global_variables_initializer())
        # convert to tflite model
        converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
        converter.experimental_new_converter = experimental_new_converter
        if quantized:
            converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
            input_arrays = converter.get_input_arrays()
            input_stats = {}
            # calculate the mean and quantization scale for every input tensor,
            # with respect to its fp32 input range, defined in fake_quant.
            # s = 255/(fmax-fmin); m = -fmin*s (the zero point)
            for i in input_arrays:
                try:
                    quant_scale = 255 / (input_range[i][1] - input_range[i][0])
                except ZeroDivisionError:
                    raise ZeroDivisionError(
                        "Min and max of the input range for tensor " + i + " can't be equal"
                    )
                mean = -input_range[i][0] * quant_scale
                input_stats[i] = (mean, quant_scale)
            converter.quantized_input_stats = input_stats
        elif fp16_quantized:
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.target_spec.supported_types = [tf.float16]
        tflite_model_buffer = converter.convert()
        tflite_output = run_tflite_graph(tflite_model_buffer, in_data)
        # Run the same flatbuffer through TVM on each enabled target and
        # compare against the TFLite reference output.
        for device in ["llvm"]:
            dev = tvm.device(device, 0)
            if not tvm.testing.device_enabled(device):
                print("Skip because %s is not enabled" % device)
                continue
            tvm_output = run_tvm_graph(
                tflite_model_buffer,
                in_data,
                in_node,
                target=device,
                num_output=len(out_names),
                out_names=out_names,
                mode=mode,
            )
            # WARNING: the results could well be random values clipped to 0 or 255 because of badly tuned output
            # range for the specific operator. While adding test ensure that we aren't getting only clipped values
            # in output tensors that still pass the assertion. For reference see _test_elemwise_qnn_out_range()
            if quantized and not fp16_quantized:
                for i in range(len(tflite_output)):
                    # allow absolute tolerance of 1 in the quantized results
                    tvm.testing.assert_allclose(tflite_output[i], tvm_output[i], atol=1, rtol=1e-5)
            else:
                for i in range(len(tflite_output)):
                    tvm.testing.assert_allclose(
                        tflite_output[i], tvm_output[i], atol=1e-5, rtol=1e-5
                    )
def with_fused_activation_function(input_tensor, fn_name):
    """Apply the named TFLite fused activation to *input_tensor*.

    ``None``/"NONE" is the identity; an unknown name raises AssertionError.
    """
    if fn_name in (None, "NONE"):
        return input_tensor
    if fn_name == "RELU":
        return nn_ops.relu(input_tensor)
    if fn_name == "RELU6":
        return nn_ops.relu6(input_tensor)
    if fn_name == "RELU_N1_TO_1":
        return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))
    if fn_name == "TANH":
        return math_ops.tanh(input_tensor)
    raise AssertionError("Unknown fused_activation_function {}".format(fn_name))
def _test_split(in_shape, axis, num_splits, dtype):
    """internal split tester taking as parameters in_shape, number of tensors to split into
    and dtype (data type)"""
    np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=in_shape, dtype=dtype, name="in_data")
        out = array_ops.split(in_data, num_splits, axis=axis)
        # A list num_splits (size_splits form) yields len(num_splits) outputs.
        split_count = len(num_splits) if isinstance(num_splits, list) else num_splits
        out_names = ["out_" + str(n) + ":0" for n in range(split_count)]
        compare_tflite_with_tvm([np_data], ["in_data"], [in_data], out, out_names=out_names)
def test_forward_split():
    """test split layer: equal and size-based splits over ranks 1-4,
    positive and negative axes."""
    # rank 1
    _test_split((3,), 0, 1, "float32")
    _test_split((3,), 0, 3, "float32")
    _test_split((6,), 0, 3, "float32")
    # rank 2
    _test_split((6, 2), 0, 3, "float32")
    _test_split((2, 6), 1, 6, "float32")
    # rank 3
    # int32 split only exercised on TF >= 1.14 — NOTE(review): confirm why
    # older versions are excluded for this dtype.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_split((6, 2, 4), 0, 2, "int32")
    _test_split((2, 6, 4), 1, 3, "float32")
    _test_split((2, 4, 6), 2, 1, "float32")
    # rank 4
    _test_split((6, 1, 3, 5), 0, 3, "float32")
    _test_split((1, 6, 3, 5), 1, 3, "float32")
    _test_split((1, 3, 6, 5), 2, 3, "float32")
    _test_split((1, 3, 5, 6), 3, 3, "float32")
    # split along negative axis
    _test_split((6, 1, 3, 5), -4, 3, "float32")
    _test_split((1, 6, 3, 5), -3, 3, "float32")
    _test_split((1, 3, 6, 5), -2, 3, "float32")
    _test_split((1, 3, 5, 6), -1, 3, "float32")
    # size_splits split
    _test_split((6,), 0, [1, 2, 3], "float32")
    _test_split((3, 6, 4), -2, [1, 4, 1], "float32")
#######################################################################
# slice
# -----
def _test_slice(data, begin, size):
    """One iteration of SLICE"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        sliced = array_ops.slice(placeholder, begin, size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [sliced])
def test_forward_slice():
    """SLICE"""
    _test_slice(np.arange(4, dtype=np.float32).reshape((4,)), begin=[0], size=[2])
    _test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3])
    # tflite 1.13 outputs nonsense values if size[i] == -1
    # (size -1 means "slice to the end of that dimension")
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1])
        _test_slice(np.arange(5, dtype=np.int32).reshape((5,)), begin=[4], size=[-1])
#######################################################################
# Topk
# ----
def _test_topk(in_shape, k=1):
    """One iteration of TOPK"""
    data = np.random.uniform(size=in_shape).astype("float32")
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        values_and_indices = nn_ops.top_k(placeholder, k, name="TopK")
        # Only the values output (index 0) is compared against TVM.
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [values_and_indices[0]])
def test_forward_topk():
    """TOPK"""
    _test_topk((3,), 1)
    _test_topk((3,), 3)
    _test_topk((3, 5, 7), 3)
    # NOTE(review): exact duplicate of the previous case — possibly meant to
    # vary k or the shape; confirm intent.
    _test_topk((3, 5, 7), 3)
#######################################################################
# Gather
# ------
def _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False, wrap_idx=False):
    """One iteration of Gather.

    When *oob* is True an out-of-bounds conversion failure (ValueError) is
    expected and swallowed; any other error propagates.  When *wrap_idx* is
    True the indices are fed through a placeholder instead of as constants.
    """
    indices = np.asarray(indices).astype("int32")
    data = np.random.uniform(1, 10, size=dshape)
    data = data.astype(np.uint8) if quantized else data.astype(dtype)
    with tf.Graph().as_default():
        if wrap_idx:
            in_name = "in_indices"
            indices_expr = array_ops.placeholder(
                shape=indices.shape, dtype=indices.dtype, name=in_name
            )
            in_tensor_name = [in_name + ":0"]
            in_indices = [indices_expr]
        else:
            indices_expr = indices
            indices = []
            in_tensor_name = []
            in_indices = []
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_data")
        if axis:
            out = array_ops.gather(in_data, indices_expr, axis=axis)
        else:
            out = array_ops.gather(in_data, indices_expr)  # tflite conversion fails for None axis
        input_range = {"in_data": (-100, 100)} if quantized else None
        try:
            # NOTE(review): in the wrap_idx branch `indices` stays an ndarray,
            # so `[data] + indices` relies on numpy's list/array addition —
            # confirm this is the intended input packing.
            compare_tflite_with_tvm(
                [data] + indices,
                ["in_data:0"] + in_tensor_name,
                [in_data] + in_indices,
                [out],
                quantized=quantized,
                input_range=input_range,
            )
        except ValueError:
            # Out-of-bounds gathers are expected to fail conversion; anything
            # else is a real error.  (Removed a dead `except Exception as e:
            # raise e` clause that only re-raised and truncated tracebacks.)
            if not oob:
                raise
def test_forward_gather():
    """GATHER

    Exercises constant and placeholder-wrapped indices, with and without
    quantization, plus out-of-bounds index failures.
    """
    for quantized in [False, True]:
        for wrap_idx in [False, True]:
            # Pass wrap_idx by keyword: positionally the sixth argument of
            # _test_gather is `oob`, so the previous positional calls were
            # toggling out-of-bounds handling instead of index wrapping.
            _test_gather((4,), [1], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((4,), [1], None, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((1, 4), [0], 0, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((4,), [[[1, 0], [0, 1]]], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((2, 2), [[[1, 0], [0, 1]]], None, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[1, 0]]], 0, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[1, 0]]], 2, "int32", quantized, wrap_idx=wrap_idx)
            _test_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32", quantized, wrap_idx=wrap_idx)
            _test_gather((3, 3, 3), [[[2, 1]]], -1, "int32", quantized, wrap_idx=wrap_idx)
        # Out of boundary error cannot be tested with wrapped index
        _test_gather((4,), [16], 0, "float32", quantized, oob=True)
        _test_gather((1, 3, 3), [12], 0, "int32", quantized, oob=True)
        _test_gather((1, 3, 3), [20], 1, "float32", quantized, oob=True)
        _test_gather((1, 3, 3), [20, 20], 2, "float32", quantized, oob=True)
#######################################################################
# Gather_ND
# ---------
def _test_gather_nd(data, indices):
    """One iteration of GATHER_ND"""
    with tf.Graph().as_default():
        data_ph = tf.placeholder(shape=data.shape, dtype=data.dtype, name="data")
        indices_ph = tf.placeholder(shape=indices.shape, dtype=indices.dtype, name="indices")
        gathered = tf.gather_nd(data_ph, indices_ph)
        compare_tflite_with_tvm(
            [data, indices], ["data:0", "indices:0"], [data_ph, indices_ph], [gathered]
        )
def test_forward_gather_nd():
    """GATHER_ND: index tensors of increasing rank against 1-D to 3-D data."""
    _test_gather_nd(
        np.array([[[1.2, 2.0], [3.1, 4.1]], [[5.1, 6.1], [7.1, 8.1]]]).astype("float32"),
        np.asarray([[0, 1], [1, 0]]).astype("int32"),
    )
    _test_gather_nd(
        np.reshape(np.arange(30), [5, 6]).astype("int32"), np.asarray([[1, 2]]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(12), [2, 3, 2]).astype("int32"),
        np.asarray([[[0, 0], [0, 1]], [[1, 0], [1, 1]]]).astype("int32"),
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [4]).astype("float32"), np.asarray([1]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0]).astype("int32")
    )
    _test_gather_nd(
        np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0, 3]).astype("int32")
    )
#######################################################################
# StridedSlice
# ------------
def _test_stridedslice(
    ip_shape,
    begin,
    end,
    stride,
    dtype,
    begin_mask=0,
    end_mask=0,
    new_axis_mask=0,
    shrink_axis_mask=0,
    ellipsis_mask=0,
    quantized=False,
):
    """One iteration of a Stridedslice.

    Builds a single-op strided_slice graph and compares TFLite vs TVM,
    optionally through the quantized path.
    """
    # Single conversion: the previous intermediate astype(dtype) was always
    # overwritten by the quantized/non-quantized cast, so it did nothing.
    data = np.random.uniform(size=ip_shape).astype(np.uint8 if quantized else dtype)
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
        out = array_ops.strided_slice(
            in_data,
            begin,
            end,
            stride,
            begin_mask=begin_mask,
            end_mask=end_mask,
            new_axis_mask=new_axis_mask,
            shrink_axis_mask=shrink_axis_mask,
            ellipsis_mask=ellipsis_mask,
        )
        input_range = {"in_data": (-100, 100)} if quantized else None
        compare_tflite_with_tvm(
            [data], ["in_data:0"], [in_data], [out], quantized=quantized, input_range=input_range
        )
def test_forward_stridedslice():
    """test StridedSlice: shrink-axis masks, negative begins/strides,
    both float and quantized paths."""
    for quantized in [False, True]:
        _test_stridedslice(
            (1, 3, 3),
            [0, 0, 0],
            [3, 3, 3],
            [1, 1, 1],
            "float32",
            shrink_axis_mask=7,
            quantized=quantized,
        )
        _test_stridedslice(
            (1, 3, 3),
            [0, 0, 0],
            [3, 3, 3],
            [1, 1, 1],
            "float32",
            shrink_axis_mask=5,
            quantized=quantized,
        )
        # NOTE: (2) is a plain int, not a tuple — numpy accepts an int size,
        # so this is a 1-D input of length 2.
        _test_stridedslice((2), [1], [1], [1], "float32", shrink_axis_mask=1, quantized=quantized)
        _test_stridedslice(
            (3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32", quantized=quantized
        )
        _test_stridedslice(
            (3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=0, quantized=quantized
        )
        _test_stridedslice(
            (4, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2, quantized=quantized
        )
        _test_stridedslice(
            (3, 4), [-1, 0], [0, 3], [1, 1], "float32", shrink_axis_mask=1, quantized=quantized
        )
#######################################################################
# transpose
# ---------
def _test_forward_transpose(ishape, axes=()):
    """Compare TFLite and TVM for a single transpose, optionally with
    an explicit permutation in *axes*."""
    data = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        transposed = (
            array_ops.transpose(placeholder, axes) if axes else array_ops.transpose(placeholder)
        )
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [transposed])
def test_forward_transpose():
    """TRANSPOSE: default (reversed) and explicit axis permutations, ranks 2-4."""
    _test_forward_transpose((2, 2))
    _test_forward_transpose((2, 3, 4))
    _test_forward_transpose((7, 8, 8, 10))
    _test_forward_transpose((2, 3, 4), (1, 2, 0))
    # Identity permutation.
    _test_forward_transpose((2, 3, 4), (0, 1, 2))
    _test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
    # Empty axes tuple falls back to the default (reversed) permutation.
    _test_forward_transpose((2, 3, 4, 5), ())
#######################################################################
# Cast
# ----
def _test_cast(data, cast_dtype, use_mlir=False):
    """One iteration of CAST"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        casted = math_ops.cast(placeholder, cast_dtype)
        compare_tflite_with_tvm(
            data, "Placeholder:0", [placeholder], [casted], experimental_new_converter=use_mlir
        )
def test_forward_cast():
    """CAST: float32->int32/uint8 and int32->int64, via both the legacy and
    the MLIR-based (experimental) converters."""
    for use_mlir in [False, True]:
        _test_cast(
            np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32, use_mlir=use_mlir
        )
        _test_cast(
            np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.uint8, use_mlir=use_mlir
        )
        _test_cast(
            np.arange(6.0, dtype=np.int32).reshape((1, 6)), cast_dtype=tf.int64, use_mlir=use_mlir
        )
#######################################################################
# Batch Mat Mul
# ----
def _test_batch_matmul(A_shape, B_shape, dtype, adjoint_a=False, adjoint_b=False):
    """Compare TFLite and TVM for one batched matmul, optionally adjointing
    either operand."""
    with tf.Graph().as_default():
        lhs = array_ops.placeholder(shape=A_shape, dtype=dtype, name="A")
        rhs = array_ops.placeholder(shape=B_shape, dtype=dtype, name="B")
        product = math_ops.matmul(
            lhs, rhs, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul"
        )
        lhs_np = np.random.uniform(high=5.0, size=A_shape).astype(dtype)
        rhs_np = np.random.uniform(high=5.0, size=B_shape).astype(dtype)
        compare_tflite_with_tvm([lhs_np, rhs_np], [lhs.name, rhs.name], [lhs, rhs], [product])
def test_forward_batch_matmul():
    """BATCH_MAT_MUL: all adjoint combinations plus a rank-5 batch."""
    _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32")
    _test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True)
    _test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", True, False)
    _test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
    _test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "float32")
#######################################################################
# Tile
# ----
def _test_forward_tile(in_shape, reps, dtype):
    """Compare TFLite and TVM for a single TILE op."""
    data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        tiled = array_ops.tile(placeholder, reps)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [tiled])
def test_forward_tile():
    """TILE"""
    for shape, reps, dtype in [((2,), (3,), "int32"), ((2, 2), (2, 3), "float32")]:
        _test_forward_tile(shape, reps, dtype)
######################################################################
# BatchToSpaceND
# --------------
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
    """One iteration of BATCH_TO_SPACE_ND with a random input tensor."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype=dtype)
        result = array_ops.batch_to_space_nd(placeholder, block_shape, crops)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [result])
def test_forward_batch_to_space_nd():
    """BATCH_TO_SPACE_ND"""
    # test cases: https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/batch-to-space-n-d
    for shape, crops in [
        ([4, 1, 1, 1], [[0, 0], [0, 0]]),
        ([4, 1, 1, 3], [[0, 0], [0, 0]]),
        ([4, 2, 2, 1], [[0, 0], [0, 0]]),
        ([4, 3, 3, 1], [[0, 1], [0, 1]]),
    ]:
        _test_batch_to_space_nd(input_shape=shape, block_shape=[2, 2], crops=crops)
######################################################################
# SpaceToBatchND
# --------------
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
    """One iteration of SPACE_TO_BATCH_ND with a random input tensor."""
    values = np.random.uniform(0, 5, size=input_shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype=dtype)
        result = array_ops.space_to_batch_nd(placeholder, block_shape, paddings)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [result])
def test_forward_space_to_batch_nd():
    """SPACE_TO_BATCH_ND"""
    # test cases: https://www.tensorflow.org/api_docs/python/tf/space_to_batch_nd
    for shape, paddings in [
        ([1, 2, 2, 1], [[0, 0], [0, 0]]),
        ([1, 2, 2, 3], [[0, 0], [0, 0]]),
        ([1, 4, 4, 1], [[0, 0], [0, 0]]),
        ([2, 2, 4, 1], [[0, 0], [2, 0]]),
    ]:
        _test_space_to_batch_nd(input_shape=shape, block_shape=[2, 2], paddings=paddings)
#######################################################################
# Pooling
# -------
def _test_pooling_iteration(input_shape, **kwargs):
    """One iteration of pool operation with given shapes and attributes"""
    # Strictly negative, strictly decreasing input values.
    values = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=input_shape, dtype="float32")
        pooled = nn_ops.pool(placeholder, **kwargs)
        compare_tflite_with_tvm(values, "Placeholder:0", [placeholder], [pooled])
def _test_pooling(input_shape, **kwargs):
    """Run one pooling iteration (thin alias kept for readability)."""
    return _test_pooling_iteration(input_shape, **kwargs)
def test_forward_pooling():
    """Pooling"""
    # (input_shape, window_shape, strides); padding/dilation fixed below.
    configs = [
        ([2, 9, 10, 2], [1, 1], [1, 1]),
        ([2, 10, 9, 2], [1, 1], [1, 1]),
        ([2, 9, 10, 2], [2, 1], [1, 1]),
        ([2, 10, 9, 2], [2, 3], [2, 1]),
    ]
    for pool_type in ["AVG", "MAX"]:
        for input_shape, window_shape, strides in configs:
            _test_pooling(
                input_shape=input_shape,
                window_shape=window_shape,
                padding="SAME",
                pooling_type=pool_type,
                dilation_rate=[1, 1],
                strides=strides,
            )
def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):
    """One iteration of L2_POOL_2D, built as sqrt(avg_pool(square(x)))."""
    values = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    with tf.Graph().as_default():
        in_data = tf.placeholder(dtype=tf.float32, name="input", shape=input_shape)
        squared = tf.square(in_data)
        pooled = tf.nn.avg_pool(
            squared, ksize=ksize, strides=strides, padding=padding, data_format=data_format
        )
        out = with_fused_activation_function(tf.sqrt(pooled), fused_func_name)
        compare_tflite_with_tvm(values, "input", [in_data], [out])
def test_forward_l2_pool2d():
    """L2_POOL_2D"""
    # (input_shape, ksize, strides, padding, fused activation)
    cases = [
        ([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "RELU6"),
        ([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "RELU6"),
        ([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "SAME", None),
        ([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "SAME", None),
        ([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", "RELU"),
        ([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", None),
        ([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "VALID", None),
        ([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "VALID", "RELU6"),
    ]
    for input_shape, ksize, strides, padding, fused in cases:
        _test_l2_pool2d(input_shape, ksize, strides, padding, "NHWC", fused)
#######################################################################
# Convolution
# -----------
def _test_tflite2_quantized_convolution(
    input_shape, kernel_shape, dilations, strides, padding, data_format
):
    """One iteration of TFLite2 quantized convolution with given shapes and attributes.

    Builds a Keras Conv2D model with fixed random weights, quantizes it with a
    one-sample representative dataset and compares the TFLite output against TVM.
    """
    # Map the TF layout string to the Keras layout string.
    # Fixed: the original evaluated `if "NHWC"`, a truthy literal, so the
    # branch always chose "channels_last" regardless of data_format.
    data_format = "channels_last" if data_format == "NHWC" else "channels_first"
    data = np.random.uniform(0, 1, input_shape).astype("float32")
    kernel = np.random.uniform(0, 1, kernel_shape).astype("float32")

    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    conv = tf.keras.layers.Conv2D(
        filters=kernel_shape[3],
        kernel_size=(kernel_shape[0], kernel_shape[1]),
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation="relu",
        use_bias=False,
    )(data_in)
    keras_model = tf.keras.models.Model(data_in, conv)
    # Overwrite the random Keras init with our fixed random kernel.
    keras_model.layers[1].set_weights([kernel])

    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for i in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, data_in.name.replace(":0", ""))
    # Loose tolerance: quantization makes the two backends differ slightly.
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
    )
def _test_tflite2_quantized_depthwise_convolution(
    input_shape, kernel_shape, dilations, strides, padding, data_format, depth_multiplier
):
    """One iteration of TFLite2 quantized depthwise convolution with given shapes and attributes.

    Builds a Keras DepthwiseConv2D model with fixed random weights, quantizes it
    with a one-sample representative dataset and compares TFLite against TVM.
    """
    # Map the TF layout string to the Keras layout string.
    # Fixed: the original evaluated `if "NHWC"`, a truthy literal, so the
    # branch always chose "channels_last" regardless of data_format.
    data_format = "channels_last" if data_format == "NHWC" else "channels_first"
    data = np.random.uniform(0, 1, input_shape).astype("float32")
    kernel = np.random.uniform(0, 1, kernel_shape).astype("float32")

    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=(kernel_shape[0], kernel_shape[1]),
        strides=strides,
        padding=padding,
        data_format=data_format,
        activation="relu",
        use_bias=False,
        depth_multiplier=depth_multiplier,
    )(data_in)
    keras_model = tf.keras.models.Model(data_in, conv)
    # Overwrite the random Keras init with our fixed random kernel.
    keras_model.layers[1].set_weights([kernel])

    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for i in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, data_in.name.replace(":0", ""))
    # Loose tolerance: quantization makes the two backends differ slightly.
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
    )
def _test_convolution(
    tensor_in_sizes,
    filter_in_sizes,
    dilations,
    strides,
    padding,
    data_format,
    is_depthwise=False,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of convolution with given shapes and attributes"""
    # Element counts of input and filter, used to build deterministic float data.
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    if quantized:
        # Random uint8 data for the quantized paths instead of the 1..N ramp.
        data_array = np.random.uniform(0, 255, tensor_in_sizes).astype("uint8")
        filter_array = np.random.uniform(0, 255, filter_in_sizes).astype("uint8")
    else:
        data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
        filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
        in_filter = constant_op.constant(
            filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
        )
        # Expand the 2-D strides/dilations to rank-4 (batch/channel = 1).
        # NOTE(review): dilations is expanded but never forwarded to the conv
        # ops below — confirm whether dilation coverage was intended.
        strides = [1] + strides + [1]
        dilations = [1] + dilations + [1]

        # Build a float conv first; the quantized path below rebuilds it from
        # fake-quantized tensors and rebinds `out`.
        if is_depthwise:
            out = nn_ops.depthwise_conv2d_native(
                in_data, in_filter, strides=strides, padding=padding, data_format=data_format
            )
        else:
            out = nn_ops.conv2d(
                in_data, in_filter, strides=strides, padding=padding, data_format=data_format
            )

        if quantized and not fp16_quantized:
            if is_depthwise:
                # Quantized the inputs and feed them to the convolution
                inq_data = tf.quantization.fake_quant_with_min_max_args(
                    in_data, min=-100, max=100, name="inq_data"
                )
                inq_filter = tf.quantization.fake_quant_with_min_max_args(
                    in_filter, min=-100, max=100, name="inq_filter"
                )
                out = nn_ops.depthwise_conv2d_native(
                    inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-200, max=200, name="out"
                )

                # Set the input quantization range
                input_range = {"in_data": (-100, 100)} if quantized else None

                # Compare
                compare_tflite_with_tvm(
                    data_array,
                    "in_data",
                    [in_data],
                    [out],
                    quantized=quantized,
                    input_range=input_range,
                    experimental_new_converter=True,
                )
            else:
                # Quantized the inputs and feed them to the convolution
                inq_data = tf.quantization.fake_quant_with_min_max_args(
                    in_data, min=-100, max=100, name="inq_data"
                )
                inq_filter = tf.quantization.fake_quant_with_min_max_args(
                    in_filter, min=-100, max=100, name="inq_filter"
                )
                out = nn_ops.conv2d(
                    inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-200, max=200, name="out"
                )

                # Set the input quantization range
                input_range = {"in_data": (-100, 100)} if quantized else None

                # Compare
                # (fp16_quantized is False on this branch; forwarding it is a no-op.)
                compare_tflite_with_tvm(
                    data_array,
                    "in_data",
                    [in_data],
                    [out],
                    quantized=quantized,
                    input_range=input_range,
                    experimental_new_converter=True,
                    fp16_quantized=fp16_quantized,
                )
        else:
            # Float path — also reached when quantized and fp16_quantized are
            # both True. NOTE(review): fp16_quantized is NOT forwarded to
            # compare_tflite_with_tvm here — confirm whether that is intended.
            data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
            compare_tflite_with_tvm(data_array, "in_data", [in_data], [out])
def test_forward_convolution():
    """CONV_2D and DEPTHWISE_CONV_2D in float, quantized and fp16-quantized modes."""
    # (input_shape, filter_shape, dilations, strides, padding)
    regular_cases = [
        ([4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME"),
        ([4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID"),
        ([4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME"),
        ([4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID"),
    ]
    depthwise_cases = [
        ([4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME"),
        ([4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID"),
        ([4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME"),
        ([4, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID"),
        ([4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID"),
        # depthwise convolution with single input channel
        ([1, 76, 64, 1], [9, 5, 1, 96], [1, 1], [1, 1], "SAME"),
    ]
    for quantized in [False, True]:
        for fp16_quantized in [False, True]:
            for in_shape, filt_shape, dilations, strides, padding in regular_cases:
                _test_convolution(
                    in_shape,
                    filt_shape,
                    dilations,
                    strides,
                    padding,
                    "NHWC",
                    quantized=quantized,
                    fp16_quantized=fp16_quantized,
                )
            for in_shape, filt_shape, dilations, strides, padding in depthwise_cases:
                _test_convolution(
                    in_shape,
                    filt_shape,
                    dilations,
                    strides,
                    padding,
                    "NHWC",
                    True,
                    quantized=quantized,
                    fp16_quantized=fp16_quantized,
                )
    # TFLite2 quantized convolution testing
    if package_version.parse(tf.VERSION) >= package_version.parse("2.3.0"):
        for in_shape, filt_shape, dilations, strides, padding in [
            ([1, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME"),
            ([1, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID"),
            ([1, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID"),
            ([1, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME"),
        ]:
            _test_convolution(
                in_shape, filt_shape, dilations, strides, padding, "NHWC", quantized=True
            )

        # Disable as tests are flaky - https://github.com/apache/tvm/issues/6064
        # depthwise convolution
        # _test_tflite2_quantized_depthwise_convolution([1, 8, 8, 128], [1, 1, 128, 1], [1, 1], [1, 1],
        #                                               'SAME', 'NHWC', 1)
        # _test_tflite2_quantized_depthwise_convolution([1, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2],
        #                                               'VALID', 'NHWC', 1)
        # _test_tflite2_quantized_depthwise_convolution([1, 24, 24, 3], [7, 7, 3, 8], [1, 1], [2, 2],
        #                                               'SAME', 'NHWC', 8)
#######################################################################
# Transpose Convolution
# ---------------------
def _test_transpose_conv(
    tensor_in_sizes,
    filter_in_sizes,
    output_shape,
    strides,
    padding,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of transpose convolution with given shapes and attributes.

    The uint8-quantized path fake-quantizes input and filter to [-100, 100];
    otherwise the graph runs in float32 (optionally fp16-quantized at
    conversion time via compare_tflite_with_tvm).
    """
    total_size_1 = 1
    total_size_2 = 1
    for s in tensor_in_sizes:
        total_size_1 *= s
    for s in filter_in_sizes:
        total_size_2 *= s

    with tf.Graph().as_default():
        if quantized and not fp16_quantized:
            # Initializes the input tensor with array containing incrementing
            # numbers from 1, clamped to the uint8 maximum.
            # Fixed: the original used max(f, 255), which pins every value at
            # >= 255 and then wraps on the uint8 cast instead of clamping.
            data_array = [min(f, 255) for f in range(1, total_size_1 + 1)]
            filter_array = [min(f, 255) for f in range(1, total_size_2 + 1)]
            data_array = np.reshape(data_array, tensor_in_sizes).astype("uint8")
            filter_array = np.reshape(filter_array, filter_in_sizes).astype("uint8")

            in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="q_data"
            )
            input_range = {"q_data": (-100, 100)}

            in_filter = constant_op.constant(
                filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
            )
            inq_filter = tf.quantization.fake_quant_with_min_max_args(
                in_filter, min=-100, max=100, name="q_filter"
            )

            strides = [1] + strides + [1]

            out = nn_ops.conv2d_transpose(
                inq_data, inq_filter, output_shape=output_shape, strides=strides, padding=padding
            )
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            compare_tflite_with_tvm(
                [data_array], ["q_data"], [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            # Initializes the input tensor with array containing incrementing
            # numbers from 1.
            data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
            filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]

            in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
            in_filter = constant_op.constant(
                filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
            )

            strides = [1] + strides + [1]
            # in_filter layout is HWOI
            out = nn_ops.conv2d_transpose(
                in_data, in_filter, output_shape=output_shape, strides=strides, padding=padding
            )
            data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
            compare_tflite_with_tvm(
                [data_array], ["in_data"], [in_data], [out], fp16_quantized=fp16_quantized
            )
def test_forward_transpose_conv():
    """TRANSPOSE_CONV"""
    # (input_shape, kernel_shape, output_shape, strides, padding)
    cases = [
        # odd size input, padding VALID
        ([1, 5, 6, 16], [2, 2, 16, 16], [1, 10, 12, 16], [2, 2], "VALID"),
        # odd size input, padding SAME
        ([1, 5, 6, 16], [2, 2, 16, 16], [1, 10, 12, 16], [2, 2], "SAME"),
        # kernel 3x3, padding VALID
        ([4, 32, 32, 16], [3, 3, 5, 16], [4, 34, 34, 5], [1, 1], "VALID"),
        ([1, 32, 32, 16], [3, 3, 5, 16], [1, 65, 65, 5], [2, 2], "VALID"),
        ([1, 32, 32, 16], [3, 3, 5, 16], [1, 65, 34, 5], [2, 1], "VALID"),
        # kernel 3x3, padding SAME
        ([4, 32, 32, 16], [3, 3, 5, 16], [4, 32, 32, 5], [1, 1], "SAME"),
        ([1, 32, 32, 16], [3, 3, 5, 16], [1, 64, 64, 5], [2, 2], "SAME"),
        ([1, 32, 32, 16], [3, 3, 5, 16], [1, 64, 32, 5], [2, 1], "SAME"),
        # kernel 2x2, padding VALID
        ([4, 32, 32, 16], [2, 2, 5, 16], [4, 33, 33, 5], [1, 1], "VALID"),
        ([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 64, 5], [2, 2], "VALID"),
        ([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 33, 5], [2, 1], "VALID"),
        # kernel 2x2, padding SAME
        ([4, 32, 32, 16], [2, 2, 5, 16], [4, 32, 32, 5], [1, 1], "SAME"),
        ([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 64, 5], [2, 2], "SAME"),
        ([1, 32, 32, 16], [2, 2, 5, 16], [1, 64, 32, 5], [2, 1], "SAME"),
        # kernel 1x1, padding VALID
        ([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], "VALID"),
        ([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], "VALID"),
        ([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], "VALID"),
        # kernel 1x1, padding SAME
        ([4, 32, 32, 16], [1, 1, 5, 16], [4, 32, 32, 5], [1, 1], "SAME"),
        ([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 63, 5], [2, 2], "SAME"),
        ([1, 32, 32, 16], [1, 1, 5, 16], [1, 63, 32, 5], [2, 1], "SAME"),
    ]
    for quantized in [True, False]:
        for fp16_quantized in [True, False]:
            for in_shape, k_shape, out_shape, strides, padding in cases:
                _test_transpose_conv(
                    in_shape, k_shape, out_shape, strides, padding, quantized, fp16_quantized
                )
#######################################################################
# Reshape
# -------
def _test_reshape(data, out_shape, wrap_shape, quantized=False):
    """One iteration of reshape operation with given data and out shape"""
    # wrap_shape=True feeds the target shape through a second int32 placeholder
    # ("Newshape") instead of a compile-time constant, exercising dynamic
    # reshape; both paths run under the VM executor (mode="vm").
    if quantized:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )
            input_range = {"inq_0": (-100, 100)}

            # Shape is either the literal list or an int32 array fed at run time.
            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)

            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(inq_data, in_shape)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
            # Inputs/names/tensors grow by one entry when the shape is wrapped.
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["inq_0:0", "Newshape:0"] if wrap_shape else ["inq_0:0"],
                [inq_data, in_shape] if wrap_shape else [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                mode="vm",
            )
    else:
        # Test with tensor and constant
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)

            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)

            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(in_data, in_shape)
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["Placeholder:0", "Newshape:0"] if wrap_shape else ["Placeholder:0"],
                [in_data, in_shape] if wrap_shape else [in_data],
                [out],
                mode="vm",
            )
def test_forward_reshape():
    """RESHAPE"""
    float_cases = [
        (np.arange(6.0, dtype=np.float32), [2, 3]),
        (np.arange(6), [-1, 2]),
        (np.arange(6), [3, -1]),
        (np.arange(6), [-1]),
    ]
    for wrap in (True, False):
        for data, new_shape in float_cases:
            _test_reshape(data, new_shape, wrap)
    # Quantized reshape is only exercised with a constant (unwrapped) shape.
    for new_shape in ([2, 3], [-1, 2]):
        _test_reshape(np.arange(6, dtype=np.uint8), new_shape, False, True)
#######################################################################
# Resize
# ------
def _test_resize(
    tf_resize_op, images_data, size_data, align_corners, half_pixel_centers, quantized=False
):
    """One iteration of Resize"""
    # Test with tensor and constant
    with tf.Graph().as_default():
        images_tensor = array_ops.placeholder(shape=images_data.shape, dtype="float32", name="in")
        size = ops.convert_to_tensor(size_data, dtype=size_data.dtype)

        if quantized:
            # NOTE(review): this fake_quant reuses name="in", which the
            # placeholder above already claimed — TF will uniquify the op name;
            # confirm the input_range key "in" still matches what the converter
            # expects.
            images_tensor_q = tf.quantization.fake_quant_with_min_max_args(
                images_tensor, min=-3, max=2, name="in"
            )
            input_range = {"in": (-3, 2)}
            out_tensor = tf_resize_op(
                images=images_tensor_q,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            out_tensor = tf.quantization.fake_quant_with_min_max_args(
                out_tensor, min=-3, max=2, name="out_tensor"
            )

            compare_tflite_with_tvm(
                [images_data],
                ["in:0"],
                [images_tensor],
                [out_tensor],
                quantized=True,
                input_range=input_range,
            )
        else:
            out_tensor = tf_resize_op(
                images=images_tensor,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            compare_tflite_with_tvm([images_data], ["in:0"], [images_tensor], [out_tensor])
def test_all_resize():
    """Resize"""
    images_data = np.random.uniform(0, 255, (1, 16, 16, 3))
    images_data_float32 = images_data.astype(np.float32)
    images_data_uint8 = images_data.astype(np.uint8)
    size_data = np.array([8, 8]).astype("int32")

    ### RESIZE_BILINEAR
    # (align_corners, half_pixel_centers, quantized) — quantized runs use uint8 input.
    for align_corners, half_pixel_centers, quantized in [
        (False, False, False),
        (True, False, False),
        (False, False, True),
        (True, False, True),
        (False, True, True),
    ]:
        _test_resize(
            tf.image.resize_bilinear,
            images_data_uint8 if quantized else images_data_float32,
            size_data,
            align_corners=align_corners,
            half_pixel_centers=half_pixel_centers,
            quantized=quantized,
        )

    ### RESIZE_NEAREST_NEIGHBOR (was added in v1.13)
    # According to topi resize.h
    # Align corners not supported for nearest neighbour
    from tflite.BuiltinOperator import BuiltinOperator

    if "RESIZE_NEAREST_NEIGHBOR" in dir(BuiltinOperator()):
        _test_resize(
            tf.image.resize_nearest_neighbor,
            images_data_float32,
            size_data,
            align_corners=False,
            half_pixel_centers=False,
        )
#######################################################################
# Range
# -----
def _test_range(start, limit, delta):
    """One iteration of RANGE with scalar placeholders for start/limit/delta."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        scalars = [
            tf.placeholder(dtype=value.dtype, shape=(), name=name)
            for value, name in zip((start, limit, delta), ("start", "limit", "delta"))
        ]
        out = tf.range(*scalars, name="range")
        compare_tflite_with_tvm(
            [start, limit, delta],
            ["start", "limit", "delta"],
            scalars,
            [out],
            mode="vm",
            quantized=False,
        )
def _test_range_default():
    """RANGE with the default delta, and with only a start argument."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        p1 = tf.placeholder(dtype=tf.int32, shape=(), name="p1")
        p2 = tf.placeholder(dtype=tf.int32, shape=(), name="p2")
        outputs = [
            tf.range(start=p1, limit=p2),  # use default delta
            tf.range(start=p2),  # use start as limit with 0 as the first item in the range
        ]
        compare_tflite_with_tvm(
            [np.int32(1), np.int32(18)], ["p1", "p2"], [p1, p2], outputs, mode="vm"
        )
def test_forward_range():
    """RANGE"""
    _test_range(np.int32(1), np.int32(18), np.int32(3))
    # increment is of type float
    _test_range(np.int32(1), np.int32(18), np.float32(3.1))
    # start is of type float (delta 3.1 truncates to int32 3)
    _test_range(np.float32(1.0), np.int32(18), np.int32(3.1))
    _test_range_default()
#######################################################################
# Shape
# -----
def test_forward_shape():
    """SHAPE, taken of a dynamically-sized tf.range result."""
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) < package_version.parse("1.14.0"):
        return
    tf.reset_default_graph()
    with tf.Graph().as_default():
        data = np.array([1, 18, 3], dtype=np.int32)
        placeholders = [
            tf.placeholder(dtype=tf.int32, shape=[], name=name)
            for name in ("start", "limit", "delta")
        ]
        ranged = tf.range(*placeholders, tf.int32, name="range")
        out = tf.shape(ranged, out_type=tf.dtypes.int32)
        compare_tflite_with_tvm(
            list(np.nditer(data)),
            ["start", "limit", "delta"],
            placeholders,
            [out],
            mode="vm",
        )
#######################################################################
# Concatenation
# -------------
def _test_concatenation(data, axis):
    """One iteration of concatenation"""
    assert len(data) >= 1
    with tf.Graph().as_default():
        placeholders = []
        names = []
        for idx, tensor in enumerate(data):
            placeholders.append(
                array_ops.placeholder(
                    shape=tensor.shape, dtype=tensor.dtype, name="in_{}".format(idx)
                )
            )
            names.append("in_{}:0".format(idx))
        out = array_ops.concat(placeholders, axis=axis)
        compare_tflite_with_tvm(data, names, placeholders, [out])
def test_forward_concatenation():
    """CONCATENATION"""
    _test_concatenation([np.arange(6).reshape((1, 2, 1, 3)) for _ in range(2)], 1)
    _test_concatenation([np.arange(6).reshape((3, 2)) for _ in range(2)], 1)
    _test_concatenation([np.arange(6).reshape((2, 1, 1, 3)) for _ in range(3)], 1)
#######################################################################
# Unary elemwise
# --------------
def _test_unary_elemwise(math_op, data):
    """One iteration of unary elemwise"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in")
        result = math_op(placeholder)
        compare_tflite_with_tvm(data, ["in:0"], [placeholder], [result])
#######################################################################
# Ceil
# ----
def _test_ceil(data):
    """One iteration of CEIL via the generic unary-elemwise helper."""
    op = math_ops.ceil
    return _test_unary_elemwise(op, data)
#######################################################################
# Floor
# -----
def _test_floor(data):
    """One iteration of FLOOR via the generic unary-elemwise helper."""
    op = math_ops.floor
    return _test_unary_elemwise(op, data)
#######################################################################
# Round
# -----
def _test_round(data):
    """One iteration of ROUND via the generic unary-elemwise helper."""
    op = math_ops.round
    return _test_unary_elemwise(op, data)
#######################################################################
# Exp
# ---
def _test_exp(data):
    """One iteration of EXP via the generic unary-elemwise helper."""
    op = math_ops.exp
    return _test_unary_elemwise(op, data)
#######################################################################
# Log
# ---
def _test_log(data):
    """One iteration of LOG via the generic unary-elemwise helper."""
    op = math_ops.log
    return _test_unary_elemwise(op, data)
#######################################################################
# Sin
# ---
def _test_sin(data, quantized=False):
    """One iteration of SIN, optionally through fake-quantized tensors."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")

        if not quantized:
            out = math_ops.sin(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
            return

        inq_data = tf.quantization.fake_quant_with_min_max_args(
            in_data, min=1, max=6, name="inq_0"
        )
        input_range = {"inq_0": (1, 6)}
        out = tf.quantization.fake_quant_with_min_max_args(
            math_ops.sin(inq_data), min=1, max=6, name="out"
        )
        compare_tflite_with_tvm(
            data,
            "inq_0:0",
            [inq_data],
            [out],
            quantized=True,
            input_range=input_range,
            experimental_new_converter=True,
        )
def test_forward_sin():
    """SIN"""
    # float path: flat and reshaped inputs
    for shape in (None, (2, 1, 3)):
        values = np.arange(-2.0, 4.0, dtype=np.float32)
        _test_sin(values if shape is None else values.reshape(shape), quantized=False)
    # quantized path: flat and reshaped uint8 inputs
    for shape in (None, (2, 1, 3)):
        values = np.arange(1, 240, 40, dtype=np.uint8)
        _test_sin(values if shape is None else values.reshape(shape), quantized=True)
#######################################################################
# Cos
# ---
def _test_cos(data):
    """One iteration of COS via the generic unary-elemwise helper."""
    op = math_ops.cos
    return _test_unary_elemwise(op, data)
#######################################################################
# Tan
# ---
def _test_tan(data):
    """One iteration of TAN via the generic unary-elemwise helper."""
    op = math_ops.tan
    return _test_unary_elemwise(op, data)
#######################################################################
# Square
# ------
def _test_square(data):
    """One iteration of SQUARE via the generic unary-elemwise helper."""
    op = math_ops.square
    return _test_unary_elemwise(op, data)
#######################################################################
# Elu
# ---
def _test_elu(data):
    """One iteration of ELU via the generic unary-elemwise helper."""
    op = nn_ops.elu
    return _test_unary_elemwise(op, data)
def _test_forward_unary_elemwise(test_op):
    """Invoke *test_op* on a suitable input (positive-only where required)."""
    # functions that need positive input
    needs_positive = test_op.__name__ in {"_test_log"}
    if needs_positive:
        sample = np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3))
    else:
        sample = np.random.uniform(-10, 10, (3, 2)).astype(np.float32)
    test_op(sample)
def test_all_unary_elemwise():
    """All supported unary elemwise ops."""
    for op in (_test_floor, _test_exp, _test_log, _test_square):
        _test_forward_unary_elemwise(op)
    # ceil and cos come with TFLite 1.14.0.post1 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        for op in (_test_ceil, _test_cos, _test_round):
            _test_forward_unary_elemwise(op)
        # This fails with TF and Tflite 1.15.2, this could not have been tested
        # in CI or anywhere else. The failure mode is that we see a backtrace
        # from the converter that we need to provide a custom Tan operator
        # implementation.
        # _test_forward_unary_elemwise(_test_tan)
        _test_forward_unary_elemwise(_test_elu)
#######################################################################
# Element-wise
# ------------
def _test_elemwise(
    math_op,
    data,
    fused_activation_function=None,
    quantized=False,
    qnn_op=None,
    same_qnn_params=False,
):
    """One iteration of an elementwise binary op.

    Runs *math_op* on tensor/tensor, tensor/constant and constant/tensor input
    combinations.  *data* is a pair of numpy arrays; *qnn_op* selects the fp32
    output range for fake-quantization; *same_qnn_params* forces identical
    quantization parameters for inputs and output.
    (Only change vs. original: PEP 8 `is not None` instead of `None != x`.)
    """
    assert len(data) == 2

    def __test_elemwise(in_data):
        # Each in_data entry is a placeholder, or None meaning "feed this
        # operand as a constant built from the corresponding data entry".
        assert 2 == len(in_data)
        if quantized:
            # set the fp32 output range with respect to the operation
            out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
            inq0_min, inq0_max = (-100, 100)
            inq1_min, inq1_max = (-50, 50)

            # if requested use same quantization parameters provided by _test_elemwise_qnn_out_range
            if same_qnn_params:
                inq0_min, inq0_max = (out_min, out_max)
                inq1_min, inq1_max = (out_min, out_max)

            # fake_quant will keep the tensors in float32 until the conversion in the session
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=out_min, max=out_max, name="inq_0"
                )
                if in_data[0] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[0], min=out_min, max=out_max, name="const_tensor0"
                ),
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[1], min=out_min, max=out_max, name="inq_1"
                )
                if in_data[1] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[1], min=out_min, max=out_max, name="const_tensor1"
                ),
            ]

            # input_range covers only the placeholder (non-constant) operands.
            input_range = {
                x[1][0]: x[1][1]
                for x in zip(
                    in_data, (("inq_0", (inq0_min, inq0_max)), ("inq_1", (inq1_min, inq1_max)))
                )
                if x[0] is not None
            }

            out = math_op(inq_data[0], inq_data[1])
            out = with_fused_activation_function(out, fused_activation_function)
            out = tf.quantization.fake_quant_with_min_max_args(
                out, min=out_min, max=out_max, name="out"
            )

            # Note same_qnn_params uses experimental_new_converter as toco failed
            compare_tflite_with_tvm(
                [x[1] for x in zip(in_data, data) if x[0] is not None],
                [x + ":0" for x in input_range.keys()],
                [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=same_qnn_params,
            )
        else:
            out = math_op(
                in_data[0]
                if in_data[0] is not None
                else ops.convert_to_tensor(data[0], dtype=data[0].dtype),
                in_data[1]
                if in_data[1] is not None
                else ops.convert_to_tensor(data[1], dtype=data[1].dtype),
            )
            out = with_fused_activation_function(out, fused_activation_function)
            compare_tflite_with_tvm(
                [x[1] for x in zip(in_data, data) if x[0] is not None],
                [x[1] for x in zip(in_data, ("in_0:0", "in_1:0")) if x[0] is not None],
                [x for x in in_data if x is not None],
                [out],
            )

    # Test with two tensors
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[
                array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"),
                array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"),
            ]
        )
    # Test with tensor and constant
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"), None]
        )
    # Test with constant and tensor
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[None, array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1")]
        )
#######################################################################
# Add
# ---
def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of ADD via the generic elementwise driver."""
    elemwise_op = math_ops.add
    return _test_elemwise(elemwise_op, data, fused_activation_function, quantized, qnn_op)
#######################################################################
# Subtract
# --------
def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of SUB via the generic elementwise driver."""
    elemwise_op = math_ops.subtract
    return _test_elemwise(elemwise_op, data, fused_activation_function, quantized, qnn_op)
#######################################################################
# Mul
# ---
def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of MUL via the generic elementwise driver."""
    elemwise_op = math_ops.multiply
    return _test_elemwise(elemwise_op, data, fused_activation_function, quantized, qnn_op)
#######################################################################
# Divide
# ------
def _test_div(data, fused_activation_function=None):
    """One iteration of DIV via the generic elementwise driver (float only)."""
    elemwise_op = math_ops.divide
    return _test_elemwise(elemwise_op, data, fused_activation_function)
#######################################################################
# Power
# -----
def _test_pow(data):
    """One iteration of POW via the generic elementwise driver."""
    elemwise_op = math_ops.pow
    return _test_elemwise(elemwise_op, data)
#######################################################################
# Maximum
# -------
def _test_maximum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of MAXIMUM; inputs and output share quantization params."""
    return _test_elemwise(
        math_ops.maximum,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
    )
#######################################################################
# Minimum
# -------
def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of MINIMUM; inputs and output share quantization params."""
    return _test_elemwise(
        math_ops.minimum,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
    )
#######################################################################
# Greater
# -------
def _test_greater(data):
    """One iteration of GREATER via the generic elementwise driver."""
    cmp_op = math_ops.greater
    return _test_elemwise(cmp_op, data)
#######################################################################
# Greater_equal
# -------------
def _test_greater_equal(data):
    """One iteration of GREATER_EQUAL via the generic elementwise driver."""
    cmp_op = math_ops.greater_equal
    return _test_elemwise(cmp_op, data)
#######################################################################
# Less
# ----
def _test_less(data):
    """One iteration of LESS via the generic elementwise driver."""
    cmp_op = math_ops.less
    return _test_elemwise(cmp_op, data)
#######################################################################
# Less_equal
# ----------
def _test_less_equal(data):
    """One iteration of LESS_EQUAL via the generic elementwise driver."""
    cmp_op = math_ops.less_equal
    return _test_elemwise(cmp_op, data)
#######################################################################
# Equal
# -----
def _test_equal(data):
    """One iteration of EQUAL via the generic elementwise driver."""
    cmp_op = math_ops.equal
    return _test_elemwise(cmp_op, data)
#######################################################################
# Not_equal
# ---------
def _test_not_equal(data):
    """One iteration of NOT_EQUAL via the generic elementwise driver."""
    cmp_op = math_ops.not_equal
    return _test_elemwise(cmp_op, data)
#######################################################################
# Squared_difference
# ------------------
def _test_squared_difference(data):
    """One iteration of SQUARED_DIFFERENCE via the generic elementwise driver."""
    elemwise_op = math_ops.squared_difference
    return _test_elemwise(elemwise_op, data)
#######################################################################
# Floor_divide
# ------------
def _test_floor_divide(data):
    """One iteration of FLOOR_DIV via the generic elementwise driver."""
    elemwise_op = math_ops.floordiv
    return _test_elemwise(elemwise_op, data)
#######################################################################
# Floor_mod
# ---------
def _test_floor_mod(data):
    """One iteration of FLOOR_MOD via the generic elementwise driver."""
    elemwise_op = math_ops.floormod
    return _test_elemwise(elemwise_op, data)
def _test_forward_elemwise(testop):
"""Elewise"""
testop(
[
np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
]
)
testop(
[
np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),
np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
]
)
testop(
[
np.arange(3.0, dtype=np.float32).reshape((1, 3)),
np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
]
)
def _test_forward_elemwise_quantized(testop):
testop(
[
np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
],
quantized=True,
qnn_op=testop,
)
def _test_elemwise_qnn_out_range(qnn_op):
    """Return the fake-quant output (min, max) range to use for a quantized op.

    The ranges are chosen with respect to the float32 range of the input tensors.
    """
    out_ranges = {
        _test_add: (-150, 150),
        _test_sub: (-150, 150),
        _test_mul: (-5e3, 5e3),
        _test_maximum: (-112, 111),
        _test_minimum: (-128, 127),
    }
    return out_ranges[qnn_op]
def test_all_elemwise():
    """Run every elementwise operator test (float, quantized, fused activations)."""
    # (op, has quantized variant, fused activation functions to exercise)
    spec = [
        # RELU6 on add segfaults since the tf 1.15.2 upgrade; needs investigation.
        (_test_add, True, ["RELU"]),
        (_test_sub, True, ["RELU", "RELU6"]),
        (_test_mul, True, ["RELU", "RELU6"]),
        (_test_div, False, ["RELU", "RELU6"]),
        (_test_pow, False, []),
        (_test_maximum, True, []),
        (_test_minimum, True, []),
        (_test_greater, False, []),
        (_test_squared_difference, False, []),
        (_test_greater_equal, False, []),
        (_test_less, False, []),
        (_test_less_equal, False, []),
        (_test_equal, False, []),
        (_test_not_equal, False, []),
    ]
    for op, has_quantized, activations in spec:
        _test_forward_elemwise(op)
        if has_quantized:
            _test_forward_elemwise_quantized(op)
        for activation in activations:
            _test_forward_elemwise(partial(op, fused_activation_function=activation))
    # Floor ops require the TFLite >= 1.14.0 schema.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_forward_elemwise(_test_floor_divide)
        _test_forward_elemwise(_test_floor_mod)
#######################################################################
# AddN
# ----
def _test_forward_add_n(inputs):
    """One iteration of ADD_N: sum an iterable of same-shape tensors.

    `inputs` may be a list/tuple of ndarrays or a single ndarray (iterated
    along its first axis). One placeholder is created per element and the
    TFLite and TVM results of tf.add_n over them are compared.
    """
    tf.reset_default_graph()
    with tf.Graph().as_default():
        # One placeholder per input tensor; comprehension replaces the
        # manual append loop, and list(inputs) replaces the needless
        # [each for each in inputs] copy comprehension.
        temp = [tf.placeholder(shape=each.shape, dtype=each.dtype) for each in inputs]
        output = tf.add_n(temp)
        compare_tflite_with_tvm(
            list(inputs),
            [each.name for each in temp],
            temp,
            [output],
        )
def test_forward_add_n():
    """ADD_N over varying numbers of int32 and float32 tensors."""
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        x, y, z = (np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32) for _ in range(3))
        m, n, o = (arr.astype(np.float32) for arr in (x, y, z))
        # Single array, list of two, tuple of three — int32 then float32.
        for case in (x, [x, y], (x, y, z), m, [m, n], (m, n, o)):
            _test_forward_add_n(case)
#######################################################################
# Logical operators
# -----------------
def _test_logical_binary(logical_bin_op, data):
    """One iteration of a logical op on two boolean placeholders.

    logical_not is unary, so for it a logical_or node is built first and the
    NOT is applied to that intermediate result; every other op is applied
    directly to the two inputs.
    """
    with tf.Graph().as_default():
        # Explicit names so compare_tflite_with_tvm can feed by tensor name.
        in_data = [
            array_ops.placeholder(shape=data[0].shape, dtype="bool", name="in_0"),
            array_ops.placeholder(shape=data[1].shape, dtype="bool", name="in_1"),
        ]
        if logical_bin_op == math_ops.logical_not:
            out = math_ops.logical_or(in_data[0], in_data[1], name="out1")
            out = logical_bin_op(out, name="out")
        else:
            out = logical_bin_op(in_data[0], in_data[1], name="out")
        compare_tflite_with_tvm(data, ["in_0:0", "in_1:0"], in_data, [out])
def _test_forward_logical_and(data):
    """One iteration of LOGICAL_AND via the binary logical driver."""
    bin_op = math_ops.logical_and
    return _test_logical_binary(bin_op, data)
def _test_forward_logical_or(data):
    """One iteration of LOGICAL_OR via the binary logical driver."""
    bin_op = math_ops.logical_or
    return _test_logical_binary(bin_op, data)
def _test_forward_logical_not(data):
    """One iteration of LOGICAL_NOT via the binary logical driver."""
    unary_op = math_ops.logical_not
    return _test_logical_binary(unary_op, data)
def test_all_logical():
    """Run AND/OR/NOT logical-op tests on random boolean tensors."""
    data = [
        np.random.choice(a=[False, True], size=(2, 3, 4)).astype("bool"),
        np.random.choice(a=[False, True], size=(2, 3, 4)).astype("bool"),
    ]
    # The boolean dtype is only supported from TFLite 1.15.0 onwards.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        for logical_test in (
            _test_forward_logical_and,
            _test_forward_logical_or,
            _test_forward_logical_not,
        ):
            logical_test(data)
#######################################################################
# Zeros like
# ----------
def _test_zeros_like(data):
    """One iteration of ZEROS_LIKE: build a zeros tensor shaped like the input."""
    with tf.Graph().as_default():
        # No explicit name, so TF assigns the default "Placeholder" used below.
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = gen_array_ops.zeros_like(in_data)
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_zeros_like():
    """ZEROS_LIKE on a small (1, 6) float32 tensor."""
    sample = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_zeros_like(sample)
#######################################################################
# Fill
# ----
def _test_fill(dims, value_data, value_dtype):
    """Use the fill op to create a tensor of value_data with constant dims.

    Two graphs are exercised: fill from a scalar placeholder (TF >= 1.14
    only), and fill from a constant value added to a placeholder so the op
    survives conversion as a static tensor.
    """
    value_data = np.array(value_data, dtype=value_dtype)
    # TF 1.13 TFLite convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        with tf.Graph().as_default():
            # Scalar placeholder feeds the fill value directly.
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims, value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])
    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        # Fill op gets converted to static tensor during conversion
        out = tf.fill(dims, value_data)
        # Adding a placeholder keeps the graph from being constant-folded away.
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1])
def test_forward_fill():
    """Test FILL op with int32/float32 values and 1-D/4-D shapes."""
    cases = (
        ((1, 2, 2, 4), 5, "int32"),
        ((1, 2, 2, 4), 5, "float32"),
        ((5,), 5, "int32"),
    )
    for dims, value, dtype in cases:
        _test_fill(dims, value, dtype)
#######################################################################
# Reduce
# ------
def _test_reduce(math_op, data, keep_dims=None):
    """One iteration of a reduce op.

    data[0] is the input tensor; data[1] holds the reduction axes (or None
    to reduce over all axes).
    """
    assert len(data) == 2
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in")
        out = math_op(in_data, data[1], keep_dims)
        compare_tflite_with_tvm([data[0]], ["in:0"], [in_data], [out])
def _test_reduce_quantize(math_op, data, keep_dims=None):
    """One iteration of a reduce op on fake-quantized input.

    data[0] is the input tensor; data[1] holds the reduction axes.
    """
    assert len(data) == 2
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        inq_data = [
            tf.quantization.fake_quant_with_min_max_args(
                in_data[0], min=-100, max=100, name="inq_0"
            )
        ]
        input_range = {"inq_0": (-100, 100)}
        # NOTE(review): the whole one-element list is passed to math_op, so TF
        # converts it to a tensor with an extra leading length-1 axis; the axes
        # in data[1] are relative to that expanded shape. Confirm this is
        # intentional before "simplifying" it to inq_data[0].
        out = math_op(inq_data, data[1], keep_dims)
        out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
        compare_tflite_with_tvm(
            [data[0]], ["inq_0:0"], [inq_data[0]], [out], quantized=True, input_range=input_range
        )
#######################################################################
# Reduce_min
# ----------
def _test_reduce_min(data, keep_dims=None):
    """One iteration of REDUCE_MIN via the generic reduce driver."""
    reduce_op = math_ops.reduce_min
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_max
# ----------
def _test_reduce_max(data, keep_dims=None):
    """One iteration of REDUCE_MAX via the generic reduce driver."""
    reduce_op = math_ops.reduce_max
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_mean
# -----------
def _test_reduce_mean(data, keep_dims=None, quantized=False):
    """One iteration of REDUCE_MEAN, via the quantized or float reduce driver."""
    driver = _test_reduce_quantize if quantized else _test_reduce
    return driver(math_ops.reduce_mean, data, keep_dims)
#######################################################################
# Reduce_prod
# -----------
def _test_reduce_prod(data, keep_dims=None):
    """One iteration of REDUCE_PROD via the generic reduce driver."""
    reduce_op = math_ops.reduce_prod
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_sum
# -----------
def _test_reduce_sum(data, keep_dims=None):
    """One iteration of REDUCE_SUM via the generic reduce driver."""
    reduce_op = math_ops.reduce_sum
    return _test_reduce(reduce_op, data, keep_dims)
#######################################################################
# Reduce_any
# ----------
def _test_reduce_any(data, keep_dims=None):
    """One iteration of REDUCE_ANY via the generic reduce driver."""
    reduce_op = math_ops.reduce_any
    return _test_reduce(reduce_op, data, keep_dims)
def _test_forward_reduce(testop, dtype="float32"):
"""Reduce"""
if dtype == "bool":
data0 = [np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype), None]
data1 = [
np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype),
np.array(1, dtype=np.int32),
]
data2 = [
np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype),
np.array([1, 2], dtype=np.int32),
]
else:
data0 = [np.random.rand(16, 16, 16, 16).astype(dtype), None]
data1 = [np.random.rand(16, 16, 16, 16).astype(dtype), np.array(1, dtype=np.int32)]
data2 = [np.random.rand(16, 16, 16, 16).astype(dtype), np.array([1, 2], dtype=np.int32)]
for data in [data0, data1, data2]:
testop(data)
testop(data, keep_dims=False)
testop(data, keep_dims=True)
def _test_forward_reduce_quantized(testop):
data0 = [
np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
np.array([1, 2], dtype=np.int32),
]
testop(data0, quantized=True)
testop(data0, keep_dims=False, quantized=True)
testop(data0, keep_dims=True, quantized=True)
def test_all_reduce():
    """Run all reduce-op tests (min/max/mean/prod/sum, plus quantized mean and any)."""
    for reduce_test in (_test_reduce_min, _test_reduce_max, _test_reduce_mean):
        _test_forward_reduce(reduce_test)
    _test_forward_reduce_quantized(_test_reduce_mean)
    _test_forward_reduce(_test_reduce_prod)
    _test_forward_reduce(_test_reduce_sum)
    # REDUCE_ANY needs the bool dtype, supported from TFLite 1.15.0.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_forward_reduce(_test_reduce_any, dtype="bool")
#######################################################################
# Arg_min_max
# -----------
def _test_arg_min_max(math_op, data, axis, quantized=False):
    """One iteration of arg_min_max.

    In quantized mode the input is fake-quantized to the (qmin, qmax) range
    before the arg op; otherwise the placeholder feeds the op directly.
    """
    with tf.Graph().as_default():
        t_name = "in"
        in_data = array_ops.placeholder(shape=data.shape, dtype=np.float32, name=t_name)
        input_range = None
        qmin, qmax = -100, 102
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=qmin, max=qmax, name="q" + t_name
            )
            # Strip the ":0" suffix so the range is keyed by the op name.
            input_range = {inq_data.name.split(":")[0]: (qmin, qmax)}
            out = math_op(input=inq_data, axis=axis)
            compare_tflite_with_tvm(
                [data], [inq_data.name], [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_op(input=in_data, axis=axis)
            compare_tflite_with_tvm([data], [in_data.name], [in_data], [out])
def test_forward_arg_min_max():
    """ARG_MAX (quantized and float) and ARG_MIN (float) over several axes."""
    axes = [None, 0, 1, -1]
    # There is no quantized version of ArgMin, so only ArgMax runs quantized.
    quant_data = np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.uint8)
    for axis in axes:
        _test_arg_min_max(math_ops.argmax, quant_data, axis, True)
    float_data = np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.float32)
    for axis in axes:
        _test_arg_min_max(math_ops.argmax, float_data, axis)
        _test_arg_min_max(math_ops.argmin, float_data, axis)
#######################################################################
# Select, Where
# -------------
def test_forward_select():
    """SELECT/WHERE: choose between two int32 tensors via a comparison mask."""
    with tf.Graph().as_default():
        with tf.Session() as sess:
            input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
            input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
            # Elementwise boolean mask drives tf.where between the branches.
            mask = input1 > input2
            out = tf.where(mask, input1 + 1, input2 * 2)
            in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            compare_tflite_with_tvm(
                [in_data1, in_data2], ["input1:0", "input2:0"], [input1, input2], [out]
            )
@pytest.mark.parametrize("quant_bits", [2, 4, 8, 16])
@pytest.mark.parametrize(
    "value, min, max", [[-10.11, -6, 6], [-3.55, -6, 6], [0, -6, 6], [3.55, -6, 6], [10.11, -6, 6]]
)
def test_forward_fake_quant(value, min, max, quant_bits):
    """FAKE_QUANT_WITH_MIN_MAX_ARGS over a scalar input.

    The `min`/`max` parameters shadow builtins but must keep these names to
    match the pytest parametrize ids above.
    """
    with tf.Graph().as_default():
        # Session left unnamed: the previous `as sess` binding was unused.
        with tf.Session():
            # `inp` avoids shadowing the builtin `input`; the graph tensor is
            # still named "input" so the feed name below keeps matching.
            inp = tf.placeholder(tf.float32, shape=[1], name="input")
            out = tf.quantization.fake_quant_with_min_max_args(
                inp, min=min, max=max, num_bits=quant_bits, name=None
            )
            in_data = np.float32(value)
            compare_tflite_with_tvm([in_data], ["input:0"], [inp], [out])
# Squeeze
# -------
def _test_squeeze(data, squeeze_dims=None):
    """One iteration of SQUEEZE, optionally restricted to the given axes."""
    dims = [] if squeeze_dims is None else squeeze_dims
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # Only forward the dims argument when it is non-empty.
        out = array_ops.squeeze(in_data, dims) if dims else array_ops.squeeze(in_data)
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_squeeze():
    """SQUEEZE the singleton axes of two small tensors."""
    for shape, dims in (((1, 2, 1, 3), [0, 2]), ((2, 1, 3, 1), [1, 3])):
        _test_squeeze(np.arange(6).reshape(shape), dims)
#######################################################################
# Quantize/DeQuantize
# -------------------
def _test_quantize_dequantize(data):
    """One iteration of quantize and dequantize"""
    # Keras model to force TFLite converter to insert 2 TFLite quantize ops.
    # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize.
    # Second TFLite quantize op converts int8 tensor to int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data_in, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    input_name = data_in.name.split(":")[0]
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for i in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_name)
    # Loose atol: int8 quantization introduces rounding differences.
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )
def _test_quantize_dequantize_const(data):
    """One iteration of quantize and dequantize"""
    # Keras model to force TFLite converter to insert 2 TFLite quantize ops.
    # First TFLite quantize op converts float32 tensor to int8 tensor - Qnn quantize.
    # Second TFLite quantize op converts int8 tensor to int8 tensor - Qnn requantize.
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    # The numpy array `data` (not the graph input) is one Add operand here,
    # so the converter embeds it as a constant tensor in the model.
    add = tf.keras.layers.Add()([data, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)
    input_name = data_in.name.split(":")[0]
    # To create quantized values with dynamic range of activations, needs representative dataset
    def representative_data_gen():
        for i in range(1):
            yield [data]
    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)
    tflite_output = run_tflite_graph(tflite_model_quant, data)
    tvm_output = run_tvm_graph(tflite_model_quant, data, input_name)
    # Loose atol: int8 quantization introduces rounding differences.
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )
def test_forward_quantize_dequantize():
    """Quantize/Dequantize round trip (tensor and constant variants)."""
    data = np.random.uniform(0, 1, (1, 4, 4, 3)).astype("float32")
    # The keras-based converter path needs TF >= 2.1.0.
    if package_version.parse(tf.VERSION) < package_version.parse("2.1.0"):
        return
    _test_quantize_dequantize(data)
    _test_quantize_dequantize_const(data)
#######################################################################
# Pad
# ---
def _test_pad(data, mode="CONSTANT", quantized=False):
    """One iteration of PAD.

    data[0] is the input tensor; data[1] is the paddings matrix. In quantized
    mode the input is fake-quantized to (-100, 100) before padding.
    """
    assert len(data) == 2
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        if quantized:
            # fake_quant will keep the tensors in float32 until the conversion in the session
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            out = array_ops.pad(
                inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            out = array_ops.pad(
                in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])
def test_forward_pad():
    """PAD over several ranks, modes, padding dtypes and a quantized input."""

    def _case(shape, paddings, pad_dtype=np.int32):
        # Input counts 1..N reshaped to `shape`; paddings as a constant matrix.
        count = int(np.prod(shape))
        return [
            np.arange(1.0, count + 1.0, dtype=np.float32).reshape(shape),
            np.array(paddings, dtype=pad_dtype),
        ]

    _test_pad(_case((2, 1, 1, 3), [[1, 1], [2, 2], [1, 1], [2, 2]]))
    _test_pad(_case((2, 1, 3), [[2, 2], [1, 1], [1, 1]]))
    _test_pad(_case((2, 3), [[1, 1], [2, 2]]))
    _test_pad(_case((1, 3), [[1, 1], [2, 2]]))
    _test_pad(_case((2, 3), [[1, 1], [2, 2]]), mode="REFLECT")
    _test_pad(_case((2, 3), [[1, 1], [2, 2]]), mode="SYMMETRIC")
    # int64 paddings
    _test_pad(_case((2, 3), [[1, 1], [2, 2]], np.int64), mode="REFLECT")
    _test_pad(_case((2, 3), [[1, 1], [2, 2]], np.int64), mode="SYMMETRIC")
    # quantized uint8 input
    _test_pad(
        [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        quantized=True,
    )
#######################################################################
# PADV2
# -----
def _test_padv2(data, mode="CONSTANT", quantized=False):
    """One iteration of PADV2.

    data[0] is the input tensor, data[1] the paddings matrix, and the optional
    data[2] the constant fill value. In quantized mode both input and (when
    present) constant value are fake-quantized to (-100, 100).
    """
    assert len(data) == 2 or len(data) == 3
    with_constant_values = len(data) == 3
    # Test with tensor and constant
    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        if quantized:
            # fake_quant will keep the tensors in float32 until the conversion in the session
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            if with_constant_values:
                in_constant_values = constant_op.constant(
                    data[2], shape=data[2].shape, dtype="float32", name="in_constant_values"
                )
                inq_constant_values = tf.quantization.fake_quant_with_min_max_args(
                    in_constant_values, min=-100, max=100, name="inq_constant_values"
                )
                out = array_ops.pad_v2(
                    inq_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=inq_constant_values,
                    mode=mode,
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-100, max=100, name="out"
                )
            else:
                out = array_ops.pad_v2(
                    inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            if with_constant_values:
                out = array_ops.pad_v2(
                    in_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=ops.convert_to_tensor(data[2], dtype=data[2].dtype),
                    mode=mode,
                )
            else:
                out = array_ops.pad_v2(
                    in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])
def test_forward_padv2():
    """PADV2 with and without constant fill values, float and quantized."""

    def _case(shape, paddings, constant_values=None):
        # Float input counting 1..N plus an int32 paddings matrix; an optional
        # third element carries the constant fill value.
        count = int(np.prod(shape))
        data = [
            np.arange(1.0, count + 1.0, dtype=np.float32).reshape(shape),
            np.array(paddings, dtype=np.int32),
        ]
        if constant_values is not None:
            data.append(constant_values)
        return data

    def _quant_case(constant_values=None):
        # Full-range uint8 input for the quantized path.
        data = [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
        if constant_values is not None:
            data.append(constant_values)
        return data

    # Tests without constant_values
    _test_padv2(_case((2, 1, 1, 3), [[1, 1], [2, 2], [1, 1], [2, 2]]))
    _test_padv2(_case((2, 1, 3), [[2, 2], [1, 1], [1, 1]]))
    _test_padv2(_case((2, 3), [[1, 1], [2, 2]]))
    _test_padv2(_case((1, 3), [[1, 1], [2, 2]]))
    _test_padv2(_case((2, 3), [[1, 1], [2, 2]]), mode="REFLECT")
    _test_padv2(_case((2, 3), [[1, 1], [2, 2]]), mode="SYMMETRIC")
    _test_padv2(_quant_case(), quantized=True)

    # Tests with constant_values
    _test_padv2(
        _case((2, 1, 1, 3), [[1, 1], [2, 2], [1, 1], [2, 2]], np.array([2], dtype=np.float32))
    )
    _test_padv2(_case((2, 1, 3), [[2, 2], [1, 1], [1, 1]], np.array([1], dtype=np.float32)))
    _test_padv2(_case((2, 3), [[1, 1], [2, 2]], np.array([-1], dtype=np.float32)))
    _test_padv2(_case((1, 3), [[1, 1], [2, 2]], np.array([2], dtype=np.float32)))
    # NOTE: In versions > 2.1.0, there is a bug in Tensorflow package for this scenario.
    # Hence, it is disabled temporarily for TF version > 2.1.0 .
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(_quant_case(np.array([2], dtype=np.float32)), quantized=True)
    # Constant Values input can be scalar
    _test_padv2(_case((2, 1, 1, 3), [[1, 1], [2, 2], [1, 1], [2, 2]], np.float32(2)))
    # Same TF > 2.1.0 bug applies to the quantized scalar-value scenario.
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(_quant_case(np.uint8(10)), quantized=True)
#######################################################################
# EXPAND_DIMS
# -----------
def _test_expand_dims(input_shape, input_type, axis, quantized=False):
    """One iteration of EXPAND_DIMS at the given axis (scalar or 1-element array)."""
    with tf.Graph().as_default():
        axis = ops.convert_to_tensor(axis, dtype=axis.dtype)
        if quantized:
            # ignoring input_type as quantized requires uint8
            # NOTE(review): `input` shadows the builtin; kept as-is here since
            # a doc-only pass must not rename locals.
            input = np.random.uniform(0, 256, input_shape).astype("uint8")
            in_input = tf.placeholder(dtype="float32", shape=input.shape, name="input")
            input_range = {"q_input": (-100, 100)}
            inq_input = tf.quantization.fake_quant_with_min_max_args(
                in_input, min=-100, max=100, name="q_input"
            )
            out = array_ops.expand_dims(inq_input, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            compare_tflite_with_tvm(
                [input], ["q_input"], [inq_input], [out], quantized=True, input_range=input_range
            )
        else:
            input = np.random.uniform(-100, 100, input_shape).astype(input_type)
            in_input = tf.placeholder(dtype=input.dtype, shape=input.shape, name="input")
            out = array_ops.expand_dims(in_input, axis=axis)
            compare_tflite_with_tvm([input], ["input"], [in_input], [out])
def test_forward_expand_dims():
    """EXPAND_DIMS with scalar and 1-element-array axes, float and quantized."""
    cases = (
        ((6, 2, 7, 5), "float32", np.int32(0)),
        ((1, 2, 3), "int32", np.int32(-2)),
        ((2, 4, 5), "float32", np.array([1], dtype=np.int32)),
    )
    for quantized in (False, True):
        for shape, dtype, axis in cases:
            _test_expand_dims(shape, dtype, axis, quantized=quantized)
#######################################################################
# ONE_HOT
# -------
def _test_one_hot(indices, depth, on_value, off_value, axis=None):
    """One iteration of ONE_HOT.

    depth becomes a graph constant; indices, on_value and off_value are fed
    through placeholders.
    """
    with tf.Graph().as_default():
        in_indices = tf.placeholder(dtype=indices.dtype, shape=indices.shape, name="indices")
        in_depth = ops.convert_to_tensor(depth, dtype=depth.dtype)
        in_on_value = tf.placeholder(dtype=on_value.dtype, shape=on_value.shape, name="on_value")
        in_off_value = tf.placeholder(
            dtype=off_value.dtype, shape=off_value.shape, name="off_value"
        )
        # Only pass axis when the caller specified one; tf defaults to -1.
        if axis is not None:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value, axis=axis)
        else:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value)
        compare_tflite_with_tvm(
            [indices, on_value, off_value],
            ["indices", "on_value", "off_value"],
            [in_indices, in_on_value, in_off_value],
            [out],
        )
def test_forward_one_hot():
    """ONE_HOT with scalar and vector indices, int and float on/off values."""
    _test_one_hot(np.int32(2), np.int32(8), np.int32(1), np.int32(0))
    _test_one_hot(np.int32(4), np.int32(8), np.float32(1), np.float32(0))
    indices = np.array([1, 2, 3], dtype=np.int32)
    _test_one_hot(indices, np.int32(8), np.int32(3), np.int32(-1))
    _test_one_hot(indices, np.int32(8), np.int32(3), np.int32(-1), axis=0)
#######################################################################
# Pack
# ----
def _test_pack(data, is_var, axis, quantized=False):
    """One iteration of PACK along `axis`.

    is_var[i] selects whether data[i] is fed via a placeholder (True) or
    baked in as a graph constant (False). In quantized mode every input is
    fake-quantized to (-100, 100) first.
    """
    assert len(data) >= 1
    assert len(data) == len(is_var)
    if quantized:
        with tf.Graph().as_default():
            # Placeholder or constant per input, depending on is_var.
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype="float32", name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype="float32", name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    i_data, min=-100, max=100, name="inq_{}".format(idx)
                )
                for idx, i_data in enumerate(in_data)
            ]
            input_range = {}
            for i in range(len(data)):
                input_range["inq_{}".format(i)] = (-100, 100)
            out = array_ops.pack(inq_data, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            name = ["inq_{}:0".format(idx) for idx in range(len(data))]
            compare_tflite_with_tvm(
                data, name, inq_data, [out], quantized=True, input_range=input_range
            )
    else:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype=d.dtype, name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype=d.dtype, name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            out = array_ops.pack(in_data, axis=axis)
            name = [_.name for _ in in_data]
            compare_tflite_with_tvm(data, name, in_data, [out], experimental_new_converter=True)
def test_forward_pack():
    """PACK tensors and constants along various axes, float and quantized."""
    _test_pack([np.int32(1), np.int32(5)], [False, False], 0)
    _test_pack([np.array([1, 4]), np.array([2, 5]), np.array([3, 6])], [True, False, False], 0)
    _test_pack(
        [np.arange(6).reshape((1, 2, 1, 3)), np.arange(6).reshape((1, 2, 1, 3))], [True, True], 1
    )
    _test_pack([np.arange(6).reshape((3, 2)), np.arange(6).reshape((3, 2))], [True, True], 1)
    # Three-way pack: float path then quantized uint8 path.
    _test_pack([np.arange(6).reshape((2, 1, 1, 3)) for _ in range(3)], [True] * 3, 1)
    _test_pack(
        [np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)) for _ in range(3)],
        [True] * 3,
        1,
        quantized=True,
    )
#######################################################################
# Unpack
# ------
def _test_unpack(data, axis, num_unpacks):
    """One iteration of UNPACK: split `data` into num_unpacks tensors along `axis`."""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = gen_array_ops.unpack(in_data, num=num_unpacks, axis=axis, name="unpack")
        # NOTE(review): these "out_N:0" names don't match the "unpack" op name
        # above — presumably they are output names assigned by
        # compare_tflite_with_tvm rather than graph tensor names; confirm there.
        out_names = ["out_" + str(n) + ":0" for n in range(num_unpacks)]
        compare_tflite_with_tvm([data], "Placeholder:0", [in_data], out, out_names=out_names)
def test_forward_unpack():
    """UNPACK: table-driven cases of (shape, dtype, axis, num_unpacks)."""
    cases = [
        ((3, 1), np.int32, 1, 1),
        ((3, 4), np.float32, 0, 3),
        ((3, 1, 2), np.float32, 0, 3),
    ]
    # tflite 1.13 doesn't accept negative axis
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        cases += [((3, 6), np.int32, -2, 3), ((2, 3, 4), np.int32, -3, 2)]
    for shape, dtype, axis, num_unpacks in cases:
        _test_unpack(
            np.array(np.random.uniform(0, 5, shape), dtype=dtype),
            axis=axis,
            num_unpacks=num_unpacks,
        )
#######################################################################
# Local response normalization
# ----------------------------
def _test_local_response_normalization(data, depth_radius, bias, alpha, beta):
    """One iteration of LOCAL_RESPONSE_NORMALIZATION with the given LRN parameters."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        lrn_out = nn_ops.local_response_normalization(
            placeholder, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta
        )
        compare_tflite_with_tvm(data, "in_0:0", [placeholder], [lrn_out])
def test_forward_local_response_normalization():
    """LOCAL_RESPONSE_NORMALIZATION"""
    data = np.random.uniform(size=(1, 6, 4, 3)).astype("float32")
    # LOCAL_RESPONSE_NORMALIZATION come with TFLite >= 1.14.0 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        lrn_params = dict(depth_radius=5, bias=1, alpha=1, beta=0.5)
        _test_local_response_normalization(data, **lrn_params)
#######################################################################
# L2 normalization
# ----------------
def _test_l2_normalization(data, axis, fused_activation_function=None):
    """One iteration of L2_NORMALIZATION, optionally with a fused activation."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        normalized = nn_impl.l2_normalize(placeholder, axis)
        normalized = with_fused_activation_function(normalized, fused_activation_function)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [normalized])
def test_forward_l2_normalization():
    """L2_NORMALIZATION, with and without a fused RELU."""
    data = np.random.uniform(size=(3, 6, 4)).astype("float32")
    for activation in (None, "RELU"):
        _test_l2_normalization(data, axis=2, fused_activation_function=activation)
#######################################################################
# Logistic
# --------
def _test_logistic(data, quantized=False):
    """One iteration of LOGISTIC.

    Builds a one-op sigmoid graph and compares the TFLite conversion against
    TVM.  When ``quantized`` is True the input is fake-quantized over [-5, 5]
    and the output over [0, 1] (the full range of a sigmoid).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-5, max=5, name="inq_0"
            )
            input_range = {"inq_0": (-5, 5)}
            out = math_ops.sigmoid(inq_data)
            # Sigmoid output always lies in [0, 1], hence the output quant range.
            out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=1, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_ops.sigmoid(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_logistic():
    """LOGISTIC"""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_logistic(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_logistic(quant_input, quantized=True)
#######################################################################
# Softmax
# -------
def _test_softmax(data):
    """One iteration of softmax over a single placeholder input."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        softmax_out = nn_ops.softmax(placeholder)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [softmax_out])
def test_forward_softmax():
    """Softmax over 2-D and 3-D inputs."""
    for shape in ((1, 6), (1, 2, 3)):
        _test_softmax(np.arange(6.0, dtype=np.float32).reshape(shape))
######################################################################
# Log_softmax
# -----------
def _test_log_softmax(data, quantized=False):
    """One iteration of log_softmax.

    When ``quantized`` is True, input is fake-quantized over [-10, 10] and the
    output over [-20, 0] (log-softmax is always <= 0).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-10, max=10, name="inq_0"
            )
            input_range = {"inq_0": (-10, 10)}
            # tflite log_softmax supports only the case when axis is not specified
            out = nn_ops.log_softmax(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-20, max=0, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.log_softmax(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_log_softmax():
    """Log_softmax"""
    float_input = np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)
    _test_log_softmax(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_log_softmax(quant_input, quantized=True)
#######################################################################
# Tanh
# ----
def _test_tanh(data, quantized=False):
    """One iteration of TANH.

    When ``quantized`` is True, input is fake-quantized over [-3, 3] and the
    output over [-1, 1] (the full range of tanh).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-3, max=3, name="inq_0"
            )
            input_range = {"inq_0": (-3, 3)}
            out = math_ops.tanh(inq_data)
            # tanh output always lies in [-1, 1].
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_ops.tanh(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_tanh():
    """TANH"""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_tanh(float_input, quantized=False)
    quant_input = np.arange(0, 256, 30, dtype=np.uint8)
    _test_tanh(quant_input, quantized=True)
#######################################################################
# RSQRT
# ----
def _test_quant_rsqrt(data):
    """Test RSQRT with quantized data.

    For TF < 2.6.1 the op is exercised through fake-quant wrapping over [1, 6];
    for newer versions a saved model is post-training quantized to int8 by the
    TFLite converter and compared against TVM directly.

    Fix: removed the unused local ``dtype = "int8"`` and the redundant
    ``tuple(...)`` around ``data.shape``.
    """
    # tensorflow version upgrade support
    if tf.__version__ < LooseVersion("2.6.1"):
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=1, max=6, name="inq_0"
            )
            input_range = {"inq_0": (1, 6)}
            out = math_ops.rsqrt(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out")
            compare_tflite_with_tvm(
                data,
                "inq_0:0",
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
            )
    else:

        def _create_model():
            # Build, save and int8-quantize a one-op rsqrt model.
            class Model(tf.Module):
                @tf.function
                def tf_function(self, x):
                    op = tf.math.rsqrt(x)
                    return op

            model = Model()

            # Save the model
            export_dir = tempfile.gettempdir() + "/tf_model"
            tf.saved_model.save(
                model,
                export_dir,
                signatures=model.tf_function.get_concrete_function(
                    tf.TensorSpec(data.shape, tf.float32, name="input"),
                ),
            )

            # Convert the model
            def representative_dataset():
                # Non-negative samples in [0, 2) keep rsqrt well-defined for calibration.
                for _ in range(100):
                    tmp_data = np.random.rand(*data.shape)
                    yield [tmp_data.astype(np.float32) * 2]

            converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.representative_dataset = representative_dataset
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.inference_input_type = tf.int8
            converter.inference_output_type = tf.int8
            tflite_model = converter.convert()
            return tflite_model

        tflite_model_quant = _create_model()
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        in_node = ["tfl.quantize"]
        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
def _test_rsqrt(data, quantized=False):
    """One iteration of RSQRT; quantized inputs are delegated to _test_quant_rsqrt."""
    if not quantized:
        with tf.Graph().as_default():
            placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_0")
            rsqrt_out = math_ops.rsqrt(placeholder)
            compare_tflite_with_tvm(data, "in_0:0", [placeholder], [rsqrt_out])
    else:
        _test_quant_rsqrt(data)
def test_forward_rsqrt():
    """RSQRT"""
    _test_rsqrt(np.arange(1.0, 7.0, dtype=np.float32), quantized=False)
    _test_rsqrt(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)), quantized=False)
    # tensorflow version upgrade support: uint8 before 2.6.1, int8 after.
    quant_dtype = np.uint8 if tf.__version__ < LooseVersion("2.6.1") else np.int8
    _test_rsqrt(np.arange(1, 240, 40, dtype=quant_dtype), quantized=True)
    _test_rsqrt(np.arange(1, 240, 40, dtype=quant_dtype).reshape((2, 1, 3)), quantized=True)
#######################################################################
# SQRT
# ----
def _test_sqrt(data, quantized=False):
    """One iteration of SQRT.

    When ``quantized`` is True both input and output are fake-quantized over
    [1, 6], keeping sqrt well-defined (inputs strictly positive).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=1, max=6, name="inq_0"
            )
            input_range = {"inq_0": (1, 6)}
            out = math_ops.sqrt(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out")
            compare_tflite_with_tvm(
                data,
                "inq_0:0",
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
            )
        else:
            out = math_ops.sqrt(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_sqrt():
    """SQRT"""
    float_data = np.arange(1.0, 7.0, dtype=np.float32)
    _test_sqrt(float_data, quantized=False)
    _test_sqrt(float_data.reshape((2, 1, 3)), quantized=False)
    quant_data = np.arange(1, 240, 40, dtype=np.uint8)
    _test_sqrt(quant_data, quantized=True)
    _test_sqrt(quant_data.reshape((2, 1, 3)), quantized=True)
#######################################################################
# NEG
# ----
def _test_neg(data, quantized=False):
    """One iteration of NEG.

    When ``quantized`` is True both input and output are fake-quantized over
    [1, 6].
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=1, max=6, name="inq_0"
            )
            input_range = {"inq_0": (1, 6)}
            out = math_ops.neg(inq_data)
            # NOTE(review): negating inputs in [1, 6] yields values in [-6, -1],
            # yet the output quant range here is [1, 6] — confirm this is intended.
            out = tf.quantization.fake_quant_with_min_max_args(out, min=1, max=6, name="out")
            compare_tflite_with_tvm(
                data,
                "inq_0:0",
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
            )
        else:
            out = math_ops.neg(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_neg():
    """NEG"""
    float_data = np.arange(-2.0, 4.0, dtype=np.float32)
    _test_neg(float_data, quantized=False)
    _test_neg(float_data.reshape((2, 1, 3)), quantized=False)
    quant_data = np.arange(1, 240, 40, dtype=np.uint8)
    _test_neg(quant_data, quantized=True)
    _test_neg(quant_data.reshape((2, 1, 3)), quantized=True)
#######################################################################
# ABS
# ----
def _test_abs(data, quantized=False):
    """One iteration of ABS.

    Quantized path builds a saved model, post-training quantizes it to int8
    via the TFLite converter, and compares TFLite output with TVM.  Float
    path is a plain one-op graph comparison.

    Fix: removed the unused local ``dtype = "int8"`` and the redundant
    ``tuple(...)`` around ``data.shape``.
    """
    if quantized:

        def _create_model():
            # Build, save and int8-quantize a one-op abs model.
            class Model(tf.Module):
                @tf.function
                def tf_function(self, x):
                    op = tf.math.abs(x)
                    return op

            model = Model()

            # Save the model
            export_dir = tempfile.gettempdir() + "/tf_model"
            tf.saved_model.save(
                model,
                export_dir,
                signatures=model.tf_function.get_concrete_function(
                    tf.TensorSpec(data.shape, tf.float32, name="input"),
                ),
            )

            # Convert the model
            def representative_dataset():
                # Calibration samples span [-1, 1) so abs sees both signs.
                for _ in range(100):
                    tmp_data = np.random.rand(*data.shape)
                    yield [tmp_data.astype(np.float32) * 2 - 1]

            converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
            converter.optimizations = [tf.lite.Optimize.DEFAULT]
            converter.representative_dataset = representative_dataset
            converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
            converter.inference_input_type = tf.int8
            converter.inference_output_type = tf.int8
            tflite_model = converter.convert()
            return tflite_model

        tflite_model_quant = _create_model()
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        # TFLite 2.6.x upgrade support: input node name changed between versions.
        if tf.__version__ < LooseVersion("2.6.1"):
            in_node = ["serving_default_input_int8"]
        else:
            in_node = ["tfl.quantize"]

        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
    else:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_0")
            out = math_ops.abs(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_abs():
    """ABS"""
    for float_data in (
        np.arange(-3.0, 3.0, dtype=np.float32),
        np.arange(-3.0, 3.0, dtype=np.float32).reshape((2, 1, 3)),
    ):
        _test_abs(float_data, quantized=False)
    _test_abs(np.arange(-128, 127, 45, dtype=np.int8), quantized=True)
#######################################################################
# ReLu
# ----
def _test_relu(data, quantized=False):
    """One iteration of ReLU.

    When ``quantized`` is True the input is fake-quantized over [-10, 10] and
    the output over [0, 6].
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-10, max=10, name="inq_0"
            )
            input_range = {"inq_0": (-10, 10)}
            out = nn_ops.relu(inq_data)
            # NOTE(review): relu of [-10, 10] spans [0, 10] but the output range
            # is clamped to [0, 6] here — confirm the saturation is intended.
            out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.relu(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_relu():
    """ReLU"""
    float_input = np.arange(6.0, dtype=np.float32).reshape((1, 6))
    _test_relu(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_relu(quant_input, quantized=True)
#######################################################################
# ReLU6
# -----
def _test_relu6(data, quantized=False):
    """One iteration of ReLU6.

    When ``quantized`` is True the input is fake-quantized over [-10, 10] and
    the output over [0, 6] (the exact range of relu6).
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-10, max=10, name="inq_0"
            )
            input_range = {"inq_0": (-10, 10)}
            out = nn_ops.relu6(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.relu6(in_data)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_relu6():
    """ReLU6"""
    float_input = np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)
    _test_relu6(float_input)
    quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
    _test_relu6(quant_input, quantized=True)
#######################################################################
# Leaky_ReLU
# ----------
def _test_leaky_relu(data, alpha, quantized=False):
    """One iteration of Leaky_ReLU with negative slope ``alpha``.

    When ``quantized`` is True both input and output are fake-quantized over
    [-3, 2].
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-3, max=2, name="inq_0"
            )
            input_range = {"inq_0": (-3, 2)}
            out = nn_ops.leaky_relu(inq_data, alpha)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-3, max=2, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = nn_ops.leaky_relu(in_data, alpha)
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_leaky_relu():
    """Leaky_ReLU"""
    float_input = np.random.uniform(-5, 5, (1, 6)).astype(np.float32)
    _test_leaky_relu(float_input, alpha=0.2)
    # Quantized LEAKY_RELU requires the TFLite >= 1.14 schema.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        quant_input = np.random.uniform(0, 255, (2, 3)).astype(np.uint8)
        _test_leaky_relu(quant_input, alpha=0.3, quantized=True)
#######################################################################
# ReLU_n1_to_1
# ------------
def _test_relu_n1_to_1(data, quantized=False):
    """One iteration of ReLU_n1_to_1 (clamp to [-1, 1]).

    When ``quantized`` is True the input is fake-quantized over [-3, 3] and
    the output over [-1, 1].
    """
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-3, max=3, name="inq_0"
            )
            input_range = {"inq_0": (-3, 3)}
            # There is no such tf operation. The specific pattern will be replaced into RELU_N1_TO_1 by tflite
            out = math_ops.maximum(-1.0, math_ops.minimum(inq_data, 1.0))
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out")
            compare_tflite_with_tvm(
                data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_ops.maximum(-1.0, math_ops.minimum(in_data, 1.0))
            compare_tflite_with_tvm(data, "in_0:0", [in_data], [out])
def test_forward_relu_n1_to_1():
    """ReLU_n1_to_1"""
    float_input = np.random.uniform(-3, 3, (1, 6)).astype(np.float32)
    _test_relu_n1_to_1(float_input)
    # Quantized path needs the TFLite >= 1.14 schema.
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        quant_input = np.random.uniform(0, 255, (3, 6)).astype(np.uint8)
        _test_relu_n1_to_1(quant_input, quantized=True)
#######################################################################
# PReLU
# -----
def _test_prelu(data, alpha):
    """One iteration of PReLU with the given per-channel `alpha`."""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        # This specific pattern will be replaced into PRelu by tflite
        prelu_out = nn_ops.relu(placeholder) + (-alpha * nn_ops.relu(-placeholder))
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [prelu_out])
def test_forward_prelu():
    """PReLU with alpha broadcast from (3,) and (1, 1, 3)."""
    for alpha_shape in ((3,), (1, 1, 3)):
        _test_prelu(
            np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
            np.full(alpha_shape, 0.2, dtype="float32"),
        )
#######################################################################
# DepthToSpace
# ------------
def _test_depthtospace(data, block_size):
    """One iteration of depth_to_space operation with given data and block size"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        rearranged = array_ops.depth_to_space(placeholder, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [rearranged])
def test_forward_depthtospace():
    """DEPTH_TO_SPACE"""
    # DEPTH_TO_SPACE comes with TFLite >= 1.15.0 fbs schema
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        for shape, block_size in (([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)):
            _test_depthtospace(np.random.normal(size=shape).astype("float32"), block_size)
#######################################################################
# SpaceToDepth
# ------------
def _test_spacetodepth(data, block_size):
    """One iteration of space_to_depth operation with given data and block size"""
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        rearranged = array_ops.space_to_depth(placeholder, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [placeholder], [rearranged])
def test_forward_spacetodepth():
    """SPACE_TO_DEPTH"""
    for shape, block_size in (([1, 32, 32, 4], 2), ([1, 16, 8, 32], 4)):
        _test_spacetodepth(np.random.normal(size=shape).astype("float32"), block_size)
#######################################################################
# ReverseSequence
# ---------------
def _test_reverse_sequence(shape, dtype, seq_lengths, batch_axis, seq_axis):
    """One iteration of reverse_sequence operation with given data and attributes"""
    data = np.random.uniform(0, 100, size=shape).astype(dtype)
    with tf.Graph().as_default():
        placeholder = array_ops.placeholder(dtype=dtype, name="input", shape=shape)
        reversed_out = tf.reverse_sequence(
            placeholder, seq_lengths=seq_lengths, batch_axis=batch_axis, seq_axis=seq_axis
        )
        compare_tflite_with_tvm(data, "input", [placeholder], [reversed_out])
def test_forward_reverse_sequence():
    """REVERSE_SEQUENCE (needs TFLite >= 1.14 schema)."""
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        cases = [
            ([4, 3], [3, 2, 1], 1, 0),
            ([4, 3], [3, 2, 1, 3], 0, 1),
            ([2, 3, 3, 3], [2, 3, 2], 2, 1),
            ([2, 4, 6, 4, 5], [5, 3], 0, 2),
            ([2, 4, 6, 4, 5], [5, 3, 1, 4], 3, 2),
        ]
        for shape, seq_lengths, batch_axis, seq_axis in cases:
            _test_reverse_sequence(shape, "float32", seq_lengths, batch_axis, seq_axis)
#######################################################################
# Sparse To Dense
# ---------------
def _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape):
    """One iteration of SPARSE_TO_DENSE.

    When ``default_value`` is None the three-argument form of
    ``tf.sparse_to_dense`` is exercised; otherwise the default value is fed
    through its own placeholder.

    Fix: ``default_value == None`` replaced with ``default_value is None``
    (identity comparison; ``==`` invokes ``__eq__`` and for a numpy scalar
    can behave unexpectedly).
    """
    # tflite 1.13 convert method does not accept empty shapes
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        with tf.Graph().as_default():
            indices = tf.placeholder(
                shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices"
            )
            values = tf.placeholder(
                shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values"
            )
            oshape = tf.constant(
                output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype)
            )

            if default_value is None:
                output = tf.sparse_to_dense(indices, oshape, values)
                compare_tflite_with_tvm(
                    [sparse_indices, sparse_values],
                    ["indices", "values"],
                    [indices, values],
                    [output],
                )
            else:
                dv = tf.placeholder(shape=(), dtype=str(default_value.dtype), name="default_value")
                output = tf.sparse_to_dense(indices, oshape, values, dv)
                compare_tflite_with_tvm(
                    [sparse_indices, sparse_values, default_value],
                    ["indices", "values", "default_value"],
                    [indices, values, dv],
                    [output],
                )
def test_forward_sparse_to_dense():
    """
    Works in tvm/topi/tensorflow. But tflite converter breaks this test case
    _test_sparse_to_dense(
        np.int32(1),
        np.int32(3),
        np.int32(0),
        np.array([5]).astype("int32")
    )
    """
    cases = [
        # vector
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3, 3, 3]).astype("int32"),
            np.int32(0),
            np.array([5]).astype("int32"),
        ),
        # vector nXd
        (
            np.array([[0, 0], [1, 2]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(0),
            np.array([3, 4]).astype("int32"),
        ),
        (
            np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"),
            np.array([1, 2]).astype("int32"),
            np.int32(4),
            np.array([2, 3, 4]).astype("int32"),
        ),
        # floats
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            np.float32(3.5),
            np.array([5]).astype("int32"),
        ),
        # default value not specified
        (
            np.array([0, 1, 4]).astype("int32"),
            np.array([3.1, 3.1, 3.1]).astype("float32"),
            None,
            np.array([5]).astype("int32"),
        ),
    ]
    for sparse_indices, sparse_values, default_value, output_shape in cases:
        _test_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape)
#######################################################################
# Fully Connected
# ---------------
def _test_fully_connected(
    tensor_in_sizes,
    const_input,
    filter_in_sizes,
    bias_in_size=None,
    quantized=False,
    fp16_quantized=False,
):
    """One iteration of fully connected.

    Parameters
    ----------
    tensor_in_sizes : list
        Input tensor shape; the first dimension is the batch size.
    const_input : bool
        When True the input is a graph constant instead of a placeholder.
    filter_in_sizes : list
        Weight matrix shape; filter_in_sizes[0] must equal the flattened
        per-batch input size (asserted below).
    bias_in_size : list, optional
        Bias shape; bias_in_size[0] must equal filter_in_sizes[1].
    quantized : bool
        Use uint8 data plus fake-quant wrapping of input/weights/output.
    fp16_quantized : bool
        Use float data and let compare_tflite_with_tvm apply fp16 quantization.

    Fix: removed a dead ``input_range`` assignment that was immediately
    overwritten by the complete two-entry dict.
    """
    total_size_1 = np.prod(tensor_in_sizes)
    total_size_2 = np.prod(filter_in_sizes)
    assert (
        int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0]
    ), "input size and filter size are mismatched"
    # Initializes the input tensor with array containing incrementing
    # numbers from 1.
    data_array = np.arange(
        1, total_size_1 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32
    )
    filter_array = np.arange(
        1, total_size_2 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32
    )
    in_name = "input"
    with tf.Graph().as_default():
        in_data = (
            constant_op.constant(data_array, shape=tensor_in_sizes, dtype=np.float32, name=in_name)
            if const_input
            else array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32, name=in_name)
        )
        in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype=np.float32)
        data_array = np.reshape(data_array, tensor_in_sizes)
        # if we have bias
        if bias_in_size:
            assert bias_in_size[0] == filter_in_sizes[1], "bias and filter size are mismatched"
            # NOTE(review): this dtype keys on `quantized` only, unlike data/filter
            # above which also check fp16_quantized — confirm the asymmetry is intended.
            bias_array = np.arange(
                1, bias_in_size[0] + 1, dtype=np.uint8 if quantized else np.float32
            )
            in_bias = constant_op.constant(bias_array, shape=bias_in_size, dtype=np.float32)
        if quantized and not fp16_quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )
            inq_filter = tf.quantization.fake_quant_with_min_max_args(
                in_filter, min=-100, max=100, name="inq_1"
            )
            # Quantization ranges for both fake-quantized tensors.
            input_range = {"inq_0": (-100, 100), "inq_1": (-100, 100)}
            # reshape N H W C into N H*W*C
            inq_data_reshape = array_ops.reshape(inq_data, [tensor_in_sizes[0], -1])
            out = math_ops.mat_mul(inq_data_reshape, inq_filter)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            # if we have bias
            if bias_in_size:
                out = nn_ops.bias_add(out, in_bias)
            compare_tflite_with_tvm(
                data_array,
                inq_data.name,
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
            )
        else:
            # reshape N H W C into N H*W*C
            in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1])
            out = math_ops.mat_mul(in_data_reshape, in_filter)
            # TODO : Need to construct a fc op with (keep_num_dims == True)
            # if we have bias
            if bias_in_size:
                out = nn_ops.bias_add(out, in_bias)
            compare_tflite_with_tvm(
                data_array,
                in_data.name,
                [in_data],
                [out],
                experimental_new_converter=True,
                fp16_quantized=fp16_quantized,
            )
def test_forward_fully_connected():
    """Fully Connected: sweep shapes x const/placeholder x quantization modes."""
    shape_configs = [
        ([1, 4], [4, 4], None),
        ([1, 4], [4, 4], [4]),
        ([1, 1, 1, 5], [5, 5], None),
        ([1, 1, 10], [10, 103], None),
        ([1, 1, 1, 150], [150, 100], None),
        ([1, 1, 1, 150], [150, 100], None),
        ([1, 1, 1, 150], [150, 100], [100]),
        ([5, 1, 1, 150], [150, 100], None),
        ([5, 1, 1, 150], [150, 100], [100]),
    ]
    for input_shape, weight_shape, bias_shape in shape_configs:
        for const_input in (False, True):
            for quantized in (False, True):
                for fp16_quantized in (False, True):
                    _test_fully_connected(
                        input_shape,
                        const_input,
                        weight_shape,
                        bias_shape,
                        quantized,
                        fp16_quantized,
                    )
#######################################################################
# REVERSE_V2
# ----------
def _test_reverse_v2(input_shape, axis, dtype):
    """One iteration of REVERSE_V2.

    Fix: renamed the local variable ``input`` to ``data`` so it no longer
    shadows the ``input`` builtin.
    """
    with tf.Graph().as_default():
        data = np.random.randint(0, 100, size=input_shape).astype(dtype)
        in_input = tf.placeholder(dtype=data.dtype, shape=data.shape, name="input")
        in_axis = ops.convert_to_tensor(axis, dtype=axis.dtype)
        out = array_ops.reverse(in_input, in_axis)
        compare_tflite_with_tvm([data], ["input"], [in_input], [out])
def test_forward_reverse_v2():
    """REVERSE_V2"""
    for dtype in ["float32", "int32"]:
        for shape, axis in (((5), [0]), ((5, 6, 4, 2), [2])):
            _test_reverse_v2(shape, np.array(axis, dtype="int32"), dtype)
#######################################################################
# MATRIX_SET_DIAG
# ---------------
def _test_matrix_set_diag(input_shape, input_type, quantized=False):
    """One iteration of MATRIX_SET_DIAG.

    The diagonal shape is derived from ``input_shape`` (batch dims plus the
    shorter of the last two dims).  ``input_type`` is ignored when
    ``quantized`` is True, which forces uint8 data.

    Fix: renamed the local variable ``input`` to ``input_data`` so it no
    longer shadows the ``input`` builtin.
    """
    with tf.Graph().as_default():
        diagonal_shape = list(input_shape[:-2])
        diagonal_shape.append(min(input_shape[-2], input_shape[-1]))
        if quantized:
            # ignoring input_type as quantized requires uint8
            input_data = np.random.uniform(0, 256, input_shape).astype("uint8")
            in_input = tf.placeholder(dtype="float32", shape=input_data.shape, name="input")
            inq_input = tf.quantization.fake_quant_with_min_max_args(
                in_input, min=-100, max=100, name="q_input"
            )

            diagonal = np.random.uniform(0, 256, diagonal_shape).astype("uint8")
            in_diagonal = tf.placeholder(dtype="float32", shape=diagonal.shape, name="diagonal")
            inq_diagonal = tf.quantization.fake_quant_with_min_max_args(
                in_diagonal, min=-100, max=100, name="q_diagonal"
            )

            input_range = {"q_input": (-100, 100), "q_diagonal": (-100, 100)}

            out = array_ops.matrix_set_diag(inq_input, inq_diagonal)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")

            compare_tflite_with_tvm(
                [input_data, diagonal],
                ["q_input", "q_diagonal"],
                [inq_input, inq_diagonal],
                [out],
                quantized=True,
                input_range=input_range,
            )
        else:
            input_data = np.random.uniform(0, 100, input_shape).astype(input_type)
            diagonal = np.random.uniform(0, 100, diagonal_shape).astype(input_type)

            in_input = tf.placeholder(dtype=input_data.dtype, shape=input_data.shape, name="input")
            in_diagonal = tf.placeholder(
                dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal"
            )

            out = array_ops.matrix_set_diag(in_input, in_diagonal)

            compare_tflite_with_tvm(
                [input_data, diagonal], ["input", "diagonal"], [in_input, in_diagonal], [out]
            )
def test_forward_matrix_set_diag():
    """MATRIX_SET_DIAG"""
    shapes = [(4, 4), (5, 4, 3, 4), (4, 4, 2)]
    for dtype in [np.float32, np.int32]:
        for shape in shapes:
            _test_matrix_set_diag(shape, dtype)
    for shape in shapes:
        _test_matrix_set_diag(shape, np.uint8, quantized=True)
#######################################################################
# MATRIX_DIAG
# -----------
def _test_matrix_diag(diagonal_shape, dtype):
    """One iteration of MATRIX_DIAG: build a matrix whose diagonal is the input."""
    with tf.Graph().as_default():
        diag_values = np.random.uniform(0, 100, diagonal_shape).astype(dtype)
        diag_placeholder = tf.placeholder(
            dtype=diag_values.dtype, shape=diag_values.shape, name="diagonal"
        )
        result = array_ops.matrix_diag(diag_placeholder)
        compare_tflite_with_tvm(
            [diag_values], ["diagonal"], [diag_placeholder], [result], experimental_new_converter=True
        )
def test_forward_matrix_diag():
    """MATRIX_DIAG"""
    for dtype in [np.float32, np.int32]:
        for diag_shape in ((4), (5, 4, 3), (2, 3)):
            _test_matrix_diag(diag_shape, dtype)
#######################################################################
# Custom Operators
# ----------------
def test_detection_postprocess():
    """Custom TFLite_Detection_PostProcess op: compare TFLite vs TVM on an SSD model.

    Downloads a quantized SSD-MobileNet-V2 frozen graph, converts it with the
    custom post-process op enabled, and checks shapes, valid detection count,
    and per-box coords/class/score between the TFLite interpreter and TVM.
    """
    tf_model_file = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/object_detection/"
        "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz",
        "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03/tflite_graph.pb",
    )
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        tf_model_file,
        input_arrays=["raw_outputs/box_encodings", "raw_outputs/class_predictions"],
        output_arrays=[
            "TFLite_Detection_PostProcess",
            "TFLite_Detection_PostProcess:1",
            "TFLite_Detection_PostProcess:2",
            "TFLite_Detection_PostProcess:3",
        ],
        input_shapes={
            "raw_outputs/box_encodings": (1, 1917, 4),
            "raw_outputs/class_predictions": (1, 1917, 91),
        },
    )
    converter.allow_custom_ops = True
    converter.inference_type = tf.lite.constants.FLOAT
    tflite_model = converter.convert()
    # Fixed seed makes the box/score comparisons below deterministic.
    np.random.seed(0)
    box_encodings = np.random.uniform(size=(1, 1917, 4)).astype("float32")
    class_predictions = np.random.uniform(size=(1, 1917, 91)).astype("float32")
    tflite_output = run_tflite_graph(tflite_model, [box_encodings, class_predictions])
    tvm_output = run_tvm_graph(
        tflite_model,
        [box_encodings, class_predictions],
        ["raw_outputs/box_encodings", "raw_outputs/class_predictions"],
        num_output=4,
    )
    # Check all output shapes are equal
    assert all(
        [
            tvm_tensor.shape == tflite_tensor.shape
            for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
        ]
    )
    # Check valid count is the same
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]
    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # Check bounding box co-ords
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0][0][i]),
            np.squeeze(tflite_output[0][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
        # Check the class
        # Stricter check to ensure class remains same
        np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]))
        # Check the score
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[2][0][i]),
            np.squeeze(tflite_output[2][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
#######################################################################
# Custom Converter
# ----------------
def test_custom_op_converter():
    """Test case for user-defined operator converter in TFLite frontend.

    Builds a one-op SUB model, then converts it twice with run_tvm_graph —
    once with the default converter and once with a converter that overrides
    SUB as add(lhs, negative(rhs)) — and checks both give the same result.

    Fixes: replaced a convoluted zip-based derivation of the input names
    with the literal list, a manual index loop with a comprehension, and the
    legacy two-argument ``super(...)`` call with the Python 3 form.  The
    "single addition" comment was corrected: the graph computes a subtraction.
    """

    class DummyOperatorConverter(relay.frontend.tflite.OperatorConverter):
        """Operator Converter for converting TFLite ops to relay ops"""

        def __init__(self, model, subgraph, exp_tab):
            super().__init__(model, subgraph, exp_tab)
            self.allow_custom_ops = True
            convert_map_overwrite = {"SUB": self.convert_sub_dummy}
            self.convert_map.update(convert_map_overwrite)

        def convert_sub_dummy(self, op):
            """Convert TFLite SUB as add(lhs, negative(rhs))."""
            input_tensors = self.get_input_tensors(op)
            assert len(input_tensors) == 2, "input tensors length should be 2"
            lhs_tensor = input_tensors[0]
            rhs_tensor = input_tensors[1]
            lhs_expr = self.get_expr(lhs_tensor.tensor_idx)
            rhs_expr = self.get_expr(rhs_tensor.tensor_idx)
            temp_expr = relay.op.negative(rhs_expr)
            out = relay.op.add(lhs_expr, temp_expr)
            return out

    with tf.Graph().as_default():
        # Generate TFLite model for a single subtraction
        data = [
            np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
        ]
        in_data = [
            array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"),
            array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"),
        ]
        out = math_ops.subtract(in_data[0], in_data[1])
        # Tensor names and the corresponding graph-node names (prefix before ":").
        in_name = ["in_0:0", "in_1:0"]
        input_tensors = list(in_data)
        output_tensors = [out]
        in_node = [name.split(":")[0] for name in in_name]

        with tf.Session() as sess:
            converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
            tflite_model_buf = converter.convert()
        in_data = list(data)

        # The overridden SUB must produce the same numbers as the stock converter.
        tvm_output_orig = run_tvm_graph(tflite_model_buf, in_data, in_node)
        tvm_output_dummy = run_tvm_graph(
            tflite_model_buf, in_data, in_node, op_converter=DummyOperatorConverter
        )
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output_orig[0]), np.squeeze(tvm_output_dummy[0]), rtol=1e-5, atol=1e-5
        )
#######################################################################
# Mobilenet
# ---------
def test_forward_mobilenet_v1():
    """Test the Mobilenet V1 TF Lite model."""
    # Fetch the pre-converted Mobilenet V1 model from the official TF hosting.
    model_path = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224.tgz",
        "mobilenet_v1_1.0_224.tflite",
    )
    with open(model_path, "rb") as model_file:
        model_buf = model_file.read()
    # Random NHWC image-sized input; TVM is compared against TFLite itself.
    input_data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    reference = run_tflite_graph(model_buf, input_data)
    actual = run_tvm_graph(model_buf, input_data, "input")
    tvm.testing.assert_allclose(
        np.squeeze(actual[0]), np.squeeze(reference[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_mobilenet_v2():
    """Test the Mobilenet V2 TF Lite model."""
    # Download the official Mobilenet V2 TFLite artifact.
    path = tf_testing.get_workload_official(
        "http://download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224.tgz",
        "mobilenet_v2_1.0_224.tflite",
    )
    with open(path, "rb") as fh:
        buf = fh.read()
    # A random NHWC tensor is sufficient: TVM is checked against TFLite.
    inp = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    expected = run_tflite_graph(buf, inp)
    got = run_tvm_graph(buf, inp, "input")
    tvm.testing.assert_allclose(
        np.squeeze(got[0]), np.squeeze(expected[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V3
# ------------
def test_forward_mobilenet_v3():
    """Test the Mobilenet V3 TF Lite model."""
    # Some MobilenetV3 ops only exist in the tf >= 1.15 flatbuffer schema.
    if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"):
        return
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_float.tgz",
        "v3-large_224_1.0_float/v3-large_224_1.0_float.tflite",
    )
    with open(path, "rb") as fh:
        buf = fh.read()
    sample = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    ref_out = run_tflite_graph(buf, sample)
    tvm_out = run_tvm_graph(buf, sample, "input")
    tvm.testing.assert_allclose(
        np.squeeze(tvm_out[0]), np.squeeze(ref_out[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V1 Sparse
# -----------------
def test_forward_sparse_mobilenet_v1():
    """Test the Sparse version of Mobilenet V1 TF Lite model."""
    # Sparse Mobilenet V1 from the fast-convnets model zoo.
    model_path = download_testdata(
        "https://storage.googleapis.com/fast-convnets/tflite-models/mbv1_140_90_12b4_720.tflite",
        "mbv1_140_90_12b4_720.tflite",
    )
    with open(model_path, "rb") as fh:
        model_buf = fh.read()
    image = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    baseline = run_tflite_graph(model_buf, image)
    result = run_tvm_graph(model_buf, image, "float_image_input")
    tvm.testing.assert_allclose(
        np.squeeze(result[0]), np.squeeze(baseline[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Mobilenet V2 Sparse
# -----------------
def test_forward_sparse_mobilenet_v2():
    """Test the Sparse version of Mobilenet V2 TF Lite model."""
    # Sparse Mobilenet V2 from the fast-convnets model zoo.
    model_path = download_testdata(
        "https://storage.googleapis.com/fast-convnets/tflite-models/mbv2_200_85_11-16b2_744.tflite",
        "mbv2_200_85_11-16b2_744.tflite",
    )
    with open(model_path, "rb") as fh:
        model_buf = fh.read()
    image = np.random.uniform(size=(1, 224, 224, 3)).astype("float32")
    baseline = run_tflite_graph(model_buf, image)
    result = run_tvm_graph(model_buf, image, "float_image_input")
    tvm.testing.assert_allclose(
        np.squeeze(result[0]), np.squeeze(baseline[0]), rtol=1e-5, atol=1e-5
    )
#######################################################################
# Inception
# ---------
def test_forward_inception_v3_net():
    """Test the Inception V3 TF Lite model."""
    # Inception V3 from the TFLite model zoo.
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v3_2018_04_27.tgz",
        "inception_v3.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()
    # Inception takes 299x299 inputs.
    feed = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
    want = run_tflite_graph(model, feed)
    have = run_tvm_graph(model, feed, "input")
    tvm.testing.assert_allclose(
        np.squeeze(have[0]), np.squeeze(want[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_inception_v4_net():
    """Test the Inception V4 TF Lite model."""
    # Inception V4 from the TFLite model zoo.
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
        "inception_v4.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()
    feed = np.random.uniform(size=(1, 299, 299, 3)).astype("float32")
    want = run_tflite_graph(model, feed)
    have = run_tvm_graph(model, feed, "input")
    tvm.testing.assert_allclose(
        np.squeeze(have[0]), np.squeeze(want[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_inception_v4_net_batched():
    """Test the Inception V4 TF Lite model with a batched input."""
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/model_zoo/upload_20180427/inception_v4_2018_04_27.tgz",
        "inception_v4.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()
    # Batch of four 299x299 images.
    feed = np.random.uniform(size=(4, 299, 299, 3)).astype("float32")
    want = run_tflite_graph(model, feed)
    have = run_tvm_graph(model, feed, "input")
    tvm.testing.assert_allclose(
        np.squeeze(have[0]), np.squeeze(want[0]), rtol=1e-5, atol=1e-5
    )
def test_forward_qnn_inception_v1_net():
    """Test the Quantized TFLite Inception model."""
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/inception_v1_224_quant_20181026.tgz",
        "inception_v1_224_quant.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Requantize is implemented differently in TFLite and Relay, so the raw
    # outputs do not match exactly; compare top-3 labels on a real image instead.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = get_real_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, image, "input"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_qnn_mobilenet_v1_net():
    """Test the Quantized TFLite Mobilenet V1 model."""
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz",
        "mobilenet_v1_1.0_224_quant.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Requantize differs between TFLite and Relay, so raw numbers differ
    # slightly; compare the top-3 predicted labels on a real image.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = get_real_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, image, "input"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_qnn_mobilenet_v2_net():
    """Test the Quantized TFLite Mobilenet V2 model."""
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite_11_05_08/mobilenet_v2_1.0_224_quant.tgz",
        "mobilenet_v2_1.0_224_quant.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Requantize differs between TFLite and Relay, so raw numbers differ
    # slightly; compare the top-3 predicted labels on a real image.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = get_real_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, image, "input"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
#######################################################################
# Mobilenet V3 Quantized
# ----------------------
def test_forward_qnn_mobilenet_v3_net():
    """Test the Quantized TFLite Mobilenet V3 model."""
    # One of these skips always fires: the required op set needs tf >= 1.15,
    # but newer tensorflow segfaults on this model, so the test never runs.
    if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"):
        pytest.skip("Unsupported in tflite < 1.15.0")
    else:
        pytest.skip("This segfaults with tensorflow 1.15.2 and above")
    path = tf_testing.get_workload_official(
        "https://storage.googleapis.com/mobilenet_v3/checkpoints/v3-large_224_1.0_uint8.tgz",
        "v3-large_224_1.0_uint8/v3-large_224_1.0_uint8.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Requantize differs between TFLite and Relay; compare top-3 labels on a
    # real image instead of raw outputs.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = get_real_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, image, "input"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_tflite2_qnn_resnet50():
    """Test the Quantized TFLite version 2.1.0 Resnet50 model."""
    # Model was produced with the TF 2.x converter; nothing to do on older TF.
    if package_version.parse(tf.VERSION) < package_version.parse("2.1.0"):
        return
    path = download_testdata(
        "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/resnet_50_quantized.tflite",
        "resnet_50_quantized.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Compare top-3 labels: quantized outputs differ slightly between runtimes.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = pre_processed_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, np.array(image), "input_1"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_tflite2_qnn_inception_v1():
    """Test the Quantized TFLite version 2.1.0 Inception V1 model."""
    # Model was produced with the TF 2.x converter; nothing to do on older TF.
    if package_version.parse(tf.VERSION) < package_version.parse("2.1.0"):
        return
    path = download_testdata(
        "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/inception_v1_quantized.tflite",
        "inception_v1_quantized.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Compare top-3 labels: quantized outputs differ slightly between runtimes.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = pre_processed_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, np.array(image), "input_1"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_tflite2_qnn_mobilenet_v2():
    """Test the Quantized TFLite version 2.1.0 Mobilenet V2 model."""
    # Model was produced with the TF 2.x converter; nothing to do on older TF.
    if package_version.parse(tf.VERSION) < package_version.parse("2.1.0"):
        return
    path = download_testdata(
        "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/Quantized/mobilenet_v2_quantized.tflite",
        "mobilenet_v2_quantized.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()

    # Compare top-3 labels: quantized outputs differ slightly between runtimes.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = pre_processed_image(224, 224)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, np.array(image), "input_1"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
def test_forward_tflite_float16():
    """Test float16 quantized model"""
    # Start from the frozen Mobilenet V1 0.25 graph and convert it to a
    # float16-weight TFLite model on the fly.
    frozen_pb = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/mobilenet_v1_2018_02_22/mobilenet_v1_0.25_128.tgz",
        "mobilenet_v1_0.25_128_frozen.pb",
    )
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        frozen_pb, ["input"], ["MobilenetV1/Predictions/Reshape_1"]
    )
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.target_spec.supported_types = [tf.float16]
    model = converter.convert()

    # Requantize differences between TFLite and Relay make an exact output
    # comparison too strict; compare top-3 labels on a real image instead.
    def top3_labels(raw):
        return np.squeeze(raw).argsort()[-3:][::-1]

    image = get_real_image(128, 128, quantized=False)
    tflite_labels = top3_labels(run_tflite_graph(model, image))
    tvm_labels = top3_labels(run_tvm_graph(model, image, "input"))
    tvm.testing.assert_allclose(tvm_labels, tflite_labels)
#######################################################################
# Quantized SSD Mobilenet
# -----------------------
def test_forward_qnn_coco_ssd_mobilenet_v1():
    """Test the quantized Coco SSD Mobilenet V1 TF Lite model."""
    # NOTE(review): this skip is unconditional, so everything below is
    # currently dead code, kept for when the LLVM issue is resolved.
    pytest.skip(
        "LLVM bug - getExtendedVectorNumElements - "
        + "https://discuss.tvm.apache.org/t/segfault-in-llvm/3567. The workaround is to use a "
        + "specific target, for example, llvm -mpcu=core-avx2"
    )
    tflite_model_file = tf_testing.get_workload_official(
        "https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip",
        "detect.tflite",
    )
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    data = get_real_image_object_detection(300, 300)
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    # Four outputs, compared by index below: [0] boxes, [1] classes,
    # [2] scores, [3] number of valid detections.
    tvm_output = run_tvm_graph(
        tflite_model_buf, data, "normalized_input_image_tensor", num_output=4
    )
    # Check all output shapes are equal
    assert all(
        [
            tvm_tensor.shape == tflite_tensor.shape
            for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
        ]
    )
    # Check valid count is the same (single-element array; relies on numpy
    # truthiness of a one-element comparison result).
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]
    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # We compare the bounding boxes whose prediction score is above 60%. This is typical in end
        # to end application where a low prediction score is discarded. This is also needed because
        # multiple low score bounding boxes can have same score and TFlite and TVM can have
        # different orderings for same score bounding boxes. Another reason for minor differences in
        # low score bounding boxes is the difference between TVM and TFLite for requantize operator.
        if tvm_output[2][0][i] > 0.6:
            # Check bounding box co-ords. The tolerances have to be adjusted, from 1e-5 to 1e-2,
            # because of differences between for requantiize operator in TFLite and TVM.
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[0][0][i]),
                np.squeeze(tflite_output[0][0][i]),
                rtol=1e-2,
                atol=1e-2,
            )
            # Check the class
            # Stricter check to ensure class remains same
            np.testing.assert_equal(
                np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i])
            )
            # Check the score
            tvm.testing.assert_allclose(
                np.squeeze(tvm_output[2][0][i]),
                np.squeeze(tflite_output[2][0][i]),
                rtol=1e-5,
                atol=1e-5,
            )
#######################################################################
# SSD Mobilenet
# -------------
def test_forward_coco_ssd_mobilenet_v1():
    """Test the FP32 Coco SSD Mobilenet V1 TF Lite model."""
    tflite_model_file = tf_testing.get_workload_official(
        "https://raw.githubusercontent.com/dmlc/web-data/main/tensorflow/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tgz",
        "ssd_mobilenet_v1_coco_2018_01_28.tflite",
    )
    with open(tflite_model_file, "rb") as f:
        tflite_model_buf = f.read()
    # Fixed seed so the random input (and hence the detections) is reproducible.
    np.random.seed(0)
    data = np.random.uniform(size=(1, 300, 300, 3)).astype("float32")
    tflite_output = run_tflite_graph(tflite_model_buf, data)
    # Four outputs, compared by index below: [0] boxes, [1] classes,
    # [2] scores, [3] number of valid detections.
    tvm_output = run_tvm_graph(
        tflite_model_buf, data, "normalized_input_image_tensor", num_output=4
    )
    # Check all output shapes are equal
    assert all(
        [
            tvm_tensor.shape == tflite_tensor.shape
            for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output)
        ]
    )
    # Check valid count is the same (single-element array; relies on numpy
    # truthiness of a one-element comparison result).
    assert tvm_output[3] == tflite_output[3]
    valid_count = tvm_output[3][0]
    # For boxes that do not have any detections, TFLite puts random values. Therefore, we compare
    # tflite and tvm tensors for only valid boxes.
    for i in range(0, valid_count):
        # Check bounding box co-ords
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0][0][i]),
            np.squeeze(tflite_output[0][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
        # Check the class (exact match required)
        np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]))
        # Check the score
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[2][0][i]),
            np.squeeze(tflite_output[2][0][i]),
            rtol=1e-5,
            atol=1e-5,
        )
#######################################################################
# MediaPipe
# -------------
def test_forward_mediapipe_hand_landmark():
    """Test MediaPipe 2D hand landmark TF Lite model."""
    path = download_testdata(
        "https://github.com/google/mediapipe/raw/v0.7.4/mediapipe/models/hand_landmark.tflite",
        "hand_landmark.tflite",
    )
    with open(path, "rb") as fh:
        model = fh.read()
    sample = np.random.uniform(size=(1, 256, 256, 3)).astype("float32")
    ref_outs = run_tflite_graph(model, sample)
    tvm_outs = run_tvm_graph(model, sample, "input_1", num_output=2)
    # Compare both model outputs pairwise.
    for got, ref in zip(tvm_outs, ref_outs):
        tvm.testing.assert_allclose(
            np.squeeze(got), np.squeeze(ref), rtol=1e-5, atol=1e-5
        )
#######################################################################
# Test check for Tensorflow "dynamic range quantization" optimization
# --------------
def test_prevent_tensorflow_dynamic_range():
    """
    Should prevent running "dynamic range quantization" optimized TFLite graph
    """
    # Build a single-Dense keras model with known weights and convert it with
    # the default optimization set, which produces a dynamic-range-quantized
    # TFLite graph.
    data_array = np.random.randint(0, 2, (1, 1024, 1024)).astype(dtype=np.float32)
    filter_array = np.random.randint(0, 2, (1024, 1024)).astype(dtype=np.float32)
    data_in = tf.keras.layers.Input(shape=data_array.shape[1:])
    dense = tf.keras.layers.Dense(units=filter_array.shape[-1], use_bias=False)(data_in)
    keras_model = tf.keras.models.Model(data_in, dense)
    keras_model.layers[1].set_weights([filter_array])
    converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    tflite_model = converter.convert()
    # The frontend must reject the dynamic-range-quantized graph.
    # (Fixed: dropped the unused `tvm_output =` binding — the call cannot
    # return when the expected exception is raised.)
    with pytest.raises(tvm.error.OpNotImplemented):
        run_tvm_graph(tflite_model, data_array, data_in.name.replace(":0", ""))
#######################################################################
# Main
# ----
if __name__ == "__main__":
    # Ad-hoc runner: execute every test directly when invoked as a script
    # (instead of through pytest).
    # BatchToSpaceND
    test_forward_batch_to_space_nd()
    # SpaceToBatchND
    test_forward_space_to_batch_nd()
    # Split
    test_forward_split()
    # Transpose
    test_forward_transpose()
    # Cast
    test_forward_cast()
    # BatchMatMul
    test_forward_batch_matmul()
    # Tile
    test_forward_tile()
    # Query
    test_forward_shape()
    # Transforms
    test_forward_concatenation()
    test_forward_pad()
    test_forward_pack()
    test_forward_unpack()
    test_forward_reshape()
    test_all_resize()
    test_forward_range()
    test_forward_squeeze()
    test_forward_slice()
    test_forward_topk()
    test_forward_gather()
    test_forward_gather_nd()
    test_forward_stridedslice()
    test_forward_depthtospace()
    test_forward_spacetodepth()
    test_forward_reverse_sequence()
    test_forward_sparse_to_dense()
    test_forward_select()
    test_forward_quantize_dequantize()
    test_forward_arg_min_max()
    test_forward_expand_dims()
    test_forward_reverse_v2()
    test_forward_matrix_set_diag()
    test_forward_matrix_diag()
    # NN
    test_forward_convolution()
    test_forward_transpose_conv()
    test_forward_logistic()
    test_forward_pooling()
    test_forward_l2_pool2d()
    test_forward_softmax()
    test_forward_tanh()
    test_forward_rsqrt()
    test_forward_neg()
    test_forward_sin()
    test_forward_abs()
    test_forward_sqrt()
    test_forward_relu()
    test_forward_relu6()
    test_forward_leaky_relu()
    test_forward_relu_n1_to_1()
    test_forward_log_softmax()
    test_forward_prelu()
    test_forward_fully_connected()
    test_forward_l2_normalization()
    test_forward_local_response_normalization()
    # Elemwise
    test_all_elemwise()
    test_forward_add_n()
    # Unary elemwise
    test_all_unary_elemwise()
    # Zeros Like
    test_forward_zeros_like()
    # Fill
    test_forward_fill()
    # Reduce
    test_all_reduce()
    # Logical
    test_all_logical()
    # Detection_PostProcess
    test_detection_postprocess()
    # Overwrite Converter
    test_custom_op_converter()
    # End to End
    test_forward_mobilenet_v1()
    test_forward_mobilenet_v2()
    test_forward_mobilenet_v3()
    test_forward_inception_v3_net()
    test_forward_inception_v4_net()
    test_forward_inception_v4_net_batched()
    test_forward_coco_ssd_mobilenet_v1()
    test_forward_mediapipe_hand_landmark()
    # End to End Sparse models
    test_forward_sparse_mobilenet_v1()
    test_forward_sparse_mobilenet_v2()
    # End to End quantized
    test_forward_qnn_inception_v1_net()
    test_forward_qnn_mobilenet_v1_net()
    test_forward_qnn_mobilenet_v2_net()
    # This also fails with a segmentation fault in my run
    # with Tflite 1.15.2
    test_forward_qnn_mobilenet_v3_net()
    test_forward_qnn_coco_ssd_mobilenet_v1()
    # TFLite 2.1.0 quantized tests
    test_forward_tflite2_qnn_resnet50()
    test_forward_tflite2_qnn_inception_v1()
    test_forward_tflite2_qnn_mobilenet_v2()
    test_forward_tflite_float16()
|
{
"content_hash": "2d391d4af5cb47176bad2bcd4fd0b2b8",
"timestamp": "",
"source": "github",
"line_count": 4935,
"max_line_length": 135,
"avg_line_length": 35.42938196555218,
"alnum_prop": 0.5372389101141589,
"repo_name": "dmlc/tvm",
"id": "abb05354d921cecb0ece3e5c8107fecfba4c9286",
"size": "175690",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/python/frontend/tflite/test_forward.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Test resurrection of mined transactions into the mempool after a re-org
# (uses simple one-input, one-output, no-fee transactions):
class MempoolCoinbaseTest(BitcoinTestFramework):
    """Check that mined transactions return to the mempool after a re-org."""

    def __init__(self):
        super().__init__()
        # Single node, reusing a cached chain (no clean-chain setup).
        self.num_nodes = 1
        self.setup_clean_chain = False

    def setup_network(self):
        # Just need one node for this test
        args = ["-checkmempool", "-debug=mempool"]
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, args))
        self.is_network_split = False

    def run_test(self):
        return  # TODO: test disabled; everything below is unreachable until this early return is removed
        node0_address = self.nodes[0].getnewaddress()
        # Spend block 1/2/3's coinbase transactions
        # Mine a block.
        # Create three more transactions, spending the spends
        # Mine another block.
        # ... make sure all the transactions are confirmed
        # Invalidate both blocks
        # ... make sure all the transactions are put back in the mempool
        # Mine a new block
        # ... make sure all the transactions are confirmed again.
        b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
        coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
        spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
        spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
        blocks = []
        blocks.extend(self.nodes[0].generate(1))
        spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ]
        spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
        blocks.extend(self.nodes[0].generate(1))
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
        # Use invalidateblock to re-org back; all transactions should
        # end up unconfirmed and back in the mempool
        for node in self.nodes:
            node.invalidateblock(blocks[0])
        # All spent txns should be back in the mempool, unconfirmed
        # (the original comment here said "mempool should be empty" — copy-paste error)
        assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] == 0)
        # Generate another block, they should all get mined
        self.nodes[0].generate(1)
        # mempool should be empty, all txns confirmed
        assert_equal(set(self.nodes[0].getrawmempool()), set())
        for txid in spends1_id+spends2_id:
            tx = self.nodes[0].gettransaction(txid)
            assert(tx["confirmations"] > 0)
if __name__ == '__main__':
    # Standalone entry point: run the test through the framework's main().
    MempoolCoinbaseTest().main()
|
{
"content_hash": "2e909299ceaa807942bcb6b95702d77f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 105,
"avg_line_length": 40.125,
"alnum_prop": 0.6227068189685012,
"repo_name": "kallewoof/elements",
"id": "6f0073019d228777af7669432a2ea8da84eae708",
"size": "3190",
"binary": false,
"copies": "3",
"ref": "refs/heads/elements-bc2",
"path": "qa/rpc-tests/mempool_resurrect_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "888757"
},
{
"name": "C++",
"bytes": "5528925"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "192428"
},
{
"name": "Makefile",
"bytes": "110940"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7239"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1183029"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "72918"
}
],
"symlink_target": ""
}
|
import warnings as _warnings
# Turn every warning into an error so the rendering below fails loudly on
# any warning raised by tdi.
_warnings.resetwarnings()
_warnings.filterwarnings('error')
from tdi import html
# Template with a repeated "nested" node and a separator node (":-nested").
template = html.from_string("""
<node tdi="item">
<node tdi="nested">
<node tdi="subnested"></node>
</node><tdi tdi=":-nested">
</tdi>
</node>
""".lstrip())
class Model(object):
    """Render model: repeats the "nested" node with a custom separator."""

    def render_item(self, node):
        # Separator callback: un-hide the separator element and fill its
        # content with node.ctx[1] values joined by '-'.
        # NOTE(review): node.ctx[1] presumably carries the surrounding
        # repetition items — confirm against the tdi repeat() docs.
        # NOTE(review): uses the Python 2 `unicode` builtin — py2-only code.
        def sep(node):
            node.hiddenelement = False
            node.content = (
                u'\n '
                + u'-'.join(map(unicode, node.ctx[1]))
                + u'\n '
            )
        # Repeat the "nested" child once per item, with `sep` between repeats.
        node.nested.repeat(self.repeat_nested, [1, 2, 3, 4], separate=sep)
        return True

    def repeat_nested(self, node, item):
        # Each repetition stores its item in the node's 'j' attribute.
        node['j'] = item
# Render the template with the model; the repeat/separator callbacks fire here.
model = Model()
template.render(model)
|
{
"content_hash": "9a2435448d6f781ca5be2b1af32bc927",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 74,
"avg_line_length": 23.9375,
"alnum_prop": 0.5391644908616188,
"repo_name": "ndparker/tdi",
"id": "25ec0d692078d119f75e965475c83d8a118e18cc",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/rendering/repeat_separated_items.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "567028"
},
{
"name": "C++",
"bytes": "6510"
},
{
"name": "HTML",
"bytes": "998"
},
{
"name": "Python",
"bytes": "1032169"
},
{
"name": "Shell",
"bytes": "425"
}
],
"symlink_target": ""
}
|
"""
Partial! M4 Design Of Experiments driver.
Consider this only as a hint as to how this might be done.
The code here has only been developed to test feasibility,
and was written by someone without much 'mool' knowledge.
"""
__all__ = ('DOE',)
import mool.Optimization.DOE
from openmdao.lib.drivers.caseiterdriver import CaseIteratorDriver
from openmdao.main.api import Case, ListCaseIterator
from openmdao.main.datatypes.api import Int, Str
class DOE(CaseIteratorDriver):
    """ M4 Design Of Experiments driver. """

    # No 'Option' variables yet.
    type = Str('ccd', iotype='in', desc='Type of experiment design.')
    n_samples = Int(value=1, low=1, iotype='in', desc='Number of samples.')
    lhs = Int(value=2, low=1, iotype='in',
              desc='???, used by LHS and Rand_LHS.')

    def __init__(self):
        super(DOE, self).__init__()
        self.design_variables = []  # List of (name, min, max) tuples.
        self.response_variables = []  # List of names.

    def _pre_execute(self):
        """ Generate cases and hand them to the case iterator machinery. """
        cases = self.generate_cases()
        if cases is None:
            self.raise_exception('No cases generated', RuntimeError)
        self.iterator = ListCaseIterator(cases)
        self.outerator = []
        super(DOE, self)._pre_execute()

    # pylint: disable-msg=E1101
    # "Instance of <class> has no <attr> member"
    def generate_cases(self):
        """ Generate cases to be run based on configuration.

        Determines the minimum sample count for the configured design type,
        adjusts ``n_samples`` if necessary (with a warning), runs the M4 DOE
        sampler and wraps each sample point in a Case.  Returns the list of
        Case objects, or None when ``type`` is unknown.
        """
        nvars = len(self.design_variables)
        if self.type == 'ccd':
            # CCD requires an exact sample count, not just a minimum.
            min_samples = 3 if nvars == 1 else 2 ** nvars + 2 * nvars + 1
            if self.n_samples != min_samples:
                self._logger.warning('Setting n_samples to CCD required value: %d' %
                                     min_samples)
                self.n_samples = min_samples
        elif self.type in ('lhs', 'rand_lhs'):
            # Both LHS variants share the same minimum (was two identical branches).
            min_samples = nvars
        elif self.type == 'oa2':
            min_samples = (nvars - 1) ** 2
        elif self.type == 'oa3':
            min_samples = (nvars - 1) ** 3
        else:
            self._logger.error("Unknown type '%s'" % self.type)
            return None

        if self.n_samples < min_samples:
            self._logger.warning('Updating n_samples to minimum: %d' % min_samples)
            self.n_samples = min_samples

        # Split (name, min, max) triples into parallel min/max lists for M4.
        xmin = [min_val for _name, min_val, _max_val in self.design_variables]
        xmax = [max_val for _name, _min_val, max_val in self.design_variables]
        doe = mool.Optimization.DOE.DOE(xmin, xmax, self.type, self.n_samples,
                                        self.lhs)

        # One Case per sample point: inputs get the sampled values, outputs
        # are declared with placeholder values to be filled in by the run.
        cases = []
        for point in doe.x:
            inputs = [(var[0], None, point[i])
                      for i, var in enumerate(self.design_variables)]
            outputs = [(var, None, None) for var in self.response_variables]
            cases.append(Case(inputs, outputs))
        return cases
|
{
"content_hash": "084d3cc30d6df69760ef8fb15f0c43aa",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 86,
"avg_line_length": 36.12359550561798,
"alnum_prop": 0.5580093312597201,
"repo_name": "DailyActie/Surrogate-Model",
"id": "0bd536995c4203cf6edc2f713cd2127f46e230f5",
"size": "3215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/OpenMDAO-Framework-dev/contrib/m4/doe.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from modularodm import Q
from modularodm.exceptions import QueryException, NoResultsFound
from website import util
from website import prereg
from website import settings
from website import language
from website.util import metrics
from website.util import paths
from website.util import sanitize
from website.models import Institution
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.preprints import views as preprint_views
from website.institutions import views as institution_views
from website.notifications import views as notification_views
def get_globals():
    """Context variables that are available for every template rendered by
    OSFWebRenderer.

    Must run inside a Flask request context (reads ``request``).
    Returns a dict of user info, feature flags, and URL/markup helpers
    consumed by the Mako templates.
    """
    user = _get_current_user()  # None for unauthenticated requests
    # Requests served from a non-canonical host (e.g. an institution's
    # custom domain) need a login URL that points back at the OSF domain.
    if request.host_url != settings.DOMAIN:
        try:
            inst_id = (Institution.find_one(Q('domains', 'eq', request.host.lower())))._id
            login_url = '{}institutions/{}'.format(settings.DOMAIN, inst_id)
        except NoResultsFound:
            # Host is not a known institution domain: rebuild the current
            # URL on the canonical domain instead.
            login_url = request.url.replace(request.host_url, settings.DOMAIN)
    else:
        login_url = request.url
    return {
        'private_link_anonymous': is_private_link_anonymous_view(),
        # Per-user fields fall back to '' when nobody is logged in.
        'user_name': user.username if user else '',
        'user_full_name': user.fullname if user else '',
        'user_id': user._primary_key if user else '',
        'user_locale': user.locale if user and user.locale else '',
        'user_timezone': user.timezone if user and user.timezone else '',
        'user_url': user.url if user else '',
        'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
        'user_api_url': user.api_url if user else '',
        'user_entry_point': metrics.get_entry_point(user) if user else '',
        'display_name': get_display_name(user.fullname) if user else '',
        # Deployment settings / analytics configuration.
        'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
        'piwik_host': settings.PIWIK_HOST,
        'piwik_site_id': settings.PIWIK_SITE_ID,
        'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
        'dev_mode': settings.DEV_MODE,
        'allow_login': settings.ALLOW_LOGIN,
        'cookie_name': settings.COOKIE_NAME,
        'status': status.pop_status_messages(),
        'domain': settings.DOMAIN,
        'api_domain': settings.API_DOMAIN,
        'disk_saving_mode': settings.DISK_SAVING_MODE,
        'language': language,
        'noteworthy_links_node': settings.NEW_AND_NOTEWORTHY_LINKS_NODE,
        'popular_links_node': settings.POPULAR_LINKS_NODE,
        # URL builders and sanitizers exposed directly to templates.
        'web_url_for': util.web_url_for,
        'api_url_for': util.api_url_for,
        'api_v2_url': util.api_v2_url,  # URL function for templates
        'api_v2_base': util.api_v2_url(''),  # Base url used by JS api helper
        'sanitize': sanitize,
        'sjson': lambda s: sanitize.safe_json(s),
        'webpack_asset': paths.webpack_asset,
        'waterbutler_url': settings.WATERBUTLER_URL,
        'login_url': cas.get_login_url(login_url, auto=True),
        'reauth_url': util.web_url_for('auth_logout', redirect_url=request.url, reauth=True),
        'profile_url': cas.get_profile_url(),
        'enable_institutions': settings.ENABLE_INSTITUTIONS,
        'keen_project_id': settings.KEEN_PROJECT_ID,
        'keen_write_key': settings.KEEN_WRITE_KEY,
    }
def is_private_link_anonymous_view():
    """Return whether the current request is through an anonymous
    view-only link; False when no such link matches.
    """
    # Avoid circular import
    from website.project.model import PrivateLink
    view_only_key = request.args.get('view_only')
    try:
        link = PrivateLink.find_one(Q('key', 'eq', view_only_key))
    except QueryException:
        return False
    return link.anonymous
class OsfWebRenderer(WebRenderer):
    """Render a Mako template with OSF context vars.

    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    def __init__(self, *args, **kwargs):
        # Always supply the OSF template-context provider, overriding any
        # caller-provided 'data'.
        kwargs = dict(kwargs, data=get_globals)
        super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Renderer for views that only redirect or raise an error, so no
#: template body is needed.
notemplate = OsfWebRenderer('', renderer=render_mako_string, trust=False)
# Static files (robots.txt, etc.)
def favicon():
    """Serve ``favicon.ico`` from the static assets folder."""
    return send_from_directory(
        settings.STATIC_FOLDER, 'favicon.ico',
        mimetype='image/vnd.microsoft.icon')
def robots():
    """Serves the robots.txt file."""
    # A deployment-local override takes precedence when present.
    local_path = os.path.join(settings.STATIC_FOLDER, 'robots.local.txt')
    robots_file = 'robots.local.txt' if os.path.exists(local_path) else 'robots.txt'
    return send_from_directory(
        settings.STATIC_FOLDER, robots_file,
        mimetype='text/plain')
def goodbye():
    """Render the logout page, sending logged-in users back home."""
    user = _get_current_user()
    if user:
        # Redirect to dashboard if logged in
        return redirect(util.web_url_for('index'))
    status.push_status_message(language.LOGOUT, kind='success', trust=False)
    return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule(
'/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string, trust=False)
),
Rule(
'/api/v1/<path:_>',
['get', 'post'],
HTTPError(http.NOT_FOUND),
json_renderer
),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule(
'/dashboard/',
'get',
website_views.redirect_to_home,
OsfWebRenderer('home.mako', trust=False)
),
Rule(
'/myprojects/',
'get',
website_views.dashboard,
OsfWebRenderer('dashboard.mako', trust=False)
),
Rule(
'/reproducibility/',
'get',
website_views.reproducibility,
notemplate
),
Rule('/about/', 'get', website_views.redirect_about, notemplate),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako', trust=False)),
Rule(['/getting-started/', '/getting-started/email/', '/howosfworks/'], 'get', website_views.redirect_getting_started, notemplate),
Rule('/support/', 'get', {}, OsfWebRenderer('public/pages/support.mako', trust=False)),
Rule(
'/explore/',
'get',
{},
OsfWebRenderer('public/explore.mako', trust=False)
),
Rule(
[
'/messages/',
'/help/'
],
'get',
{},
OsfWebRenderer('public/comingsoon.mako', trust=False)
),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako', trust=False),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako', trust=False),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako', trust=False),
),
Rule(
'/api/v1/meetings/submissions/',
'get',
conference_views.conference_submissions,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule(
'/news/',
'get',
{},
OsfWebRenderer('public/pages/news.mako', trust=False)
),
Rule(
'/prereg/',
'get',
prereg.prereg_landing_page,
OsfWebRenderer('prereg_landing_page.mako', trust=False)
),
Rule(
'/preprints/',
'get',
preprint_views.preprint_landing_page,
OsfWebRenderer('public/pages/preprint_landing.mako', trust=False),
),
Rule(
'/preprint/',
'get',
preprint_views.preprint_redirect,
notemplate,
),
Rule(
'/api/v1/prereg/draft_registrations/',
'get',
prereg.prereg_draft_registrations,
json_renderer,
),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako', trust=False),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
# Web
process_rules(app, [
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
# View will either redirect or display error message
notemplate
),
Rule(
'/resetpassword/<verification_key>/',
['get', 'post'],
auth_views.reset_password,
OsfWebRenderer('public/resetpassword.mako', render_mako_string, trust=False)
),
# Resend confirmation URL linked to in CAS login page
Rule(
'/resend/',
['get', 'post'],
auth_views.resend_confirmation,
OsfWebRenderer('resend.mako', render_mako_string, trust=False)
),
# TODO: Remove `auth_register_post`
Rule(
'/register/',
'post',
auth_views.auth_register_post,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
Rule(
[
'/login/',
'/account/'
],
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule(
'/login/first/',
'get',
auth_views.auth_login,
OsfWebRenderer('public/login.mako', trust=False),
endpoint_suffix='__first', view_kwargs={'first': True}
),
Rule(
'/logout/',
'get',
auth_views.auth_logout,
notemplate
),
Rule(
'/forgotpassword/',
'get',
auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako', trust=False)
),
Rule(
'/forgotpassword/',
'post',
auth_views.forgot_password_post,
OsfWebRenderer('public/login.mako', trust=False)
),
Rule(
[
'/midas/',
'/summit/',
'/accountbeta/',
'/decline/'
],
'get',
auth_views.auth_registerbeta,
notemplate
),
# FIXME or REDIRECTME: This redirects to settings when logged in, but gives an error (no template) when logged out
Rule(
'/login/connected_tools/',
'get',
landing_page_views.connected_tools,
OsfWebRenderer('public/login_landing.mako', trust=False)
),
# FIXME or REDIRECTME: mod-meta error when logged out: signin form not rendering for login_landing sidebar
Rule(
'/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
OsfWebRenderer('public/login_landing.mako', trust=False)
),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
['/user/merge/'],
'get',
auth_views.merge_user_get,
OsfWebRenderer('merge_accounts.mako', trust=False)
),
Rule(
['/user/merge/'],
'post',
auth_views.merge_user_post,
OsfWebRenderer('merge_accounts.mako', trust=False)
),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/tokens/',
'get',
profile_views.personal_access_token_list,
OsfWebRenderer('profile/personal_tokens_list.mako', trust=False)
),
Rule(
'/settings/tokens/create/',
'get',
profile_views.personal_access_token_register,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
Rule(
'/settings/tokens/<_id>/',
'get',
profile_views.personal_access_token_detail,
OsfWebRenderer('profile/personal_tokens_detail.mako', trust=False)
),
# TODO: Uncomment once outstanding issues with this feature are addressed
# Rule(
# '/@<twitter_handle>/',
# 'get',
# profile_views.redirect_to_twitter,
# OsfWebRenderer('error.mako', render_mako_string, trust=False)
# ),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule(
'/search/',
'get',
{},
OsfWebRenderer('search.mako', trust=False)
),
Rule(
'/share/',
'get',
{},
OsfWebRenderer('share_search.mako', trust=False)
),
Rule(
'/share/registration/',
'get',
{'register': settings.SHARE_REGISTRATION_URL},
OsfWebRenderer('share_registration.mako', trust=False)
),
Rule(
'/share/help/',
'get',
{'help': settings.SHARE_API_DOCS_URL},
OsfWebRenderer('share_api_docs.mako', trust=False)
),
Rule( # FIXME: Dead route; possible that template never existed; confirm deletion candidate with ErinB
'/share_dashboard/',
'get',
{},
OsfWebRenderer('share_dashboard.mako', trust=False)
),
Rule(
'/share/atom/',
'get',
search_views.search_share_atom,
xml_renderer
),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Institution
process_rules(app, [
Rule('/institutions/<inst_id>/', 'get', institution_views.view_institution, OsfWebRenderer('institution.mako', trust=False))
])
# Project
# Web
process_rules(app, [
# '/' route loads home.mako if logged in, otherwise loads landing.mako
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako', trust=False)),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('landing.mako', trust=False)),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako', trust=False)),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule( # TODO: Where, if anywhere, is this route used?
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako', trust=False)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<metaschema_id>/',
'/project/<pid>/node/<nid>/register/<metaschema_id>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'post',
project_views.drafts.new_draft_registration,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/',
],
'get',
project_views.drafts.edit_draft_registration_page,
OsfWebRenderer('project/edit_draft_registration.mako', trust=False)),
Rule(
[
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
],
'get',
project_views.drafts.draft_before_register_page,
OsfWebRenderer('project/register_draft.mako', trust=False)),
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
# Statistics
Rule(
[
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
],
'get',
project_views.node.project_statistics_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
'/project/<pid>/files/deleted/<trashed_id>/',
'/project/<pid>/node/<nid>/files/deleted/<trashed_id>/',
],
'get',
addon_views.addon_deleted_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
#api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by [coming replacement]
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
], 'get', project_views.node.get_children, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
# Draft Registrations
Rule([
'/project/<pid>/drafts/',
], 'get', project_views.drafts.get_draft_registrations, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'get', project_views.drafts.get_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'put', project_views.drafts.update_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/',
], 'delete', project_views.drafts.delete_draft_registration, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/submit/',
], 'post', project_views.drafts.submit_draft_for_review, json_renderer),
# Meta Schemas
Rule([
'/project/drafts/schemas/',
], 'get', project_views.drafts.get_metaschemas, json_renderer),
Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
Rule([
'/project/<pid>/log/',
'/project/<pid>/node/<nid>/log/',
], 'get', project_views.log.get_logs, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule(
[
'/project/<pid>/contributor/remove/',
'/project/<pid>/node/<nid>/contributor/remove/',
],
'POST',
project_views.contributor.project_remove_contributor,
json_renderer,
),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/drafts/<draft_id>/register/',
'/project/<pid>/node/<nid>/drafts/<draft_id>/register/',
], 'post', project_views.drafts.register_draft_registration, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/withdraw/',
'/project/<pid>/node/<nid>/withdraw/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'get',
project_views.register.node_identifiers_get,
json_renderer,
),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
project_views.register.node_identifiers_post,
json_renderer,
),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
Rule([
'/watched/logs/'
], 'get', website_views.watched_logs_get, json_renderer),
### Accounts ###
Rule([
'/user/merge/'
], 'post', auth_views.merge_user_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/tree/',
'/project/<pid>/node/<nid>/tree/'
],
'get',
project_views.node.get_node_tree,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
)
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
|
{
"content_hash": "71489b38eb278d67dfd37ffce7b4dca5",
"timestamp": "",
"source": "github",
"line_count": 1631,
"max_line_length": 139,
"avg_line_length": 31.368485591661557,
"alnum_prop": 0.5036159649740042,
"repo_name": "zachjanicki/osf.io",
"id": "f68f22235a0a9aec6cd6bd6a0246249c050dc1ba",
"size": "51186",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "145253"
},
{
"name": "HTML",
"bytes": "107077"
},
{
"name": "JavaScript",
"bytes": "1579614"
},
{
"name": "Mako",
"bytes": "666549"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5326873"
}
],
"symlink_target": ""
}
|
"""This code example gets an team by its id. To determine which teams
exist, run get_all_teams.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
team_service = client.GetService(
'TeamService', 'https://www.google.com', 'v201203')
# Set the id of the team to get.
team_id = 'INSERT_TEAM_ID_HERE'
# Get team.
team = team_service.GetTeam(team_id)[0]
# Display results.
print ('Team with id \'%s\' and name \'%s\' was found.'
% (team['id'], team['name']))
|
{
"content_hash": "2791588ed93e303af919d8c156a56dca",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.6685330347144457,
"repo_name": "donspaulding/adspygoogle",
"id": "7425e5f7dcc71d7bc418e59b2ed16c9d14455a29",
"size": "1511",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201203/get_team.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
import logging
from .scriptable_strategy import ScriptableStrategy
def create_strategy(args=None):
    """Factory returning the default IBS strategy instance.

    The *args* parameter is accepted for factory-interface compatibility
    and is ignored.
    """
    strategy = __IBS()
    return strategy
class __IBSStrategyBase(ScriptableStrategy):
    """Internal Bar Strength (IBS) strategy parameterized by thresholds.

    IBS = (C - L) / (H - L); buy below ``buy_value``, sell above
    ``sell_value``, entering and exiting at the close.
    """

    def __init__(self, buy_value, sell_value):
        strategy_name = 'IBS_%f_%f' % (buy_value, sell_value)
        buy_script = '((C-L)/(H-L)) < %f' % buy_value
        sell_script = '((C-L)/(H-L)) > %f' % sell_value
        super().__init__(strategy_name, buy_script, sell_script,
                         'close', 'close')
        self.buy_value_ = buy_value
        self.sell_value_ = sell_value
class __IBS(__IBSStrategyBase):
    # Default IBS configuration: buy when IBS < 0.2, sell when IBS > 0.8.
    def __init__(self):
        super().__init__(0.2, 0.8)
|
{
"content_hash": "a8ca26a4c79ab21ebd5326f23885b2f3",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 59,
"avg_line_length": 28.652173913043477,
"alnum_prop": 0.4795144157814871,
"repo_name": "stonewell/learn-curve",
"id": "50f55745f8c794bd05bad89491c4ea391f85486a",
"size": "659",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/strategy/ibs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "799918"
}
],
"symlink_target": ""
}
|
"""
Classes for dynamic generation of mock objects.
"""
import inspect
def serialize_obj(obj):
    """Recursively convert *obj* into a deterministic string form.

    Floats are rounded to 10 decimal places before stringification;
    dicts, lists and tuples are rebuilt with every element serialized,
    then stringified as a whole.  Anything else is passed to ``str``.
    """
    if isinstance(obj, float):
        return str(round(obj, 10))
    if isinstance(obj, dict):
        return str({key: serialize_obj(value) for key, value in obj.items()})
    if isinstance(obj, list):
        return str([serialize_obj(item) for item in obj])
    if isinstance(obj, tuple):
        return str(tuple(serialize_obj(item) for item in obj))
    return str(obj)
def serialize_args(*args, **kwargs):
    """Serialize a call's positional and keyword arguments as one string.

    Workaround for float string conversion issues in Python 2.6.
    """
    call_signature = (args, kwargs)
    return serialize_obj(call_signature)
class Mock(object):
    """Replays previously recorded attribute values and call results.

    ``values`` maps attribute names either to a list of successive values
    (plain attributes) or to a dict keyed by serialized call arguments,
    each entry holding a list of successive return values (callables).
    """

    def __init__(self, values):
        self._values = values
        self._access_count = {}

    def _get_next_value(self, name):
        # Advance this attribute's replay cursor; first access yields index 0.
        index = self._access_count.get(name)
        index = 0 if index is None else index + 1
        self._access_count[name] = index
        return self._values[name][index]

    def _get_next_ret_value(self, name, params):
        # Callable results keep one cursor per (name, serialized-args) pair.
        per_params = self._access_count.get(name)
        if per_params is None:
            per_params = {}
            self._access_count[name] = per_params
        index = per_params.get(params)
        index = 0 if index is None else index + 1
        per_params[params] = index
        return self._values[name][params][index]

    def has_values(self):
        """Return True when any recorded values are present."""
        return bool(self._values)

    def __getattr__(self, name):
        # Dunder names fall back to normal lookup so Python internals work.
        if name.startswith('__') and name.endswith('__'):
            return object.__getattribute__(self, name)
        recorded = self._values[name]
        if isinstance(recorded, dict):
            def replay(*args, **kwargs):
                params = serialize_args(args, kwargs)
                return self._get_next_ret_value(name, params)
            return replay
        return self._get_next_value(name)

    def __str__(self):
        return self._get_next_value('__str__')

    def __iter__(self):
        return self._get_next_value('__iter__').__iter__()

    def __len__(self):
        return self._get_next_value('__len__')

    def __getitem__(self, key):
        return self._get_next_ret_value('__getitem__', str(key))

    def __call__(self, *args, **kwargs):
        return self._get_next_ret_value('__call__', serialize_args(args, kwargs))
class MockProxy(object):
    """Proxy that wraps a real object and records every interaction.

    After exercising the wrapped object through the proxy, ``get_mock()``
    converts the recorded attribute reads and call results into a ``Mock``
    that can replay them without the real object.
    """

    # Text types must never be wrapped in a proxy even though they support
    # __getitem__.  BUG FIX: the bare `unicode` reference crashed on
    # Python 3, where `str` is the only text type.
    try:
        _TEXT_TYPES = (str, unicode)  # noqa: F821 -- Python 2 only
    except NameError:
        _TEXT_TYPES = (str,)

    def __init__(self, wrapped):
        self._wrapped = wrapped
        self._recorded_values = {}

    def _get_proxy_object(self, obj):
        """Wrap *obj* in a MockProxy when it has attributes or items."""
        if (hasattr(obj, '__dict__') or
                isinstance(obj, (tuple, list, dict))):
            return MockProxy(obj)
        return obj

    def __getattr__(self, name):
        if name in ['_wrapped']:
            return object.__getattribute__(self, name)
        attr = getattr(self._wrapped, name)
        if (inspect.isfunction(attr) or
                inspect.ismethod(attr) or
                inspect.isbuiltin(attr)):
            def newfunc(*args, **kwargs):
                # Forward the call, proxy the result, and record it keyed
                # by the serialized arguments.
                result = attr(*args, **kwargs)
                p = self._get_proxy_object(result)
                params = serialize_args(args, kwargs)
                self._add_recorded_ret_value(name, params, p)
                return p
            return newfunc
        elif (hasattr(attr, '__dict__') or
              (hasattr(attr, '__getitem__') and
               not isinstance(attr, self._TEXT_TYPES))):
            p = MockProxy(attr)
        else:
            p = attr
        self._add_recorded_value(name, p)
        return p

    def __setattr__(self, name, value):
        # The proxy's own bookkeeping attributes live on the proxy; every
        # other assignment is forwarded to the wrapped object.
        if name in ['_wrapped', '_recorded_values']:
            object.__setattr__(self, name, value)
        else:
            setattr(self._wrapped, name, value)

    def _add_recorded_ret_value(self, name, params, val):
        """Record a call result keyed by method name and serialized args."""
        d = self._recorded_values.get(name)
        if d is None:
            d = {}
            self._recorded_values[name] = d
        l = d.get(params)
        if l is None:
            l = []
            d[params] = l
        l.append(val)

    def _add_recorded_value(self, name, val):
        """Record a plain attribute value under *name*."""
        if name not in self._recorded_values:
            self._recorded_values[name] = []
        self._recorded_values[name].append(val)

    def get_mock(self):
        """Convert the recorded interactions into a replayable Mock."""
        values = {}
        for k, v in self._recorded_values.items():
            if isinstance(v, dict):
                # Call results: {serialized_params: [successive returns]}.
                d = {}
                values[k] = d
                for k1, v1 in v.items():
                    l = []
                    d[k1] = l
                    for i1 in v1:
                        if isinstance(i1, MockProxy):
                            l.append(i1.get_mock())
                        else:
                            l.append(i1)
            else:
                # Plain attribute values: a list of successive snapshots.
                l = []
                values[k] = l
                for i in v:
                    if isinstance(i, MockProxy):
                        l.append(i.get_mock())
                    elif isinstance(i, dict):
                        # BUG FIX: iterate the recorded dict itself (i),
                        # not the enclosing list (v), which has no .items().
                        d = {}
                        for k1, v1 in i.items():
                            if isinstance(v1, MockProxy):
                                d[k1] = v1.get_mock()
                            else:
                                d[k1] = v1
                        l.append(d)
                    elif isinstance(i, list):
                        l1 = []
                        for i1 in i:
                            if isinstance(i1, MockProxy):
                                l1.append(i1.get_mock())
                            else:
                                l1.append(i1)
                        l.append(l1)
                    else:
                        l.append(i)
        return Mock(values)

    def __str__(self):
        s = str(self._wrapped)
        self._add_recorded_value('__str__', s)
        return s

    def __len__(self):
        l = len(self._wrapped)
        self._add_recorded_value('__len__', l)
        return l

    def __iter__(self):
        it = []
        for i in self._wrapped:
            it.append(self._get_proxy_object(i))
        self._add_recorded_value('__iter__', it)
        return iter(it)

    def __getitem__(self, key):
        p = self._get_proxy_object(self._wrapped[key])
        self._add_recorded_ret_value('__getitem__', str(key), p)
        return p

    def __call__(self, *args, **kwargs):
        c = self._wrapped(*args, **kwargs)
        p = self._get_proxy_object(c)
        params = serialize_args(args, kwargs)
        self._add_recorded_ret_value('__call__', params, p)
        return p
|
{
"content_hash": "a3ef8e5a408d608cbff5ac75a0aef65f",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 73,
"avg_line_length": 30.955156950672645,
"alnum_prop": 0.4540055048529625,
"repo_name": "tomasdubec/openstack-cinder",
"id": "d39a96300dfda8c016afc2d669d5c0bdeef5c459",
"size": "7541",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cinder/tests/windows/mockproxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
Created on Fri Jan 9 12:52:31 2015
@author: stuart
"""
import os
import glob
import numpy as np
import pysac.yt as sacyt
import pysac.mhs_atmosphere as atm
l_mpi=False
scales, physical_constants = \
atm.units_const.get_parameters()
#define the models required
spruits = ['spruit_const','spruit_sqrt','spruit_linear','spruit_square']
#spruits = ['spruit_const']
oneD_arrays = {}
oned_dataset = []
#loop over all four models
for spruit in spruits:
datadir = os.path.expanduser('~/Documents/mhs_atmosphere/'+
spruit+'/')
figsdir = os.path.expanduser('~/Documents/mhs_atmosphere/figs/'+spruit+'/')
if not os.path.exists(figsdir):
os.makedirs(figsdir)
#open all gdf files in the model directory
files = glob.glob(datadir+'/*')
#files = glob.glob(datadir+'/'+spruits[0]+'_3Daux.gdf')
files.sort()
print(files)
for file_ in files:
#ds = yt.load(file_)
ds = sacyt.SACGDFDataset(file_)
vars_ = ds.index.field_list
for var_ in vars_:
var_field = var_[1]
max_var = np.max(np.abs(ds.index.grids[0][var_field]))/\
ds.index.grids[0][var_field].unit_quantity
var = ds.index.grids[0][var_field]
if max_var > 0.:
# save 1D slices from each variable for plotting
oneD_arrays = atm.mhs_plot.make_1d_slices(ds, var_field, oneD_arrays)
# select the central slice to plot normal to the y-plane
nx_2 = ds.domain_dimensions[1]/2
if 'mag' in var_field:
lines = True
elif 'density' in var_field or 'pressure' in var_field:
lines = True
if 'D' in file_:
lines = False
else:
lines = False
if '_HS' in var_field:
contours = False
else:
contours = True
# save 2D plot in model's figures directory
figname = figsdir+spruit+'_'+var_field+'.eps'
atm.mhs_plot.make_2d_plot(ds, var_field, figname,
normal=['y',nx_2],
aspect=0.2, lines=lines,
contours=contours,
model=spruit)
if ('gas','thermal_pressure') in ds.derived_field_list:
var_field = 'thermal_pressure'
figname = figsdir+spruit+'_'+var_field+'.eps'
lines, contours = True, True
atm.mhs_plot.make_2d_plot(ds, var_field, figname,
normal=['y',nx_2],
aspect=0.2, lines=lines,
contours=contours,
model=spruit)
if ('gas','mag_pressure') in ds.derived_field_list:
var_field = 'mag_pressure'
figname = figsdir+spruit+'_'+var_field+'.eps'
lines, contours = True, True
atm.mhs_plot.make_2d_plot(ds, var_field, figname,
normal=['y',nx_2],
aspect=0.2, lines=lines,
contours=contours,
model=spruit)
plot_label = figsdir+spruit+'_axis.eps'
keys = ['alfven_speed','sound_speed','mag_field_z_bg']
subkeys = ['axis']
atm.mhs_plot.make_1d_zplot(oneD_arrays, plot_label, keys=keys, subkeys=subkeys,
ylog = True, xlog = True, xlim = (0,12)
)
|
{
"content_hash": "f013b4160aa35473e1d3e4a81f7d6f09",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 85,
"avg_line_length": 42.07608695652174,
"alnum_prop": 0.46912942392146734,
"repo_name": "SWAT-Sheffield/pysac",
"id": "901a3878c491943e5e7ee451342b353816228eda",
"size": "3895",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/mhs_atmosphere/plots/spruit_plot.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "251979"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
    """Unit tests for the NetApp 7-mode NFS Cinder driver."""
    def setUp(self):
        super(NetApp7modeNfsDriverTestCase, self).setUp()
        kwargs = {
            'configuration': self.get_config_7mode(),
            'host': 'openstack@7modenfs',
        }
        # Patch out root-helper lookup and the remotefs mount client so the
        # driver can be constructed without touching the host system.
        with mock.patch.object(utils, 'get_root_helper',
                               return_value=mock.Mock()):
            with mock.patch.object(remotefs_brick, 'RemoteFsClient',
                                   return_value=mock.Mock()):
                self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
                self.driver._mounted_shares = [fake.NFS_SHARE]
                self.driver.ssc_vols = True
                self.driver.zapi_client = mock.Mock()
                self.driver.perf_library = mock.Mock()
    def get_config_7mode(self):
        """Build a minimal 7-mode driver configuration for tests."""
        # NOTE(review): reuses the cmode fake-config factory -- presumably
        # the option set is compatible with 7-mode; confirm in na_fakes.
        config = na_fakes.create_configuration_cmode()
        config.netapp_storage_protocol = 'nfs'
        config.netapp_login = 'root'
        config.netapp_password = 'pass'
        config.netapp_server_hostname = '127.0.0.1'
        config.netapp_transport_type = 'http'
        config.netapp_server_port = '80'
        return config
    @ddt.data({'share': None, 'is_snapshot': False},
              {'share': None, 'is_snapshot': True},
              {'share': 'fake_share', 'is_snapshot': False},
              {'share': 'fake_share', 'is_snapshot': True})
    @ddt.unpack
    def test_clone_backing_file_for_volume(self, share, is_snapshot):
        mock_get_export_ip_path = self.mock_object(
            self.driver, '_get_export_ip_path',
            return_value=(fake.SHARE_IP, fake.EXPORT_PATH))
        mock_get_actual_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export',
            return_value='fake_path')
        self.driver._clone_backing_file_for_volume(
            fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share,
            is_snapshot=is_snapshot)
        mock_get_export_ip_path.assert_called_once_with(
            fake.VOLUME_ID, share)
        mock_get_actual_path_for_export.assert_called_once_with(
            fake.EXPORT_PATH)
        self.driver.zapi_client.clone_file.assert_called_once_with(
            'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone',
            None)
    @ddt.data({'nfs_sparsed_volumes': True},
              {'nfs_sparsed_volumes': False})
    @ddt.unpack
    def test_get_pool_stats(self, nfs_sparsed_volumes):
        # Sparse volumes imply thin provisioning; thick is the inverse.
        self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
        thick = not nfs_sparsed_volumes
        total_capacity_gb = na_utils.round_down(
            fake.TOTAL_BYTES // units.Gi, '0.01')
        free_capacity_gb = na_utils.round_down(
            fake.AVAILABLE_BYTES // units.Gi, '0.01')
        provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
        capacity = {
            'reserved_percentage': fake.RESERVED_PERCENTAGE,
            'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
            'total_capacity_gb': total_capacity_gb,
            'free_capacity_gb': free_capacity_gb,
            'provisioned_capacity_gb': provisioned_capacity_gb,
        }
        self.mock_object(self.driver,
                         '_get_share_capacity_info',
                         return_value=capacity)
        self.mock_object(self.driver.perf_library,
                         'get_node_utilization',
                         return_value=30.0)
        result = self.driver._get_pool_stats(filter_function='filter',
                                             goodness_function='goodness')
        expected = [{'pool_name': '192.168.99.24:/fake/export/path',
                     'QoS_support': False,
                     'consistencygroup_support': True,
                     'thick_provisioning_support': thick,
                     'thin_provisioning_support': not thick,
                     'free_capacity_gb': 12.0,
                     'total_capacity_gb': 4468.0,
                     'reserved_percentage': 7,
                     'max_over_subscription_ratio': 19.0,
                     'multiattach': True,
                     'provisioned_capacity_gb': 4456.0,
                     'utilization': 30.0,
                     'filter_function': 'filter',
                     'goodness_function': 'goodness'}]
        self.assertEqual(expected, result)
    def test_shortlist_del_eligible_files(self):
        mock_get_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export')
        mock_get_path_for_export.return_value = fake.FLEXVOL
        mock_get_file_usage = self.mock_object(
            self.driver.zapi_client, 'get_file_usage')
        mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
        # Every file is expected back, each paired with its usage value.
        expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
                    in fake.FILE_LIST]
        result = self.driver._shortlist_del_eligible_files(
            fake.NFS_SHARE, fake.FILE_LIST)
        self.assertEqual(expected, result)
    def test_shortlist_del_eligible_files_empty_list(self):
        mock_get_export_ip_path = self.mock_object(
            self.driver, '_get_export_ip_path')
        mock_get_export_ip_path.return_value = ('', '/export_path')
        mock_get_path_for_export = self.mock_object(
            self.driver.zapi_client, 'get_actual_path_for_export')
        mock_get_path_for_export.return_value = fake.FLEXVOL
        result = self.driver._shortlist_del_eligible_files(
            fake.NFS_SHARE, [])
        self.assertEqual([], result)
    @ddt.data({'has_space': True, 'expected': True},
              {'has_space': False, 'expected': False})
    @ddt.unpack
    def test_is_share_clone_compatible(self, has_space, expected):
        mock_share_has_space_for_clone = self.mock_object(
            self.driver, '_share_has_space_for_clone')
        mock_share_has_space_for_clone.return_value = has_space
        result = self.driver._is_share_clone_compatible(fake.VOLUME,
                                                        fake.NFS_SHARE)
        self.assertEqual(expected, result)
    def test__get_volume_model_update(self):
        """Driver is not expected to return a model update."""
        self.assertIsNone(
            self.driver._get_volume_model_update(fake.VOLUME_REF))
    def test_delete_cgsnapshot(self):
        mock_delete_file = self.mock_object(self.driver, '_delete_file')
        model_update, snapshots_model_update = (
            self.driver.delete_cgsnapshot(
                fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT]))
        mock_delete_file.assert_called_once_with(
            fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name'])
        self.assertIsNone(model_update)
        self.assertIsNone(snapshots_model_update)
    def test_get_snapshot_backing_flexvol_names(self):
        # Four hosts but only three distinct flexvols (volume1 repeats).
        snapshots = [
            {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
            {'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}},
            {'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}},
            {'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
        ]
        hosts = [snap['volume']['host'] for snap in snapshots]
        flexvols = self.driver._get_flexvol_names_from_hosts(hosts)
        self.assertEqual(3, len(flexvols))
        self.assertIn('volume1', flexvols)
        self.assertIn('volume2', flexvols)
        self.assertIn('volume3', flexvols)
    def test_check_for_setup_error(self):
        mock_get_ontapi_version = self.mock_object(
            self.driver.zapi_client, 'get_ontapi_version')
        mock_get_ontapi_version.return_value = ['1', '10']
        mock_add_looping_tasks = self.mock_object(
            self.driver, '_add_looping_tasks')
        mock_super_check_for_setup_error = self.mock_object(
            nfs_base.NetAppNfsDriver, 'check_for_setup_error')
        self.driver.check_for_setup_error()
        mock_get_ontapi_version.assert_called_once_with()
        mock_add_looping_tasks.assert_called_once_with()
        mock_super_check_for_setup_error.assert_called_once_with()
    def test_add_looping_tasks(self):
        mock_super_add_looping_tasks = self.mock_object(
            nfs_base.NetAppNfsDriver, '_add_looping_tasks')
        self.driver._add_looping_tasks()
        mock_super_add_looping_tasks.assert_called_once_with()
    def test_handle_ems_logging(self):
        volume_list = ['vol0', 'vol1', 'vol2']
        self.mock_object(
            self.driver, '_get_backing_flexvol_names',
            return_value=volume_list)
        self.mock_object(
            dot_utils, 'build_ems_log_message_0',
            return_value='fake_base_ems_log_message')
        self.mock_object(
            dot_utils, 'build_ems_log_message_1',
            return_value='fake_pool_ems_log_message')
        mock_send_ems_log_message = self.mock_object(
            self.driver.zapi_client, 'send_ems_log_message')
        self.driver._handle_ems_logging()
        # Both the base and the pool EMS messages must be sent, in order.
        mock_send_ems_log_message.assert_has_calls([
            mock.call('fake_base_ems_log_message'),
            mock.call('fake_pool_ems_log_message'),
        ])
        dot_utils.build_ems_log_message_0.assert_called_once_with(
            self.driver.driver_name, self.driver.app_version,
            self.driver.driver_mode)
        dot_utils.build_ems_log_message_1.assert_called_once_with(
            self.driver.driver_name, self.driver.app_version, None,
            volume_list, [])
    def test_get_backing_flexvol_names(self):
        result = self.driver._get_backing_flexvol_names()
        self.assertEqual('path', result[0])
|
{
"content_hash": "df3f49ba9fb410dc46ba923ebcf55988",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 76,
"avg_line_length": 40.72047244094488,
"alnum_prop": 0.5983757130426375,
"repo_name": "ge0rgi/cinder",
"id": "378878a9430910c9b3df68424afcfc2806222b55",
"size": "10972",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/ocata",
"path": "cinder/tests/unit/volume/drivers/netapp/dataontap/test_nfs_7mode.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19963591"
}
],
"symlink_target": ""
}
|
from .responses import PERMISSION_DENIED
def sudo_required(func):
    """Decorator that rejects requests from non-superusers.

    Locates the request object among the wrapped callable's arguments:
    keyword ``request``, a positional parameter named ``request``, or the
    ``request`` attribute of the bound instance when wrapping a method.
    Returns ``PERMISSION_DENIED`` when the requesting user is not a
    superuser; otherwise calls through to *func*.  If no request can be
    located, the call is allowed (unchanged legacy behavior).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        request = None
        if 'request' in kwargs:
            request = kwargs['request']
        elif 'request' in func.__code__.co_varnames:
            request_pos = func.__code__.co_varnames.index('request')
            request = args[request_pos]
        elif 'self' in func.__code__.co_varnames:
            if 'request' in vars(args[0]):
                # BUG FIX: plain functions have no ``__self__``; read the
                # request from the bound instance (args[0]) instead.
                request = args[0].request
        if request and not request.user.is_superuser:
            return PERMISSION_DENIED
        return func(*args, **kwargs)
    return wrapper
def auth_required(func):
    """Decorator that rejects unauthenticated requests.

    Locates the request object among the wrapped callable's arguments:
    keyword ``request``, a positional parameter named ``request``, or the
    ``request`` attribute of the bound instance when wrapping a method.
    Returns ``PERMISSION_DENIED`` when ``request.user`` is not
    authenticated; otherwise calls through to *func*.  If no request can
    be located, the call is allowed (unchanged legacy behavior).
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        request = None
        if 'request' in kwargs:
            request = kwargs['request']
        elif 'request' in func.__code__.co_varnames:
            request_pos = func.__code__.co_varnames.index('request')
            request = args[request_pos]
        elif 'self' in func.__code__.co_varnames:
            if 'request' in vars(args[0]):
                # BUG FIX: plain functions have no ``__self__``; read the
                # request from the bound instance (args[0]) instead.
                request = args[0].request
        if request:
            if not request.user.is_authenticated:
                return PERMISSION_DENIED
        return func(*args, **kwargs)
    return wrapper
|
{
"content_hash": "723151db855ec01df2f424b727be4597",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 59,
"avg_line_length": 29.135135135135137,
"alnum_prop": 0.6753246753246753,
"repo_name": "YuriyLisovskiy/messenger",
"id": "990f14975b16843d8524e4a617a3c64846c0d865",
"size": "1078",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/view_modifiers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "131309"
},
{
"name": "HTML",
"bytes": "30440"
},
{
"name": "JavaScript",
"bytes": "267623"
},
{
"name": "Python",
"bytes": "66525"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import dataclasses
from dataclasses import dataclass
from typing import Iterable, List, Mapping, Optional, Tuple
from pants.backend.python.subsystems.python_native_code import PythonNativeCodeSubsystem
from pants.backend.python.util_rules import pex_environment
from pants.backend.python.util_rules.pex_environment import (
PexEnvironment,
PexSubsystem,
PythonExecutable,
)
from pants.core.util_rules import external_tool
from pants.core.util_rules.external_tool import (
DownloadedExternalTool,
ExternalToolRequest,
TemplatedExternalTool,
)
from pants.engine.fs import CreateDigest, Digest, Directory, MergeDigests
from pants.engine.internals.selectors import MultiGet
from pants.engine.platform import Platform
from pants.engine.process import Process, ProcessCacheScope
from pants.engine.rules import Get, collect_rules, rule
from pants.option.global_options import GlobalOptions, ca_certs_path_to_file_content
from pants.util.frozendict import FrozenDict
from pants.util.logging import LogLevel
from pants.util.meta import classproperty, frozen_after_init
from pants.util.strutil import create_path_env_var
class PexCli(TemplatedExternalTool):
    """External-tool metadata for downloading the `pex` CLI release binary."""

    options_scope = "pex-cli"
    name = "pex"
    help = "The PEX (Python EXecutable) tool (https://github.com/pantsbuild/pex)."

    default_version = "v2.1.107"
    default_url_template = "https://github.com/pantsbuild/pex/releases/download/{version}/pex"
    version_constraints = ">=2.1.103,<3.0"

    @classproperty
    def default_known_versions(cls):
        # The pex release is a single universal artifact, so every platform
        # entry shares one sha256 and size.
        sha256 = "bfc19b16e0f298742dd933289bd8057dd503f9ad0678310412d382800d48b3ae"
        size = "3840814"
        platforms = ("macos_arm64", "macos_x86_64", "linux_x86_64", "linux_arm64")
        return [
            f"{cls.default_version}|{plat}|{sha256}|{size}" for plat in platforms
        ]
# NOTE(review): @frozen_after_init presumably allows mutation during __init__
# and freezes the instance afterwards (see pants.util.meta) — which is why a
# manual __init__ can assign normalized values below. Confirm against its docs.
@frozen_after_init
@dataclass(unsafe_hash=True)
class PexCliProcess:
    """A request to invoke the Pex CLI, turned into a `Process` by `setup_pex_cli_process`."""

    subcommand: tuple[str, ...]
    extra_args: tuple[str, ...]
    set_resolve_args: bool
    # compare=False: the human-readable description does not participate in
    # __eq__/__hash__, so otherwise-identical requests compare equal.
    description: str = dataclasses.field(compare=False)
    additional_input_digest: Optional[Digest]
    extra_env: Optional[FrozenDict[str, str]]
    output_files: Optional[Tuple[str, ...]]
    output_directories: Optional[Tuple[str, ...]]
    python: Optional[PythonExecutable]
    level: LogLevel
    concurrency_available: int
    cache_scope: ProcessCacheScope

    def __init__(
        self,
        *,
        subcommand: Iterable[str],
        extra_args: Iterable[str],
        description: str,
        set_resolve_args: bool = True,
        additional_input_digest: Optional[Digest] = None,
        extra_env: Optional[Mapping[str, str]] = None,
        output_files: Optional[Iterable[str]] = None,
        output_directories: Optional[Iterable[str]] = None,
        python: Optional[PythonExecutable] = None,
        level: LogLevel = LogLevel.INFO,
        concurrency_available: int = 0,
        cache_scope: ProcessCacheScope = ProcessCacheScope.SUCCESSFUL,
    ) -> None:
        """Normalize iterable/mapping arguments into hashable tuples/FrozenDicts."""
        self.subcommand = tuple(subcommand)
        self.extra_args = tuple(extra_args)
        self.set_resolve_args = set_resolve_args
        self.description = description
        self.additional_input_digest = additional_input_digest
        # Empty mappings/iterables normalize to None rather than empty containers.
        self.extra_env = FrozenDict(extra_env) if extra_env else None
        self.output_files = tuple(output_files) if output_files else None
        self.output_directories = tuple(output_directories) if output_directories else None
        self.python = python
        self.level = level
        self.concurrency_available = concurrency_available
        self.cache_scope = cache_scope
        # The manual __init__ bypasses dataclass-generated init, so invoke the
        # validation hook explicitly.
        self.__post_init__()

    def __post_init__(self) -> None:
        # --pex-root is managed by setup_pex_cli_process / PexEnvironment.
        if "--pex-root-path" in self.extra_args:
            raise ValueError("`--pex-root` flag not allowed. We set its value for you.")
class PexPEX(DownloadedExternalTool):
    """A downloaded copy of the Pex PEX binary, ready to drive CLI invocations."""
@rule
async def download_pex_pex(pex_cli: PexCli, platform: Platform) -> PexPEX:
    """Fetch the Pex PEX release for the current platform."""
    tool = await Get(DownloadedExternalTool, ExternalToolRequest, pex_cli.get_request(platform))
    return PexPEX(digest=tool.digest, exe=tool.exe)
@rule
async def setup_pex_cli_process(
    request: PexCliProcess,
    pex_pex: PexPEX,
    pex_env: PexEnvironment,
    python_native_code: PythonNativeCodeSubsystem.EnvironmentAware,
    global_options: GlobalOptions,
    pex_subsystem: PexSubsystem,
) -> Process:
    """Translate a `PexCliProcess` request into a runnable `Process`.

    Assembles the sandbox input digest (pex PEX + sandbox tmpdir + optional CA
    certs + optional extra inputs), the argv (subcommand, global flags,
    verbosity, resolve flags, then caller-provided args), and the environment.
    """
    tmpdir = ".tmp"
    gets: List[Get] = [Get(Digest, CreateDigest([Directory(tmpdir)]))]
    cert_args = []
    # If a CA bundle is configured globally, materialize it into the sandbox
    # and point Pex at it via --cert.
    if global_options.ca_certs_path:
        ca_certs_fc = ca_certs_path_to_file_content(global_options.ca_certs_path)
        gets.append(Get(Digest, CreateDigest((ca_certs_fc,))))
        cert_args = ["--cert", ca_certs_fc.path]
    digests_to_merge = [pex_pex.digest]
    digests_to_merge.extend(await MultiGet(gets))
    if request.additional_input_digest:
        digests_to_merge.append(request.additional_input_digest)
    input_digest = await Get(Digest, MergeDigests(digests_to_merge))
    global_args = [
        # Ensure Pex and its subprocesses create temporary files in the process execution
        # sandbox. It may make sense to do this generally for Processes, but in the short term we
        # have known use cases where /tmp is too small to hold large wheel downloads Pex is asked to
        # perform. Making the TMPDIR local to the sandbox allows control via
        # --local-execution-root-dir for the local case and should work well with remote cases where
        # a remoting implementation has to allow for processes producing large binaries in a
        # sandbox to support reasonable workloads. Communicating TMPDIR via --tmpdir instead of via
        # environment variable allows Pex to absolutize the path ensuring subprocesses that change
        # CWD can find the TMPDIR.
        "--tmpdir",
        tmpdir,
    ]
    if request.concurrency_available > 0:
        # {pants_concurrency} is substituted by the engine at execution time.
        global_args.extend(["--jobs", "{pants_concurrency}"])
    verbosity_args = [f"-{'v' * pex_subsystem.verbosity}"] if pex_subsystem.verbosity > 0 else []
    resolve_args = (
        [*cert_args, "--python-path", create_path_env_var(pex_env.interpreter_search_paths)]
        if request.set_resolve_args
        else []
    )
    args = [
        *request.subcommand,
        *global_args,
        *verbosity_args,
        *resolve_args,
        # NB: extra_args comes last because it may use `--` passthrough args,
        # which must come at the end.
        *request.extra_args,
    ]
    complete_pex_env = pex_env.in_sandbox(working_directory=None)
    normalized_argv = complete_pex_env.create_argv(pex_pex.exe, *args, python=request.python)
    env = {
        **complete_pex_env.environment_dict(python_configured=request.python is not None),
        **python_native_code.subprocess_env_vars,
        **(request.extra_env or {}),
        # If a subcommand is used, we need to use the `pex3` console script.
        **({"PEX_SCRIPT": "pex3"} if request.subcommand else {}),
    }
    return Process(
        normalized_argv,
        description=request.description,
        input_digest=input_digest,
        env=env,
        output_files=request.output_files,
        output_directories=request.output_directories,
        append_only_caches=complete_pex_env.append_only_caches,
        level=request.level,
        concurrency_available=request.concurrency_available,
        cache_scope=request.cache_scope,
    )
def rules():
    """Expose this module's rules together with those of its dependencies."""
    collected = list(collect_rules())
    collected.extend(external_tool.rules())
    collected.extend(pex_environment.rules())
    return collected
|
{
"content_hash": "9ea5cc8eb4991ad3fd1f2bfe4ff58be5",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 100,
"avg_line_length": 37.661764705882355,
"alnum_prop": 0.6710920213458285,
"repo_name": "benjyw/pants",
"id": "2e98e953cf31247d2438fbc55d00cee8f4056c16",
"size": "7815",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/backend/python/util_rules/pex_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
}
|
import codecs
import hyperframe
def http2_read_raw_frame(rfile):
    """Read one raw HTTP/2 frame from *rfile*, returning [header, body]."""
    raw_header = rfile.safe_read(9)
    # The 24-bit big-endian length field occupies the first three header bytes.
    frame_length = int(codecs.encode(raw_header[:3], 'hex_codec'), 16)
    # 4740180 == 0x485454 == b"HTT": the peer is speaking HTTP/1.x, not HTTP/2.
    if frame_length == 4740180:
        raise ValueError("Length field looks more like HTTP/1.1: %s" % rfile.peek(20))
    raw_body = rfile.safe_read(frame_length)
    return [raw_header, raw_body]
def http2_read_frame(rfile):
    """Read and fully parse a single HTTP/2 frame from *rfile*."""
    raw_header, raw_body = http2_read_raw_frame(rfile)
    parsed_frame, _length = hyperframe.frame.Frame.parse_frame_header(raw_header)
    # memoryview avoids copying the body buffer during parsing.
    parsed_frame.parse_body(memoryview(raw_body))
    return parsed_frame
|
{
"content_hash": "0f0d2bb5e74c421969ea727cb99f65ff",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 86,
"avg_line_length": 25.761904761904763,
"alnum_prop": 0.6802218114602587,
"repo_name": "tdickers/mitmproxy",
"id": "d45be64611866fe43fbb473901fc015295ad1759",
"size": "541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netlib/http/http2/framereader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "405"
},
{
"name": "CSS",
"bytes": "194361"
},
{
"name": "HTML",
"bytes": "2878"
},
{
"name": "JavaScript",
"bytes": "127316"
},
{
"name": "Python",
"bytes": "1248282"
},
{
"name": "Shell",
"bytes": "4087"
}
],
"symlink_target": ""
}
|
import json

import tornado.gen
import tornado.web

from jaeger_client.config import (
    Config,
    DEFAULT_SAMPLING_PORT,
    DEFAULT_REPORTING_PORT,
)
from jaeger_client.constants import (
    SAMPLER_TYPE_CONST,
    SAMPLER_TYPE_REMOTE,
)
from jaeger_client.local_agent_net import LocalAgentSender
from jaeger_client.reporter import Reporter
from jaeger_client.sampler import RemoteControlledSampler, ConstSampler
from jaeger_client.tracer import Tracer
# Module-level tracer configuration shared by every EndToEndHandler instance.
# NOTE(review): a 'probabilistic' sampler with param 1 presumably samples 100%
# of traces — verify against the jaeger_client sampler documentation.
config = {
    'service_name': 'crossdock-python',
    'enabled': True,
    'sampler': {
        'type': 'probabilistic',
        'param': 1,
    },
    'reporter_flush_interval': 1,
    'sampling_refresh_interval': 5,
}
class EndToEndHandler(object):
    """
    Handler that creates traces from a http request.

    json: {
        "type": "remote"
        "operation": "operationName",
        "count": 2,
        "tags": {
            "key": "value"
        }
    }

    Given the above json payload, the handler will use a tracer with the
    RemoteControlledSampler to create 2 traces for the "operationName"
    operation with the tags: {"key":"value"}. These traces are reported
    to the agent with the hostname "test_driver".
    """

    def __init__(self):
        # Build two tracers sharing one reporter: a remote-controlled sampler
        # and an always-sample ConstSampler, selected per request by "type".
        cfg = Config(config)
        init_sampler = cfg.sampler
        channel = self.local_agent_sender
        reporter = Reporter(
            channel=channel,
            flush_interval=cfg.reporter_flush_interval)
        remote_sampler = RemoteControlledSampler(
            channel=channel,
            service_name=cfg.service_name,
            sampling_refresh_interval=cfg.sampling_refresh_interval,
            init_sampler=init_sampler)
        remote_tracer = Tracer(
            service_name=cfg.service_name,
            reporter=reporter,
            sampler=remote_sampler)
        const_tracer = Tracer(
            service_name=cfg.service_name,
            reporter=reporter,
            sampler=ConstSampler(decision=True)
        )
        self._tracers = {
            SAMPLER_TYPE_CONST: const_tracer,
            SAMPLER_TYPE_REMOTE: remote_tracer
        }

    @property
    def tracers(self):
        return self._tracers

    @tracers.setter
    def tracers(self, tracers):
        self._tracers = tracers

    @property
    def local_agent_sender(self):
        # A fresh sender pointed at the crossdock test driver host.
        return LocalAgentSender(
            host='test_driver',
            sampling_port=DEFAULT_SAMPLING_PORT,
            reporting_port=DEFAULT_REPORTING_PORT,
        )

    @tornado.gen.coroutine
    def generate_traces(self, request, response_writer):
        """Create and finish `count` spans described by the JSON request body."""
        req = json.loads(request.body)
        sampler_type = req.get('type', 'remote')
        tracer = self.tracers[sampler_type]
        for _ in range(req.get('count', 0)):
            span = tracer.start_span(req['operation'])
            # BUG FIX: dict.iteritems() does not exist on Python 3; .items()
            # behaves identically here on both Python 2 and 3.
            for k, v in req.get('tags', {}).items():
                span.set_tag(k, v)
            span.finish()
        response_writer.finish()
|
{
"content_hash": "9f1ba506aecd23ae7be390ce0e4b18d4",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 96,
"avg_line_length": 28.01904761904762,
"alnum_prop": 0.6084296397008837,
"repo_name": "guillermo-sentinella/jaeger-client-python",
"id": "88c9b2967476bedeaeb154d417be0d22d96c341a",
"size": "2942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crossdock/server/endtoend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3725"
},
{
"name": "Python",
"bytes": "225177"
}
],
"symlink_target": ""
}
|
from eth_tools import Contract, address
from nose.tools import assert_equal
from pyethereum import tester as t
INITIALIZE = 0
SET_CONFIGURATION = 1
BUY_TICKET = 2
GET_TICKET_OWNER = 3
GET_TICKET_NUMBERS = 4
TRANSFER_TICKET = 5
CHECK_WINNERS = 6
CLAIM_WINNINGS = 7
GET_BALANCE = 8
SET_PAYOUTS = 9
WITHDRAW = 10
START_LOTTO = 11
class TestLotto:
    """Tests for the Serpent lotto contract, driven through numeric opcodes.

    Observed contract return conventions (from the assertions below):
    BUY_TICKET returns the new ticket id on success, [-1] for insufficient
    ether, [-2] when the lotto is closed, [-3] for invalid numbers.
    CHECK_WINNERS returns [-1] before the deadline and the six winning
    numbers afterwards; CLAIM_WINNINGS returns the payout, [-1] past the
    claim deadline, [-2] for a double claim.
    """
    def setup(self):
        # Fresh chain state + freshly deployed contract for every test.
        self.state = t.state()
        self.contract = Contract("contracts/lotto.se", self.state)
        self.contract.call(INITIALIZE)
        self.state.mine(1)
    def test_buying_ticket(self):
        # Tickets record both their owner (the default sender t.a0) and numbers.
        numbers = [1, 3, 4, 5, 9, 35]
        self.contract.call(START_LOTTO)
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        assert_equal(address(self.contract.call(GET_TICKET_OWNER, [ticket_id])[0]), t.a0)
        assert_equal(self.contract.call(GET_TICKET_NUMBERS, [ticket_id]), numbers)
        new_numbers = [1, 5, 7, 8, 10, 35]
        new_ticket_id = self.contract.call(BUY_TICKET, new_numbers)[0]
        assert_equal(address(self.contract.call(GET_TICKET_OWNER, [new_ticket_id])[0]), t.a0)
        assert_equal(self.contract.call(GET_TICKET_NUMBERS, [new_ticket_id]), new_numbers)
    def test_cannot_buy_invalid_ticket(self):
        # Duplicate numbers are rejected with -3.
        numbers = [1, 1, 2, 3, 5, 8]
        self.contract.call(START_LOTTO)
        assert_equal(self.contract.call(BUY_TICKET, numbers), [-3])
    def test_lotto_closes_after_specified_block(self):
        # Configuration arg 3 here is the closing block; buying after it fails.
        self.contract.call(SET_CONFIGURATION, [0, 0, 4])
        self.contract.call(START_LOTTO)
        self.state.mine(5)
        numbers = [1, 3, 4, 5, 9, 35]
        assert_equal(self.contract.call(BUY_TICKET, numbers), [-2])
    def test_ticket_prices(self):
        # With a ticket price of 1, buying with 0 ether fails with -1.
        numbers = [1, 3, 4, 5, 9, 35]
        self.contract.call(SET_CONFIGURATION, [1])
        self.contract.call(START_LOTTO)
        assert_equal(self.contract.call(BUY_TICKET, numbers, ether=0), [-1])
        assert_equal(self.contract.call(BUY_TICKET, numbers, ether=1), [0])
    def test_transfering_ticket(self):
        numbers = [1, 3, 4, 5, 9, 35]
        self.contract.call(START_LOTTO)
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        self.contract.call(TRANSFER_TICKET, [ticket_id, t.a1])
        assert_equal(address(self.contract.call(GET_TICKET_OWNER, [ticket_id])[0]), t.a1)
    def test_check_winning_numbers(self):
        # fake_rng.se deterministically yields the numbers [1,2,5,6,7,1].
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 4])
        self.contract.call(START_LOTTO)
        assert_equal(self.contract.call(CHECK_WINNERS), [-1])
        self.state.mine(5)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
    def test_claim_winnings(self):
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 4, 4], ether = 1000)
        # Payout table: 5 matching numbers pays 101 in this configuration.
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 1000, 101, 0, 0, 0, 0, 0, 0])
        self.contract.call(START_LOTTO)
        numbers = [1, 2, 3, 4, 5, 35]
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        self.state.mine(5)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [101])
    def test_cannot_overwrite_winning_numbers(self):
        self.contract.call(SET_CONFIGURATION, [0, 0, 4, 4], ether = 1000)
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 1000, 101, 0, 0, 0, 0, 0, 0])
        self.contract.call(START_LOTTO)
        self.state.mine(5)
        winning_numbers = self.contract.call(CHECK_WINNERS)
        assert_equal(len(winning_numbers), 6)
        self.state.mine(1)
        # A second draw attempt is rejected with -2.
        assert_equal(self.contract.call(CHECK_WINNERS), [-2])
    def test_cannot_claim_winnings_after_deadline(self):
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 2, 2], ether = 1000)
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 1000, 101, 0, 0, 0, 0, 0, 0])
        self.contract.call(START_LOTTO)
        numbers = [1, 2, 3, 4, 5, 35]
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        self.state.mine(5)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [-1])
    def test_cannot_double_claim_winnings(self):
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 4, 4], ether = 1000)
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 1000, 101, 0, 0, 0, 0, 0, 0])
        self.contract.call(START_LOTTO)
        numbers = [1, 2, 3, 4, 5, 35]
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        self.state.mine(5)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [101])
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [-2])
    def test_multiple_winners_split_jackpot(self):
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 4, 4], ether = 1000)
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000, 0])
        self.contract.call(START_LOTTO)
        numbers = [1, 2, 5, 6, 7, 1]
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        second_ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        self.state.mine(5)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
        # Two winning tickets, 1000 jackpot -> 500 each.
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [500])
    def test_withdrawal_is_only_possible_after_lotto_deadline(self):
        self.contract.call(SET_CONFIGURATION, [0, 0, 2, 2], ether = 1000)
        self.contract.call(START_LOTTO)
        assert_equal(self.contract.call(WITHDRAW, [500]), [-1])
        self.state.mine(5)
        assert_equal(self.contract.call(WITHDRAW, [500]), [500])
    def test_start_new_lotto(self):
        rng = Contract("contracts/fake_rng.se", self.state)
        self.contract.call(SET_CONFIGURATION, [0, rng.contract, 2, 2], ether = 1000)
        self.contract.call(SET_PAYOUTS, [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1000, 0])
        self.contract.call(START_LOTTO)
        # Restarting an in-progress lotto fails; after the deadline it succeeds
        # and ticket ids reset to 0.
        assert_equal(self.contract.call(START_LOTTO), [-1])
        self.state.mine(5)
        assert_equal(self.contract.call(START_LOTTO), [1])
        numbers = [1, 2, 5, 6, 7, 1]
        ticket_id = self.contract.call(BUY_TICKET, numbers)[0]
        assert_equal(ticket_id, 0)
        self.state.mine(3)
        assert_equal(self.contract.call(CHECK_WINNERS), [1,2,5,6,7,1])
        assert_equal(self.contract.call(CLAIM_WINNINGS, [ticket_id]), [1000])
|
{
"content_hash": "2438d6488087e689b69822eabd981e36",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 93,
"avg_line_length": 39.26589595375722,
"alnum_prop": 0.6229942587958193,
"repo_name": "jeffanthony/ethereum-powerball",
"id": "611c4710a9d350edb3a17414e5994556192fa9cd",
"size": "6793",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/lotto_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8110"
}
],
"symlink_target": ""
}
|
import argparse
import fnmatch
import ntpath
import sys
import os
import shutil
from helpers import compile_helper
from subprocess import call
# Add this to your path
protoc_path = "protoc"

# Parse the desired output language / directory.
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--lang", help="Language to produce protoc files")
parser.add_argument("-o", "--out_path", help="Output path for protoc files")
args = parser.parse_args()

# Set defaults
lang = args.lang or "csharp"
out_path = args.out_path or "out"
default_out_path = out_path == "out"

# Determine where to store
proj_root = os.path.abspath("../")
proto_path = os.path.abspath("src/")
out_path = os.path.abspath(out_path)
tmp_out_path = out_path

# Output dir is actually different for csharp because we modify it before compiling.
if lang == "csharp":
    tmp_out_path = os.path.join(tmp_out_path, "POGOProtos")

# Ask before wiping a user-supplied output directory; the default "out" dir is
# considered disposable.
# BUG FIX: the original used a Python 2 `print` *statement* here (a syntax
# error on Python 3); print() produces identical output on both versions.
if not default_out_path:
    print('Can we remove "%s"?' % tmp_out_path)
    may_remove = compile_helper.query_yes_no("Please answer.", default="no")
else:
    may_remove = True

if may_remove and os.path.exists(tmp_out_path):
    shutil.rmtree(tmp_out_path)

# Find protofiles and compile
for root, dirnames, filenames in os.walk(proto_path):
    protos = fnmatch.filter(filenames, '*.proto')
    for filename in protos:
        proto_file = os.path.join(root, filename)
        relative_file_path = proto_file.replace(proto_path, "")
        relative_path = relative_file_path.replace(ntpath.basename(proto_file), "")

        # csharp mirrors the package directory structure; other languages let
        # protoc lay out its own tree under out_path.
        if lang == "csharp":
            destination_path = os.path.abspath(out_path + relative_path)
        else:
            destination_path = os.path.abspath(out_path)

        if not os.path.exists(destination_path):
            os.makedirs(destination_path)

        print("Compiling " + relative_file_path + "..")
        command = """{0} --proto_path="{1}" --{2}_out="{3}" "{4}\"""".format(
            protoc_path,
            proto_path,
            lang,
            destination_path,
            os.path.abspath(proto_file)
        )
        call(command, shell=True)

compile_helper.finish_compile(out_path, lang)
print("Done!")
|
{
"content_hash": "c95fbdbe6f1c0a0ba550c0cc971986a7",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 83,
"avg_line_length": 29.455696202531644,
"alnum_prop": 0.650623119896863,
"repo_name": "cstrachan88/PoGo-Proxy.NET",
"id": "d807da32eb2081afa5e082318b741eae4a63575a",
"size": "2350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PoGo Proxy.Protocs/POGOProtos-1.0/compile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1618559"
},
{
"name": "Protocol Buffer",
"bytes": "84810"
},
{
"name": "Python",
"bytes": "13489"
}
],
"symlink_target": ""
}
|
"""
jenkinsapi plugins
"""
from __future__ import print_function
import logging
import time
import re
try:
from StringIO import StringIO
from urllib import urlencode
except ImportError:
# Python3
from io import BytesIO as StringIO
from urllib.parse import urlencode
import json
import requests
from jenkinsapi.plugin import Plugin
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import UnknownPlugin
from jenkinsapi.custom_exceptions import JenkinsAPIException
from jenkinsapi.utils.jsonp_to_json import jsonp_to_json
from jenkinsapi.utils.manifest import read_manifest
log = logging.getLogger(__name__)
class Plugins(JenkinsBase):
    """
    Plugins class for jenkinsapi

    Dict-like access to the plugins installed on a Jenkins server:
    ``plugins['shortName']`` returns a Plugin, assignment installs a plugin,
    ``del`` uninstalls one.
    """

    def __init__(self, url, jenkins_obj):
        self.jenkins_obj = jenkins_obj
        JenkinsBase.__init__(self, url)
        # print('DEBUG: Plugins._data=', self._data)

    def get_jenkins_obj(self):
        return self.jenkins_obj

    def check_updates_server(self):
        """Ask Jenkins to refresh its plugin catalogue from the update server."""
        url = (
            '%s/pluginManager/checkUpdatesServer'
            % self.jenkins_obj.baseurl
        )
        self.jenkins_obj.requester.post_and_confirm_status(
            url, params={}, data={})

    @property
    def update_center_dict(self):
        """The Jenkins update-center catalogue, fetched and parsed as a dict."""
        # The update center serves JSONP; strip the padding before parsing.
        update_center = 'https://updates.jenkins.io/update-center.json'
        jsonp = requests.get(update_center).content.decode('utf-8')
        return json.loads(jsonp_to_json(jsonp))

    def _poll(self, tree=None):
        return self.get_data(self.baseurl, tree=tree)

    def keys(self):
        return self.get_plugins_dict().keys()
    __iter__ = keys

    def iteritems(self):
        return self._get_plugins()

    def values(self):
        return [a[1] for a in self.iteritems()]

    def _get_plugins(self):
        # Yield (shortName, Plugin) pairs from the polled server data.
        if 'plugins' in self._data:
            for p_dict in self._data["plugins"]:
                yield p_dict["shortName"], Plugin(p_dict)

    def get_plugins_dict(self):
        return dict(self._get_plugins())

    def __len__(self):
        # len() of the dict directly; no need to materialize keys().
        return len(self.get_plugins_dict())

    def __getitem__(self, plugin_name):
        try:
            return self.get_plugins_dict()[plugin_name]
        except KeyError:
            raise UnknownPlugin(plugin_name)

    def __setitem__(self, shortName, plugin):
        """
        Installs plugin in Jenkins.

        If plugin already exists - this method is going to uninstall the
        existing plugin and install the specified version if it is not
        already installed.

        :param shortName: Plugin ID
        :param plugin a Plugin object to be installed.
        """
        if self.plugin_version_already_installed(plugin):
            return
        if plugin.is_latest(self.update_center_dict):
            self._install_plugin_from_updatecenter(plugin)
        else:
            self._install_specific_version(plugin)
        self._wait_until_plugin_installed(plugin)

    def _install_plugin_from_updatecenter(self, plugin):
        """
        Latest versions of plugins can be installed from the update
        center (and don't need a restart.)
        """
        xml_str = plugin.get_attributes()
        url = (
            '%s/pluginManager/installNecessaryPlugins'
            % self.jenkins_obj.baseurl
        )
        self.jenkins_obj.requester.post_xml_and_confirm_status(
            url, data=xml_str)

    @property
    def update_center_install_status(self):
        """
        Jenkins 2.x specific
        """
        url = "%s/updateCenter/installStatus" % self.jenkins_obj.baseurl
        status = self.jenkins_obj.requester.get_url(url)
        if status.status_code == 404:
            raise JenkinsAPIException(
                'update_center_install_status not available for Jenkins 1.X')
        return status.json()

    @property
    def restart_required(self):
        """
        Call after plugin installation to check if Jenkins requires a restart
        """
        try:
            jobs = self.update_center_install_status['data']['jobs']
        except JenkinsAPIException:
            return True  # Jenkins 1.X has no update_center
        # NB: Jenkins reports requiresRestart as the string 'true'/'false'.
        return any(job['requiresRestart'] == 'true' for job in jobs)

    def _install_specific_version(self, plugin):
        """
        Plugins that are not the latest version have to be uploaded.
        """
        download_link = plugin.get_download_link(
            update_center_dict=self.update_center_dict)
        downloaded_plugin = self._download_plugin(download_link)
        plugin_dependencies = self._get_plugin_dependencies(downloaded_plugin)
        log.debug("Installing dependencies for plugin '%s'", plugin.shortName)
        self.jenkins_obj.install_plugins(plugin_dependencies)
        url = ('%s/pluginManager/uploadPlugin' % self.jenkins_obj.baseurl)
        requester = self.jenkins_obj.requester
        downloaded_plugin.seek(0)
        requester.post_and_confirm_status(
            url, files={'file': ('plugin.hpi', downloaded_plugin)},
            data={}, params={})

    def _get_plugin_dependencies(self, downloaded_plugin):
        """
        Returns a list of all dependencies for a downloaded plugin
        """
        plugin_dependencies = []
        manifest = read_manifest(downloaded_plugin)
        manifest_dependencies = manifest.main_section.get(
            'Plugin-Dependencies')
        if manifest_dependencies:
            dependencies = manifest_dependencies.split(',')
            for dep in dependencies:
                # split plugin:version;resolution:optional entries
                components = dep.split(';')
                dep_plugin = components[0]
                name = dep_plugin.split(':')[0]
                # install latest dependency, avoids multiple
                # versions of the same dep
                plugin_dependencies.append(
                    Plugin({'shortName': name, 'version': 'latest'}))
        return plugin_dependencies

    def _download_plugin(self, download_link):
        downloaded_plugin = StringIO()
        downloaded_plugin.write(requests.get(download_link).content)
        return downloaded_plugin

    def _plugin_has_finished_installation(self, plugin):
        """
        Return True if installation is marked as 'Success' or
        'SuccessButRequiresRestart' in Jenkins' update_center,
        else return False.
        """
        try:
            jobs = self.update_center_install_status['data']['jobs']
            for job in jobs:
                if job['name'] == plugin.shortName and \
                        job['installStatus'] \
                        in ['Success', 'SuccessButRequiresRestart']:
                    return True
            return False
        except JenkinsAPIException:
            return False  # lack of update_center in Jenkins 1.X

    def plugin_version_is_being_installed(self, plugin):
        """
        Return true if plugin is currently being installed.
        """
        try:
            jobs = self.update_center_install_status['data']['jobs']
        except JenkinsAPIException:
            return False  # lack of update_center in Jenkins 1.X
        return any(
            job['name'] == plugin.shortName
            and job['version'] == plugin.version
            for job in jobs)

    def plugin_version_already_installed(self, plugin):
        """
        Check if plugin version is already installed
        """
        if plugin.shortName not in self:
            if self.plugin_version_is_being_installed(plugin):
                return True
            return False
        installed_plugin = self[plugin.shortName]
        if plugin.version == installed_plugin.version:
            return True
        elif plugin.version == "latest":
            # we don't have an exact version, we first check if Jenkins
            # knows about an update
            if hasattr(installed_plugin, 'hasUpdates') \
                    and installed_plugin.hasUpdates:
                return False
            # Jenkins may not have an up-to-date catalogue,
            # so check update-center directly
            latest_version = self.update_center_dict[
                'plugins'][plugin.shortName]['version']
            return installed_plugin.version == latest_version
        return False

    def __delitem__(self, shortName):
        if re.match('.*@.*', shortName):
            real_shortName = re.compile('(.*)@(.*)').search(shortName).group(1)
            raise ValueError(
                ("Plugin shortName can't contain version. '%s' should be '%s'")
                % (shortName, real_shortName)
            )
        if shortName not in self:
            raise KeyError(
                'Plugin with ID "%s" not found, cannot uninstall' % shortName)
        if self[shortName].deleted:
            # BUG FIX: the original never applied the % formatting, so the
            # message literally contained "%s".
            raise JenkinsAPIException(
                'Plugin "%s" already marked for uninstall. '
                'Restart jenkins for uninstall to complete.' % shortName)
        params = {
            'Submit': 'OK',
            'json': {}
        }
        url = ('%s/pluginManager/plugin/%s/doUninstall'
               % (self.jenkins_obj.baseurl, shortName))
        self.jenkins_obj.requester.post_and_confirm_status(
            url, params={}, data=urlencode(params)
        )

        self.poll()
        if not self[shortName].deleted:
            raise JenkinsAPIException(
                "Problem uninstalling plugin '%s'." % shortName)

    def _wait_until_plugin_installed(self, plugin, maxwait=120, interval=1):
        # Poll for up to `maxwait` seconds, checking every `interval` seconds.
        for _ in range(maxwait, 0, -interval):
            self.poll()
            if self._plugin_has_finished_installation(plugin):
                return True
            if plugin.shortName in self:
                return True  # for Jenkins 1.X
            time.sleep(interval)
        if self.jenkins_obj.version.startswith('2'):
            raise JenkinsAPIException(
                "Problem installing plugin '%s'." % plugin.shortName)
        # BUG FIX: the two adjacent string literals lacked a separating space,
        # producing "...plugins.You may need...".
        log.warning("Plugin '%s' not found in loaded plugins. "
                    "You may need to restart Jenkins.", plugin.shortName)
        return False

    def __contains__(self, plugin_name):
        """
        True if plugin_name is the name of a defined plugin
        """
        return plugin_name in self.keys()

    def __str__(self):
        plugins = [plugin["shortName"]
                   for plugin in self._data.get("plugins", [])]
        return str(sorted(plugins))
|
{
"content_hash": "5b3181f85c9513c5ce2c0bcb84a82612",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 79,
"avg_line_length": 35.44932432432432,
"alnum_prop": 0.5936338511388545,
"repo_name": "salimfadhley/jenkinsapi",
"id": "9083c67cb4ad90b715b0196b0b0465d3bbdd86d5",
"size": "10493",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jenkinsapi/plugins.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "340693"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
class Building(models.Model):
    """A building with a display name and geographic coordinates."""
    name = models.CharField(max_length=200)
    # Latitude / longitude, presumably in decimal degrees — confirm with the
    # map frontend that consumes these.
    lat = models.FloatField()
    lng = models.FloatField()

    def __unicode__(self):
        # Python 2 string representation (used e.g. by the Django admin).
        return self.name
|
{
"content_hash": "91bd6188912d591e00fb8897fced227a",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 40,
"avg_line_length": 21.818181818181817,
"alnum_prop": 0.7333333333333333,
"repo_name": "andrewwong97/path-hero",
"id": "a4f2941b6be042367bab1e09a3c0493bf121bb2d",
"size": "240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pathhero/main/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "48833"
},
{
"name": "HTML",
"bytes": "5244"
},
{
"name": "JavaScript",
"bytes": "97109"
},
{
"name": "Python",
"bytes": "7713"
}
],
"symlink_target": ""
}
|
from os.path import abspath
from io import BytesIO
import copy
import numpy
import unittest
# Enthought library imports
from mayavi.core.engine import Engine
from mayavi.core.null_engine import NullEngine
from mayavi.sources.array_source import ArraySource
from mayavi.modules.outline import Outline
from mayavi.modules.image_plane_widget import ImagePlaneWidget
class TestImagePlaneWidget(unittest.TestCase):
    """Tests for ImagePlaneWidget creation, save/restore and deep-copying."""

    def make_data(self):
        """Creates suitable data for the test."""
        dims = numpy.array((64, 64, 64), 'i')
        # Create some scalars to render.
        dx, dy, dz = 10.0/(dims - 1)
        x = numpy.reshape(numpy.arange(-5.0, 5.0+dx*0.5, dx, 'f'),
                          (dims[0], 1, 1))
        y = numpy.reshape(numpy.arange(-5.0, 5.0+dy*0.5, dy, 'f'),
                          (1, dims[1], 1))
        # Bug fix: the z-axis length is dims[2]; the original used dims[0],
        # which was only harmless because all three dims are equal here.
        z = numpy.reshape(numpy.arange(-5.0, 5.0+dz*0.5, dz, 'f'),
                          (1, 1, dims[2]))
        scalars = numpy.sin(x*y*z)/(x*y*z)
        return scalars

    def setUp(self):
        """Initial setting up of test fixture, automatically called by
        TestCase before any other test method is invoked.

        Builds a NullEngine scene holding an ArraySource, an Outline and
        three ImagePlaneWidgets (one per axis orientation).
        """
        e = NullEngine()
        # Uncomment to see visualization for debugging etc.
        # e = Engine()
        e.start()
        s = e.new_scene()
        self.e = e
        self.s = s
        ############################################################
        # Create a new scene and set up the visualization.
        d = ArraySource()
        sc = self.make_data()
        d.scalar_data = sc
        e.add_source(d)
        # Create an outline for the data.
        o = Outline()
        e.add_module(o)
        # ImagePlaneWidgets for the scalars: default (x), then y and z.
        ipw = ImagePlaneWidget()
        e.add_module(ipw)
        ipw_y = ImagePlaneWidget()
        e.add_module(ipw_y)
        ipw_y.ipw.plane_orientation = 'y_axes'
        ipw_z = ImagePlaneWidget()
        e.add_module(ipw_z)
        ipw_z.ipw.plane_orientation = 'z_axes'
        self.scene = e.current_scene
        return

    def tearDown(self):
        """For necessary clean up, automatically called by TestCase after
        the test methods have been invoked."""
        self.e.stop()
        return

    def check(self):
        """Do the actual testing: verify each widget's orientation and
        that it is centered on the mid-plane of the 64^3 volume."""
        s = self.scene
        src = s.children[0]
        # Children[0] is the Outline; the three IPWs follow it.
        i1, i2, i3 = src.children[0].children[1:]
        self.assertEqual(i1.ipw.plane_orientation, 'x_axes')
        self.assertEqual(numpy.allclose(i1.ipw.center, (0, 31.5, 31.5)), True)
        self.assertEqual(i2.ipw.plane_orientation, 'y_axes')
        self.assertEqual(numpy.allclose(i2.ipw.center, (31.5, 0, 31.5)), True)
        self.assertEqual(i3.ipw.plane_orientation, 'z_axes')
        self.assertEqual(numpy.allclose(i3.ipw.center, (31.5, 31.5, 0)), True)

    def test_image_plane_widget(self):
        "Test if the test fixture works"
        self.check()

    def test_save_and_restore(self):
        """Test if saving a visualization and restoring it works."""
        engine = self.e
        scene = self.scene
        # Save visualization.
        f = BytesIO()
        f.name = abspath('test.mv2')  # We simulate a file.
        engine.save_visualization(f)
        f.seek(0)  # So we can read this saved data.
        # Remove existing scene.
        engine.close_scene(scene)
        # Load visualization
        engine.load_visualization(f)
        self.scene = engine.current_scene
        self.check()

    def test_deepcopied(self):
        """Test if the MayaVi2 visualization can be deep-copied."""
        ############################################################
        # Test if the MayaVi2 visualization can be deep-copied.
        # Pop the source object.
        s = self.scene
        sources = s.children
        s.children = []
        # Add it back to see if that works without error.
        s.children.extend(sources)
        self.check()
        # Now deepcopy the source and replace the existing one with
        # the copy. This basically simulates cutting/copying the
        # object from the UI via the right-click menu on the tree
        # view, and pasting the copy back.
        sources1 = copy.deepcopy(sources)
        s.children[:] = sources1
        self.check()
if __name__ == '__main__':
    # Run the test suite when executed as a script.
    unittest.main()
|
{
"content_hash": "ef08c7d7990ea747ad3e81e2382ff3b4",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 122,
"avg_line_length": 31.401459854014597,
"alnum_prop": 0.5743840074384008,
"repo_name": "dmsurti/mayavi",
"id": "7e086e05911f4d75941eb96f4f85c439e281716f",
"size": "4506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mayavi/tests/test_image_plane_widget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1054"
},
{
"name": "GAP",
"bytes": "34817"
},
{
"name": "Python",
"bytes": "2494055"
},
{
"name": "Shell",
"bytes": "147"
}
],
"symlink_target": ""
}
|
import optparse
import StringIO
import sys
from pyang import plugin
_COPYRIGHT_NOTICE = """
// DO NOT EDIT
// generated by pyang using OpenConfig https://github.com/openconfig/public
//
// Copyright (C) 2014,2015 Nippon Telegraph and Telephone Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
"""
emitted_type_names = {}
def pyang_plugin_init():
    """Entry point called by pyang to register this output plugin."""
    plugin.register_plugin(GolangPlugin())
class GolangPlugin(plugin.PyangPlugin):
    """pyang output plugin emitting Go source for YANG models ('golang' format)."""
    def add_output_format(self, fmts):
        self.multiple_modules = True
        fmts['golang'] = self
    def emit(self, ctx, modules, fd):
        # Per-run state is stashed on the pyang context object.
        # NOTE(review): output is printed to stdout rather than written to fd.
        ctx.golang_identity_map = {}
        ctx.golang_typedef_map = {}
        ctx.golang_struct_def = []
        ctx.golang_struct_names = {}
        ctx.prefix_rel = {}
        ctx.module_deps = []
        for m in modules:
            check_module_deps(ctx, m)
        # visit yang statements
        visit_modules(ctx)
        # emit bgp_configs
        emit_go(ctx)
def visit_modules(ctx):
    """Walk all dependency modules: typedefs/identities first, then containers.

    The two passes are separate because container children may reference
    typedefs and identities from any module.
    """
    # visit typedef and identity
    for module in ctx.module_deps:
        visit_typedef(ctx, module)
        visit_identity(ctx, module)
    # visit container
    for module in ctx.module_deps:
        visit_children(ctx, module, module.i_children)
def emit_go(ctx):
    """Print the generated Go source: header, typedefs/identities, then structs."""
    # Structs were collected parent-first; emit leaf-first instead.
    ctx.golang_struct_def.reverse()
    emitted_structs = set()
    generate_header(ctx)
    for mod in ctx.module_deps:
        if mod in _module_excluded:
            continue
        emit_typedef(ctx, mod)
        emit_identity(ctx, mod)
    for struct in ctx.golang_struct_def:
        name = struct.uniq_name
        if name in emitted_structs:
            continue
        emit_class_def(ctx, struct, name, struct.module_prefix)
        emitted_structs.add(name)
def check_module_deps(ctx, module):
    """Recursively record imported modules in ctx.module_deps (dependency order).

    Also records, in ctx.prefix_rel, the local prefix alias *k* each module is
    imported under. Modules in _module_excluded are never appended.
    """
    own_prefix = module.i_prefix
    for k, v in module.i_prefixes.items():
        mod = ctx.get_module(v[0])
        if mod.i_prefix != own_prefix:
            check_module_deps(ctx, mod)
            ctx.prefix_rel[mod.i_prefix] = k
            if mod not in ctx.module_deps \
                    and mod.i_modulename not in _module_excluded:
                ctx.module_deps.append(mod)
def dig_leafref(type_obj):
    """Follow a chain of leafref types and return the ultimate concrete type."""
    target = type_obj.i_type_spec.i_target_node.search_one('type')
    while is_leafref(target):
        target = target.i_type_spec.i_target_node.search_one('type')
    return target
def emit_class_def(ctx, yang_statement, struct_name, prefix):
    """Print the Go struct definition for a YANG container/list statement.

    Each child becomes a struct field with a `mapstructure` tag; the Go type
    is chosen per-child by the case analysis below. A container holding a
    single list is skipped (the list's struct is emitted instead).
    """
    o = StringIO.StringIO()
    if len(yang_statement.i_children) == 1 and is_list(yang_statement.i_children[0]):
        return
    print >> o, '//struct for container %s:%s' % (prefix, yang_statement.arg)
    print >> o, 'type %s struct {' % convert_to_golang(struct_name)
    for child in yang_statement.i_children:
        if child.path in _path_exclude:
            continue
        container_or_list_name = child.uniq_name
        val_name_go = convert_to_golang(child.arg)
        child_prefix = get_orig_prefix(child.i_orig_module)
        tag_name = child.uniq_name.lower()
        print >> o, ' // original -> %s:%s' % \
            (child_prefix, container_or_list_name)
        # case leaf
        if is_leaf(child):
            type_obj = child.search_one('type')
            type_name = type_obj.arg
            # case identityref
            if type_name == 'identityref':
                emit_type_name = convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
            # case leafref
            elif type_name == 'leafref':
                t = dig_leafref(type_obj)
                if is_translation_required(t):
                    print >> o, ' //%s:%s\'s original type is %s' \
                        % (child_prefix, container_or_list_name, t.arg)
                    emit_type_name = translate_type(t.arg)
                elif is_identityref(t):
                    emit_type_name = convert_to_golang(t.search_one('base').arg.split(':')[-1])
                else:
                    emit_type_name = t.arg
            # case embeded enumeration
            elif type_name == 'enumeration':
                emit_type_name = val_name_go
            # case translation required
            elif is_translation_required(type_obj):
                print >> o, ' //%s:%s\'s original type is %s'\
                    % (child_prefix, container_or_list_name, type_name)
                emit_type_name = translate_type(type_name)
            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = type_name
            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                # print(t)
                emit_type_name = t.golang_name
        # case 'case'
        if is_case(child):
            continue
        # case leaflist
        if is_leaflist(child):
            type_obj = child.search_one('type')
            type_name = type_obj.arg
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            # case leafref
            if type_name == 'leafref':
                t = type_obj.i_type_spec.i_target_node.search_one('type')
                emit_type_name = '[]'+t.arg
            elif type_name == 'identityref':
                emit_type_name = '[]'+convert_to_golang(type_obj.search_one('base').arg.split(':')[-1])
            # case translation required
            elif is_translation_required(type_obj):
                print >> o, ' // original type is list of %s' % (type_obj.arg)
                emit_type_name = '[]'+translate_type(type_name)
            # case other primitives
            elif is_builtin_type(type_obj):
                emit_type_name = '[]'+type_name
            # default
            else:
                base_module = type_obj.i_orig_module.i_prefix
                t = lookup_typedef(ctx, base_module, type_name)
                emit_type_name = '[]'+t.golang_name
        # case container
        elif is_container(child) or is_choice(child):
            key = child_prefix+':'+container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = t.golang_name
            if len(t.i_children) == 1 and is_list(t.i_children[0]):
                l = t.i_children[0]
                emit_type_name = '[]' + l.golang_name
            else:
                emit_type_name = t.golang_name
        # case list
        elif is_list(child):
            key = child_prefix+':'+container_or_list_name
            t = ctx.golang_struct_names[key]
            val_name_go = val_name_go + 'List'
            tag_name += '-list'
            emit_type_name = '[]' + t.golang_name
        if is_container(child):
            name = emit_type_name
            # Conventional openconfig config/state sub-containers get fixed names.
            if name.startswith(convert_to_golang(struct_name)) and name.endswith("Config"):
                tag_name = 'config'
                val_name_go = 'Config'
            elif name.startswith(convert_to_golang(struct_name)) and name.endswith("State"):
                tag_name = 'state'
                val_name_go = 'State'
        print >> o, ' {0}\t{1} `mapstructure:"{2}"`'.format(val_name_go, emit_type_name, tag_name)
    print >> o, '}'
    print o.getvalue()
def get_orig_prefix(module):
    """Return the prefix of the original (non-augmenting) module.

    Walks the ``i_orig_module`` chain to its root and returns that root's
    ``i_prefix``. Bug fix: the original code dropped the recursive result
    (no ``return``), so any module with an ``i_orig_module`` yielded None.
    """
    orig = module.i_orig_module
    if orig:
        return get_orig_prefix(orig)
    return module.i_prefix
def get_path(c):
    """Build the '/'-separated, prefix-qualified schema path of statement *c*."""
    if c.parent is None:
        return ''
    prefix_part = ''
    if hasattr(c, 'i_module'):
        prefix_stmt = c.i_module.search_one('prefix')
        if prefix_stmt:
            prefix_part = prefix_stmt.arg + ":"
    return get_path(c.parent) + "/" + prefix_part + c.arg
def visit_children(ctx, module, children):
    """Recursively walk a statement tree, assigning unique names and
    registering containers/lists/choices in ctx.golang_struct_names/_def.

    Side effects on each statement: sets uniq_name, path, and (for
    struct-producing statements and embedded enums) golang_name.
    """
    for c in children:
        prefix = ''
        if is_case(c):
            prefix = get_orig_prefix(c.parent.i_orig_module)
            c.i_orig_module = c.parent.i_orig_module
        else:
            prefix = get_orig_prefix(c.i_orig_module)
        c.uniq_name = c.arg
        # Disambiguate the ubiquitous 'config'/'state' container names.
        if c.arg == 'config':
            c.uniq_name = c.parent.uniq_name + '-config'
        if c.arg == 'state':
            c.uniq_name = c.parent.uniq_name + '-state'
        if c.arg == 'graceful-restart' and prefix == 'bgp-mp':
            c.uniq_name = 'mp-graceful-restart'
        t = c.search_one('type')
        # define container embeded enums
        if is_leaf(c) and c.search_one('type').arg == 'enumeration':
            prefix = module.i_prefix
            c.path = get_path(c)
            c.golang_name = convert_to_golang(c.arg)
            if prefix in ctx.golang_typedef_map:
                ctx.golang_typedef_map[prefix][c.arg] = c
            else:
                ctx.golang_typedef_map[prefix] = {c.arg: c}
        if is_list(c) or is_container(c) or is_choice(c):
            c.golang_name = convert_to_golang(c.uniq_name)
            if is_choice(c):
                picks = pickup_choice(c)
                c.i_children = picks
            if ctx.golang_struct_names.get(prefix+':'+c.uniq_name):
                ext_c = ctx.golang_struct_names.get(prefix+':'+c.uniq_name)
                ext_c_child_count = len(getattr(ext_c, "i_children"))
                current_c_child_count = len(getattr(c, "i_children"))
                # On a name collision, keep whichever definition has more children.
                if ext_c_child_count < current_c_child_count:
                    c.module_prefix = prefix
                    ctx.golang_struct_names[prefix+':'+c.uniq_name] = c
                    idx = ctx.golang_struct_def.index(ext_c)
                    ctx.golang_struct_def[idx] = c
            else:
                c.module_prefix = prefix
                ctx.golang_struct_names[prefix+':'+c.uniq_name] = c
                ctx.golang_struct_def.append(c)
        c.path = get_path(c)
        # print(c.path)
        if hasattr(c, 'i_children'):
            visit_children(ctx, module, c.i_children)
def pickup_choice(c):
    """Collect the children of every 'case' statement under choice *c*."""
    gathered = []
    for sub in c.i_children:
        if is_case(sub):
            gathered.extend(sub.i_children)
    return gathered
def get_type_spec(stmt):
    """Return the type-spec name of the first substatement that has one, or None."""
    for sub in stmt.substmts:
        if hasattr(sub, 'i_type_spec'):
            return sub.i_type_spec.name
    return None
def visit_typedef(ctx, module):
    """Register every typedef of *module* in ctx.golang_typedef_map.

    Each typedef statement gets its path and Go name set as a side effect.
    The same map is also registered under the local prefix alias when it
    differs from the module's own prefix.
    """
    prefix = module.i_prefix
    typedefs = {}
    for stmt in module.substmts:
        if stmt.keyword != 'typedef':
            continue
        stmt.path = get_path(stmt)
        # print(stmt.path)
        stmt.golang_name = convert_to_golang(stmt.arg)
        typedefs[stmt.arg] = stmt
    ctx.golang_typedef_map[prefix] = typedefs
    alias = ctx.prefix_rel[prefix]
    if alias != prefix:
        ctx.golang_typedef_map[alias] = typedefs
def visit_identity(ctx, module):
    """Register identities of *module*, attaching derived identities to their base.

    A derived identity is appended to its base identity's substmts so that
    emit_identity later renders the base with all of its derivations.
    """
    prefix = module.i_prefix
    child_map = {}
    for stmts in module.substmts:
        if stmts.keyword == 'identity':
            name = stmts.arg
            stmts.golang_name = convert_to_golang(name)
            child_map[name] = stmts
            base = stmts.search_one('base')
            if base:
                elems = base.arg.split(':')
                if len(elems) > 1:
                    # Base lives in another (already visited) module.
                    ctx.golang_identity_map[elems[0]][elems[1]].substmts.append(stmts)
                else:
                    child_map[base.arg].substmts.append(stmts)
    ctx.golang_identity_map[prefix] = child_map
def lookup_identity(ctx, default_prefix, identity_name):
    """Resolve an identity name (optionally prefix-qualified) via lookup()."""
    return lookup(ctx.golang_identity_map, default_prefix, identity_name)
def lookup_typedef(ctx, default_prefix, type_name):
    """Resolve a typedef name (optionally prefix-qualified) via lookup()."""
    return lookup(ctx.golang_typedef_map, default_prefix, type_name)
def lookup(basemap, default_prefix, key):
    """Resolve *key* ('prefix:name' or bare name) in a two-level map.

    Returns the mapped entry, None when the name is unknown under a known
    prefix, or the key itself when the prefix is unknown.
    """
    if ':' in key:
        pref, name = key.split(':')
    else:
        pref, name = default_prefix, key
    if pref not in basemap:
        return key
    return basemap[pref].get(name, None)
def emit_enum(prefix, name, stmt, substmts):
    """Print a Go string-enum: the type, its constants, int conversion maps,
    ToInt()/Validate() methods and (when a YANG default exists) Default()
    helpers."""
    type_name_org = name
    type_name = stmt.golang_name
    o = StringIO.StringIO()
    print >> o, '// typedef for identity %s:%s' % (prefix, type_name_org)
    print >> o, 'type %s string' % (type_name)
    const_prefix = convert_const_prefix(type_name_org)
    print >> o, 'const ('
    m = {}
    for sub in substmts:
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        m[sub.arg.lower()] = enum_name
        print >> o, ' %s %s = "%s"' % (enum_name, type_name, sub.arg.lower())
    print >> o, ')\n'
    print >> o, 'var %sToIntMap = map[%s]int {' % (type_name, type_name)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print >> o, ' %s: %d,' % (enum_name, i)
    print >> o, '}\n'
    print >> o, 'func (v %s) ToInt() int {' % (type_name)
    print >> o, 'i, ok := %sToIntMap[v]' % (type_name)
    print >> o, 'if !ok {'
    print >> o, 'return -1'
    print >> o, '}'
    print >> o, 'return i'
    print >> o, '}'
    print >> o, 'var IntTo%sMap = map[int]%s {' % (type_name, type_name)
    for i, sub in enumerate(substmts):
        enum_name = '%s_%s' % (const_prefix, convert_const_prefix(sub.arg))
        print >> o, ' %d: %s,' % (i, enum_name)
    print >> o, '}\n'
    print >> o, 'func (v %s) Validate() error {' % (type_name)
    print >> o, 'if _, ok := %sToIntMap[v]; !ok {' % (type_name)
    print >> o, 'return fmt.Errorf("invalid %s: %%s", v)' % (type_name)
    print >> o, '}'
    print >> o, 'return nil'
    print >> o, '}\n'
    if stmt.search_one('default'):
        default = stmt.search_one('default')
        print >> o, 'func (v %s) Default() %s {' % (type_name, type_name)
        print >> o, 'return %s' % m[default.arg.lower()]
        print >> o, '}\n'
        print >> o, 'func (v %s) DefaultAsNeeded() %s {' % (type_name, type_name)
        print >> o, ' if string(v) == "" {'
        print >> o, ' return v.Default()'
        print >> o, '}'
        print >> o, ' return v'
        print >> o, '}'
    print o.getvalue()
def emit_typedef(ctx, module):
    """Print Go type declarations for the typedefs of *module*.

    Skips excluded paths, identityref typedefs, and names already emitted
    (warning on stderr for duplicates).
    """
    prefix = module.i_prefix
    t_map = ctx.golang_typedef_map[prefix]
    for name, stmt in t_map.items():
        if stmt.path in _typedef_exclude:
            continue
        # skip identityref type because currently skip identity
        if get_type_spec(stmt) == 'identityref':
            continue
        type_name_org = name
        type_name = stmt.golang_name
        if type_name in emitted_type_names:
            warn = "warning %s: %s has already been emitted from %s.\n"\
                % (prefix+":"+type_name_org, type_name_org,
                   emitted_type_names[type_name])
            sys.stderr.write(warn)
            continue
        emitted_type_names[type_name] = prefix+":"+type_name_org
        t = stmt.search_one('type')
        o = StringIO.StringIO()
        if t.arg == 'enumeration':
            emit_enum(prefix, type_name_org, stmt, t.substmts)
        elif t.arg == 'union':
            print >> o, '// typedef for typedef %s:%s'\
                % (prefix, type_name_org)
            print >> o, 'type %s string' % (type_name)
        else:
            print >> o, '// typedef for typedef %s:%s'\
                % (prefix, type_name_org)
            if not is_builtin_type(t):
                # Follow a prefixed reference through the typedef map.
                m = ctx.golang_typedef_map
                for k in t.arg.split(':'):
                    m = m[k]
                print >> o, 'type %s %s' % (type_name, m.golang_name)
            else:
                print >> o, 'type %s %s' % (type_name, t.arg)
        print o.getvalue()
def emit_identity(ctx, module):
    """Emit a Go enum for each identity of *module* that has derived identities."""
    prefix = module.i_prefix
    for name, stmt in ctx.golang_identity_map[prefix].items():
        derived = stmt.search('identity')
        if derived:
            emit_enum(prefix, name, stmt, derived)
def is_reference(s):
    """True when the type statement is a leafref or identityref."""
    return s.arg in ('leafref', 'identityref')
def is_leafref(s):
    """True when the type statement is a leafref."""
    return s.arg == 'leafref'
def is_identityref(s):
    """True when the type statement is an identityref."""
    return s.arg == 'identityref'
def is_leaf(s):
    """True when the statement is a YANG leaf."""
    return s.keyword == 'leaf'
def is_leaflist(s):
    """True when the statement is a YANG leaf-list."""
    return s.keyword == 'leaf-list'
def is_list(s):
    """True when the statement is a YANG list."""
    return s.keyword == 'list'
def is_container(s):
    """True when the statement is a YANG container."""
    return s.keyword == 'container'
def is_case(s):
    """True when the statement is a YANG case (branch of a choice)."""
    return s.keyword == 'case'
def is_choice(s):
    """True when the statement is a YANG choice."""
    return s.keyword == 'choice'
def is_builtin_type(t):
    """True when the YANG type exists verbatim in Go (see _type_builtin)."""
    return t.arg in _type_builtin
def is_translation_required(t):
    """True when the YANG type must be mapped to a different Go type.

    Tests membership on the dict directly; the original's ``.keys()`` call
    built an intermediate list on Python 2 for no benefit.
    """
    return t.arg in _type_translation_map
# YANG builtin / well-known types that map onto a different Go type.
_type_translation_map = {
    'union': 'string',
    'decimal64': 'float64',
    'boolean': 'bool',
    'empty': 'bool',
    'inet:ip-address': 'string',
    'inet:ip-prefix': 'string',
    'inet:ipv4-address': 'string',
    'inet:as-number': 'uint32',
    'bgp-set-community-option-type': 'string',
    'inet:port-number': 'uint16',
    'yang:timeticks': 'int64',
    'ptypes:install-protocol-type': 'string',
}
# YANG builtin types that already exist verbatim in Go.
_type_builtin = ["union",
                 "int8",
                 "int16",
                 "int32",
                 "int64",
                 "string",
                 "uint8",
                 "uint16",
                 "uint32",
                 "uint64",
                 ]
# Modules whose definitions are never emitted.
_module_excluded = ["ietf-inet-types",
                    "ietf-yang-types",
                    ]
# Statement paths skipped during struct field emission.
_path_exclude = ["/rpol:routing-policy/rpol:defined-sets/rpol:neighbor-sets/rpol:neighbor-set/rpol:neighbor",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:community-sets/bgp-pol:community-set/bgp-pol:community-member",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:ext-community-sets/bgp-pol:ext-community-set/bgp-pol:ext-community-member",
                 "/rpol:routing-policy/rpol:defined-sets/bgp-pol:bgp-defined-sets/bgp-pol:as-path-sets/bgp-pol:as-path-set/bgp-pol:as-path-set-member"]
# Typedef paths skipped during typedef emission (currently none).
_typedef_exclude =[]
def generate_header(ctx):
    """Print the copyright notice and the Go package/import preamble."""
    print _COPYRIGHT_NOTICE
    print 'package config'
    print ''
    print 'import "fmt"'
    print ''
def translate_type(key):
    """Return the Go type for YANG type *key*, or *key* unchanged when no
    translation is registered.

    Uses dict.get with a default instead of the original ``key in
    _type_translation_map.keys()`` membership test plus branch.
    """
    return _type_translation_map.get(key, key)
# 'hoge-hoge' -> 'HogeHoge'
def convert_to_golang(type_string):
    """Convert a hyphenated YANG name to Go CamelCase (locale sensitive)."""
    return ''.join(part.capitalize() for part in type_string.split('-'))
# 'hoge-hoge' -> 'HOGE_HOGE'
def convert_const_prefix(type_string):
    """Convert a hyphenated YANG name to SCREAMING_SNAKE_CASE (locale sensitive)."""
    return '_'.join(part.upper() for part in type_string.split('-'))
def chop_suf(s, suf):
    """Return *s* with a trailing *suf* removed, or *s* unchanged."""
    return s[:-len(suf)] if s.endswith(suf) else s
|
{
"content_hash": "265772916a8f1d350a2e0841d90f13ab",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 165,
"avg_line_length": 30.79047619047619,
"alnum_prop": 0.5419630889782452,
"repo_name": "h-naoto/gobgp",
"id": "45a25078724ae5719bf3adc168f25ccdf8501b30",
"size": "20017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/pyang_plugins/bgpyang2golang.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "1118283"
},
{
"name": "Protocol Buffer",
"bytes": "12091"
},
{
"name": "Python",
"bytes": "366622"
},
{
"name": "Shell",
"bytes": "6672"
}
],
"symlink_target": ""
}
|
import getpass
import time
import asyncio
from spade.agent import Agent
from spade.behaviour import CyclicBehaviour
class DummyAgent(Agent):
    """Minimal SPADE agent demonstrating a cyclic (repeating) behaviour."""
    class MyBehav(CyclicBehaviour):
        async def on_start(self):
            print("Starting behaviour . . .")
            self.counter = 0
        async def run(self):
            # run() is invoked repeatedly by the behaviour loop.
            print("Counter: {}".format(self.counter))
            self.counter += 1
            # Yield to the event loop; ticks roughly once per second.
            await asyncio.sleep(1)
    async def setup(self):
        """Called once when the agent starts; attaches the behaviour."""
        print("Agent starting . . .")
        b = self.MyBehav()
        self.add_behaviour(b)
if __name__ == "__main__":
    # Prompt for the XMPP JID and password, then start the agent.
    jid = input("JID> ")
    passwd = getpass.getpass()
    dummy = DummyAgent(jid, passwd)
    # start() returns a future; block until the agent is fully started.
    future = dummy.start()
    future.result()
    print("Wait until user interrupts with ctrl+C")
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("Stopping...")
        dummy.stop()
|
{
"content_hash": "76bae0b31227f3a5f98b9fac376379ad",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 53,
"avg_line_length": 23.41025641025641,
"alnum_prop": 0.5859802847754655,
"repo_name": "javipalanca/spade",
"id": "2790febd737ba0f7ea203bce7344f65bc0088d0f",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dummybehavior.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "38167"
},
{
"name": "Makefile",
"bytes": "2265"
},
{
"name": "Python",
"bytes": "167792"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Minutes
from cms.admin import ContentManageableModelAdmin
@admin.register(Minutes)
class MinutesAdmin(ContentManageableModelAdmin):
    """Admin for Minutes; appends publication status to filters and columns."""

    date_hierarchy = 'date'

    def get_list_filter(self, request):
        base = super().get_list_filter(request)
        return list(base) + ['is_published']

    def get_list_display(self, request):
        base = super().get_list_display(request)
        return list(base) + ['is_published']
|
{
"content_hash": "acf571e44eb919fabc078362ca0b26bc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 29.058823529411764,
"alnum_prop": 0.7064777327935222,
"repo_name": "manhhomienbienthuy/pythondotorg",
"id": "63f7fdd4dbdcf5aecea554f5b0940e3590f82dfa",
"size": "494",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "minutes/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7686"
},
{
"name": "HTML",
"bytes": "491673"
},
{
"name": "JavaScript",
"bytes": "20834"
},
{
"name": "PostScript",
"bytes": "19072"
},
{
"name": "Procfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "1075699"
},
{
"name": "Ruby",
"bytes": "1464"
},
{
"name": "SCSS",
"bytes": "197973"
}
],
"symlink_target": ""
}
|
from pygame.sprite import Sprite
import pygame
import os
from math import cos, sin, atan2, degrees, pi, sqrt
from mixins import ImageBatch
from random import randrange
from random import choice as randchoice
import well
def vector_add(rads1, speed1, rads2, speed2):
    """Add two polar-form vectors; return (angle_radians, magnitude)."""
    total_x = speed1 * cos(rads1) + speed2 * cos(rads2)
    total_y = speed1 * sin(rads1) + speed2 * sin(rads2)
    return (atan2(total_y, total_x), sqrt(total_x ** 2 + total_y ** 2))
class Celestial(Sprite, well.GravityWell):
    '''
    requires that self.mass and Class.Image be defined
    '''
    # Scales gravitational acceleration down to on-screen movement.
    MOVEMENT_CONST = 10 ** 20
    @staticmethod
    def set_fps(fps):
        # Must be called before any Celestial is constructed (__init__ reads it).
        Celestial.FPS = fps
    def __init__(self, x, y):
        super(Celestial, self).__init__()
        self.rect = self.image.get_rect()
        self.rect.centerx = x
        self.rect.centery = y
        self._counter = 0
        # Milliseconds between physics ticks (assumes delta_time is in ms
        # -- TODO confirm against the game loop).
        self._delay = 1000 / Celestial.FPS
        # Objects this body exerts gravity on.
        self._pullable = set()
    @property
    def x(self):
        return self.rect.centerx
    @property
    def y(self):
        return self.rect.centery
    def update(self, delta_time):
        # Accumulate elapsed time and run at most one physics tick per _delay.
        self._counter += delta_time
        if self._counter > self._delay:
            self._counter = 0
            self._update()
            for obj in self._pullable:
                self._pull_obj(obj)
    def _update(self):
        # Subclass hook, called once per physics tick.
        pass
    def draw(self, screen):
        screen.blit(self.image, self.rect)
    def _pull_obj(self, obj):
        # Accelerate obj toward this body; -dy because screen y grows downward.
        dx = obj.x - self.x
        dy = obj.y - self.y
        distance = (dx ** 2 + dy ** 2) ** (1.0 / 2.0)
        rads = atan2(-dy, dx)
        speed = self.pull(distance) / Celestial.FPS
        obj.accelerate(-1 * rads, -1 * speed / Celestial.MOVEMENT_CONST)
    def pull_on(self, obj):
        self._pullable.add(obj)
    def pull_off(self, obj):
        self._pullable.remove(obj)
class Sun(Celestial):
    """A star: the heaviest gravity well, rendered from one scaled sprite."""

    IMAGE = pygame.image.load(os.path.join('Resources', 'sprites', 'sun', 'Sun.png'))
    # Half of our Solar Mass.
    STAR_MASS_CONST = 1 * 10 ** 33
    MASS_RANGE = (1, 51)

    def __init__(self, x, y, radius=None, mass=None):
        """Create a sun at (x, y); radius defaults to 50 px, mass is random
        unless given."""
        self.radius = 50 if radius is None else radius
        diameter = self.radius * 2
        self.image = pygame.transform.smoothscale(Sun.IMAGE, (diameter, diameter))
        if mass is None:
            mass = Sun.STAR_MASS_CONST * randrange(Sun.MASS_RANGE[0], Sun.MASS_RANGE[1])
        self.mass = mass
        super(Sun, self).__init__(x, y)
class Planet(Celestial, ImageBatch):
    """An orbiting body drawn from a randomly chosen planet sprite."""
    # Planet sprites; presumably loaded into Planet.IMAGES by ImageBatch
    # (TODO confirm against mixins.ImageBatch).
    IMAGE_PATH = os.path.join('Resources', 'sprites', 'planets')
    PLANET_MASS_CONST = 1 * 10 ** 27
    MASS_RANGE = (1, 16)
    def __init__(self, x, y, radius=None, mass=None, image=None):
        if radius is None:
            radius = 20
        self.radius = radius
        # Fall back to a random sprite when none/unknown is requested.
        if image is None or image not in Planet.IMAGES.keys():
            image = randchoice(Planet.IMAGES.keys())
        self.image = pygame.transform.smoothscale(Planet.IMAGES[image], (self.radius * 2, self.radius * 2))
        if mass is None:
            mass = Planet.PLANET_MASS_CONST * randrange(Planet.MASS_RANGE[0], Planet.MASS_RANGE[1])
        self.mass = mass
        # Random initial orbital angle in radians.
        self.rads = randrange(0, 360) * pi / 180
        super(Planet, self).__init__(x, y)
    def _update(self):
        # NOTE(review): relies on orbit() having been called first --
        # self.orbit, self.distance and self.speed are set there, and the
        # assignment `self.orbit = obj` inside orbit() shadows the method
        # on the instance. Verify this one-shot setup is intentional.
        self.rads += pi / 2 ** self.speed
        self.rads %= 2 * pi
        self.rect.centerx = self.orbit.x + self.distance * cos(self.rads)
        self.rect.centery = self.orbit.y + self.distance * sin(self.rads)
    def orbit(self, obj, distance, speed=None):
        '''
        TODO: use GravityWell.satellite() to figure out the minimal speed needed
        TODO: elliptical orbits?
        '''
        self.orbit = obj
        self.distance = distance
        if speed is None:
            speed = randrange(10, 15)
        self.speed = speed
    # def orbit(self, obj, distance):
    #     obj.pull_on(self)
    #     self.x = obj.x
    #     self.y = obj.y - distance
    #     self.rect.centerx = self.x
    #     self.rect.centery = self.y
    #     self.rads = pi / 2
    #     self.speed = obj.satellite(distance)
    # def accelerate(self, rads, speed):
    #     self.rads, self.speed = vector_add(
    #         self.rads, self.speed,
    #         rads, speed
    #     )
    # def _update(self):
    #     x = self.speed * cos(self.rads)
    #     y = self.speed * sin(self.rads)
    #     self.x = self.x + (x / Celestial.MOVEMENT_CONST)
    #     self.y = self.y + (y / Celestial.MOVEMENT_CONST)
    #     self.rect.centerx = self.x
    #     self.rect.centery = self.y
|
{
"content_hash": "160ff83c214284c7b3f7084d5ac8e41d",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 107,
"avg_line_length": 23.743589743589745,
"alnum_prop": 0.5606911447084233,
"repo_name": "Saevon/spacebattle",
"id": "25c8e88e06dc0fd6161cef57b3d00297bf213d41",
"size": "4630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celestials.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47374"
}
],
"symlink_target": ""
}
|
"""
$ python cmdln_help1.py help
HelpShell: blah blah blah
$ python cmdln_help1.py help documented
documented: blah documented blah
$ python cmdln_help1.py help hashelpfunc
hashelpfunc: blah hashelpfunc blah
$ python cmdln_help1.py help undocumented
cmdln_help1.py: no help on 'undocumented'
$ python cmdln_help1.py help undefined
cmdln_help1.py: unknown command: 'undefined'
Try 'cmdln_help1.py help' for info.
$ python cmdln_help1.py #expecttest: INTERACTIVE, PROMPT="help-test> "
help-test> help
HelpShell: blah blah blah
help-test> help documented
documented: blah documented blah
help-test> help hashelpfunc
hashelpfunc: blah hashelpfunc blah
help-test> help undocumented
no help on 'undocumented'
help-test> help undefined
unknown command: 'undefined'
help-test> ^D
"""
import sys
import cmdln
# NOTE: the docstrings below are the shell's help output, checked by the
# doctest in the module docstring -- do not edit their text.
class HelpShell(cmdln.RawCmdln):
    """HelpShell: blah blah blah"""
    prompt = "help-test> "
    def do_documented(self, argv):
        """${cmd_name}: blah documented blah"""
    def do_undocumented(self, argv):
        # Intentionally has no docstring: exercises the "no help" path.
        pass
    def help_hashelpfunc(self):
        # Help text may come from a help_* method instead of a docstring.
        return "${cmd_name}: blah hashelpfunc blah"
    def do_hashelpfunc(self, argv):
        pass
if __name__ == "__main__":
    # LOOP_IF_EMPTY: drop into the interactive shell when no command is given.
    sys.exit(HelpShell().main(loop=cmdln.LOOP_IF_EMPTY))
|
{
"content_hash": "060da23703be2b623ac93d6fbe1674fb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 74,
"avg_line_length": 28.041666666666668,
"alnum_prop": 0.6693907875185735,
"repo_name": "trentm/cmdln",
"id": "05695f213bdef23df28ea121b9e5089f3a40733b",
"size": "1369",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/cmdln_help1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1562"
},
{
"name": "Python",
"bytes": "340785"
}
],
"symlink_target": ""
}
|
"""
Widget mixin that only renders selected options with QuerySetSequence.
For details about why this is required, see :mod:`dal.widgets`.
"""
from dal.widgets import WidgetMixin
from django import forms
from django.contrib.contenttypes.models import ContentType
class QuerySetSequenceSelectMixin(WidgetMixin):
    """Support QuerySetSequence in WidgetMixin."""

    def label_from_instance(self, obj):
        """Convert an object into string. Override it to customize display."""
        return str(obj)

    def filter_choices_to_render(self, selected_choices):
        """Overwrite self.choices to exclude unselected values.

        Choice values have the form '<contenttype_pk>-<model_pk>'.
        """
        if len(selected_choices) == 1 and not selected_choices[0]:
            selected_choices = []
        # Group selected model pks by their content-type pk.
        pks_by_ctype = {}
        for value in selected_choices:
            ctype_pk, model_pk = value.split('-', 1)
            pks_by_ctype.setdefault(int(ctype_pk), []).append(model_pk)
        self.choices = []
        for ctype_pk, pks in pks_by_ctype.items():
            model = ContentType.objects.get_for_id(ctype_pk).model_class()
            for obj in model.objects.filter(pk__in=pks):
                self.choices.append(
                    ('%s-%s' % (ctype_pk, obj.pk), self.label_from_instance(obj))
                )
# Mixin comes first so its choice filtering overrides forms.Select.
class QuerySetSequenceSelect(QuerySetSequenceSelectMixin,
                             forms.Select):
    """Select widget for QuerySetSequence choices."""
# Mixin comes first so its choice filtering overrides forms.SelectMultiple.
class QuerySetSequenceSelectMultiple(QuerySetSequenceSelectMixin,
                                     forms.SelectMultiple):
    """SelectMultiple widget for QuerySetSequence choices."""
|
{
"content_hash": "834e6bae4665ba6be2f4e5476386f440",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 33.3921568627451,
"alnum_prop": 0.6335877862595419,
"repo_name": "yourlabs/django-autocomplete-light",
"id": "00e3eb38e54748bef1151c7073fb66f3c2f0ab34",
"size": "1703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/dal_queryset_sequence/widgets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11205"
},
{
"name": "HTML",
"bytes": "5709"
},
{
"name": "JavaScript",
"bytes": "27379"
},
{
"name": "Python",
"bytes": "210537"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
}
|
import configparser
from functools import lru_cache
import glob
import io
import os
import re
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import pandas as pd
# import numpy as np
# Root directory holding one "test-run-<id>" directory per run.
TEST_RESULT_ROOT = "/Users/esteele/tmp/wifi-test/root/wifi-test-results"
METADATA_FILENAME = "metadata.ini"
TEST_RESULT_GLOB = "*.csv"
# Per-run layout; each pattern is formatted with the test run id.
TEST_RESULT_DIR_PATTERN = os.path.join(TEST_RESULT_ROOT, "test-run-{0}")
TEST_RESULT_METADATA_FILE = os.path.join(TEST_RESULT_DIR_PATTERN,
                                         METADATA_FILENAME)
TEST_RESULT_FILES_PATTERN = os.path.join(TEST_RESULT_DIR_PATTERN,
                                         TEST_RESULT_GLOB)
# Default y-value (bytes/sec) for a reference line on throughput graphs.
DEFAULT_REFERENCE_LINE_Y_VALUE = 250000
# Bandwidth (bytes/sec) -> human-readable video bitrate description.
BW_DESC = {
    125000: "360p bitrate",
    250000: "480p bitrate",
    500000: "720p bitrate",
}
def get_test_run_results(test_run_id):
    """Concatenate every per-client CSV of a run into one in-memory stream.

    Returns a StringIO rewound to the start, ready for reading.
    """
    files_glob = TEST_RESULT_FILES_PATTERN.format(test_run_id)
    combined = io.StringIO()
    for csv_path in glob.iglob(files_glob):
        with open(csv_path) as handle:
            combined.write(handle.read())
    # Prime for reading
    combined.seek(0)
    return combined
def get_dataframe_from_test_run(test_run_id, downscale_factor=1):
    """Load a run's per-client throughput samples into a DataFrame.

    The index is a timedelta offset from the earliest sample, bucketed
    into multiples of *downscale_factor* seconds.
    """
    raw_run_data = get_test_run_results(test_run_id)
    frame = pd.read_csv(raw_run_data,
                        comment="#",
                        names=["client_id", "timestamp", "bytes_per_sec"])
    # Offset from the earliest sample, floored to the downscale bucket.
    offsets = frame["timestamp"] - min(frame["timestamp"])
    offsets = (offsets // downscale_factor) * downscale_factor
    frame["time_offset"] = pd.to_timedelta(offsets, unit="s")
    frame.set_index("time_offset", inplace=True)
    return frame
def get_graph_title_for_run(test_run_id):
    """Build the descriptive graph title for a single test run."""
    config = configparser.ConfigParser()
    config.read(TEST_RESULT_METADATA_FILE.format(test_run_id))
    # The "global" section describes the run itself, not a client.
    client_count = len(config.sections()) - 1
    stream_count = sum(int(config[section]["parallel_run_count"])
                       for section in config.sections()
                       if section != "global")
    bandwidth_desc = "{0} bytes/sec".format(
        config["global"]["test_bandwidth_bps"]
    )
    template = ("Run {0} against {1} {2}.\n"
                "{3} streams between {4} clients. "
                "Each stream attempting {5}")
    return template.format(
        test_run_id,
        config["global"]["test_server_hostname"],
        config["global"]["extra_run_description"],
        stream_count,
        client_count,
        bandwidth_desc,
    )
def get_graph_title_for_group(test_group_id):
    """Build the descriptive graph title for a group of repeat runs.

    Metadata (server, bandwidth, client layout) is read from the first
    run in the group; repeat runs are assumed to share it.
    """
    run_ids = get_test_run_ids_for_group_id(test_group_id)
    config = configparser.ConfigParser()
    config.read(TEST_RESULT_METADATA_FILE.format(run_ids[0]))
    # The "global" section describes the run itself, not a client.
    client_count = len(config.sections()) - 1
    stream_count = sum(int(config[section]["parallel_run_count"])
                       for section in config.sections()
                       if section != "global")
    bandwidth_desc = "{0} bytes/sec".format(
        config["global"]["test_bandwidth_bps"]
    )
    template = ("Group run {0} against {1} {2} with {3} repeat runs.\n"
                "{4} streams between {5} clients. "
                "Each stream attempting {6}")
    return template.format(
        test_group_id,
        config["global"]["test_server_hostname"],
        config["global"]["extra_run_description"],
        len(run_ids),
        stream_count,
        client_count,
        bandwidth_desc,
    )
def all_clients_succeeded(run_id):
    """Print a comparison of expected vs successful client counts for a run.

    The expected count is parsed from the inventory portion of the
    ``test_group_id`` (e.g. "5c2s" -> 5 clients); the successful count is
    the number of per-client sections in the run's metadata file.

    NOTE: currently always returns ``True`` — the printed output is for
    manual inspection.  TODO: return the actual comparison once verified.
    """
    config = configparser.ConfigParser()
    config.read(TEST_RESULT_METADATA_FILE.format(run_id))
    # Every section other than "global" represents one successful client.
    successful_client_count = len(config.sections()) - 1
    inventory_str = config["global"]["test_group_id"]
    mo = re.search("(?P<inv_count>[0-9]+)c[0-9]+s", inventory_str)
    # Bug fix: mo.groups('inv_count') returned the whole groups tuple
    # (the argument to groups() is a *default*, not a group name) and
    # raised AttributeError when the pattern didn't match.
    inv_count = mo.group("inv_count") if mo else "unknown"
    print("inv count = %s. Successful client count = %s" %
          (inv_count, successful_client_count))
    return True
def get_bw_annotation_detail(run_id):
    """Return ``(bandwidth_bps, description)`` for a run's target bandwidth.

    The description is the empty string when the bandwidth is not one of
    the known video bitrates in ``BW_DESC``.
    """
    parser = configparser.ConfigParser()
    parser.read(TEST_RESULT_METADATA_FILE.format(run_id))
    bps = int(parser["global"]["test_bandwidth_bps"])
    description = BW_DESC.get(bps, "")
    return bps, description
def show_run_df_as_line_graph(run_id, ax):
    """Plot per-client throughput for one run as lines on the given axes.

    A dashed horizontal reference line marks the run's target bandwidth
    when it is a known video bitrate, otherwise the default reference
    value; the line is labelled with its bitrate description.
    """
    parser = configparser.ConfigParser()
    parser.read(TEST_RESULT_METADATA_FILE.format(run_id))
    target_bps = int(parser["global"]["test_bandwidth_bps"])
    if target_bps in BW_DESC:
        reference_line_y = target_bps
    else:
        reference_line_y = DEFAULT_REFERENCE_LINE_Y_VALUE
    per_client = get_dataframe_from_test_run(run_id).pivot(
        columns="client_id",
        values="bytes_per_sec")
    ax = per_client.plot(figsize=(20, 10), ax=ax)
    ax.set_xlabel("Elapsed time (sec)")
    ax.set_xbound(lower=0)
    ax.set_ylabel("Throughput (bytes/sec)")
    ax.axhline(y=reference_line_y,
               color='0.75',
               linestyle="--")
    # Could also do va=bottom, ha=right to put the annotation in the graph
    ax.annotate(BW_DESC[reference_line_y],
                xy=(1.0, reference_line_y),
                xycoords=("axes fraction", "data"),
                va="center", ha="left")
    ax.set_title(get_graph_title_for_run(run_id))
def show_multiple_run_ids_as_line_graph(run_ids):
    """Draw one line graph per run id, side by side with shared axes."""
    _, axes = plt.subplots(1, len(run_ids),
                           sharex=True,
                           sharey=True,
                           squeeze=False)
    # squeeze=False guarantees a 2-D axes array even for a single run.
    for axis, run_id in zip(axes[0], run_ids):
        show_run_df_as_line_graph(run_id, ax=axis)
def show_run_df_as_boxplot(df, title, annotation_xpoint, annotation_str):
    """Draw a boxplot of throughput per time bucket.

    :param df: DataFrame with a ``time_offset`` index and
        ``bytes_per_sec`` column (see get_dataframe_from_test_run).
    :param title: Axes title (the auto-generated figure title is cleared).
    :param annotation_xpoint: y-value (bytes/sec) of the reference line.
    :param annotation_str: label for the reference line; empty string
        suppresses both the line and the annotation.
    """
    ax = df.boxplot(column="bytes_per_sec",
                    by="time_offset",
                    figsize=(10, 5),
                    whis=[5, 95],
                    showfliers=False)
    ax.set_xlabel("Elapsed time (sec)")
    major_loc = ticker.AutoLocator()
    major_fmt = ticker.FormatStrFormatter('%d')
    ax.xaxis.set_major_locator(major_loc)
    ax.xaxis.set_major_formatter(major_fmt)
    ax.grid()
    ax.set_ylabel("Throughput (bytes/sec)")
    # An empty annotation string means that it doesn't correspond to a known
    # bitrate, so we won't add an annotation
    if annotation_str:
        # Bug fix: the reference line was hard-coded at y=250000 and could
        # disagree with the annotated position; draw it at the same height
        # as the annotation.
        ax.axhline(y=annotation_xpoint, color='0.75', linestyle="--")
        # Could also do va=bottom, ha=right to put the annotation in the graph
        ax.annotate(annotation_str,
                    xy=(1.0, annotation_xpoint),
                    xycoords=("axes fraction", "data"),
                    va="center", ha="left")
    ax.set_title(title)
    # Nerf figure title
    ax.get_figure().suptitle("")
@lru_cache()
def get_test_run_ids_for_group_id(test_group_id):
    """Return the run ids of every test run belonging to a group.

    Scans the metadata file of each run directory under TEST_RESULT_ROOT.
    Results are cached since on-disk group membership is static within an
    analysis session.
    """
    matching_run_ids = set()
    all_metadata_files = glob.glob(
        os.path.join(TEST_RESULT_ROOT, "*", METADATA_FILENAME)
    )
    for metadata_file in all_metadata_files:
        # Bug fix: use a fresh parser per file.  A single shared parser
        # merges sections across successive read() calls, so values from
        # one metadata file leaked into the next, which could produce
        # spurious matches (and the duplicates noted previously).
        config = configparser.ConfigParser()
        config.read(metadata_file)
        group_id = config.get("global", "test_group_id", fallback=None)
        if group_id == test_group_id:
            matching_run_ids.add(config.get("global", "test_run_id"))
    return list(matching_run_ids)
def show_group_as_boxplot(test_group_id):
    """Combine every run in a group and display it as a single boxplot."""
    run_ids = get_test_run_ids_for_group_id(test_group_id)
    # Report (to stdout) how many clients completed each run.
    for run_id in run_ids:
        all_clients_succeeded(run_id)
    frames = [get_dataframe_from_test_run(run_id) for run_id in run_ids]
    group_df = pd.concat(frames)
    bandwidth, description = get_bw_annotation_detail(run_ids[0])
    show_run_df_as_boxplot(group_df,
                           get_graph_title_for_group(test_group_id),
                           bandwidth,
                           description)
|
{
"content_hash": "2e0eaf90ffb693797b373893f5b5bb97",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 78,
"avg_line_length": 36.89041095890411,
"alnum_prop": 0.5890580517390767,
"repo_name": "ConnectBox/wifi-test-framework",
"id": "851fe4f30eba16f9ec962f25c0e77a3e934f9c92",
"size": "8103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/network-benchmark.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "68"
},
{
"name": "Jupyter Notebook",
"bytes": "270751"
},
{
"name": "Makefile",
"bytes": "3128"
},
{
"name": "Python",
"bytes": "761978"
},
{
"name": "Shell",
"bytes": "6674"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
import unittest
from binascii import unhexlify
from bitcoin.base58 import *
def load_test_vectors(name):
    """Yield each test vector from a JSON file in the ``data`` directory.

    ``name`` is the bare filename, e.g. ``'base58_encode_decode.json'``.
    """
    # Bug fix: building the path with '+' produced an absolute '/data/...'
    # path whenever os.path.dirname(__file__) was empty; os.path.join
    # handles that case correctly.
    path = os.path.join(os.path.dirname(__file__), 'data', name)
    with open(path, 'r') as fd:
        for testcase in json.load(fd):
            yield testcase
class Test_base58(unittest.TestCase):
    """Round-trip the shared base58 encode/decode test vectors."""

    def test_encode_decode(self):
        for exp_bin, exp_base58 in load_test_vectors('base58_encode_decode.json'):
            raw = unhexlify(exp_bin.encode('utf8'))
            # Encoding the binary form must give the expected base58 text,
            # and decoding that text must give back the binary form.
            self.assertEqual(encode(raw), exp_base58)
            self.assertEqual(decode(exp_base58), raw)
class Test_CBase58Data(unittest.TestCase):
    """Checks for CBase58Data construction and error handling."""

    def test_from_data(self):
        # (payload bytes, version byte, expected base58check string)
        vectors = (
            (b"b\xe9\x07\xb1\\\xbf'\xd5BS\x99\xeb\xf6\xf0\xfbP\xeb\xb8\x8f\x18",
             0, '1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa'),
            (b'Bf\xfco,(a\xd7\xfe"\x9b\'\x9ay\x80:\xfc\xa7\xba4',
             196, '2MyJKxYR2zNZZsZ39SgkCXWCfQtXKhnWSWq'),
        )
        for payload, version, expected in vectors:
            b = CBase58Data.from_bytes(payload, version)
            self.assertEqual(b.nVersion, version)
            self.assertEqual(str(b), expected)

    def test_invalid_base58_exception(self):
        invalids = ('', # missing everything
                    '#', # invalid character
                    '1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNb', # invalid checksum
                    )
        for invalid in invalids:
            msg = '%r should have raised InvalidBase58Error but did not' % invalid
            with self.assertRaises(Base58Error, msg=msg):
                CBase58Data(invalid)
|
{
"content_hash": "5d58904b2404af39d233ae02c101ae2c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 105,
"avg_line_length": 36.04255319148936,
"alnum_prop": 0.6381345926800472,
"repo_name": "Peerapps/PeerChat",
"id": "a57d1fe17edbffe68d8bfa2ca57ad2d7b19817bb",
"size": "2086",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "bitcoin/tests/test_base58.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "232157"
}
],
"symlink_target": ""
}
|
from Framework.Resposta import Resposta
from Models.Sala.Sala import Sala as ModelSala
class RespostaVer(Resposta):
    """Response for the "view room" (ver sala) endpoint.

    Wraps the raw room record in a ``ModelSala`` and exposes it as the
    response body (``corpo``).
    """
    def __init__(self,sala):
        # `corpo` (body) carries the room model back to the caller.
        self.corpo = ModelSala(sala)
|
{
"content_hash": "244f75243c0a7e41a51083d390f83562",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 25,
"alnum_prop": 0.7714285714285715,
"repo_name": "AEDA-Solutions/matweb",
"id": "76c16c1a1321e0a1def9da126e84f5e3e5d7fd00",
"size": "175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/Models/Sala/RespostaVer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "475557"
},
{
"name": "HTML",
"bytes": "12097161"
},
{
"name": "JavaScript",
"bytes": "190487"
},
{
"name": "PHP",
"bytes": "1122"
},
{
"name": "Python",
"bytes": "152996"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import os
import httplib as http
from flask import request
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.auth import cas
from framework.routing import Rule
from framework.flask import redirect
from framework.sessions import session
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from website import util
from website import settings
from website import language
from website.util import paths
from website.util import sanitize
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.notifications import views as notification_views
def get_globals():
    """Context variables that are available for every template rendered by
    OSFWebRenderer.

    Called per request (OsfWebRenderer passes the function itself, not its
    result), so values reflect the current user, session and Flask request.
    """
    user = _get_current_user()  # None for anonymous requests
    return {
        # Current-user details; empty strings when not logged in.
        'user_name': user.username if user else '',
        'user_full_name': user.fullname if user else '',
        'user_id': user._primary_key if user else '',
        'user_url': user.url if user else '',
        'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
        'user_api_url': user.api_url if user else '',
        'display_name': get_display_name(user.fullname) if user else '',
        # Deployment / feature-flag settings surfaced to templates.
        'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
        'piwik_host': settings.PIWIK_HOST,
        'piwik_site_id': settings.PIWIK_SITE_ID,
        'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
        'dev_mode': settings.DEV_MODE,
        'allow_login': settings.ALLOW_LOGIN,
        'cookie_name': settings.COOKIE_NAME,
        # Flash-style status messages; "pop" suggests retrieval clears them.
        'status': status.pop_status_messages(),
        'domain': settings.DOMAIN,
        'api_domain': settings.API_DOMAIN,
        'disk_saving_mode': settings.DISK_SAVING_MODE,
        'language': language,
        # URL helpers for use inside Mako templates.
        'web_url_for': util.web_url_for,
        'api_url_for': util.api_url_for,
        'api_v2_url': util.api_v2_url, # URL function for templates
        'api_v2_base': util.api_v2_url(''), # Base url used by JS api helper
        # Escaping / serialization helpers.
        'sanitize': sanitize,
        'js_str': lambda x: x.replace("'", r"\'").replace('"', r'\"'),
        'sjson': lambda s: sanitize.safe_json(s),
        'webpack_asset': paths.webpack_asset,
        'waterbutler_url': settings.WATERBUTLER_URL,
        # CAS authentication URLs derived from the current request URL.
        'login_url': cas.get_login_url(request.url, auto=True),
        'access_token': session.data.get('auth_user_access_token') or '',
        'auth_url': cas.get_login_url(request.url),
        'profile_url': cas.get_profile_url(),
    }
class OsfWebRenderer(WebRenderer):
    """Render a Mako template with OSF context vars.

    :param trust: Optional. If ``False``, markup-safe escaping will be enabled
    """
    def __init__(self, *args, **kwargs):
        # Pass the callable (not its result) so context vars are
        # re-evaluated for every rendered request.
        kwargs['data'] = get_globals
        super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
#: (renders the empty template string, so no page body is produced).
notemplate = OsfWebRenderer('', renderer=render_mako_string)
# Static files (robots.txt, etc.)
def favicon():
    """Serve the site favicon from the static folder."""
    return send_from_directory(settings.STATIC_FOLDER,
                               'favicon.ico',
                               mimetype='image/vnd.microsoft.icon')
def robots():
    """Serves the robots.txt file."""
    # Prefer a deployment-specific robots.local.txt when one exists.
    local_override = os.path.join(settings.STATIC_FOLDER, 'robots.local.txt')
    chosen = 'robots.local.txt' if os.path.exists(local_override) else 'robots.txt'
    return send_from_directory(settings.STATIC_FOLDER,
                               chosen,
                               mimetype='text/plain')
def goodbye():
    """Post-logout landing page handler."""
    # Redirect to dashboard if logged in
    if _get_current_user():
        return redirect(util.web_url_for('dashboard'))
    # Otherwise flash a logout confirmation and render the page.
    status.push_status_message(language.LOGOUT, 'success')
    return {}
def make_url_map(app):
"""Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
"""
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule('/<path:_>', ['get', 'post'], HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string)),
Rule('/api/v1/<path:_>', ['get', 'post'],
HTTPError(http.NOT_FOUND), json_renderer),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
notemplate,
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule('/dashboard/', 'get', website_views.dashboard, OsfWebRenderer('dashboard.mako')),
Rule('/reproducibility/', 'get',
website_views.reproducibility, OsfWebRenderer('', render_mako_string)),
Rule('/about/', 'get', website_views.redirect_about, json_renderer,),
Rule('/howosfworks/', 'get', website_views.redirect_howosfworks, json_renderer,),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako')),
Rule('/getting-started/', 'get', {}, OsfWebRenderer('public/pages/getting_started.mako')),
Rule('/explore/', 'get', {}, OsfWebRenderer('public/explore.mako')),
Rule(['/messages/', '/help/'], 'get', {}, OsfWebRenderer('public/comingsoon.mako')),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako'),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako'),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/meetings/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako'),
),
Rule(
'/presentations/',
'get',
conference_views.redirect_to_meetings,
json_renderer,
),
Rule('/news/', 'get', {}, OsfWebRenderer('public/pages/news.mako')),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako'),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/dashboard/get_nodes/', 'get', website_views.get_dashboard_nodes, json_renderer),
Rule(
[
'/dashboard/<nid>',
'/dashboard/',
],
'get', website_views.get_dashboard, json_renderer),
], prefix='/api/v1')
### Metadata ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/',
'/project/<pid>/node/<nid>/comments/',
],
'get',
project_views.comment.list_comments,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/discussion/',
'/project/<pid>/node/<nid>/comments/discussion/',
],
'get',
project_views.comment.comment_discussion,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/',
'/project/<pid>/node/<nid>/comment/',
],
'post',
project_views.comment.add_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'put',
project_views.comment.edit_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'delete',
project_views.comment.delete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/undelete/',
'/project/<pid>/node/<nid>/comment/<cid>/undelete/',
],
'put',
project_views.comment.undelete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/report/',
'/project/<pid>/node/<nid>/comment/<cid>/report/',
],
'post',
project_views.comment.report_abuse,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/unreport/',
'/project/<pid>/node/<nid>/comment/<cid>/unreport/',
],
'post',
project_views.comment.unreport_abuse,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule(
'/explore/activity/',
'get',
discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako', trust=False)
),
])
### Auth ###
# Web
process_rules(app, [
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
# View will either redirect or display error message
OsfWebRenderer('error.mako', render_mako_string)
),
Rule(
'/resetpassword/<verification_key>/',
['get', 'post'],
auth_views.reset_password,
OsfWebRenderer('public/resetpassword.mako', render_mako_string)
),
# Resend confirmation URL linked to in CAS login page
Rule(
'/resend/',
['get', 'post'],
auth_views.resend_confirmation,
OsfWebRenderer('resend.mako', render_mako_string)
),
# TODO: Remove `auth_register_post`
Rule('/register/', 'post', auth_views.auth_register_post,
OsfWebRenderer('public/login.mako')),
Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
Rule(['/login/', '/account/'], 'get',
auth_views.auth_login, OsfWebRenderer('public/login.mako')),
Rule('/login/first/', 'get', auth_views.auth_login,
OsfWebRenderer('public/login.mako'),
endpoint_suffix='__first', view_kwargs={'first': True}),
Rule('/logout/', 'get', auth_views.auth_logout, notemplate),
Rule('/forgotpassword/', 'get', auth_views.forgot_password_get,
OsfWebRenderer('public/forgot_password.mako')),
Rule('/forgotpassword/', 'post', auth_views.forgot_password_post,
OsfWebRenderer('public/login.mako')),
Rule([
'/midas/', '/summit/', '/accountbeta/', '/decline/'
], 'get', auth_views.auth_registerbeta, OsfWebRenderer('', render_mako_string)),
Rule('/login/connected_tools/',
'get',
landing_page_views.connected_tools,
OsfWebRenderer('public/login_landing.mako')),
Rule('/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
OsfWebRenderer('public/login_landing.mako')),
])
### Profile ###
# Web
process_rules(app, [
Rule(
'/profile/',
'get',
profile_views.profile_view,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
'/profile/<uid>/',
'get',
profile_views.profile_view_id,
OsfWebRenderer('profile.mako', trust=False)
),
Rule(
["/user/merge/"],
'get',
auth_views.merge_user_get,
OsfWebRenderer("merge_accounts.mako", trust=False)
),
Rule(
["/user/merge/"],
'post',
auth_views.merge_user_post,
OsfWebRenderer("merge_accounts.mako", trust=False)
),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(
['/user/<uid>/<pid>/claim/'],
['get', 'post'],
project_views.contributor.claim_user_form,
OsfWebRenderer('claim_account.mako', trust=False)
),
Rule(
['/user/<uid>/<pid>/claim/verify/<token>/'],
['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako', trust=False)
),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako', trust=False),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako', trust=False),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako', trust=False),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako', trust=False),
),
Rule(
'/settings/applications/',
'get',
profile_views.oauth_application_list,
OsfWebRenderer('profile/oauth_app_list.mako', trust=False)
),
Rule(
'/settings/applications/create/',
'get',
profile_views.oauth_application_register,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
Rule(
'/settings/applications/<client_id>/',
'get',
profile_views.oauth_application_detail,
OsfWebRenderer('profile/oauth_app_detail.mako', trust=False)
),
# TODO: Uncomment once outstanding issues with this feature are addressed
# Rule(
# '/@<twitter_handle>/',
# 'get',
# profile_views.redirect_to_twitter,
# OsfWebRenderer('error.mako', render_mako_string, trust=False)
# ),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/resend/', 'put', profile_views.resend_confirmation, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
'/profile/export/',
'post',
profile_views.request_export,
json_renderer,
),
Rule(
'/profile/deactivate/',
'post',
profile_views.request_deactivation,
json_renderer,
),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule('/search/', 'get', {}, OsfWebRenderer('search.mako')),
Rule('/share/', 'get', {}, OsfWebRenderer('share_search.mako')),
Rule('/share/registration/', 'get', {'register': settings.SHARE_REGISTRATION_URL}, OsfWebRenderer('share_registration.mako')),
Rule('/share/help/', 'get', {'help': settings.SHARE_API_DOCS_URL}, OsfWebRenderer('share_api_docs.mako')),
Rule('/share_dashboard/', 'get', {}, OsfWebRenderer('share_dashboard.mako')),
Rule('/share/atom/', 'get', search_views.search_share_atom, xml_renderer),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/search/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Project
# Web
process_rules(app, [
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako')),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('index.mako')),
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'get',
project_views.node.view_project,
OsfWebRenderer('project/project.mako', trust=False)
),
# Create a new subproject/component
Rule(
'/project/<pid>/newnode/',
'post',
project_views.node.project_new_node,
notemplate
),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako')),
Rule('/api/v1/folder/<nid>', 'post', project_views.node.folder_new_post, json_renderer),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako', trust=False),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako', trust=False)
),
# Permissions
Rule(
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako') # TODO: Should this be notemplate? (post request)
),
### Logs ###
# View forks
Rule(
[
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
],
'get',
project_views.node.node_forks,
OsfWebRenderer('project/forks.mako', trust=False)
),
# Registrations
Rule(
[
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
],
'get',
project_views.register.node_register_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
],
'get',
project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako', trust=False)
),
Rule(
[
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
],
'get',
project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako', trust=False)
),
# TODO: Can't create a registration locally, so can't test this one..?
Rule(
[
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/',
],
'get',
project_views.register.node_registration_retraction_get,
OsfWebRenderer('project/retract_registration.mako', trust=False)
),
Rule(
'/ids/<category>/<path:value>/',
'get',
project_views.register.get_referent_by_identifier,
notemplate,
),
# Statistics
Rule(
[
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
],
'get',
project_views.node.project_statistics_redirect,
notemplate,
),
Rule(
[
'/project/<pid>/analytics/',
'/project/<pid>/node/<nid>/analytics/',
],
'get',
project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako', trust=False)
),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
# TODO: Start waterbutler to test
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako', trust=False),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako', trust=False)
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
OsfWebRenderer('project/view_file.mako', trust=False),
),
Rule(
[
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
#api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by projectCreator.js
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule([
'/project/<pid>/expand/',
'/project/<pid>/node/<nid>/expand/',
], 'post', project_views.node.expand, json_renderer),
Rule([
'/project/<pid>/collapse/',
'/project/<pid>/node/<nid>/collapse/',
], 'post', project_views.node.collapse, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointers/',
],
'delete',
project_views.node.remove_pointers_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>',
],
'delete',
project_views.node.delete_folder,
json_renderer,
),
Rule('/folder/', 'put', project_views.node.add_folder, json_renderer),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
], 'get', project_views.node.get_children, json_renderer),
Rule([
'/project/<pid>/get_folder_pointers/'
], 'get', project_views.node.get_folder_pointers, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
Rule([
'/project/<pid>/log/',
'/project/<pid>/node/<nid>/log/',
], 'get', project_views.log.get_logs, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
'POST',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule([
'/project/<pid>/get_most_in_common_contributors/',
'/project/<pid>/node/<nid>/get_most_in_common_contributors/',
], 'get', project_views.contributor.get_most_in_common_contributors, json_renderer),
Rule([
'/project/<pid>/get_recently_added_contributors/',
'/project/<pid>/node/<nid>/get_recently_added_contributors/',
], 'get', project_views.contributor.get_recently_added_contributors, json_renderer),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Update
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'put',
project_views.node.update_node,
json_renderer,
),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Add / remove tags
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'post', project_views.tag.project_add_tag, json_renderer),
Rule([
'/project/<pid>/tags/',
'/project/<pid>/node/<nid>/tags/',
'/project/<pid>/tags/<tag>/',
'/project/<pid>/node/<nid>/tags/<tag>/',
], 'delete', project_views.tag.project_remove_tag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
Rule([
'/project/<pid>/beforeremovecontributors/',
'/project/<pid>/node/<nid>/beforeremovecontributors/',
], 'post', project_views.contributor.project_before_remove_contributor, json_renderer),
# TODO(sloria): should be a delete request to /contributors/
Rule([
'/project/<pid>/removecontributors/',
'/project/<pid>/node/<nid>/removecontributors/',
], 'post', project_views.contributor.project_removecontributor, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
'/project/<pid>/node/<nid>/beforeregister',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/retraction/',
'/project/<pid>/node/<nid>/retraction/'
], 'post', project_views.register.node_registration_retraction_post, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'post', project_views.register.node_register_template_page_post, json_renderer),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'get',
project_views.register.node_identifiers_get,
json_renderer,
),
Rule(
[
'/project/<pid>/identifiers/',
'/project/<pid>/node/<nid>/identifiers/',
],
'post',
project_views.register.node_identifiers_post,
json_renderer,
),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
Rule([
'/watched/logs/'
], 'get', website_views.watched_logs_get, json_renderer),
### Accounts ###
Rule([
'/user/merge/'
], 'post', auth_views.merge_user_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/registration/<pid>/callbacks/',
],
'put',
project_views.register.registration_callbacks,
json_renderer,
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
),
], prefix='/api/v1')
    # Set up static routing for addons
    # NOTE: We use nginx to serve static addon assets in production
    addon_base_path = os.path.abspath('website/addons')
    if settings.DEV_MODE:
        @app.route('/static/addons/<addon>/<path:filename>')
        def addon_static(addon, filename):
            """Serve a static asset for the given addon (dev mode only).

            :param addon: addon directory name under website/addons
            :param filename: path of the asset inside the addon's static dir
            """
            addon_path = os.path.join(addon_base_path, addon, 'static')
            return send_from_directory(addon_path, filename)
|
{
"content_hash": "2c01c2eb39c93fd020105be00a046cb7",
"timestamp": "",
"source": "github",
"line_count": 1490,
"max_line_length": 134,
"avg_line_length": 31.85973154362416,
"alnum_prop": 0.505297971393061,
"repo_name": "ckc6cz/osf.io",
"id": "5881a3539c7d430de51040233a94889c398746bb",
"size": "47495",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "website/routes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "117020"
},
{
"name": "HTML",
"bytes": "31536"
},
{
"name": "JavaScript",
"bytes": "1164001"
},
{
"name": "Mako",
"bytes": "520012"
},
{
"name": "Python",
"bytes": "3235503"
},
{
"name": "Shell",
"bytes": "1849"
}
],
"symlink_target": ""
}
|
"""Tests for neurodsp.timefrequency.wavelets."""
from neurodsp.tests.settings import FS, FREQ1, FREQS_ARR
from neurodsp.timefrequency.wavelets import *
###################################################################################################
###################################################################################################
def test_compute_wavelet_transform(tsig):
    """Smoke test the wavelet transform on a 1d signal."""

    mwt = compute_wavelet_transform(tsig, FS, FREQS_ARR)
    assert mwt.ndim == 2

    # Check using a list of n_cycles definitions
    mwt = compute_wavelet_transform(tsig, FS, FREQS_ARR, n_cycles=[3, 4, 5])
def test_compute_wavelet_transform_2d(tsig2d):
    """Check that a 2d signal input yields a 3d transform output."""

    mwt = compute_wavelet_transform(tsig2d, FS, FREQS_ARR)
    assert mwt.ndim == 3
def test_convolve_wavelet(tsig):
    """Smoke test wavelet convolution, with the default and amplitude norms."""

    convolve_wavelet(tsig, FS, FREQ1)
    convolve_wavelet(tsig, FS, FREQ1, norm='amp')
def test_convolve_wavelet_2d(tsig2d):
    """Check that convolving a 2d signal preserves its shape."""

    result = convolve_wavelet(tsig2d, FS, FREQ1)
    assert result.shape == tsig2d.shape
|
{
"content_hash": "09b57d23d83fc1414554f1f13d74d72e",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 99,
"avg_line_length": 32.225806451612904,
"alnum_prop": 0.5665665665665666,
"repo_name": "voytekresearch/neurodsp",
"id": "702dfca323dc22d8367b6d0ad7732f738d017aa2",
"size": "999",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "neurodsp/tests/timefrequency/test_wavelets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2726"
},
{
"name": "Python",
"bytes": "421656"
},
{
"name": "TeX",
"bytes": "6424"
}
],
"symlink_target": ""
}
|
from glanceclient.common import http
from glanceclient.common import utils
from glanceclient.v2 import image_members
from glanceclient.v2 import image_tags
from glanceclient.v2 import images
from glanceclient.v2 import metadefs
from glanceclient.v2 import schemas
from glanceclient.v2 import tasks
class Client(object):
    """Client for the OpenStack Images v2 API.

    :param string endpoint: A user-supplied endpoint URL for the glance
                            service.
    :param string token: Token for authentication.
    :param integer timeout: Allows customization of the timeout for client
                            http requests. (optional)
    """

    def __init__(self, endpoint=None, **kwargs):
        endpoint, self.version = utils.endpoint_version_from_url(endpoint, 2.0)
        http_client = http.get_http_client(endpoint=endpoint, **kwargs)
        self.http_client = http_client

        # The schemas controller must exist first; every other controller
        # is constructed with it.
        self.schemas = schemas.Controller(http_client)

        self.images = images.Controller(http_client, self.schemas)
        self.image_tags = image_tags.Controller(http_client, self.schemas)
        self.image_members = image_members.Controller(http_client,
                                                      self.schemas)
        self.tasks = tasks.Controller(http_client, self.schemas)

        # Metadata-definition (metadefs) controllers.
        self.metadefs_resource_type = metadefs.ResourceTypeController(
            http_client, self.schemas)
        self.metadefs_property = metadefs.PropertyController(
            http_client, self.schemas)
        self.metadefs_object = metadefs.ObjectController(
            http_client, self.schemas)
        self.metadefs_namespace = metadefs.NamespaceController(
            http_client, self.schemas)
|
{
"content_hash": "84dd0e679e396733c2ecf0a74be11808",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 40.65909090909091,
"alnum_prop": 0.6562325321408609,
"repo_name": "mmasaki/python-glanceclient",
"id": "803673ba537baf29f069f0f202b6be0d381139fa",
"size": "2426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glanceclient/v2/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "576366"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
}
|
from Crypto.PublicKey import RSA
from user_sync.error import AssertionException
def read_key(filename):
    """Return the full contents of *filename* as a string."""
    with open(filename, 'r') as key_file:
        contents = key_file.read()
    return contents
def write_key(data, filename):
    """Write *data* to *filename*, replacing any existing contents."""
    with open(filename, 'w') as key_file:
        key_file.write(data)
def encrypt_file(passphrase, filename):
    """Encrypt the RSA key stored in *filename* with *passphrase*."""
    return encrypt(passphrase, read_key(filename))
def decrypt_file(passphrase, filename):
    """Decrypt the passphrase-protected RSA key stored in *filename*."""
    return decrypt(passphrase, read_key(filename))
def encrypt(passphrase, data):
    """Encrypt an unprotected PEM RSA key with a passphrase.

    :param passphrase: passphrase to protect the exported key with
    :param data: PEM-formatted, unencrypted RSA key material
    :return: PKCS#8 PEM text of the key, encrypted under the passphrase
    :raises AssertionException: if the data is not RSA key material, is
        corrupt, or is already encrypted
    """
    try:
        # passphrase=None: the import must succeed without a passphrase,
        # i.e. the input key is expected to be unencrypted.
        key = RSA.import_key(data, passphrase=None)
        return RSA.RsaKey.export_key(key, format='PEM', passphrase=passphrase, pkcs=8).decode('ascii')
    except (ValueError, IndexError, TypeError) as e:
        # Translate the library's error wording into friendlier messages.
        if contains_phrase(str(e), "post boundary", "rsa key format", "out of range"):
            raise AssertionException(
                '{0} - Error while processing data. Data may not be in RSA format or is corrupt.'.format(str(e)))
        elif contains_phrase(str(e), "no passphrase available"):
            # Raised when the key is already passphrase-protected.
            raise AssertionException(
                '{0} - Error while processing data. Data is already encrypted.'.format(str(e)))
        raise
def decrypt(passphrase, data):
    """Decrypt a passphrase-protected PEM RSA key.

    :param passphrase: passphrase the key was encrypted with
    :param data: PEM text of the encrypted key
    :return: PEM text of the key with the passphrase removed
    :raises AssertionException: if the passphrase is wrong or the data is
        not RSA key material
    """
    try:
        decrypted_key = RSA.import_key(data, passphrase)
        return decrypted_key.export_key('PEM').decode('ascii')
    except (ValueError, IndexError) as e:
        if contains_phrase(str(e), "padding is incorrect"):
            raise AssertionException('Password was incorrect.')
        elif contains_phrase(str(e), "index out of range", "format is not supported"):
            # Fix: the original message read "Error while while processing"
            # (duplicated word) — user-facing error text corrected.
            raise AssertionException(
                '{0} - Error while processing encrypted data. '
                'Data may not be in RSA format or is corrupt.'.format(str(e)))
        raise
def contains_phrase(result, *args):
    """Return True if any of *args* occurs in *result*, case-insensitively.

    With no phrases given, returns False (nothing can match).
    """
    haystack = result.lower()
    # any() short-circuits on the first match and reads more clearly than
    # the original 'True in {set comprehension}' form, which always
    # evaluated every phrase before answering.
    return any(phrase.lower() in haystack for phrase in args)
|
{
"content_hash": "d825aa4d475b8f9b20ddd39881d2844b",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 113,
"avg_line_length": 34.41818181818182,
"alnum_prop": 0.6460644479661912,
"repo_name": "adobe-apiplatform/user-sync.py",
"id": "60c5a98ad5664f251dd59bdd7b53da16994c830d",
"size": "1893",
"binary": false,
"copies": "2",
"ref": "refs/heads/v2",
"path": "user_sync/encryption.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "89"
},
{
"name": "Makefile",
"bytes": "602"
},
{
"name": "Python",
"bytes": "434861"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
}
|
import json
import logging
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.forms import fields
from horizon import workflows
from openstack_dashboard.contrib.sahara.content.data_processing \
.utils import helpers
from openstack_dashboard.contrib.sahara.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
JOB_BINARY_CREATE_URL = ("horizon:project:data_processing.job_binaries"
":create-job-binary")
class AdditionalLibsAction(workflows.Action):
    """Workflow action for attaching supporting job binaries (libraries)."""

    # Drop-down of existing job binaries. The label changes per job type
    # via the 'switched' widget data attributes.
    lib_binaries = forms.DynamicChoiceField(
        label=_("Choose libraries"),
        required=False,
        add_item_link=JOB_BINARY_CREATE_URL,
        widget=forms.Select(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jobtype',
                'data-jobtype-pig': _("Choose libraries"),
                'data-jobtype-hive': _("Choose libraries"),
                'data-jobtype-shell': _("Choose additional files"),
                'data-jobtype-spark': _("Choose libraries"),
                'data-jobtype-java': _("Choose libraries"),
                'data-jobtype-mapreduce.streaming': _("Choose libraries")
            }))

    # Hidden field carrying the selected library ids (JSON-encoded list,
    # consumed by ConfigureLibs.contribute).
    lib_ids = forms.CharField(
        required=False,
        widget=forms.HiddenInput())

    def populate_lib_binaries_choices(self, request, context):
        """Build the (id, name) choices list from the job binaries API."""
        job_binaries = saharaclient.job_binary_list(request)
        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        # Sentinel first entry for "nothing selected".
        choices.insert(0, ('', _("-- not selected --")))
        return choices

    class Meta(object):
        name = _("Libs")
        help_text_template = (
            "project/data_processing.jobs/_create_job_libs_help.html")
class GeneralConfigAction(workflows.Action):
    """Workflow action collecting the general job-template settings."""

    job_name = forms.CharField(label=_("Name"))

    # 'switchable' + data-slug let other fields swap labels per job type.
    job_type = forms.ChoiceField(label=_("Job Type"),
                                 widget=forms.Select(attrs={
                                     'class': 'switchable',
                                     'data-slug': 'jobtype'
                                 }))

    main_binary = forms.DynamicChoiceField(
        label=_("Choose a main binary"),
        required=False,
        help_text=_("Choose the binary which "
                    "should be used in this Job."),
        add_item_link=JOB_BINARY_CREATE_URL,
        widget=fields.DynamicSelectWidget(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jobtype',
                'data-jobtype-pig': _("Choose a main binary"),
                'data-jobtype-hive': _("Choose a main binary"),
                'data-jobtype-shell': _("Choose a shell script"),
                'data-jobtype-spark': _("Choose a main binary"),
                'data-jobtype-storm': _("Choose a main binary"),
                'data-jobtype-mapreduce.streaming': _("Choose a main binary")
            }))

    job_description = forms.CharField(label=_("Description"),
                                      required=False,
                                      widget=forms.Textarea(attrs={'rows': 4}))

    def __init__(self, request, context, *args, **kwargs):
        super(GeneralConfigAction,
              self).__init__(request, context, *args, **kwargs)
        # Pre-select the job type when arriving from the guided wizard.
        if request.REQUEST.get("guide_job_type"):
            self.fields["job_type"].initial = (
                request.REQUEST.get("guide_job_type").lower())

    def populate_job_type_choices(self, request, context):
        """List the job types the backend supports, filtered to known ones."""
        choices = []
        choices_list = saharaclient.job_types_list(request)
        for choice in choices_list:
            job_type = choice.name.lower()
            if job_type in helpers.JOB_TYPE_MAP:
                choices.append((job_type, helpers.JOB_TYPE_MAP[job_type][0]))
        return choices

    def populate_main_binary_choices(self, request, context):
        """Build the (id, name) choices list from the job binaries API."""
        job_binaries = saharaclient.job_binary_list(request)
        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        choices.insert(0, ('', _("-- not selected --")))
        return choices

    def clean(self):
        # NOTE(review): super(workflows.Action, self) skips
        # workflows.Action.clean() and calls its parent's clean() instead —
        # looks like it should be super(GeneralConfigAction, self); confirm
        # before changing, as skipping may be intentional.
        cleaned_data = super(workflows.Action, self).clean()
        job_type = cleaned_data.get("job_type", "")

        # Java and MapReduce jobs take no main binary, only libs.
        if job_type in ["Java", "MapReduce"]:
            cleaned_data['main_binary'] = None

        return cleaned_data

    class Meta(object):
        name = _("Create Job Template")
        help_text_template = (
            "project/data_processing.jobs/_create_job_help.html")
class GeneralConfig(workflows.Step):
    """Workflow step wrapping GeneralConfigAction."""

    action_class = GeneralConfigAction
    contributes = ("job_name", "job_type", "job_description", "main_binary")

    def contribute(self, data, context):
        # Copy every form value into the context; the job type is mapped
        # from its form value to the internal name.
        for key, value in data.items():
            context[key] = (helpers.JOB_TYPE_MAP[value][1]
                            if key == "job_type" else value)
        return context
class ConfigureLibs(workflows.Step):
    """Workflow step wrapping AdditionalLibsAction."""

    action_class = AdditionalLibsAction
    template_name = "project/data_processing.jobs/library_template.html"

    def contribute(self, data, context):
        # Selected libraries arrive as a JSON-encoded list in 'lib_ids';
        # expose them to the workflow as lib_0, lib_1, ...
        selected = json.loads(data.get("lib_ids", '[]'))
        for position, lib_id in enumerate(selected):
            context["lib_%s" % position] = lib_id
        return context
class CreateJob(workflows.Workflow):
    """Workflow that creates a job template from general config + libs."""

    slug = "create_job"
    name = _("Create Job Template")
    finalize_button_name = _("Create")
    success_message = _("Job created")
    failure_message = _("Could not create job template")
    success_url = "horizon:project:data_processing.jobs:index"
    default_steps = (GeneralConfig, ConfigureLibs)

    def handle(self, request, context):
        """Create the job via the sahara API; return True on success."""
        main_locations = []
        lib_locations = []

        # Collect the library ids contributed by ConfigureLibs (lib_0, ...).
        for k in context.keys():
            if k.startswith('lib_'):
                lib_locations.append(context.get(k))

        if context.get("main_binary", None):
            main_locations.append(context["main_binary"])

        try:
            job = saharaclient.job_create(
                request,
                context["job_name"],
                context["job_type"],
                main_locations,
                lib_locations,
                context["job_description"])

            # When launched from the guided wizard, stash the new job's
            # details in the session and redirect back into the wizard.
            hlps = helpers.Helpers(request)
            if hlps.is_from_guide():
                request.session["guide_job_id"] = job.id
                request.session["guide_job_type"] = context["job_type"]
                request.session["guide_job_name"] = context["job_name"]
                self.success_url = (
                    "horizon:project:data_processing.wizard:jobex_guide")
            return True
        except Exception:
            exceptions.handle(request)
            return False
|
{
"content_hash": "06c6c9a5c3db4b2b7905910c33620a91",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 79,
"avg_line_length": 35.35751295336787,
"alnum_prop": 0.5665298944900352,
"repo_name": "damien-dg/horizon",
"id": "cbb5efbdf307cbda3382a6400d16988ddc7a8849",
"size": "7367",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/contrib/sahara/content/data_processing/jobs/workflows/create.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "105413"
},
{
"name": "HTML",
"bytes": "513351"
},
{
"name": "JavaScript",
"bytes": "955324"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4813652"
},
{
"name": "Shell",
"bytes": "18658"
}
],
"symlink_target": ""
}
|
import sys
import gtk
import pygtk
pygtk.require('2.0')
import mateapplet
from nm_dispatcher_olsrd_applet import applet_factory
if __name__ == '__main__':
    # Entry point: run standalone in a debug window when given '-d',
    # otherwise register with the MATE component factory.
    print('Starting factory')
    debug_mode = len(sys.argv) > 1 and sys.argv[1] == '-d'
    if debug_mode:
        # Host the applet inside a plain GTK window for local testing.
        window = gtk.Window()
        window.set_title('Applet window')
        window.connect('destroy', gtk.main_quit)
        applet = mateapplet.Applet()
        applet_factory(applet, None)
        applet.reparent(window)
        window.show_all()
        gtk.main()
        sys.exit()
    else:
        mateapplet.matecomponent_factory(
            'OAFIID:MATE_nm-dispatcher-olsrd-applet_Factory',
            mateapplet.Applet.__gtype__,
            'MATE_nm-dispatcher-olsrd-applet',
            '0.0',
            applet_factory)
|
{
"content_hash": "9d9bcfaf11381895762ec2e17a912cbc",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 84,
"avg_line_length": 27.692307692307693,
"alnum_prop": 0.7069444444444445,
"repo_name": "eighthave/nm-dispatcher-olsrd-applet",
"id": "bc8727a7584d8d09cc6cce22bbed05bee5db36b6",
"size": "739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nm-dispatcher-olsrd-applet-factoryMate.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Perl",
"bytes": "30"
},
{
"name": "Python",
"bytes": "2361"
}
],
"symlink_target": ""
}
|
import pika
# Consumer: receive messages from the 'direct_logs' exchange whose
# routing key matches one of the bound severities.
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host="10.0.0.25"))
channel = connection.channel()
channel.exchange_declare(exchange="direct_logs", type="direct")

# Exclusive queue: server-named, removed when this consumer disconnects.
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue

severities = ["error"]
for severity in severities:
    channel.queue_bind(exchange="direct_logs", queue=queue_name,
                       routing_key=severity)

def callback(ch, method, properties, body):
    # Print each delivered message body.
    print(body)

channel.basic_consume(callback, queue=queue_name, no_ack=True)
channel.start_consuming()
|
{
"content_hash": "9eca2504fb49494e33795330df0ba630",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 25.238095238095237,
"alnum_prop": 0.7830188679245284,
"repo_name": "xiaoyongaa/ALL",
"id": "a45d4b301048853dfdc50ed57e35276c09dfd5fc",
"size": "530",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "网络编程进阶/RabbitMQ/rabbitMQ(消费者)关键字2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "432257"
},
{
"name": "C++",
"bytes": "129981"
},
{
"name": "Groff",
"bytes": "26852"
},
{
"name": "HTML",
"bytes": "201234"
},
{
"name": "Python",
"bytes": "462513"
},
{
"name": "Shell",
"bytes": "9245"
}
],
"symlink_target": ""
}
|
import sys
import pygfa
import networkx as nx
import matplotlib.pyplot as plt
import argparse
import logging
logging.basicConfig(level=logging.DEBUG)
if __name__ == '__main__':
    # -------------------------CLI-ARGUMENT-MANAGEMENT-------------------------------- #
    parser = argparse.ArgumentParser (description="Compute graph structure from GFA file.")
    parser.add_argument('-f', '--file', metavar='file', type=str, nargs=1, required=True)
    parser.add_argument('-s', '--subgraph', metavar='subgraph_key', type=str, nargs=1, required=False)
    parser.add_argument('-d', '--display', action='store_true', default=False)
    parser.add_argument('-c', '--convert', metavar=("gfa_version", "output_file"), type=str, nargs=2, \
                            required=False)
    # -------------------------------------------------------------------------------- #

    try:
        args = parser.parse_args()
        tmp_pygfa = pygfa.gfa.GFA.from_file (args.file[0], is_rGFA = True)
        # Full graph is drawn in red; a subgraph view in blue.
        node_color = "r"
        if args.subgraph:
            tmp_pygfa = tmp_pygfa.get_subgraph (args.subgraph[0])
            node_color = "b"
        if args.display:
            # Label each edge with its key in the multigraph.
            edge_labels = dict ( [ \
                ( (node1, node2), key )\
                for node1, node2, key in tmp_pygfa.edges(keys=True) \
                ])
            layout = nx.spring_layout (tmp_pygfa._graph)
            nx.draw (tmp_pygfa._graph, layout, with_labels = True, node_color=node_color)
            nx.draw_networkx_edge_labels (tmp_pygfa, layout, edge_labels = edge_labels)
            plt.show ()
        if args.convert:
            # Accept several spellings of the target GFA version.
            version = 1
            if args.convert[0] in ("2", "gfa2", "GFA2"):
                version = 2
            elif args.convert[0] in ("1", "gfa1", "GFA1"):
                version = 1
            else:
                raise ValueError("Invalid GFA version given")
            tmp_pygfa.dump(gfa_version=version, out=args.convert[1])
    except SystemExit:
        # argparse already printed usage/errors; exit quietly.
        pass
    except EnvironmentError as env_error:
        print(repr(env_error))
|
{
"content_hash": "5235f864f5ddec003ff348efb9cbf1a8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 103,
"avg_line_length": 37.8421052631579,
"alnum_prop": 0.5127491886879926,
"repo_name": "AlgoLab/pygfa",
"id": "21e4f75b2101dedeea4cf761be7b049f722cfdd4",
"size": "2157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "290476"
},
{
"name": "Shell",
"bytes": "104"
}
],
"symlink_target": ""
}
|
from msrest.pipeline import ClientRawResponse
from .. import models
class Paths(object):
    """Paths operations.

    NOTE(review): appears to be AutoRest-generated client code — keep edits
    in sync with the generator template if regenerated.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def get_empty(
            self, account_name, custom_headers=None, raw=False, **operation_config):
        """
        Get a 200 to test a valid base uri

        :param account_name: Account Name
        :type account_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :rtype: None
        :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
         if raw=true
        """
        # Construct URL
        url = '/customuri'
        path_format_arguments = {
            'accountName': self._serialize.url("account_name", account_name, 'str', skip_quote=True),
            'host': self._serialize.url("self.config.host", self.config.host, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        # Any status other than 200 is surfaced as an ErrorException.
        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        # raw=True: hand back the undeserialized response wrapper.
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
{
"content_hash": "61cebe347291d1a264c6eaf9c3a6f340",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 101,
"avg_line_length": 34.784615384615385,
"alnum_prop": 0.637328615656789,
"repo_name": "sharadagarwal/autorest",
"id": "478f2a330a550b6b20e042501b67aa686d450ced",
"size": "2735",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/CustomBaseUri/autorestparameterizedhosttestclient/operations/paths.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "12942"
},
{
"name": "C#",
"bytes": "11450022"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "4693719"
},
{
"name": "JavaScript",
"bytes": "4685941"
},
{
"name": "PowerShell",
"bytes": "29614"
},
{
"name": "Python",
"bytes": "2274436"
},
{
"name": "Ruby",
"bytes": "232193"
},
{
"name": "Shell",
"bytes": "423"
},
{
"name": "TypeScript",
"bytes": "179577"
}
],
"symlink_target": ""
}
|
import functools
import collections
from paddle.trainer.config_parser import *
from .activations import LinearActivation, SigmoidActivation, TanhActivation, \
ReluActivation, IdentityActivation, SoftmaxActivation
from .evaluators import *
from .poolings import MaxPooling, AvgPooling, BasePoolingType
from .attrs import *
from .default_decorators import *
try:
import cPickle as pickle
except ImportError:
import pickle
import copy
# Public API of this module: every layer/projection/operator factory and
# helper class exported via ``from trainer_config_helpers.layers import *``.
# NOTE: a mix of single- and double-quoted entries is preserved as-is.
__all__ = [
    "full_matrix_projection",
    "AggregateLevel",
    "ExpandLevel",
    "identity_projection",
    "dotmul_projection",
    "dotmul_operator",
    "repeat_layer",
    "table_projection",
    "mixed_layer",
    "data_layer",
    "embedding_layer",
    "fc_layer",
    "grumemory",
    "pooling_layer",
    "lstmemory",
    "last_seq",
    "first_seq",
    "cos_sim",
    "hsigmoid",
    "conv_projection",
    "regression_cost",
    'classification_cost',
    "LayerOutput",
    'img_conv_layer',
    'img_pool_layer',
    'batch_norm_layer',
    'img_cmrnorm_layer',
    'addto_layer',
    'concat_layer',
    'lstm_step_layer',
    'recurrent_group',
    'memory',
    'StaticInput',
    'expand_layer',
    'scaling_layer',
    'scaling_projection',
    'power_layer',
    'interpolation_layer',
    'bilinear_interp_layer',
    'trans_layer',
    'sum_to_one_norm_layer',
    'get_output_layer',
    'LayerType',
    'context_projection',
    'beam_search',
    'maxid_layer',
    'GeneratedInput',
    'SubsequenceInput',
    'gru_step_layer',
    'recurrent_layer',
    'BaseGeneratedInput',
    'conv_operator',
    'conv_shift_layer',
    'tensor_layer',
    'selective_fc_layer',
    'sampling_id_layer',
    'slope_intercept_layer',
    'trans_full_matrix_projection',
    'linear_comb_layer',
    'convex_comb_layer',
    'ctc_layer',
    'warp_ctc_layer',
    'crf_layer',
    'crf_decoding_layer',
    'nce_layer',
    'cross_entropy_with_selfnorm',
    'cross_entropy',
    'multi_binary_label_cross_entropy',
    'sum_cost',
    'rank_cost',
    'lambda_cost',
    'huber_cost',
    'block_expand_layer',
    'maxout_layer',
    'out_prod_layer',
    'print_layer',
    'priorbox_layer',
    'spp_layer',
]
class LayerType(object):
    """
    Layer type enumerations.

    Each attribute value is the string tag used by the trainer config parser
    to identify a layer type. These tags are passed as the ``type=`` argument
    when constructing ``Layer`` configs.
    """
    # Basic / IO layers.
    DATA = "data"
    MIXED_LAYER = "mixed"
    # Recurrent memories.
    LSTMEMORY = "lstmemory"
    GRUMEMORY = "gated_recurrent"
    # Sequence aggregation.
    SEQUENCE_LAST_INSTANCE = "seqlastins"
    SEQUENCE_FIRST_INSTANCE = "seqfirstins"
    POOLING_MAX = "max"
    POOLING_AVG = 'average'
    FC_LAYER = "fc"
    COST = 'cost'
    COSINE_SIM_VEC = 'cos_vm'
    COSINE_SIM = 'cos'
    HSIGMOID = 'hsigmoid'
    # Convolution variants.
    CONV_LAYER = "conv"
    CONVTRANS_LAYER = "convt"
    EXCONV_LAYER = "exconv"
    EXCONVTRANS_LAYER = "exconvt"
    CUDNNCONV_LAYER = "cudnn_conv"
    POOL_LAYER = "pool"
    BATCH_NORM_LAYER = 'batch_norm'
    NORM_LAYER = 'norm'
    SUM_TO_ONE_NORM_LAYER = 'sum_to_one_norm'
    # Combination / reshaping layers.
    ADDTO_LAYER = 'addto'
    CONCAT_LAYER = 'concat'
    CONCAT_PROJ_LAYER = 'concat2'
    LSTM_STEP_LAYER = 'lstm_step'
    GRU_STEP_LAYER = 'gru_step'
    GET_OUTPUT_LAYER = 'get_output'
    EXPAND_LAYER = 'expand'
    INTERPOLATION_LAYER = 'interpolation'
    BILINEAR_INTERP_LAYER = 'bilinear_interp'
    POWER_LAYER = 'power'
    SCALING_LAYER = 'scaling'
    TRANS_LAYER = 'trans'
    OUT_PROD_LAYER = 'out_prod'
    FEATURE_MAP_EXPAND_LAYER = 'featmap_expand'
    MEMORY = 'memory'
    MAXID_LAYER = 'maxid'
    EOSID_LAYER = 'eos_id'
    RECURRENT_LAYER = 'recurrent'
    CONV_SHIFT_LAYER = "conv_shift"
    TENSOR_LAYER = "tensor"
    SEL_FC_LAYER = "selective_fc"
    SAMPLING_ID_LAYER = "sampling_id"
    SLOPE_INTERCEPT_LAYER = "slope_intercept"
    LINEAR_COMBINATION_LAYER = "convex_comb"
    BLOCK_EXPAND = "blockexpand"
    MAXOUT = "maxout"
    SPP_LAYER = "spp"
    PRINT_LAYER = "print"
    PRIORBOX_LAYER = "priorbox"
    # CTC / CRF / sampling-based cost layers.
    CTC_LAYER = "ctc"
    WARP_CTC_LAYER = "warp_ctc"
    CRF_LAYER = "crf"
    CRF_DECODING_LAYER = "crf_decoding"
    NCE_LAYER = 'nce'
    # Cost layers.
    RANK_COST = "rank-cost"
    LAMBDA_COST = "lambda_cost"
    HUBER = "huber"
    CROSS_ENTROPY = "multi-class-cross-entropy"
    CROSS_ENTROPY_WITH_SELFNORM = "multi_class_cross_entropy_with_selfnorm"
    SOFT_BIN_CLASS_CROSS_ENTROPY = "soft_binary_class_cross_entropy"
    MULTI_BIN_LABEL_CROSS_ENTROPY = "multi_binary_label_cross_entropy"
    SUM_COST = "sum_cost"

    @staticmethod
    def is_layer_type(type_name):
        """
        Return True if ``type_name`` matches one of the layer-type tags
        declared on this class.

        :param type_name: layer type name. Because layer type enumerations are
                          strings.
        :type type_name: basestring
        :return: True if is a layer_type
        :rtype: bool
        """
        # Scan all upper-cased class attributes; each string-valued one is a
        # layer-type tag.  NOTE: ``basestring`` is Python 2 only.
        for key in dir(LayerType):
            if key.isupper():
                att = getattr(LayerType, key)
                if isinstance(att, basestring) and type_name == att:
                    return True
        return False
class AggregateLevel(object):
    """
    Granularity tags for sequence aggregation layers (e.g. ``pooling_layer``,
    ``last_seq``); passed through to the config parser as ``trans_type``.
    """
    # Aggregate over every timestep, producing a non-sequence result.
    EACH_TIMESTEP = 'non-seq'
    # Aggregate within each sub-sequence, producing a sequence result.
    EACH_SEQUENCE = 'seq'
class LayerOutput(object):
    """
    The value returned by every layer factory function in this module.

    It exists for several reasons:

    - Checking that layer connections make sense, e.g. FC(Softmax) =>
      Cost(MSE Error) can be rejected.
    - Tracking which layers feed which (the ``parents`` chain).
    - Being passed to other layer factories as their ``input``.

    :param name: Layer output name.
    :type name: basestring
    :param layer_type: Current Layer Type. One of LayerType enumeration.
    :type layer_type: basestring
    :param activation: Layer Activation.
    :type activation: BaseActivation.
    :param parents: Layer's parents.
    :type parents: list|tuple|collections.Sequence
    """

    def __init__(self,
                 name,
                 layer_type,
                 parents=None,
                 activation=None,
                 num_filters=None,
                 img_norm_type=None,
                 size=None,
                 outputs=None,
                 reverse=None):
        # Validate the mandatory identification fields first.
        assert isinstance(name, basestring)
        assert isinstance(layer_type, basestring)
        assert size is not None
        assert LayerType.is_layer_type(layer_type)
        self.name = name
        self.layer_type = layer_type
        # Normalize ``parents`` so it is always stored as a list.
        if parents is None:
            self.parents = []
        elif type(parents) == list:
            self.parents = parents
        else:
            self.parents = [parents]
        self.activation = activation
        self.num_filters = num_filters
        self.img_norm_type = img_norm_type
        self.size = size
        self.outputs = ['default'] if outputs is None else outputs
        self.reverse = reverse

    def __repr__(self):
        """
        Disabled on purpose for now; a real implementation will land at
        release time.
        """
        assert False, "this method should not be invoked"

    def __str__(self):
        """
        Disabled on purpose for now; a real implementation will land at
        release time.
        """
        assert False, "this method should not be invoked"
# Attribute-key constants recognized by ExtraLayerAttribute; layer factories
# declare which of these they accept via the ``layer_support`` decorator.
ERROR_CLIPPING = 'error_clipping_threshold'
DROPOUT = 'drop_rate'
DEVICE = 'device'
def layer_support(*attrs):
    """
    Decorator factory declaring which ExtraLayerAttribute keys a layer
    function supports.  Every ExtraLayerAttribute found among the wrapped
    call's arguments is first flagged with ``can_<attr>`` for each supported
    key (DEVICE is always supported), then validated via ``check()``.
    """
    supported = list(attrs)
    supported.append(DEVICE)

    def decorator(method):
        @functools.wraps(method)
        def wrapper(*args, **kwargs):
            # Gather every ExtraLayerAttribute passed positionally or by
            # keyword (positional ones first, matching validation order).
            extra_attrs = [a for a in args
                           if isinstance(a, ExtraLayerAttribute)]
            extra_attrs += [v for v in kwargs.values()
                            if isinstance(v, ExtraLayerAttribute)]
            for attr_key in supported:
                for extra in extra_attrs:
                    setattr(extra, '_'.join(['can', attr_key]), True)
            for extra in extra_attrs:
                extra.check(method.__name__)
            return method(*args, **kwargs)

        return wrapper

    return decorator
@wrap_param_attr_default()
def full_matrix_projection(input, size=0, param_attr=None):
    """
    Full Matrix Projection: a dense matrix multiplication of the input.

    .. math::
       out.row[i] += in.row[i] * weight

    There are two styles of usage.

    1. Inside a mixed_layer, only the input needs to be given:

    .. code-block:: python

       with mixed_layer(size=100) as m:
           m += full_matrix_projection(input=layer)

    2. As an independent object, the size must be given too:

    .. code-block:: python

       proj = full_matrix_projection(input=layer,
                                     size=100,
                                     param_attr=ParamAttr(name='_proj'))

    :param input: input layer
    :type input: LayerOutput
    :param size: The parameter size. Means the width of parameter.
    :type size: int
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A FullMatrixProjection Object.
    :rtype: FullMatrixProjection
    """
    projection = FullMatrixProjection(
        input_layer_name=input.name, size=size, **param_attr.attr)
    projection.origin = input
    return projection
@wrap_param_attr_default()
def trans_full_matrix_projection(input, size=0, param_attr=None):
    """
    Like full_matrix_projection, but multiplies by the transpose of the
    weight matrix.

    .. math::
       out.row[i] += in.row[i] * w^\mathrm{T}

    :math:`w^\mathrm{T}` means transpose of weight.

    Example:

    .. code-block:: python

       proj = trans_full_matrix_projection(input=layer,
                                           size=100,
                                           param_attr=ParamAttr(
                                                name='_proj',
                                                initial_mean=0.0,
                                                initial_std=0.01))

    :param input: input layer
    :type input: LayerOutput
    :param size: The parameter size. Means the width of parameter.
    :type size: int
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A TransposedFullMatrixProjection Object.
    :rtype: TransposedFullMatrixProjection
    """
    projection = TransposedFullMatrixProjection(
        input_layer_name=input.name, size=size, **param_attr.attr)
    projection.origin = input
    return projection
@wrap_param_attr_default()
def table_projection(input, size=0, param_attr=None):
    """
    Table Projection: selects rows of the parameter table indexed by the
    input ids.

    .. math::
       out.row[i] += table.row[ids[i]]

    where :math:`out` is output, :math:`table` is parameter, :math:`ids` is
    input\_ids, and :math:`i` is row\_id.

    There are two styles of usage.

    1. Inside a mixed_layer, only the input needs to be given:

    .. code-block:: python

       with mixed_layer(size=100) as m:
           m += table_projection(input=layer)

    2. As an independent object, the size must be given too:

    .. code-block:: python

       proj = table_projection(input=layer,
                               size=100,
                               param_attr=ParamAttr(name='_proj'))

    :param input: Input layer, which must contains id fields.
    :type input: LayerOutput
    :param size: The parameter size. Means the width of parameter.
    :type size: int
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A TableProjection Object.
    :rtype: TableProjection
    """
    projection = TableProjection(
        input_layer_name=input.name, size=size, **param_attr.attr)
    projection.origin = input
    return projection
def identity_projection(input, offset=None):
    """
    Build an identity-style projection.

    1. When ``offset`` is None an IdentityProjection is created:

    .. math::
       out.row[i] += in.row[i]

    .. code-block:: python

       proj = identity_projection(input=layer)

    2. When ``offset`` is given an IdentityOffsetProjection is created; the
       output may be narrower than the input, copying dimensions
       [offset, offset+layer_size):

    .. math::
       out.row[i] += in.row[i + \\textrm{offset}]

    .. code-block:: python

       proj = identity_projection(input=layer,
                                  offset=10)

    Note that neither projection carries any parameter.

    :param input: Input Layer.
    :type input: LayerOutput
    :param offset: Offset, None if use default.
    :type offset: int
    :return: A IdentityProjection or IdentityOffsetProjection object
    :rtype: IdentityProjection or IdentityOffsetProjection
    """
    if offset is None:
        projection = IdentityProjection(input_layer_name=input.name)
    else:
        projection = IdentityOffsetProjection(
            input_layer_name=input.name, offset=offset)
    # Both variants record their source layer the same way.
    projection.origin = input
    return projection
@wrap_param_attr_default()
def scaling_projection(input, param_attr=None):
    """
    Multiply the input by a single learned scalar and add it to the output.

    .. math::
       out += w * in

    Example:

    .. code-block:: python

       proj = scaling_projection(input=layer)

    :param input: Input Layer.
    :type input: LayerOutput
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A ScalingProjection object
    :rtype: ScalingProjection
    """
    projection = ScalingProjection(
        input_layer_name=input.name, **param_attr.attr)
    projection.origin = input
    return projection
@wrap_param_attr_default()
def dotmul_projection(input, param_attr=None):
    """
    DotMulProjection: element-wise multiplication of the input with a learned
    weight vector.

    .. math::
       out.row[i] += in.row[i] .* weight

    where :math:`.*` means element-wise multiplication.

    Example:

    .. code-block:: python

       proj = dotmul_projection(input=layer)

    :param input: Input layer.
    :type input: LayerOutput
    :param param_attr: Parameter config, None if use default.
    :type param_attr: ParameterAttribute
    :return: A DotMulProjection Object.
    :rtype: DotMulProjection
    """
    # Output width equals input width for element-wise multiplication.
    projection = DotMulProjection(
        input_layer_name=input.name, size=input.size, **param_attr.attr)
    projection.origin = input
    return projection
def dotmul_operator(a=None, b=None, scale=1, **kwargs):
    """
    DotMulOperator: element-wise multiplication of two inputs, scaled by a
    config scalar (default one).

    .. math::
       out.row[i] += scale * (x.row[i] .* y.row[i])

    where :math:`.*` means element-wise multiplication.

    Example:

    .. code-block:: python

       op = dotmul_operator(x=layer1, y=layer2, scale=0.5)

    :param a: Input layer1
    :type a: LayerOutput
    :param b: Input layer2
    :type b: LayerOutput
    :param scale: config scalar, default value is one.
    :type scale: float
    :return: A DotMulOperator Object.
    :rtype: DotMulOperator
    """
    if 'x' in kwargs or 'y' in kwargs:
        logger.warning('x and y arguments for dotmul_operator is deprecated. '
                       'Please use a and b as parameter.')
        # Accept the deprecated keyword spellings for backward compatibility.
        a = kwargs.get('x', a)
        b = kwargs.get('y', b)
    assert isinstance(a, LayerOutput)
    assert isinstance(b, LayerOutput)
    # Sizes must agree when both are known.
    if a.size is not None and b.size is not None:
        assert a.size == b.size
    operator = DotMulOperator(input_layer_names=[a.name, b.name], scale=scale)
    operator.origin = [a, b]
    return operator
@wrap_bias_attr_default(['padding_attr'])
def context_projection(input,
                       context_len,
                       context_start=None,
                       padding_attr=False):
    """
    Context Projection.

    Reorganizes the input sequence, concatenating ``context_len`` consecutive
    timesteps starting at ``context_start`` (default ``-(context_len - 1) / 2``)
    into one context vector.  Positions falling outside the sequence are
    padded with zero when ``padding_attr`` is False, or with a trainable
    padding otherwise.

    For example, with the origin sequence [A B C D E F G], context len 3 and
    no padding_attr, the result is [ 0AB ABC BCD CDE DEF EFG FG0 ].

    :param input: Input Sequence.
    :type input: LayerOutput
    :param context_len: context length.
    :type context_len: int
    :param context_start: context start position. Default is
                          -(context_len - 1)/2
    :type context_start: int
    :param padding_attr: Padding Parameter Attribute. If false, it means padding
                         always be zero. Otherwise Padding is learnable, and
                         parameter attribute is set by this parameter.
    :type padding_attr: bool|ParameterAttribute
    :return: Projection
    :rtype: Projection
    """
    if context_start is None:
        # Center the context window on the current timestep by default.
        context_start = -(context_len - 1) / 2

    trainable = isinstance(padding_attr, ParameterAttribute)
    # Only a ParameterAttribute contributes extra config keys.
    extra_dict = padding_attr.attr if trainable else dict()

    projection = ContextProjection(
        input_layer_name=input.name,
        context_length=context_len,
        context_start=context_start,
        trainable_padding=trainable,
        **extra_dict)
    projection.origin = input
    return projection
class MixedLayerType(LayerOutput):
    """
    The internal object for trainer_helpers.

    Accumulates projections/operators via ``+=`` and, when used as a context
    manager, emits the final MixedLayer config on ``__exit__``.  After the
    config is emitted the object is "sealed": further ``+=`` raises
    AddToSealedMixedLayerException.
    """

    class AddToSealedMixedLayerException(Exception):
        # Raised when ``+=`` is used after the layer config has been emitted.
        def __init__(self):
            Exception.__init__(self)

    def __init__(self, name, size, act, bias_attr, layer_attr, parents=None):
        """
        Ctor.

        :param name: layer name.
        :type name: basestring
        :param size: layer size.
        :type size: int
        :param act: activation type.
        :type act: BaseActivation
        :param bias_attr: The Bias Attribute. If no bias, then pass False or
                          something not type of ParameterAttribute. None will
                          get a default Bias.
        :type bias_attr: ParameterAttribute or None means has bias. Any other
                         type means no bias.
        :param layer_attr: Extra Layer Attribute.
        :type layer_attr: ExtraLayerAttribute or None
        """
        LayerOutput.__init__(
            self,
            name,
            LayerType.MIXED_LAYER,
            parents,
            size=size,
            activation=act)
        self.bias_attr = bias_attr
        self.layer_attr = layer_attr
        # Projections/operators added so far; flushed in __exit__.
        self.inputs = []
        # Becomes True once the MixedLayer config has been emitted.
        self.finalized = False

    def __iadd__(self, other):
        """
        Implement the ``+=`` operator: append a projection or operator.

        :param other: Other projection.
        :type other: Projection
        :return: self.
        :rtype: MixedLayerType
        """
        if not self.finalized:
            assert isinstance(other, Projection) or isinstance(other, Operator)
            self.inputs.append(other)
            # A Projection has one origin layer; an Operator has several.
            if isinstance(other, Projection):
                self.parents.append(other.origin)
            else:
                self.parents.extend(other.origin)
            return self
        else:
            raise MixedLayerType.AddToSealedMixedLayerException()

    def __enter__(self):
        # Must enter the context before any input has been added.
        assert len(self.inputs) == 0
        return self

    def __exit__(self, *args, **kwargs):
        del args, kwargs  # unused parameter to suppress warning
        assert len(self.inputs) != 0
        # Emit the actual MixedLayer config from the accumulated inputs.
        ml = MixedLayer(
            name=self.name,
            size=self.size,
            active_type=self.activation.name,
            bias=ParamAttr.to_bias(self.bias_attr),
            inputs=self.inputs,
            **ExtraLayerAttribute.to_kwargs(self.layer_attr))
        # update the size which might be computed inside MixedLayer
        # according to the operator's output size
        self.size = ml.config.size
@wrap_name_default("mixed")
@wrap_act_default(act=LinearActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support(ERROR_CLIPPING, DROPOUT)
def mixed_layer(size=0,
                input=None,
                name=None,
                act=None,
                bias_attr=False,
                layer_attr=None):
    """
    Mixed Layer: sums all of its inputs (projections or operators), then
    applies the activation.

    There are two styles of usages.

    1. Without the ``input`` parameter, use it as a context manager:

    .. code-block:: python

       with mixed_layer(size=256) as m:
           m += full_matrix_projection(input=layer1)
           m += identity_projection(input=layer2)

    2. Or pass all inputs at once:

    .. code-block:: python

       m = mixed_layer(size=256,
                       input=[full_matrix_projection(input=layer1),
                              full_matrix_projection(input=layer2)])

    :param name: mixed layer name. Can be referenced by other layer.
    :type name: basestring
    :param size: layer size.
    :type size: int
    :param input: inputs layer. It is an optional parameter. If set,
                  then this function will just return layer's name.
    :param act: Activation Type.
    :type act: BaseActivation
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will get a
                      default Bias.
    :type bias_attr: ParameterAttribute or None or bool
    :param layer_attr: The extra layer config. Default is None.
    :type layer_attr: ExtraLayerAttribute
    :return: MixedLayerType object can add inputs or layer name.
    :rtype: MixedLayerType
    """
    # Context-manager style: hand back the unsealed MixedLayerType.
    if input is None:
        return MixedLayerType(name, size, act, bias_attr, layer_attr)

    # All-at-once style: feed every input through the context manager.
    with mixed_layer(
            name=name,
            size=size,
            act=act,
            bias_attr=bias_attr,
            layer_attr=layer_attr) as m:
        if isinstance(input, collections.Sequence):
            for each in input:
                m += each
        else:
            m += input
    return m
@layer_support()
def data_layer(name, size, height=None, width=None, layer_attr=None):
    """
    Define a DataLayer, the entry point of the network.

    Example:

    .. code-block:: python

       data = data_layer(name="input",
                         size=1000)

    :param name: Name of this data layer.
    :type name: basestring
    :param size: Size of this data layer.
    :type size: int
    :param height: Height of this data layer, used for image
    :type height: int|None
    :param width: Width of this data layer, used for image
    :type width: int|None
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        type=LayerType.DATA,
        name=name,
        size=size,
        height=height,
        width=width,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.DATA, size=size)
@wrap_name_default("embedding")
@wrap_param_attr_default()
@layer_support(ERROR_CLIPPING)
def embedding_layer(input, size, name=None, param_attr=None, layer_attr=None):
    """
    Define an embedding layer: a table lookup wrapped in a linear, bias-free
    mixed_layer.

    :param name: Name of this embedding layer.
    :type name: basestring
    :param input: The input layer for this embedding. NOTE: must be Index Data.
    :type input: LayerOutput
    :param size: The embedding dimension.
    :type size: int
    :param param_attr: The embedding parameter attribute. See ParameterAttribute
                      for details.
    :type param_attr: ParameterAttribute|None
    :param layer_attr: Extra layer Config. Default is None.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    with mixed_layer(
            name=name,
            size=size,
            act=LinearActivation(),
            bias_attr=False,
            layer_attr=layer_attr) as emb:
        emb += table_projection(input=input, size=size, param_attr=param_attr)
    return emb
@wrap_name_default()
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default()
@layer_support(ERROR_CLIPPING, DROPOUT)
def fc_layer(input,
             size,
             act=None,
             name=None,
             param_attr=None,
             bias_attr=None,
             layer_attr=None):
    """
    Declare a fully connected layer.

    Example:

    .. code-block:: python

       fc = fc_layer(input=layer,
                     size=1024,
                     act=LinearActivation(),
                     bias_attr=False)

    which is equal to:

    .. code-block:: python

       with mixed_layer(size=1024) as fc:
           fc += full_matrix_projection(input=layer)

    :param name: The Layer Name.
    :type name: basestring
    :param input: The input layer. Could be a list/tuple of input layer.
    :type input: LayerOutput|list|tuple
    :param size: The layer dimension.
    :type size: int
    :param act: Activation Type. Default is tanh.
    :type act: BaseActivation
    :param param_attr: The Parameter Attribute|list.
    :type param_attr: ParameterAttribute
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will get a
                      default Bias.
    :type bias_attr: ParameterAttribute|None|Any
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Normalize ``input`` and ``param_attr`` into parallel lists.
    if isinstance(input, LayerOutput):
        input = [input]
        assert not isinstance(param_attr, collections.Sequence)
        param_attr = [param_attr]
    elif isinstance(param_attr, collections.Sequence):
        assert len(input) == len(param_attr)
    else:
        # One independent copy per input so parameter attrs are not shared.
        param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))]
    assert isinstance(input, collections.Sequence)

    Layer(
        inputs=[
            Input(ipt.name, **attr.attr) for ipt, attr in zip(input, param_attr)
        ],
        name=name,
        type=LayerType.FC_LAYER,
        size=size,
        bias=ParamAttr.to_bias(bias_attr),
        active_type=act.name,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.FC_LAYER, input, activation=act, size=size)
@wrap_name_default("print")
def print_layer(input, name=None):
    """
    Print the output value of the given input layers; useful for debugging.

    :param name: The Layer Name.
    :type name: basestring
    :param input: The input layer. Could be a list/tuple of input layer.
    :type input: LayerOutput|list|tuple
    :return: LayerOutput
    """
    if isinstance(input, LayerOutput):
        input = [input]
    assert isinstance(input, collections.Sequence)  # list or tuple
    for each in input:
        assert isinstance(each, LayerOutput)
    Layer(
        name=name,
        type=LayerType.PRINT_LAYER,
        inputs=[l.name for l in input], )
    # This layer deliberately returns nothing: it cannot feed other layers.
@wrap_name_default("priorbox")
def priorbox_layer(input,
                   image,
                   aspect_ratio,
                   variance,
                   min_size,
                   max_size=None,
                   name=None):
    """
    Compute the priorbox and set the variance. This layer is necessary for ssd.

    :param name: The Layer Name.
    :type name: basestring
    :param input: The input layer.
    :type input: LayerOutput
    :param image: The network input image.
    :type image: LayerOutput
    :param aspect_ratio: The aspect ratio.
    :type aspect_ratio: list
    :param variance: The bounding box variance.
    :type variance: list
    :param min_size: The min size of the priorbox width/height.
    :type min_size: list
    :param max_size: The max size of the priorbox width/height. Could be NULL
                     (None is treated as an empty list).
    :type max_size: list|None
    :return: LayerOutput
    """
    # Use None as the default instead of a shared mutable list literal
    # (mutable default arguments are shared across calls).
    if max_size is None:
        max_size = []
    # plus one for ratio 1.
    num_filters = (len(aspect_ratio) * 2 + 1 + len(max_size)) * 4
    size = (input.size / input.num_filters) * num_filters * 2
    Layer(
        name=name,
        type=LayerType.PRIORBOX_LAYER,
        inputs=[input.name, image.name],
        size=size,
        min_size=min_size,
        max_size=max_size,
        aspect_ratio=aspect_ratio,
        variance=variance)
    return LayerOutput(
        name,
        LayerType.PRIORBOX_LAYER,
        parents=[input, image],
        num_filters=num_filters,
        size=size)
@wrap_name_default("seq_pooling")
@wrap_bias_attr_default(has_bias=False)
@wrap_param_default(['pooling_type'], default_factory=lambda _: MaxPooling())
@layer_support()
def pooling_layer(input,
                  pooling_type=None,
                  name=None,
                  bias_attr=None,
                  agg_level=AggregateLevel.EACH_TIMESTEP,
                  layer_attr=None):
    """
    Pooling layer for sequence inputs (not for images).

    Example:

    .. code-block:: python

       seq_pool = pooling_layer(input=layer,
                                pooling_type=AvgPooling(),
                                agg_level=AggregateLevel.EACH_SEQUENCE)

    :param agg_level: AggregateLevel.EACH_TIMESTEP or
                      AggregateLevel.EACH_SEQUENCE
    :type agg_level: AggregateLevel
    :param name: layer name.
    :type name: basestring
    :param input: input layer name.
    :type input: LayerOutput
    :param pooling_type: Type of pooling, MaxPooling(default), AvgPooling,
                         SumPooling, SquareRootNPooling.
    :type pooling_type: BasePoolingType|None
    :param bias_attr: Bias parameter attribute. False if no bias.
    :type bias_attr: ParameterAttribute|None|False
    :param layer_attr: The Extra Attributes for layer, such as dropout.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Collect pooling-type-specific config keys.
    pool_kwargs = dict()
    # noinspection PyUnresolvedReferences
    if isinstance(pooling_type, AvgPooling):
        pool_kwargs['average_strategy'] = pooling_type.strategy
    elif isinstance(pooling_type, MaxPooling) and \
            pooling_type.output_max_index is not None:
        assert isinstance(pooling_type.output_max_index, bool)
        pool_kwargs['output_max_index'] = pooling_type.output_max_index
    pool_kwargs.update(ExtraLayerAttribute.to_kwargs(layer_attr))

    Layer(
        name=name,
        type=pooling_type.name,
        inputs=[Input(input.name)],
        bias=ParamAttr.to_bias(bias_attr),
        trans_type=agg_level,
        **pool_kwargs)
    # Sequence pooling keeps the per-timestep width of the input.
    return LayerOutput(
        name, pooling_type.name, parents=[input], size=input.size)
@wrap_bias_attr_default()
@wrap_param_attr_default()
@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
@wrap_act_default(param_names=["act", 'state_act'], act=TanhActivation())
@wrap_name_default("lstmemory")
@layer_support(DROPOUT)
def lstmemory(input,
              name=None,
              reverse=False,
              act=None,
              gate_act=None,
              size=None,
              state_act=None,
              bias_attr=None,
              param_attr=None,
              layer_attr=None):
    """
    Long Short-term Memory Cell, implemented by the following equations:

    .. math::

        i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)

        f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)

        c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)

        o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)

        h_t & = o_t tanh(c_t)

    NOTE: In PaddlePaddle's implementation, the multiplications
    :math:`W_{xi}x_{t}` , :math:`W_{xf}x_{t}`,
    :math:`W_{xc}x_t`, :math:`W_{xo}x_{t}` are not done in the lstmemory layer,
    so an additional mixed_layer with full_matrix_projection or a fc_layer must
    be included in the configuration file to complete the input-to-hidden
    mappings before lstmemory is called.

    NOTE: This is a low level user interface. You can use network.simple_lstm
    to config a simple plain lstm layer.  For background, see
    **Generating Sequences With Recurrent Neural Networks**
    (http://arxiv.org/abs/1308.0850).

    :param name: The lstmemory layer name.
    :type name: basestring
    :param input: input layer name.
    :type input: LayerOutput
    :param reverse: is sequence process reversed or not.
    :type reverse: bool
    :param act: activation type, TanhActivation by default. :math:`h_t`
    :type act: BaseActivation
    :param gate_act: gate activation type, SigmoidActivation by default.
    :type gate_act: BaseActivation
    :param state_act: state activation type, TanhActivation by default.
    :type state_act: BaseActivation
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
    :type bias_attr: ParameterAttribute|None|False
    :param param_attr: Parameter Attribute.
    :type param_attr: ParameterAttribute|None|False
    :param size: Stub parameter; the real size is input.size / 4.  Setting it
                 emits a warning (or a fatal error when inconsistent).
    :type size: int|None
    :param layer_attr: Extra Layer attribute
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # All three activations must be supported by the HPPL backend.
    assert gate_act.support_hppl
    assert state_act.support_hppl
    assert act.support_hppl
    # The input carries the four concatenated gate pre-activations.
    assert input.size is not None and input.size % 4 == 0
    if size is not None:
        # Warn when consistent with input.size / 4, abort otherwise.
        plog = logger.warning if input.size / 4 == size else logger.fatal
        plog("NOTE: The lstmemory layer[%s]'s size is set by previous input "
             "layer. The lstm size should be equal with input layer size/4. The"
             " size which is set explicitly will be ignored." % name)

    Layer(
        name=name,
        type=LayerType.LSTMEMORY,
        active_type=act.name,
        active_state_type=state_act.name,
        active_gate_type=gate_act.name,
        reversed=reverse,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=[Input(input.name, **param_attr.attr)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.LSTMEMORY, [input],
        size=input.size / 4,
        reverse=reverse)
@wrap_bias_attr_default()
@wrap_param_attr_default()
@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
@wrap_act_default(param_names=["act"], act=TanhActivation())
@wrap_name_default("gru")
@layer_support(DROPOUT)
def grumemory(input,
              name=None,
              reverse=False,
              act=None,
              gate_act=None,
              size=None,
              bias_attr=None,
              param_attr=None,
              layer_attr=None):
    """
    Gate Recurrent Unit Layer, implemented by the following equations:

    1. update gate :math:`z`: defines how much of the previous memory to
       keep around or the unit updates its activations. The update gate
       is computed by:

    .. math::

        z_t = \\sigma(W_{z}x_{t} + U_{z}h_{t-1} + b_z)

    2. reset gate :math:`r`: determines how to combine the new input with the
       previous memory. The reset gate is computed similarly to the update gate:

    .. math::

        r_t = \\sigma(W_{r}x_{t} + U_{r}h_{t-1} + b_r)

    3. The candidate activation :math:`\\tilde{h_t}` is computed similarly to
       that of the traditional recurrent unit:

    .. math::

        {\\tilde{h_t}} = tanh(W x_{t} + U (r_{t} \odot h_{t-1}) + b)

    4. The hidden activation :math:`h_t` of the GRU at time t is a linear
       interpolation between the previous activation :math:`h_{t-1}` and the
       candidate activation :math:`\\tilde{h_t}`:

    .. math::

        h_t = (1 - z_t) h_{t-1} + z_t {\\tilde{h_t}}

    NOTE: In PaddlePaddle's implementation, the multiplication operations
    :math:`W_{r}x_{t}`, :math:`W_{z}x_{t}` and :math:`W x_t` are not computed
    in gate_recurrent layer. Consequently, an additional mixed_layer with
    full_matrix_projection or a fc_layer must be included before grumemory
    is called.  See `Empirical Evaluation of Gated Recurrent Neural Networks
    on Sequence Modeling <https://arxiv.org/abs/1412.3555>`_ for details.

    The simple usage is:

    .. code-block:: python

       gru = grumemory(input)

    :param name: The gru layer name.
    :type name: None|basestring
    :param input: input layer.
    :type input: LayerOutput.
    :param reverse: Whether sequence process is reversed or not.
    :type reverse: bool
    :param act: activation type, TanhActivation by default. This activation
                affects the :math:`{\\tilde{h_t}}`.
    :type act: BaseActivation
    :param gate_act: gate activation type, SigmoidActivation by default.
                     This activation affects the :math:`z_t` and :math:`r_t`.
                     It is the :math:`\\sigma` in the above formula.
    :type gate_act: BaseActivation
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
    :type bias_attr: ParameterAttribute|None|False
    :param param_attr: Parameter Attribute.
    :type param_attr: ParameterAttribute|None|False
    :param layer_attr: Extra Layer attribute
    :type layer_attr: ExtraLayerAttribute|None
    :param size: Stub parameter of size, but actually not used. If set this size
                 will get a warning.
    :type size: None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Both activations must be supported by the HPPL backend.
    assert act.support_hppl
    assert gate_act.support_hppl
    # The input carries the three concatenated gate pre-activations.
    assert input.size is not None and input.size % 3 == 0
    if size is not None:
        # Warn when consistent with input.size / 3, abort otherwise.
        plog = logger.warning if input.size / 3 == size else logger.fatal
        plog("NOTE: the gru memory layer's size is set by previous input layer,"
             " and should be input size / 3. Set size explicitly will be "
             "ignored.")

    Layer(
        name=name,
        type=LayerType.GRUMEMORY,
        active_type=act.name,
        active_gate_type=gate_act.name,
        reversed=reverse,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=[Input(input.name, **param_attr.attr)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.GRUMEMORY, [input],
        size=input.size / 3,
        reverse=reverse)
@wrap_name_default()
@layer_support()
def last_seq(input,
             name=None,
             agg_level=AggregateLevel.EACH_TIMESTEP,
             layer_attr=None):
    """
    Get the activation of the last timestep of a sequence.

    :param agg_level: Aggregated level
    :param name: Layer name.
    :type name: basestring
    :param input: Input layer name.
    :type input: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # A reversed input's "last" element carries no temporal meaning.
    if input.reverse is not None and input.reverse:
        logger.warning("You are getting the last instance of a sequence that"
                       " is a output of a REVERSED layer. There is no time"
                       " series information at all. Maybe you want to use"
                       " first_seq instead.")

    Layer(
        name=name,
        type=LayerType.SEQUENCE_LAST_INSTANCE,
        inputs=[input.name],
        trans_type=agg_level,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEQUENCE_LAST_INSTANCE,
        parents=[input],
        size=input.size)
@wrap_name_default()
@layer_support()
def first_seq(input,
              name=None,
              agg_level=AggregateLevel.EACH_TIMESTEP,
              layer_attr=None):
    """
    Fetch the activation at the first timestep of each sequence.
    :param agg_level: level of aggregation (timestep or sequence).
    :param name: Layer name.
    :type name: basestring
    :param input: Input layer.
    :type input: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Taking the "first" element of a forward (non-reversed) sequence output
    # usually indicates the user wanted last_seq instead; warn about it.
    is_forward = input.reverse is not None and not input.reverse
    if is_forward:
        logger.warning('You are getting the first instance for a time series,'
                       ' and it is a normal recurrent layer output. There is no'
                       ' time series information at all. Maybe you want to use'
                       ' last_seq instead.')
    Layer(
        name=name,
        inputs=[input.name],
        trans_type=agg_level,
        type=LayerType.SEQUENCE_FIRST_INSTANCE,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEQUENCE_FIRST_INSTANCE,
        parents=[input],
        size=input.size)
class ExpandLevel(object):
    """
    Expansion granularity constants used by expand_layer; they alias the
    corresponding AggregateLevel values.
    """
    # Expand per timestep (dense data -> sequence).
    FROM_TIMESTEP = AggregateLevel.EACH_TIMESTEP
    # Expand per sequence (single-element sequence -> full sequence).
    FROM_SEQUENCE = AggregateLevel.EACH_SEQUENCE
@wrap_name_default()
@layer_support()
def expand_layer(input,
                 expand_as,
                 name=None,
                 bias_attr=False,
                 expand_level=ExpandLevel.FROM_TIMESTEP,
                 layer_attr=None):
    """
    Expand dense data, or sequence data whose sequences each have length one,
    into full sequence data following the sequence layout of ``expand_as``.
    The example usage is:
    .. code-block:: python
       expand = expand_layer(input=layer1,
                             expand_as=layer2,
                             expand_level=ExpandLevel.FROM_TIMESTEP)
    :param input: Input layer
    :type input: LayerOutput
    :param expand_as: Expand as this layer's sequence info.
    :type expand_as: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param bias_attr: Bias attribute. None means default bias. False means no
                      bias.
    :type bias_attr: ParameterAttribute|None|False
    :param expand_level: whether input layer is timestep(default) or sequence.
    :type expand_level: ExpandLevel
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    bias_cfg = ParamAttr.to_bias(bias_attr=bias_attr)
    Layer(
        name=name,
        type=LayerType.EXPAND_LAYER,
        inputs=[input.name, expand_as.name],
        trans_type=expand_level,
        bias=bias_cfg,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.EXPAND_LAYER,
        parents=[input, expand_as],
        size=input.size)
@wrap_name_default()
@layer_support()
def repeat_layer(input, num_repeats, name=None, layer_attr=None):
    """
    Repeat the input ``num_repeats`` times; equivalent to concat_layer applied
    to ``num_repeats`` copies of the same input.
    .. math::
       y = [x, x, \cdots, x]
    The example usage is:
    .. code-block:: python
       expand = repeat_layer(layer, 4)
    :param input: Input layer
    :type input: LayerOutput
    :param num_repeats: Repeat the input so many times
    :type num_repeats: int
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # num_filters carries the repeat count for FEATURE_MAP_EXPAND_LAYER.
    repeated = Layer(
        name=name,
        type=LayerType.FEATURE_MAP_EXPAND_LAYER,
        inputs=[input.name],
        num_filters=num_repeats,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.FEATURE_MAP_EXPAND_LAYER,
        parents=[input],
        size=repeated.config.size)
@wrap_name_default()
@layer_support()
def interpolation_layer(input, weight, name=None, layer_attr=None):
    """
    Linear interpolation between two inputs, as used in NEURAL TURING MACHINE.
    .. math::
       y.row[i] = w[i] * x_1.row[i] + (1 - w[i]) * x_2.row[i]
    where :math:`x_1` and :math:`x_2` are two (batchSize x dataDim) inputs,
    :math:`w` is a (batchSize x 1) weight vector, and :math:`y` is the
    (batchSize x dataDim) output.
    The example usage is:
    .. code-block:: python
       interpolation = interpolation_layer(input=[layer1, layer2], weight=layer3)
    :param input: Input layer.
    :type input: list|tuple
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, collections.Sequence)
    assert len(input) == 2
    first, second = input
    assert isinstance(first, LayerOutput) and isinstance(second, LayerOutput)
    if first.size is not None and second.size is not None:
        assert first.size == second.size
    assert isinstance(weight, LayerOutput)
    if weight.size is not None:
        # The interpolation coefficient is a single scalar per sample.
        assert weight.size == 1
    Layer(
        name=name,
        type=LayerType.INTERPOLATION_LAYER,
        inputs=[weight.name, first.name, second.name],
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.INTERPOLATION_LAYER,
        parents=[weight, first, second],
        size=first.size)
@wrap_name_default()
@layer_support()
def bilinear_interp_layer(input,
                          out_size_x=None,
                          out_size_y=None,
                          name=None,
                          layer_attr=None):
    """
    Bilinear interpolation on the output of a convolution layer.
    Please refer to Wikipedia: https://en.wikipedia.org/wiki/Bilinear_interpolation
    The simple usage is:
    .. code-block:: python
       bilinear = bilinear_interp_layer(input=layer1, out_size_x=64, out_size_y=64)
    :param input: A input layer.
    :type input: LayerOutput.
    :param out_size_x: bilinear interpolation output width.
    :type out_size_x: int|None
    :param out_size_y: bilinear interpolation output height.
    :type out_size_y: int|None
    :param name: The layer's name, which can be left unspecified.
    :type name: None|basestring
    :param layer_attr: Extra Layer attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Only a linearly-activated conv layer output is a valid input here.
    assert input.layer_type == LayerType.CONV_LAYER
    assert isinstance(input.activation, LinearActivation)
    assert out_size_x > 0 and out_size_y > 0
    assert input.num_filters is not None
    num_channels = input.num_filters
    interp_conf = BilinearInterp(
        out_size_x=out_size_x, out_size_y=out_size_y, channels=num_channels)
    l = Layer(
        name=name,
        type=LayerType.BILINEAR_INTERP_LAYER,
        inputs=Input(input.name, bilinear_interp=interp_conf),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.BILINEAR_INTERP_LAYER,
        parents=[input],
        num_filters=num_channels,
        size=l.config.size)
@wrap_name_default()
@layer_support()
def power_layer(input, weight, name=None, layer_attr=None):
    """
    Apply an element-wise power function to a vector, as used in
    NEURAL TURING MACHINE.
    .. math::
       y = x^w
    where :math:`x` is an input vector, :math:`w` is a scalar weight,
    and :math:`y` is the output vector.
    The example usage is:
    .. code-block:: python
       power = power_layer(input=layer1, weight=layer2)
    :param input: Input layer.
    :type input: LayerOutput
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput) and isinstance(weight, LayerOutput)
    if weight.size is not None:
        # The exponent is a single scalar per sample.
        assert weight.size == 1
    Layer(
        name=name,
        inputs=[weight.name, input.name],
        type=LayerType.POWER_LAYER,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.POWER_LAYER, parents=[input, weight], size=input.size)
@wrap_name_default()
@layer_support()
def scaling_layer(input, weight, name=None, layer_attr=None):
    """
    Multiply an input vector by a scalar weight.
    .. math::
       y = w x
    where :math:`x` is the size=dataDim input, :math:`w` the size=1 weight,
    and :math:`y` the size=dataDim output. The computation above is per
    sample; multiple samples are processed in one batch.
    The example usage is:
    .. code-block:: python
       scale = scaling_layer(input=layer1, weight=layer2)
    :param input: Input layer.
    :type input: LayerOutput
    :param weight: Weight layer.
    :type weight: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(weight, LayerOutput) and isinstance(input, LayerOutput)
    if weight.size is not None:
        # The scaling factor is a single scalar per sample.
        assert weight.size == 1
    Layer(
        name=name,
        inputs=[weight.name, input.name],
        type=LayerType.SCALING_LAYER,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.SCALING_LAYER, parents=[weight, input], size=input.size)
@wrap_name_default()
@layer_support()
def trans_layer(input, name=None, layer_attr=None):
    """
    Matrix transposition layer.
    .. math::
       y = x^\mathrm{T}
    where :math:`x` is the (M x N) input and :math:`y` the (N x M) output.
    The example usage is:
    .. code-block:: python
       trans = trans_layer(input=layer)
    :param input: Input layer.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        inputs=[input.name],
        type=LayerType.TRANS_LAYER,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.TRANS_LAYER, parents=[input], size=input.size)
@wrap_name_default()
@layer_support()
def cos_sim(a, b, scale=5, size=1, name=None, layer_attr=None):
    """
    Cosine Similarity Layer.
    .. math::
       similarity = cos(\\theta) = {\\mathbf{a} \\cdot \\mathbf{b}
       \\over \\|\\mathbf{a}\\| \\|\\mathbf{b}\\|}
    The size of a is M, size of b is M*N; similarity is computed N times in
    steps of M, giving an output of size N scaled by ``scale``. The
    computation above is per sample; multiple samples are processed in one
    batch.
    :param name: layer name
    :type name: basestring
    :param a: input layer a
    :type a: LayerOutput
    :param b: input layer b
    :type b: LayerOutput
    :param scale: scale for cosine value. default is 5.
    :type scale: float
    :param size: layer size. NOTE size_a * size should equal size_b.
    :type size: int
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput) and isinstance(b, LayerOutput)
    # Both branches share the same base configuration; only the type (and,
    # for the vectorized variant, the explicit size) differ.
    shared_kwargs = dict(
        name=name,
        cos_scale=scale,
        inputs=[a.name, b.name])
    shared_kwargs.update(ExtraLayerAttribute.to_kwargs(layer_attr))
    if size == 1:
        Layer(type=LayerType.COSINE_SIM, **shared_kwargs)
    else:
        if a.size is not None and b.size is not None:
            assert size == b.size / a.size
        Layer(type=LayerType.COSINE_SIM_VEC, size=size, **shared_kwargs)
    return LayerOutput(name, LayerType.COSINE_SIM, parents=[a, b], size=size)
@wrap_name_default()
@wrap_bias_attr_default(has_bias=True)
@wrap_param_attr_default()
@layer_support()
def hsigmoid(input,
             label,
             num_classes,
             name=None,
             bias_attr=None,
             param_attr=None,
             layer_attr=None):
    """
    Organize the classes into a binary tree. At each node, a sigmoid function
    is used to calculate the probability of belonging to the right branch.
    This idea is from "F. Morin, Y. Bengio (AISTATS 05):
    Hierarchical Probabilistic Neural Network Language Model."
    The example usage is:
    .. code-block:: python
       cost = hsigmoid(input=[layer1, layer2],
                       label=data_layer,
                       num_classes=3)
    :param input: Input layers. It could be a LayerOutput or list/tuple of
                 LayerOutput.
    :type input: LayerOutput|list|tuple
    :param label: Label layer.
    :type label: LayerOutput
    :param num_classes: number of classes.
    :type num_classes: int
    :param name: layer name
    :type name: basestring
    :param bias_attr: Bias attribute. None means default bias.
                      False means no bias.
    :type bias_attr: ParameterAttribute|False
    :param param_attr: Parameter attribute(s) for the inputs. A single
                       attribute is broadcast to every input; a sequence must
                       match the number of inputs.
    :type param_attr: ParameterAttribute|list|tuple|None
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Normalize ``input``/``param_attr`` into parallel sequences so they can
    # be zipped together below.
    if isinstance(input, LayerOutput):
        input = [input]
        if not isinstance(param_attr, collections.Sequence):
            param_attr = [param_attr]
    else:
        if not isinstance(param_attr, collections.Sequence):
            # Broadcast one attribute to all inputs.
            param_attr = [param_attr] * len(input)
        else:
            assert len(param_attr) == len(input)
    assert isinstance(input, collections.Sequence)
    assert isinstance(label, LayerOutput)
    # The label must be an integer-id data layer.
    assert label.layer_type == LayerType.DATA
    ipts_for_layer = []
    parents = []
    for each_input, each_param_attr in zip(input, param_attr):
        assert isinstance(each_input, LayerOutput)
        ipts_for_layer.append(Input(each_input.name, **each_param_attr.attr))
        parents.append(each_input)
    # The label is wired in as the last input of the layer.
    ipts_for_layer.append(label.name)
    parents.append(label)
    l = Layer(
        name=name,
        type=LayerType.HSIGMOID,
        num_classes=num_classes,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=ipts_for_layer,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.HSIGMOID, parents=parents, size=l.config.size)
@wrap_name_default("conv")
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default(act=ReluActivation())
@layer_support(DROPOUT)
def img_conv_layer(input,
filter_size,
num_filters,
name=None,
num_channels=None,
act=None,
groups=1,
stride=1,
padding=0,
bias_attr=None,
param_attr=None,
shared_biases=True,
layer_attr=None,
filter_size_y=None,
stride_y=None,
padding_y=None,
trans=False,
layer_type=None):
"""
Convolution layer for image. Paddle can support both square and non-square
input currently.
The details of convolution layer, please refer UFLDL's `convolution
<http://ufldl.stanford.edu/tutorial/supervised/
FeatureExtractionUsingConvolution/>`_ .
Convolution Transpose (deconv) layer for image. Paddle can support both square
and non-square input currently.
The details of convolution transpose layer,
please refer to the following explanation and references therein
<http://datascience.stackexchange.com/questions/6107/
what-are-deconvolutional-layers/>`_ .
The num_channel means input image's channel number. It may be 1 or 3 when
input is raw pixels of image(mono or RGB), or it may be the previous layer's
num_filters * num_group.
There are several group of filter in PaddlePaddle implementation.
Each group will process some channel of the inputs. For example, if an input
num_channel = 256, group = 4, num_filter=32, the PaddlePaddle will create
32*4 = 128 filters to process inputs. The channels will be split into 4
pieces. First 256/4 = 64 channels will process by first 32 filters. The
rest channels will be processed by rest group of filters.
:param name: Layer name.
:type name: basestring
:param input: Layer Input.
:type input: LayerOutput
:param filter_size: The x dimension of a filter kernel. Or input a tuple for
two image dimension.
:type filter_size: int|tuple|list
:param filter_size_y: The y dimension of a filter kernel. Since PaddlePaddle
currently supports rectangular filters, the filter's
shape will be (filter_size, filter_size_y).
:type filter_size_y: int|None
:param num_filters: Each filter group's number of filter
:param act: Activation type. Default is tanh
:type act: BaseActivation
:param groups: Group size of filters.
:type groups: int
:param stride: The x dimension of the stride. Or input a tuple for two image
dimension.
:type stride: int|tuple|list
:param stride_y: The y dimension of the stride.
:type stride_y: int
:param padding: The x dimension of the padding. Or input a tuple for two
image dimension
:type padding: int|tuple|list
:param padding_y: The y dimension of the padding.
:type padding_y: int
:param bias_attr: Convolution bias attribute. None means default bias.
False means no bias.
:type bias_attr: ParameterAttribute|False
:param num_channels: number of input channels. If None will be set
automatically from previous output.
:type num_channels: int
:param param_attr: Convolution param attribute. None means default attribute
:type param_attr: ParameterAttribute
:param shared_biases: Is biases will be shared between filters or not.
:type shared_biases: bool
:param layer_attr: Layer Extra Attribute.
:type layer_attr: ExtraLayerAttribute
:param trans: true if it is a convTransLayer, false if it is a convLayer
:type trans: bool
:param layer_type: specify the layer_type, default is None. If trans=True,
layer_type has to be "exconvt", otherwise layer_type
has to be either "exconv" or "cudnn_conv"
:type layer_type: String
:return: LayerOutput object.
:rtype: LayerOutput
"""
if num_channels is None:
assert input.num_filters is not None
num_channels = input.num_filters
if filter_size_y is None:
if isinstance(filter_size, collections.Sequence):
assert len(filter_size) == 2
filter_size, filter_size_y = filter_size
else:
filter_size_y = filter_size
if stride_y is None:
if isinstance(stride, collections.Sequence):
assert len(stride) == 2
stride, stride_y = stride
else:
stride_y = stride
if padding_y is None:
if isinstance(padding, collections.Sequence):
assert len(padding) == 2
padding, padding_y = padding
else:
padding_y = padding
if param_attr.attr.get('initial_smart'):
# special initial for conv layers.
init_w = (2.0 / (filter_size**2 * num_channels))**0.5
param_attr.attr["initial_mean"] = 0.0
param_attr.attr["initial_std"] = init_w
param_attr.attr["initial_strategy"] = 0
param_attr.attr["initial_smart"] = False
if layer_type:
if trans:
assert layer_type in ["exconvt"]
else:
assert layer_type in ["exconv", "cudnn_conv"]
lt = layer_type
else:
lt = LayerType.CONVTRANS_LAYER if trans else LayerType.CONV_LAYER
l = Layer(
name=name,
inputs=Input(
input.name,
conv=Conv(
filter_size=filter_size,
padding=padding,
stride=stride,
channels=num_channels,
groups=groups,
filter_size_y=filter_size_y,
padding_y=padding_y,
stride_y=stride_y),
**param_attr.attr),
active_type=act.name,
num_filters=num_filters,
bias=ParamAttr.to_bias(bias_attr),
shared_biases=shared_biases,
type=lt,
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name,
lt,
parents=[input],
activation=act,
num_filters=num_filters,
size=l.config.size)
@wrap_name_default("pool")
@layer_support()
def img_pool_layer(input,
pool_size,
name=None,
num_channels=None,
pool_type=None,
stride=1,
padding=0,
layer_attr=None,
pool_size_y=None,
stride_y=None,
padding_y=None):
"""
Image pooling Layer.
The details of pooling layer, please refer ufldl's pooling_ .
.. _pooling: http://ufldl.stanford.edu/tutorial/supervised/Pooling/
:param padding: pooling padding width.
:type padding: int
:param padding_y: pooling padding height. It's equal to padding by default.
:type padding_y: int|None
:param name: name of pooling layer
:type name: basestring.
:param input: layer's input
:type input: LayerOutput
:param pool_size: pooling window width
:type pool_size: int
:param pool_size_y: pooling window height. It's eaqual to pool_size by default.
:type pool_size_y: int|None
:param num_channels: number of input channel.
:type num_channels: int
:param pool_type: pooling type. MaxPooling or AvgPooling. Default is
MaxPooling.
:type pool_type: BasePoolingType
:param stride: stride width of pooling.
:type stride: int
:param stride_y: stride height of pooling. It is equal to stride by default.
:type stride_y: int|None
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
if num_channels is None:
assert input.num_filters is not None
num_channels = input.num_filters
if pool_type is None:
pool_type = MaxPooling()
elif isinstance(pool_type, AvgPooling):
pool_type.name = 'avg'
type_name = pool_type.name + '-projection' \
if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)) \
else pool_type.name
pool_size_y = pool_size if pool_size_y is None else pool_size_y
stride_y = stride if stride_y is None else stride_y
padding_y = padding if padding_y is None else padding_y
l = Layer(
name=name,
type=LayerType.POOL_LAYER,
inputs=[
Input(
input.name,
pool=Pool(
pool_type=type_name,
channels=num_channels,
size_x=pool_size,
start=None,
stride=stride,
padding=padding,
size_y=pool_size_y,
stride_y=stride_y,
padding_y=padding_y))
],
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name,
LayerType.POOL_LAYER,
parents=[input],
num_filters=num_channels,
size=l.config.size)
@wrap_name_default("spp")
@layer_support()
def spp_layer(input,
name=None,
num_channels=None,
pool_type=None,
pyramid_height=None,
layer_attr=None):
"""
Spatial Pyramid Pooling in Deep Convolutional Networks for Visual Recognition.
The details please refer to
`Kaiming He's paper <https://arxiv.org/abs/1406.4729>`_.
:param name: layer name.
:type name: basestring
:param input: layer's input.
:type input: LayerOutput
:param num_channels: number of input channel.
:type num_channels: int
:param pool_type: Pooling type. MaxPooling or AveragePooling. Default is MaxPooling.
:type scale: BasePoolingType
:param pyramid_height: pyramid height.
:type pyramid_height: int
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
if num_channels is None:
assert input.num_filters is not None
num_channels = input.num_filters
if pool_type is None:
pool_type = MaxPooling()
elif isinstance(pool_type, AvgPooling):
pool_type.name = 'avg'
type_name = pool_type.name
if (isinstance(pool_type, AvgPooling) or isinstance(pool_type, MaxPooling)):
type_name += '-projection'
l = Layer(
name=name,
type=LayerType.SPP_LAYER,
inputs=Input(
input.name,
spp=SpatialPyramidPool(
pool_type=type_name,
channels=num_channels,
pyramid_height=pyramid_height)),
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name,
layer_type=LayerType.SPP_LAYER,
parents=[input],
num_filters=num_channels,
size=l.config.size)
def __img_norm_layer__(name, input, size, norm_type, scale, power, num_channels,
                       blocked, layer_attr):
    # Shared implementation behind the image response-normalization wrappers.
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters
    norm_conf = Norm(
        norm_type=norm_type,
        channels=num_channels,
        size=size,
        scale=scale,
        pow=power,
        blocked=blocked)
    l = Layer(
        name=name,
        type=LayerType.NORM_LAYER,
        inputs=Input(input.name, norm=norm_conf),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        layer_type=LayerType.NORM_LAYER,
        parents=[input],
        num_filters=num_channels,
        img_norm_type=norm_type,
        size=l.config.size)
@wrap_name_default("crmnorm")
@layer_support()
def img_cmrnorm_layer(input,
size,
scale=0.0128,
power=0.75,
name=None,
num_channels=None,
layer_attr=None):
"""
Response normalization across feature maps.
The details please refer to
`Alex's paper <http://www.cs.toronto.edu/~fritz/absps/imagenet.pdf>`_.
:param name: layer name.
:type name: None|basestring
:param input: layer's input.
:type input: LayerOutput
:param size: Normalize in number of :math:`size` feature maps.
:type size: int
:param scale: The hyper-parameter.
:type scale: float
:param power: The hyper-parameter.
:type power: float
:param num_channels: input layer's filers number or channels. If
num_channels is None, it will be set automatically.
:param layer_attr: Extra Layer Attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
return __img_norm_layer__(name, input, size, "cmrnorm-projection", scale,
power, num_channels, 0, layer_attr)
@wrap_bias_attr_default()
@wrap_param_attr_default(default_factory=lambda _: ParamAttr(initial_mean=1.0,
                                                             initial_std=0.))
@wrap_act_default(act=ReluActivation())
@wrap_name_default("batch_norm")
@layer_support(DROPOUT)
def batch_norm_layer(input,
                     act=None,
                     name=None,
                     num_channels=None,
                     bias_attr=None,
                     param_attr=None,
                     layer_attr=None,
                     batch_norm_type=None,
                     moving_average_fraction=0.9,
                     use_global_stats=None):
    """
    Batch Normalization Layer. The notation of this layer as follow.
    :math:`x` is the input features over a mini-batch.
    .. math::
       \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
       \ mini-batch\ mean \\\\
       \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
       \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
       \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
       \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
       y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift
    The details of batch normalization please refer to this
    `paper <http://arxiv.org/abs/1502.03167>`_.
    :param name: layer name.
    :type name: basestring
    :param input: batch normalization input. Better be linear activation.
                Because there is an activation inside batch_normalization.
    :type input: LayerOutput
    :param batch_norm_type: We have batch_norm and cudnn_batch_norm. batch_norm
                            supports both CPU and GPU. cudnn_batch_norm requires
                            cuDNN version greater or equal to v4 (>=v4). But
                            cudnn_batch_norm is faster and needs less memory
                            than batch_norm. By default (None), we will
                            automaticly select cudnn_batch_norm for GPU and
                            batch_norm for CPU. Otherwise, select batch norm
                            type based on the specified type. If you use cudnn_batch_norm,
                            we suggested you use latest version, such as v5.1.
    :type batch_norm_type: None|string, None or "batch_norm" or "cudnn_batch_norm"
    :param act: Activation Type. Better be relu. Because batch
                     normalization will normalize input near zero.
    :type act: BaseActivation
    :param num_channels: num of image channels or previous layer's number of
                         filters. None will automatically get from layer's
                         input.
    :type num_channels: int
    :param bias_attr: :math:`\\beta`, better be zero when initialize. So the
                      initial_std=0, initial_mean=1 is best practice.
    :type bias_attr: ParameterAttribute
    :param param_attr: :math:`\\gamma`, better be one when initialize. So the
                       initial_std=0, initial_mean=1 is best practice.
    :type param_attr: ParameterAttribute
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :param use_global_stats: whether use moving mean/variance statistics
                             during testing peroid. If None or True,
                             it will use moving mean/variance statistics during
                             testing. If False, it will use the mean
                             and variance of current batch of test data for
                             testing.
    :type use_global_stats: bool|None.
    :param moving_average_fraction: Factor used in the moving average
                                   computation, referred to as facotr,
                                   :math:`runningMean = newMean*(1-factor)
                                   + runningMean*factor`
    :type moving_average_fraction: float.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # These are advisory only: non-relu activations and pre-activated inputs
    # are allowed but usually unintended, so warn instead of failing.
    if not isinstance(act, ReluActivation):
        logger.log(logging.WARN,
                   "%s is not recommend for batch normalization's activation, "
                   "maybe the relu is better" % act.name)
    if not isinstance(input.activation, LinearActivation):
        logger.log(logging.WARN,
                   "The activation should be inside batch normalization, the "
                   "previous layer's activation may be Linear")
    # Infer channel count: image inputs carry num_filters, plain vector
    # inputs fall back to their size.
    if num_channels is None:
        if input.num_filters is not None:
            num_channels = input.num_filters
        else:
            num_channels = input.size
    # Only the two known implementations (or None = auto-select) are valid.
    assert (batch_norm_type is None) or (batch_norm_type == "batch_norm") or \
           (batch_norm_type == "cudnn_batch_norm")
    l = Layer(
        name=name,
        inputs=Input(
            input.name, image=Image(channels=num_channels), **param_attr.attr),
        active_type=act.name,
        type=LayerType.BATCH_NORM_LAYER,
        batch_norm_type=batch_norm_type,
        bias=ParamAttr.to_bias(bias_attr),
        moving_average_fraction=moving_average_fraction,
        use_global_stats=use_global_stats,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.BATCH_NORM_LAYER,
        parents=[input],
        activation=act,
        num_filters=num_channels,
        size=l.config.size)
@wrap_name_default()
@layer_support()
def sum_to_one_norm_layer(input, name=None, layer_attr=None):
    """
    Sum-to-one normalization, as used in NEURAL TURING MACHINE.
    .. math::
       out[i] = \\frac {in[i]} {\sum_{k=1}^N in[k]}
    where :math:`in` is a (batchSize x dataDim) input vector,
    and :math:`out` is a (batchSize x dataDim) output vector.
    The example usage is:
    .. code-block:: python
       sum_to_one_norm = sum_to_one_norm_layer(input=layer)
    :param input: Input layer.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    Layer(
        name=name,
        inputs=[input.name],
        type=LayerType.SUM_TO_ONE_NORM_LAYER,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.SUM_TO_ONE_NORM_LAYER, parents=[input], size=input.size)
@wrap_name_default("addto")
@wrap_act_default(act=LinearActivation())
@wrap_bias_attr_default(has_bias=False)
@layer_support(DROPOUT)
def addto_layer(input, act=None, name=None, bias_attr=None, layer_attr=None):
"""
AddtoLayer.
.. math::
y = f(\\sum_{i} x_i + b)
where :math:`y` is output, :math:`x` is input, :math:`b` is bias,
and :math:`f` is activation function.
The example usage is:
.. code-block:: python
addto = addto_layer(input=[layer1, layer2],
act=ReluActivation(),
bias_attr=False)
This layer just simply add all input layers together, then activate the sum
inputs. Each input of this layer should be the same size, which is also the
output size of this layer.
There is no weight matrix for each input, because it just a simple add
operation. If you want a complicated operation before add, please use
mixed_layer.
It is a very good way to set dropout outside the layers. Since not all
PaddlePaddle layer support dropout, you can add an add_to layer, set
dropout here.
Please refer to dropout_layer for details.
:param name: Layer name.
:type name: basestring
:param input: Input layers. It could be a LayerOutput or list/tuple of
LayerOutput.
:type input: LayerOutput|list|tuple
:param act: Activation Type, default is tanh.
:type act: BaseActivation
:param bias_attr: Bias attribute. If False, means no bias. None is default
bias.
:type bias_attr: ParameterAttribute|bool
:param layer_attr: Extra Layer attribute.
:type layer_attr: ExtraLayerAttribute
:return: LayerOutput object.
:rtype: LayerOutput
"""
num_filters = None
if isinstance(input, LayerOutput):
input = [input]
assert isinstance(input, collections.Sequence)
ipts_for_layer = []
for each_input in input:
assert isinstance(each_input, LayerOutput)
ipts_for_layer.append(Input(each_input.name))
if each_input.num_filters is not None:
num_filters = each_input.num_filters
l = Layer(
name=name,
type=LayerType.ADDTO_LAYER,
inputs=ipts_for_layer,
bias=ParamAttr.to_bias(bias_attr),
active_type=act.name,
**ExtraLayerAttribute.to_kwargs(layer_attr))
return LayerOutput(
name,
LayerType.ADDTO_LAYER,
parents=input,
activation=act,
num_filters=num_filters,
size=l.config.size)
@wrap_act_default(act=IdentityActivation())
@wrap_name_default("concat")
@layer_support()
def concat_layer(input, act=None, name=None, layer_attr=None, bias_attr=None):
    """
    Concat all input vector into one huge vector.
    Inputs can be list of LayerOutput or list of projection.
    The example usage is:
    .. code-block:: python
       concat = concat_layer(input=[layer1, layer2])
    :param name: Layer name.
    :type name: basestring
    :param input: input layers or projections
    :type input: list|tuple|collections.Sequence
    :param act: Activation type.
    :type act: BaseActivation
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if isinstance(input, LayerOutput):
        input = [input]
    elif isinstance(input, Projection):
        input = [input]
    else:
        assert isinstance(input, collections.Sequence)

    def __is_type__(o, tp):
        # Recursively decide whether class object ``o`` is ``tp`` or derives
        # from it; for a sequence, require a uniform answer for all elements.
        if not isinstance(o, collections.Sequence):
            if o == tp:
                return True
            elif len(o.__bases__) == 0:
                return False
            else:
                for bs in o.__bases__:
                    if __is_type__(bs, tp):
                        return True
                return False
        else:
            # NOTE(review): indexing ``map``'s result assumes Python 2
            # semantics (map returns a list) — wrap in list() before porting
            # to Python 3.
            tmp = map(lambda _x: __is_type__(_x, tp), o)
            a = tmp[0]
            for b in tmp[1:]:
                assert a == b
            return a

    def __reduce_concat_type__(a, b):
        # All inputs must be uniformly LayerOutputs or uniformly Projections;
        # mixing the two is rejected here.
        assert __is_type__([a, b], Projection) or __is_type__([a, b],
                                                              LayerOutput)
        return a

    # LayerOutput inputs build a CONCAT_LAYER; Projection inputs build a
    # CONCAT_PROJ_LAYER instead.
    is_concat_layer = __is_type__(
        reduce(__reduce_concat_type__, map(type, input)), LayerOutput)
    layer_type = (LayerType.CONCAT_LAYER
                  if is_concat_layer else LayerType.CONCAT_PROJ_LAYER)
    if layer_type == LayerType.CONCAT_LAYER:
        assert not bias_attr
    Layer(
        name=name,
        type=layer_type,
        inputs=[x.name for x in input] if is_concat_layer else input,
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    # Output size is the sum of input sizes; unknown (None) if any is unknown.
    sz = 0
    for each_input in input:
        if each_input.size is not None:
            sz += each_input.size
        else:
            sz = None
            break
    return LayerOutput(
        name,
        layer_type=layer_type,
        parents=input if is_concat_layer else [x.origin for x in input],
        activation=act,
        size=sz)
def memory(name,
           size,
           is_seq=False,
           boot_layer=None,
           boot_bias=None,
           boot_bias_active_type=None,
           boot_with_const_id=None):
    """
    A memory layer references the output of the layer named :code:`name`
    at the previous time step inside a recurrent group.
    The first time step's value is, in order of precedence: the boot layer's
    output if :code:`boot_layer` is given; an id slot holding
    :code:`boot_with_const_id` if that is given; the (activated) boot bias if
    :code:`boot_bias` is given; otherwise zero. Later time steps always read
    the previous step's output of the same-named layer.
    :param name: memory's name.
    :type name: basestring
    :param size: size of memory.
    :type size: int
    :param is_seq: whether the boot layer is a sequence.
    :type is_seq: bool
    :param boot_layer: boot layer of memory.
    :type boot_layer: LayerOutput|None
    :param boot_bias: boot layer's bias.
    :type boot_bias: ParameterAttribute|None
    :param boot_bias_active_type: boot layer's activation type.
    :type boot_bias_active_type: BaseActivation
    :param boot_with_const_id: constant id used to boot the memory.
    :type boot_with_const_id: int
    :return: LayerOutput object which is a memory.
    :rtype: LayerOutput
    """
    # Boot-bias activation defaults to linear.
    if boot_bias_active_type is None:
        boot_bias_active_type = LinearActivation()
    assert boot_bias is None or isinstance(boot_bias, ParameterAttribute)
    if isinstance(boot_bias, ParameterAttribute):
        boot_bias = ParamAttr.to_bias(boot_bias)
    assert boot_layer is None or isinstance(boot_layer, LayerOutput)
    boot_name = boot_layer.name if boot_layer is not None else None
    agent_name = Memory(name, size, is_seq, boot_name, boot_bias,
                        boot_bias_active_type.name, boot_with_const_id)
    return LayerOutput(
        name=agent_name,
        size=size,
        layer_type=LayerType.MEMORY,
        parents=[boot_layer] if boot_layer is not None else None)
@wrap_bias_attr_default()
@wrap_act_default(
    param_names=['gate_act', 'state_act'], act=SigmoidActivation())
@wrap_act_default(act=TanhActivation())
@wrap_name_default('lstm_step')
@layer_support()
def lstm_step_layer(input,
                    state,
                    size,
                    act=None,
                    name=None,
                    gate_act=None,
                    state_act=None,
                    bias_attr=None,
                    layer_attr=None):
    """
    LSTM Step Layer, used inside recurrent_group. The lstm equations are:
    .. math::
        i_t & = \\sigma(W_{xi}x_{t} + W_{hi}h_{t-1} + W_{ci}c_{t-1} + b_i)
        f_t & = \\sigma(W_{xf}x_{t} + W_{hf}h_{t-1} + W_{cf}c_{t-1} + b_f)
        c_t & = f_tc_{t-1} + i_t tanh (W_{xc}x_t+W_{hc}h_{t-1} + b_c)
        o_t & = \\sigma(W_{xo}x_{t} + W_{ho}h_{t-1} + W_{co}c_t + b_o)
        h_t & = o_t tanh(c_t)
    The input of the step is :math:`Wx_t + Wh_{t-1}`; compute it beforehand
    with :code:`mixed_layer` / :code:`full_matrix_projection`. The state is
    :math:`c_{t-1}`, so the layer itself only evaluates
    :math:`i_t = \\sigma(input + W_{ci}c_{t-1} + b_i)` and friends.
    Two outputs are produced: the default :math:`h_t`, plus :math:`o_t`
    named 'state' (extract it with :code:`get_output_layer`).
    :param name: Layer's name.
    :type name: basestring
    :param size: Layer's size. NOTE: must equal :code:`input.size/4` and
                 :code:`state.size`.
    :type size: int
    :param input: input layer. :math:`Wx_t + Wh_{t-1}`
    :type input: LayerOutput
    :param state: State Layer. :math:`c_{t-1}`
    :type state: LayerOutput
    :param act: Activation type. Default is tanh.
    :type act: BaseActivation
    :param gate_act: Gate Activation Type. Default is sigmoid, and should be
                     sigmoid only.
    :type gate_act: BaseActivation
    :param state_act: State Activation Type. Default is sigmoid, and should
                      be sigmoid only.
    :type state_act: BaseActivation
    :param bias_attr: Bias Attribute.
    :type bias_attr: ParameterAttribute
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Assemble the config-layer keyword arguments, then emit the layer.
    layer_config = dict(
        name=name,
        type=LayerType.LSTM_STEP_LAYER,
        active_type=act.name,
        active_gate_type=gate_act.name,
        active_state_type=state_act.name,
        bias=ParamAttr.to_bias(bias_attr),
        size=size,
        inputs=[input.name, state.name])
    layer_config.update(ExtraLayerAttribute.to_kwargs(layer_attr))
    Layer(**layer_config)
    return LayerOutput(
        name=name,
        layer_type=LayerType.LSTM_STEP_LAYER,
        parents=[input, state],
        activation=act,
        size=size,
        outputs=['default', 'state'])
@wrap_bias_attr_default()
@wrap_act_default(param_names=['gate_act'], act=SigmoidActivation())
@wrap_act_default(act=TanhActivation())
@wrap_name_default('gru_step')
@layer_support()
def gru_step_layer(input,
                   output_mem,
                   size=None,
                   act=None,
                   name=None,
                   gate_act=None,
                   bias_attr=None,
                   layer_attr=None):
    """
    GRU Step Layer, used inside recurrent_group.
    :param input: input layer; its size must be divisible by 3 (one third
                  per GRU projection block — TODO confirm exact layout).
    :type input: LayerOutput
    :param output_mem: memory of this layer's previous-time-step output.
    :type output_mem: LayerOutput
    :param size: layer size; defaults to :code:`input.size / 3` when omitted.
    :type size: int|None
    :param act: output activation. Default is tanh.
    :type act: BaseActivation
    :param name: layer name.
    :type name: basestring
    :param gate_act: gate activation. Default is sigmoid.
    :type gate_act: BaseActivation
    :param bias_attr: bias attribute.
    :type bias_attr: ParameterAttribute
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert input.size % 3 == 0
    if size is None:
        size = input.size / 3
    Layer(
        name=name,
        type=LayerType.GRU_STEP_LAYER,
        inputs=[input.name, output_mem.name],
        bias=ParamAttr.to_bias(bias_attr),
        size=size,
        active_type=act.name,
        active_gate_type=gate_act.name,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.GRU_STEP_LAYER,
        parents=[input, output_mem],
        size=size,
        activation=act)
@wrap_name_default()
@layer_support()
def get_output_layer(input, arg_name, name=None, layer_attr=None):
    """
    Get layer's output by name. In PaddlePaddle, a layer might return multiple
    values, but returns one layer's output. If the user wants to use another
    output besides the default one, please use get_output_layer first to get
    the output from input.
    :param name: Layer's name.
    :type name: basestring
    :param input: get output layer's input. And this layer should contains
                  multiple outputs.
    :type input: LayerOutput
    :param arg_name: Output name from input.
    :type arg_name: basestring
    :param layer_attr: Layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Fail early, with a readable message, when the requested output does not
    # exist on the input layer. (Message fixed: was "Get Output From an not
    # existed input ... which not in ...".)
    assert arg_name in input.outputs, \
        'Get output from a non-existent input. The requested output name' \
        ' is %s, which is not in %s' % (arg_name, ",".join(input.outputs))
    Layer(
        name=name,
        type=LayerType.GET_OUTPUT_LAYER,
        inputs=[Input(
            input.name, input_layer_argument=arg_name)],
        size=input.size,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.GET_OUTPUT_LAYER,
        parents=[input],
        size=input.size)
@wrap_name_default()
@wrap_act_default()
@wrap_bias_attr_default()
@wrap_param_attr_default()
@layer_support()
def recurrent_layer(input,
                    act=None,
                    bias_attr=None,
                    param_attr=None,
                    name=None,
                    reverse=False,
                    layer_attr=None):
    """
    Simple recurrent unit layer: a fully-connected layer applied through both
    time and the network.
    For each sequence [start, end] it performs the following computation\:
    .. math::
        out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = start \\\\
        out_{i} = act(in_{i} + out_{i-1} * W) \\ \\ \\text{for} \\ start < i <= end
    If reversed is true, the order is reversed\:
    .. math::
        out_{i} = act(in_{i}) \\ \\ \\text{for} \\ i = end \\\\
        out_{i} = act(in_{i} + out_{i+1} * W) \\ \\ \\text{for} \\ start <= i < end
    :param input: Input Layer
    :type input: LayerOutput
    :param act: activation.
    :type act: BaseActivation
    :param bias_attr: bias attribute.
    :type bias_attr: ParameterAttribute
    :param param_attr: parameter attribute.
    :type param_attr: ParameterAttribute
    :param name: name of the layer.
    :type name: basestring
    :param reverse: whether to process the sequence in reverse order.
    :type reverse: bool
    :param layer_attr: Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # The recurrent weight is attached to the (single) input connection.
    rnn_input = Input(input.name, **param_attr.attr)
    Layer(
        name=name,
        type=LayerType.RECURRENT_LAYER,
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=rnn_input,
        reversed=reverse,
        **ExtraAttr.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.RECURRENT_LAYER,
        parents=[input],
        size=input.size,
        activation=act,
        reverse=reverse)
class StaticInput(object):
    """
    A read-only memory for recurrent_group: the wrapped layer's output is
    imported unchanged into every time step. It may wrap a sequence or a
    non-sequence layer.
    """
    def __init__(self, input, is_seq=False, size=None):
        assert isinstance(input, LayerOutput)
        # The size must be known, either from the layer or given explicitly.
        assert input.size is not None or size is not None
        if size is not None:
            # NOTE: this writes the size back onto the wrapped layer.
            input.size = size
        self.input = input
        self.is_seq = is_seq
class SubsequenceInput(object):
    """
    Wrapper marking a recurrent_group input whose sequences contain
    sub-sequences.
    The example usage is:
    .. code-block:: python
       input = SubsequenceInput(layer)
    """
    def __init__(self, input):
        assert isinstance(input, LayerOutput)
        # The wrapped layer must carry a known size.
        assert input.size is not None
        self.input = input
@wrap_name_default("recurrent_group")
def recurrent_group(step,
                    input,
                    reverse=False,
                    name=None,
                    targetInlink=None,
                    is_generating=False):
    """
    Recurrent layer group is an extremely flexible recurrent unit in
    PaddlePaddle. As long as the user defines the calculation done within a
    time step, PaddlePaddle will iterate such a recurrent calculation over
    sequence input. This is extremely usefull for attention based model, or
    Neural Turning Machine like models.
    The basic usage (time steps) is:
    .. code-block:: python
       def step(input):
           output = fc_layer(input=layer,
                             size=1024,
                             act=LinearActivation(),
                             bias_attr=False)
           return output
       group = recurrent_group(input=layer,
                               step=step)
    You can see following configs for further usages:
    - time steps: lstmemory_group, paddle/gserver/tests/sequence_layer_group.conf, \
                  demo/seqToseq/seqToseq_net.py
    - sequence steps: paddle/gserver/tests/sequence_nest_layer_group.conf
    :param step: recurrent one time step function.The input of this function is
                 input of the group. The return of this function will be
                 recurrent group's return value.
                 The recurrent group scatter a sequence into time steps. And
                 for each time step, will invoke step function, and return
                 a time step result. Then gather each time step of output into
                 layer group's output.
    :type step: callable
    :param name: recurrent_group's name.
    :type name: basestring
    :param input: Input links array.
                  LayerOutput will be scattered into time steps.
                  SubsequenceInput will be scattered into sequence steps.
                  StaticInput will be imported to each time step, and doesn't change
                  through time. It's a mechanism to access layer outside step function.
    :type input: LayerOutput|StaticInput|SubsequenceInput|list|tuple
    :param reverse: If reverse is set true, the recurrent unit will process the
                    input sequence in a reverse order.
    :type reverse: bool
    :param targetInlink: the input layer which share info with layer group's output
                         Param input specifies multiple input layers. For
                         SubsequenceInput inputs, config should assign one input
                         layer that share info(the number of sentences and the number
                         of words in each sentence) with all layer group's outputs.
                         targetInlink should be one of the layer group's input.
    :type targetInlink: LayerOutput|SubsequenceInput
    :param is_generating: If is generating, none of input type should be LayerOutput;
                          else, for training or testing, one of the input type must
                          be LayerOutput.
    :type is_generating: bool
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    model_type('recurrent_nn')
    def is_single_input(x):
        # True for any object accepted as a single (non-list) group input.
        return isinstance(x, LayerOutput) or isinstance(x, StaticInput) \
            or isinstance(x, SubsequenceInput)
    if is_single_input(input):
        input = [input]
    assert isinstance(input, collections.Sequence)
    def is_in_links(x):
        # In-links are the inputs scattered over time/sequence steps;
        # StaticInput is excluded (it is imported whole into every step).
        return isinstance(x, LayerOutput) or isinstance(x, SubsequenceInput)
    in_links = filter(is_in_links, input)
    def targetInlink_in_inlinks():
        # Check that targetInlink (when given) is one of the in-links.
        for inlink in in_links:
            if isinstance(inlink, SubsequenceInput):
                if targetInlink == inlink.input:
                    return True
            elif targetInlink == inlink:
                return True
        return False
    assert (targetInlink == None or targetInlink_in_inlinks())
    targetInlinkName = None if targetInlink == None \
        else targetInlink.name if isinstance(targetInlink, LayerOutput) \
        else targetInlink.input.name
    # Mutable cell so the closure below can flag sub-sequence inputs.
    contains_sub_seq = [False]
    def map_in_links(x):
        # Convert an in-link object to its config representation.
        if isinstance(x, SubsequenceInput):
            contains_sub_seq[0] = True
            return Link(name=x.input.name, has_subseq=True)
        else:
            return x.name
    RecurrentLayerGroupWithoutOutLinksBegin(
        name=name,
        in_links=map(map_in_links, in_links),
        seq_reversed=reverse,
        target_inlinkname=targetInlinkName)
    in_args = []
    has_LayerOutput = False
    for each_input in input:
        assert is_single_input(each_input)
        if isinstance(each_input, LayerOutput):
            in_args.append(each_input)
            has_LayerOutput = True
        elif isinstance(each_input, SubsequenceInput):
            in_args.append(each_input.input)
            has_LayerOutput = True
        else:
            # StaticInput: expose it inside the group through a boot memory
            # plus an identity mixed layer so each step reads the same value.
            mem_name = "__%s_memory__" % each_input.input.name
            mem = memory(
                name=mem_name,
                is_seq=each_input.is_seq,
                size=each_input.input.size,
                boot_layer=each_input.input)
            with mixed_layer(
                    name=mem_name,
                    size=each_input.input.size,
                    act=IdentityActivation()) as mix:
                mix += identity_projection(mem)
            in_args.append(mem)
    # Generation mode forbids scattered LayerOutput inputs; training/testing
    # requires at least one.
    assert (is_generating != has_LayerOutput)
    layer_outs = step(*in_args)
    if isinstance(layer_outs, LayerOutput):
        layer_outs = [layer_outs]
    for ot in layer_outs:
        assert isinstance(ot, LayerOutput)
        ot.reverse = reverse
        if contains_sub_seq[0]:
            RecurrentLayerGroupSetOutLink(Link(ot.name, has_subseq=True))
        else:
            RecurrentLayerGroupSetOutLink(ot.name)
    RecurrentLayerGroupEnd(name=name)
    # Unwrap a single output for caller convenience.
    if len(layer_outs) == 1:
        return layer_outs[0]
    else:
        return layer_outs
class BaseGeneratedInput(object):
    """
    Base class for inputs that are generated step by step during beam search.
    Subclasses must implement :code:`before_real_step` and
    :code:`after_real_step`; :code:`bos_id`/:code:`eos_id` are filled in
    later (by beam_search).
    """
    def __init__(self):
        # Begin/end-of-sequence ids; unset until assigned by the caller.
        self.eos_id = None
        self.bos_id = None
    def before_real_step(self):
        raise NotImplementedError()
    def after_real_step(self, *args):
        raise NotImplementedError()
class GeneratedInput(BaseGeneratedInput):
    """
    Generated input that feeds back the embedding of the previously
    predicted id at each beam-search step.
    """
    def __init__(self, size, embedding_name, embedding_size):
        super(GeneratedInput, self).__init__()
        self.size = size
        self.embedding_name = embedding_name
        self.embedding_size = embedding_size
    def before_real_step(self):
        # Memory of the last predicted id, bootstrapped with the <bos> id.
        predict_id = memory(
            name='__beam_search_predict__',
            size=self.size,
            boot_with_const_id=self.bos_id)
        return embedding_layer(
            input=predict_id,
            size=self.embedding_size,
            param_attr=ParamAttr(name=self.embedding_name))
    def after_real_step(self, input):
        # Pick the argmax id of the step's prediction.
        return maxid_layer(input=input, name='__beam_search_predict__')
@wrap_name_default()
def maxid_layer(input, name=None, layer_attr=None):
    """
    A layer emitting, for each sample, the id of its maximal value.
    The result is stored in output.ids.
    The example usage is:
    .. code-block:: python
       maxid = maxid_layer(input=layer)
    :param input: Input layer name.
    :type input: LayerOutput
    :param name: Layer name.
    :type name: basestring
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    layer_conf = Layer(
        name=name,
        type='maxid',
        inputs=[input.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.MAXID_LAYER,
        parents=[input],
        size=layer_conf.config.size)
@wrap_name_default()
def out_prod_layer(input1, input2, name=None, layer_attr=None):
    """
    A layer computing the outer product of two vectors; the result is a
    matrix of size(input1) x size(input2).
    The example usage is:
    .. code-block:: python
       out_prod = out_prod_layer(input1=vec1, input2=vec2)
    :param name: Layer name.
    :type name: basestring
    :param input1: The first input layer name.
    :type input1: LayerOutput
    :param input2: The second input layer name.
    :type input2: LayerOutput
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input1, LayerOutput)
    assert isinstance(input2, LayerOutput)
    layer_conf = Layer(
        name=name,
        type=LayerType.OUT_PROD_LAYER,
        inputs=[input1.name, input2.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.OUT_PROD_LAYER,
        parents=[input1, input2],
        size=layer_conf.config.size)
@wrap_name_default()
def eos_layer(input, eos_id, name=None, layer_attr=None):
    """
    A layer checking EOS for each sample:
    - output_id = (input_id == conf.eos_id)
    The result is stored in output\_.ids and is used by recurrent layer
    groups.
    The example usage is:
    .. code-block:: python
       eos = eos_layer(input=layer, eos_id=id)
    :param name: Layer name.
    :type name: basestring
    :param input: Input layer name.
    :type input: LayerOutput
    :param eos_id: end id of sequence
    :type eos_id: int
    :param layer_attr: extra layer attributes.
    :type layer_attr: ExtraLayerAttribute.
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    layer_conf = Layer(
        name=name,
        type=LayerType.EOSID_LAYER,
        eos_id=eos_id,
        inputs=[input.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name=name,
        layer_type=LayerType.EOSID_LAYER,
        parents=[input],
        size=layer_conf.config.size)
@wrap_name_default()
def beam_search(step,
                input,
                bos_id,
                eos_id,
                beam_size,
                max_length=500,
                name=None,
                num_results_per_sample=None):
    """
    Beam search is a heuristic search algorithm used in sequence generation.
    It explores a graph by expanding the most promising nodes in a limited set
    to maintain tractability.
    The example usage is:
    .. code-block:: python
       def rnn_step(input):
           last_time_step_output = memory(name='rnn', size=512)
           with mixed_layer(size=512, name='rnn') as simple_rnn:
               simple_rnn += full_matrix_projection(input)
               simple_rnn += last_time_step_output
           return simple_rnn
       beam_gen = beam_search(name="decoder",
                              step=rnn_step,
                              input=[StaticInput(encoder_last)],
                              bos_id=0,
                              eos_id=1,
                              beam_size=5)
    Please see the following demo for more details:
    - machine translation : demo/seqToseq/translation/gen.conf \
                            demo/seqToseq/seqToseq_net.py
    :param name: Name of the recurrent unit that generates sequences.
    :type name: base string
    :param step: A callable function that defines the calculation in a time
                 step, and it is applied to sequences with arbitrary length by
                 sharing a same set of weights.
                 You can refer to the first parameter of recurrent_group, or
                 demo/seqToseq/seqToseq_net.py for more details.
    :type step: callable
    :param input: Input data for the recurrent unit
    :type input: list
    :param bos_id: Index of the start symbol in the dictionary. The start symbol
                   is a special token for NLP task, which indicates the
                   beginning of a sequence. In the generation task, the start
                   symbol is essential, since it is used to initialize the RNN
                   internal state.
    :type bos_id: int
    :param eos_id: Index of the end symbol in the dictionary. The end symbol is
                   a special token for NLP task, which indicates the end of a
                   sequence. The generation process will stop once the end
                   symbol is generated, or a pre-defined max iteration number
                   is exceeded.
    :type eos_id: int
    :param max_length: Max generated sequence length.
    :type max_length: int
    :param beam_size: Beam search for sequence generation is an iterative search
                      algorithm. To maintain tractability, every iteration only
                      only stores a predetermined number, called the beam_size,
                      of the most promising next words. The greater the beam
                      size, the fewer candidate words are pruned.
    :type beam_size: int
    :param num_results_per_sample: Number of the generated results per input
                                   sequence. This number must always be less than
                                   beam size.
    :type num_results_per_sample: int
    :return: The generated word index.
    :rtype: LayerOutput
    """
    if num_results_per_sample is None:
        num_results_per_sample = beam_size
    if num_results_per_sample > beam_size:
        logger.warning("num_results_per_sample should be less than beam_size")
    # Accept a bare input and wrap it into a list.
    if isinstance(input, StaticInput) or isinstance(input, BaseGeneratedInput):
        input = [input]
    generated_input_index = -1
    real_input = []
    # Exactly one input must be a BaseGeneratedInput; the remaining
    # StaticInputs are passed straight through to the recurrent group.
    for i, each_input in enumerate(input):
        assert isinstance(each_input, StaticInput) or isinstance(
            each_input, BaseGeneratedInput)
        if isinstance(each_input, BaseGeneratedInput):
            assert generated_input_index == -1
            generated_input_index = i
        else:
            real_input.append(each_input)
    assert generated_input_index != -1
    gipt = input[generated_input_index]
    assert isinstance(gipt, BaseGeneratedInput)
    gipt.bos_id = bos_id
    gipt.eos_id = eos_id
    def __real_step__(*args):
        # Wraps the user's step function: installs the generator config,
        # splices in the fed-back prediction at the generated input's
        # position, and attaches the EOS check that stops generation.
        eos_name = "__%s_eos_layer__" % name
        RecurrentLayerGroupSetGenerator(
            Generator(
                eos_layer_name=eos_name,
                max_num_frames=max_length,
                beam_size=beam_size,
                num_results_per_sample=num_results_per_sample))
        args = list(args)
        args.insert(generated_input_index, gipt.before_real_step())
        predict = gipt.after_real_step(step(*args))
        eos_layer(input=predict, eos_id=eos_id, name=eos_name)
        return predict
    tmp = recurrent_group(
        step=__real_step__,
        input=real_input,
        reverse=False,
        name=name,
        is_generating=True)
    return tmp
def __cost_input__(input, label, weight=None):
    """
    Assemble the config inputs and the parent layers shared by cost layers.
    An optional per-sample weight (which must be a DATA layer) is appended.
    """
    parents = [input, label]
    if weight is not None:
        # the weight must come straight from a data layer
        assert weight.layer_type == LayerType.DATA
        parents.append(weight)
    ipts = [Input(each.name) for each in parents]
    return ipts, parents
@wrap_name_default()
@layer_support()
def regression_cost(input, label, weight=None, name=None, layer_attr=None):
    """
    Regression cost layer computing the square error between the network
    prediction and the label.
    :param name: layer name.
    :type name: basestring
    :param input: Network prediction.
    :type input: LayerOutput
    :param label: Data label.
    :type label: LayerOutput
    :param weight: The weight affects the cost, namely the scale of cost.
                   It is an optional argument.
    :type weight: LayerOutput
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    ipts, parents = __cost_input__(input, label, weight)
    Layer(
        name=name,
        type="square_error",
        inputs=ipts,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.COST, parents=parents, size=1)
@wrap_name_default("cost")
@layer_support()
def classification_cost(input,
                        label,
                        weight=None,
                        name=None,
                        evaluator=classification_error_evaluator,
                        layer_attr=None):
    """
    Classification cost layer (multi-class cross entropy), optionally
    attaching classification evaluators.
    :param name: layer name.
    :type name: basestring
    :param input: input layer name. network output.
    :type input: LayerOutput
    :param label: label layer name. data_layer often.
    :type label: LayerOutput
    :param weight: The weight affects the cost, namely the scale of cost.
                   It is an optional argument.
    :type weight: LayerOutput
    :param evaluator: Evaluator method, or a sequence of them.
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # The prediction must not be raw data and must end with a softmax;
    # the label must be a data layer.
    assert input.layer_type != LayerType.DATA
    assert isinstance(input.activation, SoftmaxActivation)
    assert label.layer_type == LayerType.DATA
    ipts, parents = __cost_input__(input, label, weight)
    Layer(
        name=name,
        type="multi-class-cross-entropy",
        inputs=ipts,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    def __add_evaluator__(e):
        # each evaluator must be a callable explicitly flagged as a
        # classification evaluator
        assert callable(e)
        assert hasattr(e, 'is_evaluator')
        assert isinstance(e.is_evaluator, bool) and e.is_evaluator
        assert hasattr(e, "for_classification")
        assert isinstance(e.for_classification, bool) and e.for_classification
        e(name=e.__name__, input=input, label=label, weight=weight)
    evaluators = evaluator if isinstance(
        evaluator, collections.Sequence) else [evaluator]
    for each_evaluator in evaluators:
        __add_evaluator__(each_evaluator)
    return LayerOutput(name, LayerType.COST, parents=parents, size=1)
def conv_operator(img,
                  filter,
                  filter_size,
                  num_filters,
                  num_channels=None,
                  stride=1,
                  padding=0,
                  filter_size_y=None,
                  stride_y=None,
                  padding_y=None):
    """
    Different from img_conv_layer, conv_op is an Operator usable inside
    mixed_layer. It takes two inputs — the image and the filter kernel — and
    performs convolution. It only supports GPU mode.
    The example usage is:
    .. code-block:: python
       op = conv_operator(img=input1,
                          filter=input2,
                          filter_size=3,
                          num_filters=64,
                          num_channels=64)
    :param img: input image
    :type img: LayerOutput
    :param filter: input filter
    :type filter: LayerOutput
    :param filter_size: The x dimension of a filter kernel.
    :type filter_size: int
    :param filter_size_y: The y dimension of a filter kernel; defaults to
                          filter_size (square kernel).
    :type filter_size_y: int
    :param num_filters: channel of output data.
    :type num_filters: int
    :param num_channels: channel of input data; inferred from img when None.
    :type num_channels: int
    :param stride: The x dimension of the stride.
    :type stride: int
    :param stride_y: The y dimension of the stride; defaults to stride.
    :type stride_y: int
    :param padding: The x dimension of padding.
    :type padding: int
    :param padding_y: The y dimension of padding; defaults to padding.
    :type padding_y: int
    :return: A ConvOperator Object.
    :rtype: ConvOperator
    """
    # y-dimension settings fall back to their x-dimension counterparts
    filter_size_y = filter_size if filter_size_y is None else filter_size_y
    stride_y = stride if stride_y is None else stride_y
    padding_y = padding if padding_y is None else padding_y
    if num_channels is None:
        num_channels = img.num_filters
    assert isinstance(filter, LayerOutput)
    if filter.size is not None:
        # the filter layer must hold all kernel weights
        filter.size = filter_size * filter_size_y * num_filters * num_channels
    conv_conf = Conv(
        filter_size=filter_size,
        padding=padding,
        stride=stride,
        channels=num_channels,
        filter_size_y=filter_size_y,
        padding_y=padding_y,
        stride_y=stride_y,
        groups=1)
    op = ConvOperator(
        input_layer_names=[img.name, filter.name],
        num_filters=num_filters,
        conv_conf=conv_conf)
    op.origin = [img, filter]
    return op
@wrap_param_attr_default()
def conv_projection(input,
                    filter_size,
                    num_filters,
                    num_channels=None,
                    stride=1,
                    padding=0,
                    filter_size_y=None,
                    stride_y=None,
                    padding_y=None,
                    groups=1,
                    param_attr=None):
    """
    ConvProjection with a layer as input. It performs element-wise
    multiplication with weight. Different from img_conv_layer and conv_op,
    conv_projection is a Projection, which can be used in mixed_layer and
    concat_layer. It uses cudnn to implement conv and only supports GPU mode.
    The example usage is:
    .. code-block:: python
       proj = conv_projection(img=input1,
                              filter_size=3,
                              num_filters=64,
                              num_channels=64)
    :param input: input layer
    :type input: LayerOutput
    :param filter_size: The x dimension of a filter kernel, or an
                        (x, y) pair.
    :type filter_size: int|tuple|list
    :param filter_size_y: The y dimension of a filter kernel; defaults from
                          filter_size when None.
    :type filter_size_y: int
    :param num_filters: channel of output data.
    :type num_filters: int
    :param num_channels: channel of input data; inferred from input when None.
    :type num_channels: int
    :param stride: The x dimension of the stride, or an (x, y) pair.
    :type stride: int|tuple|list
    :param stride_y: The y dimension of the stride.
    :type stride_y: int
    :param padding: The x dimension of padding, or an (x, y) pair.
    :type padding: int|tuple|list
    :param padding_y: The y dimension of padding.
    :type padding_y: int
    :param groups: The group number.
    :type groups: int
    :param param_attr: Convolution param attribute. None means default attribute
    :type param_attr: ParameterAttribute
    :return: A DotMulProjection Object.
    :rtype: DotMulProjection
    """
    if num_channels is None:
        # infer the channel count from the input layer
        assert input.num_filters is not None
        num_channels = input.num_filters
    def __xy__(v):
        # A config value is either a scalar (used for both x and y) or an
        # (x, y) pair.
        if isinstance(v, collections.Sequence):
            assert len(v) == 2
            return v[0], v[1]
        return v, v
    if filter_size_y is None:
        filter_size, filter_size_y = __xy__(filter_size)
    if stride_y is None:
        stride, stride_y = __xy__(stride)
    if padding_y is None:
        padding, padding_y = __xy__(padding)
    if param_attr.attr.get('initial_smart'):
        # special initial for conv layers.
        init_w = (2.0 / (filter_size**2 * num_channels))**0.5
        param_attr.attr["initial_mean"] = 0.0
        param_attr.attr["initial_std"] = init_w
        param_attr.attr["initial_strategy"] = 0
        param_attr.attr["initial_smart"] = False
    proj = ConvProjection(
        input_layer_name=input.name,
        num_filters=num_filters,
        conv_conf=Conv(
            filter_size=filter_size,
            padding=padding,
            stride=stride,
            channels=num_channels,
            filter_size_y=filter_size_y,
            padding_y=padding_y,
            stride_y=stride_y,
            groups=groups),
        **param_attr.attr)
    proj.origin = input
    return proj
@wrap_name_default()
@layer_support()
def conv_shift_layer(a, b, name=None, layer_attr=None):
    """
    This layer performs cyclic convolution for two inputs. For example:
    - a[in]: contains M elements.
    - b[in]: contains N elements (N should be odd).
    - c[out]: contains M elements.
    .. math::
        c[i] = \sum_{j=-(N-1)/2}^{(N-1)/2}a_{i+j} * b_{j}
    In this formula:
    - a's index is computed modulo M; negative indices wrap around from the
      end of the array.
    - b's index is computed modulo N, with the same wrap-around rule.
    The example usage is:
    .. code-block:: python
       conv_shift = conv_shift_layer(input=[layer1, layer2])
    :param name: layer name
    :type name: basestring
    :param a: Input layer a.
    :type a: LayerOutput
    :param b: input layer b
    :type b: LayerOutput
    :param layer_attr: layer's extra attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput)
    assert isinstance(b, LayerOutput)
    # the kernel length must be odd when it is known
    assert b.size is None or b.size % 2 == 1
    Layer(
        name=name,
        type=LayerType.CONV_SHIFT_LAYER,
        inputs=[a.name, b.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.CONV_SHIFT_LAYER, parents=[a, b], size=a.size)
@wrap_name_default()
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default(act=LinearActivation())
@layer_support(ERROR_CLIPPING, DROPOUT)
def tensor_layer(a,
                 b,
                 size,
                 act=None,
                 name=None,
                 param_attr=None,
                 bias_attr=None,
                 layer_attr=None):
    """
    This layer performs a tensor operation on two inputs.
    For each sample:
    .. math::
        y_{i} = a * W_{i} * {b^\mathrm{T}}, i=0,1,...,K-1
    In this formula:
    - :math:`a`: the first input contains M elements.
    - :math:`b`: the second input contains N elements.
    - :math:`y_{i}`: the i-th element of y.
    - :math:`W_{i}`: the i-th learned weight, shape is [M, N].
    - :math:`b^\mathrm{T}`: the transpose of :math:`b`.
    The simple usage is:
    .. code-block:: python
       tensor = tensor_layer(a=layer1, b=layer2, size=1000)
    :param name: layer name
    :type name: basestring
    :param a: Input layer a.
    :type a: LayerOutput
    :param b: input layer b.
    :type b: LayerOutput
    :param size: the layer dimension.
    :type size: int.
    :param act: Activation Type. Default is tanh.
    :type act: BaseActivation
    :param param_attr: The Parameter Attribute.
    :type param_attr: ParameterAttribute
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will get a
                      default Bias.
    :type bias_attr: ParameterAttribute|None|Any
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(a, LayerOutput)
    assert isinstance(b, LayerOutput)
    # Only the first input carries the learned parameter attribute.
    tensor_inputs = [Input(a.name, **param_attr.attr), Input(b.name)]
    Layer(
        name=name,
        size=size,
        type=LayerType.TENSOR_LAYER,
        active_type=act.name,
        bias=ParamAttr.to_bias(bias_attr),
        inputs=tensor_inputs,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.TENSOR_LAYER, parents=[a, b], activation=act, size=size)
@wrap_name_default()
@wrap_param_attr_default()
@wrap_bias_attr_default()
@wrap_act_default()
@layer_support()
def selective_fc_layer(input,
                       select,
                       size,
                       act=None,
                       name=None,
                       pass_generation=False,
                       has_selected_colums=True,
                       mul_ratio=0.02,
                       param_attr=None,
                       bias_attr=None,
                       layer_attr=None):
    """
    Selective fully connected layer. Different from fc_layer, the output
    of this layer may be sparse. It requires an additional input to indicate
    several selected columns for output. If the selected columns are not
    specified, selective_fc_layer acts exactly like fc_layer.

    The simple usage is:

    .. code-block:: python

       sel_fc = selective_fc_layer(input=input, size=128, act=TanhActivation())

    :param name: The Layer Name.
    :type name: basestring
    :param input: The input layer.
    :type input: LayerOutput|list|tuple
    :param select: The select layer. The output of select layer should be a
                   sparse binary matrix, and is treated as the mask of the
                   selective fc.
    :type select: LayerOutput
    :param size: The layer dimension.
    :type size: int
    :param act: Activation Type. Default is tanh.
    :type act: BaseActivation
    :param pass_generation: whether the layer is used in sequence generation.
    :type pass_generation: bool
    :param has_selected_colums: whether the select input provides the selected
                                columns. (Note: the misspelling "colums" is the
                                established keyword name and is kept for
                                backward compatibility.)
    :type has_selected_colums: bool
    :param mul_ratio: ratio controlling when a full (dense) matrix
                      multiplication is used instead of the selective one.
    :type mul_ratio: float
    :param param_attr: The Parameter Attribute. May be a single attribute
                       (shared layout) or one attribute per input.
    :type param_attr: ParameterAttribute
    :param bias_attr: The Bias Attribute. If no bias, then pass False or
                      something not type of ParameterAttribute. None will get a
                      default Bias.
    :type bias_attr: ParameterAttribute|None|Any
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Normalize `input` and `param_attr` into parallel lists so that each
    # input gets its own parameter attribute below.
    if isinstance(input, LayerOutput):
        input = [input]
        assert not isinstance(param_attr, collections.Sequence)
        param_attr = [param_attr]
    else:
        if isinstance(param_attr, collections.Sequence):
            assert len(input) == len(param_attr)
        else:
            # Deep-copy so each input owns an independent attribute object.
            param_attr = [copy.deepcopy(param_attr) for _ in range(len(input))]
    assert isinstance(input, collections.Sequence)
    assert isinstance(select, LayerOutput)
    if select.size is not None:
        assert select.size == size
    # The select (mask) layer is appended last and carries no parameters.
    Layer(
        inputs=[
            Input(ipt.name, **attr.attr) for ipt, attr in zip(input, param_attr)
        ] + [select.name],
        name=name,
        type=LayerType.SEL_FC_LAYER,
        size=size,
        bias=ParameterAttribute.to_bias(bias_attr),
        active_type=act.name,
        selective_fc_pass_generation=pass_generation,
        has_selected_colums=has_selected_colums,
        selective_fc_full_mul_ratio=mul_ratio,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.SEL_FC_LAYER,
        list(input) + [select],
        activation=act,
        size=size)
@wrap_name_default()
@layer_support()
def sampling_id_layer(input, name=None, layer_attr=None):
    """
    A layer that samples an id from the multinomial distribution given by the
    input layer — one id per sample.

    The simple usage is:

    .. code-block:: python

       samping_id = sampling_id_layer(input=input)

    :param input: The input layer.
    :type input: LayerOutput
    :param name: The Layer Name.
    :type name: basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    sampling_layer = Layer(
        type=LayerType.SAMPLING_ID_LAYER,
        name=name,
        inputs=[Input(input.name)],
        **extra_attrs)
    # The output size is determined by the config, not by the caller.
    return LayerOutput(
        name, LayerType.SAMPLING_ID_LAYER, input, size=sampling_layer.config.size)
@wrap_name_default()
@layer_support()
def slope_intercept_layer(input,
                          name=None,
                          slope=1.0,
                          intercept=0.0,
                          layer_attr=None):
    """
    A layer applying a slope and an intercept to the input element-wise.
    There is no activation and no weight.

    .. math::
       y = slope * x + intercept

    The simple usage is:

    .. code-block:: python

       scale = slope_intercept_layer(input=input, slope=-1.0, intercept=1.0)

    :param input: The input layer.
    :type input: LayerOutput
    :param name: The Layer Name.
    :type name: basestring
    :param slope: the scale factor.
    :type slope: float
    :param intercept: the offset.
    :type intercept: float
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    Layer(
        type=LayerType.SLOPE_INTERCEPT_LAYER,
        name=name,
        inputs=[Input(input.name)],
        slope=slope,
        intercept=intercept,
        **extra_attrs)
    # Element-wise op: the output size equals the input size.
    return LayerOutput(
        name, LayerType.SLOPE_INTERCEPT_LAYER, input, size=input.size)
@wrap_name_default()
@layer_support()
def linear_comb_layer(weights, vectors, size=None, name=None, layer_attr=None):
    """
    A layer for a weighted sum of vectors. It takes two inputs.

    - Input: size of weights is M
             size of vectors is M*N
    - Output: a vector of size=N

    .. math::
       z(i) = \sum_{j=0}^{M-1} x(j) y(i+Nj)

    where :math:`0 \le i \le N-1`

    Or in matrix notation:

    .. math::
       z = x^\mathrm{T} Y

    In this formula:
      - :math:`x`: weights
      - :math:`y`: vectors.
      - :math:`z`: the output.

    Note that the above computation is for one sample. Multiple samples are
    processed in one batch.

    The simple usage is:

    .. code-block:: python

       linear_comb = linear_comb_layer(weights=weight, vectors=vectors,
                                       size=elem_dim)

    :param weights: The weight layer.
    :type weights: LayerOutput
    :param vectors: The vector layer.
    :type vectors: LayerOutput
    :param size: the dimension of this layer (N). Inferred from the two inputs
                 when omitted.
    :type size: int
    :param name: The Layer Name.
    :type name: basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(weights, LayerOutput) and isinstance(vectors, LayerOutput)
    if vectors.size is not None and weights.size is not None:
        assert vectors.size % weights.size == 0
        # Use floor division: `/` on ints would produce a float under
        # Python 3 (true division), while a layer size must be an int.
        # Under Python 2 the result is identical.
        if size is None:
            size = vectors.size // weights.size
        else:
            assert size == vectors.size // weights.size
    Layer(
        name=name,
        type=LayerType.LINEAR_COMBINATION_LAYER,
        size=size,
        inputs=[Input(weights.name), Input(vectors.name)],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.LINEAR_COMBINATION_LAYER, [weights, vectors], size=size)


# Backward-compatible alias: this layer was formerly named convex_comb_layer.
convex_comb_layer = linear_comb_layer
@wrap_name_default()
@layer_support()
def block_expand_layer(input,
                       block_x=0,
                       block_y=0,
                       stride_x=0,
                       stride_y=0,
                       padding_x=0,
                       padding_y=0,
                       num_channels=None,
                       name=None,
                       layer_attr=None):
    """
    Expand feature map to minibatch matrix.
       - matrix width is: block_y * block_x * num_channels
       - matrix height is: outputH * outputW

    .. math::

       outputH = 1 + (2 * padding_y + imgSizeH - block_y + stride_y - 1) / stride_y

       outputW = 1 + (2 * padding_x + imgSizeW - block_x + stride_x - 1) / stride_x

    The expand method is the same as in ExpandConvLayer, but saves the
    transposed value. After expanding, output.sequenceStartPositions will
    store the timeline. The number of time steps is outputH * outputW and the
    dimension of each time step is block_y * block_x * num_channels. This
    layer can be used after a convolutional neural network, and before a
    recurrent neural network.

    The simple usage is:

    .. code-block:: python

       block_expand = block_expand_layer(input,
                                         num_channels=128,
                                         stride_x=1,
                                         stride_y=1,
                                         block_x=1,
                                         block_y=3)

    :param input: The input layer.
    :type input: LayerOutput
    :param num_channels: The channel number of input layer. Taken from
                         input.num_filters when None.
    :type num_channels: int|None
    :param block_x: The width of sub block.
    :type block_x: int
    :param block_y: The height of sub block.
    :type block_y: int
    :param stride_x: The stride size in horizontal direction.
    :type stride_x: int
    :param stride_y: The stride size in vertical direction.
    :type stride_y: int
    :param padding_x: The padding size in horizontal direction.
    :type padding_x: int
    :param padding_y: The padding size in vertical direction.
    :type padding_y: int
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if num_channels is None:
        # Infer the channel count from the preceding conv layer.
        assert input.num_filters is not None
        num_channels = input.num_filters
    l = Layer(
        name=name,
        inputs=Input(
            input.name,
            block_expand=BlockExpand(
                channels=num_channels,
                block_x=block_x,
                block_y=block_y,
                stride_x=stride_x,
                stride_y=stride_y,
                padding_x=padding_x,
                padding_y=padding_y)),
        type=LayerType.BLOCK_EXPAND,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.BLOCK_EXPAND, parents=[input], size=l.config.size)
@wrap_name_default()
@layer_support()
def maxout_layer(input,
                 groups,
                 num_channels=None,
                 size_x=None,
                 size_y=None,
                 name=None,
                 layer_attr=None):
    """
    A layer to do max out on conv layer output.
      - Input: output of a conv layer.
      - Output: feature map size same as input. Channel is (input channel) / groups.

    So groups should be larger than 1, and the number of channels should be
    divisible by groups.

    Please refer to Paper:
      - Maxout Networks: http://www.jmlr.org/proceedings/papers/v28/goodfellow13.pdf
      - Multi-digit Number Recognition from Street View \
        Imagery using Deep Convolutional Neural Networks: \
        https://arxiv.org/pdf/1312.6082v4.pdf

    The simple usage is:

    .. code-block:: python

       maxout = maxout_layer(input,
                             num_channels=128,
                             groups=4)

    :param input: The input layer.
    :type input: LayerOutput
    :param num_channels: The channel number of input layer. If None will be set
                         automatically from previous output.
    :type num_channels: int|None
    :param groups: The group number of input layer.
    :type groups: int
    :param size_x: conv output width. If None will be set
                   automatically from previous output.
    :type size_x: int|None
    :param size_y: conv output height. If None will be set
                   automatically from previous output.
    :type size_y: int|None
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Maxout is defined over a linear conv output: the max over groups IS the
    # nonlinearity, so the preceding conv must not apply one of its own.
    assert input.layer_type == LayerType.CONV_LAYER
    assert isinstance(input.activation, LinearActivation)
    assert groups > 1
    if num_channels is None:
        assert input.num_filters is not None
        num_channels = input.num_filters
    # Channels are partitioned evenly into groups.
    assert num_channels % groups == 0
    l = Layer(
        name=name,
        inputs=Input(
            input.name, maxout=MaxOut(
                channels=num_channels, groups=groups)),
        type=LayerType.MAXOUT,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.MAXOUT, parents=[input], size=l.config.size)
@wrap_name_default()
@layer_support()
def ctc_layer(input,
              label,
              size=None,
              name=None,
              norm_by_times=False,
              layer_attr=None):
    """
    Connectionist Temporal Classification (CTC) is designed for temporal
    classification tasks. That is, for sequence labeling problems where the
    alignment between the inputs and the target labels is unknown.

    More details can be found by referring to `Connectionist Temporal
    Classification: Labelling Unsegmented Sequence Data with Recurrent
    Neural Networks <http://machinelearning.wustl.edu/mlpapers/paper_files/
    icml2006_GravesFGS06.pdf>`_

    Note:
        Considering the 'blank' label needed by CTC, you need to use
        (num_classes + 1) as the input size. num_classes is the category number.
        And the 'blank' is the last category index. So the size of 'input' layer, such as
        fc_layer with softmax activation, should be num_classes + 1. The size of ctc_layer
        should also be num_classes + 1.

    The simple usage:

    .. code-block:: python

       ctc = ctc_layer(input=input,
                       label=label,
                       size=9055,
                       norm_by_times=True)

    :param input: The input layer.
    :type input: LayerOutput
    :param label: The data layer of label with variable length.
    :type label: LayerOutput
    :param size: category numbers + 1. Inferred from label.size when omitted.
    :type size: int
    :param name: The name of this layer
    :type name: basestring|None
    :param norm_by_times: Whether to normalize by times. False by default.
    :type norm_by_times: bool
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    assert isinstance(label, LayerOutput)
    # Infer (or cross-check) the size: one extra slot for the 'blank' label.
    if label.size is not None:
        if size is not None:
            assert size == label.size + 1
        else:
            size = label.size + 1
    Layer(
        name=name,
        type=LayerType.CTC_LAYER,
        size=size,
        norm_by_times=norm_by_times,
        inputs=[input.name, label.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.CTC_LAYER, [input, label], size=size)
@wrap_name_default()
@layer_support()
def warp_ctc_layer(input,
                   label,
                   size=None,
                   name=None,
                   blank=0,
                   norm_by_times=False,
                   layer_attr=None):
    """
    A layer integrating the open-source `warp-ctc
    <https://github.com/baidu-research/warp-ctc>` library, which is used in
    `Deep Speech 2: End-to-End Speech Recognition in English and Mandarin
    <https://arxiv.org/pdf/1512.02595v1.pdf>`, to compute Connectionist Temporal
    Classification (CTC) loss.

    More details of CTC can be found by referring to `Connectionist Temporal
    Classification: Labelling Unsegmented Sequence Data with Recurrent
    Neural Networks <http://machinelearning.wustl.edu/mlpapers/paper_files/
    icml2006_GravesFGS06.pdf>`_

    Note:
        - Let num_classes represent the category number. Considering the 'blank'
          label needed by CTC, you need to use (num_classes + 1) as the input
          size. Thus, the size of both warp_ctc_layer and 'input' layer should
          be set to num_classes + 1.
        - You can set 'blank' to any value ranged in [0, num_classes], which
          should be consistent with that used in your labels.
        - As a native 'softmax' activation is integrated into the warp-ctc
          library, 'linear' activation is expected instead in the 'input' layer.

    The simple usage:

    .. code-block:: python

       ctc = warp_ctc_layer(input=input,
                            label=label,
                            size=1001,
                            blank=1000,
                            norm_by_times=False)

    :param input: The input layer.
    :type input: LayerOutput
    :param label: The data layer of label with variable length.
    :type label: LayerOutput
    :param size: category numbers + 1. Inferred from label.size when omitted.
    :type size: int
    :param name: The name of this layer. It is optional.
    :type name: basestring|None
    :param blank: the 'blank' label used in ctc
    :type blank: int
    :param norm_by_times: Whether to normalize by times. False by default.
    :type norm_by_times: bool
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    assert isinstance(label, LayerOutput)
    # Infer (or cross-check) the size: one extra slot for the 'blank' label.
    if label.size is not None:
        if size is not None:
            assert size == label.size + 1
        else:
            size = label.size + 1
    Layer(
        name=name,
        type=LayerType.WARP_CTC_LAYER,
        size=size,
        blank=blank,
        norm_by_times=norm_by_times,
        inputs=[input.name, label.name],
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.WARP_CTC_LAYER, parents=[input, label], size=size)
@wrap_name_default()
@wrap_param_attr_default()
@layer_support()
def crf_layer(input,
              label,
              size=None,
              weight=None,
              param_attr=None,
              name=None,
              layer_attr=None):
    """
    A layer for calculating the cost of a sequential conditional random
    field model.

    The simple usage:

    .. code-block:: python

       crf = crf_layer(input=input,
                       label=label,
                       size=label_dim)

    :param input: The first input layer is the feature.
    :type input: LayerOutput
    :param label: The second input layer is label.
    :type label: LayerOutput
    :param size: The category number. Inferred from input.size when omitted.
    :type size: int
    :param weight: The third layer is "weight" of each sample, which is an
                  optional argument.
    :type weight: LayerOutput
    :param param_attr: Parameter attribute. None means default attribute
    :type param_attr: ParameterAttribute
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    assert isinstance(label, LayerOutput)
    assert weight is None or isinstance(weight, LayerOutput)
    # Feature dimension must match the label dimension (the category count).
    if input.size is not None and label.size is not None:
        assert input.size == label.size
        if size is None:
            size = input.size
        else:
            assert size == input.size
    # Only the feature input carries the CRF transition parameters.
    ipts = [Input(input.name, **param_attr.attr), Input(label.name)]
    if weight is not None:
        ipts.append(Input(weight.name))
    Layer(
        name=name,
        type=LayerType.CRF_LAYER,
        size=size,
        inputs=ipts,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    parents = [input, label]
    if weight is not None:
        parents.append(weight)
    # The size for LayerOutput means the dimension of the output.
    # It's different from the meaning of crf layer, which is the number of
    # classes.
    return LayerOutput(name, LayerType.CRF_LAYER, parents, size=1)
@wrap_name_default()
@wrap_param_attr_default()
@layer_support()
def crf_decoding_layer(input,
                       size,
                       label=None,
                       param_attr=None,
                       name=None,
                       layer_attr=None):
    """
    A layer for calculating the decoding sequence of a sequential conditional
    random field model. The decoding sequence is stored in output.ids.
    If a second input is provided, it is treated as the ground-truth label, and
    this layer will also calculate error. output.value[i] is 1 for incorrect
    decoding or 0 for correct decoding.

    :param input: The first input layer.
    :type input: LayerOutput
    :param size: size of this layer (the number of classes).
    :type size: int
    :param label: None or ground-truth label.
    :type label: LayerOutput or None
    :param param_attr: Parameter attribute. None means default attribute
    :type param_attr: ParameterAttribute
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer config.
    :type layer_attr: ExtraLayerAttribute|None
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    assert label is None or isinstance(label, LayerOutput)
    # Only the feature input carries the CRF transition parameters; the label
    # (when given) turns the layer into an error-reporting decoder.
    ipts = [Input(input.name, **param_attr.attr)]
    if label is not None:
        ipts.append(Input(label.name))
    Layer(
        name=name,
        type=LayerType.CRF_DECODING_LAYER,
        size=size,
        inputs=ipts,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    parents = [input]
    if label is not None:
        parents.append(label)
    # The size for LayerOutput means the dimension of the output.
    # It's different from the meaning of crf layer, which is the number of
    # classes.
    return LayerOutput(name, LayerType.CRF_DECODING_LAYER, parents, size=1)
@wrap_bias_attr_default(has_bias=True)
@wrap_name_default()
@layer_support()
def nce_layer(input,
              label,
              num_classes,
              weight=None,
              num_neg_samples=10,
              neg_distribution=None,
              name=None,
              bias_attr=None,
              layer_attr=None):
    """
    Noise-contrastive estimation.
    Implements the method in the following paper:
    A fast and simple algorithm for training neural probabilistic language models.

    The example usage is:

    .. code-block:: python

       cost = nce_layer(input=layer1, label=layer2, weight=layer3,
                        num_classes=3, neg_distribution=[0.1,0.3,0.6])

    :param name: layer name
    :type name: basestring
    :param input: input layers. It could be a LayerOutput or list/tuple of LayerOutput.
    :type input: LayerOutput|list|tuple|collections.Sequence
    :param label: label layer
    :type label: LayerOutput
    :param weight: weight layer, can be None(default)
    :type weight: LayerOutput
    :param num_classes: number of classes.
    :type num_classes: int
    :param num_neg_samples: number of negative samples. Default is 10.
    :type num_neg_samples: int
    :param neg_distribution: The distribution for generating the random negative labels.
                             A uniform distribution will be used if not provided.
                             If not None, its length must be equal to num_classes,
                             and its entries must sum (approximately) to 1.
    :type neg_distribution: list|tuple|collections.Sequence|None
    :param bias_attr: Bias parameter attribute. True if no bias.
    :type bias_attr: ParameterAttribute|None|False
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    if isinstance(input, LayerOutput):
        input = [input]
    assert isinstance(input, collections.Sequence)
    assert isinstance(label, LayerOutput)
    assert label.layer_type == LayerType.DATA
    if neg_distribution is not None:
        assert isinstance(neg_distribution, collections.Sequence)
        assert len(neg_distribution) == num_classes
        # Compare with a tolerance instead of exact equality: the
        # distribution is built from floats, so e.g. [0.1, 0.3, 0.6] may not
        # sum to exactly 1.0 and an exact check would spuriously fail.
        assert abs(sum(neg_distribution) - 1.0) < 1e-5
    # Interleave inputs and parents: every feature input, then the label,
    # then the optional per-sample weight.
    ipts_for_layer = []
    parents = []
    for each_input in input:
        assert isinstance(each_input, LayerOutput)
        ipts_for_layer.append(each_input.name)
        parents.append(each_input)
    ipts_for_layer.append(label.name)
    parents.append(label)
    if weight is not None:
        assert isinstance(weight, LayerOutput)
        assert weight.layer_type == LayerType.DATA
        ipts_for_layer.append(weight.name)
        parents.append(weight)
    l = Layer(
        name=name,
        type=LayerType.NCE_LAYER,
        num_classes=num_classes,
        neg_sampling_dist=neg_distribution,
        num_neg_samples=num_neg_samples,
        inputs=ipts_for_layer,
        bias=ParamAttr.to_bias(bias_attr),
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.NCE_LAYER, parents=parents, size=l.config.size)
"""
The following are cost layers.
"""
@wrap_name_default()
@layer_support()
def rank_cost(left,
              right,
              label,
              weight=None,
              name=None,
              coeff=1.0,
              layer_attr=None):
    """
    A cost Layer for learning to rank using gradient descent. Details can refer
    to `papers <http://research.microsoft.com/en-us/um/people/cburges/papers/
    ICML_ranking.pdf>`_.
    This layer contains at least three inputs. The weight is an optional
    argument, which affects the cost.

    .. math::

       C_{i,j} & = -\\tilde{P_{ij}} * o_{i,j} + log(1 + e^{o_{i,j}})

       o_{i,j} & = o_i - o_j

       \\tilde{P_{i,j}} & = \\{0, 0.5, 1\\} \ or \ \\{0, 1\\}

    In this formula:
      - :math:`C_{i,j}` is the cross entropy cost.
      - :math:`\\tilde{P_{i,j}}` is the label. 1 means positive order
        and 0 means reverse order.
      - :math:`o_i` and :math:`o_j`: the left output and right output.
        Their dimension is one.

    The simple usage:

    .. code-block:: python

       cost = rank_cost(left=out_left,
                        right=out_right,
                        label=label)

    :param left: The first input, the size of this layer is 1.
    :type left: LayerOutput
    :param right: The right input, the size of this layer is 1.
    :type right: LayerOutput
    :param label: Label is 1 or 0, means positive order and reverse order.
    :type label: LayerOutput
    :param weight: The weight affects the cost, namely the scale of cost.
                   It is an optional argument.
    :type weight: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # All three mandatory inputs must be scalars (one value per sample).
    assert left.size == 1
    assert right.size == 1
    assert label.size == 1

    # The optional per-sample weight is appended as a fourth input.
    ipts = [left.name, right.name, label.name]
    parents = [left, right, label]
    if weight is not None:
        ipts.append(weight.name)
        parents.append(weight)

    Layer(
        name=name,
        type=LayerType.RANK_COST,
        inputs=ipts,
        coeff=coeff,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(name, LayerType.RANK_COST, parents=parents, size=1)
@wrap_name_default()
@layer_support()
def lambda_cost(input,
                score,
                name=None,
                NDCG_num=5,
                max_sort_size=-1,
                layer_attr=None):
    """
    lambdaCost for lambdaRank LTR approach.

    The simple usage:

    .. code-block:: python

       cost = lambda_cost(input=input,
                          score=score,
                          NDCG_num=8,
                          max_sort_size=-1)

    :param input: Samples of the same query should be loaded as sequence.
    :type input: LayerOutput
    :param score: The 2nd input. Score of each sample.
    :type score: LayerOutput
    :param NDCG_num: The size of NDCG (Normalized Discounted Cumulative Gain),
                     e.g., 5 for NDCG@5. It must be less than or equal to the
                     minimum size of lists.
    :type NDCG_num: int
    :param max_sort_size: The size of partial sorting in calculating gradient.
                          If max_sort_size = -1, then for each list, the
                          algorithm will sort the entire list to get gradient.
                          In other cases, max_sort_size must be greater than or
                          equal to NDCG_num. And if max_sort_size is greater
                          than the size of a list, the algorithm will sort the
                          entire list to get the gradient.
    :type max_sort_size: int
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput) and isinstance(score, LayerOutput)
    # The score input is a scalar per sample.
    if score.size is not None:
        assert score.size == 1
    Layer(
        name=name,
        type=LayerType.LAMBDA_COST,
        inputs=[input.name, score.name],
        NDCG_num=NDCG_num,
        max_sort_size=max_sort_size,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name, LayerType.LAMBDA_COST, parents=[input, score], size=1)
@wrap_name_default()
@layer_support()
def cross_entropy(input, label, name=None, coeff=1.0, layer_attr=None):
    """
    A loss layer for multi-class cross entropy.

    .. code-block:: python

       cost = cross_entropy(input=input_layer,
                            label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param label: The input label.
    :type label: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    Layer(
        type=LayerType.CROSS_ENTROPY,
        name=name,
        coeff=coeff,
        inputs=[input.name, label.name],
        **extra_attrs)
    # Cost layers always emit a single scalar per sample.
    return LayerOutput(
        name, LayerType.CROSS_ENTROPY, parents=[input, label], size=1)
@wrap_name_default()
@layer_support()
def cross_entropy_with_selfnorm(input,
                                label,
                                name=None,
                                coeff=1.0,
                                softmax_selfnorm_alpha=0.1,
                                layer_attr=None):
    """
    A loss layer for multi-class cross entropy with self-normalization.

    .. code-block:: python

       cost = cross_entropy_with_selfnorm(input=input_layer,
                                          label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param label: The input label.
    :type label: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param softmax_selfnorm_alpha: The scale factor affects the cost.
    :type softmax_selfnorm_alpha: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    Layer(
        type=LayerType.CROSS_ENTROPY_WITH_SELFNORM,
        name=name,
        coeff=coeff,
        softmax_selfnorm_alpha=softmax_selfnorm_alpha,
        inputs=[input.name, label.name],
        **extra_attrs)
    # Cost layers always emit a single scalar per sample.
    return LayerOutput(
        name,
        LayerType.CROSS_ENTROPY_WITH_SELFNORM,
        parents=[input, label],
        size=1)
@wrap_name_default()
@layer_support()
def sum_cost(input, name=None, layer_attr=None):
    """
    A loss layer which computes the sum of the input as the loss.

    .. code-block:: python

       cost = sum_cost(input=input_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    Layer(
        type=LayerType.SUM_COST,
        name=name,
        inputs=[input.name],
        **extra_attrs)
    # Cost layers always emit a single scalar per sample.
    return LayerOutput(name, LayerType.SUM_COST, parents=[input], size=1)
@wrap_name_default()
@layer_support()
def huber_cost(input, label, name=None, coeff=1.0, layer_attr=None):
    """
    A loss layer for Huber loss.

    .. code-block:: python

       cost = huber_cost(input=input_layer,
                         label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param label: The input label.
    :type label: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    assert isinstance(input, LayerOutput)
    # When the input size is known it must be a scalar per sample.
    if input.size is not None:
        assert input.size == 1
    extra_attrs = ExtraLayerAttribute.to_kwargs(layer_attr)
    Layer(
        type=LayerType.HUBER,
        name=name,
        coeff=coeff,
        inputs=[input.name, label.name],
        **extra_attrs)
    return LayerOutput(name, LayerType.HUBER, parents=[input, label], size=1)
@wrap_name_default()
@layer_support()
def multi_binary_label_cross_entropy(input,
                                     label,
                                     name=None,
                                     coeff=1.0,
                                     layer_attr=None):
    """
    A loss layer for multi binary label cross entropy.

    .. code-block:: python

       cost = multi_binary_label_cross_entropy(input=input_layer,
                                               label=label_layer)

    :param input: The first input layer.
    :type input: LayerOutput
    :param label: The input label.
    :type label: LayerOutput
    :param name: The name of this layer. It is optional.
    :type name: None|basestring
    :param coeff: The coefficient affects the gradient in the backward.
    :type coeff: float
    :param layer_attr: Extra Layer Attribute.
    :type layer_attr: ExtraLayerAttribute
    :return: LayerOutput object.
    :rtype: LayerOutput
    """
    # Warn (but do not fail) when the input activation is not sigmoid: this
    # cost expects per-label probabilities in [0, 1].
    if input.activation is None or \
            not isinstance(input.activation, SigmoidActivation):
        logger.log(
            logging.WARN,
            "%s is not recommend for multi_binary_label_cross_entropy's activation, "
            "maybe the sigmoid is better" % repr(input.activation))
    Layer(
        name=name,
        type=LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY,
        inputs=[input.name, label.name],
        coeff=coeff,
        **ExtraLayerAttribute.to_kwargs(layer_attr))
    return LayerOutput(
        name,
        LayerType.MULTI_BIN_LABEL_CROSS_ENTROPY,
        parents=[input, label],
        size=1)
|
{
"content_hash": "8b21758403bdb9c473694e5bdda1c232",
"timestamp": "",
"source": "github",
"line_count": 4754,
"max_line_length": 93,
"avg_line_length": 32.56583929322676,
"alnum_prop": 0.6056207934477903,
"repo_name": "beckett1124/Paddle",
"id": "9b6e5774bc82dc05e14a2565fa9cce98764adf04",
"size": "155428",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/trainer_config_helpers/layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "225634"
},
{
"name": "C++",
"bytes": "3017072"
},
{
"name": "CMake",
"bytes": "119561"
},
{
"name": "CSS",
"bytes": "21642"
},
{
"name": "Cuda",
"bytes": "482425"
},
{
"name": "HTML",
"bytes": "9018"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "41202"
},
{
"name": "Python",
"bytes": "1097788"
},
{
"name": "Shell",
"bytes": "78197"
}
],
"symlink_target": ""
}
|
"""Tests for _adapter._low."""
import time
import unittest
from grpc._adapter import _low
_STREAM_LENGTH = 300
_TIMEOUT = 5
_AFTER_DELAY = 2
_FUTURE = time.time() + 60 * 60 * 24
_BYTE_SEQUENCE = b'\abcdefghijklmnopqrstuvwxyz0123456789' * 200
_BYTE_SEQUENCE_SEQUENCE = tuple(
bytes(bytearray((row + column) % 256 for column in range(row)))
for row in range(_STREAM_LENGTH))
class LonelyClientTest(unittest.TestCase):
    """Tests client behavior when no server exists to answer the call."""

    def testLonelyClient(self):
        """An invoked call against an unreachable host should emit exactly a
        METADATA_ACCEPTED event and a FINISH event (in either order), then no
        further events until the queue is stopped.
        """
        host = 'nosuchhostexists'
        port = 54321
        method = 'test method'
        deadline = time.time() + _TIMEOUT
        # Poll slightly past the deadline so the call has had time to fail.
        after_deadline = deadline + _AFTER_DELAY
        metadata_tag = object()
        finish_tag = object()
        completion_queue = _low.CompletionQueue()
        channel = _low.Channel('%s:%d' % (host, port), None)
        client_call = _low.Call(channel, method, host, deadline)

        client_call.invoke(completion_queue, metadata_tag, finish_tag)
        first_event = completion_queue.get(after_deadline)
        self.assertIsNotNone(first_event)
        second_event = completion_queue.get(after_deadline)
        self.assertIsNotNone(second_event)
        # Order of the two events is not guaranteed, hence assertItemsEqual.
        kinds = [event.kind for event in (first_event, second_event)]
        self.assertItemsEqual(
            (_low.Event.Kind.METADATA_ACCEPTED, _low.Event.Kind.FINISH),
            kinds)

        # No further events are expected for the failed call.
        self.assertIsNone(completion_queue.get(after_deadline))

        completion_queue.stop()
        stop_event = completion_queue.get(_FUTURE)
        self.assertEqual(_low.Event.Kind.STOP, stop_event.kind)
class EchoTest(unittest.TestCase):
    def setUp(self):
        """Starts a server on an OS-assigned port and a channel pointed at it."""
        self.host = 'localhost'

        self.server_completion_queue = _low.CompletionQueue()
        self.server = _low.Server(self.server_completion_queue)
        # Port 0 lets the OS pick a free port; the actual port is returned.
        port = self.server.add_http2_addr('[::]:0')
        self.server.start()

        self.client_completion_queue = _low.CompletionQueue()
        self.channel = _low.Channel('%s:%d' % (self.host, port), None)
    def tearDown(self):
        """Shuts down the server and drains both completion queues to STOP."""
        self.server.stop()
        # NOTE(nathaniel): Yep, this is weird; it's a consequence of
        # grpc_server_destroy's being what has the effect of telling the server's
        # completion queue to pump out all pending events/tags immediately rather
        # than gracefully completing all outstanding RPCs while accepting no new
        # ones.
        # TODO(nathaniel): Deallocation of a Python object shouldn't have this kind
        # of observable side effect let alone such an important one.
        del self.server
        self.server_completion_queue.stop()
        self.client_completion_queue.stop()
        # Drain each queue until its STOP event arrives; intermediate events
        # (flushed by the forced shutdown above) are discarded.
        while True:
            event = self.server_completion_queue.get(_FUTURE)
            if event is not None and event.kind is _low.Event.Kind.STOP:
                break
        while True:
            event = self.client_completion_queue.get(_FUTURE)
            if event is not None and event.kind is _low.Event.Kind.STOP:
                break
        self.server_completion_queue = None
        self.client_completion_queue = None
def _perform_echo_test(self, test_data):
method = 'test method'
details = 'test details'
deadline = _FUTURE
metadata_tag = object()
finish_tag = object()
write_tag = object()
complete_tag = object()
service_tag = object()
read_tag = object()
status_tag = object()
server_data = []
client_data = []
client_call = _low.Call(self.channel, method, self.host, deadline)
client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)
self.server.service(service_tag)
service_accepted = self.server_completion_queue.get(_FUTURE)
self.assertIsNotNone(service_accepted)
self.assertIs(service_accepted.kind, _low.Event.Kind.SERVICE_ACCEPTED)
self.assertIs(service_accepted.tag, service_tag)
self.assertEqual(method, service_accepted.service_acceptance.method)
self.assertEqual(self.host, service_accepted.service_acceptance.host)
self.assertIsNotNone(service_accepted.service_acceptance.call)
server_call = service_accepted.service_acceptance.call
server_call.accept(self.server_completion_queue, finish_tag)
server_call.premetadata()
metadata_accepted = self.client_completion_queue.get(_FUTURE)
self.assertIsNotNone(metadata_accepted)
self.assertEqual(_low.Event.Kind.METADATA_ACCEPTED, metadata_accepted.kind)
self.assertEqual(metadata_tag, metadata_accepted.tag)
# TODO(nathaniel): Test transmission and reception of metadata.
for datum in test_data:
client_call.write(datum, write_tag)
write_accepted = self.client_completion_queue.get(_FUTURE)
self.assertIsNotNone(write_accepted)
self.assertIs(write_accepted.kind, _low.Event.Kind.WRITE_ACCEPTED)
self.assertIs(write_accepted.tag, write_tag)
self.assertIs(write_accepted.write_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_completion_queue.get(_FUTURE)
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
server_data.append(read_accepted.bytes)
server_call.write(read_accepted.bytes, write_tag)
write_accepted = self.server_completion_queue.get(_FUTURE)
self.assertIsNotNone(write_accepted)
self.assertEqual(_low.Event.Kind.WRITE_ACCEPTED, write_accepted.kind)
self.assertEqual(write_tag, write_accepted.tag)
self.assertTrue(write_accepted.write_accepted)
client_call.read(read_tag)
read_accepted = self.client_completion_queue.get(_FUTURE)
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNotNone(read_accepted.bytes)
client_data.append(read_accepted.bytes)
client_call.complete(complete_tag)
complete_accepted = self.client_completion_queue.get(_FUTURE)
self.assertIsNotNone(complete_accepted)
self.assertIs(complete_accepted.kind, _low.Event.Kind.COMPLETE_ACCEPTED)
self.assertIs(complete_accepted.tag, complete_tag)
self.assertIs(complete_accepted.complete_accepted, True)
server_call.read(read_tag)
read_accepted = self.server_completion_queue.get(_FUTURE)
self.assertIsNotNone(read_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
server_call.status(_low.Status(_low.Code.OK, details), status_tag)
server_terminal_event_one = self.server_completion_queue.get(_FUTURE)
server_terminal_event_two = self.server_completion_queue.get(_FUTURE)
if server_terminal_event_one.kind == _low.Event.Kind.COMPLETE_ACCEPTED:
status_accepted = server_terminal_event_one
rpc_accepted = server_terminal_event_two
else:
status_accepted = server_terminal_event_two
rpc_accepted = server_terminal_event_one
self.assertIsNotNone(status_accepted)
self.assertIsNotNone(rpc_accepted)
self.assertEqual(_low.Event.Kind.COMPLETE_ACCEPTED, status_accepted.kind)
self.assertEqual(status_tag, status_accepted.tag)
self.assertTrue(status_accepted.complete_accepted)
self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
self.assertEqual(finish_tag, rpc_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, ''), rpc_accepted.status)
client_call.read(read_tag)
client_terminal_event_one = self.client_completion_queue.get(_FUTURE)
client_terminal_event_two = self.client_completion_queue.get(_FUTURE)
if client_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
read_accepted = client_terminal_event_one
finish_accepted = client_terminal_event_two
else:
read_accepted = client_terminal_event_two
finish_accepted = client_terminal_event_one
self.assertIsNotNone(read_accepted)
self.assertIsNotNone(finish_accepted)
self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
self.assertEqual(read_tag, read_accepted.tag)
self.assertIsNone(read_accepted.bytes)
self.assertEqual(_low.Event.Kind.FINISH, finish_accepted.kind)
self.assertEqual(finish_tag, finish_accepted.tag)
self.assertEqual(_low.Status(_low.Code.OK, details), finish_accepted.status)
server_timeout_none_event = self.server_completion_queue.get(0)
self.assertIsNone(server_timeout_none_event)
client_timeout_none_event = self.client_completion_queue.get(0)
self.assertIsNone(client_timeout_none_event)
self.assertSequenceEqual(test_data, server_data)
self.assertSequenceEqual(test_data, client_data)
def testNoEcho(self):
self._perform_echo_test(())
def testOneByteEcho(self):
self._perform_echo_test([b'\x07'])
def testOneManyByteEcho(self):
self._perform_echo_test([_BYTE_SEQUENCE])
def testManyOneByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE)
def testManyManyByteEchoes(self):
self._perform_echo_test(_BYTE_SEQUENCE_SEQUENCE)
class CancellationTest(unittest.TestCase):
  """Verifies client-side cancellation mid-RPC through the _low adapter."""

  def setUp(self):
    self.host = 'localhost'

    self.server_completion_queue = _low.CompletionQueue()
    self.server = _low.Server(self.server_completion_queue)
    port = self.server.add_http2_addr('[::]:0')
    self.server.start()

    self.client_completion_queue = _low.CompletionQueue()
    self.channel = _low.Channel('%s:%d' % (self.host, port), None)

  def tearDown(self):
    self.server.stop()
    del self.server
    self.server_completion_queue.stop()
    self.client_completion_queue.stop()
    # Drain each queue until its STOP event surfaces.
    while True:
      event = self.server_completion_queue.get(0)
      if event is not None and event.kind is _low.Event.Kind.STOP:
        break
    while True:
      event = self.client_completion_queue.get(0)
      if event is not None and event.kind is _low.Event.Kind.STOP:
        break

  def testCancellation(self):
    method = 'test method'
    deadline = _FUTURE
    metadata_tag = object()
    finish_tag = object()
    write_tag = object()
    service_tag = object()
    read_tag = object()
    test_data = _BYTE_SEQUENCE_SEQUENCE

    server_data = []
    client_data = []

    client_call = _low.Call(self.channel, method, self.host, deadline)

    client_call.invoke(self.client_completion_queue, metadata_tag, finish_tag)

    self.server.service(service_tag)
    service_accepted = self.server_completion_queue.get(_FUTURE)
    server_call = service_accepted.service_acceptance.call

    server_call.accept(self.server_completion_queue, finish_tag)
    server_call.premetadata()

    metadata_accepted = self.client_completion_queue.get(_FUTURE)
    self.assertIsNotNone(metadata_accepted)

    # Echo every datum before cancelling so both sides hold the full payload.
    for datum in test_data:
      client_call.write(datum, write_tag)
      write_accepted = self.client_completion_queue.get(_FUTURE)

      server_call.read(read_tag)
      read_accepted = self.server_completion_queue.get(_FUTURE)
      server_data.append(read_accepted.bytes)

      server_call.write(read_accepted.bytes, write_tag)
      write_accepted = self.server_completion_queue.get(_FUTURE)
      self.assertIsNotNone(write_accepted)

      client_call.read(read_tag)
      read_accepted = self.client_completion_queue.get(_FUTURE)
      client_data.append(read_accepted.bytes)

    client_call.cancel()
    # cancel() is idempotent.
    client_call.cancel()
    client_call.cancel()
    client_call.cancel()

    server_call.read(read_tag)
    # After cancellation the server's READ_ACCEPTED and FINISH events may
    # arrive in either order; classify by kind before asserting.
    server_terminal_event_one = self.server_completion_queue.get(_FUTURE)
    server_terminal_event_two = self.server_completion_queue.get(_FUTURE)
    if server_terminal_event_one.kind == _low.Event.Kind.READ_ACCEPTED:
      read_accepted = server_terminal_event_one
      rpc_accepted = server_terminal_event_two
    else:
      read_accepted = server_terminal_event_two
      rpc_accepted = server_terminal_event_one
    self.assertIsNotNone(read_accepted)
    self.assertIsNotNone(rpc_accepted)
    self.assertEqual(_low.Event.Kind.READ_ACCEPTED, read_accepted.kind)
    self.assertIsNone(read_accepted.bytes)
    self.assertEqual(_low.Event.Kind.FINISH, rpc_accepted.kind)
    self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), rpc_accepted.status)

    finish_event = self.client_completion_queue.get(_FUTURE)
    self.assertEqual(_low.Event.Kind.FINISH, finish_event.kind)
    self.assertEqual(_low.Status(_low.Code.CANCELLED, ''), finish_event.status)

    # Both queues should now be drained.
    server_timeout_none_event = self.server_completion_queue.get(0)
    self.assertIsNone(server_timeout_none_event)
    client_timeout_none_event = self.client_completion_queue.get(0)
    self.assertIsNone(client_timeout_none_event)

    self.assertSequenceEqual(test_data, server_data)
    self.assertSequenceEqual(test_data, client_data)
class ExpirationTest(unittest.TestCase):
  """Placeholder suite for deadline-expiration coverage of the _low adapter."""

  @unittest.skip('TODO(nathaniel): Expiration test!')
  def testExpiration(self):
    """Not yet implemented; unconditionally skipped."""
# Allow running this test module directly with the standard unittest runner.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "5104c17e9fcf94f71028f57f800ea2ad",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 80,
"avg_line_length": 37.64035087719298,
"alnum_prop": 0.713431212615552,
"repo_name": "tatsuhiro-t/grpc",
"id": "b04ac1c95098a0cf23b89ce304223ccb3ebec89d",
"size": "14402",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/src/grpc/_adapter/_low_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3099175"
},
{
"name": "C#",
"bytes": "438721"
},
{
"name": "C++",
"bytes": "562283"
},
{
"name": "JavaScript",
"bytes": "141573"
},
{
"name": "Makefile",
"bytes": "1133706"
},
{
"name": "Objective-C",
"bytes": "131348"
},
{
"name": "PHP",
"bytes": "100743"
},
{
"name": "Protocol Buffer",
"bytes": "133769"
},
{
"name": "Python",
"bytes": "628072"
},
{
"name": "Ruby",
"bytes": "290916"
},
{
"name": "Shell",
"bytes": "18376"
}
],
"symlink_target": ""
}
|
import os
import unittest
import numpy as np
from scipy import sparse
from autosklearn.pipeline.components.data_preprocessing.one_hot_encoding import OneHotEncoder
from autosklearn.pipeline.util import _test_preprocessing
class OneHotEncoderTest(unittest.TestCase):
    """Checks that the OneHotEncoder preprocessing component behaves
    deterministically on dense and sparse inputs under its default
    configuration."""

    def setUp(self):
        # Feature mask for the 39-column test dataset: every column is
        # categorical except the numerical ones at 0-based positions
        # 3, 4, 8, 32, 33 and 34.
        numerical_columns = (3, 4, 8, 32, 33, 34)
        self.categorical = [column not in numerical_columns
                            for column in range(39)]
        this_directory = os.path.dirname(__file__)
        self.X_train = np.loadtxt(os.path.join(this_directory, "dataset.pkl"))

    def _fit_transform_default(self):
        # Build an encoder from the default configuration and return the
        # transformed training data.
        configuration_space = OneHotEncoder.get_hyperparameter_search_space()
        default = configuration_space.get_default_configuration()
        chosen = {hp_name: default[hp_name]
                  for hp_name in default if default[hp_name] is not None}
        preprocessor = OneHotEncoder(random_state=1,
                                     categorical_features=self.categorical,
                                     **chosen)
        return preprocessor.fit(self.X_train.copy()).transform(
            self.X_train.copy())

    def test_default_configuration(self):
        transformations = []
        for _ in range(10):
            Xt = self._fit_transform_default()
            transformations.append(Xt)
            if len(transformations) > 1:
                # Consecutive fits must not disagree with one another.
                self.assertFalse(
                    (transformations[-1] != transformations[-2]).all())

    def test_default_configuration_no_encoding(self):
        transformations = []
        for _ in range(10):
            transformation, original = _test_preprocessing(OneHotEncoder)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue((transformation == original).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertTrue(
                    (transformations[-1] == transformations[-2]).all())

    def test_default_configuration_sparse_data(self):
        transformations = []
        # Zero out non-finite entries before converting to a sparse matrix.
        self.X_train[~np.isfinite(self.X_train)] = 0
        self.X_train = sparse.csc_matrix(self.X_train)
        for _ in range(10):
            Xt = self._fit_transform_default()
            transformations.append(Xt)
            if len(transformations) > 1:
                self.assertFalse(
                    (transformations[-1].todense() !=
                     transformations[-2].todense()).all())

    def test_default_configuration_sparse_no_encoding(self):
        transformations = []
        for _ in range(10):
            transformation, original = _test_preprocessing(OneHotEncoder,
                                                           make_sparse=True)
            self.assertEqual(transformation.shape, original.shape)
            self.assertTrue(
                (transformation.todense() == original.todense()).all())
            transformations.append(transformation)
            if len(transformations) > 1:
                self.assertTrue(
                    (transformations[-1].todense() ==
                     transformations[-2].todense()).all())
|
{
"content_hash": "63da81576784e0857f4f6a266fb8532b",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 93,
"avg_line_length": 40.858333333333334,
"alnum_prop": 0.4482969610442586,
"repo_name": "hmendozap/auto-sklearn",
"id": "d062a202de11aba213d795dd0621870cff4d6e7c",
"size": "4903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_pipeline/components/data_preprocessing/test_one_hot_encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6722"
},
{
"name": "Makefile",
"bytes": "6791"
},
{
"name": "Python",
"bytes": "1207634"
},
{
"name": "Shell",
"bytes": "851"
}
],
"symlink_target": ""
}
|
def detect_platform():
    """ Attempt to detect the current platform. Returns : 'Sitara Am35172',
    'BeagleBone >=3.8'. """
    platform = ''
    with open('/proc/cpuinfo', 'rb') as f:
        cpuinfo = f.read().lower()
    # 'v7l' marks the ARMv7 architecture in /proc/cpuinfo (matching on an
    # 'ARMV7' literal does not work), and 'neon vfpv3' is a feature string
    # specific to this processor.
    if ('v7l' in cpuinfo and
            ('neon vfpv3' in cpuinfo)):
        platform = 'Sitara'
        # Debug trace confirming the cpuinfo check matched.
        print 'Entro a cpuinfo'
    # Distinguish kernel versions via `uname -a` (Python 2 `commands` module).
    import commands
    uname_status, uname = commands.getstatusoutput('uname -a')
    if uname_status > 0:
        exit('uname failed, cannot detect kernel version! uname output:\n %s' % uname)
    if ('3.7.2-cm-t3517' in uname):
        platform += ' 3.7.2'
        # Debug trace confirming the uname check matched.
        print "Entro a uname_a"
        print platform
    else:
        # NOTE(review): for non-Sitara hardware this yields ' >=3.8' with a
        # leading space and no base platform name — confirm callers expect that.
        platform += ' >=3.8'
        print platform
    print 'saliendo'
    return platform
# Runs detection immediately so the script can be exercised locally without a
# full build.  NOTE(review): this executes on *import* as well — consider
# guarding with `if __name__ == '__main__':` to avoid the side effect.
detect_platform()
|
{
"content_hash": "3327e684c0058b5194852ac9cab19389",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 82,
"avg_line_length": 37.03225806451613,
"alnum_prop": 0.681184668989547,
"repo_name": "dec4n/PyBBIO-CM-T3517",
"id": "39501f99b40b6d40bcd1a6c726678c19d03aff01",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "platform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1176"
}
],
"symlink_target": ""
}
|
import pickle
import numpy as np
from PIL import Image
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
import openface
import redis
from cumera.celeryconf import app
from demos.classifier import align, net
from models import FaceImage
from redis_cli import REDIS_CONNECTION_POOL
class Face:
    """Pairs a face embedding vector with the identity it belongs to."""

    def __init__(self, rep, identity):
        # Full embedding is kept; __repr__ shows only its first five entries.
        self.identity = identity
        self.rep = rep

    def __repr__(self):
        template = "{{id: {}, rep[0:5]: {}}}"
        return template.format(str(self.identity), self.rep[0:5])
class FaceIdentifier:
    """Stores face embeddings in Redis and classifies face images against an
    SVM trained (asynchronously) on those embeddings."""

    def __init__(self):
        self.users = ['unknown']
        self.__training = False
        self.r = redis.Redis(connection_pool=REDIS_CONNECTION_POOL)
        # Start from a clean slate of stored embeddings.
        self.r.delete('images')
        # Seed the store with sample faces labeled 'unknown'.
        dummy = "data/faces/raw/2jeonghan/2jeonghan-"
        for i in range(22):
            self.process_frame(dummy + str(i) + '.jpg', 'unknown')

    def train_svm(self, username):
        """Embeds all stored images of the given user and queues an async SVM fit."""
        # Flag training in Redis so predict() reports 'training' meanwhile.
        self.r.set('training', True)
        face_images = FaceImage.objects.filter(user__username=username)
        files = map(lambda x: x.file.path, face_images)
        for file in files:
            self.process_frame(file, username)
        # Kick off the actual fit via the module-level celery task.
        train_svm.delay()
        return

    @staticmethod
    def get_data(images):
        """Splits Face records into a feature matrix X and label vector y;
        returns None when only a single identity is present."""
        X = []
        y = []
        for img in images:
            X.append(img.rep)
            y.append(img.identity)

        numIdentities = len(set(y)) - 1
        if numIdentities == 0:
            return None

        X = np.vstack(X)
        y = np.array(y)
        return (X, y)

    def process_frame(self, image_path, identity=None):
        """Embeds the largest face found in the image; stores the embedding in
        Redis when an identity is given.  Returns the embedding, or None when
        no face is detected."""
        img = Image.open(image_path)
        width, hegiht = img.size
        # Mirror the image, then reorder channels (swap first/last planes).
        buf = np.fliplr(np.asarray(img))
        rgbFrame = np.zeros((hegiht, width, 3), dtype=np.uint8)
        rgbFrame[:, :, 0] = buf[:, :, 2]
        rgbFrame[:, :, 1] = buf[:, :, 1]
        rgbFrame[:, :, 2] = buf[:, :, 0]

        bb = align.getLargestFaceBoundingBox(rgbFrame)
        bbs = [bb] if bb is not None else []
        for bb in bbs:
            landmarks = align.findLandmarks(rgbFrame, bb)
            alignedFace = align.align(96, rgbFrame, bb,
                                      landmarks=landmarks,
                                      landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
            if alignedFace is None:
                print 'No aligned face'
                continue

            rep = net.forward(alignedFace)
            if identity is not None:
                self.r.rpush('images', pickle.dumps(Face(rep, identity)))
            return rep

    def predict(self, image_path):
        """Returns the predicted identity for the image, 'training' while a fit
        is in flight, or 'unknown' when no face or classifier is available."""
        if self.r.get('training') == "True":
            return 'training'
        rep = self.process_frame(image_path)
        serialized_svm = self.r.get('classifier')
        if rep is None or serialized_svm is None:
            return 'unknown'
        # NOTE(review): pickle.loads on Redis data is acceptable only because
        # the value is produced by our own train_svm task — confirm Redis is
        # not writable by untrusted parties.
        svm = pickle.loads(serialized_svm)
        return svm.predict(rep)[0]
@app.task
def train_svm():
    """Celery task: fits a grid-searched SVM on all embeddings stored under
    the Redis 'images' list and stores the pickled classifier back under
    'classifier', then clears the 'training' flag."""
    r = redis.Redis(connection_pool=REDIS_CONNECTION_POOL)
    images = list(map(lambda i: pickle.loads(i), r.lrange('images', 0, -1)))
    print("+ Training SVM on {} labeled images.".format(len(images)))
    d = FaceIdentifier.get_data(images)
    if d is None:
        # Not enough distinct identities to train on; leave 'training' set.
        SVM = None
        print "d is None"
        return
    else:
        (X, y) = d
        numIdentities = len(set(y))
        if numIdentities < 1:
            print "Invalid training"
            return

    # Cross-validated search over linear and RBF kernels.
    param_grid = [
        {'C': [1, 10, 100, 1000],
         'kernel': ['linear']},
        {'C': [1, 10, 100, 1000],
         'gamma': [0.001, 0.0001],
         'kernel': ['rbf']}
    ]
    svm_str = pickle.dumps(GridSearchCV(SVC(C=1), param_grid, cv=5).fit(X, y))
    r.set('classifier', svm_str)
    # Clear the flag so predict() resumes serving.
    r.set('training', False)
# Module-level singleton; constructing it connects to Redis and seeds the
# 'unknown' identity as an import-time side effect.
identifier = FaceIdentifier()
|
{
"content_hash": "b63e9e63f2678429a36f40dbfb9ee24c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 93,
"avg_line_length": 28.796992481203006,
"alnum_prop": 0.554046997389034,
"repo_name": "helloworldajou/webserver",
"id": "00bbde3ac566158a3ff056a4a96e901bc7b97eff",
"size": "3830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apiserver/face_identifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2202"
},
{
"name": "HTML",
"bytes": "5204"
},
{
"name": "JavaScript",
"bytes": "236"
},
{
"name": "Lua",
"bytes": "52269"
},
{
"name": "Makefile",
"bytes": "7417"
},
{
"name": "Nginx",
"bytes": "428"
},
{
"name": "Python",
"bytes": "134625"
},
{
"name": "Shell",
"bytes": "5471"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
	"""Builds the Building object for this auto-generated POI template.
	Keep custom changes between the modification markers so regeneration
	preserves them."""
	result = Building()

	result.template = "object/building/poi/shared_tatooine_valariangang_large1.iff"
	result.attribute_template_id = -1
	result.stfName("poi_n","base_poi_building")

	#### BEGIN MODIFICATIONS ####

	#### END MODIFICATIONS ####

	return result
|
{
"content_hash": "64cdce0a60c7e6df7ab398dad63452b2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 24.153846153846153,
"alnum_prop": 0.7006369426751592,
"repo_name": "anhstudios/swganh",
"id": "68217281e971cf09ea71116255f880544b0cc666",
"size": "459",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/poi/shared_tatooine_valariangang_large1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import os
from resource_management.core.logger import Logger
from resource_management.core.resources.system import Execute
from resource_management.core.resources.system import File
from resource_management.core import shell
from resource_management.core.shell import as_user
from resource_management.core.exceptions import Fail
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import get_unique_id_and_date
from resource_management.libraries.functions import Direction, SafeMode
from utils import get_dfsadmin_base_command
from namenode_ha_state import NamenodeHAState
# Maps a desired SafeMode state to the "dfsadmin -safemode <verb>" instruction
# that transitions the NameNode into it.
safemode_to_instruction = {SafeMode.ON: "enter",
                           SafeMode.OFF: "leave"}

# File name of the marker (under the agent tmp dir) recording that a NameNode
# stack upgrade has begun.
NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE = "namenode-upgrade-in-progress"
def prepare_upgrade_check_for_previous_dir():
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up some data.
  Check that there is no "previous" folder inside the NameNode Name Dir.
  Raises Fail (listing the offending directories) when any is found.
  """
  import params
  if params.dfs_ha_enabled:
    namenode_ha = NamenodeHAState()
    if namenode_ha.is_active(params.hostname) or namenode_ha.is_active(params.public_hostname):
      Logger.info("NameNode High Availability is enabled and this is the Active NameNode.")

  problematic_previous_namenode_dirs = set()
  # dfs.namenode.name.dir may list several comma-separated directories.
  nn_name_dirs = params.dfs_name_dir.split(',')
  for nn_dir in nn_name_dirs:
    if os.path.isdir(nn_dir):
      # Check for a previous folder, which is not allowed.
      previous_dir = os.path.join(nn_dir, "previous")
      if os.path.isdir(previous_dir):
        problematic_previous_namenode_dirs.add(previous_dir)

  if len(problematic_previous_namenode_dirs) > 0:
    message = 'WARNING. The following NameNode Name Dir(s) have a "previous" folder from an older version.\n' \
              'Please back it up first, and then delete it, OR Finalize (E.g., "hdfs dfsadmin -finalizeUpgrade").\n' \
              'NameNode Name Dir(s): {0}\n' \
              '***** Then, retry this step. *****'.format(", ".join(problematic_previous_namenode_dirs))
    Logger.error(message)
    raise Fail(message)
def prepare_upgrade_enter_safe_mode(hdfs_binary):
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires first entering Safemode.
  :param hdfs_binary: name/path of the HDFS binary to use
  :raises Fail: if the NameNode could not be transitioned into safemode
  """
  import params

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  safe_mode_enter_cmd = dfsadmin_base_command + " -safemode enter"
  try:
    # Safe to call if already in Safe Mode
    desired_state = SafeMode.ON
    safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, params.dfs_ha_enabled, hdfs_binary)
    Logger.info("Transition successful: {0}, original state: {1}".format(str(safemode_transition_successful), str(original_state)))
    if not safemode_transition_successful:
      raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))
  except Exception, e:
    # Surface the manual recovery command to the operator in the failure message.
    message = "Could not enter safemode. Error: {0}. As the HDFS user, call this command: {1}".format(str(e), safe_mode_enter_cmd)
    Logger.error(message)
    raise Fail(message)
def prepare_upgrade_save_namespace(hdfs_binary):
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires saving the namespace.
  :param hdfs_binary: name/path of the HDFS binary to use
  :raises Fail: if the "-saveNamespace" command fails
  """
  import params

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  save_namespace_cmd = dfsadmin_base_command + " -saveNamespace"
  try:
    Logger.info("Checkpoint the current namespace.")
    as_user(save_namespace_cmd, params.hdfs_user, env={'PATH': params.hadoop_bin_dir})
  except Exception, e:
    # NOTE(review): the caught exception "e" is not included in the message;
    # only the suggested manual command is surfaced to the operator.
    message = format("Could not save the NameSpace. As the HDFS user, call this command: {save_namespace_cmd}")
    Logger.error(message)
    raise Fail(message)
def prepare_upgrade_backup_namenode_dir():
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires backing up the NameNode Name Dirs.
  Failures are collected and logged (not raised) so the operator can copy the
  remaining directories manually.
  """
  import params

  i = 0
  failed_paths = []
  nn_name_dirs = params.dfs_name_dir.split(',')
  backup_destination_root_dir = "{0}/{1}".format(params.namenode_backup_dir, params.stack_version_unformatted)
  if len(nn_name_dirs) > 0:
    Logger.info("Backup the NameNode name directory's CURRENT folder.")

  # Copy each name dir's "current" image into a uniquely named backup folder.
  for nn_dir in nn_name_dirs:
    i += 1
    namenode_current_image = os.path.join(nn_dir, "current")
    unique = get_unique_id_and_date() + "_" + str(i)
    # Note that /tmp may not be writeable.
    backup_current_folder = "{0}/namenode_{1}/".format(backup_destination_root_dir, unique)

    if os.path.isdir(namenode_current_image) and not os.path.isdir(backup_current_folder):
      try:
        os.makedirs(backup_current_folder)
        Execute(('cp', '-ar', namenode_current_image, backup_current_folder),
                sudo=True
        )
      except Exception, e:
        failed_paths.append(namenode_current_image)
  if len(failed_paths) > 0:
    Logger.error("Could not backup the NameNode Name Dir(s) to {0}, make sure that the destination path is "
                 "writeable and copy the directories on your own. Directories: {1}".format(backup_destination_root_dir,
                                                                                           ", ".join(failed_paths)))
def prepare_upgrade_finalize_previous_upgrades(hdfs_binary):
  """
  During a NonRolling (aka Express Upgrade), preparing the NameNode requires Finalizing any upgrades that are in progress.
  Best-effort: any failure is logged as a warning, never raised.
  :param hdfs_binary: name/path of the HDFS binary to use
  """
  import params

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  finalize_command = dfsadmin_base_command + " -rollingUpgrade finalize"
  try:
    Logger.info("Attempt to Finalize if there are any in-progress upgrades. "
                "This will return 255 if no upgrades are in progress.")
    code, out = shell.checked_call(finalize_command, logoutput=True, user=params.hdfs_user)
    if out:
      expected_substring = "there is no rolling upgrade in progress"
      if expected_substring not in out.lower():
        Logger.warning('Finalize command did not contain substring: %s' % expected_substring)
    else:
      Logger.warning("Finalize command did not return any output.")
  except Exception, e:
    # checked_call raises on non-zero exit (e.g. the "255" no-upgrade case);
    # treat it as best-effort and just remind the operator.
    Logger.warning("Ensure no upgrades are in progress.")
def reach_safemode_state(user, safemode_state, in_ha, hdfs_binary):
  """
  Enter or leave safemode for the Namenode.
  :param user: user to perform action as
  :param safemode_state: Desired state of ON or OFF
  :param in_ha: bool indicating if Namenode High Availability is enabled
  :param hdfs_binary: name/path of the HDFS binary to use
  :return: Returns a tuple of (transition success, original state). If no change is needed, the indicator of
  success will be True
  """
  Logger.info("Prepare to transition into safemode state %s" % safemode_state)
  import params
  original_state = SafeMode.UNKNOWN

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  safemode_base_command = dfsadmin_base_command + " -safemode "
  safemode_check_cmd = safemode_base_command + " get"

  grep_pattern = format("Safe mode is {safemode_state}")
  safemode_check_with_grep = format("{safemode_check_cmd} | grep '{grep_pattern}'")

  code, out = shell.call(safemode_check_cmd, user=user, logoutput=True)
  Logger.info("Command: %s\nCode: %d." % (safemode_check_cmd, code))
  if code == 0 and out is not None:
    Logger.info(out)
    # Parse the current state out of the "Safe mode is ..." status line.
    re_pattern = r"Safe mode is (\S*)"
    Logger.info("Pattern to search: {0}".format(re_pattern))
    m = re.search(re_pattern, out, re.IGNORECASE)
    if m and len(m.groups()) >= 1:
      original_state = m.group(1).upper()

      if original_state == safemode_state:
        # Already in the desired state; nothing to do.
        return (True, original_state)
      else:
        # Make a transition
        command = safemode_base_command + safemode_to_instruction[safemode_state]
        Execute(command,
                user=user,
                logoutput=True,
                path=[params.hadoop_bin_dir])

        # Verify the transition by grepping "get" output for the target state.
        code, out = shell.call(safemode_check_with_grep, user=user)
        Logger.info("Command: %s\nCode: %d. Out: %s" % (safemode_check_with_grep, code, out))
        if code == 0:
          return (True, original_state)
  return (False, original_state)
def prepare_rolling_upgrade(hdfs_binary):
  """
  This can be called during either Rolling Upgrade or Express Upgrade (aka nonrolling)

  Rolling Upgrade for HDFS Namenode requires the following.
  0. Namenode must be up
  1. If HA: leave safemode if the safemode status is not OFF
  2. Execute a rolling upgrade "prepare"
  3. Execute a rolling upgrade "query"
  :param hdfs_binary: name/path of the HDFS binary to use
  :raises Fail: if the upgrade direction is unknown or safemode cannot be left
  """
  import params

  if not params.upgrade_direction or params.upgrade_direction not in [Direction.UPGRADE, Direction.DOWNGRADE]:
    raise Fail("Could not retrieve upgrade direction: %s" % str(params.upgrade_direction))
  Logger.info(format("Performing a(n) {params.upgrade_direction} of HDFS"))

  if params.security_enabled:
    # Obtain a Kerberos ticket as the HDFS user before issuing admin commands.
    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
    Execute(kinit_command, user=params.hdfs_user, logoutput=True)

  # The prepare/query pair only runs in the upgrade direction.
  if params.upgrade_direction == Direction.UPGRADE:
    if params.dfs_ha_enabled:
      Logger.info('High Availability is enabled, must leave safemode before calling "-rollingUpgrade prepare"')
      desired_state = SafeMode.OFF
      safemode_transition_successful, original_state = reach_safemode_state(params.hdfs_user, desired_state, True, hdfs_binary)
      if not safemode_transition_successful:
        raise Fail("Could not transition to safemode state %s. Please check logs to make sure namenode is up." % str(desired_state))

    dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
    prepare = dfsadmin_base_command + " -rollingUpgrade prepare"
    query = dfsadmin_base_command + " -rollingUpgrade query"
    Execute(prepare,
            user=params.hdfs_user,
            logoutput=True)
    Execute(query,
            user=params.hdfs_user,
            logoutput=True)
def finalize_upgrade(upgrade_type, hdfs_binary):
  """
  Finalize the Namenode upgrade, at which point it cannot be downgraded.
  :param upgrade_type rolling or nonrolling
  :param hdfs_binary: name/path of the HDFS binary to use
  """
  Logger.info("Executing Rolling Upgrade finalize")
  import params

  if params.security_enabled:
    # Obtain a Kerberos ticket as the HDFS user before issuing admin commands.
    kinit_command = format("{params.kinit_path_local} -kt {params.hdfs_user_keytab} {params.hdfs_principal_name}")
    Execute(kinit_command, user=params.hdfs_user, logoutput=True)

  dfsadmin_base_command = get_dfsadmin_base_command(hdfs_binary)
  finalize_cmd = dfsadmin_base_command + " -rollingUpgrade finalize"
  query_cmd = dfsadmin_base_command + " -rollingUpgrade query"

  # Query before and after finalizing so upgrade status lands in the logs.
  Execute(query_cmd,
          user=params.hdfs_user,
          logoutput=True)
  Execute(finalize_cmd,
          user=params.hdfs_user,
          logoutput=True)
  Execute(query_cmd,
          user=params.hdfs_user,
          logoutput=True)

  # upgrade is finalized; remove the upgrade marker
  delete_upgrade_marker()
def get_upgrade_in_progress_marker():
  """
  Returns the full path of the marker file which indicates that the NameNode
  has begun its stack upgrade.
  :return: path under the agent's temporary directory
  """
  # Deferred import, matching the original module layout.
  from resource_management.libraries.script.script import Script
  tmp_dir = Script.get_tmp_dir()
  return os.path.join(tmp_dir, NAMENODE_UPGRADE_IN_PROGRESS_MARKER_FILE)
def create_upgrade_marker():
  """
  Creates the marker file indicating that NameNode has begun participating in a stack upgrade.
  If the file already exists, nothing will be done. This will silently log exceptions on failure.
  :return:
  """
  # Pre-initialize so the warning below cannot itself raise NameError when
  # get_upgrade_in_progress_marker() is what failed (the original bare
  # "except:" referenced a possibly-unbound name in its handler).
  namenode_upgrade_in_progress_marker = None
  try:
    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
    if not os.path.isfile(namenode_upgrade_in_progress_marker):
      File(namenode_upgrade_in_progress_marker)
  except Exception:
    # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit propagate.
    Logger.warning("Unable to create NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker))
def delete_upgrade_marker():
  """
  Removes the marker file indicating that NameNode has begun participating in a stack upgrade.
  If the file does not exist, then nothing will be done.
  Failure to remove this file could cause problems with restarts in the future. That's why
  checking to see if there is a suspended upgrade is also advised. This function will raise
  an exception if the file can't be removed.
  :return:
  """
  # delete the marker file which indicates that the upgrade was in progress
  # (the previous comment incorrectly said "create")
  namenode_upgrade_in_progress_marker = None
  try:
    namenode_upgrade_in_progress_marker = get_upgrade_in_progress_marker()
    if os.path.isfile(namenode_upgrade_in_progress_marker):
      File(namenode_upgrade_in_progress_marker, action='delete')
  except Exception:
    # narrowed from a bare except; the marker name is pre-initialized above so
    # this message cannot itself raise a NameError
    error_message = "Unable to remove NameNode upgrade marker file {0}".format(namenode_upgrade_in_progress_marker)
    Logger.error(error_message)
    raise Fail(error_message)
|
{
"content_hash": "5d8a298e5788b216fbf2b067c095518f",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 142,
"avg_line_length": 43.25155279503105,
"alnum_prop": 0.7086953399870755,
"repo_name": "radicalbit/ambari",
"id": "14d6ce2ce1f3e72cc6e065f4d4ff8c16a9936257",
"size": "13927",
"binary": false,
"copies": "6",
"ref": "refs/heads/trunk",
"path": "ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/scripts/namenode_upgrade.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "42212"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "182799"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "1287531"
},
{
"name": "CoffeeScript",
"bytes": "4323"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "88056"
},
{
"name": "HTML",
"bytes": "5098825"
},
{
"name": "Java",
"bytes": "29006663"
},
{
"name": "JavaScript",
"bytes": "17274453"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLSQL",
"bytes": "2160"
},
{
"name": "PLpgSQL",
"bytes": "314333"
},
{
"name": "PowerShell",
"bytes": "2087991"
},
{
"name": "Python",
"bytes": "14584206"
},
{
"name": "R",
"bytes": "1457"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "14478"
},
{
"name": "SQLPL",
"bytes": "2117"
},
{
"name": "Shell",
"bytes": "741459"
},
{
"name": "Vim script",
"bytes": "5813"
}
],
"symlink_target": ""
}
|
import sys
from treetagger3 import TreeTagger
tt = TreeTagger(encoding='utf-8',language='french')
with open (sys.argv[1], "r") as myfile:
data=myfile.read()
tags = tt.tag(data)
tagstats = {}
for tag in tags:
wordCategory = tag[1]
tagstats[wordCategory] = tagstats.get(wordCategory, 0) + 1
for tag in tagstats:
print tag+"\t"+str(tagstats[tag])
|
{
"content_hash": "725c5299eea16493e25b83e332972d0c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 25.142857142857142,
"alnum_prop": 0.7102272727272727,
"repo_name": "semplea/characters-meta",
"id": "a4a39d811eabf3b2eb1f5dc6e7625a65903c07e6",
"size": "377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/treetagger-python/tt-jdg.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "27805"
},
{
"name": "Jupyter Notebook",
"bytes": "504755"
},
{
"name": "Makefile",
"bytes": "7789"
},
{
"name": "Perl",
"bytes": "39921"
},
{
"name": "Python",
"bytes": "525979"
},
{
"name": "Shell",
"bytes": "24663"
},
{
"name": "TeX",
"bytes": "141827"
}
],
"symlink_target": ""
}
|
# Packaging metadata for the iribaker library.
from setuptools import setup
# NOTE(review): the description string contains the typo "charactes"; it is
# user-visible metadata, left unchanged here.
setup(name='iribaker',
      version='0.2',
      description='Safely convert IRI-like string to IRI.\nReplaces invalid charactes with an underscore (i.e. does not support roundtripping).\nFalls back to standard URL percent encoding.',
      url='http://github.com/clariah/iribaker',
      author='Rinke Hoekstra (VU University Amsterdam/University of Amsterdam)',
      author_email='rinke.hoekstra@vu.nl',
      license='MIT',
      packages=['iribaker'],
      install_requires=[
          'rfc3987',
      ],
      zip_safe=False)
|
{
"content_hash": "c0569ac9a4d27f51d90e4732ae975544",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 191,
"avg_line_length": 40.357142857142854,
"alnum_prop": 0.6743362831858407,
"repo_name": "CLARIAH-SDH/iribaker",
"id": "3189d08bfe75a6006cea5cf682ed6e75acbb3bec",
"size": "565",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3835"
}
],
"symlink_target": ""
}
|
from wxglterm_interface import TermUI, TermWindow
from term_plugin_base import TermPluginBase
import tkinter
import sys
import logging
class DefaultTkTermWindow(TermPluginBase, TermWindow):
    """Terminal window plugin backed by a Tk ``Text`` widget.

    On each refresh the plugin context's terminal buffer is repainted into
    the widget.
    """

    def __init__(self):
        TermPluginBase.__init__(self,
                                name="default_term_window",
                                desc="It is a python version term default_term_ui",
                                version=1)
        TermWindow.__init__(self)
        # Tk toplevel; created lazily by show() or assigned by the UI plugin.
        self.top = None
        self.old_content = ''

    def refresh(self):
        # Defer the redraw onto the Tk event loop via after(); Tk widgets
        # should only be manipulated from the thread running mainloop().
        if self.top:
            self.top.after(20, self.__refresh)

    def __refresh(self):
        """Repaint the whole Text widget from the terminal buffer."""
        term_buff = self.get_plugin_context().term_buffer
        rows = term_buff.rows
        cols = term_buff.cols

        self.text.delete('1.0', tkinter.END)

        for row in range(rows):
            line = term_buff.get_line(row)
            data = []
            for col in range(cols):
                cell = line.get_cell(col)
                try:
                    # A char of 0 marks an empty cell; skip it.
                    if cell.char != 0:
                        data.append(cell.char)
                except Exception:
                    # Was a bare "except:"; narrowed so SystemExit and
                    # KeyboardInterrupt are not swallowed before exiting.
                    logging.exception("char error")
                    sys.exit(1)
            # Tk text indices are 1-based for rows, 0-based for columns.
            self.text.insert('{}.{}'.format(row + 1, 0),
                             u''.join(data) + u'\n')

    def show(self):
        # Create the toplevel and its Text widget on first show only.
        if not self.top:
            self.top = tkinter.Tk()
            self.text = tkinter.Text(self.top)
            self.text.pack(fill=tkinter.BOTH, expand=1)
class DefaultTkTermUI(TermPluginBase, TermUI):
    """Tk-based terminal UI plugin; owns the root Tk object and the windows."""

    def __init__(self):
        TermPluginBase.__init__(self,
                                name="default_tk_term_ui",
                                desc="It is a python version term default_term_ui",
                                version=1)
        TermUI.__init__(self)

        # Tk requires sys.argv to exist; embedded interpreters may lack it.
        if not hasattr(sys, 'argv') or len(sys.argv) == 0:
            sys.argv = ['']

        self.__top = None
        self.__root_window = None
        self.__windows = []

    def __get_top_window(self):
        """Create the root Tk object on first use and cache it."""
        if not self.__top:
            self.__top = tkinter.Tk()
        return self.__top

    def create_window(self):
        window = DefaultTkTermWindow()
        window.init_plugin(self.get_plugin_context(),
                           self.get_plugin_config())

        # The first window shares the root Tk object; later windows get
        # their own toplevel when shown.
        if not self.__root_window:
            window.top = self.__get_top_window()
            window.text = tkinter.Text(window.top)
            window.text.pack(fill=tkinter.BOTH, expand=1)
            self.__root_window = window

        self.__windows.append(window)
        return window

    def start_main_ui_loop(self):
        self.__get_top_window().mainloop()
        return 0

    def schedule_task(self, task, miliseconds, repeated):
        # NOTE(review): 'repeated' is ignored — the task runs exactly once;
        # confirm whether periodic rescheduling is expected here.
        self.__get_top_window().after(miliseconds,
                                      lambda: task.run())
# Module-level singleton UI plugin instance.
g_term_ui = DefaultTkTermUI()
def register_plugins(pm):
    # Entry point invoked by the host: register this module's plugins with
    # the plugin manager.
    pm.register_plugin(g_term_ui)
|
{
"content_hash": "4eebb3ee59f4f4edca0a8ca3166dd1d2",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 79,
"avg_line_length": 27.953703703703702,
"alnum_prop": 0.5074527989400464,
"repo_name": "stonewell/wxglterm",
"id": "9c14b3e3a89ae84837517e35a860325509b542e5",
"size": "3019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysrc/plugins/default_tk_term_ui.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13741"
},
{
"name": "C++",
"bytes": "650016"
},
{
"name": "CMake",
"bytes": "36368"
},
{
"name": "Dockerfile",
"bytes": "558"
},
{
"name": "Emacs Lisp",
"bytes": "155"
},
{
"name": "GLSL",
"bytes": "2683"
},
{
"name": "Objective-C",
"bytes": "205"
},
{
"name": "Python",
"bytes": "90827"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
}
|
"""A setup module for the GRPC Python package."""
from distutils import core as _core
import setuptools
import sys
_EXTENSION_SOURCES = (
'grpc/_adapter/_c/module.c',
'grpc/_adapter/_c/types.c',
'grpc/_adapter/_c/utility.c',
'grpc/_adapter/_c/types/client_credentials.c',
'grpc/_adapter/_c/types/server_credentials.c',
'grpc/_adapter/_c/types/completion_queue.c',
'grpc/_adapter/_c/types/call.c',
'grpc/_adapter/_c/types/channel.c',
'grpc/_adapter/_c/types/server.c',
)
_EXTENSION_INCLUDE_DIRECTORIES = (
'.',
)
_EXTENSION_LIBRARIES = (
'grpc',
'gpr',
)
if not "darwin" in sys.platform:
_EXTENSION_LIBRARIES += ('rt',)
_EXTENSION_MODULE = _core.Extension(
'grpc._adapter._c', sources=list(_EXTENSION_SOURCES),
include_dirs=list(_EXTENSION_INCLUDE_DIRECTORIES),
libraries=list(_EXTENSION_LIBRARIES),
)
_PACKAGES = (
'grpc',
'grpc._adapter',
'grpc._junkdrawer',
'grpc.early_adopter',
'grpc.framework',
'grpc.framework.alpha',
'grpc.framework.base',
'grpc.framework.common',
'grpc.framework.face',
'grpc.framework.face.testing',
'grpc.framework.foundation',
)
_PACKAGE_DIRECTORIES = {
'grpc': 'grpc',
'grpc._adapter': 'grpc/_adapter',
'grpc._junkdrawer': 'grpc/_junkdrawer',
'grpc.early_adopter': 'grpc/early_adopter',
'grpc.framework': 'grpc/framework',
}
setuptools.setup(
name='grpcio',
version='0.9.0a1',
ext_modules=[_EXTENSION_MODULE],
packages=list(_PACKAGES),
package_dir=_PACKAGE_DIRECTORIES,
install_requires=[
'enum34==1.0.4',
'futures==2.2.0',
'protobuf==3.0.0a3'
]
)
|
{
"content_hash": "0c0a9138811acd67aefb8b9badadf5eb",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 57,
"avg_line_length": 24.18840579710145,
"alnum_prop": 0.6273217495506291,
"repo_name": "wkubiak/grpc",
"id": "5398b099360645659e13d6b3cb3bc0c9dbdfd1fa",
"size": "3198",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/python/src/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "2773"
},
{
"name": "C",
"bytes": "3072278"
},
{
"name": "C#",
"bytes": "545843"
},
{
"name": "C++",
"bytes": "849034"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "166351"
},
{
"name": "Makefile",
"bytes": "1362368"
},
{
"name": "Objective-C",
"bytes": "201898"
},
{
"name": "PHP",
"bytes": "62454"
},
{
"name": "Protocol Buffer",
"bytes": "93409"
},
{
"name": "Python",
"bytes": "692664"
},
{
"name": "Ruby",
"bytes": "292584"
},
{
"name": "Shell",
"bytes": "15861"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import cPickle
from twisted.persisted import styles
from buildbot.util import json
import sqlalchemy as sa
# Version-1 buildbot schema.  Every table is attached to this single
# MetaData object so metadata.create_all() creates the whole schema at once.
metadata = sa.MetaData()
# Single bookkeeping table recording last access per client name.
last_access = sa.Table('last_access', metadata,
    sa.Column('who', sa.String(256), nullable=False),
    sa.Column('writing', sa.Integer, nullable=False),
    sa.Column('last_access', sa.Integer, nullable=False),
)
# One-row counter holding the next changeid to hand out.
changes_nextid = sa.Table('changes_nextid', metadata,
    sa.Column('next_changeid', sa.Integer),
)
# One row per Change; the detail tables below reference changes.changeid.
changes = sa.Table('changes', metadata,
    sa.Column('changeid', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('author', sa.String(256), nullable=False),
    sa.Column('comments', sa.String(1024), nullable=False),
    sa.Column('is_dir', sa.SmallInteger, nullable=False),
    sa.Column('branch', sa.String(256)),
    sa.Column('revision', sa.String(256)),
    sa.Column('revlink', sa.String(256)),
    sa.Column('when_timestamp', sa.Integer, nullable=False),
    sa.Column('category', sa.String(256)),
)
# One-to-many detail tables for changes (links, touched files, properties).
change_links = sa.Table('change_links', metadata,
    sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
    sa.Column('link', sa.String(1024), nullable=False),
)
change_files = sa.Table('change_files', metadata,
    sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
    sa.Column('filename', sa.String(1024), nullable=False),
)
change_properties = sa.Table('change_properties', metadata,
    sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
    sa.Column('property_name', sa.String(256), nullable=False),
    sa.Column('property_value', sa.String(1024), nullable=False),
)
# Scheduler state plus join tables mapping schedulers to changes and to
# upstream buildsets.
schedulers = sa.Table("schedulers", metadata,
    sa.Column('schedulerid', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('name', sa.String(128), nullable=False),
    sa.Column('state', sa.String(1024), nullable=False),
)
scheduler_changes = sa.Table('scheduler_changes', metadata,
    sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')),
    sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid')),
    sa.Column('important', sa.SmallInteger),
)
scheduler_upstream_buildsets = sa.Table('scheduler_upstream_buildsets', metadata,
    sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id')),
    sa.Column('schedulerid', sa.Integer, sa.ForeignKey('schedulers.schedulerid')),
    sa.Column('active', sa.SmallInteger),
)
# Source stamps (branch/revision/optional patch) and the changes that
# produced them.
sourcestamps = sa.Table('sourcestamps', metadata,
    sa.Column('id', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('branch', sa.String(256)),
    sa.Column('revision', sa.String(256)),
    sa.Column('patchid', sa.Integer, sa.ForeignKey('patches.id')),
)
patches = sa.Table('patches', metadata,
    sa.Column('id', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('patchlevel', sa.Integer, nullable=False),
    sa.Column('patch_base64', sa.Text, nullable=False),
    sa.Column('subdir', sa.Text),
)
sourcestamp_changes = sa.Table('sourcestamp_changes', metadata,
    sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False),
    sa.Column('changeid', sa.Integer, sa.ForeignKey('changes.changeid'), nullable=False),
)
# Buildsets group the build requests generated from one sourcestamp;
# buildrequests and builds record scheduling and execution of each request.
buildsets = sa.Table('buildsets', metadata,
    sa.Column('id', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('external_idstring', sa.String(256)),
    sa.Column('reason', sa.String(256)),
    sa.Column('sourcestampid', sa.Integer, sa.ForeignKey('sourcestamps.id'), nullable=False),
    sa.Column('submitted_at', sa.Integer, nullable=False),
    sa.Column('complete', sa.SmallInteger, nullable=False, server_default=sa.DefaultClause("0")),
    sa.Column('complete_at', sa.Integer),
    sa.Column('results', sa.SmallInteger),
)
buildset_properties = sa.Table('buildset_properties', metadata,
    sa.Column('buildsetid', sa.Integer, sa.ForeignKey('buildsets.id'), nullable=False),
    sa.Column('property_name', sa.String(256), nullable=False),
    sa.Column('property_value', sa.String(1024), nullable=False),
)
buildrequests = sa.Table('buildrequests', metadata,
    sa.Column('id', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('buildsetid', sa.Integer, sa.ForeignKey("buildsets.id"), nullable=False),
    sa.Column('buildername', sa.String(length=256), nullable=False),
    sa.Column('priority', sa.Integer, nullable=False, server_default=sa.DefaultClause("0")),
    sa.Column('claimed_at', sa.Integer, server_default=sa.DefaultClause("0")),
    sa.Column('claimed_by_name', sa.String(length=256)),
    sa.Column('claimed_by_incarnation', sa.String(length=256)),
    sa.Column('complete', sa.Integer, server_default=sa.DefaultClause("0")),
    sa.Column('results', sa.SmallInteger),
    sa.Column('submitted_at', sa.Integer, nullable=False),
    sa.Column('complete_at', sa.Integer),
)
builds = sa.Table('builds', metadata,
    sa.Column('id', sa.Integer, autoincrement=False, primary_key=True),
    sa.Column('number', sa.Integer, nullable=False),
    sa.Column('brid', sa.Integer, sa.ForeignKey('buildrequests.id'), nullable=False),
    sa.Column('start_time', sa.Integer, nullable=False),
    sa.Column('finish_time', sa.Integer),
)
def test_unicode(migrate_engine):
    """Test that the database can handle inserting and selecting Unicode"""
    # A private MetaData keeps this scratch table out of the module-level
    # schema that create_all() will build later.
    scratch_meta = sa.MetaData()
    scratch_meta.bind = migrate_engine
    scratch = sa.Table('test_unicode', scratch_meta,
        sa.Column('u', sa.Unicode(length=100)),
        sa.Column('b', sa.LargeBinary),
    )
    scratch.create()

    # Round-trip one unicode string and one binary blob.
    u_val = u"Frosty the \N{SNOWMAN}"
    b_val = '\xff\xff\x00'
    migrate_engine.execute(scratch.insert().values(u=u_val, b=b_val))

    # Verify types and values survived intact.
    row = migrate_engine.execute(sa.select([scratch])).fetchall()[0]
    assert type(row['u']) is unicode
    assert row['u'] == u_val
    assert type(row['b']) is str
    assert row['b'] == b_val

    # Drop the scratch table again.
    scratch.drop()
def import_changes(migrate_engine):
    """Import the legacy changes.pck pickle into the new tables.

    Reads ``changes.pck`` from the buildmaster's basedir (if present),
    inserts every numbered Change into the ``changes``/``change_files``/
    ``change_properties`` tables, seeds ``changes_nextid``, then renames the
    pickle to ``changes.pck.old``.  Raises UnicodeError if any pickled field
    cannot be decoded as UTF-8.
    """
    # get the basedir from the engine - see model.py if you're wondering
    # how it got there
    basedir = migrate_engine.buildbot_basedir
    # strip None from any of these values, just in case
    def remove_none(x):
        if x is None: return u""
        elif isinstance(x, str):
            # Python 2: decode byte strings so the DB sees unicode.
            return x.decode("utf8")
        else:
            return x
    # if we still have a changes.pck, then we need to migrate it
    changes_pickle = os.path.join(basedir, "changes.pck")
    if not os.path.exists(changes_pickle):
        # Nothing to import; just seed the counter at 1.
        migrate_engine.execute(changes_nextid.insert(),
                next_changeid=1)
        return
    #if not quiet: print "migrating changes.pck to database"
    # 'source' will be an old b.c.changes.ChangeMaster instance, with a
    # .changes attribute. Note that we use 'r', and not 'rb', because these
    # pickles were written using the old text pickle format, which requires
    # newline translation
    with open(changes_pickle,"r") as f:
        source = cPickle.load(f)
    styles.doUpgrade()
    #if not quiet: print " (%d Change objects)" % len(source.changes)
    # first, scan for changes without a number.  If we find any, then we'll
    # renumber the changes sequentially
    have_unnumbered = False
    for c in source.changes:
        if c.revision and c.number is None:
            have_unnumbered = True
            break
    if have_unnumbered:
        n = 1
        for c in source.changes:
            if c.revision:
                c.number = n
                n = n + 1
    # insert the changes
    for c in source.changes:
        # Changes without a revision are skipped entirely.
        if not c.revision:
            continue
        try:
            values = dict(
                    changeid=c.number,
                    author=c.who,
                    comments=c.comments,
                    is_dir=c.isdir,
                    branch=c.branch,
                    revision=c.revision,
                    revlink=c.revlink,
                    when_timestamp=c.when,
                    category=c.category)
            values = dict([ (k, remove_none(v)) for k, v in values.iteritems() ])
        except UnicodeDecodeError, e:
            raise UnicodeError("Trying to import change data as UTF-8 failed.  Please look at contrib/fix_changes_pickle_encoding.py: %s" % str(e))
        migrate_engine.execute(changes.insert(), **values)
        # NOTE: change_links is not populated, since it is deleted in db
        # version 20.  The table is still created, though.
        # sometimes c.files contains nested lists -- why, I do not know!  But we deal with
        # it all the same - see bug #915. We'll assume for now that c.files contains *either*
        # lists of filenames or plain filenames, not both.
        def flatten(l):
            if l and type(l[0]) == list:
                rv = []
                for e in l:
                    if type(e) == list:
                        rv.extend(e)
                    else:
                        rv.append(e)
                return rv
            else:
                return l
        for filename in flatten(c.files):
            migrate_engine.execute(change_files.insert(),
                    changeid=c.number,
                    filename=filename)
        # Properties are stored JSON-encoded, one row per property.
        for propname,propvalue in c.properties.properties.items():
            encoded_value = json.dumps(propvalue)
            migrate_engine.execute(change_properties.insert(),
                    changeid=c.number,
                    property_name=propname,
                    property_value=encoded_value)
    # update next_changeid
    max_changeid = max([ c.number for c in source.changes if c.revision ] + [ 0 ])
    migrate_engine.execute(changes_nextid.insert(),
            next_changeid=max_changeid+1)
    #if not quiet:
    #    print "moving changes.pck to changes.pck.old; delete it or keep it as a backup"
    os.rename(changes_pickle, changes_pickle+".old")
def upgrade(migrate_engine):
    """Create the initial buildbot schema and import any legacy pickles."""
    metadata.bind = migrate_engine

    # Sanity-check unicode round-tripping before touching the real schema.
    test_unicode(migrate_engine)

    # Build every table attached to the module-level metadata, then pull in
    # the old changes.pck data (if any).
    metadata.create_all()
    import_changes(migrate_engine)
|
{
"content_hash": "dc27578fa79c673c1689fb51472dfd78",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 147,
"avg_line_length": 38.68283582089552,
"alnum_prop": 0.6441593517893315,
"repo_name": "denny820909/builder",
"id": "297d7a2c134f6752aa6ec7aa45685c19068b232b",
"size": "11073",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/db/migrate/versions/001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "106"
},
{
"name": "C",
"bytes": "68706"
},
{
"name": "CSS",
"bytes": "18630"
},
{
"name": "D",
"bytes": "532"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "HTML",
"bytes": "69377"
},
{
"name": "Makefile",
"bytes": "1220"
},
{
"name": "Objective-C",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "21088388"
},
{
"name": "Shell",
"bytes": "2766"
},
{
"name": "Smarty",
"bytes": "393"
}
],
"symlink_target": ""
}
|
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import copy
import functools
import re
import string
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova import keymgr
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.pci import request as pci_request
import nova.policy
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova import volume
# Module-level logger for the compute API.
LOG = logging.getLogger(__name__)
# Notifier factory and exception wrapper bound to the 'compute' service so
# emitted notifications and faults are attributed to it.
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
                                   get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='Availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='Kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(count)d',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
# Dedicated option group for ephemeral-disk encryption settings.
ephemeral_storage_encryption_group = cfg.OptGroup(
    name='ephemeral_storage_encryption',
    title='Ephemeral storage encryption options')
ephemeral_storage_encryption_opts = [
    cfg.BoolOpt('enabled',
                default=False,
                help='Whether to encrypt ephemeral storage'),
    cfg.StrOpt('cipher',
               default='aes-xts-plain64',
               help='The cipher and mode to be used to encrypt ephemeral '
                    'storage. Which ciphers are available ciphers depends '
                    'on kernel support. See /proc/crypto for the list of '
                    'available options.'),
    cfg.IntOpt('key_size',
               default=512,
               help='The bit length of the encryption key to be used to '
                    'encrypt ephemeral storage (in XTS mode only half of '
                    'the bits are used for encryption key)')
]
# Register this module's options and import options defined elsewhere that
# the compute API reads.
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_group(ephemeral_storage_encryption_group)
CONF.register_opts(ephemeral_storage_encryption_opts,
                   group='ephemeral_storage_encryption')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
# Upper bound on user data size — presumably bytes; confirm against callers.
MAX_USERDATA_SIZE = 65535
# Security group names treated as read-only.
RO_SECURITY_GROUPS = ['default']
# Flavor extra_specs key for requested video RAM (read in
# _check_num_instances_quota below).
VIDEO_RAM = 'hw_video:ram_max_mb'
# Action labels used when reporting aggregate API operations.
AGGREGATE_ACTION_UPDATE = 'Update'
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
AGGREGATE_ACTION_DELETE = 'Delete'
AGGREGATE_ACTION_ADD = 'Add'
def check_instance_state(vm_state=None, task_state=(None,),
                         must_have_launched=True):
    """Decorator to check VM and/or task state before entry to API functions.

    If the instance is in the wrong state, or has not been successfully
    started at least once the wrapper will raise an exception.
    """
    # Normalize the allowed-state arguments into sets for membership tests;
    # None disables the corresponding check entirely.
    def _as_set(states):
        if states is None or isinstance(states, set):
            return states
        return set(states)

    vm_state = _as_set(vm_state)
    task_state = _as_set(task_state)

    def outer(f):
        @functools.wraps(f)
        def inner(self, context, instance, *args, **kw):
            def _invalid(attr, state):
                # Build the state-mismatch exception for a given attribute.
                return exception.InstanceInvalidState(
                    attr=attr,
                    instance_uuid=instance.uuid,
                    state=state,
                    method=f.__name__)

            if vm_state is not None and instance.vm_state not in vm_state:
                raise _invalid('vm_state', instance.vm_state)
            if (task_state is not None and
                    instance.task_state not in task_state):
                raise _invalid('task_state', instance.task_state)
            if must_have_launched and not instance.launched_at:
                raise _invalid('launched_at', instance.launched_at)
            return f(self, context, instance, *args, **kw)
        return inner
    return outer
def check_instance_host(function):
    """Decorator: reject calls on instances that have no host assigned."""
    @functools.wraps(function)
    def wrapped(self, context, instance, *args, **kwargs):
        # An empty host means the instance is not ready for host-bound ops.
        if instance.host:
            return function(self, context, instance, *args, **kwargs)
        raise exception.InstanceNotReady(instance_id=instance.uuid)
    return wrapped
def check_instance_lock(function):
    """Decorator: reject operations on locked instances for non-admins."""
    @functools.wraps(function)
    def inner(self, context, instance, *args, **kwargs):
        # Admin contexts bypass the lock check.
        blocked = instance.locked and not context.is_admin
        if blocked:
            raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
        return function(self, context, instance, *args, **kwargs)
    return inner
def policy_decorator(scope):
    """Check corresponding policy prior of wrapped method to execution."""
    def outer(func):
        @functools.wraps(func)
        def wrapped(self, context, target, *args, **kwargs):
            # Policy enforcement may be disabled per API object (e.g. when a
            # caller has already performed the check).
            if self.skip_policy_check:
                return func(self, context, target, *args, **kwargs)
            check_policy(context, func.__name__, target, scope)
            return func(self, context, target, *args, **kwargs)
        return wrapped
    return outer
# Pre-built policy decorators for the 'compute' and
# 'compute:security_groups' policy scopes.
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
    scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
    """Enforce the '<scope>:<action>' policy rule against *target*."""
    rule = '%s:%s' % (scope, action)
    nova.policy.enforce(context, rule, target)
def check_instance_cell(fn):
    """Decorator: validate the instance's cell before calling the method.

    Uses functools.wraps (like the sibling decorators above) instead of
    hand-copying only __name__, so the docstring and other metadata of the
    wrapped function are preserved as well.
    """
    @functools.wraps(fn)
    def _wrapped(self, context, instance, *args, **kwargs):
        self._validate_cell(instance, fn.__name__)
        return fn(self, context, instance, *args, **kwargs)
    return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
"""API for interacting with the compute manager."""
    def __init__(self, image_api=None, network_api=None, volume_api=None,
                 security_group_api=None, skip_policy_check=False, **kwargs):
        """Build the compute API facade.

        Each *_api argument may be injected (mainly for testing); when
        omitted, the default implementation for that service is constructed.
        skip_policy_check is stored on the object and propagated to the
        sub-APIs that accept it.
        """
        self.skip_policy_check = skip_policy_check
        self.image_api = image_api or image.API()
        self.network_api = network_api or network.API(
            skip_policy_check=skip_policy_check)
        self.volume_api = volume_api or volume.API()
        self.security_group_api = (security_group_api or
            openstack_driver.get_openstack_security_group_driver(
                skip_policy_check=skip_policy_check))
        self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        # Built lazily via the compute_task_api property below.
        self._compute_task_api = None
        self.servicegroup_api = servicegroup.API()
        self.notifier = rpc.get_notifier('compute', CONF.host)
        # The key manager is only needed when ephemeral storage encryption
        # is enabled.
        if CONF.ephemeral_storage_encryption.enabled:
            self.key_manager = keymgr.API()

        super(API, self).__init__(**kwargs)
    @property
    def compute_task_api(self):
        """Lazily-constructed conductor ComputeTaskAPI handle."""
        if self._compute_task_api is None:
            # TODO(alaski): Remove calls into here from conductor manager so
            # that this isn't necessary. #1180540
            from nova import conductor
            self._compute_task_api = conductor.ComputeTaskAPI()
        return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
    def _cell_read_only(self, cell_name):
        """Is the target cell in a read-only mode?"""
        # FIXME(comstud): Add support for this.
        # Always False until read-only cell support is implemented.
        return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance.uuid)
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance.uuid,
state="temporary_readonly",
method=method)
    def _record_action_start(self, context, instance, action):
        # Persist an InstanceAction row marking the start of *action*;
        # fire-and-forget (want_result=False).
        objects.InstanceAction.action_start(context, instance.uuid,
                                            action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _get_headroom(self, quotas, usages, deltas):
headroom = {res: quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved'])
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:
if deltas.get('cores'):
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas.get('instances', 1)
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if quotas.get('ram') == -1:
if deltas.get('ram'):
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas.get('instances', 1)
else:
headroom['ram'] = headroom['instances']
return headroom
    def _check_num_instances_quota(self, context, instance_type, min_count,
                                   max_count):
        """Enforce quota limits on number of instances created.

        Reserves quota for ``max_count`` instances plus the cores and RAM
        they imply (including flavor extra-spec video RAM).  On success
        returns ``(max_count, quotas)`` where ``quotas`` holds uncommitted
        reservations the caller must commit or roll back.  If fewer than
        ``max_count`` but at least ``min_count`` instances fit, recurses
        once with the reduced count; otherwise raises TooManyInstances.
        """
        # Determine requested cores and ram
        req_cores = max_count * instance_type['vcpus']
        # Video RAM from the flavor's extra specs counts against the RAM
        # quota as well.
        vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
        req_ram = max_count * (instance_type['memory_mb'] + vram_mb)

        # Check the quota
        try:
            quotas = objects.Quotas(context)
            quotas.reserve(instances=max_count,
                           cores=req_cores, ram=req_ram)
        except exception.OverQuota as exc:
            # OK, we exceeded quota; let's figure out why...
            quotas = exc.kwargs['quotas']
            overs = exc.kwargs['overs']
            usages = exc.kwargs['usages']
            deltas = {'instances': max_count,
                      'cores': req_cores, 'ram': req_ram}
            headroom = self._get_headroom(quotas, usages, deltas)

            allowed = headroom['instances']
            # Reduce 'allowed' instances in line with the cores & ram headroom
            if instance_type['vcpus']:
                allowed = min(allowed,
                              headroom['cores'] // instance_type['vcpus'])
            if instance_type['memory_mb']:
                allowed = min(allowed,
                              headroom['ram'] // (instance_type['memory_mb'] +
                                                  vram_mb))

            # Convert to the appropriate exception message
            if allowed <= 0:
                msg = _("Cannot run any more instances of this type.")
            elif min_count <= allowed <= max_count:
                # We're actually OK, but still need reservations
                return self._check_num_instances_quota(context, instance_type,
                                                       min_count, allowed)
            else:
                msg = (_("Can only run %s more instances of this type.") %
                       allowed)

            # Report quota/usage figures for the first exceeded resource.
            resource = overs[0]
            used = quotas[resource] - headroom[resource]
            total_allowed = quotas[resource]
            overs = ','.join(overs)

            params = {'overs': overs, 'pid': context.project_id,
                      'min_count': min_count, 'max_count': max_count,
                      'msg': msg}

            if min_count == max_count:
                LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run %(min_count)d instances. "
                           "%(msg)s"), params)
            else:
                LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
                           " tried to run between %(min_count)d and"
                           " %(max_count)d instances. %(msg)s"),
                          params)

            num_instances = (str(min_count) if min_count == max_count else
                "%s-%s" % (min_count, max_count))
            requested = dict(instances=num_instances, cores=req_cores,
                             ram=req_ram)
            raise exception.TooManyInstances(overs=overs,
                                             req=requested[resource],
                                             used=used, allowed=total_allowed,
                                             resource=resource)

        return max_count, quotas
    def _check_metadata_properties_quota(self, context, metadata=None):
        """Enforce quota limits on metadata properties.

        Validates that *metadata* is a dict, that its item count is within
        quota, and that every key/value fits the hard-coded DB column
        limits (255 chars each, keys non-empty).
        """
        if not metadata:
            metadata = {}
        if not isinstance(metadata, dict):
            msg = (_("Metadata type should be dict."))
            raise exception.InvalidMetadata(reason=msg)
        num_metadata = len(metadata)
        try:
            objects.Quotas.limit_check(context, metadata_items=num_metadata)
        except exception.OverQuota as exc:
            quota_metadata = exc.kwargs['quotas']['metadata_items']
            raise exception.MetadataLimitExceeded(allowed=quota_metadata)

        # Because metadata is stored in the DB, we hard-code the size limits
        # In future, we may support more variable length strings, so we act
        # as if this is quota-controlled for forwards compatibility
        for k, v in six.iteritems(metadata):
            try:
                utils.check_string_length(v)
                utils.check_string_length(k, min_length=1)
            except exception.InvalidInput as e:
                raise exception.InvalidMetadata(reason=e.format_message())

            # For backward compatible we need raise HTTPRequestEntityTooLarge
            # so we need to keep InvalidMetadataSize exception here
            if len(k) > 255:
                msg = _("Metadata property key greater than 255 characters")
                raise exception.InvalidMetadataSize(reason=msg)
            if len(v) > 255:
                msg = _("Metadata property value greater than 255 characters")
                raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
if requested_networks is not None:
# NOTE(danms): Temporary transition
requested_networks = requested_networks.as_tuples()
return self.network_api.validate_networks(context, requested_networks,
max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
    def _apply_instance_name_template(self, context, instance, index):
        """Rename *instance* using multi_instance_display_name_template.

        Used for multi-instance boots so each instance gets a distinct
        display name (and hostname, if unset).  Falls back to the current
        display_name if the configured template is malformed.  Saves and
        returns the updated instance.
        """
        params = {
            'uuid': instance.uuid,
            'name': instance.display_name,
            # Template counts are 1-based for human readability.
            'count': index + 1,
        }
        try:
            new_name = (CONF.multi_instance_display_name_template %
                        params)
        except (KeyError, TypeError):
            LOG.exception(_LE('Failed to set instance name using '
                              'multi_instance_display_name_template.'))
            new_name = instance.display_name
        instance.display_name = new_name
        if not instance.get('hostname', None):
            instance.hostname = utils.sanitize_hostname(new_name)
        instance.save()
        return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image, instance_type):
if not image:
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
image_properties = image.get('properties', {})
config_drive_option = image_properties.get(
'img_config_drive', 'optional')
if config_drive_option not in ['optional', 'mandatory']:
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
root_gb = instance_type['root_gb']
if root_gb:
if int(image.get('size') or 0) > root_gb * (1024 ** 3):
raise exception.FlavorDiskTooSmall()
if int(image.get('min_disk') or 0) > root_gb:
raise exception.FlavorDiskTooSmall()
def _get_image_defined_bdms(self, base_options, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = map(block_device.BlockDeviceDict,
image_defined_bdms)
if image_mapping:
image_defined_bdms += self._prepare_image_mapping(
instance_type, image_mapping)
return image_defined_bdms
def _check_and_transform_bdm(self, context, base_options, instance_type,
image_meta, min_count, max_count,
block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# If the instance is booted by image and has a volume attached,
# the volume cannot have the same device name as root_device_name
if image_ref:
for bdm in block_device_mapping:
if (bdm.get('source_type') == 'volume' and
block_device.strip_dev(bdm.get(
'device_name')) == root_device_name):
msg = _('The volume cannot be assigned the same device'
' name as the root device %s') % root_device_name
raise exception.InvalidRequest(msg)
image_defined_bdms = self._get_image_defined_bdms(
base_options, instance_type, image_meta, root_device_name)
root_in_image_bdms = (
block_device.get_root_bdm(image_defined_bdms) is not None)
if legacy_bdm:
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name,
no_root=root_in_image_bdms)
elif root_in_image_bdms:
# NOTE (ndipanov): client will insert an image mapping into the v2
# block_device_mapping, but if there is a bootable device in image
# mappings - we need to get rid of the inserted image
# NOTE (gibi): another case is when a server is booted with an
# image to bdm mapping where the image only contains a bdm to a
# snapshot. In this case the other image to bdm mapping
# contains an unnecessary device with boot_index == 0.
# Also in this case the image_ref is None as we are booting from
# an image to volume bdm.
def not_image_and_root_bdm(bdm):
return not (bdm.get('boot_index') == 0 and
bdm.get('source_type') == 'image')
block_device_mapping = (
filter(not_image_and_root_bdm, block_device_mapping))
block_device_mapping += image_defined_bdms
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
return block_device_obj.block_device_make_list_from_dicts(
context, block_device_mapping)
def _get_image(self, context, image_href):
if not image_href:
return None, {}
image = self.image_api.get(context, image_href)
return image['id'], image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
self._check_requested_image(context, image_id, image, instance_type)
    def _validate_and_build_base_options(self, context, instance_type,
                                         boot_meta, image_href, image_id,
                                         kernel_id, ramdisk_id, display_name,
                                         display_description, key_name,
                                         key_data, security_groups,
                                         availability_zone, forced_host,
                                         user_data, metadata, injected_files,
                                         access_ip_v4, access_ip_v6,
                                         requested_networks, config_drive,
                                         auto_disk_config, reservation_id,
                                         max_count):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed.

        Returns ``(base_options, max_network_count)``: the validated dict
        used to seed the new Instance objects, and the maximum number of
        instances permitted by network quotas.
        """
        if availability_zone:
            available_zones = availability_zones.\
                get_availability_zones(context.elevated(), True)
            # A forced host bypasses the zone-existence check.
            if forced_host is None and availability_zone not in \
                    available_zones:
                msg = _('The requested availability zone is not available')
                raise exception.InvalidRequest(msg)

        if instance_type['disabled']:
            raise exception.FlavorNotFound(flavor_id=instance_type['id'])

        if user_data:
            l = len(user_data)
            if l > MAX_USERDATA_SIZE:
                # NOTE(mikal): user_data is stored in a text column, and
                # the database might silently truncate if its over length.
                raise exception.InstanceUserDataTooLarge(
                    length=l, maxsize=MAX_USERDATA_SIZE)

            # user_data must be valid base64 text.
            try:
                base64.decodestring(user_data)
            except base64.binascii.Error:
                raise exception.InstanceUserDataMalformed()

        self._checks_for_create_and_rebuild(context, image_id, boot_meta,
                instance_type, metadata, injected_files)

        self._check_requested_secgroups(context, security_groups)

        # Note:  max_count is the number of instances requested by the user,
        # max_network_count is the maximum number of instances taking into
        # account any network quotas
        max_network_count = self._check_requested_networks(context,
                requested_networks, max_count)

        kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
                context, kernel_id, ramdisk_id, boot_meta)

        config_drive = self._check_config_drive(config_drive)

        # A key name without key data means: look the public key up now.
        if key_data is None and key_name is not None:
            key_pair = objects.KeyPair.get_by_name(context,
                                                   context.user_id,
                                                   key_name)
            key_data = key_pair.public_key

        root_device_name = block_device.prepend_dev(
                block_device.properties_root_device_name(
                    boot_meta.get('properties', {})))

        numa_topology = hardware.numa_get_constraints(
                instance_type, boot_meta)

        system_metadata = {}

        # PCI requests come from two sources: instance flavor and
        # requested_networks. The first call in below returns an
        # InstancePCIRequests object which is a list of InstancePCIRequest
        # objects. The second call in below creates an InstancePCIRequest
        # object for each SR-IOV port, and append it to the list in the
        # InstancePCIRequests object
        pci_request_info = pci_request.get_pci_requests_from_flavor(
            instance_type)
        self.network_api.create_pci_requests_for_sriov_ports(context,
            pci_request_info, requested_networks)

        base_options = {
            'reservation_id': reservation_id,
            'image_ref': image_href,
            'kernel_id': kernel_id or '',
            'ramdisk_id': ramdisk_id or '',
            'power_state': power_state.NOSTATE,
            'vm_state': vm_states.BUILDING,
            'config_drive': config_drive,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'instance_type_id': instance_type['id'],
            'memory_mb': instance_type['memory_mb'],
            'vcpus': instance_type['vcpus'],
            'root_gb': instance_type['root_gb'],
            'ephemeral_gb': instance_type['ephemeral_gb'],
            'display_name': display_name,
            'display_description': display_description or '',
            'user_data': user_data,
            'key_name': key_name,
            'key_data': key_data,
            'locked': False,
            'metadata': metadata or {},
            'access_ip_v4': access_ip_v4,
            'access_ip_v6': access_ip_v6,
            'availability_zone': availability_zone,
            'root_device_name': root_device_name,
            'progress': 0,
            'pci_requests': pci_request_info,
            'numa_topology': numa_topology,
            'system_metadata': system_metadata}

        options_from_image = self._inherit_properties_from_image(
                boot_meta, auto_disk_config)

        base_options.update(options_from_image)

        # return the validated options and maximum number of instances allowed
        # by the network quotas
        return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type, pci_request_info):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
if pci_request_info and pci_request_info.requests:
filter_properties['pci_requests'] = pci_request_info
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota):
# Reserve quotas
num_instances, quotas = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug("Going to run %s instances..." % num_instances)
instances = []
try:
for i in range(num_instances):
instance = objects.Instance(context=context)
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i, shutdown_terminate)
instances.append(instance)
if instance_group:
if check_server_group_quota:
count = objects.Quotas.count(context,
'server_group_members',
instance_group,
context.user_id)
try:
objects.Quotas.limit_check(context,
server_group_members=count + 1)
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
raise exception.QuotaError(msg)
objects.InstanceGroup.add_members(context,
instance_group.uuid,
[instance.uuid])
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
quotas.rollback()
# Commit the reservations
quotas.commit()
return instances
    def _get_bdm_image_metadata(self, context, block_device_mapping,
                                legacy_bdm=True):
        """If we are booting from a volume, we need to get the
        volume details from Cinder and make sure we pass the
        metadata back accordingly.

        Only the boot device is considered: device 'a' in legacy format,
        boot_index 0 otherwise.  Returns image metadata from the bdm's
        image, from the backing volume (or its snapshot's originating
        volume), or ``{}`` when there is nothing to derive it from.
        """
        if not block_device_mapping:
            return {}

        for bdm in block_device_mapping:
            # Skip everything but the boot device.
            if (legacy_bdm and
                    block_device.get_device_letter(
                       bdm.get('device_name', '')) != 'a'):
                continue
            elif not legacy_bdm and bdm.get('boot_index') != 0:
                continue

            volume_id = bdm.get('volume_id')
            snapshot_id = bdm.get('snapshot_id')
            if snapshot_id:
                # NOTE(alaski): A volume snapshot inherits metadata from the
                # originating volume, but the API does not expose metadata
                # on the snapshot itself.  So we query the volume for it below.
                snapshot = self.volume_api.get_snapshot(context, snapshot_id)
                volume_id = snapshot['volume_id']

            if bdm.get('image_id'):
                try:
                    image_id = bdm['image_id']
                    image_meta = self.image_api.get(context, image_id)
                    return image_meta
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
            elif volume_id:
                try:
                    volume = self.volume_api.get(context, volume_id)
                except exception.CinderConnectionFailed:
                    # Propagate connectivity problems unchanged.
                    raise
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)

                if not volume.get('bootable', True):
                    raise exception.InvalidBDMVolumeNotBootable(id=volume_id)

                return utils.get_image_metadata_from_volume(volume)
        return {}
@staticmethod
def _get_requested_instance_group(context, scheduler_hints,
check_quota):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
if not uuidutils.is_uuid_like(group_hint):
msg = _('Server group scheduler hint must be a UUID.')
raise exception.InvalidInput(reason=msg)
return objects.InstanceGroup.get_by_uuid(context, group_hint)
    def _create_instance(self, context, instance_type,
               image_href, kernel_id, ramdisk_id,
               min_count, max_count,
               display_name, display_description,
               key_name, key_data, security_groups,
               availability_zone, user_data, metadata,
               injected_files, admin_password,
               access_ip_v4, access_ip_v6,
               requested_networks, config_drive,
               block_device_mapping, auto_disk_config,
               reservation_id=None, scheduler_hints=None,
               legacy_bdm=True, shutdown_terminate=False,
               check_server_group_quota=False):
        """Verify all the input parameters regardless of the provisioning
        strategy being performed and schedule the instance(s) for
        creation.

        Returns ``(instances, reservation_id)`` after handing the build
        request off to the conductor (compute_task_api).
        """

        # Normalize and setup some parameters
        if reservation_id is None:
            reservation_id = utils.generate_uid('r')
        security_groups = security_groups or ['default']
        min_count = min_count or 1
        max_count = max_count or min_count
        block_device_mapping = block_device_mapping or []
        if not instance_type:
            instance_type = flavors.get_default_flavor()

        if image_href:
            image_id, boot_meta = self._get_image(context, image_href)
        else:
            # Boot-from-volume: derive image metadata from the boot bdm.
            image_id = None
            boot_meta = self._get_bdm_image_metadata(
                context, block_device_mapping, legacy_bdm)

        self._check_auto_disk_config(image=boot_meta,
                                     auto_disk_config=auto_disk_config)

        handle_az = self._handle_availability_zone
        availability_zone, forced_host, forced_node = handle_az(context,
                                                            availability_zone)

        # Forcing a host/node is an admin-level operation.
        if not self.skip_policy_check and (forced_host or forced_node):
            check_policy(context, 'create:forced_host', {})

        base_options, max_net_count = self._validate_and_build_base_options(
                context,
                instance_type, boot_meta, image_href, image_id, kernel_id,
                ramdisk_id, display_name, display_description,
                key_name, key_data, security_groups, availability_zone,
                forced_host, user_data, metadata, injected_files, access_ip_v4,
                access_ip_v6, requested_networks, config_drive,
                auto_disk_config, reservation_id, max_count)

        # max_net_count is the maximum number of instances requested by the
        # user adjusted for any network quota constraints, including
        # considertaion of connections to each requested network
        if max_net_count == 0:
            raise exception.PortLimitExceeded()
        elif max_net_count < max_count:
            LOG.debug("max count reduced from %(max_count)d to "
                      "%(max_net_count)d due to network port quota",
                      {'max_count': max_count,
                       'max_net_count': max_net_count})
            max_count = max_net_count

        block_device_mapping = self._check_and_transform_bdm(context,
            base_options, instance_type, boot_meta, min_count, max_count,
            block_device_mapping, legacy_bdm)

        instance_group = self._get_requested_instance_group(context,
                                   scheduler_hints, check_server_group_quota)

        instances = self._provision_instances(context, instance_type,
                min_count, max_count, base_options, boot_meta, security_groups,
                block_device_mapping, shutdown_terminate,
                instance_group, check_server_group_quota)

        filter_properties = self._build_filter_properties(context,
                scheduler_hints, forced_host,
                forced_node, instance_type,
                base_options.get('pci_requests'))

        for instance in instances:
            self._record_action_start(context, instance,
                                      instance_actions.CREATE)

        self.compute_task_api.build_instances(context,
                instances=instances, image=boot_meta,
                filter_properties=filter_properties,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=block_device_mapping,
                legacy_bdm=False)

        return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
if size is None and bdm.get('source_type') == 'blank':
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug("Image bdm %s", bdm)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _create_block_device_mapping(self, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", block_device_mapping,
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
if bdm.volume_size == 0:
continue
bdm.instance_uuid = instance_uuid
bdm.update_or_create()
    def _validate_bdm(self, context, instance, instance_type, all_mappings):
        """Validate the instance's block device mappings against the flavor.

        Checks boot-index ordering, image/volume/snapshot accessibility,
        ephemeral and swap sizes, and the local-device count limit.
        Raises an InvalidBDM* subclass on the first violation.
        """
        # True when the sorted boot indexes form 0, 1, 2, ... with no gaps.
        def _subsequent_list(l):
            return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))

        # Make sure that the boot indexes make sense
        boot_indexes = sorted([bdm.boot_index
                               for bdm in all_mappings
                               if bdm.boot_index is not None
                               and bdm.boot_index >= 0])

        if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
            raise exception.InvalidBDMBootSequence()

        for bdm in all_mappings:
            # NOTE(vish): For now, just make sure the volumes are accessible.
            # Additionally, check that the volume can be attached to this
            # instance.
            snapshot_id = bdm.snapshot_id
            volume_id = bdm.volume_id
            image_id = bdm.image_id
            # Only look up images other than the instance's own boot image.
            if (image_id is not None and
                    image_id != instance.get('image_ref')):
                try:
                    self._get_image(context, image_id)
                except Exception:
                    raise exception.InvalidBDMImage(id=image_id)
                if (bdm.source_type == 'image' and
                        bdm.destination_type == 'volume' and
                        not bdm.volume_size):
                    raise exception.InvalidBDM(message=_("Images with "
                        "destination_type 'volume' need to have a non-zero "
                        "size specified"))
            elif volume_id is not None:
                try:
                    volume = self.volume_api.get(context, volume_id)
                    self.volume_api.check_attach(context,
                                                 volume,
                                                 instance=instance)
                except (exception.CinderConnectionFailed,
                        exception.InvalidVolume):
                    # These carry actionable detail; re-raise unchanged.
                    raise
                except Exception:
                    raise exception.InvalidBDMVolume(id=volume_id)
            elif snapshot_id is not None:
                try:
                    self.volume_api.get_snapshot(context, snapshot_id)
                except exception.CinderConnectionFailed:
                    raise
                except Exception:
                    raise exception.InvalidBDMSnapshot(id=snapshot_id)

        # Total ephemeral space must fit within the flavor's allowance.
        ephemeral_size = sum(bdm.volume_size or 0
                for bdm in all_mappings
                if block_device.new_format_is_ephemeral(bdm))
        if ephemeral_size > instance_type['ephemeral_gb']:
            raise exception.InvalidBDMEphemeralSize()

        # There should be only one swap
        swap_list = [bdm for bdm in all_mappings
                if block_device.new_format_is_swap(bdm)]
        if len(swap_list) > 1:
            msg = _("More than one swap drive requested.")
            raise exception.InvalidBDMFormat(details=msg)

        if swap_list:
            swap_size = swap_list[0].volume_size or 0
            if swap_size > instance_type['swap']:
                raise exception.InvalidBDMSwapSize()

        # A negative max_local_block_devices disables the limit entirely.
        max_local = CONF.max_local_block_devices
        if max_local >= 0:
            num_local = len([bdm for bdm in all_mappings
                             if bdm.destination_type == 'local'])
            if num_local > max_local:
                raise exception.InvalidBDMLocalsLimit()
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance.uuid)
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
    def _populate_instance_for_create(self, context, instance, image,
                                      index, security_groups, instance_type):
        """Build the beginning of a new instance.

        Sets the uuid, initial states, info cache, flavor, ephemeral
        encryption key, image system metadata and security groups on the
        (not yet persisted) Instance object, and returns it.
        """
        if not instance.obj_attr_is_set('uuid'):
            # Generate the instance_uuid here so we can use it
            # for additional setup before creating the DB entry.
            instance.uuid = str(uuid.uuid4())

        instance.launch_index = index
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.SCHEDULING
        # Start with an empty network info cache; it is filled in later
        # when networking is allocated.
        info_cache = objects.InstanceInfoCache()
        info_cache.instance_uuid = instance.uuid
        info_cache.network_info = network_model.NetworkInfo()
        instance.info_cache = info_cache
        instance.flavor = instance_type
        instance.old_flavor = None
        instance.new_flavor = None
        if CONF.ephemeral_storage_encryption.enabled:
            instance.ephemeral_key_uuid = self.key_manager.create_key(
                context,
                length=CONF.ephemeral_storage_encryption.key_size)
        else:
            instance.ephemeral_key_uuid = None

        # Store image properties so we can use them later
        # (for notifications, etc).  Only store what we can.
        if not instance.obj_attr_is_set('system_metadata'):
            instance.system_metadata = {}
        # Make sure we have the dict form that we need for instance_update.
        instance.system_metadata = utils.instance_sys_meta(instance)

        system_meta = utils.get_system_metadata_from_image(
            image, instance_type)

        # In case we couldn't find any suitable base_image
        system_meta.setdefault('image_base_image_ref', instance.image_ref)

        instance.system_metadata.update(system_meta)

        self.security_group_api.populate_security_groups(instance,
                                                         security_groups)
        return instance
    # NOTE(bcwaldon): No policy check since this is only used by scheduler and
    # the compute api. That should probably be cleaned up, though.
    def create_db_entry_for_new_instance(self, context, instance_type, image,
            instance, security_group, block_device_mapping, num_instances,
            index, shutdown_terminate=False):
        """Create an entry in the DB for this new instance,
        including any related table updates (such as security group,
        etc).

        This is called by the scheduler after a location for the
        instance has been determined.

        Populates names/state, persists the instance, applies the
        multi-instance name template when building several instances,
        validates the block device mappings (destroying the instance on
        failure) and creates the BDM rows.  Returns the created instance.
        """
        self._populate_instance_for_create(context, instance, image, index,
                                           security_group, instance_type)

        self._populate_instance_names(instance, num_instances)

        instance.shutdown_terminate = shutdown_terminate

        self.security_group_api.ensure_default(context)
        instance.create()

        if num_instances > 1:
            # NOTE(russellb) We wait until this spot to handle
            # multi_instance_display_name_template, because we need
            # the UUID from the instance.
            instance = self._apply_instance_name_template(context, instance,
                                                          index)

        # NOTE (ndipanov): This can now raise exceptions but the instance
        #                  has been created, so delete it and re-raise so
        #                  that other cleanup can happen.
        try:
            self._validate_bdm(
                context, instance, instance_type, block_device_mapping)
        except (exception.CinderConnectionFailed, exception.InvalidBDM,
                exception.InvalidVolume):
            with excutils.save_and_reraise_exception():
                instance.destroy()
        self._create_block_device_mapping(
            instance_type, instance.uuid, block_device_mapping)

        return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
if not self.skip_policy_check:
check_policy(context, 'create', target)
if requested_networks and len(requested_networks):
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_and_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
           image_href, kernel_id=None, ramdisk_id=None,
           min_count=None, max_count=None,
           display_name=None, display_description=None,
           key_name=None, key_data=None, security_group=None,
           availability_zone=None, user_data=None, metadata=None,
           injected_files=None, admin_password=None,
           block_device_mapping=None, access_ip_v4=None,
           access_ip_v6=None, requested_networks=None, config_drive=None,
           auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
           shutdown_terminate=False, check_server_group_quota=False):
    """Provision instances, sending instance information to the
    scheduler.  The scheduler will determine where the instance(s)
    go and will handle creating the DB entries.

    Enforces the create policies up front, rejects multi-instance
    requests that are incompatible with an explicit fixed IP or a
    pre-created neutron port, then delegates the actual build to
    ``_create_instance``.

    Returns a tuple of (instances, reservation_id)
    """
    self._check_create_policies(context, availability_zone,
                                requested_networks, block_device_mapping)

    if requested_networks and max_count > 1:
        # A specific fixed IP or port can only ever belong to one
        # instance, so these combinations are rejected up front.
        self._check_multiple_instances_and_specified_ip(requested_networks)
        if utils.is_neutron():
            self._check_multiple_instances_neutron_ports(
                requested_networks)

    return self._create_instance(
        context, instance_type,
        image_href, kernel_id, ramdisk_id,
        min_count, max_count,
        display_name, display_description,
        key_name, key_data, security_group,
        availability_zone, user_data, metadata,
        injected_files, admin_password,
        access_ip_v4, access_ip_v6,
        requested_networks, config_drive,
        block_device_mapping, auto_disk_config,
        scheduler_hints=scheduler_hints,
        legacy_bdm=legacy_bdm,
        shutdown_terminate=shutdown_terminate,
        check_server_group_quota=check_server_group_quota)
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
    """Coordinate an instance delete across quotas, cells and compute.

    :param delete_type: 'delete', 'soft_delete' or 'force_delete';
        used to name the emitted notifications
    :param cb: callback performing the actual termination, invoked as
        cb(context, instance, bdms, ...) once bookkeeping is done
    :param instance_attrs: attribute updates (task_state, deleted_at,
        ...) applied to the instance before the delete proceeds
    """
    if instance.disable_terminate:
        LOG.info(_LI('instance termination disabled'),
                 instance=instance)
        return

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    project_id, user_id = quotas_obj.ids_from_instance(context, instance)

    # At these states an instance has a snapshot associate.
    if instance.vm_state in (vm_states.SHELVED,
                             vm_states.SHELVED_OFFLOADED):
        snapshot_id = instance.system_metadata.get('shelved_image_id')
        LOG.info(_LI("Working on deleting snapshot %s "
                     "from shelved instance..."),
                 snapshot_id, instance=instance)
        try:
            self.image_api.delete(context, snapshot_id)
        except (exception.ImageNotFound,
                exception.ImageNotAuthorized) as exc:
            LOG.warning(_LW("Failed to delete snapshot "
                            "from shelved instance (%s)."),
                        exc.format_message(), instance=instance)
        except Exception:
            # Best effort only: a failure to clean up the shelved
            # snapshot must not block the delete itself.
            LOG.exception(_LE("Something wrong happened when trying to "
                              "delete snapshot from shelved instance."),
                          instance=instance)

    original_task_state = instance.task_state
    quotas = None
    try:
        # NOTE(maoy): no expected_task_state needs to be set
        instance.update(instance_attrs)
        instance.progress = 0
        instance.save()

        # NOTE(comstud): If we delete the instance locally, we'll
        # commit the reservations here. Otherwise, the manager side
        # will commit or rollback the reservations based on success.
        quotas = self._create_reservations(context,
                                           instance,
                                           original_task_state,
                                           project_id, user_id)

        if self.cell_type == 'api':
            # NOTE(comstud): If we're in the API cell, we need to
            # skip all remaining logic and just call the callback,
            # which will cause a cast to the child cell. Also,
            # commit reservations here early until we have a better
            # way to deal with quotas with cells.
            cb(context, instance, bdms, reservations=None)
            quotas.commit()
            return
        shelved_offloaded = (instance.vm_state
                             == vm_states.SHELVED_OFFLOADED)
        if not instance.host and not shelved_offloaded:
            # Never scheduled onto a host: destroy the DB record
            # directly without involving a compute node.
            try:
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.start" % delete_type)
                instance.destroy()
                compute_utils.notify_about_instance_usage(
                    self.notifier, context, instance,
                    "%s.end" % delete_type,
                    system_metadata=instance.system_metadata)
                quotas.commit()
                return
            except exception.ObjectActionError:
                # The destroy raced with something else (e.g. the
                # instance got a host); refresh and fall through to
                # the normal delete path.
                instance.refresh()

        if instance.vm_state == vm_states.RESIZED:
            self._confirm_resize_on_deleting(context, instance)

        is_local_delete = True
        try:
            if not shelved_offloaded:
                service = objects.Service.get_by_compute_host(
                    context.elevated(), instance.host)
                is_local_delete = not self.servicegroup_api.service_is_up(
                    service)
            if not is_local_delete:
                if original_task_state in (task_states.DELETING,
                                           task_states.SOFT_DELETING):
                    LOG.info(_LI('Instance is already in deleting state, '
                                 'ignoring this request'),
                             instance=instance)
                    quotas.rollback()
                    return
                self._record_action_start(context, instance,
                                          instance_actions.DELETE)

                # NOTE(snikitin): If instance's vm_state is 'soft-delete',
                # we should not count reservations here, because instance
                # in soft-delete vm_state have already had quotas
                # decremented. More details:
                # https://bugs.launchpad.net/nova/+bug/1333145
                if instance.vm_state == vm_states.SOFT_DELETED:
                    quotas.rollback()

                cb(context, instance, bdms,
                   reservations=quotas.reservations)
        except exception.ComputeHostNotFound:
            pass

        if is_local_delete:
            # If instance is in shelved_offloaded state or compute node
            # isn't up, delete instance from db and clean bdms info and
            # network info
            self._local_delete(context, instance, bdms, delete_type, cb)
            quotas.commit()

    except exception.InstanceNotFound:
        # NOTE(comstud): Race condition. Instance already gone.
        if quotas:
            quotas.rollback()
    except Exception:
        with excutils.save_and_reraise_exception():
            if quotas:
                quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance):
    """Confirm an outstanding resize before deleting the instance.

    If in the middle of a resize, use confirm_resize to ensure the
    original instance is cleaned up too.  No-op when no unconfirmed
    migration can be found (it may have been confirmed concurrently).
    """
    migration = None
    for status in ('finished', 'confirming'):
        try:
            migration = objects.Migration.get_by_instance_and_status(
                context.elevated(), instance.uuid, status)
            LOG.info(_LI('Found an unconfirmed migration during delete, '
                         'id: %(id)s, status: %(status)s'),
                     {'id': migration.id,
                      'status': migration.status},
                     context=context, instance=instance)
            break
        except exception.MigrationNotFoundByStatus:
            # Try the next status.
            pass

    if not migration:
        LOG.info(_LI('Instance may have been confirmed during delete'),
                 context=context, instance=instance)
        return

    src_host = migration.source_compute
    # Call since this can race with the terminate_instance.
    # The resize is done but awaiting confirmation/reversion,
    # so there are two cases:
    # 1. up-resize: here -instance['vcpus'/'memory_mb'] match
    #    the quota usages accounted for this instance,
    #    so no further quota adjustment is needed
    # 2. down-resize: here -instance['vcpus'/'memory_mb'] are
    #    shy by delta(old, new) from the quota usages accounted
    #    for this instance, so we must adjust
    try:
        deltas = self._downsize_quota_delta(context, instance)
    except KeyError:
        LOG.info(_LI('Migration %s may have been confirmed during '
                     'delete'),
                 migration.id, context=context, instance=instance)
        return
    quotas = self._reserve_quota_delta(context, deltas, instance)

    self._record_action_start(context, instance,
                              instance_actions.CONFIRM_RESIZE)

    # cast=False: block until the confirm completes so the delete
    # that follows operates on a settled instance.
    self.compute_rpcapi.confirm_resize(context,
                                       instance, migration,
                                       src_host, quotas.reservations,
                                       cast=False)
def _create_reservations(self, context, instance, original_task_state,
                         project_id, user_id):
    """Reserve the quota decrements for deleting *instance*.

    :returns: a Quotas object holding reservations for -1 instance
        and the negated vcpu/ram usage of the instance

    NOTE(wangpan): if the instance is resizing, and the resources
    have already been updated to the new instance type, the old
    instance type is used to create the reservation so the usage
    matches what was originally accounted.
    See https://bugs.launchpad.net/nova/+bug/1099729 for more details.
    """
    instance_vcpus = instance.vcpus
    instance_memory_mb = instance.memory_mb
    if original_task_state in (task_states.RESIZE_MIGRATED,
                               task_states.RESIZE_FINISH):
        try:
            migration = objects.Migration.get_by_instance_and_status(
                context.elevated(), instance.uuid, 'post-migrating')
        except exception.MigrationNotFoundByStatus:
            migration = None
        if (migration and
                instance.instance_type_id ==
                migration.new_instance_type_id):
            old_inst_type_id = migration.old_instance_type_id
            try:
                old_inst_type = flavors.get_flavor(old_inst_type_id)
            except exception.FlavorNotFound:
                # Best effort: fall back to the current usage values.
                # (Removed a redundant `pass` that followed this log.)
                LOG.warning(_LW("Flavor %d not found"), old_inst_type_id)
            else:
                instance_vcpus = old_inst_type['vcpus']
                # Include any flavor-reserved video RAM in the ram
                # usage, mirroring how usage was charged at boot.
                vram_mb = int(old_inst_type.get('extra_specs',
                                                {}).get(VIDEO_RAM, 0))
                instance_memory_mb = (old_inst_type['memory_mb'] +
                                      vram_mb)
                LOG.debug("going to delete a resizing instance",
                          instance=instance)

    quotas = objects.Quotas(context)
    quotas.reserve(project_id=project_id,
                   user_id=user_id,
                   instances=-1,
                   cores=-instance_vcpus,
                   ram=-instance_memory_mb)
    return quotas
def _local_delete(self, context, instance, bdms, delete_type, cb):
    """Delete an instance entirely from the API side.

    Used when the compute host is down or the instance is
    SHELVED_OFFLOADED: deallocates network resources, cleans up
    volume attachments, destroys the DB record and emits the usage
    notifications without involving a compute node.
    """
    if instance.vm_state == vm_states.SHELVED_OFFLOADED:
        LOG.info(_LI("instance is in SHELVED_OFFLOADED state, cleanup"
                     " the instance's info from database."),
                 instance=instance)
    else:
        LOG.warning(_LW("instance's host %s is down, deleting from "
                        "database"), instance.host, instance=instance)
    instance.info_cache.delete()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.start" % delete_type)

    elevated = context.elevated()
    if self.cell_type != 'api':
        # NOTE(liusheng): In nova-network multi_host scenario,deleting
        # network info of the instance may need instance['host'] as
        # destination host of RPC call. If instance in SHELVED_OFFLOADED
        # state, instance['host'] is None, here, use shelved_host as host
        # to deallocate network info and reset instance['host'] after that.
        # Here we shouldn't use instance.save(), because this will mislead
        # user who may think the instance's host has been changed, and
        # actually, the instance.host is always None.
        orig_host = instance.host
        try:
            if instance.vm_state == vm_states.SHELVED_OFFLOADED:
                sysmeta = getattr(instance,
                                  obj_base.get_attrname('system_metadata'))
                instance.host = sysmeta.get('shelved_host')
            self.network_api.deallocate_for_instance(elevated,
                                                     instance)
        finally:
            instance.host = orig_host

    # cleanup volumes
    for bdm in bdms:
        if bdm.is_volume:
            # NOTE(vish): We don't have access to correct volume
            #             connector info, so just pass a fake
            #             connector. This can be improved when we
            #             expose get_volume_connector to rpc.
            connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
            try:
                self.volume_api.terminate_connection(context,
                                                     bdm.volume_id,
                                                     connector)
                self.volume_api.detach(elevated, bdm.volume_id)
                if bdm.delete_on_termination:
                    self.volume_api.delete(context, bdm.volume_id)
            except Exception as exc:
                # Use the non-deprecated warning() with the logger's
                # lazy %-interpolation instead of LOG.warn(msg % exc).
                LOG.warning(_LW("Ignoring volume cleanup failure due "
                                "to %s"), exc, instance=instance)
        bdm.destroy()
    cb(context, instance, bdms, local=True)
    sys_meta = instance.system_metadata
    instance.destroy()
    compute_utils.notify_about_instance_usage(
        self.notifier, context, instance, "%s.end" % delete_type,
        system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
               local=False):
    """Terminate callback used by ``_delete``.

    When ``local`` is set the instance record is marked DELETED
    directly in the database; otherwise the termination is delegated
    to the compute service over RPC.
    """
    if not local:
        self.compute_rpcapi.terminate_instance(context, instance, bdms,
                                               reservations=reservations)
        return
    instance.vm_state = vm_states.DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
def _do_soft_delete(self, context, instance, bdms, reservations=None,
                    local=False):
    """Soft-delete callback used by ``_delete``.

    When ``local`` is set the instance record is marked SOFT_DELETED
    directly in the database; otherwise the soft delete is delegated
    to the compute service over RPC.
    """
    if not local:
        self.compute_rpcapi.soft_delete_instance(context, instance,
                                                 reservations=reservations)
        return
    instance.vm_state = vm_states.SOFT_DELETED
    instance.task_state = None
    instance.terminated_at = timeutils.utcnow()
    instance.save()
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=True)
def soft_delete(self, context, instance):
    """Terminate an instance.

    The instance is moved into SOFT_DELETING rather than removed
    outright, so it can later be restored or reclaimed.
    """
    LOG.debug('Going to try to soft delete instance',
              instance=instance)

    self._delete(context, instance, 'soft_delete', self._do_soft_delete,
                 task_state=task_states.SOFT_DELETING,
                 deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
    """Hard-delete *instance* via the common ``_delete`` flow."""
    self._delete(context, instance, 'delete', self._do_delete,
                 task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
                      must_have_launched=False)
def delete(self, context, instance):
    """Terminate an instance.

    Allowed from any vm_state/task_state; the instance need not ever
    have been launched (must_have_launched=False).
    """
    LOG.debug("Going to try to terminate instance", instance=instance)
    self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
    """Restore a previously deleted (but not reclaimed) instance.

    Re-reserves the quota that the soft delete released.  If the
    instance still has a host, the restore is delegated to the
    compute service; otherwise the record is simply flipped back to
    ACTIVE locally.
    """
    # Reserve quotas
    flavor = instance.get_flavor()
    num_instances, quotas = self._check_num_instances_quota(
        context, flavor, 1, 1)

    self._record_action_start(context, instance, instance_actions.RESTORE)

    try:
        if instance.host:
            instance.task_state = task_states.RESTORING
            instance.deleted_at = None
            instance.save(expected_task_state=[None])
            self.compute_rpcapi.restore_instance(context, instance)
        else:
            instance.vm_state = vm_states.ACTIVE
            instance.task_state = None
            instance.deleted_at = None
            instance.save(expected_task_state=[None])

        quotas.commit()
    except Exception:
        # Give back the reservation on any failure, then re-raise.
        with excutils.save_and_reraise_exception():
            quotas.rollback()
@wrap_check_policy
@check_instance_lock
@check_instance_state(must_have_launched=False)
def force_delete(self, context, instance):
    """Force delete an instance in any vm_state/task_state.

    Shares the normal hard-delete flow; only the notification name
    ('force_delete') differs.
    """
    self._delete(context, instance, 'force_delete', self._do_delete,
                 task_state=task_states.DELETING)
def force_stop(self, context, instance, do_cast=True, clean_shutdown=True):
    """Stop an instance, bypassing the checks that ``stop`` applies.

    :param do_cast: if True the RPC is an asynchronous cast,
        otherwise a blocking call
    :param clean_shutdown: request a graceful guest shutdown before
        powering off
    """
    LOG.debug("Going to try to stop instance", instance=instance)

    instance.task_state = task_states.POWERING_OFF
    instance.progress = 0
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.STOP)

    self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
                                      clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
    """Stop an instance.

    Thin wrapper over ``force_stop`` that adds lock/host/cell/state
    checks via the decorators.
    """
    self.force_stop(context, instance, do_cast, clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
    """Start a stopped instance via the compute service."""
    LOG.debug("Going to try to start instance", instance=instance)

    instance.task_state = task_states.POWERING_ON
    instance.save(expected_task_state=[None])

    self._record_action_start(context, instance, instance_actions.START)
    # TODO(yamahata): injected_files isn't supported right now.
    #                 It is used only for osapi. not for ec2 api.
    #                 availability_zone isn't used by run_instance.
    self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
        expected_attrs=None):
    """Get a single instance with the given instance_id.

    :param instance_id: instance UUID, or an integer id (still
        supported for ec2)
    :param want_objects: when False the Instance object is converted
        to a primitive dict before returning
    :param expected_attrs: extra attributes to join; the standard
        metadata/system_metadata/security_groups/info_cache set is
        always included
    :raises: InstanceNotFound if the id is malformed or unknown
    """
    # Work on a copy so the caller's expected_attrs list is not
    # mutated as a side effect (the original extend()ed it in place).
    expected_attrs = list(expected_attrs or [])
    expected_attrs.extend(['metadata', 'system_metadata',
                           'security_groups', 'info_cache'])
    # NOTE(ameade): we still need to support integer ids for ec2
    try:
        if uuidutils.is_uuid_like(instance_id):
            LOG.debug("Fetching instance by UUID",
                      instance_uuid=instance_id)
            instance = objects.Instance.get_by_uuid(
                context, instance_id, expected_attrs=expected_attrs)
        elif strutils.is_int_like(instance_id):
            LOG.debug("Fetching instance by numeric id %s", instance_id)
            instance = objects.Instance.get_by_id(
                context, instance_id, expected_attrs=expected_attrs)
        else:
            LOG.debug("Failed to fetch instance by id %s", instance_id)
            raise exception.InstanceNotFound(instance_id=instance_id)
    except exception.InvalidID:
        LOG.debug("Invalid instance id %s", instance_id)
        raise exception.InstanceNotFound(instance_id=instance_id)

    if not self.skip_policy_check:
        check_policy(context, 'get', instance)

    if not want_objects:
        instance = obj_base.obj_to_primitive(instance)
    return instance
def get_all(self, context, search_opts=None, limit=None, marker=None,
            want_objects=False, expected_attrs=None, sort_keys=None,
            sort_dirs=None):
    """Get all instances filtered by one of the given parameters.

    If there is no filter and the context is an admin, it will retrieve
    all instances in the system.

    Deleted instances will be returned by default, unless there is a
    search option that says otherwise.

    The results will be sorted based on the list of sort keys in the
    'sort_keys' parameter (first value is primary sort key, second value
    is secondary sort key, etc.). For each sort key, the associated sort
    direction is based on the list of sort directions in the 'sort_dirs'
    parameter.
    """
    # TODO(bcwaldon): determine the best argument for target here
    target = {
        'project_id': context.project_id,
        'user_id': context.user_id,
    }

    if not self.skip_policy_check:
        check_policy(context, "get_all", target)

    if search_opts is None:
        search_opts = {}

    # Use the logger's lazy interpolation rather than eager "%"
    # formatting so the work is skipped when DEBUG is off.
    LOG.debug("Searching by: %s", search_opts)

    # Fixups for the DB call
    filters = {}

    def _remap_flavor_filter(flavor_id):
        # Translate a flavor id into the DB's instance_type_id.
        flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
        filters['instance_type_id'] = flavor.id

    def _remap_fixed_ip_filter(fixed_ip):
        # Turn fixed_ip into a regexp match. Since '.' matches
        # any character, we need to use regexp escaping for it.
        filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')

    def _remap_metadata_filter(metadata):
        filters['metadata'] = jsonutils.loads(metadata)

    def _remap_system_metadata_filter(metadata):
        filters['system_metadata'] = jsonutils.loads(metadata)

    # search_option to filter_name mapping.
    filter_mapping = {
        'image': 'image_ref',
        'name': 'display_name',
        'tenant_id': 'project_id',
        'flavor': _remap_flavor_filter,
        'fixed_ip': _remap_fixed_ip_filter,
        'metadata': _remap_metadata_filter,
        'system_metadata': _remap_system_metadata_filter}

    # copy from search_opts, doing various remappings as necessary
    for opt, value in six.iteritems(search_opts):
        # Do remappings.
        # Values not in the filter_mapping table are copied as-is.
        # If remapping is None, option is not copied
        # If the remapping is a string, it is the filter_name to use
        try:
            remap_object = filter_mapping[opt]
        except KeyError:
            filters[opt] = value
        else:
            # Remaps are strings to translate to, or functions to call
            # to do the translating as defined by the table above.
            if isinstance(remap_object, six.string_types):
                filters[remap_object] = value
            else:
                try:
                    remap_object(value)
                except ValueError:
                    # We already know we can't match the filter, so
                    # return an empty list
                    return []

    # IP address filtering cannot be applied at the DB layer, remove any DB
    # limit so that it can be applied after the IP filter.
    filter_ip = 'ip6' in filters or 'ip' in filters
    orig_limit = limit
    if filter_ip and limit:
        LOG.debug('Removing limit for DB query due to IP filter')
        limit = None

    inst_models = self._get_instances_by_filters(context, filters,
        limit=limit, marker=marker, expected_attrs=expected_attrs,
        sort_keys=sort_keys, sort_dirs=sort_dirs)

    if filter_ip:
        inst_models = self._ip_filter(inst_models, filters, orig_limit)

    if want_objects:
        return inst_models

    # Convert the models to dictionaries
    return [obj_base.obj_to_primitive(inst_model)
            for inst_model in inst_models]
@staticmethod
def _ip_filter(inst_models, filters, limit):
    """Apply the 'ip'/'ip6' regex filters client-side.

    Each instance's network info is matched against the IPv4 ('ip')
    and IPv6 ('ip6') regex filters; collection stops once ``limit``
    matches have been gathered (no limit when falsy).
    """
    ipv4_re = re.compile(str(filters.get('ip')))
    ipv6_re = re.compile(str(filters.get('ip6')))

    def _matches(inst):
        # Walk every fixed IP on every VIF looking for a regex hit.
        nw_info = compute_utils.get_nw_info_for_instance(inst)
        for vif in nw_info:
            for fixed_ip in vif.fixed_ips():
                address = fixed_ip.get('address')
                if not address:
                    continue
                version = fixed_ip.get('version')
                if version == 4 and ipv4_re.match(address):
                    return True
                if version == 6 and ipv6_re.match(address):
                    return True
        return False

    matched = []
    for inst in inst_models:
        if not _matches(inst):
            continue
        matched.append(inst)
        if limit and len(matched) == limit:
            break
    return objects.InstanceList(objects=matched)
def _get_instances_by_filters(self, context, filters,
                              limit=None, marker=None, expected_attrs=None,
                              sort_keys=None, sort_dirs=None):
    """Fetch an InstanceList from the DB.

    Always joins the standard metadata/system_metadata/info_cache/
    security_groups columns, plus any caller-requested extras.
    """
    fields = ['metadata', 'system_metadata', 'info_cache',
              'security_groups'] + list(expected_attrs or [])
    return objects.InstanceList.get_by_filters(
        context, filters=filters, limit=limit, marker=marker,
        expected_attrs=fields, sort_keys=sort_keys, sort_dirs=sort_dirs)
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
           extra_properties=None):
    """Backup the given instance

    :param instance: nova.objects.instance.Instance object
    :param name: name of the backup
    :param backup_type: 'daily' or 'weekly'
    :param rotation: int representing how many backups to keep around;
        None if rotation shouldn't be used (as in the case of snapshots)
    :param extra_properties: dict of extra image properties to include
        when creating the image.
    :returns: A dict containing image metadata
    :raises: InvalidRequest for volume-backed instances, which cannot
        be backed up this way
    """
    # Guard against the default extra_properties=None:
    # dict(None, ...) raises TypeError, so substitute an empty dict.
    props_copy = dict(extra_properties or {}, backup_type=backup_type)

    if self.is_volume_backed_instance(context, instance):
        # TODO(flwang): The log level will be changed to INFO after
        # string freeze (Liberty).
        LOG.debug("It's not supported to backup volume backed instance.",
                  context=context, instance=instance)
        raise exception.InvalidRequest()
    else:
        image_meta = self._create_image(context, instance,
                                        name, 'backup',
                                        extra_properties=props_copy)

    # NOTE(comstud): Any changes to this method should also be made
    # to the backup_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_BACKUP
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.backup_instance(context, instance,
                                        image_meta['id'],
                                        backup_type,
                                        rotation)
    return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
    """Snapshot the given instance.

    Reserves a new image in the image service, moves the instance to
    IMAGE_SNAPSHOT_PENDING and asks the compute service to upload the
    snapshot.

    :param instance: nova.objects.instance.Instance object
    :param name: name of the snapshot
    :param extra_properties: dict of extra image properties to include
        when creating the image.
    :returns: A dict containing image metadata
    """
    image_meta = self._create_image(context, instance, name,
                                    'snapshot',
                                    extra_properties=extra_properties)

    # NOTE(comstud): Any changes to this method should also be made
    # to the snapshot_instance() method in nova/cells/messaging.py
    instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
    instance.save(expected_task_state=[None])

    self.compute_rpcapi.snapshot_instance(context, instance,
                                          image_meta['id'])

    return image_meta
def _create_image(self, context, instance, name, image_type,
                  extra_properties=None):
    """Create new image entry in the image service. This new image
    will be reserved for the compute manager to upload a snapshot
    or backup.

    :param context: security context
    :param instance: nova.objects.instance.Instance object
    :param name: string for name of the snapshot
    :param image_type: snapshot | backup
    :param extra_properties: dict of extra image properties to include
    :returns: the image metadata returned by the image service
    """
    if extra_properties is None:
        extra_properties = {}
    instance_uuid = instance.uuid

    properties = {
        'instance_uuid': instance_uuid,
        'user_id': str(context.user_id),
        'image_type': image_type,
    }

    sent_meta = utils.get_image_from_system_metadata(
        instance.system_metadata)

    # Delete properties that are non-inheritable.  Iterate over a
    # snapshot of the keys: deleting while iterating the live keys()
    # view raises RuntimeError on Python 3.
    image_props = sent_meta.get("properties", {})
    for key in list(image_props.keys()):
        if key in CONF.non_inheritable_image_properties:
            del image_props[key]

    sent_meta['name'] = name
    sent_meta['is_public'] = False

    # The properties set up above and in extra_properties have precedence
    properties.update(extra_properties or {})
    sent_meta['properties'].update(properties)

    return self.image_api.create(context, sent_meta)
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
                           extra_properties=None):
    """Snapshot the given volume-backed instance.

    Quiesces an ACTIVE guest when possible, snapshots each attached
    volume, then creates a zero-size image whose properties carry the
    resulting block device mapping.

    :param instance: nova.objects.instance.Instance object
    :param image_meta: metadata for the new image
    :param name: name of the backup or snapshot
    :param extra_properties: dict of extra image properties to include
    :returns: the new image metadata
    """
    image_meta['name'] = name
    image_meta['is_public'] = False
    properties = image_meta['properties']
    if instance.root_device_name:
        properties['root_device_name'] = instance.root_device_name
    properties.update(extra_properties or {})

    quiesced = False
    if instance.vm_state == vm_states.ACTIVE:
        try:
            self.compute_rpcapi.quiesce_instance(context, instance)
            quiesced = True
        except (exception.InstanceQuiesceNotSupported,
                exception.NovaException, NotImplementedError) as err:
            # Quiesce is best-effort unless the image explicitly
            # requires it via os_require_quiesce.
            if strutils.bool_from_string(properties.get(
                    'os_require_quiesce')):
                raise
            else:
                LOG.info(_LI('Skipping quiescing instance: '
                             '%(reason)s.'), {'reason': err},
                         context=context, instance=instance)

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    mapping = []
    for bdm in bdms:
        if bdm.no_device:
            continue

        if bdm.is_volume:
            # create snapshot based on volume_id
            volume = self.volume_api.get(context, bdm.volume_id)
            # NOTE(yamahata): Should we wait for snapshot creation?
            #                 Linux LVM snapshot creation completes in
            #                 short time, it doesn't matter for now.
            name = _('snapshot for %s') % image_meta['name']
            snapshot = self.volume_api.create_snapshot_force(
                context, volume['id'], name, volume['display_description'])
            mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
                                                          bdm)
            mapping_dict = mapping_dict.get_image_mapping()
        else:
            mapping_dict = bdm.get_image_mapping()

        mapping.append(mapping_dict)

    if quiesced:
        self.compute_rpcapi.unquiesce_instance(context, instance, mapping)

    # NOTE (ndipanov): Remove swap/ephemerals from mappings as they will be
    # in the block_device_mapping for the new image.
    image_mappings = properties.get('mappings')
    if image_mappings:
        properties['mappings'] = [m for m in image_mappings
                                  if not block_device.is_swap_or_ephemeral(
                                      m['virtual'])]

    if mapping:
        properties['block_device_mapping'] = mapping
        properties['bdm_v2'] = True

    # These attributes belong to the image being snapshotted, not the
    # new image record we are about to create.
    for attr in ('status', 'location', 'id', 'owner'):
        image_meta.pop(attr, None)

    # the new image is simply a bucket of properties (particularly the
    # block device mapping, kernel and ramdisk IDs) with no image data,
    # hence the zero size
    image_meta['size'] = 0

    return self.image_api.create(context, image_meta)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
                          vm_states.ALLOW_SOFT_REBOOT +
                          vm_states.ALLOW_HARD_REBOOT),
                      task_state=[None, task_states.REBOOTING,
                                  task_states.REBOOT_PENDING,
                                  task_states.REBOOT_STARTED,
                                  task_states.REBOOTING_HARD,
                                  task_states.RESUMING,
                                  task_states.UNPAUSING,
                                  task_states.PAUSING,
                                  task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
    """Reboot the given instance.

    :param reboot_type: 'SOFT' or 'HARD'.  A SOFT reboot is only
        allowed from a soft-rebootable vm_state with no task in
        flight; a HARD reboot may also pre-empt an in-progress reboot
        or similar task.
    :raises: InstanceInvalidState when a SOFT reboot is requested in
        an incompatible vm_state or task_state
    """
    if (reboot_type == 'SOFT' and
            (instance.vm_state not in vm_states.ALLOW_SOFT_REBOOT)):
        raise exception.InstanceInvalidState(
            attr='vm_state',
            instance_uuid=instance.uuid,
            state=instance.vm_state,
            method='soft reboot')
    if reboot_type == 'SOFT' and instance.task_state is not None:
        raise exception.InstanceInvalidState(
            attr='task_state',
            instance_uuid=instance.uuid,
            state=instance.task_state,
            method='reboot')
    expected_task_state = [None]
    if reboot_type == 'HARD':
        # A hard reboot may interrupt these in-flight tasks.
        expected_task_state.extend([task_states.REBOOTING,
                                    task_states.REBOOT_PENDING,
                                    task_states.REBOOT_STARTED,
                                    task_states.REBOOTING_HARD,
                                    task_states.RESUMING,
                                    task_states.UNPAUSING,
                                    task_states.SUSPENDING])
    state = {'SOFT': task_states.REBOOTING,
             'HARD': task_states.REBOOTING_HARD}[reboot_type]
    instance.task_state = state
    instance.save(expected_task_state=expected_task_state)

    self._record_action_start(context, instance, instance_actions.REBOOT)

    self.compute_rpcapi.reboot_instance(context, instance=instance,
                                        block_device_info=None,
                                        reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
            files_to_inject=None, **kwargs):
    """Rebuild the given instance with the provided attributes.

    :param image_href: image to rebuild onto (may differ from the
        instance's current image)
    :param admin_password: new admin password for the rebuilt instance
    :param files_to_inject: optional list of files to inject
    :param kwargs: extra instance attribute updates; recognized keys
        include 'metadata', 'preserve_ephemeral' and 'auto_disk_config'
    """
    orig_image_ref = instance.image_ref or ''
    files_to_inject = files_to_inject or []
    metadata = kwargs.get('metadata', {})
    preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
    auto_disk_config = kwargs.get('auto_disk_config')

    image_id, image = self._get_image(context, image_href)
    self._check_auto_disk_config(image=image, **kwargs)

    flavor = instance.get_flavor()
    self._checks_for_create_and_rebuild(context, image_id, image,
                                        flavor, metadata, files_to_inject)

    kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
        context, None, None, image)

    def _reset_image_metadata():
        """Remove old image properties that we're storing as instance
        system metadata. These properties start with 'image_'.
        Then add the properties for the new image.
        """
        # FIXME(comstud): There's a race condition here in that if
        # the system_metadata for this instance is updated after
        # we do the previous save() and before we update.. those
        # other updates will be lost. Since this problem exists in
        # a lot of other places, I think it should be addressed in
        # a DB layer overhaul.
        orig_sys_metadata = dict(instance.system_metadata)
        # Remove the old keys.  Iterate over a snapshot of the keys:
        # deleting while iterating the live keys() view raises
        # RuntimeError on Python 3.
        for key in list(instance.system_metadata.keys()):
            if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
                del instance.system_metadata[key]

        # Add the new ones
        new_sys_metadata = utils.get_system_metadata_from_image(
            image, flavor)

        instance.system_metadata.update(new_sys_metadata)
        instance.save()
        return orig_sys_metadata

    # Since image might have changed, we may have new values for
    # os_type, vm_mode, etc
    options_from_image = self._inherit_properties_from_image(
        image, auto_disk_config)
    instance.update(options_from_image)

    instance.task_state = task_states.REBUILDING
    instance.image_ref = image_href
    instance.kernel_id = kernel_id or ""
    instance.ramdisk_id = ramdisk_id or ""
    instance.progress = 0
    instance.update(kwargs)
    instance.save(expected_task_state=[None])

    # On a rebuild, since we're potentially changing images, we need to
    # wipe out the old image properties that we're storing as instance
    # system metadata... and copy in the properties for the new image.
    orig_sys_metadata = _reset_image_metadata()

    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)

    self._record_action_start(context, instance, instance_actions.REBUILD)

    self.compute_task_api.rebuild_instance(context, instance=instance,
        new_pass=admin_password, injected_files=files_to_inject,
        image_ref=image_href, orig_image_ref=orig_image_ref,
        orig_sys_metadata=orig_sys_metadata, bdms=bdms,
        preserve_ephemeral=preserve_ephemeral, host=instance.host,
        kwargs=kwargs)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.RESIZED])
    def revert_resize(self, context, instance):
        """Reverts a resize, deleting the 'new' instance in the process.

        Looks up the 'finished' migration record for the instance,
        reserves quota to undo the earlier upsize, flips the instance
        into RESIZE_REVERTING and hands the actual revert to the
        destination compute host over RPC.

        :param context: nova request context
        :param instance: instance object in the RESIZED vm_state
        """
        elevated = context.elevated()
        migration = objects.Migration.get_by_instance_and_status(
            elevated, instance.uuid, 'finished')
        # reverse quota reservation for increased resource usage
        deltas = self._reverse_upsize_quota_delta(context, migration)
        quotas = self._reserve_quota_delta(context, deltas, instance)
        instance.task_state = task_states.RESIZE_REVERTING
        try:
            instance.save(expected_task_state=[None])
        except Exception:
            # If the state transition is lost to a race (task_state no
            # longer None), release the reservation before re-raising.
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        migration.status = 'reverting'
        migration.save()
        # With cells, the best we can do right now is commit the reservations
        # immediately...
        if CONF.cells.enable:
            quotas.commit()
        self._record_action_start(context, instance,
                                  instance_actions.REVERT_RESIZE)
        self.compute_rpcapi.revert_resize(context, instance,
                                          migration,
                                          migration.dest_compute,
                                          quotas.reservations or [])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.RESIZED])
    def confirm_resize(self, context, instance, migration=None):
        """Confirms a migration/resize and deletes the 'old' instance.

        :param context: nova request context
        :param instance: instance object in the RESIZED vm_state
        :param migration: optional Migration object; when None, the
            'finished' migration record for the instance is looked up
        """
        elevated = context.elevated()
        if migration is None:
            migration = objects.Migration.get_by_instance_and_status(
                elevated, instance.uuid, 'finished')
        # reserve quota only for any decrease in resource usage
        deltas = self._downsize_quota_delta(context, instance)
        quotas = self._reserve_quota_delta(context, deltas, instance)
        migration.status = 'confirming'
        migration.save()
        # With cells, the best we can do right now is commit the reservations
        # immediately...
        if CONF.cells.enable:
            quotas.commit()
        self._record_action_start(context, instance,
                                  instance_actions.CONFIRM_RESIZE)
        self.compute_rpcapi.confirm_resize(context,
                                           instance,
                                           migration,
                                           migration.source_compute,
                                           quotas.reservations or [])
@staticmethod
def _resize_quota_delta(context, new_flavor,
old_flavor, sense, compare):
"""Calculate any quota adjustment required at a particular point
in the resize cycle.
:param context: the request context
:param new_flavor: the target instance type
:param old_flavor: the original instance type
:param sense: the sense of the adjustment, 1 indicates a
forward adjustment, whereas -1 indicates a
reversal of a prior adjustment
:param compare: the direction of the comparison, 1 indicates
we're checking for positive deltas, whereas
-1 indicates negative deltas
"""
def _quota_delta(resource):
return sense * (new_flavor[resource] - old_flavor[resource])
deltas = {}
if compare * _quota_delta('vcpus') > 0:
deltas['cores'] = _quota_delta('vcpus')
if compare * _quota_delta('memory_mb') > 0:
deltas['ram'] = _quota_delta('memory_mb')
return deltas
    @staticmethod
    def _upsize_quota_delta(context, new_flavor, old_flavor):
        """Calculate deltas required to adjust quota for an instance upsize.

        Returns only the positive (growing) deltas; see
        ``_resize_quota_delta`` for the sense/compare semantics.
        """
        return API._resize_quota_delta(context, new_flavor, old_flavor, 1, 1)
    @staticmethod
    def _reverse_upsize_quota_delta(context, migration_ref):
        """Calculate deltas required to reverse a prior upsizing
        quota adjustment.

        The old/new flavors are read from the migration record; the
        sense and compare arguments are both -1 so only the negated
        growth deltas are produced.
        """
        old_flavor = objects.Flavor.get_by_id(
            context, migration_ref['old_instance_type_id'])
        new_flavor = objects.Flavor.get_by_id(
            context, migration_ref['new_instance_type_id'])
        return API._resize_quota_delta(context, new_flavor, old_flavor, -1, -1)
    @staticmethod
    def _downsize_quota_delta(context, instance):
        """Calculate deltas required to adjust quota for an instance downsize.

        Uses the instance's stashed 'old' and 'new' flavors; returns
        only the negative (shrinking) deltas.
        """
        old_flavor = instance.get_flavor('old')
        new_flavor = instance.get_flavor('new')
        return API._resize_quota_delta(context, new_flavor, old_flavor, 1, -1)
    @staticmethod
    def _reserve_quota_delta(context, deltas, instance):
        """If there are deltas to reserve, construct a Quotas object and
        reserve the deltas for the given project.

        @param context: The nova request context.
        @param deltas: A dictionary of the proposed delta changes.
        @param instance: The instance we're operating on, so that
                         quotas can use the correct project_id/user_id.
        @return: nova.objects.quotas.Quotas
        """
        quotas = objects.Quotas(context=context)
        # An empty deltas dict means no reservation is needed; return an
        # unreserved Quotas object so callers can still call
        # commit()/rollback() unconditionally.
        if deltas:
            project_id, user_id = quotas_obj.ids_from_instance(context,
                                                               instance)
            quotas.reserve(project_id=project_id, user_id=user_id,
                           **deltas)
        return quotas
@staticmethod
def _resize_cells_support(context, quotas, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit()
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration(context=context.elevated())
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.migration_type = (
mig.old_instance_type_id != mig.new_instance_type_id and
'resize' or 'migration')
mig.create()
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
    def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
               **extra_instance_updates):
        """Resize (ie, migrate) a running instance.

        If flavor_id is None, the process is considered a migration, keeping
        the original flavor_id. If flavor_id is not None, the instance should
        be migrated to a new host and resized to the new flavor_id.

        :param context: nova request context
        :param instance: instance object (ACTIVE or STOPPED)
        :param flavor_id: target flavor id, or None for a pure migration
        :param clean_shutdown: passed through to the conductor task API
        :param extra_instance_updates: extra fields applied to the
            instance before scheduling
        :raises CannotResizeDisk: target flavor has zero root disk while
            the current one does not
        :raises FlavorNotFound: target flavor missing or disabled
        :raises CannotResizeToSameFlavor: explicit resize to same flavor
            outside a compute cell
        :raises TooManyInstances: quota headroom exhausted for the upsize
        """
        self._check_auto_disk_config(instance, **extra_instance_updates)
        current_instance_type = instance.get_flavor()
        # If flavor_id is not provided, only migrate the instance.
        if not flavor_id:
            LOG.debug("flavor_id is None. Assuming migration.",
                      instance=instance)
            new_instance_type = current_instance_type
        else:
            new_instance_type = flavors.get_flavor_by_flavor_id(
                flavor_id, read_deleted="no")
            if (new_instance_type.get('root_gb') == 0 and
                current_instance_type.get('root_gb') != 0):
                reason = _('Resize to zero disk flavor is not allowed.')
                raise exception.CannotResizeDisk(reason=reason)
        if not new_instance_type:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        current_instance_type_name = current_instance_type['name']
        new_instance_type_name = new_instance_type['name']
        LOG.debug("Old instance type %(current_instance_type_name)s, "
                  " new instance type %(new_instance_type_name)s",
                  {'current_instance_type_name': current_instance_type_name,
                   'new_instance_type_name': new_instance_type_name},
                  instance=instance)
        same_instance_type = (current_instance_type['id'] ==
                              new_instance_type['id'])
        # NOTE(sirp): We don't want to force a customer to change their flavor
        # when Ops is migrating off of a failed host.
        if not same_instance_type and new_instance_type.get('disabled'):
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        if same_instance_type and flavor_id and self.cell_type != 'compute':
            raise exception.CannotResizeToSameFlavor()
        # ensure there is sufficient headroom for upsizes
        if flavor_id:
            deltas = self._upsize_quota_delta(context, new_instance_type,
                                              current_instance_type)
            try:
                quotas = self._reserve_quota_delta(context, deltas, instance)
            except exception.OverQuota as exc:
                quotas = exc.kwargs['quotas']
                overs = exc.kwargs['overs']
                usages = exc.kwargs['usages']
                headroom = self._get_headroom(quotas, usages, deltas)
                # Report only the first exceeded resource in detail.
                resource = overs[0]
                used = quotas[resource] - headroom[resource]
                total_allowed = used + headroom[resource]
                overs = ','.join(overs)
                LOG.warning(_LW("%(overs)s quota exceeded for %(pid)s,"
                                " tried to resize instance."),
                            {'overs': overs, 'pid': context.project_id})
                raise exception.TooManyInstances(overs=overs,
                                                 req=deltas[resource],
                                                 used=used,
                                                 allowed=total_allowed,
                                                 resource=resource)
        else:
            # Pure migration: nothing to reserve.
            quotas = objects.Quotas(context=context)
        instance.task_state = task_states.RESIZE_PREP
        instance.progress = 0
        instance.update(extra_instance_updates)
        instance.save(expected_task_state=[None])
        filter_properties = {'ignore_hosts': []}
        if not CONF.allow_resize_to_same_host:
            filter_properties['ignore_hosts'].append(instance.host)
        if self.cell_type == 'api':
            # Commit reservations early and create migration record.
            self._resize_cells_support(context, quotas, instance,
                                       current_instance_type,
                                       new_instance_type)
        if not flavor_id:
            self._record_action_start(context, instance,
                                      instance_actions.MIGRATE)
        else:
            self._record_action_start(context, instance,
                                      instance_actions.RESIZE)
        scheduler_hint = {'filter_properties': filter_properties}
        self.compute_task_api.resize_instance(context, instance,
                extra_instance_updates, scheduler_hint=scheduler_hint,
                flavor=new_instance_type,
                reservations=quotas.reservations or [],
                clean_shutdown=clean_shutdown)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.PAUSED, vm_states.SUSPENDED])
    def shelve(self, context, instance, clean_shutdown=True):
        """Shelve an instance.

        Shuts down an instance and frees it up to be removed from the
        hypervisor.  For image-backed instances a snapshot image named
        '<display_name>-shelved' is created first; volume-backed
        instances are offloaded directly.
        """
        instance.task_state = task_states.SHELVING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.SHELVE)
        if not self.is_volume_backed_instance(context, instance):
            name = '%s-shelved' % instance.display_name
            image_meta = self._create_image(context, instance, name,
                                            'snapshot')
            image_id = image_meta['id']
            self.compute_rpcapi.shelve_instance(context, instance=instance,
                    image_id=image_id, clean_shutdown=clean_shutdown)
        else:
            # No snapshot is possible/needed for a volume-backed instance.
            self.compute_rpcapi.shelve_offload_instance(context,
                    instance=instance, clean_shutdown=clean_shutdown)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SHELVED])
    def shelve_offload(self, context, instance, clean_shutdown=True):
        """Remove a shelved instance from the hypervisor.

        The instance must already be in the SHELVED vm_state; the work
        is delegated to the compute host over RPC.
        """
        instance.task_state = task_states.SHELVING_OFFLOADING
        instance.save(expected_task_state=[None])
        self.compute_rpcapi.shelve_offload_instance(context, instance=instance,
                clean_shutdown=clean_shutdown)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.SHELVED,
                                    vm_states.SHELVED_OFFLOADED])
    def unshelve(self, context, instance):
        """Restore a shelved instance.

        Marks the instance UNSHELVING, records the action, and delegates
        the restore to the conductor task API (which handles scheduling
        for offloaded instances).
        """
        instance.task_state = task_states.UNSHELVING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNSHELVE)
        self.compute_task_api.unshelve_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    def add_fixed_ip(self, context, instance, network_id):
        """Add a fixed IP from the specified network to the given instance.

        Fire-and-forget RPC to the instance's compute host.
        """
        self.compute_rpcapi.add_fixed_ip_to_instance(context,
                instance=instance, network_id=network_id)
    @wrap_check_policy
    @check_instance_lock
    def remove_fixed_ip(self, context, instance, address):
        """Remove the given fixed IP address from the instance.

        Fire-and-forget RPC to the instance's compute host.
        """
        self.compute_rpcapi.remove_fixed_ip_from_instance(context,
                instance=instance, address=address)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def pause(self, context, instance):
        """Pause the given instance.

        Marks it PAUSING (save() guards against a concurrent task via
        expected_task_state), records the action, then RPCs the host.
        """
        instance.task_state = task_states.PAUSING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.PAUSE)
        self.compute_rpcapi.pause_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.PAUSED])
    def unpause(self, context, instance):
        """Unpause the given instance.

        Mirror of pause(): state transition, action record, then RPC.
        """
        instance.task_state = task_states.UNPAUSING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNPAUSE)
        self.compute_rpcapi.unpause_instance(context, instance)
    @wrap_check_policy
    def get_diagnostics(self, context, instance):
        """Retrieve driver-specific diagnostics for the given instance."""
        return self.compute_rpcapi.get_diagnostics(context, instance=instance)
    @wrap_check_policy
    def get_instance_diagnostics(self, context, instance):
        """Retrieve standardized diagnostics for the given instance."""
        return self.compute_rpcapi.get_instance_diagnostics(context,
                                                            instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def suspend(self, context, instance):
        """Suspend the given instance.

        State transition, action record, then RPC to the compute host.
        """
        instance.task_state = task_states.SUSPENDING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.SUSPEND)
        self.compute_rpcapi.suspend_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.SUSPENDED])
    def resume(self, context, instance):
        """Resume the given (suspended) instance.

        Mirror of suspend(): state transition, action record, then RPC.
        """
        instance.task_state = task_states.RESUMING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.RESUME)
        self.compute_rpcapi.resume_instance(context, instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def rescue(self, context, instance, rescue_password=None,
               rescue_image_ref=None, clean_shutdown=True):
        """Rescue the given instance.

        :param rescue_password: admin password to set in the rescue env
        :param rescue_image_ref: optional alternate image to boot from
        :raises InstanceNotRescuable: the instance is volume-backed
        """
        bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
            context, instance.uuid)
        # Every attached volume must really be attached before we rescue.
        for bdm in bdms:
            if bdm.volume_id:
                vol = self.volume_api.get(context, bdm.volume_id)
                self.volume_api.check_attached(context, vol)
        if self.is_volume_backed_instance(context, instance, bdms):
            reason = _("Cannot rescue a volume-backed instance")
            raise exception.InstanceNotRescuable(instance_id=instance.uuid,
                                                 reason=reason)
        instance.task_state = task_states.RESCUING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.RESCUE)
        self.compute_rpcapi.rescue_instance(context, instance=instance,
            rescue_password=rescue_password, rescue_image_ref=rescue_image_ref,
            clean_shutdown=clean_shutdown)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.RESCUED])
    def unrescue(self, context, instance):
        """Unrescue the given instance.

        State transition, action record, then RPC to the compute host.
        """
        instance.task_state = task_states.UNRESCUING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.UNRESCUE)
        self.compute_rpcapi.unrescue_instance(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE])
    def set_admin_password(self, context, instance, password=None):
        """Set the root/admin password for the given instance.

        @param context: Nova auth context.
        @param instance: Nova instance object.
        @param password: The admin password for the instance.
        """
        instance.task_state = task_states.UPDATING_PASSWORD
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance,
                                  instance_actions.CHANGE_PASSWORD)
        self.compute_rpcapi.set_admin_password(context,
                                               instance=instance,
                                               new_pass=password)
    @wrap_check_policy
    @check_instance_host
    def get_vnc_console(self, context, instance, console_type):
        """Get a url to an instance's VNC console.

        Fetches connection details from the compute host, registers the
        access token with consoleauth, and returns the access URL.
        """
        connect_info = self.compute_rpcapi.get_vnc_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance.uuid,
                access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_spice_console(self, context, instance, console_type):
        """Get a url to an instance's SPICE console.

        Fetches connection details from the compute host, registers the
        access token with consoleauth, and returns the access URL.
        """
        connect_info = self.compute_rpcapi.get_spice_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance.uuid,
                access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_rdp_console(self, context, instance, console_type):
        """Get a url to an instance's RDP console.

        Fetches connection details from the compute host, registers the
        access token with consoleauth, and returns the access URL.
        """
        connect_info = self.compute_rpcapi.get_rdp_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance.uuid,
                access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_serial_console(self, context, instance, console_type):
        """Get a url to a serial console.

        Fetches connection details from the compute host, registers the
        access token with consoleauth, and returns the access URL.
        """
        connect_info = self.compute_rpcapi.get_serial_console(context,
                instance=instance, console_type=console_type)
        self.consoleauth_rpcapi.authorize_console(context,
                connect_info['token'], console_type,
                connect_info['host'], connect_info['port'],
                connect_info['internal_access_path'], instance.uuid,
                access_url=connect_info['access_url'])
        return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
    @wrap_check_policy
    @check_instance_host
    def get_console_output(self, context, instance, tail_length=None):
        """Get console output for an instance.

        :param tail_length: if set, only the last ``tail_length`` lines
        """
        return self.compute_rpcapi.get_console_output(context,
                instance=instance, tail_length=tail_length)
    @wrap_check_policy
    def lock(self, context, instance):
        """Lock the given instance.

        The lock is attributed to 'owner' when the caller's project owns
        the instance, 'admin' otherwise.  An admin may re-lock an
        already owner-locked instance (escalating locked_by to 'admin');
        an owner re-locking their own locked instance is a no-op.
        """
        # If the caller is the owner and the instance is already locked,
        # there is nothing to update; admins fall through so they can
        # take over the lock.
        is_owner = instance.project_id == context.project_id
        if instance.locked and is_owner:
            return
        context = context.elevated()
        LOG.debug('Locking', context=context, instance=instance)
        instance.locked = True
        instance.locked_by = 'owner' if is_owner else 'admin'
        instance.save()
def is_expected_locked_by(self, context, instance):
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
return False
return True
    @wrap_check_policy
    def unlock(self, context, instance):
        """Unlock the given instance.

        Clears both the lock flag and its attribution.  Callers whose
        expected attribution does not match the lock must pass the
        'unlock_override' policy check first.
        """
        # If the instance was locked by someone else, check
        # that we're allowed to override the lock
        if not self.skip_policy_check and not self.is_expected_locked_by(
                context, instance):
            check_policy(context, 'unlock_override', instance)
        context = context.elevated()
        LOG.debug('Unlocking', context=context, instance=instance)
        instance.locked = False
        instance.locked_by = None
        instance.save()
    @wrap_check_policy
    def get_lock(self, context, instance):
        """Return the boolean state of given instance's lock.

        Re-fetches the instance by uuid so the value is current.
        """
        return self.get(context, instance.uuid)['locked']
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def reset_network(self, context, instance):
        """Reset networking on the instance (delegated to its host)."""
        self.compute_rpcapi.reset_network(context, instance=instance)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_cell
    def inject_network_info(self, context, instance):
        """Inject network info for the instance (delegated to its host)."""
        self.compute_rpcapi.inject_network_info(context, instance=instance)
    def _attach_volume(self, context, instance, volume_id, device,
                       disk_bus, device_type):
        """Attach an existing volume to an existing instance.

        This method is separated to make it possible for cells version
        to override it.

        :returns: the device name assigned to the new block device
            mapping
        """
        # NOTE(vish): This is done on the compute host because we want
        #             to avoid a race where two devices are requested at
        #             the same time. When db access is removed from
        #             compute, the bdm will be created here and we will
        #             have to make sure that they are assigned atomically.
        volume_bdm = self.compute_rpcapi.reserve_block_device_name(
            context, instance, device, volume_id, disk_bus=disk_bus,
            device_type=device_type)
        try:
            volume = self.volume_api.get(context, volume_id)
            self.volume_api.check_attach(context, volume, instance=instance)
            self.volume_api.reserve_volume(context, volume_id)
            self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
        except Exception:
            # Undo the bdm reservation on any failure, then re-raise.
            with excutils.save_and_reraise_exception():
                volume_bdm.destroy()
        return volume_bdm.device_name
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED])
    def attach_volume(self, context, instance, volume_id, device=None,
                      disk_bus=None, device_type=None):
        """Attach an existing volume to an existing instance.

        :raises InvalidDevicePath: the requested device name is not a
            valid block device path
        """
        # NOTE(vish): Fail fast if the device is not going to pass. This
        #             will need to be removed along with the test if we
        #             change the logic in the manager for what constitutes
        #             a valid device.
        if device and not block_device.match_device(device):
            raise exception.InvalidDevicePath(path=device)
        return self._attach_volume(context, instance, volume_id, device,
                                   disk_bus, device_type)
    def _detach_volume(self, context, instance, volume):
        """Detach volume from instance.

        This method is separated to make it easier for cells version
        to override.  Ordering matters: the volume is validated and put
        into 'detaching' before the compute host is asked to detach.
        """
        self.volume_api.check_detach(context, volume)
        self.volume_api.begin_detaching(context, volume['id'])
        self.compute_rpcapi.detach_volume(context, instance=instance,
                volume_id=volume['id'])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED, vm_states.RESIZED,
                                    vm_states.SOFT_DELETED])
    def detach_volume(self, context, instance, volume):
        """Detach a volume from an instance.

        :raises InvalidVolume: the volume is not attached
        :raises VolumeUnattached: the volume is attached to a different
            instance
        """
        if volume['attach_status'] == 'detached':
            msg = _("Volume must be attached in order to detach.")
            raise exception.InvalidVolume(reason=msg)
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if volume['instance_uuid'] != instance.uuid:
            raise exception.VolumeUnattached(volume_id=volume['id'])
        self._detach_volume(context, instance, volume)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED,
                                    vm_states.RESIZED, vm_states.SOFT_DELETED])
    def swap_volume(self, context, instance, old_volume, new_volume):
        """Swap volume attached to an instance.

        Validates both volumes, reserves the new one and begins
        detaching the old one before delegating the swap to the compute
        host; on RPC failure both reservations are rolled back.

        :raises VolumeUnattached: old volume is not attached here
        :raises InvalidVolume: state/size validation failed
        """
        if old_volume['attach_status'] == 'detached':
            raise exception.VolumeUnattached(volume_id=old_volume['id'])
        # The caller likely got the instance from volume['instance_uuid']
        # in the first place, but let's sanity check.
        if old_volume['instance_uuid'] != instance.uuid:
            msg = _("Old volume is attached to a different instance.")
            raise exception.InvalidVolume(reason=msg)
        if new_volume['attach_status'] == 'attached':
            msg = _("New volume must be detached in order to swap.")
            raise exception.InvalidVolume(reason=msg)
        if int(new_volume['size']) < int(old_volume['size']):
            msg = _("New volume must be the same size or larger.")
            raise exception.InvalidVolume(reason=msg)
        self.volume_api.check_detach(context, old_volume)
        self.volume_api.check_attach(context, new_volume, instance=instance)
        self.volume_api.begin_detaching(context, old_volume['id'])
        self.volume_api.reserve_volume(context, new_volume['id'])
        try:
            self.compute_rpcapi.swap_volume(
                    context, instance=instance,
                    old_volume_id=old_volume['id'],
                    new_volume_id=new_volume['id'])
        except Exception:
            # Roll both volume state changes back before re-raising.
            with excutils.save_and_reraise_exception():
                self.volume_api.roll_detaching(context, old_volume['id'])
                self.volume_api.unreserve_volume(context, new_volume['id'])
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED],
                          task_state=[None])
    def attach_interface(self, context, instance, network_id, port_id,
                         requested_ip):
        """Use hotplug to add a network adapter to an instance."""
        return self.compute_rpcapi.attach_interface(context,
            instance=instance, network_id=network_id, port_id=port_id,
            requested_ip=requested_ip)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.STOPPED],
                          task_state=[None])
    def detach_interface(self, context, instance, port_id):
        """Detach a network adapter from an instance."""
        self.compute_rpcapi.detach_interface(context, instance=instance,
            port_id=port_id)
    @wrap_check_policy
    def get_instance_metadata(self, context, instance):
        """Get all metadata associated with an instance."""
        return self.db.instance_metadata_get(context, instance.uuid)
    def get_all_instance_metadata(self, context, search_filts):
        """Get user metadata across all instances matching the filters."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='metadata')
    def get_all_system_metadata(self, context, search_filts):
        """Get system metadata across all instances matching the filters."""
        return self._get_all_instance_metadata(
            context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
instances = self._get_instances_by_filters(context, filters={},
sort_keys=['created_at'],
sort_dirs=['desc'])
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return utils.filter_and_format_resource_metadata('instance', instances,
search_filts, metadata_type)
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def delete_instance_metadata(self, context, instance, key):
        """Delete the given metadata item from an instance.

        The deletion is persisted via the object layer, then the diff is
        pushed to the compute host so the guest view stays in sync.
        """
        instance.delete_metadata_key(key)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff={key: ['-']})
    @wrap_check_policy
    @check_instance_lock
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
                                    vm_states.SUSPENDED, vm_states.STOPPED],
                          task_state=None)
    def update_instance_metadata(self, context, instance,
                                 metadata, delete=False):
        """Updates or creates instance metadata.

        If delete is True, metadata items that are not specified in the
        `metadata` argument will be deleted.

        :returns: the resulting metadata dict
        :raises: quota errors from _check_metadata_properties_quota
        """
        orig = dict(instance.metadata)
        if delete:
            # Replace wholesale with the supplied metadata.
            _metadata = metadata
        else:
            # Merge the supplied items over the existing metadata.
            _metadata = dict(instance.metadata)
            _metadata.update(metadata)
        self._check_metadata_properties_quota(context, _metadata)
        instance.metadata = _metadata
        instance.save()
        # Push only the computed diff to the compute host.
        diff = _diff_dict(orig, instance.metadata)
        self.compute_rpcapi.change_instance_metadata(context,
                                                     instance=instance,
                                                     diff=diff)
        return _metadata
    def get_instance_faults(self, context, instances):
        """Get all faults for a list of instance uuids.

        Performs a per-instance policy check, then fetches the faults in
        one DB call keyed by uuid.  Returns {} for an empty input list.
        """
        if not instances:
            return {}
        for instance in instances:
            check_policy(context, 'get_instance_faults', instance)
        uuids = [instance.uuid for instance in instances]
        return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance.image_ref:
return True
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
root_bdm = bdms.root_bdm()
if not root_bdm:
return False
return root_bdm.is_volume
    @check_instance_lock
    @check_instance_cell
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
    def live_migrate(self, context, instance, block_migration,
                     disk_over_commit, host_name):
        """Migrate a server lively to a new host.

        :param block_migration: whether to do a block (no shared
            storage) migration
        :param disk_over_commit: allow disk over-commit on the target
        :param host_name: target host, or None/'' to let the scheduler
            pick one
        """
        LOG.debug("Going to try to live migrate instance to %s",
                  host_name or "another host", instance=instance)
        instance.task_state = task_states.MIGRATING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance,
                                  instance_actions.LIVE_MIGRATION)
        self.compute_task_api.live_migrate_instance(context, instance,
                host_name, block_migration=block_migration,
                disk_over_commit=disk_over_commit)
    @check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
                                    vm_states.ERROR])
    def evacuate(self, context, instance, host, on_shared_storage,
                 admin_password=None):
        """Running evacuate to target host.

        Checking vm compute host state, if the host not in expected_state,
        raising an exception.

        :param instance: The instance to evacuate
        :param host: Target host. if not set, the scheduler will pick up one
        :param on_shared_storage: True if instance files on shared storage
        :param admin_password: password to set on rebuilt instance
        :raises ComputeServiceInUse: the instance's compute service is
            still reported up (evacuation requires it to be down)
        :returns: result of the conductor rebuild_instance call
        """
        LOG.debug('vm evacuation scheduled', instance=instance)
        inst_host = instance.host
        service = objects.Service.get_by_compute_host(context, inst_host)
        # Evacuation is only valid when the source compute is down;
        # otherwise two hosts could end up running the same instance.
        if self.servicegroup_api.service_is_up(service):
            LOG.error(_LE('Instance compute service state on %s '
                          'expected to be down, but it was up.'), inst_host)
            raise exception.ComputeServiceInUse(host=inst_host)
        instance.task_state = task_states.REBUILDING
        instance.save(expected_task_state=[None])
        self._record_action_start(context, instance, instance_actions.EVACUATE)
        # recreate=True tells the rebuild path this is an evacuation.
        return self.compute_task_api.rebuild_instance(context,
                       instance=instance,
                       new_pass=admin_password,
                       injected_files=None,
                       image_ref=None,
                       orig_image_ref=None,
                       orig_sys_metadata=None,
                       bdms=None,
                       recreate=True,
                       on_shared_storage=on_shared_storage,
                       host=host)
    def get_migrations(self, context, filters):
        """Get all migrations for the given filters."""
        return objects.MigrationList.get_by_filters(context, filters)
    @wrap_check_policy
    def volume_snapshot_create(self, context, volume_id, create_info):
        """Create a snapshot of the volume via the instance it is
        attached to.

        :returns: dict describing the requested snapshot (id/volumeId)
        """
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
                volume_id, create_info)
        snapshot = {
            'snapshot': {
                'id': create_info.get('id'),
                'volumeId': volume_id
            }
        }
        return snapshot
    @wrap_check_policy
    def volume_snapshot_delete(self, context, volume_id, snapshot_id,
                               delete_info):
        """Delete a volume snapshot via the instance the volume is
        attached to.
        """
        bdm = objects.BlockDeviceMapping.get_by_volume_id(
                context, volume_id, expected_attrs=['instance'])
        self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
                volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
# TODO(salv-orlando): Handle exceptions raised by the rpc api layer
# in order to ensure that a failure in processing events on a host
# will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host operations."""
    def __init__(self, rpcapi=None):
        # Allow the RPC client to be injected (e.g. by tests); fall back to
        # the standard compute RPC API.
        self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
        self.servicegroup_api = servicegroup.API()
        super(HostAPI, self).__init__()
    def _assert_host_exists(self, context, host_name, must_be_up=False):
        """Raise HostNotFound if compute host doesn't exist."""
        service = objects.Service.get_by_compute_host(context, host_name)
        if not service:
            raise exception.HostNotFound(host=host_name)
        if must_be_up and not self.servicegroup_api.service_is_up(service):
            raise exception.ComputeServiceUnavailable(host=host_name)
        # Return the canonical host name recorded on the service row, which
        # may differ from the name the caller passed in.
        return service['host']
    @wrap_exception()
    def set_host_enabled(self, context, host_name, enabled):
        """Sets the specified host's ability to accept new instances."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'enabled': enabled}
        # Bracket the RPC call with start/end notifications so external
        # listeners can observe the operation.
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.start',
                                               payload)
        result = self.rpcapi.set_host_enabled(context, enabled=enabled,
                host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_enabled.end',
                                               payload)
        return result
    def get_host_uptime(self, context, host_name):
        """Returns the result of calling "uptime" on the target host."""
        host_name = self._assert_host_exists(context, host_name,
                                             must_be_up=True)
        return self.rpcapi.get_host_uptime(context, host=host_name)
    @wrap_exception()
    def host_power_action(self, context, host_name, action):
        """Reboots, shuts down or powers up the host."""
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'action': action}
        compute_utils.notify_about_host_update(context,
                                               'power_action.start',
                                               payload)
        result = self.rpcapi.host_power_action(context, action=action,
                                               host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'power_action.end',
                                               payload)
        return result
    @wrap_exception()
    def set_host_maintenance(self, context, host_name, mode):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        host_name = self._assert_host_exists(context, host_name)
        payload = {'host_name': host_name, 'mode': mode}
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.start',
                                               payload)
        result = self.rpcapi.host_maintenance_mode(context,
            host_param=host_name, mode=mode, host=host_name)
        compute_utils.notify_about_host_update(context,
                                               'set_maintenance.end',
                                               payload)
        return result
    def service_get_all(self, context, filters=None, set_zones=False):
        """Returns a list of services, optionally filtering the results.
        If specified, 'filters' should be a dictionary containing services
        attributes and matching values. Ie, to get a list of services for
        the 'compute' topic, use filters={'topic': 'compute'}.
        """
        if filters is None:
            filters = {}
        disabled = filters.pop('disabled', None)
        if 'availability_zone' in filters:
            set_zones = True
        services = objects.ServiceList.get_all(context, disabled,
                                               set_zones=set_zones)
        ret_services = []
        for service in services:
            for key, val in six.iteritems(filters):
                if service[key] != val:
                    break
            else:
                # All filters matched (for/else: no break occurred).
                ret_services.append(service)
        return ret_services
    def service_get_by_compute_host(self, context, host_name):
        """Get service entry for the given compute hostname."""
        return objects.Service.get_by_compute_host(context, host_name)
    def _service_update(self, context, host_name, binary, params_to_update):
        """Performs the actual service update operation."""
        service = objects.Service.get_by_args(context, host_name, binary)
        service.update(params_to_update)
        service.save()
        return service
    def service_update(self, context, host_name, binary, params_to_update):
        """Enable / Disable a service.
        For compute services, this stops new builds and migrations going to
        the host.
        """
        return self._service_update(context, host_name, binary,
                                    params_to_update)
    def _service_delete(self, context, service_id):
        """Performs the actual Service deletion operation."""
        objects.Service.get_by_id(context, service_id).destroy()
    def service_delete(self, context, service_id):
        """Deletes the specified service."""
        self._service_delete(context, service_id)
    def instance_get_all_by_host(self, context, host_name):
        """Return all instances on the given host."""
        return objects.InstanceList.get_by_host(context, host_name)
    def task_log_get_all(self, context, task_name, period_beginning,
                         period_ending, host=None, state=None):
        """Return the task logs within a given range, optionally
        filtering by host and/or state.
        """
        return self.db.task_log_get_all(context, task_name,
                                        period_beginning,
                                        period_ending,
                                        host=host,
                                        state=state)
    def compute_node_get(self, context, compute_id):
        """Return compute node entry for particular integer ID."""
        return objects.ComputeNode.get_by_id(context, int(compute_id))
    def compute_node_get_all(self, context):
        """Return all compute node records."""
        return objects.ComputeNodeList.get_all(context)
    def compute_node_search_by_hypervisor(self, context, hypervisor_match):
        # Delegates the hypervisor match entirely to
        # ComputeNodeList.get_by_hypervisor.
        return objects.ComputeNodeList.get_by_hypervisor(context,
                                                         hypervisor_match)
    def compute_node_statistics(self, context):
        """Return DB-level statistics over the compute nodes."""
        return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
    """Sub-set of the Compute Manager API for managing instance actions."""
    def actions_get(self, context, instance):
        """Return every action recorded against the given instance."""
        uuid = instance.uuid
        return objects.InstanceActionList.get_by_instance_uuid(context, uuid)
    def action_get_by_request_id(self, context, instance, request_id):
        """Fetch a single instance action by its originating request id."""
        uuid = instance.uuid
        return objects.InstanceAction.get_by_request_id(context, uuid,
                                                        request_id)
    def action_events_get(self, context, instance, action_id):
        """Return the event records belonging to one instance action."""
        return objects.InstanceActionEventList.get_by_action(context,
                                                             action_id)
class AggregateAPI(base.Base):
    """Sub-set of the Compute Manager API for managing host aggregates."""
    def __init__(self, **kwargs):
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
        self.scheduler_client = scheduler_client.SchedulerClient()
        super(AggregateAPI, self).__init__(**kwargs)
    @wrap_exception()
    def create_aggregate(self, context, aggregate_name, availability_zone):
        """Creates the model for the aggregate."""
        aggregate = objects.Aggregate(context=context)
        aggregate.name = aggregate_name
        if availability_zone:
            aggregate.metadata = {'availability_zone': availability_zone}
        aggregate.create()
        # Keep the scheduler's cached view of aggregates in sync.
        self.scheduler_client.update_aggregates(context, [aggregate])
        return aggregate
    def get_aggregate(self, context, aggregate_id):
        """Get an aggregate by id."""
        return objects.Aggregate.get_by_id(context, aggregate_id)
    def get_aggregate_list(self, context):
        """Get all the aggregates."""
        return objects.AggregateList.get_all(context)
    @wrap_exception()
    def update_aggregate(self, context, aggregate_id, values):
        """Update the properties of an aggregate."""
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        if 'name' in values:
            aggregate.name = values.pop('name')
            aggregate.save()
        # Reject the update if it would place hosts in conflicting
        # availability zones.
        self.is_safe_to_update_az(context, values, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE)
        if values:
            aggregate.update_metadata(values)
            self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated values include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if values.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate
    @wrap_exception()
    def update_aggregate_metadata(self, context, aggregate_id, metadata):
        """Updates the aggregate metadata."""
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
                                  action_name=AGGREGATE_ACTION_UPDATE_META)
        aggregate.update_metadata(metadata)
        self.scheduler_client.update_aggregates(context, [aggregate])
        # If updated metadata include availability_zones, then the cache
        # which stored availability_zones and host need to be reset
        if metadata and metadata.get('availability_zone'):
            availability_zones.reset_cache()
        return aggregate
    @wrap_exception()
    def delete_aggregate(self, context, aggregate_id):
        """Deletes the aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id}
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.start",
                                                    aggregate_payload)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        # Only empty aggregates may be deleted.
        if len(aggregate.hosts) > 0:
            msg = _("Host aggregate is not empty")
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=msg)
        aggregate.destroy()
        self.scheduler_client.delete_aggregate(context, aggregate)
        compute_utils.notify_about_aggregate_update(context,
                                                    "delete.end",
                                                    aggregate_payload)
    def is_safe_to_update_az(self, context, metadata, aggregate,
                             hosts=None,
                             action_name=AGGREGATE_ACTION_ADD):
        """Determine if updates alter an aggregate's availability zone.
        :param context: local context
        :param metadata: Target metadata for updating aggregate
        :param aggregate: Aggregate to update
        :param hosts: Hosts to check. If None, aggregate.hosts is used
        :type hosts: list
        :action_name: Calling method for logging purposes
        """
        if 'availability_zone' in metadata:
            _hosts = hosts or aggregate.hosts
            host_aggregates = objects.AggregateList.get_by_metadata_key(
                context, 'availability_zone', hosts=_hosts)
            # Collect AZs (other than this aggregate's target AZ) that the
            # affected hosts already belong to.
            conflicting_azs = [
                agg.availability_zone for agg in host_aggregates
                if agg.availability_zone != metadata['availability_zone']
                and agg.id != aggregate.id]
            if conflicting_azs:
                msg = _("One or more hosts already in availability zone(s) "
                        "%s") % conflicting_azs
                self._raise_invalid_aggregate_exc(action_name, aggregate.id,
                                                  msg)
    def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason):
        # Map the calling action onto its matching exception type.
        if action_name == AGGREGATE_ACTION_ADD:
            raise exception.InvalidAggregateActionAdd(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE:
            raise exception.InvalidAggregateActionUpdate(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_UPDATE_META:
            raise exception.InvalidAggregateActionUpdateMeta(
                aggregate_id=aggregate_id, reason=reason)
        elif action_name == AGGREGATE_ACTION_DELETE:
            raise exception.InvalidAggregateActionDelete(
                aggregate_id=aggregate_id, reason=reason)
        raise exception.NovaException(
            _("Unexpected aggregate action %s") % action_name)
    def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
        # Update the availability_zone cache to avoid getting wrong
        # availability_zone in cache retention time when add/remove
        # host to/from aggregate.
        if aggregate_meta and aggregate_meta.get('availability_zone'):
            availability_zones.update_host_availability_zone_cache(context,
                                                                   host_name)
    @wrap_exception()
    def add_host_to_aggregate(self, context, aggregate_id, host_name):
        """Adds the host to an aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        objects.Service.get_by_compute_host(context, host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        self.is_safe_to_update_az(context, aggregate.metadata,
                                  hosts=[host_name], aggregate=aggregate)
        aggregate.add_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        # NOTE(jogo): Send message to host to support resource pools
        self.compute_rpcapi.add_aggregate_host(context,
                aggregate=aggregate, host_param=host_name, host=host_name)
        aggregate_payload.update({'name': aggregate['name']})
        compute_utils.notify_about_aggregate_update(context,
                                                    "addhost.end",
                                                    aggregate_payload)
        return aggregate
    @wrap_exception()
    def remove_host_from_aggregate(self, context, aggregate_id, host_name):
        """Removes host from the aggregate."""
        aggregate_payload = {'aggregate_id': aggregate_id,
                             'host_name': host_name}
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.start",
                                                    aggregate_payload)
        # validates the host; ComputeHostNotFound is raised if invalid
        objects.Service.get_by_compute_host(context, host_name)
        aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
        aggregate.delete_host(host_name)
        self.scheduler_client.update_aggregates(context, [aggregate])
        self._update_az_cache_for_host(context, host_name, aggregate.metadata)
        self.compute_rpcapi.remove_aggregate_host(context,
                aggregate=aggregate, host_param=host_name, host=host_name)
        compute_utils.notify_about_aggregate_update(context,
                                                    "removehost.end",
                                                    aggregate_payload)
        return aggregate
class KeypairAPI(base.Base):
    """Subset of the Compute Manager API for managing key pairs."""
    # Notifier and exception-wrapping decorator bound at class level so that
    # every method reports under the 'api' service.
    get_notifier = functools.partial(rpc.get_notifier, service='api')
    wrap_exception = functools.partial(exception.wrap_exception,
                                       get_notifier=get_notifier)
    def _notify(self, context, event_suffix, keypair_name):
        # Emit a 'keypair.<event_suffix>' INFO notification.
        payload = {
            'tenant_id': context.project_id,
            'user_id': context.user_id,
            'key_name': keypair_name,
        }
        notify = self.get_notifier()
        notify.info(context, 'keypair.%s' % event_suffix, payload)
    def _validate_new_key_pair(self, context, user_id, key_name, key_type):
        # NOTE(review): key_type is accepted but never used here -- confirm
        # whether type validation was intended.
        safe_chars = "_- " + string.digits + string.ascii_letters
        clean_value = "".join(x for x in key_name if x in safe_chars)
        if clean_value != key_name:
            raise exception.InvalidKeypair(
                reason=_("Keypair name contains unsafe characters"))
        try:
            utils.check_string_length(key_name, min_length=1, max_length=255)
        except exception.InvalidInput:
            raise exception.InvalidKeypair(
                reason=_('Keypair name must be string and between '
                         '1 and 255 characters long'))
        # Enforce the per-user keypair quota before another one is created.
        count = objects.Quotas.count(context, 'key_pairs', user_id)
        try:
            objects.Quotas.limit_check(context, key_pairs=count + 1)
        except exception.OverQuota:
            raise exception.KeypairLimitExceeded()
    @wrap_exception()
    def import_key_pair(self, context, user_id, key_name, public_key,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Import a key pair using an existing public key."""
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        self._notify(context, 'import.start', key_name)
        fingerprint = self._generate_fingerprint(public_key, key_type)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create()
        self._notify(context, 'import.end', key_name)
        return keypair
    @wrap_exception()
    def create_key_pair(self, context, user_id, key_name,
                        key_type=keypair_obj.KEYPAIR_TYPE_SSH):
        """Create a new key pair."""
        self._validate_new_key_pair(context, user_id, key_name, key_type)
        self._notify(context, 'create.start', key_name)
        private_key, public_key, fingerprint = self._generate_key_pair(
            user_id, key_type)
        keypair = objects.KeyPair(context)
        keypair.user_id = user_id
        keypair.name = key_name
        keypair.type = key_type
        keypair.fingerprint = fingerprint
        keypair.public_key = public_key
        keypair.create()
        self._notify(context, 'create.end', key_name)
        return keypair, private_key
    def _generate_fingerprint(self, public_key, key_type):
        # NOTE(review): falls through and returns None for an unknown
        # key_type -- confirm callers only ever pass SSH or X509.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_fingerprint(public_key)
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_x509_fingerprint(public_key)
    def _generate_key_pair(self, user_id, key_type):
        # Returns a (private_key, public_key, fingerprint) triple.
        if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
            return crypto.generate_key_pair()
        elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
            return crypto.generate_winrm_x509_cert(user_id)
    @wrap_exception()
    def delete_key_pair(self, context, user_id, key_name):
        """Delete a keypair by name."""
        self._notify(context, 'delete.start', key_name)
        objects.KeyPair.destroy_by_name(context, user_id, key_name)
        self._notify(context, 'delete.end', key_name)
    def get_key_pairs(self, context, user_id):
        """List key pairs."""
        return objects.KeyPairList.get_by_user(context, user_id)
    def get_key_pair(self, context, user_id, key_name):
        """Get a keypair by name."""
        return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
    """Sub-set of the Compute API related to managing security groups
    and security group rules
    """
    # The nova security group api does not use a uuid for the id.
    id_is_uuid = False
    def __init__(self, skip_policy_check=False, **kwargs):
        super(SecurityGroupAPI, self).__init__(**kwargs)
        self.skip_policy_check = skip_policy_check
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()
    def validate_property(self, value, property, allowed):
        """Validate given security group property.
        :param value: the value to validate, as a string or unicode
        :param property: the property, either 'name' or 'description'
        :param allowed: the range of characters allowed
        """
        try:
            val = value.strip()
        except AttributeError:
            msg = _("Security group %s is not a string or unicode") % property
            self.raise_invalid_property(msg)
        utils.check_string_length(val, name=property, min_length=1,
                                  max_length=255)
        if allowed and not re.match(allowed, val):
            # Some validation to ensure that values match API spec.
            # - Alphanumeric characters, spaces, dashes, and underscores.
            # TODO(Daviey): LP: #813685 extend beyond group_name checking, and
            # probably create a param validator that can be used elsewhere.
            msg = (_("Value (%(value)s) for parameter Group%(property)s is "
                     "invalid. Content limited to '%(allowed)s'.") %
                   {'value': value, 'allowed': allowed,
                    'property': property.capitalize()})
            self.raise_invalid_property(msg)
    def ensure_default(self, context):
        """Ensure that a context has a security group.
        Creates a security group for the security context if it does not
        already exist.
        :param context: the security context
        """
        self.db.security_group_ensure_default(context)
    def create_security_group(self, context, name, description):
        """Create a security group, reserving quota for it."""
        quotas = objects.Quotas(context)
        try:
            quotas.reserve(security_groups=1)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security groups.")
            self.raise_over_quota(msg)
        LOG.info(_LI("Create Security Group %s"), name, context=context)
        try:
            self.ensure_default(context)
            group = {'user_id': context.user_id,
                     'project_id': context.project_id,
                     'name': name,
                     'description': description}
            try:
                group_ref = self.db.security_group_create(context, group)
            except exception.SecurityGroupExists:
                msg = _('Security group %s already exists') % name
                self.raise_group_already_exists(msg)
            # Commit the reservation
            quotas.commit()
        except Exception:
            # Roll the quota reservation back on any failure, then re-raise.
            with excutils.save_and_reraise_exception():
                quotas.rollback()
        return group_ref
    def update_security_group(self, context, security_group,
                              name, description):
        # System (read-only) groups may not be renamed.
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = (_("Unable to update system group '%s'") %
                   security_group['name'])
            self.raise_invalid_group(msg)
        group = {'name': name,
                 'description': description}
        columns_to_join = ['rules.grantee_group']
        group_ref = self.db.security_group_update(context,
                security_group['id'],
                group,
                columns_to_join=columns_to_join)
        return group_ref
    def get(self, context, name=None, id=None, map_exception=False):
        # Lookup by name takes precedence over lookup by id.
        self.ensure_default(context)
        try:
            if name:
                return self.db.security_group_get_by_name(context,
                                                          context.project_id,
                                                          name)
            elif id:
                return self.db.security_group_get(context, id)
        except exception.NotFound as exp:
            if map_exception:
                msg = exp.format_message()
                self.raise_not_found(msg)
            else:
                raise
    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        self.ensure_default(context)
        groups = []
        if names or ids:
            if names:
                for name in names:
                    groups.append(self.db.security_group_get_by_name(context,
                                                                     project,
                                                                     name))
            if ids:
                for id in ids:
                    groups.append(self.db.security_group_get(context, id))
        elif context.is_admin:
            # TODO(eglynn): support a wider set of search options than just
            # all_tenants, at least include the standard filters defined for
            # the EC2 DescribeSecurityGroups API for the non-admin case also
            if (search_opts and 'all_tenants' in search_opts):
                groups = self.db.security_group_get_all(context)
            else:
                groups = self.db.security_group_get_by_project(context,
                                                               project)
        elif project:
            groups = self.db.security_group_get_by_project(context, project)
        return groups
    def destroy(self, context, security_group):
        if security_group['name'] in RO_SECURITY_GROUPS:
            msg = _("Unable to delete system group '%s'") % \
                        security_group['name']
            self.raise_invalid_group(msg)
        if self.db.security_group_in_use(context, security_group['id']):
            msg = _("Security group is still in use")
            self.raise_invalid_group(msg)
        quotas = objects.Quotas(context=context)
        quota_project, quota_user = quotas_obj.ids_from_security_group(
            context, security_group)
        try:
            quotas.reserve(project_id=quota_project,
                           user_id=quota_user, security_groups=-1)
        except Exception:
            # Quota bookkeeping is best-effort here: deletion proceeds even
            # if the usage decrement could not be reserved.
            LOG.exception(_LE("Failed to update usages deallocating "
                              "security group"))
        LOG.info(_LI("Delete security group %s"), security_group['name'],
                 context=context)
        self.db.security_group_destroy(context, security_group['id'])
        # Commit the reservations
        quotas.commit()
    def is_associated_with_server(self, security_group, instance_uuid):
        """Check if the security group is already associated
        with the instance. If Yes, return True.
        """
        if not security_group:
            return False
        instances = security_group.get('instances')
        if not instances:
            return False
        for inst in instances:
            if (instance_uuid == inst['uuid']):
                return True
        return False
    @wrap_check_security_groups_policy
    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""
        security_group = self.db.security_group_get_by_name(context,
                                                            context.project_id,
                                                            security_group_name)
        instance_uuid = instance.uuid
        # check if the security group is associated with the server
        if self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_add_security_group(context.elevated(),
                                            instance_uuid,
                                            security_group['id'])
        # NOTE(comstud): No instance_uuid argument to this compute manager
        # call
        self.compute_rpcapi.refresh_security_group_rules(context,
                security_group['id'], host=instance.host)
    @wrap_check_security_groups_policy
    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        security_group = self.db.security_group_get_by_name(context,
                                                            context.project_id,
                                                            security_group_name)
        instance_uuid = instance.uuid
        # check if the security group is associated with the server
        if not self.is_associated_with_server(security_group, instance_uuid):
            raise exception.SecurityGroupNotExistsForInstance(
                security_group_id=security_group['id'],
                instance_id=instance_uuid)
        self.db.instance_remove_security_group(context.elevated(),
                                               instance_uuid,
                                               security_group['id'])
        # NOTE(comstud): No instance_uuid argument to this compute manager
        # call
        self.compute_rpcapi.refresh_security_group_rules(context,
                security_group['id'], host=instance.host)
    def get_rule(self, context, id):
        self.ensure_default(context)
        try:
            return self.db.security_group_rule_get(context, id)
        except exception.NotFound:
            msg = _("Rule (%s) not found") % id
            self.raise_not_found(msg)
    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.
        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both.
        """
        # Check the quota against the projected rule count before creating.
        count = objects.Quotas.count(context, 'security_group_rules', id)
        try:
            projected = count + len(vals)
            objects.Quotas.limit_check(context, security_group_rules=projected)
        except exception.OverQuota:
            msg = _("Quota exceeded, too many security group rules.")
            self.raise_over_quota(msg)
        msg = _("Security group %(name)s added %(protocol)s ingress "
                "(%(from_port)s:%(to_port)s)")
        rules = []
        for v in vals:
            rule = self.db.security_group_rule_create(context, v)
            rules.append(rule)
            LOG.info(msg, {'name': name,
                           'protocol': rule.protocol,
                           'from_port': rule.from_port,
                           'to_port': rule.to_port})
        self.trigger_rules_refresh(context, id=id)
        return rules
    def remove_rules(self, context, security_group, rule_ids):
        msg = _("Security group %(name)s removed %(protocol)s ingress "
                "(%(from_port)s:%(to_port)s)")
        for rule_id in rule_ids:
            rule = self.get_rule(context, rule_id)
            LOG.info(msg, {'name': security_group['name'],
                           'protocol': rule.protocol,
                           'from_port': rule.from_port,
                           'to_port': rule.to_port})
            self.db.security_group_rule_destroy(context, rule_id)
        # NOTE(vish): we removed some rules, so refresh
        self.trigger_rules_refresh(context, id=security_group['id'])
    def remove_default_rules(self, context, rule_ids):
        for rule_id in rule_ids:
            self.db.security_group_default_rule_destroy(context, rule_id)
    def add_default_rules(self, context, vals):
        rules = [self.db.security_group_default_rule_create(context, v)
                 for v in vals]
        return rules
    def default_rule_exists(self, context, values):
        """Indicates whether the specified rule values are already
        defined in the default security group rules.
        """
        for rule in self.db.security_group_default_rule_list(context):
            keys = ('cidr', 'from_port', 'to_port', 'protocol')
            for key in keys:
                if rule.get(key) != values.get(key):
                    break
            else:
                # All four fields matched (for/else): report the existing
                # rule's id, or True if it has none.
                return rule.get('id') or True
        return False
    def get_all_default_rules(self, context):
        try:
            rules = self.db.security_group_default_rule_list(context)
        except Exception:
            msg = 'cannot get default security group rules'
            raise exception.SecurityGroupDefaultRuleNotFound(msg)
        return rules
    def get_default_rule(self, context, id):
        return self.db.security_group_default_rule_get(context, id)
    def validate_id(self, id):
        # Nova security group ids are integers (see id_is_uuid above).
        try:
            return int(id)
        except ValueError:
            msg = _("Security group id should be integer")
            self.raise_invalid_property(msg)
    def trigger_rules_refresh(self, context, id):
        """Called when a rule is added to or removed from a security_group."""
        security_group = self.db.security_group_get(
            context, id, columns_to_join=['instances'])
        for instance in security_group['instances']:
            if instance.host is not None:
                self.compute_rpcapi.refresh_instance_security_rules(
                    context, instance.host, instance)
    def trigger_members_refresh(self, context, group_ids):
        """Called when a security group gains a new or loses a member.
        Sends an update request to each compute node for each instance for
        which this is relevant.
        """
        # First, we get the security group rules that reference these groups as
        # the grantee..
        security_group_rules = set()
        for group_id in group_ids:
            security_group_rules.update(
                self.db.security_group_rule_get_by_security_group_grantee(
                    context,
                    group_id))
        # ..then we distill the rules into the groups to which they belong..
        security_groups = set()
        for rule in security_group_rules:
            security_group = self.db.security_group_get(
                context, rule['parent_group_id'],
                columns_to_join=['instances'])
            security_groups.add(security_group)
        # ..then we find the instances that are members of these groups..
        instances = {}
        for security_group in security_groups:
            for instance in security_group['instances']:
                if instance.uuid not in instances:
                    instances[instance.uuid] = instance
        # ..then we send a request to refresh the rules for each instance.
        for instance in instances.values():
            if instance.host:
                self.compute_rpcapi.refresh_instance_security_rules(
                    context, instance.host, instance)
    def get_instance_security_groups(self, context, instance_uuid,
                                    detailed=False):
        if detailed:
            return self.db.security_group_get_by_instance(context,
                                                          instance_uuid)
        instance = objects.Instance(uuid=instance_uuid)
        groups = objects.SecurityGroupList.get_by_instance(context, instance)
        return [{'name': group.name} for group in groups]
    def populate_security_groups(self, instance, security_groups):
        if not security_groups:
            # Make sure it's an empty list and not None
            security_groups = []
        instance.security_groups = security_group_obj.make_secgroup_list(
            security_groups)
|
{
"content_hash": "54372a7bfa8931e94f7cf37d67867dac",
"timestamp": "",
"source": "github",
"line_count": 4140,
"max_line_length": 79,
"avg_line_length": 43.9963768115942,
"alnum_prop": 0.5755689148755113,
"repo_name": "alvarolopez/nova",
"id": "1cf915196efa044c23f52566b5b4587e11fa703f",
"size": "182959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/compute/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16127736"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "316332"
}
],
"symlink_target": ""
}
|
import operator
from fractions import Fraction, gcd
from functools import lru_cache
from numbers import Real, Integral, Rational, Complex
from math import trunc, frexp, log10, log2, isnan, isinf, floor
from itertools import count
from fixedpoint.qformat import QFormat
def lowest_set_bit(x):
    """Isolate the lowest set bit of an integer.
    Args:
        x: An integer in which to find the lowest set bit.
    Returns:
        An integer with at most one set bit, which is the lowest
        set bit of x (zero when x has no set bits).
    """
    # For arbitrary-precision two's-complement ints, -x == ~(x - 1),
    # so x & -x isolates the lowest set bit.
    return x & -x
def signed_left_shift(x, shift):
    """An arithmetic shift operator which can shift left or right.
    Args:
        x: The integer value to shift.
        shift: The signed number of bits to shift. Negative values
            shift right. Positive values shift left.
    Returns:
        The x shifted left by shift places.
    """
    # BUGFIX: the operators were previously swapped -- a negative shift
    # performed a left shift and a positive one a right shift, which
    # contradicted both the function name and this docstring.
    if shift < 0:
        return x >> -shift
    if shift > 0:
        return x << shift
    return x
def prime_factors(n):
    """The prime factors of n.
    Args:
        n: The positive integer for which to find prime factors.
    Returns:
        A list of the prime factors of n, in non-decreasing order and
        with multiplicity (empty for n == 1).
    """
    factors = []
    remaining = n
    candidate = 2
    while candidate * candidate <= remaining:
        quotient, remainder = divmod(remaining, candidate)
        if remainder == 0:
            factors.append(candidate)
            remaining = quotient
        else:
            candidate += 1
    # Whatever is left greater than 1 is itself prime.
    if remaining > 1:
        factors.append(remaining)
    return factors
@lru_cache()
def has_finite_expansion(denominator, base):
    """Can a fraction with denominator be expressed with a power-of-base denominator?
    Example:
        # Can 3/16 be represented as a fraction of the form 1/10**x
        # for some value of x?
        >>> has_finite_expansion(16, 10)
        True
    Args:
        denominator: The denominator of the source fraction we wish to represent.
        base: The base of the denominator of the target representation.
    Returns:
        True if a fraction with the source denominator can be represented as a
        fraction whose denominator is a power of base.
    """
    # A finite expansion exists exactly when every prime factor of the
    # denominator also divides the base.
    return set(prime_factors(denominator)) <= set(prime_factors(base))
def fraction_with_base(f, base):
    """Convert Fraction f into a fraction with a denominator that is a power-of-base.
    Example:
        # For example, how can 3/16 be represented as a fraction of the form 1/10**x
        # for some value of x?
        >>> fraction_with_base(Fraction(3, 16), 10)
        (1875, 10000)
    Args:
        f: A Fraction to be represented with a different denominator.
        base: The base of the denominator of the target representation.
    Returns:
        A 2-tuple containing the numerator and denominator of the result fraction. Note that
        a tuple is returned, rather than a Fraction, to prevent the result being simplified
        back to the argument f.
    Raises:
        ValueError: If f cannot be exactly represented as a fraction with a denominator which
            is a power of base.
    """
    if not has_finite_expansion(f.denominator, base):
        # BUGFIX: the original message concatenated "without" and "loss"
        # with no separating space ("withoutloss of precision").
        raise ValueError("Cannot convert {f} to a fraction with power-of-{base} denominator "
                         "without loss of precision".format(f=f, base=base))
    # Search upward for the first power of base divisible by f.denominator;
    # has_finite_expansion above guarantees this terminates.
    for power in count():
        target_denominator = base**power
        factor = Fraction(target_denominator, f.denominator)
        if factor.denominator == 1:
            # f.denominator * factor.numerator == base**power exactly.
            return (f.numerator * factor.numerator,
                    f.denominator * factor.numerator)
def _make_operators(mono, poly):
    """Build forward and reflected binary-operator methods for FixedPoint.
    Args:
        mono: The homogeneous implementation taking two FixedPoint operands.
        poly: The fallback operator (e.g. operator.add) applied after both
            operands are converted to a common non-FixedPoint numeric type.
    Returns:
        A 2-tuple (op, rop) of functions suitable for use as the __xxx__
        and __rxxx__ methods of FixedPoint.
    """
    def op(self, other):
        # Forward operator: dispatch on the concrete type of the right operand.
        if isinstance(other, FixedPoint):
            return mono(self, other)
        elif isinstance(other, int):
            return mono(self, FixedPoint(other))
        elif isinstance(other, Fraction):
            return poly(Fraction(self), other)
        elif isinstance(other, float):
            return poly(float(self), other)
        elif isinstance(other, complex):
            return poly(complex(self), other)
        return NotImplemented
    op.__name__ = '__{}__'.format(poly.__name__)
    op.__doc__ = mono.__doc__
    def rop(self, other):
        # Reflected operator: dispatches on the numbers.* ABCs rather than
        # the concrete types used in op above.
        # NOTE(review): the asymmetry (int/Fraction/float here vs
        # Integral/Rational/Real in op) looks unintentional -- confirm.
        if isinstance(other, FixedPoint):
            return mono(other, self)
        if isinstance(other, Integral):
            return mono(FixedPoint(other), self)
        elif isinstance(other, Rational):
            return poly(Fraction(other), Fraction(self))
        elif isinstance(other, Real):
            return poly(float(other), float(self))
        elif isinstance(other, Complex):
            return poly(complex(other), complex(self))
        return NotImplemented
    rop.__name__ = '__r{}__'.format(poly.__name__)
    rop.__doc__ = mono.__doc__
    return op, rop
def _add(a, b):
    """Sum of two FixedPoint values.

    Both operands are promoted to a common format first; the result format
    carries one extra integer bit so the sum can never overflow.
    """
    assert isinstance(a, FixedPoint)
    assert isinstance(b, FixedPoint)
    common = QFormat.from_qformats(a.qformat, b.qformat)
    sum_qformat = QFormat(common.integer_bits + 1, common.fraction_bits)
    total = FixedPoint(a, sum_qformat)._numerator + FixedPoint(b, sum_qformat)._numerator
    return FixedPoint._from_numerator(total, sum_qformat)
def _mul(a, b):
    """Product of two FixedPoint values.

    The result format is sized (sum of the operands' integer and fraction
    bits) so the product always fits without overflow.
    """
    assert isinstance(a, FixedPoint)
    assert isinstance(b, FixedPoint)
    product_qformat = QFormat(a.qformat.integer_bits + b.qformat.integer_bits + 1,
                              a.qformat.fraction_bits + b.qformat.fraction_bits)
    lhs = FixedPoint(a, product_qformat)
    rhs = FixedPoint(b, product_qformat)
    # Rescale: the raw product carries twice the fractional scaling.
    scaled_numerator = (lhs._numerator * rhs._numerator) // product_qformat.denominator
    return FixedPoint._from_numerator(scaled_numerator, product_qformat)
def _truediv(dividend, divisor):
    """Quotient of two FixedPoint values, rounded to the working precision."""
    assert isinstance(dividend, FixedPoint)
    assert isinstance(divisor, FixedPoint)
    quotient_qformat = QFormat(dividend.qformat.integer_bits + divisor.qformat.fraction_bits + 1,
                               divisor.qformat.integer_bits + dividend.qformat.fraction_bits)
    working_qformat = QFormat.from_qformats(dividend.qformat, divisor.qformat, quotient_qformat)
    numer = FixedPoint(dividend, working_qformat)._numerator
    denom = FixedPoint(divisor, working_qformat)._numerator
    # Round via Fraction rather than floor-dividing: round() gives correct
    # (banker's) rounding where // would simply truncate toward -inf.
    working_numerator = round(Fraction(numer * working_qformat.denominator, denom))
    intermediate = FixedPoint._from_numerator(working_numerator, working_qformat)
    return FixedPoint(intermediate, quotient_qformat)
def _pow(base, exponent):
    """Raise one FixedPoint to the power of another.

    Integer exponents are computed exactly in fixed point (negative integer
    exponents via the reciprocal of the positive power); any non-integer
    exponent falls back to float arithmetic.
    """
    assert isinstance(base, FixedPoint)
    # Fixed: the original asserted `base` twice and never checked `exponent`.
    assert isinstance(exponent, FixedPoint)
    if exponent.is_integer():
        integer_exponent = abs(floor(exponent))
        # Size the result format so base**n cannot overflow: each multiply can
        # contribute the base's magnitude bits and fraction bits.
        result_qformat = QFormat(max(base.qformat.integer_bits - 1, 0) * integer_exponent + 1,
                                 base.qformat.fraction_bits * integer_exponent)
        result_numerator = base._numerator ** integer_exponent
        positive_result = FixedPoint._from_numerator(result_numerator, result_qformat)
        if exponent >= 0:
            return positive_result
        else:
            reciprocal_result = 1 / positive_result
            return reciprocal_result
    return float(base) ** float(exponent)
class FixedPoint(Rational):
    """A signed, fixed-point, binary, immutable, number type."""

    @classmethod
    def _from_float(cls, f):
        """Create a FixedPoint using a QFormat without loss of precision.
        Args:
            f (float): A float of which to create an equivalent FixedPoint representation.
        Returns:
            A FixedPoint object the QFormat for which will have sufficient precision to
            exactly represent the float.
        Raises:
            OverflowError: If f is NaN or infinite.
        """
        assert isinstance(f, Real)
        if isnan(f) or isinf(f):
            raise OverflowError("{} cannot be represented by {}".format(f, cls.__name__))
        # 1. Work out where the binary point is
        fr, exp = frexp(f)
        numerator = int(fr * (2**53))  # 53 binary places in the fraction
        binary_point_index = 53 - exp  # Is one based
        # 2. Work out how many significant figures there are to the left of the binary point; call this m
        most_significant_set_index = numerator.bit_length() - 1
        num_leading_bits = 1 + most_significant_set_index - binary_point_index
        m = max(num_leading_bits, 0) + 1  # One to accommodate sign
        # 3. Work out how many significant figures there are to the right of the binary point; call this n
        lowest_bit = lowest_set_bit(numerator)
        least_significant_set_index = lowest_bit.bit_length() - 1
        num_trailing_bits = binary_point_index - least_significant_set_index
        n = max(num_trailing_bits, 0)
        # 4. Make QFormat(m, n)
        qformat = QFormat(m, n)
        # 5. Shift the numerator to fit Qm.n
        # We want the binary point to be at index n
        shift = binary_point_index - n
        shifted_numerator = signed_left_shift(numerator, shift)
        return cls._from_numerator(shifted_numerator, qformat)

    @classmethod
    def _from_integer(cls, i):
        """Create a FixedPoint using a QFormat with sufficient precision to represent the integer.
        Args:
            i: The integer to be represented in FixedPoint.
        Returns:
            A FixedPoint representation with sufficient precision to represent i.
        """
        assert isinstance(i, int)
        num_bits = i.bit_length() + 1  # Additional bit for sign information
        qformat = QFormat(num_bits, 0)
        return cls._from_numerator(i, qformat)

    @classmethod
    def _from_rational_exact(cls, r):
        """Create a FixedPoint with sufficient precision to represent a rational exactly.
        Args:
            r: A rational number to be represented exactly.
        Returns:
            An exact FixedPoint representation of r.
        Raises:
            ValueError: If r cannot be represented exactly (its denominator is
                not a power of two).
        """
        assert isinstance(r, Rational)
        integer_part = trunc(r)
        fraction_part = abs(r - integer_part)
        binary_numerator, binary_denominator = fraction_with_base(fraction_part, base=2)
        qformat = QFormat(integer_part.bit_length() + 1,
                          int(log2(binary_denominator)))
        return cls._from_numerator(binary_numerator, qformat)

    @classmethod
    def _from_value_approximately(cls, value, qformat):
        """Create a possibly approximate representation of value with specified precision.
        Args:
            value: The value to be represented in fixed point.
            qformat: The precision of the result.
        """
        assert qformat is not None
        numerator = round(value * qformat.denominator)
        return cls._from_numerator(numerator, qformat)

    @classmethod
    def _from_number_with_arbitrary_precision(cls, value):
        """Represent a number in FixedPoint with sufficient precision.
        Args:
            value: The Number to be converted to a FixedPoint representation.
        Raises:
            TypeError: If value cannot be represented with finite precision.
        """
        if not isinstance(value, Real):
            raise TypeError("{} cannot represent non-real value {} of type {}"
                            .format(cls.__name__, value, type(value).__name__))
        # Exact type check. Subclasses handled by Rational case, below
        if type(value) == cls:
            return value
        if isinstance(value, int):
            return cls._from_integer(value)
        if isinstance(value, Rational):
            return cls._from_rational_exact(value)
        if isinstance(value, Real):
            return cls._from_float(value)
        raise TypeError("{} cannot represent value {}".format(cls.__name__, value))

    @classmethod
    def _from_fixed_point_with_specific_precision(cls, value, qformat):
        """Represent an existing FixedPoint number with different precision.
        Args:
            value: The FixedPoint number to be represented.
            qformat: The precision of the result.
        Raises:
            OverflowError: If value cannot be represented without overflow.
        """
        if qformat == value.qformat:
            return value
        numerator = qformat.rescale_numerator(value._numerator, value.qformat)
        return cls._from_numerator(numerator, qformat)

    @classmethod
    def _from_numerator(cls, numerator, qformat):
        """Allocate a new FixedPoint object.
        Args:
            numerator: The numerator value, which when divided by qformat.denominator gives
                the actual numeric value of the new object.
            qformat: The precision of the new object.
        Returns:
            A new FixedPoint instance.
        Raises:
            OverflowError: If numerator exceeds the precision of qformat.
        """
        n = qformat.check_numerator(numerator)
        obj = super().__new__(cls)
        obj._numerator = n
        obj._qformat = qformat
        return obj

    def __new__(cls, value, qformat=None):
        """Obtain a FixedPoint instance.
        Args:
            value (Real): A real number type. e.g. float, int, or an existing FixedPoint.
            qformat: An optional QFormat. If not supplied a QFormat with sufficient precision to
                represent value without loss of information will be used. If supplied this will
                be the QFormat of the returned FixedPoint instance.
        Raises:
            ValueError: If value cannot be represented in finite precision when a qformat was not supplied.
            TypeError: If value cannot be represented as a FixedPoint value.
        """
        try:
            fixed_point = cls._from_number_with_arbitrary_precision(value)
        except ValueError:
            if qformat is None:
                raise
            # Exact representation is impossible but a target precision was
            # given, so fall back to the nearest representable value.
            fixed_point = cls._from_value_approximately(value, qformat)
        except OverflowError as e:
            raise ValueError(str(e))
        return fixed_point if (qformat is None) else cls._from_fixed_point_with_specific_precision(fixed_point, qformat)

    @property
    def qformat(self):
        """Obtain the Q format of the number as a 2-tuple.
        The zeroth element gives the signed integer precision in bits, the first element gives
        the fractional precision in bits.
        """
        return self._qformat

    def __repr__(self):
        return "{}({!s}, {!r})".format(self.__class__.__name__, self, self.qformat)

    def __str__(self):
        # All fractions with a finite binary representation (i.e. FixedPoint instances) also have a finite decimal
        # representation since all binary fractions have the form of k/2**a and all decimals have the form
        # k/(2**a * 5**b). The latter is a special case of the former.
        # To avoid loss of precision we construct the representation using integer math only, avoiding going via float.
        if self._numerator < 0:
            integer_part = -(abs(self._numerator) >> self._qformat.fraction_bits)
        else:
            integer_part = self._numerator >> self._qformat.fraction_bits
        integer_digits = str(integer_part)
        fractional_part = abs(self._numerator) & (2**self._qformat.fraction_bits - 1)
        if fractional_part == 0:
            return integer_digits
        fraction = Fraction(fractional_part, self._qformat.denominator)
        decimal_numerator, decimal_denominator = fraction_with_base(fraction, base=10)
        # zfill preserves leading zeros in the fractional digits (e.g. 0.0625).
        decimal_digits = str(decimal_numerator).zfill(int(log10(decimal_denominator)))
        return "{}.{}".format(integer_digits, decimal_digits)

    @property
    def numerator(self):
        """The numerator of an irreducible rational representation of the number.
        Note: This is NOT the same as the internal _numerator value, which may be in reducible form.
        """
        g = gcd(self._numerator, self._qformat.denominator)
        return self._numerator // g

    @property
    def denominator(self):
        """The denominator of an irreducible rational representation of the number.
        Note: This is NOT the same as the qformat.denominator, which may be in reducible form.
        """
        g = gcd(self._numerator, self._qformat.denominator)
        return self._qformat.denominator // g

    def __eq__(self, other):
        return Fraction(self) == other

    def __hash__(self):
        # Fixed: numbers.Number sets __hash__ = None (and defining __eq__
        # would suppress an inherited hash anyway), which left FixedPoint
        # unhashable.  Hashing via Fraction keeps hash() consistent with
        # __eq__ across the numeric tower (hash(FixedPoint(1)) == hash(1)).
        return hash(Fraction(self))

    def __lt__(self, other):
        return Fraction(self) < other

    def __le__(self, other):
        return Fraction(self) <= other

    def __gt__(self, other):
        return Fraction(self) > other

    def __ge__(self, other):
        return Fraction(self) >= other

    __add__, __radd__ = _make_operators(_add, operator.add)
    __mul__, __rmul__ = _make_operators(_mul, operator.mul)
    __truediv__, __rtruediv__ = _make_operators(_truediv, operator.truediv)
    __pow__, __rpow__ = _make_operators(_pow, operator.pow)

    def __neg__(self):
        # This can overflow for the most negative value of the current QFormat - the positive value can't be
        # represented - so the result must have one additional bit of precision.
        result_qformat = QFormat(self._qformat.integer_bits + 1, self._qformat.fraction_bits)
        return FixedPoint._from_numerator(-self._numerator, result_qformat)

    def __pos__(self):
        return self

    def __abs__(self):
        # NOTE(review): unlike __neg__, this keeps the same QFormat, so
        # abs(most-negative-value) will raise OverflowError via
        # check_numerator -- confirm that is intended.
        return FixedPoint._from_numerator(abs(self._numerator), self._qformat)

    def __trunc__(self):
        # Truncate toward zero using integer shifts only.
        if self._numerator < 0:
            return -(abs(self._numerator) >> self._qformat.fraction_bits)
        return self._numerator >> self._qformat.fraction_bits

    def __floor__(self):
        return self.numerator // self.denominator

    def __ceil__(self):
        return -(-self.numerator // self.denominator)

    def __round__(self, ndigits=None):
        """Round to nearest with optional decimal precision.
        Numbers exactly halfway between two nearest numbers are
        rounded towards the nearest ending with an even digit.
        Args:
            ndigits: An optional number of decimal places to which
                to round. Positive values round to fractional places
                (0.1, 0.01, etc) and negative values round to integer
                places(10, 100, etc). Zero rounds to whole numbers.
        Returns:
            If ndigits is None, the nearest integer, otherwise the
            nearest FixedPoint at the precision specified by ndigits.
        """
        if ndigits is None:
            quotient, remainder = divmod(self.numerator, self.denominator)
            if remainder * 2 < self.denominator:
                return quotient
            elif remainder * 2 > self.denominator:
                return quotient + 1
            else:
                # Exactly halfway: round to even (banker's rounding).
                return quotient if quotient % 2 == 0 else quotient + 1
        shift = 10**abs(ndigits)
        if ndigits > 0:
            shifted = self * shift
            rounded_to_integer = round(shifted)
            rounded_fixed_point = FixedPoint(rounded_to_integer)
            unshifted_fixed_point = rounded_fixed_point / shift
            return FixedPoint(unshifted_fixed_point, self._qformat)
        else:
            shifted = self / shift
            rounded_to_integer = round(shifted)
            rounded_fixed_point = FixedPoint(rounded_to_integer)
            unshifted_fixed_point = rounded_fixed_point * shift
            return FixedPoint(unshifted_fixed_point, self._qformat)

    def __floordiv__(self, other):
        return floor(self / other)

    def __rfloordiv__(self, other):
        return floor(other / self)

    def __mod__(self, other):
        div = self // other
        return self - other*div

    def __rmod__(self, other):
        div = other // self
        return other - self*div

    def is_integer(self):
        return floor(self) == self
|
{
"content_hash": "a1b8b74bc8c6449ad0d3729881c0025e",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 120,
"avg_line_length": 36.151624548736464,
"alnum_prop": 0.6263730776912323,
"repo_name": "sixty-north/fixedpointtest",
"id": "d2eca778dfb7096a4b0d283390dc386432827950",
"size": "20028",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixedpoint/fixedpoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66094"
}
],
"symlink_target": ""
}
|
__author__ = "Christopher Perry"
__email__ = "ozbonus@gmail.com"
"""
Take a text file wherein each line of text consist of space delimited characters
followed by an integer. This program prints the Mth element of the (1-based)
list. In the case of an index error, that input is ignored.
"""
import sys
def mth_to_last_element(filename=None):
    """Print, for each line of *filename*, the Mth-from-last token.

    Each input line holds space-delimited tokens followed by an integer M;
    the token at 1-based position M from the end (M itself excluded) is
    printed. Lines whose index is out of range are silently ignored.

    Args:
        filename: Path of the input file. Defaults to sys.argv[1], resolved
            at call time. (The original used ``filename=sys.argv[1]`` as the
            default, which was evaluated once at import time and crashed the
            import whenever no command-line argument was present.)
    """
    if filename is None:
        filename = sys.argv[1]
    with open(filename, "r") as data:
        for line in data:
            try:
                # Could do this in fewer lines, but this ain't code golf.
                chars = line.split()
                index = int(chars.pop())
                print(chars[-index])
            except IndexError:
                # Out-of-range index (or an empty line): skip, per the spec.
                pass
# Script entry point: processes the file named on the command line
# (the function's default filename comes from sys.argv[1]).
if __name__ == "__main__":
    mth_to_last_element()
|
{
"content_hash": "893fb1f33d84339e4790c72c8a56a800",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 80,
"avg_line_length": 30.416666666666668,
"alnum_prop": 0.589041095890411,
"repo_name": "OzBonus/CodeEval",
"id": "de3ed08b14a3df598698abfcfd0654481aaf3e7c",
"size": "753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moderate_mth_to_last_element.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16351"
}
],
"symlink_target": ""
}
|
from json import dumps
try:
from urlparse import parse_qsl
except ImportError:
from urllib.parse import parse_qsl
def facebook_compliance_fix(session):
    """Register a hook that normalizes Facebook's OAuth2 token response.

    Facebook historically returned access tokens as form-encoded ``text/plain``
    instead of JSON.  The hook rewrites such responses into a standard JSON
    token document (adding ``token_type``/``expires_in``) so the OAuth library
    can parse them.

    Args:
        session: A session object exposing ``register_compliance_hook``.

    Returns:
        The same session, with an 'access_token_response' hook registered.
    """
    def _compliance_fix(r):
        # Use .get() so a response with no Content-Type header falls through
        # untouched (the original indexed r.headers and raised KeyError).
        content_type = r.headers.get('content-type', '')
        # if Facebook claims to be sending us json, let's trust them.
        if 'application/json' in content_type:
            return r
        # Facebook returns a content-type of text/plain when sending their
        # x-www-form-urlencoded responses, along with a 200. If not, let's
        # assume we're getting JSON and bail on the fix.
        if 'text/plain' in content_type and r.status_code == 200:
            token = dict(parse_qsl(r.text, keep_blank_values=True))
        else:
            return r
        expires = token.get('expires')
        if expires is not None:
            # oauthlib expects the standard 'expires_in' key.
            token['expires_in'] = expires
        token['token_type'] = 'Bearer'
        r._content = dumps(token)
        return r

    session.register_compliance_hook('access_token_response', _compliance_fix)
    return session
|
{
"content_hash": "e32e061ef66f856c6657925439f9164b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 78,
"avg_line_length": 33.38709677419355,
"alnum_prop": 0.6309178743961352,
"repo_name": "emedinaa/contentbox",
"id": "b6b8b369faf616abfe2563596e2cb5a917c21d10",
"size": "1035",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "third_party/requests_oauthlib/compliance_fixes/facebook.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "420520"
},
{
"name": "HTML",
"bytes": "54100"
},
{
"name": "JavaScript",
"bytes": "1778"
},
{
"name": "Python",
"bytes": "49359"
},
{
"name": "Ruby",
"bytes": "413"
}
],
"symlink_target": ""
}
|
from nltk import RegexpParser
from collections import Counter
import word2vec, uuid, random
from string import punctuation
from datetime import datetime
from db_tables import metadata, CriteriaConceptStaging, ConceptPredictors, \
ConceptPredictorsReject, ConceptTerms, ConceptTermsReject, CriteriaTagged
# helper functions
def get_past_predictors(engine):
    '''pulls all the past predictors from other concepts into a list of lists

    Fixed: the original executed the query but fell off the end of the
    function, always returning None.  Callers (weight_preds in
    active_learn_terms) iterate a list of per-concept predictor lists, so
    group the (concept_id, predictor) rows by concept.
    '''
    result = engine.execute(select([ConceptPredictors.c.concept_id,
                                    ConceptPredictors.c.predictor]))
    grouped = {}
    for concept_id, predictor in result:
        grouped.setdefault(concept_id, []).append(predictor)
    return list(grouped.values())
def skip_terms():
    """Return the stock exclusion lists as (concept-term skips, predictor skips).

    These generic clinical-trial words are pre-seeded into the reject sets so
    the interactive learner never proposes them.
    """
    common_words = ['month', 'months', 'patient', 'patients', 'history', 'day', 'days',
                    'year', 'years', 'week', 'weeks', 'subject', 'subjects', 'study',
                    'inclusion criteria', 'exclusion criteria',
                    'history of', 'patients with', 'age', 'investigator', 'use',
                    'evidence', 'women', 'men', 'woman', 'man',
                    'female', 'male', 'enrollment', 'time']
    criteria_headers = ['inclusion criteria', 'exclusion criteria']
    return common_words, criteria_headers
def get_syn(word, model, num):
    '''Return up to `num` (term, score) neighbours of `word` from the word2vec model.

    Multi-word phrases are converted to the model's underscore form before the
    lookup.  Falls back to `[word]` when the model has no vector for the term
    (or on any other model error), so callers detect "no suggestions" by a
    result of length 1.
    '''
    #if the initial term has a space replace it with an underscore
    word = '_'.join(word.split(' '))
    try:
        indexes, metrics = model.cosine(word, n=num)
        return model.generate_response(indexes, metrics).tolist()
    except Exception as e:
        # NOTE(review): this broad except also hides genuine model errors,
        # not just unknown vocabulary -- consider narrowing.
        print e
        return [word]
def human_checker_initial(term, syns, term_list, term_exc):
    '''This function loops through the possible similar words and
    lets human input decide if they actually are or not.

    Each candidate not already accepted (term_list) or rejected (term_exc) is
    shown via raw_input; 'y' accepts it, 'n' rejects it, 'exit' returns
    immediately.  Returns (term_list, term_exc, exit_switch).
    '''
    if len(syns) > 1:
        for syn in syns:
            synterm, prob = syn
            # Skip anything the user has already classified.
            if synterm in term_list or synterm in term_exc:
                continue
            answer_switch = True
            while answer_switch:
                add_term = raw_input('Is this a similar term to %s? (Y, N, exit): ' % (term))
                if add_term.lower() == 'y':
                    term_list.update([synterm])
                    answer_switch = False
                elif add_term.lower() == 'exit':
                    #pass switch to exit program
                    # NOTE(review): the sibling human_checker functions set
                    # exit_switch = True on 'exit'; here it is False.  The only
                    # caller (choose_more_terms) discards the flag, but confirm
                    # which value was intended before relying on it.
                    exit_switch = False
                    return term_list, term_exc, exit_switch
                elif add_term.lower() == 'n':
                    term_exc.update([synterm])
                    answer_switch = False
                else:
                    # Unrecognized answer: re-prompt.
                    pass
    exit_switch = False
    return term_list, term_exc, exit_switch
#get list of synonyms from the word2vec model and let a human vet them
def choose_more_terms(model, initial_term, term_list, term_exc, num):
    """Ask the model for up to `num` neighbours of initial_term and review them."""
    candidates = get_syn(initial_term, model, num)
    # get_syn returns just [initial_term] when the model has no entry for the
    # word -- nothing to review in that case, so hand back the sets untouched.
    if len(candidates) == 1:
        return term_list, term_exc
    # Present phrases with spaces instead of the model's underscores.
    candidates = [(' '.join(word.split('_')), score) for word, score in candidates]
    term_list, term_exc, _exit_switch = human_checker_initial(initial_term, candidates,
                                                              term_list, term_exc)
    return term_list, term_exc
#look for more predictors for each concept by finding sentnces that have
#concept terms in them and looking for predictors in those sentences
def active_learn_predictors(data, term_list, pred_list, pred_exc):
'''
data is
term_list is
pred_list is
pred_exc is
'''
def get_pred(text, term_list, pred_exc, pred_list):
pred_options_dict = Counter()
for sent in text:
#if the sentance has less than 2 words skip it
if len(sent) <= 1:
continue
#crate a sentence rank for judging weight of terms found
sent_rank = 0
for term in term_list:
if term.lower() in ' '.join(zip(*sent)[0]).lower():
sent_rank += 1
result = chunker(sent)
preds = [' '.join(x) for x in [[x[0] for x in term] for term in result]]
preds.append(' '.join([sent[0][0], sent[1][0]]))
#lower case all preds
preds = [x.lower() for x in preds]
preds = preds * sent_rank
pred_options_dict.update(preds)
#get top 20 predictors that have not been seen before
sorted_preds = sorted(pred_options_dict.items(), key=lambda x: x[1], reverse=True)
counter = 0
top_preds = []
for pred in sorted_preds:
if pred[0] not in pred_list and pred[0] not in pred_exc:
top_preds.append(pred)
counter += 1
if counter == 15 or counter == len(sorted_preds):
return top_preds
#if there are no preds return empty list
return top_preds
#get chunks for preds
def chunker(sent):
chunk_reg1 = r"""
CHUNK: {<NN.*><IN>}
"""
chunk_reg2 = r"""
CHUNK: {<VB.*><DT>}
"""
chunk_reg3 = r"""
CHUNK: {<NN.*><VB.*>}
"""
results = []
for chunk_reg in [chunk_reg1, chunk_reg2, chunk_reg3]:
cp = nltk.RegexpParser(chunk_reg)
try:
tree = cp.parse(sent)
except Exception as e:
print e
print sent
asdfa
for subtree in tree.subtrees():
if subtree.label() == 'CHUNK':
results.append(subtree[:])
return results
def human_checker(term, pred_list, top_preds, pred_exc):
'''This function loops through the possible predictors and
lets human input decide if they actually are or not'''
print 'Are the following predictors of this concept %r?' % (initial_term)
if len(top_preds) > 1:
for pred in top_preds:
print 'Predictor: \x1b[35m %s \x1b[0m Score: \x1b[36m %d \x1b[0m' % (pred[0], pred[1])
answer_switch = True
while answer_switch:
add_pred = raw_input('Is this a predictor of %s? (Y, N, exit): ' % (initial_term))
if add_pred.lower() == 'y':
pred_list.update([pred[0]])
answer_switch = False
elif add_pred.lower() == 'exit':
#pass switch to exit program
exit_switch = True
return pred_list, pred_exc, exit_switch
elif add_pred.lower() == 'n':
pred_exc.update([pred[0]])
answer_switch = False
else:
pass
exit_switch = False
return pred_list, pred_exc, exit_switch
top_preds = get_pred(data, term_list, pred_exc, pred_list)
pred_list, pred_exc, exit_switch = human_checker(term_list, pred_list, top_preds, pred_exc)
print 'Predictive Term Learning Round Complete'
return pred_list, pred_exc, exit_switch
#look for more terms for each concept by finding sentnces that have
#predictors in them and looking for terms in those sentences
def active_learn_terms(data, term_list, pred_list, term_exc, past_predictors, model):
'''
data is
term_list is
pred_list is
term_exc is
past_predictors is
model is
'''
def get_term(text, term_list, term_exc, pred_list):
term_options_dict = Counter()
for sent in text:
#skip sentence if it contains less than one word
if len(sent) <= 1:
continue
#crate a sentence rank for judging weight of terms found
sent_rank = 0
for pred in pred_list:
if pred[0].lower() in ' '.join(zip(*sent)[0]).lower():
sent_rank += pred[1]
result = chunker(sent)
terms = [' '.join(x) for x in [[x[0] for x in term] for term in result]]
terms.append(' '.join([sent[0][0], sent[1][0]]))
#lower case all preds
terms = [x.lower() for x in terms]
#add weights to terms by multiplying by sent_rank
terms = terms * sent_rank
term_options_dict.update(terms)
#get top 20 predictors that have not been seen before
sorted_terms = sorted(term_options_dict.items(), key=lambda x: x[1], reverse=True)
counter = 0
top_terms = []
for term in sorted_terms:
if term[0] not in term_list and term[0] not in term_exc:
top_terms.append(term)
counter += 1
if counter == 15 or counter == len(sorted_terms):
return top_terms
#if there are no preds return empty list
return top_terms
#get chunks for preds
def chunker(sent):
chunk_reg1 = r"""
CHUNK: {(<NN.*><POS>)?<RB>?<JJ.*>*<NN.*>+}
"""
#was causing too many bad results
# chunk_reg2 = r"""
# CHUNK: {(<JJ.*>|<VB.*>)<XX>}
# """
results = []
for chunk_reg in [chunk_reg1]:
cp = nltk.RegexpParser(chunk_reg)
tree = cp.parse(sent)
for subtree in tree.subtrees():
if subtree.label() == 'CHUNK':
results.append(subtree[:])
return results
def human_checker(term_list, top_terms, term_exc):
'''This function loops through the possible terms and
lets human input decide if they actually are or not'''
print 'Are the following terms part of this concept: %r?' % (initial_term)
if len(top_terms) > 1:
for term in top_terms:
print 'Term: \x1b[35m %s \x1b[0m Score: \x1b[36m %d \x1b[0m' % (term[0], (term[1]))
answer_switch = True
while answer_switch:
add_term = raw_input('Is this similar to %s? (Y, N, exit): ' % (initial_term))
if add_term.lower() == 'y':
term_list.update([term[0]])
term_list, term_exc = choose_more_terms(model, initial_term, term_list, term_exc, 20)
answer_switch = False
elif add_term.lower() == 'exit':
#pass switch to exit program
exit_switch = True
return term_list, term_exc, exit_switch
elif add_term.lower() == 'n':
term_exc.update([term[0]])
answer_switch = False
else:
pass
exit_switch = False
return term_list, term_exc, exit_switch
def weight_preds(past_predictors, pred_list):
pred_weight_list = []
#create a combined list of all preds, create Counter dict
tot_pred_list = []
for p in past_predictors:
tot_pred_list += p
count_pred = Counter(tot_pred_list)
#add weights to pred terms and create new pred weight lists
list_preds = list(pred_list)
for idx in range(len(list_preds)):
weight = len(past_predictors) - (count_pred[list_preds[idx]]-1)
pred_weight_list.append((list_preds[idx], weight))
return pred_weight_list
pred_weight_list = weight_preds(past_predictors, pred_list)
top_terms = get_term(data, term_list, term_exc, pred_weight_list)
term_list, term_exc, exit_switch = human_checker(term_list, top_terms, term_exc)
print 'Concept Term Learning Round Complete'
return term_list, term_exc, exit_switch
def run_active_learning(term_list, term_exc, pred_list, pred_exc, engine, concept_id, user_id, model):
    '''
    Drive up to 10 interactive learning rounds and stage each round to the DB.

    term_list is the set of accepted concept terms
    term_exc is the set of rejected concept terms
    pred_list is the set of accepted predictor phrases
    pred_exc is the set of rejected predictor phrases
    engine is the SQLAlchemy engine used for all reads and writes
    concept_id is the existing concept id, or falsy to mint a new UUID
    user_id is recorded against every staged row
    model is the loaded word2vec model

    Returns (term_list, pred_list).
    '''
    # get initial terms with word2vec model
    # NOTE(review): `initial_term` is not a parameter here -- it resolves as a
    # module global at call time; confirm it is defined before this runs.
    term_list, term_exc = choose_more_terms(model, initial_term, term_list, term_exc, 20)
    if concept_id:
        new_concept = 0
    else:
        new_concept = 1
        concept_id = str(uuid.uuid4())
    past_predictors = get_past_predictors(engine)
    #check if there are any past predictiors and if not create and empty list
    if not past_predictors:
        past_predictors = []
    counter = 0
    exit_switch = False
    criteria_tracking = []
    while not exit_switch and counter < 10:
        # load in a random chunk of 10,000 trials
        #select a random number between 0-249; re-draw until we get a chunk
        #we have not used in this session.
        while True:
            rand_select = random.choice(xrange(250))
            if rand_select not in criteria_tracking:
                criteria_tracking.append(rand_select)
                break
        #need to figure out a way to get random sentences from this, rand() is way to slow
        result = engine.execute(select([CriteriaTagged.c.tagged_text]).where(CriteriaTagged.c.random_select
                                                                             == rand_select))
        #convert into list of lists
        # NOTE(review): eval() on DB contents executes arbitrary code if the
        # table is ever written from an untrusted source -- consider
        # ast.literal_eval for these serialized tag lists.
        data = [eval(r.tagged_text)[0] for r in result]
        #mark punctuation with XX tag and convert inner list to tuples for processing
        data = [[(w[0], w[1]) if w[0] not in punctuation else (w[0], 'XX') for w in s] for s in data]
        pred_list_new, pred_exc_new, exit_switch = active_learn_predictors(data, term_list, pred_list, pred_exc)
        term_list_new, term_exc_new, exit_switch = active_learn_terms(data, term_list, pred_list, term_exc, past_predictors, model)
        #write the concept name row to db after first pass
        if counter == 0:
            engine.execute(CriteriaConceptStaging.insert(), [{'user_id': user_id,
                                                              'update_time': datetime.now(),
                                                              'concept_id': concept_id,
                                                              'new_concept': new_concept,
                                                              'update_type': 'concept-name',
                                                              'value': initial_term}])
        #update the difference in the 4 sets to the database
        # NOTE(review): despite the comment above, this stages the
        # INTERSECTION of new and old, and the active_learn_* calls mutate the
        # very set objects they return -- so new_values is effectively the
        # whole set every round.  Confirm whether s.difference(old[ix]) was
        # intended here.
        old = [pred_list, pred_exc, term_list, term_exc]
        new = [pred_list_new, pred_exc_new, term_list_new, term_exc_new]
        update_type = ['predictor', 'predictor-reject', 'term', 'term-reject']
        for ix, s in enumerate(new):
            new_values = s.intersection(old[ix])
            old[ix] = s
            cur_time = datetime.now()
            update = update_type[ix]
            #instert data into db
            engine.execute(CriteriaConceptStaging.insert(), [{'user_id': user_id,
                                                              'update_time': cur_time,
                                                              'concept_id': concept_id,
                                                              'new_concept': new_concept,
                                                              'update_type': update,
                                                              'value': value}
                                                             for value in new_values])
        counter += 1
    return term_list, pred_list
def active_learn_main(engine, initial_term, user_id, concept_id=False):
    '''
    Entry point for one active-learning session.

    engine is the SQLAlchemy engine used for all reads and writes
    initial_term is the seed term the user chose for the concept
    user_id is the id recorded against staged updates
    concept_id is an existing concept's id, or False to start a new concept

    Returns (term_list, pred_list) from run_active_learning.
    '''
    # NOTE(review): `select` is used throughout this module but never imported
    # at the visible top of the file -- confirm `from sqlalchemy import select`
    # exists elsewhere.
    #user will select a term and then the term will be run through the word2vec model to come up with similar terms
    #if it is an existing concept pull the existing data from db else start from scratch
    if concept_id:
        # Materialise the query results into sets: the learning loop calls
        # set methods (.update / .intersection) on these collections, which
        # would fail on the raw result proxies the original kept.
        # (Fixed: `ConceptTerms_reject` was a NameError -- the imported table
        # is ConceptTermsReject.)
        term_list = set(row.term for row in engine.execute(
            select([ConceptTerms.c.term]).where(ConceptTerms.c.concept_id == concept_id)))
        term_exc = set(row.term for row in engine.execute(
            select([ConceptTermsReject.c.term]).where(ConceptTermsReject.c.concept_id == concept_id)))
        pred_list = set(row.predictor for row in engine.execute(
            select([ConceptPredictors.c.predictor]).where(ConceptPredictors.c.concept_id == concept_id)))
        pred_exc = set(row.predictor for row in engine.execute(
            select([ConceptPredictorsReject.c.predictor]).where(ConceptPredictorsReject.c.concept_id == concept_id)))
    else:
        term_list = set([initial_term])
        term_exc = set()
        pred_list = set()
        pred_exc = set()
    #load in model
    #model = word2vec.load('/groups/clinicaltrials/clinicaltrials/data/criteria.bin')
    #clusters = word2vec.load_clusters('/groups/clinicaltrials/clinicaltrials/data/criteria-clusters.txt')
    model = word2vec.load('../data/criteria.bin')
    clusters = word2vec.load_clusters('../data/criteria-clusters.txt')
    # add clusters to model
    model.clusters = clusters
    #add skip terms to term_exc and pred_exc
    skip_term, skip_pred = skip_terms()
    term_exc.update(skip_term)
    pred_exc.update(skip_pred)
    term_list, pred_list = run_active_learning(term_list, term_exc, pred_list, pred_exc, engine, concept_id, user_id, model)
    return term_list, pred_list
|
{
"content_hash": "5b75144976e42380a466cee65cd51d8d",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 134,
"avg_line_length": 38.91555555555556,
"alnum_prop": 0.5326062128825948,
"repo_name": "jasonost/clinicaltrials",
"id": "d3c24e71437313458212a6623ef688d43b22903e",
"size": "17512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ctapp/active_learn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11284"
},
{
"name": "HTML",
"bytes": "80253"
},
{
"name": "JavaScript",
"bytes": "312613"
},
{
"name": "Python",
"bytes": "366405"
},
{
"name": "SQLPL",
"bytes": "626"
},
{
"name": "Shell",
"bytes": "1655"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils import timezone
from employees.models import Employee
import datetime
from mis.base import BaseProfile
class Organizer(BaseProfile):
    """An event-organizing entity (institution, society, etc.)."""
    # unique=True makes the organizer's name its natural key.
    name = models.CharField(max_length=255, verbose_name="Name of Organizer", unique=True)
    email = models.EmailField(blank=True, null=True, verbose_name="Email Address")
    url = models.URLField(blank=True, null=True, verbose_name="Website Address")

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return self.name
def _default_event_end_date():
    """Return the default event end date: two days after the current local time.

    Wrapped in a callable so Django evaluates it for every new instance; the
    original inline expression was evaluated once at import time, freezing the
    default date at server start.
    """
    return timezone.localtime(timezone.now()) + datetime.timedelta(days=2)


# generic event model
class Events(BaseProfile):
    """Abstract base model for events (conferences, workshops, seminars, ...)."""
    LEVEL_CHOICES = (
        ('0', 'unknown'),
        ('1', 'institutional'),
        ('2', 'local'),
        ('3', 'national'),
        ('4', 'international'),
    )
    # http://www.evenues.com/event-planning-guide/types-of-meetings-and-events
    EVENT_TYPES = (
        ('0', 'Unknown'),
        ('1', 'Conference'),
        ('2', 'Meeting'),
        ('3', 'Banquet'),
        ('4', 'Seminar'),
        ('5', 'Conclave'),
        ('6', 'Workshop'),
        ('7', 'Convention'),
        ('8', 'Symposium'),
    )
    title = models.CharField(max_length=256, help_text="Enter the Event/Conference/Symposium name here")
    # Choice values are strings, so the defaults must be '0', not the int 0
    # (an int default would never match the declared choices).
    type = models.CharField(max_length=1, choices=EVENT_TYPES, default='0', help_text="What Type of Event is this?")
    venue = models.CharField(max_length=256, blank=True, help_text="Where did this happen (City,Province, Place)")
    start_date = models.DateField(default=timezone.now, verbose_name="Date Started")
    end_date = models.DateField(default=_default_event_end_date,
                                verbose_name="Date Ended")
    level = models.CharField(max_length=1, choices=LEVEL_CHOICES, default='0')
    organizers = models.ManyToManyField(Organizer, verbose_name="List of Organizers")

    class Meta:
        ordering = ('title',)
        unique_together = ('title', 'venue')
        abstract = True

    # method to return the number of duration of the event
    # manager to return all local, national, international
    def __str__(self):
        return self.title
class Presentation(Events):
    """A research presentation (oral or poster) delivered at an event."""
    RESEARCH_TYPES = (
        ('0', 'unknown'),
        ('1', 'study'),
        ('2', 'project'),
    )
    PRESENTATION_TYPES = (
        ('0', 'unknown'),
        ('1', 'oral'),
        ('2', 'poster'),
    )
    research_title = models.CharField(max_length=256, help_text="Enter the Research Title")
    authors = models.ManyToManyField(Employee, blank=True, verbose_name="List of authors")
    # NOTE(review): no on_delete is given, so this relies on the implicit
    # CASCADE of Django < 2.0 -- confirm the targeted Django version.
    presentor = models.ForeignKey(Employee, blank=True, related_name="presentor", verbose_name="Presented by", null=True)
    # Choice values are strings, so the defaults must be '0', not the int 0
    # (an int default would never match the declared choices).
    research_type = models.CharField(max_length=1, verbose_name="Type of Research", choices=RESEARCH_TYPES, default='0')
    presentation_type = models.CharField(max_length=1, verbose_name="Type of Presentation", choices=PRESENTATION_TYPES,
                                         default='0')
    other_info = models.TextField(blank=True, null=True)

    class Meta:
        ordering = ('research_title',)
        verbose_name_plural = 'Research Presentations'

    def get_authors(self):
        """Return all authors joined into one comma-separated string."""
        ret = ",".join([str(author) for author in self.authors.all()])
        return ret
#class Conference(Events):
# pass
|
{
"content_hash": "cc871cb1ba28a6ebc2ff390de0c0ae60",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 121,
"avg_line_length": 34.73684210526316,
"alnum_prop": 0.6242424242424243,
"repo_name": "rcdosado/URO-MIS",
"id": "3ee4845e8885e0e80bd0e9b73d58493d6bcceded",
"size": "3300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "119"
},
{
"name": "HTML",
"bytes": "8348"
},
{
"name": "Python",
"bytes": "60007"
}
],
"symlink_target": ""
}
|
import os
import yaml
import sys
from jinja2 import Environment, Template
# Warning banner prepended to every generated README so manual edits are
# discouraged (content must stay byte-identical for existing docs to match).
autogen_warning = """////
AUTOGENERATED FILE - this file was generated via ./gen_template_docs.py.
Changes to .adoc or HTML files may be overwritten! Please change the
generator or the input template (./*.jinja)
////
"""
# Load the shared Jinja template once at import; use a context manager so the
# file handle is closed instead of leaked.
with open('./template.adoc.jinja') as _template_file:
    template = Template(_template_file.read())
# Directories scanned recursively for module.yaml descriptors.
module_dirs = ['./jboss']
def generate_doc_for_module(module_file):
    """Render README.adoc next to *module_file* from its YAML data.

    module_file: path to a module.yaml descriptor. Its parsed contents are fed
    to the shared Jinja ``template`` and written, prefixed with the autogen
    warning banner, to README.adoc in the same directory.
    """
    with open(module_file) as open_file:
        # safe_load: the descriptor is plain data. yaml.load without an
        # explicit Loader can execute arbitrary tags and is a TypeError
        # on PyYAML >= 6.
        data = yaml.safe_load(open_file)
    output_file = os.path.join(os.path.dirname(module_file), 'README.adoc')
    print ("Generating %s..." % os.path.join(os.path.relpath(output_file, os.getcwd())), os.path.basename(output_file))
    with open(output_file, "w") as text_file:
        text_file.write(autogen_warning)
        text_file.write(template.render(data))
def scan_for_modules(curdir=None):
    """Recursively scan *curdir* (default: current directory) for module.yaml
    files and generate docs for each one found, in sorted order.

    The old signature ``curdir=os.getcwd()`` evaluated the default ONCE at
    import time, so later chdir() calls were silently ignored; ``None`` as a
    sentinel resolves the cwd per call instead.
    """
    if curdir is None:
        curdir = os.getcwd()
    if not os.path.isdir(curdir):
        return
    for entry in sorted(os.listdir(curdir)):
        full_path = os.path.join(curdir, entry)
        if os.path.isdir(full_path):
            scan_for_modules(full_path)
        elif os.path.basename(full_path) == 'module.yaml':
            generate_doc_for_module(full_path)
# Expects to be run from the root of the repository.
if __name__ == "__main__":
    # Explicit targets given on the command line?
    if len(sys.argv) > 1:
        sys.argv.pop(0)
        for target in sys.argv:
            generate_doc_for_module(target)
    # No targets: walk every configured module directory.
    else:
        for directory in module_dirs:
            scan_for_modules(directory)
|
{
"content_hash": "8db0efa11f3f90fe682e4b28002344a5",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 119,
"avg_line_length": 32.6530612244898,
"alnum_prop": 0.644375,
"repo_name": "jboss-openshift/cct_module",
"id": "56ead40884965e4df399254827b45ee1e18b2094",
"size": "1623",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "generate_docs.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "213299"
},
{
"name": "HTML",
"bytes": "633"
},
{
"name": "Jinja",
"bytes": "4187"
},
{
"name": "Python",
"bytes": "3838"
},
{
"name": "Shell",
"bytes": "158964"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
import sys
from django.conf import settings
from django.template import (Node, Variable, TemplateSyntaxError,
TokenParser, Library, TOKEN_TEXT, TOKEN_VAR)
from django.template.base import render_value_in_context
from django.template.defaulttags import token_kwargs
from django.utils import six
from django.utils import translation
register = Library()
class GetAvailableLanguagesNode(Node):
    """Stores [(code, translated_name), ...] from settings.LANGUAGES in the
    context under the configured variable name."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        languages = []
        for code, name in settings.LANGUAGES:
            languages.append((code, translation.ugettext(name)))
        context[self.variable] = languages
        return ''
class GetLanguageInfoNode(Node):
    """Resolves a language code and stores its info dict in the context."""

    def __init__(self, lang_code, variable):
        self.lang_code = lang_code
        self.variable = variable

    def render(self, context):
        code = self.lang_code.resolve(context)
        info = translation.get_language_info(code)
        context[self.variable] = info
        return ''
class GetLanguageInfoListNode(Node):
    """Stores one language-info dict per entry of ``languages`` in the
    context under ``variable``."""
    def __init__(self, languages, variable):
        self.languages = languages
        self.variable = variable
    def get_language_info(self, language):
        # ``language`` is either a language code string or a sequence
        # with the language code as its first item
        if len(language[0]) > 1:
            # First element is itself a multi-char string, so ``language`` is
            # a ('code', 'Name')-style sequence: look up its first item.
            return translation.get_language_info(language[0])
        else:
            # First element is a single character, so ``language`` is taken to
            # be the code string itself (language codes are >= 2 chars, hence
            # a bare code's first char always lands here).
            return translation.get_language_info(str(language))
    def render(self, context):
        langs = self.languages.resolve(context)
        context[self.variable] = [self.get_language_info(lang) for lang in langs]
        return ''
class GetCurrentLanguageNode(Node):
    """Stores the currently active language code in the context."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        current = translation.get_language()
        context[self.variable] = current
        return ''
class GetCurrentLanguageBidiNode(Node):
    """Stores the active language's bidi flag (True = right-to-left)."""

    def __init__(self, variable):
        self.variable = variable

    def render(self, context):
        bidi = translation.get_language_bidi()
        context[self.variable] = bidi
        return ''
class TranslateNode(Node):
    """Renders a single translated string for the ``{% trans %}`` tag.

    ``filter_expression`` is the (possibly filtered) string or variable to
    translate; ``noop`` disables translation; ``asvar`` optionally names a
    context variable to receive the result instead of emitting it;
    ``message_context`` carries a pgettext-style context expression.
    """
    def __init__(self, filter_expression, noop, asvar=None,
                 message_context=None):
        self.noop = noop
        self.asvar = asvar
        self.message_context = message_context
        self.filter_expression = filter_expression
        # A bare string literal is wrapped in a Variable so that resolution
        # and the translate/message_context flags below work uniformly.
        if isinstance(self.filter_expression.var, six.string_types):
            self.filter_expression.var = Variable("'%s'" %
                                                  self.filter_expression.var)
    def render(self, context):
        # NOTE: mutates state on the shared Variable object each render.
        self.filter_expression.var.translate = not self.noop
        if self.message_context:
            self.filter_expression.var.message_context = (
                self.message_context.resolve(context))
        output = self.filter_expression.resolve(context)
        value = render_value_in_context(output, context)
        if self.asvar:
            # '... as var' form: stash the value, emit nothing.
            context[self.asvar] = value
            return ''
        else:
            return value
class BlockTranslateNode(Node):
    """Renders ``{% blocktrans %}`` content: a (possibly pluralized, possibly
    contextualized) translated block whose %-style placeholders are filled
    from the template context."""
    def __init__(self, extra_context, singular, plural=None, countervar=None,
            counter=None, message_context=None):
        self.extra_context = extra_context
        self.singular = singular
        self.plural = plural
        self.countervar = countervar
        self.counter = counter
        self.message_context = message_context
    def render_token_list(self, tokens):
        """Flatten ``tokens`` into a %-format msgid plus the placeholder names.

        Literal ``%`` is escaped as ``%%``; every variable token becomes a
        ``%(name)s`` placeholder and is recorded in ``vars``.
        """
        result = []
        vars = []
        for token in tokens:
            if token.token_type == TOKEN_TEXT:
                result.append(token.contents.replace('%', '%%'))
            elif token.token_type == TOKEN_VAR:
                result.append('%%(%s)s' % token.contents)
                vars.append(token.contents)
        return ''.join(result), vars
    def render(self, context, nested=False):
        # ``nested`` guards the untranslated-fallback re-render at the bottom
        # from recursing more than once.
        if self.message_context:
            message_context = self.message_context.resolve(context)
        else:
            message_context = None
        tmp_context = {}
        for var, val in self.extra_context.items():
            tmp_context[var] = val.resolve(context)
        # Update() works like a push(), so corresponding context.pop() is at
        # the end of function
        context.update(tmp_context)
        singular, vars = self.render_token_list(self.singular)
        if self.plural and self.countervar and self.counter:
            # Plural form: expose the counter in the context, then pick the
            # (n)(p)gettext variant matching the message context.
            count = self.counter.resolve(context)
            context[self.countervar] = count
            plural, plural_vars = self.render_token_list(self.plural)
            if message_context:
                result = translation.npgettext(message_context, singular,
                                               plural, count)
            else:
                result = translation.ungettext(singular, plural, count)
            vars.extend(plural_vars)
        else:
            if message_context:
                result = translation.pgettext(message_context, singular)
            else:
                result = translation.ugettext(singular)
        default_value = settings.TEMPLATE_STRING_IF_INVALID
        render_value = lambda v: render_value_in_context(
            context.get(v, default_value), context)
        data = dict((v, render_value(v)) for v in vars)
        context.pop()
        try:
            result = result % data
        except (KeyError, ValueError):
            if nested:
                # Either string is malformed, or it's a bug
                raise TemplateSyntaxError("'blocktrans' is unable to format "
                    "string returned by gettext: %r using %r" % (result, data))
            # A bad catalog entry may reference unknown placeholders; retry
            # once with translation deactivated so the raw msgid is used.
            with translation.override(None):
                result = self.render(context, nested=True)
        return result
class LanguageNode(Node):
    """Renders its nodelist with the given language activated."""

    def __init__(self, nodelist, language):
        self.nodelist = nodelist
        self.language = language

    def render(self, context):
        lang = self.language.resolve(context)
        with translation.override(lang):
            return self.nodelist.render(context)
@register.tag("get_available_languages")
def do_get_available_languages(parser, token):
    """
    Stores the list of available languages in a context variable.

    Usage::

        {% get_available_languages as languages %}
        {% for language in languages %}
        ...
        {% endfor %}

    The list comes straight from the LANGUAGES setting (or its default),
    with each language name translated.
    """
    # token.split_contents() isn't needed: this tag takes no variable args.
    parts = token.contents.split()
    if len(parts) == 3 and parts[1] == 'as':
        return GetAvailableLanguagesNode(parts[2])
    raise TemplateSyntaxError("'get_available_languages' requires 'as variable' (got %r)" % parts)
@register.tag("get_language_info")
def do_get_language_info(parser, token):
    """
    Stores the language-information dictionary for a single language code
    in a context variable.

    Usage::

        {% get_language_info for LANGUAGE_CODE as l %}
        {{ l.code }}
        {{ l.name }}
        {{ l.name_local }}
        {{ l.bidi|yesno:"bi-directional,uni-directional" }}
    """
    args = token.split_contents()
    if len(args) == 5 and args[1] == 'for' and args[3] == 'as':
        return GetLanguageInfoNode(parser.compile_filter(args[2]), args[4])
    raise TemplateSyntaxError("'%s' requires 'for string as variable' (got %r)" % (args[0], args[1:]))
@register.tag("get_language_info_list")
def do_get_language_info_list(parser, token):
    """
    Stores a list of language-information dictionaries in a context variable.

    The codes may be given as a list of strings or as a settings.LANGUAGES
    style tuple (any sequence of sequences whose first items are codes).

    Usage::

        {% get_language_info_list for LANGUAGES as langs %}
        {% for l in langs %}
          {{ l.code }} {{ l.name }} {{ l.name_local }}
          {{ l.bidi|yesno:"bi-directional,uni-directional" }}
        {% endfor %}
    """
    args = token.split_contents()
    if len(args) == 5 and args[1] == 'for' and args[3] == 'as':
        return GetLanguageInfoListNode(parser.compile_filter(args[2]), args[4])
    raise TemplateSyntaxError("'%s' requires 'for sequence as variable' (got %r)" % (args[0], args[1:]))
@register.filter
def language_name(lang_code):
    """Return the language's name for the given code."""
    info = translation.get_language_info(lang_code)
    return info['name']
@register.filter
def language_name_local(lang_code):
    """Return the language's name in the language itself."""
    info = translation.get_language_info(lang_code)
    return info['name_local']
@register.filter
def language_bidi(lang_code):
    """Return the language's bidi flag (True = right-to-left)."""
    info = translation.get_language_info(lang_code)
    return info['bidi']
@register.tag("get_current_language")
def do_get_current_language(parser, token):
    """
    Stores the current language in a context variable.

    Usage::

        {% get_current_language as language %}

    Fetches the currently active language and puts its code into the
    named context variable.
    """
    # token.split_contents() isn't needed: this tag takes no variable args.
    parts = token.contents.split()
    if len(parts) == 3 and parts[1] == 'as':
        return GetCurrentLanguageNode(parts[2])
    raise TemplateSyntaxError("'get_current_language' requires 'as variable' (got %r)" % parts)
@register.tag("get_current_language_bidi")
def do_get_current_language_bidi(parser, token):
    """
    Stores the current language's layout direction in a context variable.

    Usage::

        {% get_current_language_bidi as bidi %}

    True indicates right-to-left layout, otherwise left-to-right.
    """
    # token.split_contents() isn't needed: this tag takes no variable args.
    parts = token.contents.split()
    if len(parts) == 3 and parts[1] == 'as':
        return GetCurrentLanguageBidiNode(parts[2])
    raise TemplateSyntaxError("'get_current_language_bidi' requires 'as variable' (got %r)" % parts)
@register.tag("trans")
def do_translate(parser, token):
    """
    This will mark a string for translation and will
    translate the string for the current language.
    Usage::
        {% trans "this is a test" %}
    This will mark the string for translation so it will
    be pulled out by mark-messages.py into the .po files
    and will run the string through the translation engine.
    There is a second form::
        {% trans "this is a test" noop %}
    This will only mark for translation, but will return
    the string unchanged. Use it when you need to store
    values into forms that should be translated later on.
    You can use variables instead of constant strings
    to translate stuff you marked somewhere else::
        {% trans variable %}
    This will just try to translate the contents of
    the variable ``variable``. Make sure that the string
    in there is something that is in the .po file.
    It is possible to store the translated string into a variable::
        {% trans "this is a test" as var %}
        {{ var }}
    Contextual translations are also supported::
        {% trans "this is a test" context "greeting" %}
    This is equivalent to calling pgettext instead of (u)gettext.
    """
    # Local TokenParser subclass: parses the value plus the optional
    # 'noop' / 'context <expr>' / 'as <var>' trailing options.
    class TranslateParser(TokenParser):
        def top(self):
            value = self.value()
            # Backwards Compatiblity fix:
            # FilterExpression does not support single-quoted strings,
            # so we make a cheap localized fix in order to maintain
            # backwards compatibility with existing uses of ``trans``
            # where single quote use is supported.
            if value[0] == "'":
                # Single-quoted string followed by a filter chain.
                m = re.match("^'([^']+)'(\|.*$)", value)
                if m:
                    value = '"%s"%s' % (m.group(1).replace('"','\\"'), m.group(2))
                elif value[-1] == "'":
                    # Entirely single-quoted, no filters: re-quote with ".
                    value = '"%s"' % value[1:-1].replace('"','\\"')
            noop = False
            asvar = None
            message_context = None
            while self.more():
                tag = self.tag()
                if tag == 'noop':
                    noop = True
                elif tag == 'context':
                    message_context = parser.compile_filter(self.value())
                elif tag == 'as':
                    asvar = self.tag()
                else:
                    raise TemplateSyntaxError(
                        "Only options for 'trans' are 'noop', " \
                        "'context \"xxx\"', and 'as VAR'.")
            return value, noop, asvar, message_context
    value, noop, asvar, message_context = TranslateParser(token.contents).top()
    return TranslateNode(parser.compile_filter(value), noop, asvar,
        message_context)
@register.tag("blocktrans")
def do_block_translate(parser, token):
    """
    This will translate a block of text with parameters.
    Usage::
        {% blocktrans with bar=foo|filter boo=baz|filter %}
        This is {{ bar }} and {{ boo }}.
        {% endblocktrans %}
    Additionally, this supports pluralization::
        {% blocktrans count count=var|length %}
        There is {{ count }} object.
        {% plural %}
        There are {{ count }} objects.
        {% endblocktrans %}
    This is much like ngettext, only in template syntax.
    The "var as value" legacy format is still supported::
        {% blocktrans with foo|filter as bar and baz|filter as boo %}
        {% blocktrans count var|length as count %}
    Contextual translations are supported::
        {% blocktrans with bar=foo|filter context "greeting" %}
        This is {{ bar }}.
        {% endblocktrans %}
    This is equivalent to calling pgettext/npgettext instead of
    (u)gettext/(u)ngettext.
    """
    # Phase 1: consume the tag's own options (with/count/context), each
    # allowed at most once.
    bits = token.split_contents()
    options = {}
    remaining_bits = bits[1:]
    while remaining_bits:
        option = remaining_bits.pop(0)
        if option in options:
            raise TemplateSyntaxError('The %r option was specified more '
                                      'than once.' % option)
        if option == 'with':
            value = token_kwargs(remaining_bits, parser, support_legacy=True)
            if not value:
                raise TemplateSyntaxError('"with" in %r tag needs at least '
                                          'one keyword argument.' % bits[0])
        elif option == 'count':
            value = token_kwargs(remaining_bits, parser, support_legacy=True)
            if len(value) != 1:
                raise TemplateSyntaxError('"count" in %r tag expected exactly '
                                          'one keyword argument.' % bits[0])
        elif option == "context":
            try:
                value = remaining_bits.pop(0)
                value = parser.compile_filter(value)
            except Exception:
                msg = (
                    '"context" in %r tag expected '
                    'exactly one argument.') % bits[0]
                # Re-raise as TemplateSyntaxError, preserving the traceback.
                six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2])
        else:
            raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
                                      (bits[0], option))
        options[option] = value
    if 'count' in options:
        countervar, counter = list(six.iteritems(options['count']))[0]
    else:
        countervar, counter = None, None
    if 'context' in options:
        message_context = options['context']
    else:
        message_context = None
    extra_context = options.get('with', {})
    # Phase 2: consume the tag body: text/var tokens up to {% plural %}
    # (if counting) and then up to {% endblocktrans %}.
    singular = []
    plural = []
    while parser.tokens:
        token = parser.next_token()
        if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
            singular.append(token)
        else:
            break
    if countervar and counter:
        # With a counter, the block must split at a {% plural %} tag.
        if token.contents.strip() != 'plural':
            raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags inside it")
        while parser.tokens:
            token = parser.next_token()
            if token.token_type in (TOKEN_VAR, TOKEN_TEXT):
                plural.append(token)
            else:
                break
    if token.contents.strip() != 'endblocktrans':
        raise TemplateSyntaxError("'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents)
    return BlockTranslateNode(extra_context, singular, plural, countervar,
        counter, message_context)
@register.tag
def language(parser, token):
    """
    Activates the given language for the enclosed block only.

    Usage::

        {% language "de" %}
        This is {{ bar }} and {{ boo }}.
        {% endlanguage %}
    """
    bits = token.split_contents()
    if len(bits) != 2:
        raise TemplateSyntaxError("'%s' takes one argument (language)" % bits[0])
    language_expr = parser.compile_filter(bits[1])
    body = parser.parse(('endlanguage',))
    parser.delete_first_token()
    return LanguageNode(body, language_expr)
|
{
"content_hash": "86b0bcb5695eb9335bcf19ff37c3f94f",
"timestamp": "",
"source": "github",
"line_count": 488,
"max_line_length": 117,
"avg_line_length": 35.35450819672131,
"alnum_prop": 0.602445951428737,
"repo_name": "ZhaoCJ/django",
"id": "bce10b9de6f1904f624422aea93b4f9240d38f0b",
"size": "17253",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "django/templatetags/i18n.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import unittest
from os import path
from integration.pegasus.test_helper import get_channel
from pyprobe import SensorError
from pyprobe.sensors.pegasus.sensor_phy_drive import PegasusPhysicalDriveSensor
# Paths to fake Pegasus utility scripts that stand in for the real hardware
# tool during these integration tests.
FAKE_UTIL = path.join(path.dirname(__file__), 'pegasus.py')  # healthy controller/drives
NO_CONTROLLERS = path.join(path.dirname(__file__), 'no_controllers.py')  # no controllers found (unused here — presumably used by a sibling test; verify)
FAILED_DRIVE = path.join(path.dirname(__file__), 'pegasus_drive_failure.py')  # drive reporting a failure
class PegasusPhysicalDriveSensorDefinitionTest(unittest.TestCase):
    """Integration tests for PegasusPhysicalDriveSensor driven by fake
    pegasus utility scripts (see FAKE_UTIL / FAILED_DRIVE)."""

    def _execute(self, drive, executable=FAKE_UTIL):
        # Shared driver: run the sensor for *drive* in zero-tolerance mode
        # against the given fake executable and return the raw result.
        subject = PegasusPhysicalDriveSensor()
        params = {'drive': drive, 'strict': PegasusPhysicalDriveSensor.ZERO_TOLERANCE_ID}
        return subject.execute('1234', '127.0.0.1', params, {'executable': executable})

    def test_existing_drive_should_work(self):
        result = self._execute(2)
        self.assertEqual(38, get_channel(result, "Temperatur").value)
        self.assertEqual(0, get_channel(result, "Raw Read Error Rate").value)
        self.assertEqual(0, get_channel(result, "Reallocated Sector Count").value)
        self.assertEqual(0, get_channel(result, "Seek Error Rate").value)
        self.assertEqual(0, get_channel(result, "Spin Retry Count").value)
        self.assertEqual(0, get_channel(result, "Reallocated Event Count").value)
        self.assertEqual(0, get_channel(result, "Current Pending Sector").value)
        self.assertEqual(0, get_channel(result, "Offline Uncorrectable").value)
        # Channels not present for this harddrive
        self.assertIsNone(get_channel(result, "Multi Zone Error Rate"))
        self.assertIsNone(get_channel(result, "Calibration Retry Count"))

    def test_incorrect_drive_number_should_return_error(self):
        result = self._execute(7)
        self.assertIsInstance(result, SensorError)
        self.assertEqual(PegasusPhysicalDriveSensor.ERROR_CODE_DRIVE_NOT_FOUND, result.code)
        self.assertEqual("Drive 7 not found.", result.message)

    def test_drive_failure_should_return_error(self):
        result = self._execute(1, executable=FAILED_DRIVE)
        self.assertIsInstance(result, SensorError)

    def test_temperature_should_be_first_sensor(self):
        result = self._execute(2)
        # then: the temperature channel comes first
        self.assertEqual("Temperatur", result.channel[0].name)

    def test_flaky_hitachi_values_should_be_corrected(self):
        result = self._execute(3)
        # then: the Reallocated Sector Count value is corrected to 0 (Hitachi
        # drives sometimes report bogus values under load).
        self.assertEqual(0, get_channel(result, "Reallocated Sector Count").value)
|
{
"content_hash": "93a6fdc65d152a49b38b5150ddcb3710",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 115,
"avg_line_length": 52,
"alnum_prop": 0.703125,
"repo_name": "dittert/pyprobe",
"id": "312cb15020c80a59f54baa2d702813c6ee517dc0",
"size": "3346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/pegasus/PegasusPhysicalDriveSensorExecutionTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "55"
},
{
"name": "Python",
"bytes": "251844"
},
{
"name": "Shell",
"bytes": "2744"
}
],
"symlink_target": ""
}
|
from network import LoRa
import time
import binascii
import socket
import struct
# Default LoRaWAN compliance-test credentials (hex bytes, space-separated).
# NOTE(review): the NWK/APP session keys are the well-known AES-128 sample key
# (2B 7E 15 16 ... from the NIST/FIPS test vectors) — bench values only,
# never for production devices.
APP_EUI = 'AD A4 DA E3 AC 12 67 6B'
APP_KEY = '11 B0 28 2A 18 9B 75 B0 B4 D2 D8 C7 FA 38 54 8B'
DEV_ADDR = '00 00 00 0A'  # ABP device address
NWK_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
APP_SWKEY = '2B 7E 15 16 28 AE D2 A6 AB F7 15 88 09 CF 4F 3C'
class Compliance:
    """LoRaWAN compliance-test harness for Pycom boards (MicroPython).

    Joins the network (OTAA or ABP), opens a raw LoRa socket, and then
    services the LoRaWAN certification protocol: echoing the downlink
    counter, answering link checks, and replying to state-4 echo requests
    with each payload byte incremented by one.
    """
    def __init__(self, activation=LoRa.OTAA):
        # ``activation``: LoRa.OTAA or LoRa.ABP; keys come from the module
        # constants above.
        self.lora = LoRa(mode=LoRa.LORAWAN)
        self.lora.compliance_test(True, 0, False) # enable testing
        self.activation = activation
        self._join()
        # Raw LoRa socket, data rate 3, unconfirmed uplinks.
        self.s = socket.socket(socket.AF_LORA, socket.SOCK_RAW)
        self.s.setsockopt(socket.SOL_LORA, socket.SO_DR, 3)
        self.s.setsockopt(socket.SOL_LORA, socket.SO_CONFIRMED, False)
    def _join(self):
        """Join the network with the configured activation and block until done."""
        if self.activation == LoRa.OTAA:
            app_eui = binascii.unhexlify(APP_EUI.replace(' ',''))
            app_key = binascii.unhexlify(APP_KEY.replace(' ',''))
            self.lora.join(activation=LoRa.OTAA, auth=(app_eui, app_key), timeout=0)
        else:
            # ABP: device address is a big-endian 32-bit int.
            dev_addr = struct.unpack(">l", binascii.unhexlify(DEV_ADDR.replace(' ','')))[0]
            nwk_swkey = binascii.unhexlify(NWK_SWKEY.replace(' ',''))
            app_swkey = binascii.unhexlify(APP_SWKEY.replace(' ',''))
            self.lora.join(activation=LoRa.ABP, auth=(dev_addr, nwk_swkey, app_swkey))
        # wait until the module has joined the network
        while not self.lora.has_joined():
            time.sleep(5)
            print("Joining...")
        print("Network joined!")
    def run(self):
        """Main service loop: announce readiness, then run the test protocol.

        Compliance-test state numbers are driven by the firmware; this loop
        assumes state 4 = echo request and state >= 6 ends the data phase —
        NOTE(review): semantics inferred from usage, confirm against the
        Pycom/Semtech compliance documentation.
        """
        while True:
            # Not yet activated by the test bench: ping 'Ready' every 5 s.
            while not self.lora.compliance_test().running:
                time.sleep(5)
                self.s.send('Ready')
            print('Test running!')
            self.s.setblocking(True)
            # Default uplink payload: the 16-bit downlink counter, big-endian.
            self.tx_payload = bytes([(self.lora.compliance_test().downlink_counter >> 8) & 0xFF,
                                     self.lora.compliance_test().downlink_counter & 0xFF])
            while self.lora.compliance_test().running:
                if self.lora.compliance_test().state < 6: # re-join
                    try:
                        self.s.send(self.tx_payload)
                        time.sleep(2)
                    except Exception:
                        time.sleep(1)
                    if self.lora.compliance_test().link_check:
                        # Link-check reply: type 5, demod margin, gateway count.
                        self.tx_payload = bytes([5, self.lora.compliance_test().demod_margin,
                                                 self.lora.compliance_test().nbr_gateways])
                        # set the state to 1 and clear the link check flag
                        self.lora.compliance_test(True, 1, False)
                    else:
                        if self.lora.compliance_test().state == 4:
                            # Echo request: reply with first byte unchanged and
                            # every following byte incremented (mod 256).
                            rx_payload = self.s.recv(255)
                            if rx_payload:
                                self.tx_payload = bytes([rx_payload[0]])
                                for i in range(1, len(rx_payload)):
                                    self.tx_payload += bytes([(rx_payload[i] + 1) & 0xFF])
                                self.lora.compliance_test(True, 1) # set the state to 1
                            else:
                                self.tx_payload = bytes([(self.lora.compliance_test().downlink_counter >> 8) & 0xFF,
                                                         self.lora.compliance_test().downlink_counter & 0xFF])
                        else:
                            time.sleep(2)
            # Test no longer running: re-join before waiting for the next run.
            self._join()
|
{
"content_hash": "1ba9e12125966d6f5aea2a30d57bf09d",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 112,
"avg_line_length": 41.36046511627907,
"alnum_prop": 0.5094180489176272,
"repo_name": "Xykon/pycom-micropython-sigfox",
"id": "08f2a645d3bfcadb22542aa142786e29b0b53ecf",
"size": "3880",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "esp32/tools/lora/certification/certification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "55179"
},
{
"name": "C",
"bytes": "32133296"
},
{
"name": "C++",
"bytes": "642137"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "104211"
},
{
"name": "Objective-C",
"bytes": "10903"
},
{
"name": "Python",
"bytes": "1000724"
},
{
"name": "Shell",
"bytes": "13441"
}
],
"symlink_target": ""
}
|
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
Written (W) 2009-2010 Andre Kahles
Copyright (C) 2009-2010 by Friedrich Miescher Laboratory, Tuebingen, Germany
This script finds an optimal parameter set to maximize the performance of a
given intronfeature file.
For detailed usage information type:
python find_optimal_param_set.py
"""
import sys
import cPickle
class Feature(object):
    """Is an intron feature object.

    Python 2 code. Tracks how many alignments/submissions support an intron
    and a histogram of (mismatches, exon-overlap) observations. Observations
    are encoded as a single key:  key = ex * (max_mm + 1) + mm  (see
    add_mm_ex), so ``self.max_mm`` stores max_mm + 1.
    NOTE(review): keys parsed from a feature string stay *strings* while
    add_mm_ex inserts *int* keys, so the two can coexist in ``mm_ex``;
    is_valid copes by int()-converting every key.
    """
    def __init__(self, max_mm=80, feature_string=''):
        # feature_string layout: [align_support, subm_support, key:count, ...]
        if feature_string == '':
            self.alignment_support = 0
            self.submission_support = 1
            self.mm_ex = dict()
            self.max_mm = max_mm + 1
        else:
            self.alignment_support = int(feature_string[0])
            self.submission_support = int(feature_string[1])
            self.mm_ex = dict()
            self.max_mm = max_mm + 1
            for _sl in feature_string[2:]:
                (key, value) = _sl.split(':')
                self.mm_ex[key] = int(value)
    def merge_features(self, feature_string):
        """Merges information in feature_string into current feature object"""
        self.alignment_support += int(feature_string[0])
        self.submission_support += int(feature_string[1])
        for _sl in feature_string[2:]:
            (key, value) = _sl.split(':')
            try:
                self.mm_ex[key] += int(value)
            except KeyError:
                self.mm_ex[key] = int(value)
    def add_mm_ex(self, ex, mm):
        """Adds mm ex information (one alignment with ex overlap / mm mismatches)."""
        self.alignment_support += 1
        try:
            self.mm_ex[(ex*self.max_mm) + mm] += 1
        except KeyError:
            self.mm_ex[(ex*self.max_mm) + mm] = 1
    def get_feature_string(self):
        """Returns string with mm ex elements (tab-separated, key:count pairs)."""
        _line = (str(self.alignment_support) + '\t' + str(self.submission_support) + '\t')
        for key in self.mm_ex:
            _line += (str(key) + ':' + str(self.mm_ex[key]) + '\t')
        return _line[:-1]
    def get_submission_support(self):
        """Returns submission support"""
        return int(self.submission_support)
    def is_valid(self, mm, ex, mc, options):
        """Returns true, if at least one alignment fulfills the requirements with respect to mm, ex, and mc. False otherwise."""
        if self.alignment_support < mc:
            return False
        is_valid = False
        for key in self.mm_ex.keys():
            # Decode key with options.max_feat_mismatches — this must equal the
            # max_mm the feature was built with (Python 2 floor division).
            _ex = int(key) / (options.max_feat_mismatches + 1)
            _mm = int(key) % (options.max_feat_mismatches + 1)
            if _mm <= mm and _ex >= ex:
                is_valid = True
                break
        return is_valid
def parse_options(argv):
    """Parses options from the command line.

    Prints help and exits with status 2 when fewer than two argv entries are
    given. NOTE(review): the 'REQUIRED' group is not actually enforced — every
    option has a '-' default; option declaration order determines --help output.
    """
    from optparse import OptionParser, OptionGroup
    parser = OptionParser()
    required = OptionGroup(parser, 'REQUIRED')
    required.add_option('-b', '--best_score', dest='best_scores', metavar='FILE', help='file to store the best scoring parameters', default='-')
    required.add_option('-m', '--matrix', dest='matrix', metavar='FILE', help='file to store the full performance matrix', default='-')
    required.add_option('-f', '--features', dest='features', metavar='FILE', help='alignment intron features', default='-')
    required.add_option('-i', '--annotation_introns', dest='anno_int', metavar='FILE', help='annotation intron list', default='-')
    optional = OptionGroup(parser, 'OPTIONAL')
    optional.add_option('-E', '--exclude_introns', dest='exclude_introns', metavar='STRINGLIST', help='list of comma separated intron files to exclude from submitted features', default='-')
    optional.add_option('-I', '--max_intron_len', dest='max_intron_len', metavar='INT', type='int', help='maximal intron length [10000000]', default=10000000)
    optional.add_option('-s', '--ignore_strand', dest='ignore_strand', action='store_true', help='ignore strand information present in annotation', default=False)
    optional.add_option('-X', '--max_feat_mismatches', dest='max_feat_mismatches', metavar='INT', type='int', help='max number of mismatches for feat generation [80] (do only change, if you are absolutely sure!)', default=80)
    optional.add_option('-v', '--verbose', dest='verbose', action='store_true', help='verbosity', default=False)
    parser.add_option_group(required)
    parser.add_option_group(optional)
    (options, args) = parser.parse_args()
    # Bail out with usage when the script is invoked without arguments.
    if len(argv) < 2:
        parser.print_help()
        sys.exit(2)
    return options
def get_performance_value(full_features, mm, ex, mc, annotation_list, options):
    """Builds up a filtered intron list from the given alignment features and compares to the annotation.

    Returns (precision, recall), each averaged per chromosome over the
    chromosomes that have at least one match. Python 2 code (uses has_key).
    """
    alignment_list = dict()
    for feat in full_features.keys():
        chrm = feat[0]
        # Strand is normalized to 0 to match the (possibly strand-ignored)
        # annotation keys.
        intron = (0, int(feat[1]), int(feat[2]))
        ### filter step
        if (intron[2] - intron[1]) > options.max_intron_len:
            continue
        if not full_features[feat].is_valid(mm, ex, mc, options):
            continue
        try:
            alignment_list[chrm][intron] = 0
        except KeyError:
            alignment_list[chrm] = {intron:0}
    ### match intron lists
    total_precision = float(0)
    total_recall = float(0)
    key_count = 0
    for chrm in annotation_list.keys():
        if alignment_list.has_key(chrm):
            matches = len(set(annotation_list[chrm].keys()).intersection(set(alignment_list[chrm].keys())))
            # max(1, ...) guards against division by zero on empty lists.
            total_precision += (float(matches) / float(max(1, len(alignment_list[chrm].keys()))))
            total_recall += (float(matches) / float(max(1, len(annotation_list[chrm].keys()))))
            ### do not include chromosomes with zero values into average
            if matches > 0:
                key_count += 1
    total_precision /= max(1.0, float(key_count))
    total_recall /= max(1.0, float(key_count))
    return (total_precision, total_recall)
def main():
"""Main function extracting intron features."""
options = parse_options(sys.argv)
### get list of annotated introns
annotation_list = cPickle.load(open(options.anno_int, 'r'))
if options.ignore_strand:
    ### Canonicalize intron keys: any key whose first field is non-zero is
    ### re-registered under (0, start, stop) and the original key is removed.
    ### NOTE(review): assumes intron keys are 3-tuples (flag, start, stop) --
    ### confirm against the code that builds annotation_list (not visible here).
    for chrm in annotation_list.keys():
        skiplist = set()
        for intron in annotation_list[chrm].keys():
            if intron[0] == 0:
                ### already canonical -- nothing to rewrite
                continue
            annotation_list[chrm][(0, intron[1], intron[2])] = annotation_list[chrm][intron]
            skiplist.add(intron)
        ### drop the now-duplicated non-canonical keys
        for intron in skiplist:
            del annotation_list[chrm][intron]
    ### NOTE(review): raises NameError if annotation_list is empty (skiplist
    ### is only bound inside the loop) -- same pattern repeats below
    del skiplist
    ### filter annotation for max intron length
    print '\nFiltering intron list for max intron len'
    print '-----------------------------------------'
    skipped = 0
    for chrm in annotation_list.keys():
        skiplist = set()
        for intron in annotation_list[chrm].keys():
            ### intron[1]/intron[2] are start/stop; keep only introns whose
            ### span does not exceed the configured maximum
            if (intron[2] - intron[1]) > options.max_intron_len:
                skiplist.add(intron)
        for intron in skiplist:
            del annotation_list[chrm][intron]
        skipped += len(skiplist)
    print '%s introns removed from annotation' % skipped
    del skiplist
    ### Parse the feature file (tab-separated: chrm, start, stop, payload...)
    ### into a dict keyed by (chrm, start, stop). start/stop stay strings here.
    full_features = dict()
    if options.verbose:
        print 'Parsing %s' % options.features
    line_counter = 0
    ### NOTE(review): file handle is never closed explicitly; relies on
    ### interpreter cleanup (CPython refcounting)
    for line in open(options.features, 'r'):
        if options.verbose and line_counter % 1000 == 0:
            print 'parsed %i features from %s' % (line_counter, options.features)
        line_counter += 1
        sl = line.strip().split('\t')
        (chrm, start, stop) = sl[:3]
        try:
            ### merge payload into an existing Feature for this key;
            ### NOTE(review): Feature.full_features() is defined elsewhere --
            ### presumably it accumulates the extra columns; verify
            full_features[(chrm, start, stop)].full_features(sl[3:])
        except KeyError:
            ### first occurrence of this key -- create a new Feature.
            ### NOTE(review): magic constant 80 -- meaning not visible here
            ### (read length? score cap?); confirm against the Feature class
            full_features[(chrm, start, stop)] = Feature(80, sl[3:])
    ### filter full feature list for excluded introns
    ### options.exclude_introns is a comma-separated list of pickle files;
    ### '-' means "no exclusion"
    if options.exclude_introns != '-':
        _ex_introns = options.exclude_introns.strip().split(',')
        ### handle leading or trailing commas
        if _ex_introns[0] == '':
            _ex_introns = _ex_introns[1:]
        if _ex_introns[-1] == '':
            _ex_introns = _ex_introns[:-1]
        for _infile in _ex_introns:
            ### each pickle holds {chrm: {intron_tuple: ...}}; handle is not
            ### closed explicitly (same cleanup caveat as above)
            _ex_intron = cPickle.load(open(_infile, 'r'))
            for chrm in _ex_intron.keys():
                for _intron in _ex_intron[chrm].keys():
                    try:
                        ### str() conversion needed because full_features keys
                        ### keep start/stop as strings from the feature file
                        del full_features[(chrm, str(_intron[1]), str(_intron[2]))]
                    except KeyError:
                        ### excluded intron not present in feature set -- fine
                        continue
            ### free the per-file dict before loading the next pickle
            del _ex_intron
    if options.verbose:
        print 'Parsing completed.'
        print 'parsed %i features from %s' % (line_counter, options.features)
    ### SEARCH SPACE
    ### iterate over different filter dimensions
    #ex_list = [2, 4, 6, 8, 10, 12, 15, 20, 25, 30] # 10
    #ex_list = [2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18 ] # 15
    ex_list = [1, 2, 4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18 ] # 15
    mm_list = [0, 1, 2, 3, 4, 5, 6] # 7
    mc_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] # 10 ==> 700 combinations
    checked_combs = 0
    ### running best scores; each triple is (precision, recall, f-score) and
    ### each *_idx triple is the (ex, mm, mc) combination that produced it
    # pre rec fsc
    max_pre = (0.0, 0.0, 0.0)
    max_rec = (0.0, 0.0, 0.0)
    max_fsc = (0.0, 0.0, 0.0)
    max_pre_idx = (0, 0, 0)
    max_rec_idx = (0, 0, 0)
    max_fsc_idx = (0, 0, 0)
    ### exhaustive grid search over (ex, mm, mc); one result row per
    ### combination is appended to the matrix file
    matrix_file = open(options.matrix, 'w')
    for ex in ex_list:
        for mm in mm_list:
            for mc in mc_list:
                ### progress report every 10 combinations
                if options.verbose and checked_combs % 10 == 0:
                    print 'checked %i parameter combinations' % checked_combs
                    print 'best scores so far:\n \tbest fScore: %0.2f, best recall: %0.2f, best precision: %0.2f' % (max_fsc[2], max_rec[1], max_pre[0])
                checked_combs += 1
                (pre, rec) = get_performance_value(full_features, mm, ex, mc, annotation_list, options)
                ### harmonic mean of precision and recall (F1); guard against
                ### division by zero when both are 0
                if float(rec) + float(pre) > 0:
                    fsc = (2 * float(rec) * float(pre)) / (float(rec) + float(pre))
                else:
                    fsc = 0.0
                ### NOTE(review): the float() casts above suggest pre/rec may
                ### come back as strings; if so, the raw comparisons below
                ### (pre > max_pre[0], rec > max_rec[1]) compare str to float,
                ### which in Python 2 always ranks str higher -- confirm the
                ### return type of get_performance_value
                if pre > max_pre[0]:
                    max_pre = (pre, rec, fsc)
                    max_pre_idx = (ex, mm, mc)
                if rec > max_rec[1]:
                    max_rec = (pre, rec, fsc)
                    max_rec_idx = (ex, mm, mc)
                if fsc > max_fsc[2]:
                    max_fsc = (pre, rec, fsc)
                    max_fsc_idx = (ex, mm, mc)
                ### store information
                ### ex mm mc pre rec fsc
                print >> matrix_file, '%s\t%s\t%s\t%s\t%s\t%s' % (ex, mm, mc, pre, rec, fsc)
    matrix_file.close()
    ### write the three best parameter combinations, one per line:
    ### ex mm mc pre rec fsc
    best_file = open(options.best_scores, 'w')
    # best precision
    print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_pre_idx[0], max_pre_idx[1], max_pre_idx[2], max_pre[0], max_pre[1], max_pre[2])
    # best recall
    print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_rec_idx[0], max_rec_idx[1], max_rec_idx[2], max_rec[0], max_rec[1], max_rec[2])
    # best fScore
    print >> best_file, '%s\t%s\t%s\t%s\t%s\t%s' % (max_fsc_idx[0], max_fsc_idx[1], max_fsc_idx[2], max_fsc[0], max_fsc[1], max_fsc[2])
    best_file.close()

### standard script entry-point guard
if __name__ == "__main__":
    main()
|
{
"content_hash": "c94190053384ed6f63bd6b328394755e",
"timestamp": "",
"source": "github",
"line_count": 297,
"max_line_length": 225,
"avg_line_length": 38.696969696969695,
"alnum_prop": 0.5605150961454799,
"repo_name": "ratschlab/RNA-geeq",
"id": "9b554a43a71a552d9fca008fe02ddf826d4f4ccb",
"size": "11493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SAFT/find_optimal_param_set.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "227206"
},
{
"name": "Shell",
"bytes": "11631"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.