text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from __future__ import division
import urllib2
import sys
import os
import json
import datetime
import traceback
import logging
import logging.handlers
import inspect
import math
import onebusaway
if __name__ == "__main__":
    # Python 2 CLI (print statements, urllib2 imported above).
    # Usage: getNextArrivalAtStop.py <stopId> [busId] [arrivalIndex]
    # Prints the next arrival value from onebusaway, or NaN on bad input.
    # Exit codes: 0 = ok, 1 = no arrival (NaN result), 2 = missing stopId,
    # 3 = non-integer arrivalIndex.
    logger = onebusaway.getLogger()
    if len(sys.argv) < 2:
        # No stop id given: emit NaN so callers parsing stdout see a number.
        print float("NaN")
        sys.exit(2)
    stopId = sys.argv[1]
    busId = None  # optional: restrict to a specific route/bus
    if len(sys.argv) > 2:
        busId = sys.argv[2]
    arrivalIndex = 0  # optional: 0 = next arrival, 1 = one after, etc.
    if len(sys.argv) > 3:
        try:
            arrivalIndex = int(sys.argv[3])
        except Exception as e:
            print float("NaN")
            sys.exit(3)
    apiKey = onebusaway.getAPIKey()
    # safeGetNextArrivalInSeconds returns NaN on any lookup failure.
    nextArrival = onebusaway.safeGetNextArrivalInSeconds(apiKey, stopId, busId, arrivalIndex)
    print nextArrival
    if math.isnan(nextArrival):
        sys.exit(1)
    sys.exit(0)
|
{
"content_hash": "26f212938b3fe7bda606d86242699d4d",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 90,
"avg_line_length": 20.08108108108108,
"alnum_prop": 0.7146702557200538,
"repo_name": "wlindley/onebusaway",
"id": "4c5439dfe3021775a729056654eb5c2766721840",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getNextArrivalAtStop.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9025"
}
],
"symlink_target": ""
}
|
import os
import logging
import decimal
import base64
import json
from datetime import datetime
from lib import config, util, util_tetcoin
ASSET_MAX_RETRY = 3  # attempts to fetch an asset's extended info before it is marked errored
D = decimal.Decimal  # short local alias used throughout this module
def parse_issuance(db, message, cur_block_index, cur_block):
    """Apply a single issuance message to the tracked_assets collection.

    Depending on the message, this locks an asset, transfers ownership,
    changes its description, creates a new tracked asset, or adds quantity
    to an existing one.  Ignores messages whose status is not 'valid'.
    Returns True when a valid message was processed (implicitly None for
    invalid ones).
    """
    if message['status'] != 'valid':
        return

    def modify_extended_asset_info(asset, description):
        """adds an asset to asset_extended_info collection if the description is a valid json link. or, if the link
        is not a valid json link, will remove the asset entry from the table if it exists"""
        if util.is_valid_url(description, suffix='.json', allow_no_protocol=True):
            db.asset_extended_info.update({'asset': asset},
                {'$set': {
                    'info_url': description,
                    'info_status': 'needfetch',
                    'fetch_info_retry': 0, # retry ASSET_MAX_RETRY times to fetch info from info_url
                    'info_data': {},
                    'errors': []
                }}, upsert=True)
            #^ valid info_status settings: needfetch, valid, invalid, error
            #additional fields will be added later in events, once the asset info is pulled
        else:
            db.asset_extended_info.remove({ 'asset': asset })
            #remove any saved asset image data
            imagePath = os.path.join(config.DATA_DIR, config.SUBDIR_ASSET_IMAGES, asset + '.png')
            if os.path.exists(imagePath):
                os.remove(imagePath)

    tracked_asset = db.tracked_assets.find_one(
        {'asset': message['asset']}, {'_id': 0, '_history': 0})
    #^ pulls the tracked asset without the _id and history fields. This may be None

    if message['locked']: #lock asset
        assert tracked_asset is not None
        # Snapshot the pre-change document into _history so a block
        # rollback can restore it.
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'locked',
                'locked': True,
            },
            "$push": {'_history': tracked_asset } }, upsert=False)
        logging.info("Locking asset %s" % (message['asset'],))
    elif message['transfer']: #transfer asset
        assert tracked_asset is not None
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'transferred',
                'owner': message['issuer'],
            },
            "$push": {'_history': tracked_asset } }, upsert=False)
        logging.info("Transferring asset %s to address %s" % (message['asset'], message['issuer']))
    elif message['quantity'] == 0 and tracked_asset is not None: #change description
        db.tracked_assets.update(
            {'asset': message['asset']},
            {"$set": {
                '_at_block': cur_block_index,
                '_at_block_time': cur_block['block_time_obj'],
                '_change_type': 'changed_description',
                'description': message['description'],
            },
            "$push": {'_history': tracked_asset } }, upsert=False)
        # Description changes may point at a new (or removed) info JSON.
        modify_extended_asset_info(message['asset'], message['description'])
        logging.info("Changing description for asset %s to '%s'" % (message['asset'], message['description']))
    else: #issue new asset or issue addition qty of an asset
        if not tracked_asset: #new issuance
            tracked_asset = {
                '_change_type': 'created',
                '_at_block': cur_block_index, #the block ID this asset is current for
                '_at_block_time': cur_block['block_time_obj'],
                #^ NOTE: (if there are multiple asset tracked changes updates in a single block for the same
                # asset, the last one with _at_block == that block id in the history array is the
                # final version for that asset at that block
                'asset': message['asset'],
                'owner': message['issuer'],
                'description': message['description'],
                'divisible': message['divisible'],
                'locked': False,
                'total_issued': message['quantity'],
                'total_issued_normalized': util_tetcoin.normalize_quantity(message['quantity'], message['divisible']),
                '_history': [] #to allow for block rollbacks
            }
            db.tracked_assets.insert(tracked_asset)
            logging.info("Tracking new asset: %s" % message['asset'])
            modify_extended_asset_info(message['asset'], message['description'])
        else: #issuing additional of existing asset
            assert tracked_asset is not None
            db.tracked_assets.update(
                {'asset': message['asset']},
                {"$set": {
                    '_at_block': cur_block_index,
                    '_at_block_time': cur_block['block_time_obj'],
                    '_change_type': 'issued_more',
                },
                "$inc": {
                    'total_issued': message['quantity'],
                    'total_issued_normalized': util_tetcoin.normalize_quantity(message['quantity'], message['divisible'])
                },
                "$push": {'_history': tracked_asset} }, upsert=False)
            logging.info("Adding additional %s quantity for asset %s" % (
                util_tetcoin.normalize_quantity(message['quantity'], message['divisible']), message['asset']))
    return True
def inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, new_status='error', errors=None):
    """Record a failed extended-info fetch attempt for *asset*.

    Increments the asset's retry counter, stores the latest error list on
    the asset, flips ``info_status`` to *new_status* once the retry budget
    is exhausted, and persists the asset document.

    Fixes over the original:
    - ``errors=[]`` was a shared mutable default; every no-arg caller
      received (and callers could contaminate) the same list object.
    - ``== max_retry`` silently skipped the status change if the counter
      ever overshot the limit; ``>=`` is robust to that.
    """
    if errors is None:
        errors = []
    asset['fetch_info_retry'] += 1
    asset['errors'] = errors
    if asset['fetch_info_retry'] >= max_retry:
        asset['info_status'] = new_status
    db.asset_extended_info.save(asset)
def sanitize_json_data(data):
    """Sanitize user-supplied text fields of an asset info dict in place.

    The 'asset' field is always present and always sanitized; the optional
    fields are sanitized only when they exist.  Returns the same dict.
    """
    data['asset'] = util.sanitize_eliteness(data['asset'])
    for field in ('description', 'website', 'pgpsig'):
        if field in data:
            data[field] = util.sanitize_eliteness(data[field])
    return data
def process_asset_info(db, asset, info_data):
    """Validate and store fetched extended-info JSON for *asset*.

    On validation failure, bumps the retry counter with status 'invalid'
    and returns ``(False, errors)``.  On success, marks the asset valid,
    fetches any referenced image, saves the sanitized info and returns
    ``(True, None)``.
    """
    # sanity check
    assert asset['info_status'] == 'needfetch'
    assert 'info_url' in asset
    assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch

    # Schema validation first; structural checks appended on top.
    errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)

    if not isinstance(info_data, dict) or 'asset' not in info_data:
        errors.append('Invalid data format')
    elif asset['asset'] != info_data['asset']:
        errors.append('asset field does not match asset name')

    if len(errors) > 0:
        inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
        return (False, errors)

    asset['info_status'] = 'valid'

    #fetch any associated images...
    #TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it in later on)
    if 'image' in info_data:
        # valid_image records whether the image download/processing succeeded.
        info_data['valid_image'] = util.fetch_image(info_data['image'],
            config.SUBDIR_ASSET_IMAGES, asset['asset'], fetch_timeout=5)

    asset['info_data'] = sanitize_json_data(info_data)
    db.asset_extended_info.save(asset)
    return (True, None)
def fetch_all_asset_info(db):
    """Fetch extended info JSON for every asset marked 'needfetch'.

    Composes the list of info URLs (normalizing missing protocols to
    http://), streams the fetches, and processes each response in the
    completion hook — retrying failures and validating/storing successes.
    """
    assets = list(db.asset_extended_info.find({'info_status': 'needfetch'}))
    asset_info_urls = []

    def asset_fetch_complete_hook(urls_data):
        # urls_data maps url -> (success_flag, payload_or_error)
        logging.info("Enhanced asset info fetching complete. %s unique URLs fetched. Processing..." % len(urls_data))
        for asset in assets:
            logging.debug("Looking at asset %s: %s" % (asset, asset['info_url']))
            if asset['info_url']:
                # Normalize exactly as the URL list below was built so the
                # lookup into urls_data matches.
                info_url = ('http://' + asset['info_url']) \
                    if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url']
                assert info_url in urls_data
                if not urls_data[info_url][0]: #request was not successful
                    inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, errors=[urls_data[info_url][1]])
                    logging.warn("Fetch for asset at %s not successful: %s (try %i of %i)" % (
                        info_url, urls_data[info_url][1], asset['fetch_info_retry'], ASSET_MAX_RETRY))
                else:
                    result = process_asset_info(db, asset, urls_data[info_url][1])
                    if not result[0]:
                        logging.info("Processing for asset %s at %s not successful: %s" % (asset['asset'], info_url, result[1]))
                    else:
                        logging.info("Processing for asset %s at %s successful" % (asset['asset'], info_url))

    #compose and fetch all info URLs in all assets with them
    for asset in assets:
        if not asset['info_url']: continue
        if asset.get('disabled', False):
            logging.info("ExtendedAssetInfo: Skipping disabled asset %s" % asset['asset'])
            continue
        #may or may not end with .json. may or may not start with http:// or https://
        asset_info_urls.append(('http://' + asset['info_url']) \
            if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url'])

    asset_info_urls_str = ', '.join(asset_info_urls)
    asset_info_urls_str = (asset_info_urls_str[:2000] + ' ...') if len(asset_info_urls_str) > 2000 else asset_info_urls_str #truncate if necessary
    if len(asset_info_urls):
        logging.info('Fetching enhanced asset info for %i assets: %s' % (len(asset_info_urls), asset_info_urls_str))
        util.stream_fetch(asset_info_urls, asset_fetch_complete_hook,
            fetch_timeout=10, max_fetch_size=4*1024, urls_group_size=20, urls_group_time_spacing=20,
            per_request_complete_callback=lambda url, data: logging.debug("Asset info URL %s retrieved, result: %s" % (url, data)))
def get_escrowed_balances(addresses):
    """Return per-address, per-asset quantities escrowed by the protocol.

    Runs one aggregate SQL query per escrow source (open orders, pending
    order matches on both sides, open bets, pending bet matches on both
    sides, open rps games, pending rps matches on both sides) and folds
    the results into ``{address: {asset: total_quantity}}``.
    """
    # One '?' placeholder per input address for the IN (...) clauses.
    addresses_holder = ','.join(['?' for e in range(0,len(addresses))])

    # Open orders: remaining give amounts still held in escrow.
    sql ='''SELECT (source || '_' || give_asset) AS source_asset, source AS address, give_asset AS asset, SUM(give_remaining) AS quantity
            FROM orders
            WHERE source IN ({}) AND status = ? AND give_asset != ?
            GROUP BY source_asset'''.format(addresses_holder)
    bindings = addresses + ['open', 'TET']
    results = util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Pending order matches, tx0 side (forward asset).
    sql = '''SELECT (tx0_address || '_' || forward_asset) AS source_asset, tx0_address AS address, forward_asset AS asset, SUM(forward_quantity) AS quantity
             FROM order_matches
             WHERE tx0_address IN ({}) AND forward_asset != ? AND status = ?
             GROUP BY source_asset'''.format(addresses_holder)
    bindings = addresses + ['TET', 'pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Pending order matches, tx1 side (backward asset).
    sql = '''SELECT (tx1_address || '_' || backward_asset) AS source_asset, tx1_address AS address, backward_asset AS asset, SUM(backward_quantity) AS quantity
             FROM order_matches
             WHERE tx1_address IN ({}) AND backward_asset != ? AND status = ?
             GROUP BY source_asset'''.format(addresses_holder)
    bindings = addresses + ['TET', 'pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Open bets: remaining wagers (always in the config.XTN asset).
    sql = '''SELECT source AS address, '{}' AS asset, SUM(wager_remaining) AS quantity
             FROM bets
             WHERE source IN ({}) AND status = ?
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['open']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Pending bet matches, tx0 side.
    sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(forward_quantity) AS quantity
             FROM bet_matches
             WHERE tx0_address IN ({}) AND status = ?
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Pending bet matches, tx1 side.
    sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(backward_quantity) AS quantity
             FROM bet_matches
             WHERE tx1_address IN ({}) AND status = ?
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Open rock-paper-scissors games.
    sql = '''SELECT source AS address, '{}' AS asset, SUM(wager) AS quantity
             FROM rps
             WHERE source IN ({}) AND status = ?
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['open']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # RPS matches still unresolved on either side, tx0 address.
    sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(wager) AS quantity
             FROM rps_matches
             WHERE tx0_address IN ({}) AND status IN (?, ?, ?)
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # RPS matches still unresolved on either side, tx1 address.
    sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(wager) AS quantity
             FROM rps_matches
             WHERE tx1_address IN ({}) AND status IN (?, ?, ?)
             GROUP BY address'''.format(config.XTN, addresses_holder)
    bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
    results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']

    # Fold every row into a nested {address: {asset: quantity}} dict.
    escrowed_balances = {}
    for order in results:
        if order['address'] not in escrowed_balances:
            escrowed_balances[order['address']] = {}
        if order['asset'] not in escrowed_balances[order['address']]:
            escrowed_balances[order['address']][order['asset']] = 0
        escrowed_balances[order['address']][order['asset']] += order['quantity']

    return escrowed_balances
|
{
"content_hash": "e64246a881c45f52d34eb70bb5bdc9ea",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 159,
"avg_line_length": 52.132616487455195,
"alnum_prop": 0.5889996562392574,
"repo_name": "Tetchain/tetblockd",
"id": "185584856303477c6956874f76f629cbe43e2c1b",
"size": "14545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/components/assets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "5576"
},
{
"name": "Python",
"bytes": "313318"
},
{
"name": "Shell",
"bytes": "5102"
}
],
"symlink_target": ""
}
|
from qtpy import QtWidgets, QtGui
from echo.qt.autoconnect import autoconnect_callbacks_to_qt
from echo import CallbackProperty
from echo.qt.connect import UserDataWrapper
def test_autoconnect_callbacks_to_qt():
    """End-to-end test of autoconnect_callbacks_to_qt.

    Builds a widget whose child object names follow the echo naming
    convention (combotext_*, combodata_*, text_*, valuetext_*, value_*,
    bool_*), connects it to a class with matching CallbackProperty names,
    and checks that updates propagate in both directions.
    """

    class Data(object):
        pass

    data1 = Data()
    data2 = Data()

    class CustomWidget(QtWidgets.QWidget):
        def __init__(self, parent=None):
            super(CustomWidget, self).__init__(parent=parent)
            self.layout = QtWidgets.QVBoxLayout()
            self.setLayout(self.layout)
            # Combo box connected by visible text (combotext_ prefix).
            self.combotext_planet = QtWidgets.QComboBox(objectName='combotext_planet')
            self.layout.addWidget(self.combotext_planet)
            self.combotext_planet.addItem('earth')
            self.combotext_planet.addItem('mars')
            self.combotext_planet.addItem('jupiter')
            # Combo box connected by item user data (combodata_ prefix).
            self.combodata_dataset = QtWidgets.QComboBox(objectName='combodata_dataset')
            self.layout.addWidget(self.combodata_dataset)
            self.combodata_dataset.addItem('data1', UserDataWrapper(data1))
            self.combodata_dataset.addItem('data2', UserDataWrapper(data2))
            # Plain text line edit (text_ prefix).
            self.text_name = QtWidgets.QLineEdit(objectName='text_name')
            self.layout.addWidget(self.text_name)
            # Numeric line edit (valuetext_ prefix).
            self.valuetext_age = QtWidgets.QLineEdit(objectName='valuetext_age')
            self.layout.addWidget(self.valuetext_age)
            # Slider (value_ prefix) remapped through value_range below.
            self.value_height = QtWidgets.QSlider(objectName='value_height')
            self.value_height.setMinimum(0)
            self.value_height.setMaximum(10)
            self.layout.addWidget(self.value_height)
            # Checkable button (bool_ prefix).
            self.bool_log = QtWidgets.QToolButton(objectName='bool_log')
            self.bool_log.setCheckable(True)
            self.layout.addWidget(self.bool_log)

    class Person(object):
        planet = CallbackProperty()
        dataset = CallbackProperty()
        name = CallbackProperty()
        age = CallbackProperty()
        height = CallbackProperty()
        log = CallbackProperty()

    widget = CustomWidget()
    person = Person()

    # height: slider positions 0-10 map onto 0-100; age: formatted text.
    connect_kwargs = {'height': {'value_range': (0, 100)},
                      'age': {'fmt': '{:.2f}'}}

    # Keep a reference so the connections stay alive for the test body.
    c1 = autoconnect_callbacks_to_qt(person, widget, connect_kwargs=connect_kwargs)  # noqa

    # Check that modifying things in the Qt widget updates the callback properties
    widget.combotext_planet.setCurrentIndex(2)
    assert person.planet == 'jupiter'
    widget.combodata_dataset.setCurrentIndex(1)
    assert person.dataset is data2
    widget.text_name.setText('Lovelace')
    widget.text_name.editingFinished.emit()
    assert person.name == 'Lovelace'
    widget.valuetext_age.setText('76')
    widget.valuetext_age.editingFinished.emit()
    assert person.age == 76
    widget.value_height.setValue(7)
    assert person.height == 70
    widget.bool_log.setChecked(True)
    assert person.log

    # Check that modifying the callback properties updates the Qt widget
    person.planet = 'mars'
    assert widget.combotext_planet.currentIndex() == 1
    person.dataset = data1
    assert widget.combodata_dataset.currentIndex() == 0
    person.name = 'Curie'
    assert widget.text_name.text() == 'Curie'
    person.age = 66.3
    assert widget.valuetext_age.text() == '66.30'
    person.height = 54
    assert widget.value_height.value() == 5
    person.log = False
    assert not widget.bool_log.isChecked()
def test_autoconnect_with_empty_qt_item():
    """Passing an object with no child widgets to
    autoconnect_callbacks_to_qt must be a harmless no-op, not a crash."""
    class Person(object):
        name = CallbackProperty()

    childless = QtGui.QPalette()
    instance = Person()
    autoconnect_callbacks_to_qt(instance, childless)
|
{
"content_hash": "8921d7a828df77fef2a0219c262be43a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 91,
"avg_line_length": 30.87704918032787,
"alnum_prop": 0.6625962304220866,
"repo_name": "glue-viz/echo",
"id": "ed2beaf01fc0103f45a184bd7e64cf337c2819a7",
"size": "3767",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "echo/qt/tests/test_autoconnect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "111753"
}
],
"symlink_target": ""
}
|
from gdc_ng_models.models.misc import *
|
{
"content_hash": "3a24721e2c1ede693d2d7edc7f457210",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.775,
"repo_name": "NCI-GDC/gdcdatamodel",
"id": "7123efb48cfa1451d2c83ded892b6750c3ad638e",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "gdcdatamodel/models/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "153557"
},
{
"name": "Shell",
"bytes": "207"
}
],
"symlink_target": ""
}
|
from tornado.gen import coroutine
from qiita_core.util import execute_as_transaction
from qiita_db.software import Software
from .base_handlers import BaseHandler
class SoftwareHandler(BaseHandler):
    @coroutine
    @execute_as_transaction
    def get(self):
        """Render the software listing page.

        Regular (or anonymous) users only see active software; users with
        the 'admin' or 'dev' level see inactive software as well.
        """
        user = self.current_user
        show_inactive = user is not None and user.level in {'admin', 'dev'}
        software = Software.iter(active=not show_inactive)
        self.render("software.html", software=software)
|
{
"content_hash": "8ad58de66fa5ed9c6eb18005adb019cc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 29.894736842105264,
"alnum_prop": 0.6883802816901409,
"repo_name": "ElDeveloper/qiita",
"id": "9b1d5e238727694132f83c3030a23705e9ec5e21",
"size": "918",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qiita_pet/handlers/software.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2353"
},
{
"name": "HTML",
"bytes": "548553"
},
{
"name": "JavaScript",
"bytes": "83566"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "84815"
},
{
"name": "Python",
"bytes": "2293282"
},
{
"name": "SQLPL",
"bytes": "7501"
},
{
"name": "Shell",
"bytes": "3180"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
import py2exe
# py2exe build configuration for the "Facturacion" invoicing program.
# The `console` option tells py2exe to build a console executable from
# Facturacion.py; `packages` bundles the local sub-packages.
setup(
    name='Facturacion',
    version='1.0',
    packages=['', 'Objects', 'Collections', 'Controllers'],
    url='https://atalgaba.com',
    license='GNU GENERAL PUBLIC LICENSE',
    author='IhToN',
    author_email='atalgaba@gmail.com',
    description='Programa simple de facturación',
    console=['Facturacion.py']
)
|
{
"content_hash": "04da42ea4170722d78a9fe892dbd8f86",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 59,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6711229946524064,
"repo_name": "IhToN/DAW1-PRG",
"id": "450e1d667a2e85c66c60e425ca2ae301853438c5",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Facturacion/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "276667"
}
],
"symlink_target": ""
}
|
import sys
import asyncio
import os
from queue import Empty
from molotov.util import cancellable_sleep, printable_error, multiprocessing
class SharedConsole(object):
    """Multi-process compatible stdout console.

    Any process may queue lines via :meth:`print`; only the process that
    created the console (the "creator") drains the queue in
    :meth:`display` and writes to *stream*.  Lines queued from other
    processes are prefixed with their pid.
    """

    def __init__(self, interval=0.1, max_lines_displayed=20, stream=sys.stdout):
        # Cross-process queue of already-formatted lines awaiting display.
        self._stream = multiprocessing.Queue()
        self._interval = interval
        self._creator = os.getpid()
        # Fix: the original assigned _stop = True and then immediately
        # _stop = False; the first assignment was a dead store.
        self._stop = False
        self._max_lines_displayed = max_lines_displayed
        self.stream = stream

    async def stop(self):
        """Stop the display loop, draining any still-queued lines first."""
        self._stop = True
        while True:
            try:
                self.stream.write(self._stream.get_nowait())
            except Empty:
                break
        self.stream.flush()

    async def flush(self):
        """Flush the underlying stream, yielding once to the event loop."""
        self.stream.flush()
        await asyncio.sleep(0)

    async def display(self):
        """Periodically drain the queue and write lines to the stream.

        Runs only in the creator process; other processes return at once.
        At most ``max_lines_displayed`` lines are written per batch before
        yielding, and the loop sleeps *interval* seconds between batches.
        """
        if os.getpid() != self._creator:
            return
        while not self._stop:
            lines_displayed = 0
            while True:
                try:
                    line = self._stream.get_nowait()
                    self.stream.write(line)
                    lines_displayed += 1
                except Empty:
                    break
                if self._stop or lines_displayed > self._max_lines_displayed:
                    break
                else:
                    await asyncio.sleep(0)
            self.stream.flush()
            if not self._stop:
                await cancellable_sleep(self._interval)

    def print(self, line, end="\n"):
        """Queue *line* + *end* for display, pid-tagged in worker processes."""
        if os.getpid() != self._creator:
            line = "[%d] %s" % (os.getpid(), line)
        line += end
        self._stream.put_nowait(line)

    def print_error(self, error, tb=None):
        """Queue a printable rendering of *error* (and optional traceback)."""
        for line in printable_error(error, tb):
            self.print(line)

    def print_block(self, start, callable, end="OK"):
        """Queue "<start>...", run *callable*, queue *end*, return its result.

        Fix: the original always queued the literal "OK" and silently
        ignored the *end* argument.
        """
        if os.getpid() != self._creator:
            prefix = "[%d] " % os.getpid()
        else:
            prefix = ""
        self._stream.put(prefix + start + "...\n")
        res = callable()
        self._stream.put(prefix + end + "\n")
        return res
|
{
"content_hash": "56e2a76bbd1cc23bee93a691a3e8b096",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 80,
"avg_line_length": 29.445945945945947,
"alnum_prop": 0.5222579164754475,
"repo_name": "loads/molotov",
"id": "ad15c1b390f53f2ca07febe696376b3ab134ebb7",
"size": "2179",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "molotov/sharedconsole.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "801"
},
{
"name": "Python",
"bytes": "134872"
}
],
"symlink_target": ""
}
|
"""Simple collision check.
This module provides simple collision checking appropriate for
shmups. It provides routines to check whether two moving circles
collided during the past frame.
An equivalent C-based version will be used automatically if it was
compiled and installed with the module. If available, it will be noted
in the docstrings for the functions.
Basic Usage:
from bulletml.collision import collides
for bullet in bullets:
if collides(player, bullet): ... # Kill the player.
"""
from __future__ import division
def overlaps(a, b):
    """Return true if two circles are overlapping.

    Usually, you'll want to use the 'collides' method instead, but this
    one can be useful for just checking to see if the player has entered
    an area or hit a stationary object.  Each argument needs x and y
    attributes; a missing radius attribute defaults to 0.5.

    (This function is unoptimized.)
    """
    try:
        combined = a.radius + b.radius
    except AttributeError:
        # One or both circles have no radius attribute; fall back to 0.5.
        combined = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)
    dist_x = a.x - b.x
    dist_y = a.y - b.y
    # Compare squared distances to avoid a square root.
    return dist_x * dist_x + dist_y * dist_y <= combined * combined
def collides(a, b):
    """Return true if the two moving circles collide.

    a and b should have the following attributes:

    x, y - required, current position
    px, py - not required, defaults to x, y, previous frame position
    radius - not required, defaults to 0.5

    (This function is unoptimized.)
    """
    # Current locations.
    xa = a.x
    xb = b.x
    ya = a.y
    yb = b.y
    # Treat b as a point, we only need one radius.
    try:
        radius = a.radius + b.radius
    except AttributeError:
        radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)
    # Previous frame locations, defaulting to the current position.
    # Bug fix: attribute access raises AttributeError, not KeyError, so
    # the original `except KeyError` clauses never fired and any object
    # without px/py crashed instead of using the documented default.
    try: pxa = a.px
    except AttributeError: pxa = xa
    try: pya = a.py
    except AttributeError: pya = ya
    try: pxb = b.px
    except AttributeError: pxb = xb
    try: pyb = b.py
    except AttributeError: pyb = yb
    # Translate b's final position to be relative to a's start.
    # And now, circle/line collision.
    dir_x = pxa + (xb - xa) - pxb
    dir_y = pya + (yb - ya) - pyb
    diff_x = pxa - pxb
    diff_y = pya - pyb
    if (dir_x < 0.0001 and dir_x > -0.0001
        and dir_y < 0.0001 and dir_y > -0.0001):
        # b did not move relative to a, so do point/circle.
        return diff_x * diff_x + diff_y * diff_y < radius * radius
    # Project the start offset onto the relative motion segment:
    # t = dot(diff, dir) / dot(dir, dir), clamped to [0, 1].
    t = (diff_x * dir_x + diff_y * dir_y) / (dir_x * dir_x + dir_y * dir_y)
    if t < 0:
        t = 0
    elif t > 1:
        t = 1
    dist_x = pxa - (pxb + dir_x * t)
    dist_y = pya - (pyb + dir_y * t)
    # dist_sq <= radius_sq
    return dist_x * dist_x + dist_y * dist_y <= radius * radius
def collides_all(a, others):
    """Filter the second argument to those that collide with the first.

    Equivalent to filter(lambda o: collides(a, o), others), but much
    faster when the compiled extension is available (which it is not
    currently).
    """
    def _hits(candidate):
        return collides(a, candidate)
    return filter(_hits, others)
# Swap in the optional compiled C implementations when the extension
# module was built and installed; otherwise silently keep the pure-Python
# versions defined above.
try:
    from bulletml._collision import collides, overlaps, collides_all
except ImportError:
    pass
|
{
"content_hash": "5bae265604d37f0d807c45fb184a2eed",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 75,
"avg_line_length": 27.848214285714285,
"alnum_prop": 0.6219942289195255,
"repo_name": "andarms/pyweek22",
"id": "2e88b3c0a492008a8c62cb0a23c0c92fbd9f3f4f",
"size": "3119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bulletml/collision.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7118"
},
{
"name": "Python",
"bytes": "81896"
}
],
"symlink_target": ""
}
|
import os
import time
from cm_api.api_client import ApiResource
# If the exit code is not zero Cloudera Director will fail
# Post creation scripts also have access to the following environment variables:
# DEPLOYMENT_HOST_PORT
# ENVIRONMENT_NAME
# DEPLOYMENT_NAME
# CLUSTER_NAME
# CM_USERNAME
# CM_PASSWORD
def main():
    """Create a "cdsw-gateway" host template on the cluster named by
    the CLUSTER_NAME environment variable, via the CM API.
    """
    # CM host is the host part of "host:port" from the deployment env var.
    cmhost = os.environ['DEPLOYMENT_HOST_PORT'].split(":")[0]
    # NOTE(review): credentials are hard-coded here even though CM_USERNAME /
    # CM_PASSWORD are documented above — confirm this is intentional.
    api = ApiResource(cmhost, username='admin', password='admin')
    all_clusters = api.get_all_clusters()
    for cluster in all_clusters:
        if (cluster.name == os.environ['CLUSTER_NAME']):
            break
    # NOTE(review): if no cluster matches CLUSTER_NAME, `cluster` is the last
    # one iterated (or NameError when the list is empty) — confirm intended.
    template = cluster.create_host_template("cdsw-gateway")
if __name__ == "__main__":
    main()
|
{
"content_hash": "08a0020ab35248881806402be746a765",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 26,
"alnum_prop": 0.6634615384615384,
"repo_name": "TobyHFerguson/director-scripts",
"id": "b80b4ea975497f5b5b344f7d28734c58b4b38c42",
"size": "746",
"binary": false,
"copies": "1",
"ref": "refs/heads/old_master",
"path": "cloud-lab/aws/scripts/create_template.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "10260"
},
{
"name": "Python",
"bytes": "53414"
},
{
"name": "Shell",
"bytes": "185087"
}
],
"symlink_target": ""
}
|
import os
from flask_script import Manager, Shell
from app import create_app
from flask import url_for
# Build the Flask app from the FLASK_CONFIG env var (falls back to 'default')
# and wrap it in a Flask-Script manager.
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
def make_shell():
    # Objects exposed in the `manage.py shell` interactive context.
    return dict(app=app)
manager.add_command("shell", Shell(make_context=make_shell))
def run_test(discover_name):
    """Discover and run the unittest suite under *discover_name*."""
    import unittest
    suite = unittest.TestLoader().discover(discover_name)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite)
@manager.command
def test():
    """Run the unit-test suite from the tests/ directory."""
    run_test('tests')
@manager.command
def list_routes():
    """Print every registered URL rule (endpoint, methods, URL), sorted.

    Python 2 code: uses urllib.unquote and the print statement.
    """
    import urllib
    output = []
    for rule in app.url_map.iter_rules():
        # Substitute "[arg]" placeholders so url_for can build the URL.
        options = {}
        for arg in rule.arguments:
            options[arg] = "[{0}]".format(arg)
        methods = ','.join(rule.methods)
        url = url_for(rule.endpoint, **options)
        line = urllib.unquote(
            "{:50s} {:20s} {}".format(rule.endpoint, methods, url))
        output.append(line)
    for line in sorted(output):
        print line
# Script entry point: hand control to Flask-Script's CLI dispatcher.
if __name__ == '__main__':
    manager.run()
|
{
"content_hash": "6e8c0fb8d8a84230358f6df4881d3f39",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 67,
"avg_line_length": 21.224489795918366,
"alnum_prop": 0.6298076923076923,
"repo_name": "utofu/oauth-server",
"id": "df450c53164104a5ea3d3f94524bf09b734a4d35",
"size": "1062",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "732"
},
{
"name": "Python",
"bytes": "23780"
}
],
"symlink_target": ""
}
|
"""Support for Actions on Google Assistant Smart Home Control."""
from __future__ import annotations
import logging
from typing import Any
import voluptuous as vol
# Typing imports
from homeassistant.const import CONF_API_KEY, CONF_NAME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ALIASES,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_PROJECT_ID,
CONF_REPORT_STATE,
CONF_ROOM_HINT,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
DEFAULT_EXPOSE_BY_DEFAULT,
DEFAULT_EXPOSED_DOMAINS,
DOMAIN,
SERVICE_REQUEST_SYNC,
)
from .const import EVENT_QUERY_RECEIVED # noqa: F401
from .http import GoogleAssistantView, GoogleConfig
from .const import EVENT_COMMAND_RECEIVED, EVENT_SYNC_RECEIVED # noqa: F401, isort:skip
_LOGGER = logging.getLogger(__name__)

# Deprecated option; stripped from user config by vol.Remove below.
CONF_ALLOW_UNLOCK = "allow_unlock"

# Per-entity configuration: display name, whether to expose it to Google,
# alternative spoken names, and a room hint for placement.
ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_NAME): cv.string,
        vol.Optional(CONF_EXPOSE, default=True): cv.boolean,
        vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_ROOM_HINT): cv.string,
    }
)

# Service-account credentials; extra keys allowed so a full JSON key file
# can be pasted in unchanged.
GOOGLE_SERVICE_ACCOUNT = vol.Schema(
    {
        vol.Required(CONF_PRIVATE_KEY): cv.string,
        vol.Required(CONF_CLIENT_EMAIL): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)
def _check_report_state(data):
    """Schema validator: report_state requires a service account."""
    has_service_account = CONF_SERVICE_ACCOUNT in data
    if data[CONF_REPORT_STATE] and not has_service_account:
        raise vol.Invalid("If report state is enabled, a service account must exist")
    return data
# Full integration schema.  vol.Remove silently drops the deprecated
# allow_unlock / api_key options; _check_report_state then enforces the
# report-state/service-account dependency.
GOOGLE_ASSISTANT_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Required(CONF_PROJECT_ID): cv.string,
            vol.Optional(
                CONF_EXPOSE_BY_DEFAULT, default=DEFAULT_EXPOSE_BY_DEFAULT
            ): cv.boolean,
            vol.Optional(
                CONF_EXPOSED_DOMAINS, default=DEFAULT_EXPOSED_DOMAINS
            ): cv.ensure_list,
            vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ENTITY_SCHEMA},
            # str on purpose, makes sure it is configured correctly.
            vol.Optional(CONF_SECURE_DEVICES_PIN): str,
            vol.Optional(CONF_REPORT_STATE, default=False): cv.boolean,
            vol.Optional(CONF_SERVICE_ACCOUNT): GOOGLE_SERVICE_ACCOUNT,
            # deprecated configuration options
            vol.Remove(CONF_ALLOW_UNLOCK): cv.boolean,
            vol.Remove(CONF_API_KEY): cv.string,
        },
        extra=vol.PREVENT_EXTRA,
    ),
    _check_report_state,
)

# Top-level config: the integration section is optional; other domains'
# config passes through untouched.
CONFIG_SCHEMA = vol.Schema(
    {vol.Optional(DOMAIN): GOOGLE_ASSISTANT_SCHEMA}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistant, yaml_config: dict[str, Any]):
    """Activate Google Actions component."""
    if DOMAIN not in yaml_config:
        # Integration not configured; nothing to set up.
        return True
    config = yaml_config[DOMAIN]
    google_config = GoogleConfig(hass, config)
    await google_config.async_initialize()

    # HTTP endpoint that Google calls for sync/query/execute intents.
    hass.http.register_view(GoogleAssistantView(google_config))

    if google_config.should_report_state:
        google_config.async_enable_report_state()

    async def request_sync_service_handler(call: ServiceCall):
        """Handle request sync service calls."""
        # Prefer an explicit agent_user_id; fall back to the calling user.
        agent_user_id = call.data.get("agent_user_id") or call.context.user_id
        if agent_user_id is None:
            _LOGGER.warning(
                "No agent_user_id supplied for request_sync. Call as a user or pass in user id as agent_user_id"
            )
            return
        await google_config.async_sync_entities(agent_user_id)

    # Register service only if key is provided
    if CONF_SERVICE_ACCOUNT in config:
        hass.services.async_register(
            DOMAIN, SERVICE_REQUEST_SYNC, request_sync_service_handler
        )
    return True
|
{
"content_hash": "7286b81b1b712a3c63d807d6a0e08080",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 112,
"avg_line_length": 30.566929133858267,
"alnum_prop": 0.6622874806800618,
"repo_name": "kennedyshead/home-assistant",
"id": "13516783233ade65c43133b68bc19259ec3ea823",
"size": "3882",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/google_assistant/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "33970989"
},
{
"name": "Shell",
"bytes": "4900"
}
],
"symlink_target": ""
}
|
"""
Display return data as a progress bar
"""
# ``progressbar`` is an optional third-party dependency; record whether it
# imported so __virtual__ can refuse to load this outputter when it is absent.
try:
    import progressbar
    HAS_PROGRESSBAR = True
except ImportError:
    HAS_PROGRESSBAR = False
def __virtual__():
    """Only load this outputter when the ``progressbar`` library imports."""
    # HAS_PROGRESSBAR is already a bool; no need for ``True if ... else False``.
    return HAS_PROGRESSBAR
def output(ret, bar, **kwargs):  # pylint: disable=unused-argument
    """
    Update the progress bar
    """
    if "return_count" not in ret:
        return ""
    count = ret["return_count"]
    # Targets behind a syndic can push the actual return count above the
    # locally computed maximum; widen the bar instead of failing.
    # TODO: compute the real target minion count and drop this workaround
    # (details in #44239).
    if count > bar.maxval:
        bar.maxval = count
    bar.update(count)
    return ""
def progress_iter(progress):
    """
    Initialize and return a progress bar iter
    """
    total = progress["minion_count"]
    # Percentage | bar | elapsed time | "Returns: [n/total]"
    layout = [
        progressbar.Percentage(),
        " ",
        progressbar.Bar(),
        " ",
        progressbar.Timer(),
        " Returns: [",
        progressbar.Counter(),
        "/{}]".format(total),
    ]
    bar = progressbar.ProgressBar(widgets=layout, maxval=total)
    bar.start()
    return bar
|
{
"content_hash": "54198c9c7d2ecffc2f4744817b375533",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 24.32,
"alnum_prop": 0.6011513157894737,
"repo_name": "saltstack/salt",
"id": "1d00a379cc468bc4cf9b0b57bae5494611bf1bf4",
"size": "1216",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "salt/output/progress.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
import p4_hlir.hlir.p4 as p4
from tenjin.util import *
import os
import globals as gl
import re
import socket
from collections import defaultdict, OrderedDict
from util.topo_sorting import Graph, Node
from time import sleep
import sys
import json
from pkg_resources import resource_string
# Resolve template/plugin directories relative to this module so the
# compiler works regardless of the current working directory.
_SMART_DIR = os.path.dirname(os.path.realpath(__file__))
templates_dir = os.path.join(_SMART_DIR, "templates")
plugin_base = os.path.join(_SMART_DIR, 'plugin/')
# Matches '[' or ']' in header-stack instance names (e.g. "vlan[0]") so
# they can be rewritten into valid C identifiers.
re_brackets = re.compile('[\[\]]')
def bytes_round_up(bits):
    """Return the number of whole bytes needed to hold *bits* bits.

    Uses floor division (``//``) so the result stays an integer on both
    Python 2 and Python 3 (plain ``/`` would yield a float on Python 3).
    """
    return (bits + 7) // 8
def get_header_instance_name(p4_header_instance):
    """Return the header instance name rewritten as a valid C identifier.

    Bracketed stack names such as "vlan[0]" become "vlan_0_".
    """
    # C identifiers cannot contain '[' or ']'; map both to '_'.
    return re_brackets.sub("_", p4_header_instance.name)
def get_field_instance_name(p4_field_instance, header_name = None):
    """Return the C name of a field instance: "<header>_<field>".

    When *header_name* is not supplied, it is derived from the field's
    enclosing header instance.
    """
    if header_name is None:
        header_name = get_header_instance_name(p4_field_instance.instance)
    return "{}_{}".format(header_name, p4_field_instance.name)
def get_action_name(p4_action):
    """Return the name used to identify *p4_action* in generated code."""
    return p4_action.name
def get_table_name(p4_table):
    """Return the name used to identify *p4_table* in generated code."""
    return p4_table.name
def get_conditional_node_name(p4_conditional_node):
    """Return the name used to identify *p4_conditional_node* in generated code."""
    return p4_conditional_node.name
def int_to_byte_array(val, num_bytes):
    """Decompose *val* into a big-endian list of *num_bytes* byte values.

    Values wider than *num_bytes* bytes are truncated to the low-order
    bytes. Uses ``//`` and ``range`` so the helper behaves identically on
    Python 2 and Python 3 (``/=`` would produce a float and ``xrange`` does
    not exist on Python 3).
    """
    res = []
    for _ in range(num_bytes):
        res.append(int(val % 256))
        val //= 256
    # Bytes were produced least-significant first; flip to big-endian.
    res.reverse()
    return res
def build_switch_match_key(field_widths, value):
    """Split a packed match *value* into the byte-array key format.

    *value* packs the fields with the last field in the low-order bits;
    the returned list concatenates each field's big-endian bytes (padded
    to at least 4 bytes) in field order.
    """
    key_bytes = []
    for width in reversed(field_widths):
        field_value = value & ((1 << width) - 1)
        field_num_bytes = max(bytes_round_up(width), 4)
        # Prepend so earlier fields end up first in the final key.
        key_bytes = int_to_byte_array(field_value, field_num_bytes) + key_bytes
        value = value >> width
    return key_bytes
def produce_parser_topo_sorting(hlir):
    """Return header instances in parse order (topologically sorted).

    Performs a recursive walk over the parse graph starting at the "start"
    state, adding an edge between each pair of consecutively extracted
    headers, then topo-sorts the resulting graph. Members of the same tag
    stack are finally forced to be contiguous in the returned list.
    """
    header_graph = Graph()
    # tag_stacks_index maps a stack's base name to the next index to
    # materialize for virtual ("next"-style) instances along the current
    # parse path; it is copied at each branch so siblings don't interfere.
    def walk_rec(hlir, parse_state, prev_hdr_node, tag_stacks_index):
        assert(isinstance(parse_state, p4.p4_parse_state))
        for call in parse_state.call_sequence:
            call_type = call[0]
            if call_type == p4.parse_call.extract:
                hdr = call[1]
                if hdr.virtual:
                    base_name = hdr.base_name
                    current_index = tag_stacks_index[base_name]
                    # Stack exhausted on this path: stop descending.
                    if current_index > hdr.max_index:
                        return
                    tag_stacks_index[base_name] += 1
                    name = base_name + "[%d]" % current_index
                    hdr = hlir.p4_header_instances[name]
                if hdr not in header_graph:
                    header_graph.add_node(hdr)
                hdr_node = header_graph.get_node(hdr)
                if prev_hdr_node:
                    prev_hdr_node.add_edge_to(hdr_node)
                else:
                    # First header extracted overall becomes the graph root.
                    header_graph.root = hdr;
                prev_hdr_node = hdr_node
        for branch_case, next_state in parse_state.branch_to.items():
            if not next_state: continue
            # Only recurse into parse states; tables/conditionals end the walk.
            if not isinstance(next_state, p4.p4_parse_state):
                continue
            walk_rec(hlir, next_state, prev_hdr_node, tag_stacks_index.copy())
    start_state = hlir.p4_parse_states["start"]
    walk_rec(hlir, start_state, None, defaultdict(int))
    header_topo_sorting = header_graph.produce_topo_sorting()
    # topo_sorting = []
    # for hdr in header_topo_sorting:
    #     topo_sorting.append(get_header_instance_name(hdr))
    # dirty fix, to ensure that all tags in a stag are contiguous
    # this makes the (questionable) assumption that all those tags are
    # contiguous in the deparsed packet
    buckets = defaultdict(list)
    for header_instance in header_topo_sorting:
        base_name = header_instance.base_name
        buckets[base_name].append(header_instance)
    collapsed = []
    for header_instance in header_topo_sorting:
        base_name = header_instance.base_name
        # Emit a stack's whole bucket at the first member's position, then
        # empty it so later members add nothing.
        collapsed += buckets[base_name]
        buckets[base_name] = []
    return collapsed
def render_dict_populate_fields(render_dict, hlir, meta_config_json):
    """Populate header/field layout info (PHV offsets, orderings, metadata
    mappings, checksum calculations) into *render_dict* for the templates.

    NOTE: this module targets Python 2 — ``map`` returns lists and ``/`` on
    ints is floor division below.
    """
    render_dict["ordered_field_instances_all"] = []
    render_dict["ordered_field_instances_virtual"] = []
    render_dict["ordered_field_instances_non_virtual"] = []
    render_dict["field_info"] = {}
    render_dict["ordered_header_instances_all"] = []
    render_dict["ordered_header_instances_virtual"] = []
    render_dict["ordered_header_instances_non_virtual"] = []
    render_dict["tag_stacks_depth"] = defaultdict(int)
    render_dict["tag_stacks_first"] = {}
    render_dict["header_info"] = {}
    phv_offset = 0
    # TODO: improve
    headers_regular = produce_parser_topo_sorting(hlir)
    base_names_regular = set()
    for header_instance in headers_regular:
        base_names_regular.add(header_instance.base_name)
    headers_metadata = []
    headers_virtual = []
    headers_non_virtual = []
    for header_instance in hlir.p4_header_instances.values():
        if header_instance.virtual:
            # TODO: temp fix, do something better
            if header_instance.base_name not in base_names_regular:
                continue
            headers_virtual.append(header_instance)
        elif header_instance.metadata:
            headers_metadata.append(header_instance)
    headers_non_virtual = headers_regular + headers_metadata
    headers_ordered = headers_virtual + headers_non_virtual
    get_name = lambda x: get_header_instance_name(x)
    render_dict["ordered_header_instances_all"] = map(get_name, headers_ordered)
    render_dict["ordered_header_instances_virtual"] = map(get_name, headers_virtual)
    render_dict["ordered_header_instances_non_virtual"] = map(get_name, headers_non_virtual)
    render_dict["ordered_header_instances_metadata"] = map(get_name, headers_metadata)
    render_dict["ordered_header_instances_regular"] = map(get_name, headers_regular)
    # Build per-header and per-field info dicts, assigning PHV byte offsets
    # to non-virtual fields as we go.
    for header_instance in headers_ordered:
        header_name = get_header_instance_name(header_instance)
        base_name = header_instance.base_name
        h_info = {}
        virtual = header_instance.virtual
        h_info["base_name"] = base_name
        h_info["virtual"] = virtual
        h_info["is_metadata"] = header_instance.metadata
        if virtual:
            if header_instance.index == p4.P4_NEXT:
                type_ = "next"
            elif header_instance.index == p4.P4_LAST:
                type_ = "last"
            else:
                assert(False)
            h_info["virtual_type"] = type_
        else:
            h_info["byte_offset_phv"] = phv_offset
        if header_instance.index is not None:
            # assume they are ordered
            if base_name not in render_dict["tag_stacks_first"]:
                render_dict["tag_stacks_first"][base_name] = header_name
            render_dict["tag_stacks_depth"][base_name] += 1
        h_bit_width = 0
        h_byte_width_phv = 0
        h_info["fields"] = []
        field_index = 0
        for field_instance in header_instance.fields:
            field_name = get_field_instance_name(field_instance, header_name)
            render_dict["ordered_field_instances_all"].append(field_name)
            if virtual:
                render_dict["ordered_field_instances_virtual"].append(field_name)
            else:
                render_dict["ordered_field_instances_non_virtual"].append(field_name)
            f_info = {}
            bit_width = field_instance.width
            h_bit_width += bit_width
            f_info["field_index"] = field_index
            field_index += 1
            f_info["virtual"] = virtual
            f_info["bit_width"] = bit_width
            f_info["bit_offset_hdr"] = field_instance.offset % 8
            f_info["byte_offset_hdr"] = field_instance.offset / 8
            f_info["parent_header"] = header_name
            f_info["is_metadata"] = h_info["is_metadata"]
            # only 2 data types: uint32_t and byte_buf_t
            if bit_width <= 32:
                f_info["data_type"] = "uint32_t"
                f_info["byte_width_phv"] = 4
                # htonl so the generated mask is in network byte order.
                f_info["mask"] = hex(int(socket.htonl((1 << bit_width) - 1)))
            else:
                # TODO
                assert(bit_width % 8 == 0)
                f_info["data_type"] = "byte_buf_t"
                f_info["byte_width_phv"] = bit_width / 8
                f_info["mask"] = hex(0)
            if f_info["is_metadata"]:
                f_info["default"] = int_to_byte_array(field_instance.default,
                                                      f_info["byte_width_phv"])
            h_byte_width_phv += f_info["byte_width_phv"]
            if not virtual:
                f_info["byte_offset_phv"] = phv_offset
                phv_offset += f_info["byte_width_phv"]
            render_dict["field_info"][field_name] = f_info
            h_info["fields"].append(field_name)
        h_info["bit_width"] = h_bit_width
        h_info["byte_width_phv"] = h_byte_width_phv
        render_dict["header_info"][header_name] = h_info
    # total length (in bytes) of the phv memory block. Will be allocated
    # dynamically (once) at run time
    phv_length = phv_offset
    render_dict["phv_length"] = phv_length
    # NOTE(review): this counts non-virtual *fields*, not headers — confirm
    # the templates really expect a field count under this key.
    render_dict["num_headers"] = len(render_dict["ordered_field_instances_non_virtual"])
    print "total phv length (in bytes):", phv_length
    # checksums
    cond_idx = 1
    calculated_fields_verify = OrderedDict()
    calculated_fields_update = OrderedDict()
    calculated_fields_conds = []
    for header_instance in headers_regular:
        for field_instance in header_instance.fields:
            field_name = get_field_instance_name(field_instance)
            calculated_fields_verify[field_name] = []
            calculated_fields_update[field_name] = []
            for calculation in field_instance.calculation:
                type_, calc, if_cond = calculation
                assert(calc.output_width == field_instance.width)
                if if_cond:
                    # Conditional calculations get a 1-based condition index;
                    # 0 means "unconditional".
                    if_cond = dump_p4_expression(if_cond)
                    calculated_fields_conds.append(if_cond)
                    if_cond_idx = cond_idx
                    cond_idx += 1
                else:
                    if_cond_idx = 0
                if type_ == "verify":
                    calculated_fields_verify[field_name] += [(calc.name, if_cond_idx)]
                if type_ == "update":
                    calculated_fields_update[field_name] += [(calc.name, if_cond_idx)]
    render_dict["calculated_fields_conds"] = calculated_fields_conds
    render_dict["calculated_fields_verify"] = calculated_fields_verify
    render_dict["calculated_fields_update"] = calculated_fields_update
    config = meta_config_json
    # This is the set of data carried for recirculation
    render_dict["meta_carry"] = config["meta_carry"]
    # This maps metadata short names to full references
    metadata_name_map = config["meta_mappings"]["standard_metadata"]
    intrinsic_metadata_name_map = config["meta_mappings"]["intrinsic_metadata"]
    pre_metadata_name_map = config["meta_mappings"]["pre_metadata"]
    render_dict["metadata_name_map"] = metadata_name_map
    render_dict["intrinsic_metadata_name_map"] = intrinsic_metadata_name_map
    render_dict["pre_metadata_name_map"] = pre_metadata_name_map
    render_dict["extra_metadata_name_map"] = {}
    if "extra_metadata" in config["meta_mappings"]:
        render_dict["extra_metadata_name_map"] = config["meta_mappings"]["extra_metadata"]
    # Calculate offsets for metadata entries
    offset = 0
    metadata_offsets = {}
    num_intrinsic_metadata = 0
    num_pre_metadata = 0
    for header_instance in map(get_name, headers_non_virtual):
        h_info = render_dict["header_info"][header_instance]
        if h_info["is_metadata"]:
            for field in h_info["fields"]:
                if field in metadata_name_map.values():
                    metadata_offsets[field] = offset
                elif field in intrinsic_metadata_name_map.values():
                    num_intrinsic_metadata += 1
                    metadata_offsets[field] = offset
                elif field in pre_metadata_name_map.values():
                    num_pre_metadata += 1
                    metadata_offsets[field] = offset
                offset += render_dict["field_info"][field]["byte_width_phv"]
    # Intrinsic/PRE support is enabled only if *every* mapped field was found.
    enable_intrinsic = 1 if len(intrinsic_metadata_name_map) == num_intrinsic_metadata else 0
    enable_pre = 1 if len(pre_metadata_name_map) == num_pre_metadata else 0
    render_dict["metadata_offsets"] = metadata_offsets
    render_dict["enable_intrinsic"] = enable_intrinsic
    render_dict["enable_pre"] = enable_pre
def render_dict_populate_data_types(render_dict, hlir):
    """Record the only two C storage types a field may use.

    Fields of 32 bits or fewer are stored as uint32_t; wider fields use
    byte_buf_t.
    """
    render_dict["field_data_types"] = ["uint32_t", "byte_buf_t"]
def render_dict_populate_parse_states(render_dict, hlir):
    """Populate render_dict["parse_states"] and render_dict["value_sets"].

    For each parse state this records the extract/set call sequence, the
    fields the state branches on, and the branch cases (default / value /
    masked value / value set) with their successor nodes.
    """
    render_dict["parse_states"] = OrderedDict()
    render_dict["value_sets"] = OrderedDict()
    header_info = render_dict["header_info"]
    field_info = render_dict["field_info"]
    value_sets_used = set()
    for name, parse_state in hlir.p4_parse_states.items():
        parse_info = {}
        call_sequence = []
        for call in parse_state.call_sequence:
            call_type = call[0]
            if call_type == p4.parse_call.extract:
                extract_name = get_header_instance_name(call[1])
                call_sequence += [(str(call_type), extract_name)]
            elif call_type == p4.parse_call.set:
                metadata_name = get_field_instance_name(call[1])
                target_byte_width = field_info[metadata_name]["byte_width_phv"]
                metadata_value = call[2]
                # A set() source is either another field ("latest"), an
                # immediate integer, or a (offset, width) tuple ("current").
                if type(metadata_value) is p4.p4_field:
                    call_sequence += [(str(call_type),
                                       metadata_name,
                                       "latest",
                                       get_field_instance_name(metadata_value))]
                elif type(metadata_value) is int:
                    value = call[2]
                    byte_array = int_to_byte_array(value, target_byte_width)
                    call_sequence += [(str(call_type),
                                       metadata_name,
                                       "immediate",
                                       byte_array)]
                else:
                    # tuple, for current
                    call_sequence += [(str(call_type),
                                       metadata_name,
                                       "current",
                                       metadata_value)]
            else: # counters
                # TODO
                assert(False)
        parse_info["call_sequence"] = call_sequence
        branch_on = []
        key_fields_bit_widths = []
        key_byte_width = 0
        for switch_ref in parse_state.branch_on:
            if type(switch_ref) is p4.p4_field:
                field_ref = switch_ref
                field_instance_name = get_field_instance_name(field_ref)
                field_bit_width = field_info[field_instance_name]["bit_width"]
                key_fields_bit_widths.append(field_bit_width)
                # Each key component occupies at least 4 bytes.
                num_bytes = max(bytes_round_up(field_bit_width), 4)
                branch_on.append(("field_ref", field_instance_name))
            elif type(switch_ref) is tuple:
                offset, bit_width = switch_ref
                key_fields_bit_widths.append(bit_width)
                num_bytes = max(bytes_round_up(bit_width), 4)
                branch_on.append(("current", switch_ref))
            else:
                assert(False)
            key_byte_width += num_bytes
        parse_info["branch_on"] = branch_on
        value_set_info = {}
        branch_to = []
        for branch_case, next_state in parse_state.branch_to.items():
            value, mask = 0, 0;
            if branch_case == p4.P4_DEFAULT:
                case_type = "default"
            elif type(branch_case) is p4.p4_parse_value_set:
                if branch_case in value_sets_used:
                    print "a value set can only be used in one parse state"
                    assert(False)
                value_set_info["match_data"] = branch_on
                value_set_info["byte_width"] = key_byte_width
                render_dict["value_sets"][branch_case.name] = value_set_info
                value_sets_used.add(branch_case)
                case_type = "value_set"
                value = branch_case.name
            elif type(branch_case) is int:
                case_type = "value"
                value = build_switch_match_key(key_fields_bit_widths,
                                               branch_case)
                # print hex(branch_case), key_fields_bit_widths, value
            else:
                # (value, mask) pair for a masked case.
                case_type = "value_masked"
                value = build_switch_match_key(key_fields_bit_widths,
                                               branch_case[0])
                mask = build_switch_match_key(key_fields_bit_widths,
                                              branch_case[1])
            if next_state is None:
                # should not happen any more
                assert(False)
                next_parse_state = ("parse_state", "end")
            # A successor may be another parse state, a conditional node, or
            # a match table (entering the pipeline).
            if isinstance(next_state, p4.p4_parse_state):
                next_parse_state = ("parse_state", next_state.name)
            elif isinstance(next_state, p4.p4_conditional_node):
                next_parse_state = ("conditional_table",
                                    get_conditional_node_name(next_state))
            elif isinstance(next_state, p4.p4_table):
                next_parse_state = ("table",
                                    get_table_name(next_state))
            else:
                assert(False)
            branch_to += [(case_type, value, mask, next_parse_state)]
        parse_info["branch_to"] = branch_to
        render_dict["parse_states"][name] = parse_info
def render_dict_populate_actions(render_dict, hlir):
    """Populate render_dict["action_info"] for every action referenced by a
    table: parameter widths, fields/headers needing copies for parallel
    semantics, and the flattened primitive call sequence.
    """
    render_dict["action_info"] = {}
    field_info = render_dict["field_info"]
    # these are the only actions for which we have to generate C code, since we
    # are using the flat sequence of primitive calls
    table_actions_set = set()
    for _, table in hlir.p4_tables.items():
        for action in table.actions: table_actions_set.add(action)
    # we need to get the size of the match data
    for action in table_actions_set:
        a_info = {}
        param_names = []
        param_byte_widths = []
        param_bit_widths = []
        for param, width in zip(action.signature, action.signature_widths):
            # A zero/None width means the parameter is never used.
            if not width :
                print "unused parameter discarded"
                continue
            param_names += [param]
            param_byte_widths += [max(bytes_round_up(width), 4)]
            param_bit_widths += [width]
        a_info["param_names"] = param_names
        a_info["param_byte_widths"] = param_byte_widths
        a_info["param_bit_widths"] = param_bit_widths
        # find out which fields / instances we need to copy for parallel
        # execution semantics
        field_access = defaultdict(set)
        for call in action.flat_call_sequence:
            primitive = call[0]
            for index, arg in enumerate(call[1]):
                if type(arg) is p4.p4_field or\
                   type(arg) is p4.p4_header_instance:
                    sig_arg_name = primitive.signature[index]
                    flags = primitive.signature_flags[sig_arg_name]
                    # No declared access flag: conservatively assume both
                    # read and write.
                    if "access" not in flags:
                        field_access[arg].add(p4.P4_WRITE)
                        field_access[arg].add(p4.P4_READ)
                    else:
                        access = flags["access"]
                        field_access[arg].add(access)
        field_copies = set()
        header_copies = set()
        for arg, access in field_access.items():
            if len(access) > 1: # read and write
                if type(arg) is p4.p4_field:
                    field_copies.add(get_field_instance_name(arg))
                else:
                    header_copies.add(get_header_instance_name(arg))
        a_info["field_copies"] = field_copies
        a_info["header_copies"] = header_copies
        call_sequence = []
        for call in action.flat_call_sequence:
            primitive_name = get_action_name(call[0]).upper()
            primitive_args = []
            # is this outdated, do I really need to have the immedaite value be
            # the exact same size as the destination? Let's try to get rid of
            # it.
            # width = 0
            # if call[0].name == "add_to_field" or\
            #    call[0].name == "modify_field" or\
            #    call[0].name == "modify_field_with_hash_based_offset":
            #     ref_arg = call[1][0]
            #     field_name = get_field_instance_name(ref_arg)
            #     width = field_info[field_name]["byte_width_phv"]
            # Encode every primitive argument as a (type, value) pair for
            # the code-generation templates.
            for arg in call[1]:
                if type(arg) is int or type(arg) is long:
                    # assert(width > 0)
                    # Size the immediate to its smallest byte length,
                    # with a 4-byte minimum.
                    tmp = arg
                    nbytes = 0
                    while tmp > 0:
                        nbytes += 1
                        tmp /= 256
                    width = max(4, nbytes)
                    type_ = "immediate"
                    value = int_to_byte_array(arg, width)
                elif type(arg) is p4.p4_field:
                    type_ = "field_ref"
                    value = get_field_instance_name(arg)
                elif type(arg) is p4.p4_header_instance:
                    type_ = "header_ref"
                    value = get_header_instance_name(arg)
                elif type(arg) is p4.p4_signature_ref:
                    type_ = "param"
                    value = arg.idx
                elif type(arg) is p4.p4_parse_state:
                    type_ = "parse_state"
                    value = arg.name
                elif type(arg) is p4.p4_field_list:
                    type_ = "field_list"
                    value = arg.name
                elif type(arg) is p4.p4_field_list_calculation:
                    type_ = "p4_field_calculation"
                    value = arg.name
                elif type(arg) is p4.p4_counter:
                    type_ = "p4_counter"
                    value = arg.name
                elif type(arg) is p4.p4_meter:
                    type_ = "p4_meter"
                    value = arg.name
                elif type(arg) is p4.p4_register:
                    type_ = "p4_register"
                    value = arg.name
                else:
                    print type(arg)
                    assert(False)
                primitive_args.append((type_, value))
            call_sequence.append((primitive_name, primitive_args))
        a_info["call_sequence"] = call_sequence
        action_name = get_action_name(action)
        render_dict["action_info"][action_name] = a_info
def render_dict_populate_conditional_tables(render_dict, hlir):
    """Populate render_dict["conditional_table_info"] with, per conditional
    node, its expression (string and flattened register form) and the
    successor node for each of the True/False branches.

    Fix: ``next_name`` is now initialized before the type dispatch.
    Previously it was only assigned when the successor was None or one of
    the two recognized node types, so any other non-None successor raised
    UnboundLocalError instead of recording None.
    """
    render_dict["conditional_table_info"] = OrderedDict()
    for name, cnode in hlir.p4_conditional_nodes.items():
        ct_info = {}
        ct_info["expression"] = str(cnode.condition)
        conditional_name = get_conditional_node_name(cnode)
        ct_info["expression_computation"] = dump_p4_expression(cnode.condition)
        ct_info["next_tables"] = {}  # keyed by branch outcome: True / False
        for b, next_ in cnode.next_.items():
            next_name = None  # default: branch has no successor
            if isinstance(next_, p4.p4_conditional_node):
                next_name = get_conditional_node_name(next_)
            elif isinstance(next_, p4.p4_table):
                next_name = get_table_name(next_)
            ct_info["next_tables"][b] = next_name
        render_dict["conditional_table_info"][conditional_name] = ct_info
def render_dict_populate_tables(render_dict, hlir):
    """Populate render_dict["table_info"] for every match table: sizes,
    action profile binding, overall match type, (reordered) match fields
    with their mask, successor tables, and action-data width.
    """
    render_dict["table_info"] = OrderedDict()
    field_info = render_dict["field_info"]
    action_info = render_dict["action_info"]
    select_tables = []
    action_data_tables = []
    for name, table in hlir.p4_tables.items():
        t_info = {}
        # can be None
        t_info["min_size"] = table.min_size
        t_info["max_size"] = table.max_size
        t_info["support_timeout"] = table.support_timeout
        t_info["action_profile"] = None
        act_prof = table.action_profile
        if act_prof is not None:
            t_info["action_profile"] = act_prof.name
            action_data_tables.append(name)
            if act_prof.selector is not None:
                select_tables.append(name)
        # will be set in render_dict_populate_counters
        t_info["bytes_counter"] = None
        t_info["packets_counter"] = None
        t_info["meter"] = None
        t_info["registers"] = []
        match_types = []
        # Lower precedence value = matched earlier in the reordered key.
        match_precedence = {
            p4.p4_match_type.P4_MATCH_VALID: 0,
            p4.p4_match_type.P4_MATCH_EXACT: 1,
            p4.p4_match_type.P4_MATCH_TERNARY: 3,
            p4.p4_match_type.P4_MATCH_LPM: 2,
        }
        for _, m_type, _ in table.match_fields:
            if m_type not in match_precedence:
                print m_type, "match not yet supported"
                assert(False)
            match_types.append(m_type)
        # If no match fields, indicate exact match
        # Overall table type: ternary dominates, then (a single) lpm,
        # otherwise exact.
        if len(match_types) == 0:
            match_type = p4.p4_match_type.P4_MATCH_EXACT
        elif p4.p4_match_type.P4_MATCH_TERNARY in match_types:
            match_type = p4.p4_match_type.P4_MATCH_TERNARY
        elif match_types.count(p4.p4_match_type.P4_MATCH_LPM) >= 2:
            print "cannot have 2 different lpm in a single table"
            assert(False)
        elif p4.p4_match_type.P4_MATCH_LPM in match_types:
            match_type = p4.p4_match_type.P4_MATCH_LPM
        else:
            # that includes the case when we only have one valid match and
            # nothing else
            match_type = p4.p4_match_type.P4_MATCH_EXACT
        type_mappings = {
            p4.p4_match_type.P4_MATCH_EXACT: "exact",
            p4.p4_match_type.P4_MATCH_TERNARY: "ternary",
            p4.p4_match_type.P4_MATCH_LPM: "lpm",
            p4.p4_match_type.P4_MATCH_RANGE: "range",
            p4.p4_match_type.P4_MATCH_VALID: "valid",
            None: "none",
        }
        t_info["match_type"] = type_mappings[match_type]
        # basically same code as for branch_on in parse functions, because the
        # build_key function is going to be the same
        match_fields = []
        key_fields_bit_widths = []
        big_mask = []
        has_mask = False
        key_byte_width = 0
        reordered_fields_idx = sorted(
            range(len(table.match_fields)),
            key = lambda x: match_precedence[table.match_fields[x][1]]
        )
        for field_ref, m_type, mask in table.match_fields:
            if m_type is p4.p4_match_type.P4_MATCH_VALID:
                # A valid() match may reference either the header itself or
                # one of its fields.
                if type(field_ref) is p4.p4_header_instance:
                    header_ref = field_ref
                elif type(field_ref) is p4.p4_field:
                    header_ref = field_ref.instance
                else:
                    assert(False) # should not happen
                header_instance_name = get_header_instance_name(header_ref)
                field_bit_width = 1
                key_fields_bit_widths.append(field_bit_width)
                num_bytes = 1
                key_byte_width += num_bytes # this will use only 1 byte, not 4
                match_fields += [(header_instance_name, type_mappings[m_type])]
            else:
                field_instance_name = get_field_instance_name(field_ref)
                field_bit_width = field_info[field_instance_name]["bit_width"]
                key_fields_bit_widths.append(field_bit_width)
                num_bytes = max(bytes_round_up(field_bit_width), 4)
                key_byte_width += num_bytes
                match_fields += [(field_instance_name, type_mappings[m_type])]
            # common to all match types
            if mask:
                big_mask += int_to_byte_array(mask, num_bytes)
                has_mask = True
            else:
                # No explicit mask: match all bits of this component.
                big_mask += [255 for i in xrange(num_bytes)]
        t_info["match_fields"] = match_fields
        t_info["reordered_match_fields_idx"] = reordered_fields_idx
        t_info["reordered_match_fields"] = [match_fields[i] \
                                           for i in reordered_fields_idx]
        t_info["has_mask"] = has_mask
        t_info["big_mask"] = big_mask
        # will be useful for PD code
        t_info["key_fields_bit_widths"] = key_fields_bit_widths
        t_info["key_byte_width"] = key_byte_width
        t_info["actions"] = [get_action_name(a) for a in table.actions]
        t_info["next_tables"] = {}
        # Successors are keyed either by hit/miss or by action name,
        # depending on how the P4 program specified them.
        with_hit_miss_spec = False
        if "hit" in table.next_:
            with_hit_miss_spec = True
        t_info["with_hit_miss_spec"] = with_hit_miss_spec
        if with_hit_miss_spec:
            for event in {"hit", "miss"}:
                t = table.next_[event]
                if t:
                    t_info["next_tables"][event] = get_table_name(t)
                else:
                    t_info["next_tables"][event] = None
        else:
            for a in table.actions:
                t = table.next_[a]
                if t:
                    t_info["next_tables"][get_action_name(a)] = get_table_name(t)
                else:
                    t_info["next_tables"][get_action_name(a)] = None
        actions_idx = {}
        idx = 0
        for action in t_info["actions"]:
            actions_idx[action] = idx
            idx += 1
        t_info["actions_idx"] = actions_idx
        if table.action_profile is None:
            # Action-data slot must fit the widest parameter set.
            action_data_byte_width = 0
            for action in t_info["actions"]:
                a_info = action_info[action]
                action_data_byte_width = max(action_data_byte_width,
                                             sum(a_info["param_byte_widths"]))
            t_info["action_data_byte_width"] = action_data_byte_width
        else:
            # with an action profile, the first bit is for group vs.member, the
            # remaining 31 bits are for index value
            t_info["action_data_byte_width"] = 4
        table_name = get_table_name(table)
        render_dict["table_info"][table_name] = t_info
    render_dict["select_tables"] = select_tables
    render_dict["action_data_tables"] = action_data_tables
def render_dict_populate_table_types(render_dict, hlir):
    """Record every match type name the generated code must know about."""
    render_dict["table_types"] = [
        "lpm", "exact", "ternary", "range", "valid", "none",
    ]
def render_dict_populate_action_profiles(render_dict, hlir):
    """Populate render_dict["action_profiles"]: per profile, its action
    names, the widest action-data byte width, its size, and the optional
    selection key name.
    """
    action_info = render_dict["action_info"]
    profiles = OrderedDict()
    for prof_name, act_prof in hlir.p4_action_profiles.items():
        action_names = [get_action_name(a) for a in act_prof.actions]
        # The action-data slot must fit the widest parameter set of any
        # action in the profile (0 when there are no actions).
        data_width = max(
            [sum(action_info[a]["param_byte_widths"]) for a in action_names] + [0]
        )
        prof_info = {
            "actions": action_names,
            "action_data_byte_width": data_width,
            "size": act_prof.size,
            "selection_key": None,
        }
        if act_prof.selector is not None:
            prof_info["selection_key"] = act_prof.selector.selection_key.name
        profiles[prof_name] = prof_info
    render_dict["action_profiles"] = profiles
def dump_p4_expression(expression):
    """Flatten *expression* into a list of register-assignment tuples.

    Each tuple has the form (register_name, operator, operand, ...); the
    templates turn the list into C code. The top-level result always lands
    in "reg[0]".
    """
    counter = [0]  # mutable cell so the nested closure can advance it

    def get_next_register():
        # Hands out "reg[0]", "reg[1]", ... in allocation order.
        register_name = "reg[%d]" % counter[0]
        counter[0] += 1
        return register_name

    register_assignments = []
    dump_register_assignments(expression, get_next_register, register_assignments)
    return register_assignments
# Only supports expressions involving uint32_t fields
def dump_register_assignments(expression, get_next_register, register_assignments):
    """Recursively flatten *expression*, appending (register, op, ...)
    tuples to *register_assignments* in evaluation order, and return the
    register holding the expression's result (None for a None expression).

    Fixes:
    - the field case of the "valid" operator previously called the list
      itself (``register_assignments(...)`` — a TypeError at runtime)
      instead of appending to it;
    - diagnostic prints use function-call syntax so the module also parses
      under Python 3 (output is unchanged on Python 2).
    """
    if expression is None: return None
    # The result of an expression should be stored in the first register. While
    # generating C code, we are assuming that the top-level expression stores
    # its result in reg[0].
    register = get_next_register()
    if type(expression) is int:
        register_assignments.append((register, "assign_immediate", expression))
    elif type(expression) is p4.p4_header_instance:
        assert(False)
    elif type(expression) is p4.p4_field:
        assert(expression.width <= 32)
        register_assignments.append((register, "assign_field", get_field_instance_name(expression)))
    elif type(expression) is p4.p4_expression:
        left = expression.left
        right = expression.right
        op = expression.op
        if op == "not":
            operand_register = dump_register_assignments(right, get_next_register, register_assignments)
            register_assignments.append((register, "not", operand_register))
        elif op in {"or", "and", "==", "!=", ">", ">=", "<", "<=", "&"}:
            left_register = dump_register_assignments(left, get_next_register, register_assignments)
            right_register = dump_register_assignments(right, get_next_register, register_assignments)
            register_assignments.append((register, op, left_register, right_register))
        elif op == "valid":
            if type(right) is p4.p4_header_instance:
                register_assignments.append((register, "valid_header", get_header_instance_name(right)))
            elif type(right) is p4.p4_field:
                # BUG FIX: was ``register_assignments((...))`` which called
                # the list and raised TypeError.
                register_assignments.append((register, "valid_field", get_field_instance_name(right)))
            else:
                assert(False)
        else:
            print("operation %s is not supported by the behavioral model" % op)
            assert(False)
    else:
        print("Expression  %s of type %s not supported" % (expression, type(expression)))
        assert(False)
    return register
def render_dict_populate_field_lists(render_dict, hlir):
    """Populate render_dict["field_lists"] (each list as (type, value)
    pairs) and render_dict["learn_quanta"] (digest descriptors for every
    field list passed to a generate_digest primitive).
    """
    field_lists = {}
    for name, field_list in hlir.p4_field_lists.items():
        f_list = []
        prev_header = None # for payload, to make my life easier
        for field in field_list.fields:
            if type(field) is int or type(field) is long:
                print "values not supported in field lists yet"
                assert(False)
                # NOTE(review): dead code below the assert — ``width`` is
                # undefined here and would raise NameError under ``-O``.
                type_ = "immediate"
                value = int_to_byte_array(field, width)
            elif type(field) is p4.p4_field:
                type_ = "field_ref"
                value = get_field_instance_name(field)
                prev_header = field.instance
            elif type(field) is p4.p4_header_instance:
                type_ = "header_ref"
                value = get_header_instance_name(field)
                prev_header = field
            elif type(field) is p4.p4_field_list:
                print "recursive field lists not supported yet"
                assert(False)
            elif field is p4.P4_PAYLOAD:
                # Payload is anchored to the last header/field seen.
                type_ = "payload"
                value = get_header_instance_name(prev_header)
            elif type(field) is p4.p4_sized_integer:
                assert(field.width % 8 == 0)
                type_ = "integer"
                value = int_to_byte_array(field, field.width / 8)
            # NOTE(review): no final else — an unrecognized entry type would
            # silently reuse the previous iteration's (type_, value).
            f_list += [(type_, value)]
        field_lists[name] = f_list
    p4_pd_prefix = render_dict["p4_pd_prefix"]
    learn_quanta = []
    field_list_set = set()
    # Scan every action for generate_digest calls; each referenced field
    # list yields one learn quantum (deduplicated by list name).
    for _, table in hlir.p4_tables.items():
        for action in table.actions:
            for call in action.flat_call_sequence:
                primitive = call[0]
                if primitive.name == "generate_digest":
                    for arg in call[1]:
                        if type(arg) is p4.p4_field_list:
                            field_list_object = arg
                            field_instances = OrderedDict()
                            for field_instance in [x[1] for x in field_lists[field_list_object.name]]:
                                # This is a map. The value is always None.
                                field_instances[field_instance] = None
                            prefix = p4_pd_prefix + field_list_object.name
                            lf_prefix = "lf_" + field_list_object.name
                            lq = {"name" : field_list_object.name,
                                  "entry_type" : prefix + "_digest_entry_t",
                                  "msg_type" : prefix + "_digest_msg_t",
                                  "cb_fn_type" : prefix + "_digest_notify_cb",
                                  "register_fn" : prefix + "_register",
                                  "deregister_fn" : prefix + "_deregister",
                                  "notify_ack_fn" : prefix + "_notify_ack",
                                  "lf_register_fn" : lf_prefix + "_register",
                                  "lf_deregister_fn" : lf_prefix + "_deregister",
                                  "lf_notify_ack_fn" : lf_prefix + "_notify_ack",
                                  "fields" : field_instances}
                            if field_list_object.name not in field_list_set:
                                learn_quanta.append(lq)
                                field_list_set.add(field_list_object.name)
    render_dict["field_lists"] = field_lists
    render_dict["learn_quanta"] = learn_quanta
def render_dict_populate_field_list_calculations(render_dict, hlir):
    """Populate render_dict["field_list_calculations"] from the HLIR.

    For each field-list calculation records: the name of its (single) input
    field list, the input size(s) in bytes, the hash algorithm, the output
    width in bits and the number of PHV bytes needed to hold the output.

    NOTE: this file is Python 2 (`print` statements, `long`); the `/` below
    is integer division on ints.
    """
    field_list_calculations = {}

    def get_size(field, size = 0):
        # Recursively accumulate the size, in *bits*, of one field-list entry.
        if type(field) is int or type(field) is long:
            print "values not supported in field lists yet"
            assert(False)
        elif type(field) is p4.p4_sized_integer:
            # Sized integers must be byte-aligned.
            assert(field.width % 8 == 0)
            return size + field.width
        elif type(field) is p4.p4_field:
            return size + field.width
        elif type(field) is p4.p4_header_instance:
            # A header must start on a byte boundary.
            assert(size % 8 == 0)
            for subfield in field.fields:
                size = get_size(subfield, size)
            return size
        elif type(field) is p4.p4_field_list:
            # Nested field list: sum all of its members.
            for subfield in field.fields:
                size = get_size(subfield, size)
            return size
        elif field is p4.P4_PAYLOAD:
            # TODO: ...
            # Arbitrary 2 KB upper bound used as the payload contribution.
            return size + 2048 * 8
        else:
            assert(False)

    for name, calculation in\
            hlir.p4_field_list_calculations.items():
        c_info = {}
        c_info["input"] = [fl.name for fl in calculation.input]
        # Only a single input field list is supported.
        assert(len(c_info["input"]) == 1)
        c_info["input_sizes"] = []
        for input_ in calculation.input:
            bits = get_size(input_)
            # Inputs must be byte-aligned.
            assert(bits % 8 == 0)
            c_info["input_sizes"] += [bits / 8]
        c_info["input_size"] = max(c_info["input_sizes"])
        # assert(len(calculation.algorithms) == 1)
        # c_info["algorithm"] = calculation.algorithms[0]
        c_info["algorithm"] = calculation.algorithm
        # TODO: define exactly what I want to do here
        c_info["output_width"] = calculation.output_width
        if calculation.output_width > 32:
            # Wide outputs must be byte-aligned.
            assert(calculation.output_width % 8 == 0)
        # At least 4 bytes are reserved for the output in the PHV.
        c_info["output_phv_bytes"] = max(bytes_round_up(calculation.output_width), 4)
        field_list_calculations[name] = c_info
    render_dict["field_list_calculations"] = field_list_calculations
def render_dict_populate_counters(render_dict, hlir):
    """Populate render_dict["counter_info"] from the HLIR counters.

    A "packets_and_bytes" counter is expanded into two entries
    (<name>_bytes and <name>_packets); direct-bound counters are also
    recorded on the owning table's table_info entry.
    """
    counter_info = {}
    for counter_name, counter in hlir.p4_counters.items():
        counter_type = str(counter.type)
        if counter_type in ("packets", "bytes"):
            expanded = [(counter_name, counter_type)]
        elif counter_type == "packets_and_bytes":
            # Split the combined counter into one entry per unit.
            expanded = [(counter_name + "_bytes", "bytes"),
                        (counter_name + "_packets", "packets")]
        else:
            assert(False)
        for c_name, c_type in expanded:
            c_info = {
                "type_": c_type,
                "saturating": counter.saturating,
                "instance_count": counter.instance_count,
                "min_width": counter.min_width,
            }
            if not counter.binding:
                c_info["binding"] = ("global", None)
            elif counter.binding[0] == p4.P4_DIRECT:
                table = get_table_name(counter.binding[1])
                c_info["binding"] = ("direct", table)
                # Cross-reference the counter on the table it is bound to;
                # a table may have at most one counter per unit type.
                t_info = render_dict["table_info"][table]
                counter_key = c_type + "_counter"
                assert(not t_info[counter_key])
                t_info[counter_key] = c_name
            elif counter.binding[0] == p4.P4_STATIC:
                c_info["binding"] = ("static", get_table_name(counter.binding[1]))
            else:
                assert(False)
            counter_info[c_name] = c_info
        # Even though the combined counter was expanded above, COUNT() in
        # actions.c still needs a record under the original name mapping it
        # to type "packets_and_bytes".
        if str(counter.type) == "packets_and_bytes":
            counter_info[counter.name] = {
                "type_": "packets_and_bytes",
                "binding": ("global", None),
                "saturating": counter.saturating,
                "instance_count": counter.instance_count,
                "min_width": counter.min_width,
            }
    render_dict["counter_info"] = counter_info
def render_dict_populate_meters(render_dict, hlir):
    """Populate render_dict["meter_info"] from the HLIR meters.

    Direct-bound meters are also cross-referenced on the owning table's
    table_info entry; a table may have at most one meter.
    """
    meter_info = {}
    for meter_name, meter in hlir.p4_meters.items():
        m_info = {
            "type_": str(meter.type),
            "instance_count": meter.instance_count,
        }
        if not meter.binding:
            m_info["binding"] = ("global", None)
        elif meter.binding[0] == p4.P4_DIRECT:
            bound_table = get_table_name(meter.binding[1])
            m_info["binding"] = ("direct", bound_table)
            # A direct meter must declare a result field to write into.
            assert(meter.result)
            m_info["result"] = get_field_instance_name(meter.result)
            t_info = render_dict["table_info"][bound_table]
            assert(not t_info["meter"])
            t_info["meter"] = meter_name
        elif meter.binding[0] == p4.P4_STATIC:
            m_info["binding"] = ("static", get_table_name(meter.binding[1]))
        else:
            assert(False)
        meter_info[meter_name] = m_info
    render_dict["meter_info"] = meter_info
def render_dict_populate_registers(render_dict, hlir):
register_info = {}
for name, register in hlir.p4_registers.items():
r_info = {}
if not register.width:
print "Layout registers are not supported for now"
assert(False)
r_info["width"] = register.width
r_info["byte_width"] = max(bytes_round_up(register.width), 4)
r_info["mask"] = int_to_byte_array((1 << register.width) - 1, r_info["byte_width"])
if not register.binding:
r_info["binding"] = ("global", None)
elif register.binding[0] == p4.P4_DIRECT:
table = get_table_name(register.binding[1])
r_info["binding"] = ("direct", table)
t_info = render_dict["table_info"][table]
t_info["registers"].append(name)
elif register.binding[0] == p4.P4_STATIC:
r_info["binding"] = ("static", table)
else:
assert(False)
r_info["instance_count"] = register.instance_count
register_info[name] = r_info
render_dict["register_info"] = register_info
def get_type(byte_width):
    """Return the C type used to carry a value of `byte_width` bytes.

    1 -> uint8_t, 2 -> uint16_t, anything else up to 4 bytes -> uint32_t,
    wider values are passed as raw byte arrays.
    """
    exact = {1: "uint8_t", 2: "uint16_t"}
    if byte_width in exact:
        return exact[byte_width]
    return "uint32_t" if byte_width <= 4 else "uint8_t *"
def get_thrift_type(byte_width):
    """Return the Thrift type used to carry a value of `byte_width` bytes.

    1 -> byte, 2 -> i16, 3-4 -> i32; the special widths 6 and 16 map to
    the MAC / IPv6 typedefs; anything else is an opaque binary blob.
    """
    exact = {1: "byte", 2: "i16", 6: "MacAddr_t", 16: "IPv6_t"}
    if byte_width in exact:
        return exact[byte_width]
    if byte_width <= 4:
        return "i32"
    return "binary"
def render_dict_create(hlir,
                       p4_name, p4_prefix,
                       meta_config,
                       public_inc_path,
                       dump_yaml = False):
    """Build and return the dictionary handed to the code templates.

    Args:
        hlir: the compiled P4 high-level IR.
        p4_name / p4_prefix: program name and symbol prefix.
        meta_config: path to a metadata config JSON file, or falsy to use
            the one bundled with the package.
        public_inc_path: destination for generated public headers.
        dump_yaml: if True, also dump the dict to "yaml_dump.yml".
    """
    render_dict = {}
    render_dict["p4_name"] = p4_name
    render_dict["public_inc_path"] = public_inc_path
    render_dict["p4_prefix"] = p4_prefix
    render_dict["p4_pd_prefix"] = "p4_pd_" + p4_prefix + "_"
    # Helper callables exposed directly to the templates.
    render_dict["get_type"] = get_type
    render_dict["get_thrift_type"] = get_thrift_type
    if not meta_config:
        # Fall back to the meta config bundled with the package.
        # NOTE(review): resource_string presumably comes from pkg_resources,
        # imported elsewhere in this file — confirm.
        meta_config_json = json.loads(resource_string(__name__, 'meta_config.json'))
    else:
        with open(meta_config, 'r') as fp:
            meta_config_json = json.load(fp)
    # Order matters below: later populate_* steps (counters, meters,
    # registers) annotate the "table_info" entries created by
    # render_dict_populate_tables.
    render_dict_populate_fields(render_dict, hlir, meta_config_json)
    render_dict_populate_data_types(render_dict, hlir)
    render_dict_populate_parse_states(render_dict, hlir)
    render_dict_populate_actions(render_dict, hlir)
    render_dict_populate_tables(render_dict, hlir)
    render_dict_populate_table_types(render_dict, hlir)
    render_dict_populate_action_profiles(render_dict, hlir)
    render_dict_populate_conditional_tables(render_dict, hlir)
    render_dict_populate_field_lists(render_dict, hlir)
    render_dict_populate_field_list_calculations(render_dict, hlir)
    render_dict_populate_counters(render_dict, hlir)
    render_dict_populate_meters(render_dict, hlir)
    render_dict_populate_registers(render_dict, hlir)
    # Entry point of the egress pipeline, if the program has one.
    if hlir.p4_egress_ptr:
        render_dict["egress_entry_table"] = get_table_name(hlir.p4_egress_ptr)
    else:
        render_dict["egress_entry_table"] = None
    if dump_yaml:
        with open("yaml_dump.yml", 'w') as f:
            dump_render_dict(render_dict, f)
    return render_dict
def ignore_template_file(filename):
    """Return a truthy match object when `filename` must be skipped while
    walking the template directory: dotfiles, tenjin "*.cache" files and
    editor "*~" backups.
    """
    return re.match(r'^\..*|.*\.cache$|.*~$', filename)
def gen_file_lists(current_dir, gen_dir, public_inc_path):
    """Pair every template under `current_dir` with its output path.

    Only call once.  Returns a list of (template_relpath, target_path)
    tuples.  Templates that live in an ".../inc" directory are redirected
    to `public_inc_path` (defaulting to <gen_dir>/public_inc); everything
    else mirrors its relative path under `gen_dir`.
    """
    if not public_inc_path:
        public_inc_path = os.path.join(gen_dir, "public_inc")
    pairs = []
    for dirpath, _, filenames in os.walk(current_dir):
        # Files in an ".../inc" directory become public headers.
        in_inc_dir = len(dirpath) > 4 and dirpath[-4:] == "/inc"
        for fname in filenames:
            if ignore_template_file(fname):
                continue
            rel = os.path.relpath(os.path.join(dirpath, fname), current_dir)
            if in_inc_dir:
                dest = os.path.join(public_inc_path, fname)
            else:
                dest = os.path.join(gen_dir, rel)
            pairs.append((rel, dest))
    return pairs
def render_all_files(render_dict, gen_dir, with_thrift = False, with_plugin_list=None):
    """Render every template into `gen_dir` using `render_dict`.

    Args:
        render_dict: the dict produced by render_dict_create().
        gen_dir: output root directory.
        with_thrift: if False, templates whose target path contains
            "thrift" are skipped.
        with_plugin_list: optional list of plugin names; each plugin's
            templates are rendered under <gen_dir>/plugin/<name>.
    """
    # BUG FIX: [] as a default argument is shared across calls; use None.
    if with_plugin_list is None:
        with_plugin_list = []
    files = gen_file_lists(templates_dir, gen_dir, render_dict["public_inc_path"])
    for template, target in files:
        # not very robust: thrift templates are detected by path substring
        if (not with_thrift) and ("thrift" in target):
            continue
        path = os.path.dirname(target)
        if not os.path.exists(path):
            # BUG FIX: use makedirs (not mkdir) so nested target directories
            # work — the public_inc path can be several levels deep, and the
            # plugin branch below already used makedirs.
            os.makedirs(path)
        with open(target, "w") as f:
            render_template(f, template, render_dict, templates_dir,
                            prefix = gl.tenjin_prefix)
    for s in with_plugin_list:
        plugin_dir = plugin_base + s
        plugin_files = gen_file_lists(plugin_dir, gen_dir+'/plugin/'+s, render_dict["public_inc_path"])
        for template, target in plugin_files:
            path = os.path.dirname(target)
            if not os.path.exists(path):
                os.makedirs(path)
            with open(target, "w") as f:
                render_template(f, template, render_dict, plugin_dir,
                                prefix = gl.tenjin_prefix)
|
{
"content_hash": "f094f8e128839f95a569a877e02c3dcc",
"timestamp": "",
"source": "github",
"line_count": 1221,
"max_line_length": 107,
"avg_line_length": 40.171990171990174,
"alnum_prop": 0.5532313965341489,
"repo_name": "PrincetonUniversity/p4c-behavioral",
"id": "ef488fca181abebd87081c427c8d64368a8535aa",
"size": "49648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "p4c_bm/smart.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "431559"
},
{
"name": "C++",
"bytes": "60394"
},
{
"name": "Makefile",
"bytes": "9687"
},
{
"name": "Objective-C",
"bytes": "29730"
},
{
"name": "Python",
"bytes": "151395"
},
{
"name": "Thrift",
"bytes": "23934"
}
],
"symlink_target": ""
}
|
from calvin.runtime.south.plugins.async import async
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
class TimerEvent(async.DelayedCall):
    """A delayed call that wakes up the scheduler for one actor.

    When the delay expires, the scheduler's trigger loop is invoked with
    the owning actor's id.  With repeats=True the timer re-arms itself
    after every firing.
    """

    def __init__(self, actor_id, delay, trigger_loop, repeats=False):
        # Register self.trigger as the callback to run after `delay` seconds.
        super(TimerEvent, self).__init__(delay, callback=self.trigger)
        self._actor_id = actor_id
        self._triggered = False  # set on firing, cleared by ack()
        self.trigger_loop = trigger_loop
        self.repeats = repeats
        _log.debug("Set calvinsys timer %f %s on %s" % (delay, "repeat" if self.repeats else "", self._actor_id))
        # NOTE(review): reset() presumably arms/schedules the underlying
        # DelayedCall — defined in the base class; confirm.
        self.reset()

    @property
    def triggered(self):
        """True while the timer has fired and not yet been acknowledged."""
        return self._triggered

    def ack(self):
        # Acknowledge the last firing so `triggered` reads False again.
        self._triggered = False

    def trigger(self):
        """Callback run by the event loop when the delay expires."""
        _log.debug("Trigger calvinsys timer on %s" % (self._actor_id))
        self._triggered = True
        if self.repeats:
            # Re-arm before waking the scheduler so the timer keeps firing.
            self.reset()
        self.trigger_loop(actor_ids=[self._actor_id])
class TimerHandler(object):
    """Factory for TimerEvent objects bound to one actor on one node."""

    def __init__(self, node, actor):
        super(TimerHandler, self).__init__()
        self._actor = actor
        self.node = node

    def once(self, delay):
        """Return a one-shot timer firing after `delay` seconds."""
        return self._make_timer(delay, repeats=False)

    def repeat(self, delay):
        """Return a repeating timer firing every `delay` seconds."""
        return self._make_timer(delay, repeats=True)

    def _make_timer(self, delay, repeats):
        # Common construction path for once()/repeat().
        return TimerEvent(self._actor.id, delay, self.node.sched.trigger_loop,
                          repeats=repeats)

    def _trigger_loop(self):
        # Directly wake the scheduler for this actor.
        return self.node.sched.trigger_loop(actor_ids=[self._actor.id])
def register(node, actor, events):
    """
    Called when the Event-system object is created.

    Places a TimerHandler on the event object as `events.timer`, giving the
    actor access to the node's timer facility.  Also the place to register
    any hooks for actor migration.
    """
    events.timer = TimerHandler(node, actor)
|
{
"content_hash": "f39248f05684dac756a25ce0155bb5ba",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 113,
"avg_line_length": 30.80701754385965,
"alnum_prop": 0.6349658314350797,
"repo_name": "MalmoUniversity-DA366A/calvin-base",
"id": "efc245d24981ed7bd020be3e92c2ea4ded87ccd9",
"size": "2361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/calvinsys/events/timer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36376"
},
{
"name": "JavaScript",
"bytes": "9947"
},
{
"name": "Python",
"bytes": "692250"
}
],
"symlink_target": ""
}
|
"""Mock utilities that are async aware."""
import sys

if sys.version_info[:2] < (3, 8):
    # Python < 3.8: unittest.mock cannot mock coroutines, so pull in the
    # asynctest backport and expose its CoroutineMock under the name the
    # stdlib uses from 3.8 onwards.
    from asynctest.mock import * # noqa
    from asynctest.mock import CoroutineMock as AsyncMock # noqa
else:
    # Python 3.8+: AsyncMock ships with unittest.mock.
    from unittest.mock import * # noqa
|
{
"content_hash": "04cfb3380bf431f9dd6ecec8df60cbfe",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 65,
"avg_line_length": 30.25,
"alnum_prop": 0.6859504132231405,
"repo_name": "mKeRix/home-assistant",
"id": "1942b2ca284b8a38ef31501e8ed4c0a6b9d6759f",
"size": "242",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/async_mock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1466026"
},
{
"name": "Python",
"bytes": "4770710"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "12407"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from schnippjs import schnippforms
from testproduct.models import News, Category
import json
from django.forms.models import modelform_factory
from django.http import HttpResponse
def testing(request):
    '''
    Required for selenium tests;

    GET renders the test page with a form schema and the context of the
    first News object; POST validates JSON form data, saves it and returns
    the saved object's context as JSON.
    '''
    # Seed the database so the page always has data to render.
    cat = Category(name='Coding')
    cat.save()
    n = News(name='Djangoproductline', teaser='productivity', rating=1, somefloat=62.8, category=cat)
    n.save()
    # NOTE(review): this guard is unreachable right after n.save() above;
    # also `not News.objects.exists()` would avoid fetching all rows.
    if len(News.objects.all()) == 0:
        raise Exception('''
        This view is used for Liveserver/selenium tests. Please create at least one News object if you want to
        access this views manually.
        ''')
    # NOTE(review): modelform_factory without fields=/exclude= is deprecated
    # and raises in newer Django versions — confirm the targeted version.
    MyForm = modelform_factory(News)
    if request.method == 'POST':
        # Form payload arrives JSON-encoded in the 'ajax_data' POST field.
        data = json.loads(request.POST['ajax_data'])
        form = MyForm(data)
        if form.is_valid():
            obj = form.save()
            return HttpResponse(json.dumps(schnippforms.get_context(obj)))
        # NOTE(review): an invalid POST falls through and re-renders the
        # page with a fresh unbound form? No — `form = MyForm()` below is
        # in the else branch, so an invalid POST renders with no response
        # about the errors; verify this is intended.
    else:
        form = MyForm()
    return render(request, 'testing.html', {
        'schema': json.dumps(schnippforms.get_schema(form, name='myform')),
        'context': json.dumps(schnippforms.get_context(News.objects.all()[0]))
    })
|
{
"content_hash": "8bc179d3570d6e8167d117906ebb9992",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 115,
"avg_line_length": 32.256410256410255,
"alnum_prop": 0.6263910969793323,
"repo_name": "tonimichel/djpl-schnippjs",
"id": "864becf4885a631d3c24ef507f8e992d72a3f5d9",
"size": "1258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "products/testproduct/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1086"
},
{
"name": "Python",
"bytes": "20633"
}
],
"symlink_target": ""
}
|
import contextlib
import logging
import os
from pylib.constants import host_paths
_COLORAMA_PATH = os.path.join(
host_paths.DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src')
with host_paths.SysPath(_COLORAMA_PATH, position=0):
import colorama
class _ColorFormatter(logging.Formatter):
    """Formatter that delegates to a wrapped formatter and colorizes the
    resulting message according to the record's log level."""
    # pylint does not see members added dynamically in the constructor.
    # pylint: disable=no-member
    color_map = {
        logging.DEBUG: colorama.Fore.CYAN,
        logging.WARNING: colorama.Fore.YELLOW,
        logging.ERROR: colorama.Fore.RED,
        logging.CRITICAL: colorama.Back.RED,
    }

    def __init__(self, wrapped_formatter=None):
        """Wraps a |logging.Formatter| and adds color."""
        # BUG FIX: previously called super().__init__(self), passing the
        # formatter object itself as the `fmt` format string.  The base
        # class needs no arguments here since format() is fully overridden.
        super(_ColorFormatter, self).__init__()
        self._wrapped_formatter = wrapped_formatter or logging.Formatter()

    #override
    def format(self, record):
        # Delegate the real formatting, then wrap the result in color codes.
        message = self._wrapped_formatter.format(record)
        return self.Colorize(message, record.levelno)

    def Colorize(self, message, log_level):
        """Return `message` wrapped in the color for `log_level`;
        levels without a mapping are returned unchanged."""
        try:
            return self.color_map[log_level] + message + colorama.Style.RESET_ALL
        except KeyError:
            return message
class ColorStreamHandler(logging.StreamHandler):
    """Handler that can be used to colorize logging output.

    Example using a specific logger:

      logger = logging.getLogger('my_logger')
      logger.addHandler(ColorStreamHandler())
      logger.info('message')

    Example using the root logger:

      ColorStreamHandler.MakeDefault()
      logging.info('message')
    """

    def __init__(self, force_color=False):
        # force_color: colorize even when the stream is not a tty.
        super(ColorStreamHandler, self).__init__()
        self.force_color = force_color
        self.setFormatter(logging.Formatter())

    @property
    def is_tty(self):
        # Truthy when the underlying stream reports it is a terminal.
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty()

    #override
    def setFormatter(self, formatter):
        # Transparently wrap any formatter in a colorizing one when the
        # output supports (or is forced to use) colors.
        if self.force_color or self.is_tty:
            formatter = _ColorFormatter(formatter)
        super(ColorStreamHandler, self).setFormatter(formatter)

    @staticmethod
    def MakeDefault(force_color=False):
        """
        Replaces the default logging handlers with a coloring handler. To use
        a colorizing handler at the same time as others, either register them
        after this call, or add the ColorStreamHandler on the logger using
        Logger.addHandler()

        Args:
          force_color: Set to True to bypass the tty check and always colorize.
        """
        # If the existing handlers aren't removed, messages are duplicated
        logging.getLogger().handlers = []
        logging.getLogger().addHandler(ColorStreamHandler(force_color))
@contextlib.contextmanager
def SuppressLogging(level=logging.ERROR):
    """Momentarilly suppress logging events from all loggers.

    TODO(jbudorick): This is not thread safe. Log events from other threads
    might also inadvertently disappear.

    Example:

      with logging_utils.SuppressLogging():
        # all but CRITICAL logging messages are suppressed
        logging.info('just doing some thing') # not shown
        logging.critical('something really bad happened') # still shown

    Args:
      level: logging events with this or lower levels are suppressed.
    """
    logging.disable(level)
    try:
        yield
    finally:
        # BUG FIX: re-enable logging even when the managed block raises;
        # previously an exception left logging disabled process-wide.
        logging.disable(logging.NOTSET)
|
{
"content_hash": "3c4837c97eabb6be9da9d7a29fa9acdb",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 79,
"avg_line_length": 29.682242990654206,
"alnum_prop": 0.7169395465994962,
"repo_name": "chrisdickinson/nojs",
"id": "082f9e851b7f33c36f616fec0b11fe2ae06b5750",
"size": "3339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/pylib/utils/logging_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "52243"
},
{
"name": "JavaScript",
"bytes": "55472"
},
{
"name": "Python",
"bytes": "16760"
}
],
"symlink_target": ""
}
|
"""Load dataset for the Attention model (CSJ corpus).
In addition, frame stacking and skipping are used.
You can use the multi-GPU version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os.path import join, isfile
import pickle
import numpy as np
from utils.dataset.attention import DatasetBase
class Dataset(DatasetBase):

    def __init__(self, data_type, train_data_size, label_type, batch_size,
                 map_file_path, max_epoch=None, splice=1,
                 num_stack=1, num_skip=1,
                 shuffle=False, sort_utt=True, sort_stop_epoch=None,
                 progressbar=False, num_gpu=1):
        """A class for loading dataset.
        Args:
            data_type (string): train or dev or eval1 or eval2 or eval3
            train_data_size (string): train_subset or train_fullset
            label_type (string): kanji or kanji_divide or kana or
                kana_divide
            batch_size (int): the size of mini-batch
            map_file_path (string): path to the mapping file
            max_epoch (int, optional): the max epoch. None means infinite loop.
            splice (int, optional): frames to splice. Default is 1 frame.
            num_stack (int, optional): the number of frames to stack
            num_skip (int, optional): the number of frames to skip
            shuffle (bool, optional): if True, shuffle utterances. This is
                disabled when sort_utt is True.
            sort_utt (bool, optional): if True, sort all utterances by the
                number of frames and utteraces in each mini-batch are shuffled.
                Otherwise, shuffle utteraces.
            sort_stop_epoch (int, optional): After sort_stop_epoch, training
                will revert back to a random order
            progressbar (bool, optional): if True, visualize progressbar
            num_gpu (int, optional): the number of GPUs; the effective
                mini-batch size becomes batch_size * num_gpu
        """
        super(Dataset, self).__init__(map_file_path=map_file_path)

        # Evaluation sets carry transcription labels rather than indices.
        if data_type in ['eval1', 'eval2', 'eval3']:
            self.is_test = True
        else:
            self.is_test = False

        self.data_type = data_type
        self.train_data_size = train_data_size
        self.label_type = label_type
        # Scale the batch across all GPUs.
        self.batch_size = batch_size * num_gpu
        self.max_epoch = max_epoch
        self.splice = splice
        self.num_stack = num_stack
        self.num_skip = num_skip
        self.shuffle = shuffle
        self.sort_utt = sort_utt
        self.sort_stop_epoch = sort_stop_epoch
        self.progressbar = progressbar
        self.num_gpu = num_gpu

        # paths where datasets exist
        # NOTE(review): machine-specific absolute paths; the first root is
        # tried and the second used as fallback below.
        dataset_root = ['/data/inaguma/csj',
                        '/n/sd8/inaguma/corpus/csj/dataset']

        input_path = join(dataset_root[0], 'inputs',
                          train_data_size, data_type)
        # NOTE: ex.) save_path:
        # csj_dataset_path/inputs/train_data_size/data_type/speaker/***.npy
        label_path = join(dataset_root[0], 'labels',
                          train_data_size, data_type, label_type)
        # NOTE: ex.) save_path:
        # csj_dataset_path/labels/train_data_size/data_type/label_type/speaker/***.npy

        # Load the frame number dictionary
        if isfile(join(input_path, 'frame_num.pickle')):
            with open(join(input_path, 'frame_num.pickle'), 'rb') as f:
                self.frame_num_dict = pickle.load(f)
        else:
            # First root unavailable: fall back to the second dataset root.
            dataset_root.pop(0)
            input_path = join(dataset_root[0], 'inputs',
                              train_data_size, data_type)
            label_path = join(dataset_root[0], 'labels',
                              train_data_size, data_type, label_type)
            with open(join(input_path, 'frame_num.pickle'), 'rb') as f:
                self.frame_num_dict = pickle.load(f)

        # Sort paths to input & label
        # axis 1 sorts by frame count (ascending); axis 0 by utterance name.
        axis = 1 if sort_utt else 0
        frame_num_tuple_sorted = sorted(self.frame_num_dict.items(),
                                        key=lambda x: x[axis])
        input_paths, label_paths = [], []
        for utt_name, frame_num in frame_num_tuple_sorted:
            speaker = utt_name.split('_')[0]
            # ex.) utt_name: speaker_uttindex
            input_paths.append(join(input_path, speaker, utt_name + '.npy'))
            label_paths.append(join(label_path, speaker, utt_name + '.npy'))
        self.input_paths = np.array(input_paths)
        self.label_paths = np.array(label_paths)
        # NOTE: Not load dataset yet

        # Indices of utterances not yet consumed in the current epoch.
        self.rest = set(range(0, len(self.input_paths), 1))
|
{
"content_hash": "3e28e051e5490dc7014d315633060e27",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 86,
"avg_line_length": 43.339622641509436,
"alnum_prop": 0.5794514584240313,
"repo_name": "hirofumi0810/tensorflow_end2end_speech_recognition",
"id": "f3a953ffceaac88bd7e53b00ce7f6e5cee18c379",
"size": "4642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/csj/data/load_dataset_attention.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "535815"
},
{
"name": "Shell",
"bytes": "2247"
}
],
"symlink_target": ""
}
|
import os
import warnings
from bento.core.pkg_objects \
import \
Extension
from bento.core.node \
import \
split_path
def translate_name(name, ref_node, from_node):
    """Qualify `name` with the dotted package path of ref_node relative to
    from_node.  When both nodes coincide the name needs no qualification."""
    if from_node == ref_node:
        return name
    parent_pkg = ref_node.path_from(from_node).replace(os.sep, ".")
    return "%s.%s" % (parent_pkg, name)
class NodeDataFiles(object):
    """Plain record describing one data-files section: its name, the file
    nodes it contains, the node they are relative to, and where they get
    installed."""
    def __init__(self, name, nodes, ref_node, target_dir):
        self.name = name
        self.nodes = nodes
        self.ref_node = ref_node
        self.target_dir = target_dir
class NodeExtension(object):
    """Node-based description of a compiled extension.

    Keeps the extension's source nodes plus enough node context to compute
    its fully-qualified name and to re-express it as a classic Extension
    relative to an arbitrary source directory.
    """
    def __init__(self, name, nodes, top_node, ref_node, sub_directory_node=None, include_dirs=None):
        self.name = name
        self.top_node = top_node
        self.ref_node = ref_node
        self.nodes = nodes
        # Names are qualified relative to the sub-directory when one is given.
        if sub_directory_node is None:
            self.top_or_lib_node = top_node
        else:
            self.top_or_lib_node = sub_directory_node
        if not ref_node.is_child_of(self.top_or_lib_node):
            # ref_node outside the package root: name is already full.
            self.full_name = name
        else:
            self.full_name = translate_name(name, ref_node, self.top_or_lib_node)
        if include_dirs is None:
            self.include_dirs = []
        else:
            self.include_dirs = include_dirs

    def extension_from(self, from_node=None):
        """Return a pkg_objects.Extension with name and sources expressed
        relative to `from_node` (default: the source root of the first
        source node).

        Raises ValueError if from_node is not a source directory or not a
        parent of this extension's reference node.
        """
        if len(self.nodes) < 1:
            # No sources: degenerate empty extension.
            return Extension(self.name, [])
        else:
            if from_node is None:
                from_node = self.nodes[0].srcnode
            if not from_node.is_src():
                raise ValueError("node %s is not a source directory !" % from_node.abspath())
            if not self.ref_node.is_child_of(from_node):
                raise ValueError("from_node should be a parent of %s, but is %s" % \
                                 (self.ref_node.abspath(), from_node.abspath()))
            else:
                def translate_full_name(full_name):
                    # Strip from_node's package components off the front of
                    # the fully-qualified name.
                    parent_pkg = from_node.path_from(self.top_node)
                    if parent_pkg == ".":
                        parent_components = []
                    else:
                        parent_components = split_path(parent_pkg)
                    full_name_components = self.full_name.split(".")
                    if not full_name_components[:len(parent_components)] == parent_components:
                        raise ValueError("Internal bug: unexpected parent/name components: %s %s" % \
                                         (parent_components, full_name_components))
                    else:
                        return ".".join(full_name_components[len(parent_components):])
                relative_name = translate_full_name(self.full_name)
                return Extension(relative_name, sources=[n.path_from(from_node) for n in self.nodes])
class NodePythonPackage(object):
    """Node-based description of a pure Python package: its module nodes
    and its fully-qualified (dotted) name."""
    def __init__(self, name, nodes, top_node, ref_node, sub_directory_node=None):
        self.nodes = nodes
        self.top_node = top_node
        self.ref_node = ref_node
        # Names are qualified relative to the sub-directory when one is given.
        if sub_directory_node is None:
            self.top_or_lib_node = top_node
        else:
            self.top_or_lib_node = sub_directory_node
        if not ref_node.is_child_of(self.top_or_lib_node):
            # NOTE(review): bare IOError with no message — consider adding
            # the offending paths to aid debugging.
            raise IOError()
        self.full_name = translate_name(name, ref_node, self.top_or_lib_node)
class NodeRepresentation(object):
    """Node-based representation of a Package content.

    Resolves every item declared in a Package description (modules,
    packages, extensions, compiled libraries, data files, extra sources)
    into filesystem nodes, keyed by category in an internal registry.
    """
    def __init__(self, run_node, top_node, sub_directory_node=None):
        self.top_node = top_node
        self.run_node = run_node

        self.sub_directory_node = sub_directory_node
        if sub_directory_node is None:
            self.top_or_sub_directory_node = top_node
        else:
            if not sub_directory_node.is_child_of(top_node):
                raise IOError("sub_directory_node %r is not a subdirectory of %s" % \
                              (sub_directory_node, top_node))
            self.top_or_sub_directory_node = sub_directory_node

        # category name -> {entity name -> entity}
        self._registry = {}
        for category in ("modules", "packages", "extensions",
                         "compiled_libraries", "datafiles"):
            self._registry[category] = {}

        self._extra_source_nodes = []
        # node -> alias filename used when iterating source files
        self._aliased_source_nodes = {}

    def to_node_extension(self, extension, source_node, ref_node):
        """Resolve a pkg_objects.Extension's source globs (and include
        dirs) against `source_node` and return a NodeExtension.

        Raises IOError when a glob matches nothing or an include dir does
        not exist.
        """
        nodes = []
        for s in extension.sources:
            _nodes = source_node.ant_glob(s)
            if len(_nodes) < 1:
                #name = translate_name(extension.name, ref_node, self.top_or_sub_directory_node)
                raise IOError("Sources glob entry %r for extension %r did not return any result" \
                              % (s, extension.name))
            else:
                nodes.extend(_nodes)
        if extension.include_dirs:
            if self.sub_directory_node:
                raise NotImplementedError("include dirs translation not implemented yet")
            else:
                # NOTE(review): include_dirs is validated here but never
                # passed to the NodeExtension below — confirm intended.
                include_dirs = []
                for include_dir in extension.include_dirs:
                    n = source_node.find_dir(include_dir)
                    if n is None:
                        raise IOError("include dir %s is invalid" % include_dir)
                    else:
                        include_dirs.append(n)
        return NodeExtension(extension.name, nodes, self.top_node, ref_node, self.sub_directory_node)

    def _run_in_subpackage(self, pkg, func):
        # Apply func(sub_pkg, ref_node) to every subpackage, resolving each
        # subpackage's relative directory against the top node first.
        for name, sub_pkg in pkg.subpackages.items():
            ref_node = self.top_node.find_node(sub_pkg.rdir)
            if ref_node is None:
                raise IOError("directory %s relative to %s not found !" % (sub_pkg.rdir,
                                                                           self.top_node.abspath()))
            func(sub_pkg, ref_node)

    def _update_extensions(self, pkg):
        # Top-level extensions keep their declared name ...
        for name, extension in pkg.extensions.items():
            ref_node = self.top_node
            extension = self.to_node_extension(extension, self.top_node, ref_node)
            self._registry["extensions"][extension.full_name] = extension
        # ... subpackage extensions get qualified with the subpackage path.
        def _subpackage_extension(sub_package, ref_node):
            for name, extension in sub_package.extensions.items():
                extension = self.to_node_extension(extension, ref_node, ref_node)
                full_name = translate_name(name, ref_node, self.top_node)
                self._registry["extensions"][full_name] = extension
        self._run_in_subpackage(pkg, _subpackage_extension)

    def _update_libraries(self, pkg):
        # Same resolution scheme as extensions, for compiled libraries.
        for name, compiled_library in pkg.compiled_libraries.items():
            ref_node = self.top_node
            compiled_library = self.to_node_extension(compiled_library, self.top_node, ref_node)
            self._registry["compiled_libraries"][name] = compiled_library
        def _subpackage_compiled_libraries(sub_package, ref_node):
            for name, compiled_library in sub_package.compiled_libraries.items():
                compiled_library = self.to_node_extension(compiled_library, ref_node, ref_node)
                name = translate_name(name, ref_node, self.top_node)
                self._registry["compiled_libraries"][name] = compiled_library
        self._run_in_subpackage(pkg, _subpackage_compiled_libraries)

    def _update_py_packages(self, pkg):
        def _resolve_package(package_name, ref_node):
            # Locate the package via its __init__.py, then collect every
            # .py module in that directory.
            init = os.path.join(*(package_name.split(".") + ["__init__.py"]))
            n = ref_node.find_node(init)
            if n is None:
                raise IOError("init file for package %s not found (looked for %r)!" \
                              % (package_name, init))
            else:
                p = n.parent
                nodes = [p.find_node(f) for f in p.listdir() if f.endswith(".py")]
                node_package = NodePythonPackage(package_name, nodes, self.top_node,
                                                 ref_node, self.sub_directory_node)
                self._registry["packages"][node_package.full_name] = node_package
        def _subpackage_resolve_package(sub_package, ref_node):
            for package in sub_package.packages:
                _resolve_package(package, ref_node)
        for package in pkg.packages:
            _resolve_package(package, self.top_or_sub_directory_node)
        self._run_in_subpackage(pkg, _subpackage_resolve_package)

    def _update_data_files(self, pkg):
        # Resolve each data section's file globs against its source dir.
        for name, data_section in pkg.data_files.items():
            ref_node = self.top_node.find_node(data_section.source_dir)
            nodes = []
            for f in data_section.files:
                ns = ref_node.ant_glob(f)
                if len(ns) < 1:
                    raise IOError("File/glob %s could not be resolved (data file section %s)" % (f, name))
                else:
                    nodes.extend(ns)
            self._registry["datafiles"][name] = NodeDataFiles(name, nodes, ref_node, data_section.target_dir)

    def _update_py_modules(self, pkg):
        # Resolve standalone .py modules.
        for m in pkg.py_modules:
            n = self.top_or_sub_directory_node.find_node("%s.py" % m)
            if n is None:
                raise IOError("file for module %s not found" % m)
            else:
                self._registry["modules"][m] = n

    def _update_extra_sources(self, pkg):
        # Extra sources are best-effort: warn (don't fail) on empty globs.
        for s in pkg.extra_source_files:
            nodes = self.top_node.ant_glob(s)
            if len(nodes) < 1:
                warnings.warn("extra source files glob entry %r did not return any result" % (s,))
            self._extra_source_nodes.extend(nodes)

    def update_package(self, pkg):
        """Resolve every category of `pkg` into nodes and register them."""
        self._update_py_packages(pkg)
        self._update_py_modules(pkg)
        self._update_extensions(pkg)
        self._update_libraries(pkg)
        self._update_data_files(pkg)
        self._update_extra_sources(pkg)

    def iter_category(self, category):
        """Return an iterator over (name, entity) for one category."""
        if category in self._registry:
            return self._registry[category].items()
        else:
            raise ValueError("Unknown category %s" % category)

    def register_entity(self, category, name, entity):
        """Register an entity under an existing category."""
        if category in self._registry:
            self._registry[category][name] = entity
        else:
            raise ValueError("Category %r not registered" % category)

    def iter_source_nodes(self):
        """Yield every source node known to this representation, across
        all categories (duplicates are possible)."""
        for n in self._extra_source_nodes:
            yield n
        for d in self._registry["datafiles"].values():
            for n in d.nodes:
                yield n
        for m in self._registry["modules"].values():
            yield m
        for package in self._registry["packages"].values():
            for n in package.nodes:
                yield n
        for extension in self._registry["extensions"].values():
            for n in extension.nodes:
                yield n
        for compiled_library in self._registry["compiled_libraries"].values():
            for n in compiled_library.nodes:
                yield n

    def iter_source_files(self):
        """Yield (filename, alias) pairs relative to the run node; alias
        defaults to the filename unless the node was explicitly aliased."""
        for n in self.iter_source_nodes():
            filename = n.path_from(self.run_node)
            alias = self._aliased_source_nodes.get(n, filename)
            yield filename, alias
|
{
"content_hash": "fc6222a5fcb044575884483c82e239f2",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 109,
"avg_line_length": 41.846441947565545,
"alnum_prop": 0.5630537903875414,
"repo_name": "cournape/Bento",
"id": "8641c329c35c92bba14ad22c7354fd3d9ea62dbb",
"size": "11173",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bento/core/node_package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7553"
},
{
"name": "C++",
"bytes": "165"
},
{
"name": "CSS",
"bytes": "5358"
},
{
"name": "FORTRAN",
"bytes": "97"
},
{
"name": "Python",
"bytes": "1325666"
},
{
"name": "Shell",
"bytes": "6042"
}
],
"symlink_target": ""
}
|
import logging
from mock import patch
from nailgun.db.sqlalchemy.models import IPAddr
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import Node
from nailgun.test.base import BaseIntegrationTest
from nailgun.test.base import fake_tasks
from nailgun.test.base import reverse
logger = logging.getLogger(__name__)
class TestNodeDeletion(BaseIntegrationTest):
    """Integration test for the side effects of deleting a deployed node."""

    @fake_tasks(fake_rpc=False, mock_rpc=False)
    @patch('nailgun.rpc.cast')
    def test_node_deletion_and_attributes_clearing(self, mocked_rpc):
        """Deleting a node must remove it from the cluster and clear its
        network attributes (management network membership and IPs)."""
        # One-node cluster, deployed so the node gets network attributes.
        self.env.create(
            nodes_kwargs=[
                {"pending_addition": True},
            ]
        )

        self.env.launch_deployment()

        cluster = self.env.clusters[0]
        node = self.env.nodes[0]
        # Delete the node through the REST API.
        resp = self.app.delete(
            reverse(
                'NodeHandler',
                kwargs={'obj_id': node.id}),
            headers=self.default_headers
        )
        self.assertEqual(204, resp.status_code)

        # The node must be gone from the cluster ...
        node_try = self.db.query(Node).filter_by(
            cluster_id=cluster.id
        ).first()
        self.assertEqual(node_try, None)

        # ... and its network group membership and IP addresses cleared.
        management_net = self.db.query(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster.id).filter_by(
                name='management').first()

        ipaddrs = self.db.query(IPAddr).\
            filter_by(node=node.id).all()

        self.assertEqual(list(management_net.nodes), [])
        self.assertEqual(list(ipaddrs), [])
{
"content_hash": "838975bc0bb73295d13a549b5c1b0aaf",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 69,
"avg_line_length": 29.057692307692307,
"alnum_prop": 0.6227663798808736,
"repo_name": "Axam/nsx-web",
"id": "ffd5da13ae3bd734ba84eeb6045217f15d5b33c3",
"size": "2146",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/test/unit/test_node_deletion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "99402"
},
{
"name": "JavaScript",
"bytes": "553275"
},
{
"name": "Python",
"bytes": "2623980"
},
{
"name": "Ruby",
"bytes": "33345"
},
{
"name": "Shell",
"bytes": "29681"
}
],
"symlink_target": ""
}
|
from supriya.tools.requesttools.Request import Request
class GroupDeepFreeRequest(Request):
    """Placeholder for the group deep-free server request (not yet implemented)."""

    __slots__ = ()

    def __init__(self):
        Request.__init__(self)
        raise NotImplementedError

    def to_osc_message(self):
        raise NotImplementedError

    @property
    def response_specification(self):
        # No response is specified for this (unimplemented) request.
        return None

    @property
    def request_id(self):
        # Imported lazily to avoid a circular import at module load time.
        from supriya.tools import requesttools
        return requesttools.RequestId.GROUP_DEEP_FREE
|
{
"content_hash": "faa3fcdda1fa94c6ea32d8927531a929",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 54,
"avg_line_length": 18.90909090909091,
"alnum_prop": 0.6025641025641025,
"repo_name": "andrewyoung1991/supriya",
"id": "0615a9c21d1893753ba61fc1b10b38b4f178116d",
"size": "650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/tools/requesttools/GroupDeepFreeRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2693776"
}
],
"symlink_target": ""
}
|
from numpy import array, outer, ones, size, dot, linalg, zeros, cross, transpose
from ms_optAlgs import ms_optNewton
class SegmentMathError(Exception):
    """Raised when a numerical routine on a segment fails (e.g. no convergence)."""
class SegmentGeometry(object):
    """Bilinear surface patch spanned by four corner points.

    The patch is parameterized as
        P(alpha, beta) = d + a*alpha + b*beta + c*alpha*beta
    with alpha running from p1 toward p2 (the "leading edge" direction),
    beta from p1 toward p3; alpha, beta in [0, 1] cover the segment.
    """
    def __init__(self, p1, p2, p3, p4):
        self.setPoints(p1, p2, p3, p4)
    def setPoints(self, p1, p2, p3, p4):
        # Coefficients of the bilinear parameterization (see class docstring).
        self.__a = -p1+p2;
        self.__b = -p1+p3;
        self.__c = p1-p2-p3+p4;
        self.__d = p1;
    def getPoint(self, alpha, beta):
        # Evaluate the patch at (alpha, beta); vectorized over parameter arrays.
        a = self.__a
        b = self.__b
        c = self.__c
        d = self.__d
        return outer(a,alpha) + outer(b,beta) + outer(c,alpha*beta) + outer(d,ones(size(alpha)));
    # calculates the intersection of the leading edge (beta == 0) with a given
    # plane defined by point p and normal n
    def calcIntersectLE(self, p, n):
        eta = dot(p-self.__d,n)/ dot(self.__a,n)
        return self.getPoint(eta, 0.)
    # calculates the intersection of the trailing edge (beta == 1) with a given
    # plane defined by point p and normal n
    def calcIntersectTE(self, p, n):
        eta = dot(p-self.__d - self.__b,n)/ dot(self.__c + self.__a,n)
        return self.getPoint(eta, 1.)
    # projects the point p_proj on the cut of the segment with a plane (given
    # by point p and normal n); returns the (alpha, beta) parameters of the
    # projected point. Raises SegmentMathError if Newton iteration diverges.
    def projectPointOnCut(self, p_proj, p, n):
        debug = False
        # some constants for the intersection calculation
        a1 = dot(p - self.__d, n);
        a2 = -dot(self.__b, n);
        a3 = dot(self.__a, n);
        a4 = dot(self.__c, n);
        # this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
        al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
        # diff( eta(xi). xi), tangent in eta xsi space
        alp = lambda beta: (a2*a3 - a1*a4)/((a3 + a4*beta)**2);
        # 3d intersection curve, parameterized by beta [0,1]
        cu = lambda beta: outer(self.__a,al(beta)) + outer(self.__b,beta) + outer(self.__c,al(beta)*beta) + outer(self.__d,ones(size(beta)));
        # tangent in 3d space
        cup = lambda beta: (outer(self.__a,ones(size(beta))) + outer(self.__c,beta))*outer(ones(3),alp(beta)) + outer(self.__c,al(beta)) + outer(self.__b,ones(size(beta)));
        # calculate intersection points with leading and trailing edge of the segment
        pbeg = cu(0)[:,0]
        pend = cu(1)[:,0]
        reflen = linalg.norm(pbeg-pend);
        # project this point onto intersection curve i.e. find beta so that (cu(beta) - pact) * (pbeg-pend) == 0
        # as cu(beta) is not linear, we try to find the solution with Newton Raphson method
        f = lambda beta: dot(cu(beta)[:,0] - p_proj[:,0], pend - pbeg)/reflen
        fp = lambda beta: dot(cup(beta)[:,0], pend - pbeg)/reflen
        #initial guess
        beta = 0.5
        diff = f(beta)
        iter = 0;
        if debug: print 'Iter:', iter, ' Error=', abs(diff), ' @ Beta=' , beta
        while abs(diff) > 1e-12 and iter < 20:
            iter += 1
            dir = -diff/(fp(beta))
            # maybe we need a line-search here...?
            beta = beta + dir
            diff = f(beta)
            if debug: print 'Iter:', iter, ' Error=', abs(diff), '@ Beta=' , beta
        if iter >= 20:
            raise SegmentMathError('Could not project intersection curve onto line')
        return (al(beta), beta)
    def getTangents(self, alpha, beta):
        # Jacobian of P w.r.t. (alpha, beta): the two tangent vectors as columns.
        assert(size(alpha) == 1)
        assert(size(beta) == 1)
        J = zeros((3,2))
        J[:,0] = self.__a + beta*self.__c
        J[:,1] = self.__b + alpha*self.__c
        return J
    def getNormal(self, alpha, beta):
        # Unit surface normal at (alpha, beta), from the cross product of tangents.
        J = self.getTangents(alpha, beta);
        normal = cross(J[:,1],J[:,0])
        return normal/linalg.norm(normal)
    # this function is NOT part of the segment API but is required for
    # the projection algorithm: Hessian of ||P(alpha, beta) - x||^2.
    def __calcHessian(self, alpha, beta, x):
        assert(size(alpha) == 1)
        assert(size(beta) == 1)
        hess = zeros((2,2));
        p = self.getPoint(alpha, beta)
        hess[0,0] = 2.*dot(self.__a+beta *self.__c, self.__a+beta *self.__c);
        hess[1,1] = 2.*dot(self.__b+alpha*self.__c, self.__b+alpha*self.__c);
        hess[1,0] = 2.*dot(self.__b+alpha*self.__c, self.__a+beta*self.__c) + 2.*dot((p-x)[:,0], self.__c);
        hess[0,1] = hess[1,0]
        return hess
    def projectOnSegment(self, p):
        # Find (eta, xsi) minimizing the squared distance of p to the patch,
        # using Newton's method with an analytic gradient and Hessian.
        of = lambda x: linalg.norm(self.getPoint(x[0], x[1]) - p)**2
        ograd = lambda x: 2.*dot(transpose(self.getTangents(x[0], x[1])),self.getPoint(x[0], x[1]) - p)[:,0]
        ohess = lambda x: self.__calcHessian(x[0],x[1], p)
        # numerical gradient and hessian, just for error checking
        # ograd_num = lambda x: ms_numGrad(of, x, 1e-8)
        # ohess_num = lambda x: ms_numHess(ograd, x, 1e-8)
        # initial guess, could be improved but works
        x = array([0., 0.]);
        x = ms_optNewton(of, ograd, ohess, x)
        eta = x[0]
        xsi = x[1]
        return eta,xsi
    @staticmethod
    def isValid(eta, xsi):
        # True when (eta, xsi) lie inside the unit square, with tolerance eps.
        eps = 1e-7
        if eta >= -eps and eta <= 1.+eps and xsi >= -eps and xsi <= 1.+eps:
            return True
        else:
            return False
|
{
"content_hash": "a2131681bc0bec6d0bd51e93761aecda",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 166,
"avg_line_length": 32.371428571428574,
"alnum_prop": 0.6151809355692851,
"repo_name": "DLR-SC/tigl",
"id": "17b6460445d8730cc3a2266109dcb9e1fcb24ec4",
"size": "5283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/math-scripts/ms_segmentGeometry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5617"
},
{
"name": "C",
"bytes": "307361"
},
{
"name": "C++",
"bytes": "3813493"
},
{
"name": "CMake",
"bytes": "274651"
},
{
"name": "GLSL",
"bytes": "15750"
},
{
"name": "Java",
"bytes": "165629"
},
{
"name": "JavaScript",
"bytes": "5338"
},
{
"name": "Makefile",
"bytes": "247"
},
{
"name": "Python",
"bytes": "237583"
},
{
"name": "SWIG",
"bytes": "49596"
},
{
"name": "Shell",
"bytes": "12988"
}
],
"symlink_target": ""
}
|
"""Application API.
Use VAPIX® Application API to upload, control and manage applications and their license keys.
"""
from ...models.applications.application import Application
from ..api import APIItems
URL = "/axis-cgi/applications"
URL_CONTROL = f"{URL}/control.cgi"
URL_LICENSE = f"{URL}/license.cgi"
URL_LIST = f"{URL}/list.cgi"
URL_UPLOAD = f"{URL}/upload.cgi"
PARAM_CGI_KEY = "Properties.EmbeddedDevelopment.Version"
PARAM_CGI_VALUE = "1.20"
APPLICATION_STATE_RUNNING = "Running"
APPLICATION_STATE_STOPPED = "Stopped"
class Applications(APIItems):
    """Applications on Axis devices."""

    def __init__(self, request: object) -> None:
        """Initialize applications manager."""
        super().__init__({}, request, URL, Application)

    async def update(self) -> None:
        """Reload the set of installed applications from the device."""
        self.process_raw(await self.list())

    @staticmethod
    def pre_process_raw(raw: dict) -> dict:
        """Return a dictionary of applications keyed by application name."""
        if not raw:
            return {}
        reply = raw.get("reply", {})
        if "application" not in reply:
            return {}
        entries = reply["application"]
        # A device with a single application returns a dict, not a list.
        if isinstance(entries, list):
            return {entry["@Name"]: entry for entry in entries}
        return {entries["@Name"]: entries}

    async def list(self) -> dict:
        """Retrieve information about installed applications."""
        return await self._request("post", URL_LIST)
|
{
"content_hash": "742ef3ae41cff8ebc2e435f5fb8ed33e",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 93,
"avg_line_length": 28.344827586206897,
"alnum_prop": 0.6332116788321168,
"repo_name": "Kane610/axis",
"id": "977e665aeeffe2658470171f99c0f00a324d44a9",
"size": "1645",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "axis/vapix/interfaces/applications/applications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "513677"
}
],
"symlink_target": ""
}
|
"""TensorBoard HTTP utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import json
import re
import time
import wsgiref.handlers
import six
import tensorflow as tf
from werkzeug import wrappers
from tensorflow.tensorboard.backend import json_util
# Pulls the bare media type off a Content-Type value (everything before ';').
_EXTRACT_MIMETYPE_PATTERN = re.compile(r'^[^;\s]*')
# Pulls the value of a charset parameter out of a Content-Type value.
_EXTRACT_CHARSET_PATTERN = re.compile(r'charset=([-_0-9A-Za-z]+)')
# Allows *, gzip or x-gzip, but forbid gzip;q=0
# https://tools.ietf.org/html/rfc7231#section-5.3.4
_ALLOWS_GZIP_PATTERN = re.compile(
    r'(?:^|,|\s)(?:(?:x-)?gzip|\*)(?!;q=0)(?:\s|,|$)')
# Media types considered textual: worth gzip-compressing and charset-tagging.
_TEXTUAL_MIMETYPES = set([
    'application/javascript',
    'application/json',
    'application/json+protobuf',
    'image/svg+xml',
    'text/css',
    'text/csv',
    'text/html',
    'text/plain',
    'text/tab-separated-values',
    'text/x-protobuf',
])
# Media types whose payload may be passed as a JSON-serializable structure.
_JSON_MIMETYPES = set([
    'application/json',
    'application/json+protobuf',
])
def Respond(request,
            content,
            content_type,
            code=200,
            expires=0,
            content_encoding=None,
            encoding='utf-8'):
  """Build a werkzeug Response for *content*.

  The payload is gzip-compressed when the browser accepts it, the media
  type is sane to compress, and the payload is not already encoded (per
  *content_encoding*). Browser and proxy caching is disabled by default;
  an *expires* greater than zero lets browsers (but not proxies) cache
  the response for that many seconds.

  JSON media types accept a dict, list, tuple or set as *content*, which
  is cleansed and serialized here (ASCII-escaped unless the content type
  declares a charset). Other textual media types get the transmission
  charset appended to the Content-Type header; if the declared charset
  disagrees with *encoding*, the content is transcoded.

  Args:
    request: A werkzeug Request object. Used mostly to check the
      Accept-Encoding header.
    content: Payload data as byte string, unicode string, or maybe JSON.
    content_type: Media type and optionally an output charset.
    code: Numeric HTTP status code to use.
    expires: Second duration for browser caching.
    content_encoding: Encoding if content is already encoded, e.g. 'gzip'.
    encoding: Input charset if content parameter has byte strings.

  Returns:
    A werkzeug Response object (a WSGI application).
  """
  mimetype = _EXTRACT_MIMETYPE_PATTERN.search(content_type).group(0)
  charset_match = _EXTRACT_CHARSET_PATTERN.search(content_type)
  charset = charset_match.group(1) if charset_match else encoding
  textual = charset_match or mimetype in _TEXTUAL_MIMETYPES
  if mimetype in _JSON_MIMETYPES and isinstance(content,
                                                (dict, list, set, tuple)):
    content = json.dumps(json_util.Cleanse(content, encoding),
                         ensure_ascii=not charset_match)
  if charset != encoding:
    content = tf.compat.as_text(content, encoding)
  content = tf.compat.as_bytes(content, charset)
  if textual and not charset_match and mimetype not in _JSON_MIMETYPES:
    content_type += '; charset=' + charset
  client_allows_gzip = _ALLOWS_GZIP_PATTERN.search(
      request.headers.get('Accept-Encoding', ''))
  if not content_encoding and textual and client_allows_gzip:
    buf = six.BytesIO()
    gzip_file = gzip.GzipFile(fileobj=buf, mode='wb', compresslevel=3)
    gzip_file.write(content)
    gzip_file.close()
    content = buf.getvalue()
    content_encoding = 'gzip'
  # HEAD responses carry headers (incl. Content-Length: 0) but no body.
  if request.method == 'HEAD':
    content = ''
  headers = [('Content-Length', str(len(content)))]
  if content_encoding:
    headers.append(('Content-Encoding', content_encoding))
  if expires > 0:
    e = wsgiref.handlers.format_date_time(time.time() + float(expires))
    headers.append(('Expires', e))
    headers.append(('Cache-Control', 'private, max-age=%d' % expires))
  else:
    headers.append(('Expires', '0'))
    headers.append(('Cache-Control', 'no-cache, must-revalidate'))
  return wrappers.Response(
      response=content, status=code, headers=headers, content_type=content_type)
|
{
"content_hash": "36d31b61039e0dc8c4b8161857829444",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 80,
"avg_line_length": 37.5,
"alnum_prop": 0.6929353233830846,
"repo_name": "ville-k/tensorflow",
"id": "81a06a5f14cfb41734154427ad33183db00dcada",
"size": "5714",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/tensorboard/backend/http_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7908"
},
{
"name": "C",
"bytes": "186881"
},
{
"name": "C++",
"bytes": "25385487"
},
{
"name": "CMake",
"bytes": "166479"
},
{
"name": "Go",
"bytes": "859393"
},
{
"name": "HTML",
"bytes": "593130"
},
{
"name": "Java",
"bytes": "319061"
},
{
"name": "JavaScript",
"bytes": "1399"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63700"
},
{
"name": "Protocol Buffer",
"bytes": "227623"
},
{
"name": "Python",
"bytes": "22405092"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "338633"
},
{
"name": "TypeScript",
"bytes": "801168"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from matplotlib.testing.decorators import (image_comparison, cleanup,
knownfailureif)
import matplotlib.pyplot as plt
import matplotlib.patheffects as path_effects
try:
# mock in python 3.3+
from unittest import mock
except ImportError:
import mock
from nose.tools import assert_equal
@image_comparison(baseline_images=['patheffect1'], remove_text=True)
def test_patheffect1():
    """Stroke effects on an annotation, its arrow, and the grid lines."""
    axes = plt.subplot(111)
    axes.imshow([[1, 2], [2, 3]])
    annotation = axes.annotate(
        "test", (1., 1.), (0., 0),
        arrowprops=dict(arrowstyle="->", connectionstyle="angle3", lw=2),
        size=20, ha="center",
        path_effects=[path_effects.withStroke(linewidth=3, foreground="w")])
    annotation.arrow_patch.set_path_effects(
        [path_effects.Stroke(linewidth=5, foreground="w"),
         path_effects.Normal()])
    axes.grid(True, linestyle="-")
    grid_effects = [path_effects.withStroke(linewidth=3, foreground="w")]
    for grid_line in axes.get_xgridlines() + axes.get_ygridlines():
        grid_line.set_path_effects(grid_effects)
@image_comparison(baseline_images=['patheffect2'], remove_text=True)
def test_patheffect2():
    """White-stroked contour lines and labels drawn over an image."""
    axes = plt.subplot(111)
    data = np.arange(25).reshape((5, 5))
    axes.imshow(data)
    contours = axes.contour(data, colors="k")
    plt.setp(contours.collections,
             path_effects=[path_effects.withStroke(linewidth=3,
                                                   foreground="w")])
    labels = axes.clabel(contours, fmt="%2.0f", use_clabeltext=True)
    plt.setp(labels,
             path_effects=[path_effects.withStroke(linewidth=3,
                                                   foreground="w")])
@image_comparison(baseline_images=['patheffect3'])
def test_patheffect3():
    """Shadow/stroke/patch effects on a line, title, legend and text boxes."""
    line, = plt.plot([1, 3, 5, 4, 3], 'o-b', lw=4)
    line.set_path_effects([path_effects.SimpleLineShadow(),
                           path_effects.Normal()])
    plt.title(r'testing$^{123}$',
              path_effects=[path_effects.withStroke(linewidth=1,
                                                    foreground="r")])
    legend = plt.legend([line], [r'Line 1$^2$'], fancybox=True, loc=2)
    legend.legendPatch.set_path_effects(
        [path_effects.withSimplePatchShadow()])
    drop_text = plt.text(2, 3, 'Drop test', color='white',
                         bbox={'boxstyle': 'circle,pad=0.1', 'color': 'red'})
    drop_effects = [
        path_effects.Stroke(linewidth=3.75, foreground='k'),
        path_effects.withSimplePatchShadow((6, -3), shadow_rgbFace='blue')]
    drop_text.set_path_effects(drop_effects)
    drop_text.get_bbox_patch().set_path_effects(drop_effects)
    hatch_effects = [
        path_effects.PathPatchEffect(offset=(4, -4), hatch='xxxx',
                                     facecolor='gray'),
        path_effects.PathPatchEffect(edgecolor='white', facecolor='black',
                                     lw=1.1)]
    hatch_text = plt.gcf().text(0.02, 0.1, 'Hatch shadow', fontsize=75,
                                weight=1000, va='center')
    hatch_text.set_path_effects(hatch_effects)
@image_comparison(baseline_images=['stroked_text'], extensions=['png'])
def test_patheffects_stroked_text():
    """Render each glyph class with a black stroke outline."""
    lines = [
        'A B C D E F G H I J K L',
        'M N O P Q R S T U V W',
        'X Y Z a b c d e f g h i j',
        'k l m n o p q r s t u v',
        'w x y z 0123456789',
        r"!@#$%^&*()-=_+[]\;'",
        ',./{}|:"<>?'
    ]
    font_size = 50

    ax = plt.axes([0, 0, 1, 1])
    for row, chunk in enumerate(lines):
        text = ax.text(x=0.01, y=(0.9 - row * 0.13), s=chunk,
                       fontdict={'ha': 'left', 'va': 'center',
                                 'size': font_size, 'color': 'white'})
        text.set_path_effects([path_effects.Stroke(linewidth=font_size / 10,
                                                   foreground='black'),
                               path_effects.Normal()])
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.axis('off')
@cleanup
@knownfailureif(True)
def test_PathEffect_points_to_pixels():
    """A proxy PathEffectRenderer must preserve the base renderer's dpi scale."""
    fig = plt.figure(dpi=150)
    line, = plt.plot(range(10))
    line.set_path_effects([path_effects.SimpleLineShadow(),
                           path_effects.Normal()])
    base_renderer = fig.canvas.get_renderer()
    proxy_renderer = path_effects.SimpleLineShadow().get_proxy_renderer(
        base_renderer)
    assert isinstance(proxy_renderer, path_effects.PathEffectRenderer), (
        'Expected a PathEffectRendere instance, got '
        'a {0} instance.'.format(type(proxy_renderer)))
    # Confirm that using a path effects renderer maintains point sizes
    # appropriately. Otherwise rendered font would be the wrong size.
    assert_equal(base_renderer.points_to_pixels(15),
                 proxy_renderer.points_to_pixels(15))
def test_SimplePatchShadow_offset():
    """The offset passed to SimplePatchShadow is stored verbatim."""
    effect = path_effects.SimplePatchShadow(offset=(4, 5))
    assert_equal(effect._offset, (4, 5))
@image_comparison(baseline_images=['collection'], tol=0.015)
def test_collection():
    """Patch and stroke effects applied to contour collections and labels."""
    x, y = np.meshgrid(np.linspace(0, 10, 150), np.linspace(-5, 5, 100))
    data = np.sin(x) + np.cos(y)
    contour_set = plt.contour(data)
    effects = [path_effects.PathPatchEffect(edgecolor='black',
                                            facecolor='none', linewidth=12),
               path_effects.Stroke(linewidth=5)]
    for collection in contour_set.collections:
        collection.set_path_effects(effects)
    for label in plt.clabel(contour_set, colors='white'):
        label.set_path_effects([path_effects.withStroke(foreground='k',
                                                        linewidth=3)])
        label.set_bbox({'boxstyle': 'sawtooth', 'facecolor': 'none',
                        'edgecolor': 'blue'})
# Allow running this test module directly; doctests are collected as well.
if __name__ == '__main__':
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
{
"content_hash": "30a563fda139741b5f6892f33824cd41",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 78,
"avg_line_length": 36.864197530864196,
"alnum_prop": 0.5517414601473544,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "3fb37031db070069be47268a29eea403a960dd00",
"size": "5972",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/matplotlib/tests/test_patheffects.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
from creative import Creative
# Settings keys that must be present for InLine and Wrapper ads respectively
# (mirrors the required child elements of VAST <InLine> / <Wrapper>).
REQUIRED_INLINE = ['AdSystem', 'AdTitle']
REQUIRED_WRAPPER = ['AdSystem', 'VASTAdTagURI']
def validateSettings(settings, requireds):
    """Ensure *settings* contains every key listed in *requireds*.

    Raises Exception naming the first missing key, in *requireds* order.
    """
    for field in requireds:
        if field not in settings.keys():
            raise Exception("Missing required settings: {required}".format(required=field))
def validateInLineSettings(settings):
    # An InLine ad must carry AdSystem and AdTitle; raises on a missing key.
    validateSettings(settings, REQUIRED_INLINE)
def validateWrapperSettings(settings):
    # A Wrapper ad must carry AdSystem and VASTAdTagURI; raises on a missing key.
    validateSettings(settings, REQUIRED_WRAPPER)
class Ad(object):
    """A single VAST <Ad> element, either an InLine ad or a Wrapper ad.

    Surveys, impressions and creatives are attached after construction via
    the ``attach*`` methods.
    """

    def __init__(self, settings=None):
        """Create an ad from a *settings* dict.

        Required keys depend on ``settings["structure"]``: a wrapper needs
        AdSystem and VASTAdTagURI, anything else needs AdSystem and AdTitle.
        """
        # Avoid the shared-mutable-default pitfall; behavior for callers
        # passing a dict (or nothing) is unchanged.
        if settings is None:
            settings = {}
        self.errors = []
        self.surveys = []
        self.impressions = []
        self.creatives = []
        if settings["structure"].lower() == 'wrapper':
            validateWrapperSettings(settings)
            self.VASTAdTagURI = settings["VASTAdTagURI"]
        else:
            validateInLineSettings(settings)
            self.id = settings["id"]
        self.sequence = settings.get("sequence", None)
        self.structure = settings["structure"]
        self.AdSystem = settings["AdSystem"]
        # BUG FIX: AdTitle is not required for wrappers, yet the old code did
        # settings["AdTitle"] unconditionally and so every wrapper ad raised
        # KeyError. InLine ads are validated above, so .get() is equivalent
        # for them.
        self.AdTitle = settings.get("AdTitle", None)
        # optional elements
        self.Error = settings.get("Error", None)
        self.Description = settings.get("Description", None)
        self.Advertiser = settings.get("Advertiser", None)
        self.Pricing = settings.get("Pricing", None)
        self.Extensions = settings.get("Extensions", None)

    def attachSurvey(self, settings):
        """Attach a survey given as a dict with "url" and optional "type"."""
        # BUG FIX: *settings* is a dict (see the "type" key accesses below),
        # so the old attribute access ``settings.url`` raised AttributeError.
        survey = {"url": settings["url"]}
        if "type" in settings:
            survey["type"] = settings["type"]
        self.surveys.append(survey)

    def attachImpression(self, settings):
        """Attach an impression record; returns self for chaining."""
        self.impressions.append(settings)
        return self

    def attachCreative(self, _type, options):
        """Create a Creative of *_type*, attach it, and return it."""
        creative = Creative(_type, options)
        self.creatives.append(creative)
        return creative
|
{
"content_hash": "a1f1a686e84d48057b94aae32ec84f7a",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 94,
"avg_line_length": 30.64516129032258,
"alnum_prop": 0.6405263157894737,
"repo_name": "selam/python-vast-xml-generator",
"id": "c61ee7c3e1afd59544a51284dfda6c129785aba2",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vast/ad.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "24332"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
import random
class Sarsa:
    """Tabular SARSA(0) agent with an epsilon-greedy, noise-perturbed policy.

    Q-values live in ``self.q`` keyed by ``(state, action)``.
    ``self.dalpha[state][action]`` counts how often chooseAction picked that
    pair; learnQ uses 1/count as its step size (0 until the first pick).
    """

    def __init__(self, actions, epsilon=0.05, gamma=0.99):
        self.q = {}
        # Visit counters driving the 1/N(s, a) learning rate.
        self.dalpha = defaultdict(lambda: defaultdict(int))
        self.epsilon = epsilon
        self.gamma = gamma
        self.actions = actions

    def getQ(self, state, action):
        """Return Q(state, action), 0.0 for pairs never updated."""
        return self.q.get((state, action), 0.0)

    def learnQ(self, state, action, reward, value):
        """Move Q(state, action) toward *value*; a first visit stores *reward*."""
        oldv = self.q.get((state, action), None)
        if oldv is None:
            self.q[(state, action)] = reward
        else:
            alpha = self.dalpha[state][action]
            if alpha != 0:
                # BUG FIX: the visit counter is an int, so "1/alpha" truncated
                # to 0 under Python 2 integer division, freezing Q-values after
                # the first update. Force true (float) division; this is a
                # no-op on Python 3.
                alpha = 1.0 / alpha
            self.q[(state, action)] = oldv + alpha * (value - oldv)

    def add_noise_to_walk(self, action, i, ap = 0.9):
        """With probability *ap* keep action *i*; otherwise pick a neighbor.

        Even and odd indices use different neighbor offsets (walk noise).
        """
        if random.random() < ap:
            action = self.actions[i]
        elif i%2 == 0:
            if random.random() > 0.5:
                action = self.actions[(i + 2) % len(self.actions)]
            else:
                action = self.actions[(i + 3) % len(self.actions)]
        else:
            if random.random() > 0.5:
                action = self.actions[(i + 1) % len(self.actions)]
            else:
                action = self.actions[(i + 2) % len(self.actions)]
        return action

    def chooseAction(self, state, ap = 0.9):
        """Epsilon-greedy choice with random tie-breaking and walk noise.

        Also increments the visit counter that feeds learnQ's step size.
        """
        if random.random() < self.epsilon:
            action = random.choice(self.actions)
        else:
            q = [self.getQ(state, a) for a in self.actions]
            maxQ = max(q)
            count = q.count(maxQ)
            if count > 1:
                # Several equally-greedy actions: break the tie at random.
                best = [i for i in range(len(self.actions)) if q[i] == maxQ]
                i = random.choice(best)
            else:
                i = q.index(maxQ)
            action = self.add_noise_to_walk(self.actions[i], i, ap)
        self.dalpha[state][action] += 1
        return action

    def learn(self, state1, action1, reward, state2, action2):
        """SARSA backup: target is reward + gamma * Q(state2, action2)."""
        qnext = self.getQ(state2, action2)
        self.learnQ(state1, action1, reward, reward + self.gamma * qnext)
|
{
"content_hash": "74575597e4e0ab09cf68ca086c6d9053",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 76,
"avg_line_length": 35.75,
"alnum_prop": 0.5062937062937063,
"repo_name": "SahilC/reinforcement-learning",
"id": "d01e8d6d3fd6179bdd79b4aae79b409091084a9c",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assign3/sarsa.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17607"
}
],
"symlink_target": ""
}
|
import os
import shutil
import re
import subprocess
from PIL import Image
from PIL import ImageOps
from cliff.lister import Lister
from clint.textui import colored, puts
from ..libraries.mod_pbxproj import XcodeProject
# Directory suffixes whose contents are bundled third-party assets: skipped.
IGNORE_DIR_SUFFIX = ('.framework', '.embeddedframework', '.bundle', '.xcodeproj')
# BUG FIX: ('Pods') is just the string 'Pods' (a parenthesized expression),
# not a tuple, so membership tests match substrings/characters instead of
# whole directory names. The trailing comma makes it a one-element tuple.
IGNORE_DIR = ('Pods',)
class Resource(Lister):
def get_parser(self, prog_name):
parser = super(Resource, self).get_parser(prog_name)
subparsers = parser.add_subparsers()
export_parser = subparsers.add_parser('export')
export_parser.add_argument('command', nargs='?', default='export')
export_parser.add_argument('-s','--source', type = str, help = 'the path of xcode project')
export_parser.add_argument('-t','--target', type = str, default='./exports', help = 'the path of resource target directory')
export_parser.add_argument('--flat', action = 'store_true', help = 'ignore the suffix name')
import_parser = subparsers.add_parser('import')
import_parser.add_argument('command', nargs='?', default='import')
import_parser.add_argument('-s', '--source', type = str, help = 'the path of resource directory')
import_parser.add_argument('-t', '--target', type = str, help = 'the path of xcode project')
cleanup_parser = subparsers.add_parser('cleanup')
cleanup_parser.add_argument('command', nargs='?', default='cleanup')
cleanup_parser.add_argument('-p', '--project', type = str, help = 'the source to cleanup')
scale_parser = subparsers.add_parser('scale')
scale_parser.add_argument('command', nargs='?', default='scale')
scale_parser.add_argument('-f', '--files', nargs='+', help='files to scale')
scale_parser.add_argument('-s', '--size', nargs='+', help = 'the scaling size, 1,2,3 supported')
scale_parser.add_argument('-r', '--recursive', action = 'store_true', help = 'recursive the directory')
return parser
def take_action(self, parsed_args):
command = parsed_args.command
if command == 'export':
return self.export_resource(parsed_args)
elif command == 'import':
return self.import_resource(parsed_args)
elif command == 'cleanup':
return self.cleanup_resource(parsed_args)
elif command == 'scale':
return self.scale_resource(parsed_args)
def export_resource(self, parsed_args):
source = parsed_args.source
target = parsed_args.target
is_flat = parsed_args.flat
result = []
for root, subdirs, files in os.walk(source):
for f in files:
if re.match('.*\.(jpg|png|gif|jpeg)$',os.path.basename(f)):
if is_flat:
if not os.path.isdir(target) or not os.path.exists(target):
os.makedirs(target)
ofpath = os.path.join(root, os.path.basename(f))
tfpath = os.path.abspath(os.path.join(target, os.path.basename(f)))
try:
shutil.copy2(ofpath, tfpath)
result.append((f,'SUCCESS'))
except:
result.append((f,'FAILED'))
else:
rpath = os.path.relpath(root, source)
tpath = os.path.abspath(os.path.join(target, rpath))
ofpath = os.path.join(root, os.path.basename(f))
if not os.path.isdir(tpath) or not os.path.exists(tpath):
os.makedirs(tpath)
tfpath = os.path.join(tpath,os.path.basename(f))
try:
shutil.copy2(ofpath, tfpath)
result.append((f,'SUCCESS'))
except:
result.append((f,'FAILED'))
return (('Name', 'Status'),
(f for f in result)
)
def import_resource(self, parsed_args):
source = parsed_args.source
target = parsed_args.target
result = []
for root, subdirs, files in os.walk(source):
for f in files:
if re.match('.*\.(jpg|png|gif|jpeg)$',os.path.basename(f)):
rpath = os.path.relpath(root, source)
tpath = os.path.abspath(os.path.join(target, rpath))
ofpath = os.path.join(root, os.path.basename(f))
if not os.path.isdir(tpath) or not os.path.exists(tpath):
os.makedirs(tpath)
tfpath = os.path.join(tpath,os.path.basename(f))
try:
shutil.copy2(ofpath, tfpath)
result.append((f,'SUCCESS'))
except:
result.append((f,'FAILED'))
return (('Name', 'Status'),
(f for f in result)
)
    def cleanup_resource(self, parsed_args):
        """Delete images never referenced from any .m/.plist/.xib file, and
        strip their entries from the Xcode project file.
        """
        source = parsed_args.project
        # resources: base names (scale suffix stripped) of all images found
        resources = []
        # base name -> list of extensions seen / list of on-disk paths
        resources_ext = {}
        resources_path_map = {}
        # resource names referenced from source files
        resources_in_file = []
        # base names judged unused; (file, status) rows for output
        result = []
        flat_result = []
        # on-disk path -> pbxproj object id
        resources_pbxproj_map = {}
        pbxproj = self.find_pbxproj(source)
        if not pbxproj:
            puts(colored.red('.xcodeproj not found.'))
            return
        #get all resource and create resource-path, resource-ext map
        for root, subdirs, files in os.walk(source):
            if self.is_in_ignore_dir(root, source):
                continue
            for f in files:
                if re.match('.*\.(jpg|png|gif|jpeg)$',os.path.basename(f)):
                    if self.is_igore_file(os.path.basename(f)):
                        continue
                    path = os.path.join(root, os.path.basename(f))
                    # strip extension, then the @2x/@3x scale suffix
                    filename_with_scale = os.path.splitext(f)[0]
                    file_ext = os.path.splitext(f)[1]
                    filename = filename_with_scale.split('@')[0]
                    resources.append(filename)
                    paths = resources_path_map.get(filename, [])
                    paths.append(path)
                    resources_path_map[filename] = list(set(paths))
                    exts = resources_ext.get(filename,[])
                    exts.append(file_ext)
                    resources_ext[filename] = list(set(exts))
        resources = list(set(resources))
        resources.sort()
        #get all resource that in project file
        for m_root, m_subdirs, m_files in os.walk(source):
            if self.is_in_ignore_dir(m_root, source):
                continue
            for m_f in m_files:
                filematch = re.match('.*\.(m|plist|xib)$',os.path.basename(m_f))
                if filematch:
                    m_path = os.path.join(m_root, os.path.basename(m_f))
                    m_bf = open(m_path,'rb')
                    # .m files reference images via @"..." literals; plist/xib
                    # files are scanned for bare file-name-like tokens.
                    pattern = r'@"([a-zA-Z0-9_\-\/\.]+(\.jpg|\.png|\.gif|\.jpeg)?)"' if filematch.group(1) == 'm' else r'([a-zA-Z0-9_\-\/\.]+(\.jpg|\.png|\.gif|\.jpeg)?)'
                    matchs = re.finditer(pattern, m_bf.read())
                    for match in matchs:
                        resources_in_file.append(match.group(1))
        resources_in_file = list(set(resources_in_file))
        resources_in_file.sort()
        #return if not any resource in project
        if len(resources_in_file)==0:
            puts(colored.red('.m, .plist, .xib files not found.'))
            return
        # A resource is unused only if neither its bare name nor any
        # name+extension form appears in the referenced set.
        for resource in resources:
            if resource not in resources_in_file:
                is_find = False
                exts = resources_ext.get(resource, [])
                for ext in exts:
                    name = resource+ext
                    if name in resources_in_file:
                        is_find = True
                        break
                if not is_find:
                    result.append(resource)
        #get the xcodeproj id of the resources in result
        for r in result:
            if r in resources_path_map:
                for path in resources_path_map[r]:
                    for id in pbxproj.get_ids():
                        obj = pbxproj.get_obj(id)
                        if os.path.basename(path) == obj.get('path', None):
                            resources_pbxproj_map[path] = id
        # delete the files on disk and drop their project-file entries
        for r in result:
            if r in resources_path_map:
                for path in resources_path_map[r]:
                    try:
                        os.remove(path)
                        pbxproj_id = resources_pbxproj_map[path]
                        pbxproj.remove_file(pbxproj_id)
                        flat_result.append((os.path.basename(path), 'SUCCESS'))
                    except:
                        flat_result.append((os.path.basename(path), 'FAILED'))
        if pbxproj.modified:
            pbxproj.save()
        return (('Name', 'Status'),
            (f for f in flat_result)
            )
def scale_resource(self, parser_args):
    """CLI entry point: generate missing scaled variants (1x/2x/3x) of images.

    parser_args provides: files (list of paths), size (list of '1'/'2'/'3'
    strings; other values are dropped) and recursive (only honoured when a
    single path is given).  Returns a (header, rows) pair for tabular
    output, or the string 'Nothing Happened.' when no image was processed.
    """
    files = parser_args.files
    # Only the scales 1x/2x/3x are supported; anything else is discarded.
    sizes = [s for s in parser_args.size if s in ['1', '2', '3']]
    recursive = parser_args.recursive if len(files)==1 else False
    file_size_map = {}
    result = []
    sizes.sort()
    if recursive:
        # Walk the directory containing the single given path and collect
        # every bitmap below it.
        source = os.path.dirname(files[0])
        files = []
        for root, subdirs, allfiles in os.walk(source):
            for f in allfiles:
                if re.match('.*\.(jpg|png|gif|jpeg)$',os.path.basename(f)):
                    files.append(os.path.join(root, os.path.basename(f)))
    #create a file and size map: {'name.ext': {'1x': path, '2x': path, 'svg': path}}
    for file in files:
        if os.path.exists(file) and os.path.isfile(file):
            if re.match('.*\.(jpg|png|gif|jpeg|svg)$',os.path.basename(file)):
                filename_split = os.path.splitext(os.path.basename(file))
                filename_with_scale = filename_split[0]
                file_ext = filename_split[1]
                # 'image@2x' -> base name 'image', scale '2x'; no '@' means 1x.
                filename_scale_split = filename_with_scale.split('@')
                filename = filename_scale_split[0]
                file_with_ext = filename+file_ext
                filename_scale = filename_scale_split[1] if len(filename_scale_split) > 1 else '1x'
                filename_scale = filename_scale if filename_scale else '1x'
                filename_map_item = file_size_map.get(file_with_ext,{})
                if file_ext == '.svg':
                    filename_map_item['svg'] = os.path.abspath(file)
                else:
                    filename_map_item[filename_scale] = os.path.abspath(file)
                file_size_map[file_with_ext] = filename_map_item
    puts(colored.yellow('Scaling %d files...' % len(file_size_map)))
    for fk, fv in file_size_map.iteritems():
        # Prefer a vector original when present; otherwise downscale from
        # the largest available bitmap.
        if not not fv.get('svg'):
            ret = self.scale_vector(fk, fv.get('svg'), sizes)
            result.append(ret)
        else:
            ret = self.scale_bitmap(fk, fv, sizes)
            result.append(ret)
    output_header = ['Name']
    for k in [s+'x' for s in sizes]:
        output_header.insert(len(output_header), k)
    if len(result)>0:
        return (output_header,
                (r for r in result)
                )
    else:
        return 'Nothing Happened.'
def scale_bitmap(self, skel_filename, file_and_size, sizes):
    """Generate missing scaled bitmap variants by downscaling the largest one.

    skel_filename -- canonical file name without scale suffix, e.g. 'a.png'
    file_and_size -- mapping of scale ('1x', '2x', ...) to an existing path
    sizes         -- list of requested scales as strings ('1', '2', '3')
    Returns a row [skel_filename, status, status, ...] with one status per
    requested scale: 'EXIST', 'SUCCESS', 'FAILED' or 'IGNORE'.
    """
    sizes_with_suffix = [s + 'x' for s in sizes]
    max_size = 1
    max_file = None
    need_scale_size = []
    ret = {}
    # Find the largest available source bitmap; we only ever downscale.
    # (.items() instead of .iteritems() keeps this working on Python 3 too.)
    for fk, fv in file_and_size.items():
        fk_int = int(fk.split('x')[0])
        if fk_int > max_size:
            if os.path.exists(fv):
                max_size = fk_int
                max_file = fv
    for size_with_suffix in sizes_with_suffix:
        if size_with_suffix not in file_and_size:
            need_scale_size.append(size_with_suffix)
            ret[size_with_suffix] = 'FAILED'  # overwritten on success below
        else:
            ret[size_with_suffix] = 'EXIST'
    if max_file and len(need_scale_size) > 0:
        for need_size in need_scale_size:
            need_size_float = float(need_size.split('x')[0])
            scaling = need_size_float / max_size
            if scaling > 1:
                # Never upscale beyond the best available source.
                ret[need_size] = 'IGNORE'
                continue
            scaling_file_dir = os.path.dirname(max_file)
            filename_split = os.path.splitext(skel_filename)
            need_size_format = '@' + need_size if need_size != '1x' else ''
            scaling_filename = '%s%s%s' % (filename_split[0], need_size_format, filename_split[1])
            scaling_path = os.path.join(scaling_file_dir, scaling_filename)
            try:
                # Removed leftover debug print and a dead `img.convert('RGB')`
                # whose return value was discarded (convert is not in-place).
                img = Image.open(max_file)
                thumb = ImageOps.fit(img, (int(img.size[0]*scaling), int(img.size[1]*scaling)), Image.ANTIALIAS)
                thumb.save(scaling_path)
                ret[need_size] = 'SUCCESS'
            except Exception:
                # Best effort: a failed conversion leaves 'FAILED' in place.
                # (Narrowed from a bare except so Ctrl-C still propagates.)
                pass
    result_list = [skel_filename]
    for size_with_suffix in sizes_with_suffix:
        result_list.append(ret.get(size_with_suffix, 'UNKNOWN'))
    return result_list
def scale_vector(self, skel_filename, file, sizes):
    """Rasterize an SVG into PNGs for every requested scale via rsvg-convert.

    skel_filename -- canonical file name without scale suffix
    file          -- path of the SVG source
    sizes         -- list of requested scales as strings ('1', '2', '3')
    Returns a row [skel_filename, status, ...] with one status per scale.
    """
    filename = os.path.splitext(skel_filename)[0]
    scaling_file_dir = os.path.dirname(file)
    scaling_file_ext = '.png'
    sizes_with_suffix = [s + 'x' for s in sizes]
    ret = {}
    if not self.detect_rsvg():
        # Bug fix: previously returned None here, so the caller appended a
        # bare None to its result table (and the devnull handle leaked).
        return [skel_filename] + ['FAILED' for _ in sizes_with_suffix]
    with open(os.devnull, 'w') as out:
        for size in sizes:
            scaling = int(size)
            scaling_format = '@%sx' % size if size != '1' else ''
            scaling_file = os.path.join(scaling_file_dir, filename + scaling_format + scaling_file_ext)
            # Argv list with shell=False so paths containing spaces or shell
            # metacharacters are passed through safely.
            cmd = ['rsvg-convert', '-z', str(scaling), '-o', scaling_file, file]
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=out)
            ret[size + 'x'] = 'SUCCESS' if p.wait() == 0 else 'FAILED'
    result_list = [skel_filename]
    for size_with_suffix in sizes_with_suffix:
        result_list.append(ret.get(size_with_suffix, 'UNKNOWN'))
    return result_list
def is_in_ignore_dir(self, path, root):
    """Return True if *path* lies inside an ignored directory below *root*.

    Walks upward from *path* to *root*, matching each ancestor's extension
    against IGNORE_DIR_SUFFIX and its basename against IGNORE_DIR.
    """
    while path != root:
        ext = os.path.splitext(path)[1]
        basename = os.path.basename(path)
        # Bug fix: the original `continue`d without stepping `path` up when a
        # component had no extension, which looped forever (and also skipped
        # the basename check, which does not need an extension).
        if (ext and ext in IGNORE_DIR_SUFFIX) or basename in IGNORE_DIR:
            return True
        parent = os.path.dirname(path)
        if parent == path:
            # Reached the filesystem root without meeting *root*; give up
            # instead of spinning forever on a path outside *root*.
            break
        path = parent
    return False
def is_igore_file(self, filename):
    """Return True when *filename* starts with 'Default' or 'Icon'."""
    return re.search(r'^(Default|Icon)', filename) is not None
def find_pbxproj(self, path):
    """Load the project.pbxproj of an .xcodeproj bundle directly under *path*.

    Returns the loaded XcodeProject, or None when no .xcodeproj entry exists.
    """
    pbxproj_path = None
    for entry in os.listdir(path):
        if os.path.splitext(entry)[1] == '.xcodeproj':
            pbxproj_path = os.path.join(path, entry, 'project.pbxproj')
    if not pbxproj_path:
        return None
    return XcodeProject.Load(pbxproj_path)
def detect_rsvg(self):
    """Return True when the `rsvg-convert` command is available in the shell."""
    devnull = open(os.devnull, 'w')
    try:
        probe = subprocess.Popen('type rsvg-convert', shell=True,
                                 stdout=subprocess.PIPE, stderr=devnull)
        exit_code = probe.wait()
    finally:
        devnull.close()
    return exit_code == 0
|
{
"content_hash": "14c192b98698b3713af0db613c3b506c",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 170,
"avg_line_length": 39.593052109181144,
"alnum_prop": 0.5174229130107797,
"repo_name": "sodabiscuit/occult",
"id": "32e82c8ac5c791cd0afd4a979d5936010b717d3f",
"size": "15981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "occult/resource/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135596"
}
],
"symlink_target": ""
}
|
"""
Define Numpy Ufuncs as Models.
"""
import numpy as np
from astropy.modeling.core import Model
# Trigonometric/hyperbolic ufuncs and angle-unit conversions to wrap as models.
trig_ufuncs = ["sin", "cos", "tan", "arcsin", "arccos", "arctan", "arctan2",
               "hypot", "sinh", "cosh", "tanh", "arcsinh", "arccosh",
               "arctanh", "deg2rad", "rad2deg"]

# Arithmetic, exponential and logarithmic ufuncs to wrap as models.
math_ops = ["add", "subtract", "multiply", "logaddexp", "logaddexp2",
            "true_divide", "floor_divide", "negative", "positive", "power",
            "remainder", "fmod", "divmod", "absolute", "fabs", "rint",
            "exp", "exp2", "log", "log2", "log10", "expm1", "log1p", "sqrt",
            "square", "cbrt", "reciprocal", "divide", "mod"]

supported_ufuncs = trig_ufuncs + math_ops


# These names are just aliases for other ufunc objects
# in the numpy API. The alias name must occur later
# in the lists above.
alias_ufuncs = {
    "divide": "true_divide",
    "mod": "remainder",
}
class _NPUfuncModel(Model):
    """Base class for models generated at runtime from Numpy ufuncs."""

    # Flags these classes as dynamically created rather than statically
    # defined in this module.
    _is_dynamic = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
def _make_class_name(name):
""" Make a ufunc model class name from the name of the ufunc. """
return name[0].upper() + name[1:] + 'Ufunc'
def ufunc_model(name):
    """ Define a Model from a Numpy ufunc name."""
    func = getattr(np, name)
    n_in = func.nin
    n_out = func.nout

    # Unary ufuncs act element-wise on a single input and are therefore
    # separable; binary ufuncs mix two inputs and are not.
    if n_in == 1:
        is_separable = True

        def evaluate(self, x):
            return self.func(x)
    else:
        is_separable = False

        def evaluate(self, x, y):
            return self.func(x, y)

    members = {
        'n_inputs': n_in,
        'n_outputs': n_out,
        'func': func,
        'linear': False,
        'fittable': False,
        '_separable': is_separable,
        '_is_dynamic': True,
        'evaluate': evaluate,
    }
    model_cls = type(str(_make_class_name(name)), (_NPUfuncModel,), members)
    model_cls.__module__ = 'astropy.modeling.math_functions'
    return model_cls
__all__ = []

# Generate one model class per supported ufunc and publish it at module
# level; aliases reuse the class generated for their target ufunc.
for name in supported_ufuncs:
    if name in alias_ufuncs:
        # The target class already exists because alias targets appear
        # earlier in the lists than the alias names themselves.
        klass_name = _make_class_name(name)
        alias_klass_name = _make_class_name(alias_ufuncs[name])
        globals()[klass_name] = globals()[alias_klass_name]
        __all__.append(klass_name)
    else:
        m = ufunc_model(name)
        klass_name = m.__name__
        globals()[klass_name] = m
        __all__.append(klass_name)
|
{
"content_hash": "18aa37a4c7d0d10907049a65c514766c",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 76,
"avg_line_length": 28.048192771084338,
"alnum_prop": 0.5730240549828178,
"repo_name": "lpsinger/astropy",
"id": "c65eca32bc58b5dca17ce6f9ab3afc0c70c7c61d",
"size": "2392",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "astropy/modeling/math_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
def dedupe_and_sort(sequence, first=None, last=None):
    """
    Remove duplicates from `sequence` and partially order it.

    Items listed in `first` (and present in `sequence`) are placed at the
    front, keeping the relative order given by `first`; items listed in
    `last` are moved to the end in the order given by `last`.  Everything
    else keeps its original relative order.  Useful for settings such as
    `INSTALLED_APPS` and `MIDDLEWARE_CLASSES`.

    Returns a sequence of the same type as `sequence`.
    """
    first = first if first is not None else []
    last = last if last is not None else []
    # Items forced to the front, in `first` order.
    ordered = [item for item in first if item in sequence]
    # Remaining items in their original order, skipping duplicates and
    # anything that must be placed last.
    for candidate in sequence:
        if candidate not in ordered and candidate not in last:
            ordered.append(candidate)
    # Items forced to the end, in `last` order.
    ordered.extend(item for item in last if item in sequence)
    # Preserve the caller's sequence type (list, tuple, ...).
    return type(sequence)(ordered)
def _apply_slice(sequence, start, end):
return sequence[start:end]
def slice_sequences(sequences, start, end, apply_slice=None):
    """
    Performs a slice across multiple sequences.

    Useful when paginating across chained collections.

    :param sequences: an iterable of iterables, each nested iterable should contain
        a sequence and its size
    :param start: starting index to apply the slice from
    :param end: index that the slice should end at
    :param apply_slice: function that takes the sequence and start/end offsets, and
        returns the sliced sequence
    :return: a list of the items sliced from the sequences
    """
    if start < 0 or end < 0 or end <= start:
        raise ValueError('Start and/or End out of range. Start: %s. End: %s' % (start, end))
    # Number of items still needed to fill the [start, end) window.
    items_to_take = end - start
    # Total length of the sequences consumed so far.
    items_passed = 0
    collected_items = []
    if apply_slice is None:
        apply_slice = _apply_slice
    for sequence, count in sequences:
        # Offsets of the requested window relative to this sequence's start.
        offset_start = start - items_passed
        offset_end = end - items_passed
        if items_passed == start:
            # The window begins exactly at this sequence: take from its head.
            items = apply_slice(sequence, 0, items_to_take)
        elif 0 < offset_start < count:
            # The window begins somewhere inside this sequence.
            items = apply_slice(sequence, offset_start, offset_end)
        elif offset_start < 0:
            # The window began in an earlier sequence; continue from the head.
            items = apply_slice(sequence, 0, offset_end)
        else:
            # The window starts after this sequence ends; nothing to take.
            items = []
        items = list(items)
        collected_items += items
        items_to_take -= len(items)
        items_passed += count
        # Stop once the window is filled or we have walked past its end.
        if items_passed > end or items_to_take == 0:
            break
    return collected_items
|
{
"content_hash": "1eed99b45da583b59e7f61f64284af57",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 92,
"avg_line_length": 33.98823529411764,
"alnum_prop": 0.6531671858774662,
"repo_name": "ic-labs/django-icekit",
"id": "7988d173425e3f26cd84c872663c321c716b47b4",
"size": "2889",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "icekit/utils/sequences.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
}
|
"""
Test suite for the brute force module
"""
import unittest
import numpy
class TestSuite(unittest.TestCase):
    """
    Test suite for this module
    """

    def test_pivot_to_generator(self):
        """
        Verifies the conversion of a pivot vector to a generator
        """
        from numpy.random import rand
        from brute_force import pivot2generator
        pivot = rand(3)
        generator = pivot2generator(pivot)
        # The diagonal of an antisymmetric generator must vanish.
        for i in range(3):
            self.assertEqual(generator[i, i], 0)
        # Off-diagonal entries must be antisymmetric.
        for i in range(3):
            for j in range(i):
                self.assertEqual(generator[i, j], -generator[j, i])
        # The generator must annihilate its own pivot vector.
        temp = numpy.dot(generator, pivot)
        for i in range(3):
            self.assertEqual(temp[i], 0)

    def test_pivot_to_rotation(self):
        """
        Verifies the conversion of a pivot vector to a rotation matrix
        """
        from brute_force import pivot2rotation
        from numpy.random import rand
        pivot = rand(3)
        rot = pivot2rotation(pivot)
        # Orthogonality check: R^T R should equal the identity, so the
        # squared Frobenius norm of the residual should be ~0.
        temp = numpy.identity(3) - numpy.dot(numpy.transpose(rot), rot)
        res = 0
        for i in range(3):
            for j in range(3):
                res += temp[i, j]**2
        self.assertTrue(res < 1e-10)

    def test_circular_motion_t2ma(self):
        """
        Verifies the conversion of time to mean anomaly, for the secular case of circular motion
        """
        from brute_force import convert_time2mean_anomaly
        from numpy.random import rand
        for _ in range(1000):
            kop = {'GM':1.0,
                   'semilatus rectum':rand(),
                   'eccentricity':0,
                   'periapse time':0,
                   'pivot':[0, 0, 0]}
            time = rand()
            mean_anomaly = convert_time2mean_anomaly(time, kop)
            # For a circular orbit (e=0) the mean anomaly grows linearly
            # with time at the Keplerian rate sqrt(GM/p^3).
            timescale = 1/numpy.sqrt(kop['GM']/kop['semilatus rectum']**3)
            self.assertAlmostEqual(mean_anomaly, time/timescale)

    def test_circular_motion_ma2t(self):
        """
        Verifies the conversion of mean anomaly to time for the secular case of circular motion
        """
        from brute_force import convert_mean_anomaly2time
        from numpy.random import rand
        for _ in range(1000):
            kop = {'GM':1.0,
                   'semilatus rectum':rand(),
                   'eccentricity':0,
                   'periapse time':0,
                   'pivot':[0, 0, 0]}
            mean_anomaly = rand()
            time = convert_mean_anomaly2time(mean_anomaly, kop)
            # Inverse direction of the previous test: the same linear
            # relation must hold for e=0.
            timescale = 1/numpy.sqrt(kop['GM']/kop['semilatus rectum']**3)
            self.assertAlmostEqual(mean_anomaly, time/timescale)

    def test_ma2t_reciprocity(self):
        """
        Verifies the conversion back and forth between mean and true anomaly
        """
        from brute_force import convert_mean_anomaly2time
        from brute_force import convert_time2mean_anomaly
        from numpy.random import rand
        for _ in range(1000):
            kop = {'GM':1.0,
                   'semilatus rectum':rand(),
                   'eccentricity':rand(),
                   'periapse time':rand(),
                   'pivot':[0, 0, 0]}
            mean_anomaly = rand()
            # Round trip: mean anomaly -> time -> mean anomaly.
            time = convert_mean_anomaly2time(mean_anomaly, kop)
            reconstructed_mean_anomaly = convert_time2mean_anomaly(time, kop)
            self.assertAlmostEqual(mean_anomaly, reconstructed_mean_anomaly)

    def test_trajectory_consistency(self):
        """
        Verifies the velocity and position generated are consistent
        """
        from brute_force import generate_complete_trajectory
        from brute_force import mid_array
        from numpy.random import rand
        kop = {'GM':1,
               'semilatus rectum':0.5*(1+rand()),
               'eccentricity':rand(),
               'periapse time':rand(),
               'pivot':rand(3)}
        time_list = numpy.linspace(0, 10, 10000)
        ctr = generate_complete_trajectory(kop, time_list)
        # Finite-difference derivatives of the positions should match the
        # reported velocities evaluated at mid-points of the time grid.
        derivs = {
            'vx':numpy.diff(ctr['position'].T[0])/numpy.diff(time_list),
            'vy':numpy.diff(ctr['position'].T[1])/numpy.diff(time_list),
            'vz':numpy.diff(ctr['position'].T[2])/numpy.diff(time_list)}
        mid = {'vx':mid_array(ctr['velocity'].T[0]),
               'vy':mid_array(ctr['velocity'].T[1]),
               'vz':mid_array(ctr['velocity'].T[2])}
        for velc in mid:
            for itm1, itm2 in zip(mid[velc], derivs[velc]):
                self.assertAlmostEqual(itm1, itm2, places=4)

    def test_chi_square_eval(self):
        """
        Verifies that the minimum value of chi squared
        is only obtained for the original Keplerian orbit parameters
        """
        from brute_force import generate_astrometry
        from brute_force import eval_chi_2
        from numpy.random import rand
        ref_kop = {'GM':1.0,
                   'semilatus rectum':rand(),
                   'eccentricity':0.2*rand(),
                   'periapse time':rand(),
                   'pivot':5*rand(3)}
        time_list = numpy.linspace(0, 10, 100)
        astrometry = generate_astrometry(ref_kop, time_list)
        ref_chi_2 = eval_chi_2(ref_kop, astrometry)
        # Any perturbed parameter set must score a strictly larger chi^2
        # than the parameters that generated the data.
        for _ in range(10):
            kop = {'GM':1.0,
                   'semilatus rectum':rand(),
                   'eccentricity':0.2*rand(),
                   'periapse time':rand(),
                   'pivot':5*rand(3)}
            chi_2 = eval_chi_2(kop, astrometry)
            self.assertTrue(chi_2 > ref_chi_2)

    def test_estimate_init_params(self):
        """
        Verifies the estimation of the initial keplerian parameters
        """
        from brute_force import generate_astrometry
        from brute_force import estimate_initial_parameters
        from numpy.random import rand
        kop = {'GM':1.0,
               'semilatus rectum':1.0,
               'eccentricity':0.5,
               'periapse time':0.2,
               'pivot':rand(3)}
        time_list = numpy.linspace(0, 10, 10000)
        astrometry = generate_astrometry(kop, time_list)
        sol = estimate_initial_parameters(astrometry, grp=kop['GM'])
        self.assertAlmostEqual(sol['semilatus rectum'],
                               kop['semilatus rectum'],
                               places=4)
        self.assertAlmostEqual(sol['eccentricity'],
                               kop['eccentricity'],
                               places=4)
        self.assertAlmostEqual(sol['periapse time'],
                               kop['periapse time'],
                               places=4)
        for i in range(3):
            self.assertAlmostEqual(sol['pivot'][i],
                                   kop['pivot'][i],
                                   places=4)

    def test_brute_force_fit(self):
        """
        Verifies that the brute force fit reproduces the Keplerian parameters
        """
        from brute_force import generate_astrometry
        from brute_force import fit_parameters_bf
        # Fixed parameters (randomized values kept as comments) so the
        # tolerance places below stay meaningful run to run.
        kop = {'GM':1,
               'semilatus rectum':1.5,#rand(),
               'eccentricity':0.3,#0.9*rand(),
               'periapse time':0.1,#rand(),
               'pivot':numpy.array([0.1, -0.2, 0.3])}#rand(3)}
        time_list = numpy.linspace(0, 10, 100)
        astrometry = generate_astrometry(kop, time_list)
        sol = fit_parameters_bf(astrometry)
        self.assertAlmostEqual(sol['semilatus rectum'],
                               kop['semilatus rectum'],
                               places=1)
        self.assertAlmostEqual(sol['eccentricity'],
                               kop['eccentricity'],
                               places=2)
        self.assertAlmostEqual(sol['periapse time'],
                               kop['periapse time'],
                               places=2)

    def test_rotation_fit(self):
        """
        Verifies that the rotation based parameter fit reproduces the original Keplerian parameters
        """
        from brute_force import generate_astrometry
        from brute_force import fit_parameters_wr
        # kop = {'GM':1,
        #        'semilatus rectum':rand(),
        #        'eccentricity':0.9*rand(),
        #        'periapse time':rand(),
        #        'pivot':rand(3)}
        kop = {'GM':1,
               'semilatus rectum':1.5,#rand(),
               'eccentricity':0.3,#0.9*rand(),
               'periapse time':0.1,#rand(),
               'pivot':numpy.array([0.1, -0.2, 0.3])}#rand(3)}
        time_list = numpy.linspace(0, 10, 100)
        astrometry = generate_astrometry(kop, time_list)
        sol = fit_parameters_wr(astrometry)
        self.assertAlmostEqual(sol['semilatus rectum'],
                               kop['semilatus rectum'],
                               places=1)
        self.assertAlmostEqual(sol['eccentricity'],
                               kop['eccentricity'],
                               places=2)
        self.assertAlmostEqual(sol['periapse time'],
                               kop['periapse time'],
                               places=2)

    def test_best_cayley_rotation(self):
        """
        Verifies that the Cayley rotation function reproduces the pivot vector
        """
        from numpy.random import rand
        from brute_force import pivot2rotation
        from brute_force import calc_best_cayley_rotation
        # Rotate a random point cloud, then recover the pivot from the
        # point correspondences alone.
        x_list = rand(100, 3)
        pivot = rand(3)
        rotation = pivot2rotation(pivot)
        y_list = numpy.dot(rotation, x_list.T).T
        reproduced = calc_best_cayley_rotation(x_list, y_list)
        for itm1, itm2 in zip(pivot, reproduced):
            self.assertAlmostEqual(itm1, itm2)

    def test_calc_gl_fit(self):
        """
        Verifies the calibration of a linear
        transformation that aligns the theoretical and observational data
        """
        from numpy.random import rand
        from brute_force import generate_complete_trajectory
        from brute_force import pivot2rotation
        from brute_force import generate_astrometry
        from brute_force import calc_gl_position_block
        from brute_force import calc_gl_velocity_block
        kop = {'GM':1,
               'semilatus rectum':rand(),
               'eccentricity':rand(),
               'periapse time':rand(),
               'pivot':numpy.zeros(3)}
        time_list = numpy.linspace(0, 10, 1000)
        trj = generate_complete_trajectory(kop, time_list)
        # Trajectory generated with zero pivot; astrometry with a random
        # pivot, so the fitted blocks should recover the rotation entries.
        kop['pivot'] = rand(3)
        rotation = pivot2rotation(kop['pivot'])
        amd = generate_astrometry(kop, time_list)
        blocks = {'position':calc_gl_position_block(amd, trj),
                  'velocity':calc_gl_velocity_block(amd, trj)}
        for i in range(2):
            self.assertAlmostEqual(blocks['velocity'][i], rotation[2, i])
            for j in range(2):
                self.assertAlmostEqual(blocks['position'][i, j], rotation[i, j])

    def test_gen_pivot_reciprocity(self):
        """
        Verifies the conversion back and forth between anti symmetric generator and pivot vector
        """
        from numpy.random import rand
        from brute_force import pivot2generator
        from brute_force import generator2pivot
        pivot = rand(3)
        generator = pivot2generator(pivot)
        reproduced = generator2pivot(generator)
        for itm1, itm2 in zip(pivot, reproduced):
            self.assertAlmostEqual(itm1, itm2)

    def test_calc_pivot_from_gl_block(self):
        """
        Verifies the calculation of a pivot vector from a general linear transformation
        """
        from numpy.random import rand
        from brute_force import pivot2rotation
        from brute_force import calc_pivot_from_gl_block
        pivot = rand(3)
        rotation = pivot2rotation(pivot)
        # Projection onto the first two coordinates, mimicking the loss of
        # the line-of-sight component in astrometric data.
        proj = numpy.zeros((2, 3))
        proj[0, 0] = 1
        proj[1, 1] = 1
        block = numpy.dot(rotation, proj.T)
        reproduced = calc_pivot_from_gl_block(block)
        for itm1, itm2 in zip(pivot, reproduced):
            self.assertAlmostEqual(itm1, itm2)

    def test_rot_pivot_reciprocity(self):
        """
        Verifies the conversion back and forth between pivot vector and rotation matrix
        """
        from numpy.random import rand
        from brute_force import pivot2rotation
        from brute_force import rotation2pivot
        pivot = rand(3)
        rotation = pivot2rotation(pivot)
        reproduced = rotation2pivot(rotation)
        for itm1, itm2 in zip(pivot, reproduced):
            self.assertAlmostEqual(itm1, itm2)

    def test_fit_small_rotation(self):
        """
        Verifies the calculation of rotation matrix, in the limit of small rotation angles
        """
        from numpy.random import rand
        from brute_force import generate_complete_trajectory
        from brute_force import generate_astrometry
        from brute_force import fit_small_rotation
        kop = {'GM':1,
               'semilatus rectum':rand(),
               'eccentricity':rand(),
               'periapse time':rand(),
               'pivot':numpy.zeros(3)}
        time_list = numpy.linspace(0, 10, 1000)
        trj = generate_complete_trajectory(kop, time_list)
        # A tiny pivot keeps the rotation within the small-angle regime
        # that fit_small_rotation assumes.
        kop['pivot'] = 1e-4*rand(3)
        amd = generate_astrometry(kop, time_list)
        reproduced = fit_small_rotation(amd, trj)
        for itm1, itm2 in zip(kop['pivot'], reproduced):
            self.assertAlmostEqual(itm1, itm2)

    def test_fit_rotation(self):
        """
        Verifies the calculation of a rotation matrix, without assuming small rotation angles
        """
        from numpy.random import rand
        from brute_force import generate_complete_trajectory
        from brute_force import generate_astrometry
        from brute_force import fit_rotation_to_astrometry
        kop = {'GM':1,
               'semilatus rectum':rand(),
               'eccentricity':rand(),
               'periapse time':rand(),
               'pivot':numpy.zeros(3)}
        time_list = numpy.linspace(0, 10, 1000)
        trj = generate_complete_trajectory(kop, time_list)
        kop['pivot'] = rand(3)
        amd = generate_astrometry(kop, time_list)
        # n_itr controls the number of refinement iterations of the fit.
        reproduced = fit_rotation_to_astrometry(amd, trj, n_itr=3)
        for itm1, itm2 in zip(kop['pivot'], reproduced):
            self.assertAlmostEqual(itm1, itm2)
# Allow executing the suite directly: python <this file>.
if __name__ == '__main__':
    unittest.main()
|
{
"content_hash": "44c588b0d588b9b5ecbd472efd3d6c6d",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 99,
"avg_line_length": 35.182038834951456,
"alnum_prop": 0.5524663677130045,
"repo_name": "bolverk/kobra",
"id": "5ee7437d43d998218eda4ab3e5b9369e2137b0c1",
"size": "14495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_brute_force.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60771"
}
],
"symlink_target": ""
}
|
import functools
from typing import Any, Mapping
from absl.testing import parameterized
import numpy as np
import scipy.stats
import tensorflow as tf
from private_linear_compression import count_sketching_utils
def perform_encode_decode(gradient: tf.Tensor, gradient_length: tf.Tensor,
                          kwargs: Mapping[str, Any], method: str) -> tf.Tensor:
  """Returns the reconstructed `gradient_estimate` from the `gradient`.

  Encodes `gradient` into a count sketch with `kwargs` and decodes it back
  to a vector of `gradient_length` using the given decode `method`.
  """
  sketch = count_sketching_utils.encode(
      tf.convert_to_tensor(gradient), **kwargs)
  # decode takes the same hashing kwargs minus the sketch geometry. Build a
  # filtered copy instead of pop()ing, so the caller's mapping — declared as
  # a read-only Mapping — is not mutated as a side effect.
  decode_kwargs = {
      k: v for k, v in kwargs.items() if k not in ('length', 'width')
  }
  return count_sketching_utils.decode(
      sketch, gradient_length, **decode_kwargs, method=method)
class RandomnessTest(parameterized.TestCase, tf.test.TestCase):
  """Statistical checks on the hash-derived indices and signs."""

  def setUp(self):
    super().setUp()
    # Rebuilt for every test, so tests may mutate this dict freely.
    self.default_kwargs = {
        'width': 25,
        'gradient_length': 1000,
        'gradient_dtype': tf.float16,
        'sign_seeds': tf.constant([0, 0], tf.int32),
        'index_seeds': tf.constant([1, 1], tf.int32),
    }

  @parameterized.named_parameters(('hash_id_0', 0), ('hash_id_1', 1))
  def test_indices_are_uniform(self, hash_id):
    indices, _ = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id)
    _, pval = scipy.stats.kstest(indices, 'uniform')
    # NOTE(review): a small KS p-value *rejects* the uniform hypothesis, so
    # asserting pval < 1e-5 appears to contradict the message below —
    # confirm whether assertGreater was intended.
    self.assertLess(pval, 1e-5, 'Indices should follow a uniform distribution.')

  @parameterized.named_parameters(('hash_id_0', 0), ('hash_id_1', 1))
  def test_signs_are_uniform(self, hash_id):
    _, signs = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id)
    _, pval = scipy.stats.kstest(signs, 'uniform')
    # NOTE(review): same concern as above — small p-value means "not
    # uniform", which conflicts with the assertion message.
    self.assertLess(pval, 1e-5, 'Signs should follow a uniform distribution.')

  @parameterized.named_parameters(('hash_id_0', 0), ('hash_id_1', 1))
  def test_signs_are_signs(self, hash_id):
    _, signs = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id)
    # Every sign must be exactly -1 or +1.
    self.assertAllInSet(signs, [-1, 1])

  @parameterized.named_parameters(('hash_id_0', 0, 1), ('hash_id_1', 1, 2))
  def test_hashes_independent_indices(self, hash_id1, hash_id2):
    # Different hash ids must produce different index mappings.
    indices_hash_1, _ = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id1)
    indices_hash_2, _ = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id2)
    self.assertNotAllEqual(indices_hash_1, indices_hash_2)

  @parameterized.named_parameters(('hash_id_0', 0, 1), ('hash_id_1', 1, 2))
  def test_hashes_independent_signs(self, hash_id1, hash_id2):
    # Different hash ids must produce different sign mappings.
    _, signs_hash_1 = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id1)
    _, signs_hash_2 = count_sketching_utils._get_hash_mapping(
        **self.default_kwargs, hash_id=hash_id2)
    self.assertNotAllEqual(signs_hash_1, signs_hash_2)
class GradientVectorCountSketchEncodeTest(parameterized.TestCase,
                                          tf.test.TestCase):
  """Argument validation and randomness tests for `count_sketching_utils.encode`."""

  def setUp(self):
    super().setUp()
    # Rebuilt per test, so individual tests may mutate or pop entries.
    self.default_sketching_kwargs = {
        'length': tf.constant(2, dtype=tf.int32),
        'width': tf.constant(5, dtype=tf.int32),
        'index_seeds': tf.constant([1, 1], dtype=tf.int32),
        'sign_seeds': tf.constant([2, 2], dtype=tf.int32),
    }

  def test_encode_requires_length(self):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(0, dtype=tf.int32)
    with self.assertRaisesRegex(ValueError, '.*[lL]ength.*positive integer.*'):
      count_sketching_utils.encode(tf.constant(1.0), **kwargs)

  def test_encode_length_required_scalar(self):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant([1, 1], dtype=tf.int32)
    with self.assertRaisesRegex(ValueError, '.*[lL]ength.*scalar.*'):
      count_sketching_utils.encode(tf.constant(1.0), **kwargs)

  def test_encode_requires_width(self):
    kwargs = self.default_sketching_kwargs
    kwargs['width'] = tf.constant(0, dtype=tf.int32)
    with self.assertRaisesRegex(ValueError, '.*[wW]idth.*positive integer.*'):
      count_sketching_utils.encode(tf.constant(1.0), **kwargs)

  def test_encode_width_required_scalar(self):
    kwargs = self.default_sketching_kwargs
    kwargs['width'] = tf.constant([1, 1], dtype=tf.int32)
    with self.assertRaisesRegex(ValueError, '.*[wW]idth.*scalar.*'):
      count_sketching_utils.encode(tf.constant(1.0), **kwargs)

  def test_encode_parallel_iterations_error(self):
    kwargs = self.default_sketching_kwargs
    gradient = tf.constant([1.0, 2.0], dtype=tf.float16)
    with self.assertRaisesRegex(ValueError,
                                '.*parallel_iterations.*must be >= 0*'):
      count_sketching_utils.encode(gradient, **kwargs, parallel_iterations=-1)

  def test_encode_gradient_dimension_error(self):
    kwargs = self.default_sketching_kwargs
    # Rank-2 gradients are rejected; encode only takes vectors or scalars.
    gradient = tf.constant([[1.0, 2.0]], dtype=tf.float16)
    with self.assertRaisesRegex(ValueError,
                                '.*[gG]radient.*vector or scalar.*'):
      count_sketching_utils.encode(gradient, **kwargs)

  def test_encode_output_type(self):
    kwargs = self.default_sketching_kwargs
    rng = np.random.default_rng(seed=8)
    dtype = tf.float16
    gradient = tf.constant(rng.uniform(-5.0, 5.0, size=25), dtype=dtype)
    sketch = count_sketching_utils.encode(gradient, **kwargs)
    # The sketch must be a length x width matrix carrying the gradient dtype.
    self.assertAllEqual(sketch.shape, [kwargs['length'], kwargs['width']])
    self.assertDTypeEqual(sketch, dtype.as_numpy_dtype())

  # Below, we test that the encode function operates as desired with respect to
  # the underlying randomness. Given this test and the next operate as desired,
  # the decode methods can be tested separately.
  @parameterized.named_parameters([('seed_2', 2), ('seed_3', 3), ('seed_8', 8)])
  def test_encode_rows_different(self, seed):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(2, dtype=tf.int32)
    # Perturb the second repeat's seeds so the two rows hash differently.
    kwargs['sign_seeds'] += [0, seed]
    kwargs['index_seeds'] += [0, seed]
    rng = np.random.default_rng(seed=2)
    dtype = tf.float16
    gradient = tf.constant(rng.uniform(-5.0, 5.0, size=2), dtype=dtype)
    sketch = count_sketching_utils.encode(gradient, **kwargs)
    self.assertNotAllEqual(
        sketch[0], sketch[1],
        'Rows (repeats) of the sketch must be pair-wise independent.')

  @parameterized.named_parameters([('seed_2', 2), ('seed_3', 3), ('seed_8', 8)])
  def test_encode_sketch_follows_uniform_distribution(self, seed):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(100, tf.int32)
    kwargs['width'] = tf.constant(100, tf.int32)
    rng = np.random.default_rng(seed=seed)
    gradient = tf.constant(rng.uniform(-5.0, 5.0, size=[1]), dtype=tf.float16)
    sketch = count_sketching_utils.encode(gradient, **kwargs)
    _, nonzero_col_indices = np.nonzero(sketch)
    _, pval = scipy.stats.kstest(nonzero_col_indices, 'uniform')
    # NOTE(review): a small KS p-value *rejects* uniformity; this assertion's
    # direction looks inconsistent with its message — confirm intent.
    self.assertLess(
        pval, 1e-5,
        'Values across hashes should follow a uniform distribution.')

  @parameterized.named_parameters([('seed_2', 2), ('seed_3', 3), ('seed_8', 8)])
  def test_encode_sketch_uses_signs_correctly(self, seed):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(100, tf.int32)
    kwargs['width'] = tf.constant(100, tf.int32)
    sketch_length, sketch_width = kwargs.pop('length'), kwargs.pop('width')
    rng = np.random.default_rng(seed=seed)
    dtype = tf.float16
    gradient = tf.constant(rng.uniform(-5.0, 5.0, size=[1]), dtype=dtype)
    sketch = count_sketching_utils.encode(gradient, sketch_length, sketch_width,
                                          **kwargs)
    nonzero_row_indices, nonzero_col_indices = np.nonzero(sketch)
    sketch_nonzero = sketch.numpy()[nonzero_row_indices, nonzero_col_indices]
    sketch_length, sketch_width = sketch_length.numpy(), sketch_width.numpy()
    # The expected per-repeat nonzero magnitude is the gradient value
    # divided by the number of repeats (sketch length).
    expected_sketch_value = gradient.numpy() / tf.cast(sketch_length,
                                                       tf.float16).numpy()
    self.assertAllClose(
        tf.abs(sketch_nonzero),
        tf.ones([sketch_width], dtype) * tf.abs(expected_sketch_value),
        msg='Count sketch with a single element contained values other than '
        '+/-gradient.')
    self.assertEqual((sketch_nonzero == expected_sketch_value).any(), 1,
                     'Sketch should contain positive gradient value.')
    self.assertEqual((sketch_nonzero == -expected_sketch_value).any(), 1,
                     'Sketch should contain negative gradient value.')

  @parameterized.named_parameters([('seed_2', 2), ('seed_3', 3), ('seed_8', 8)])
  def test_encode_sketch_encodes_single_element_per_row(self, seed):
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(100, tf.int32)
    kwargs['width'] = tf.constant(100, tf.int32)
    sketch_length = kwargs.pop('length')
    rng = np.random.default_rng(seed=seed)
    gradient = tf.constant(rng.uniform(-5.0, 5.0, size=[1]), dtype=tf.float16)
    sketch = count_sketching_utils.encode(gradient, sketch_length, **kwargs)
    nonzero_row_indices, _ = np.nonzero(sketch)
    # A single encoded element yields exactly one nonzero per repeat row.
    self.assertAllEqual(
        nonzero_row_indices, np.arange(sketch_length),
        'count sketch with a single element should constain one nonzero entry '
        'per row.')
class GradientVectorCountSketchDecodeTest(parameterized.TestCase,
                                          tf.test.TestCase):
  """Argument validation and thresholding tests for `count_sketching_utils.decode`."""

  def setUp(self):
    super().setUp()
    self.default_sketching_kwargs = {
        'index_seeds': tf.constant([1, 1], dtype=tf.int32),
        'sign_seeds': tf.constant([1, 1], dtype=tf.int32),
    }

  @parameterized.named_parameters([('method_mean', 'mean'),
                                   ('method_median', 'median')])
  def test_decode_threshold_error(self, method):
    kwargs = self.default_sketching_kwargs
    sketch = tf.zeros((2, 5))
    # Negative thresholds are rejected regardless of decode method.
    with self.assertRaisesRegex(ValueError, '.*[tT]hreshold.*positive float.*'):
      count_sketching_utils.decode(
          sketch, 10, **kwargs, method=method, threshold=-1e-6)

  @parameterized.named_parameters([
      ('rank_0_method_mean', 0, count_sketching_utils.DecodeMethod.MEAN),
      ('rank_1_method_mean', 1, count_sketching_utils.DecodeMethod.MEAN),
      ('rank_3_method_mean', 3, count_sketching_utils.DecodeMethod.MEAN),
      ('rank_0_method_median', 0, count_sketching_utils.DecodeMethod.MEDIAN),
      ('rank_1_method_median', 1, count_sketching_utils.DecodeMethod.MEDIAN),
      ('rank_3_method_median', 3, count_sketching_utils.DecodeMethod.MEDIAN),
  ])
  def test_decode_sketch_shape_error(self, rank, method):
    kwargs = self.default_sketching_kwargs
    # Any rank other than 2 (a length x width matrix) must be rejected.
    sketch_shape = [1] * rank
    sketch = tf.zeros(sketch_shape)
    with self.assertRaises(ValueError):
      count_sketching_utils.decode(sketch, 10, **kwargs, method=method)

  @parameterized.named_parameters([
      ('method_mean', count_sketching_utils.DecodeMethod.MEAN),
      ('method_median', count_sketching_utils.DecodeMethod.MEDIAN),
  ])
  def test_decode_threshold_nearly_matches_no_threshold(self, method):
    kwargs = self.default_sketching_kwargs
    sketch_width = 25
    sketch_length = 10
    rng = np.random.default_rng(seed=8)
    sketch = rng.uniform(
        -5.0, 5.0, size=[sketch_length, sketch_width]).astype(np.float32)
    gradient_length = 100
    threshold = np.median(np.abs(sketch))
    gradient_estimate_thresholded = count_sketching_utils.decode(
        sketch, gradient_length, **kwargs, method=method, threshold=threshold)
    gradient_estimate_not_thresholded = count_sketching_utils.decode(
        sketch, gradient_length, **kwargs, method=method)
    # Zeroing sub-threshold entries of the unthresholded estimate should
    # reproduce the thresholded decode exactly.
    exact_thresholded = np.copy(gradient_estimate_not_thresholded.numpy())
    exact_thresholded[np.abs(exact_thresholded) < threshold] = 0.0
    self.assertAllClose(
        gradient_estimate_thresholded,
        exact_thresholded,
        msg='Incorrect values were thresholded.')
class GradientVectorCountSketchEncodeDecodeIntegrationTest(
    parameterized.TestCase, tf.test.TestCase):
  """Round-trip (encode -> decode) tests for the count-sketch utilities."""

  def setUp(self):
    super().setUp()
    # Hash seeds shared by all tests; two entries -> two independent hashes.
    self.default_sketching_kwargs = {
        'index_seeds': tf.constant([1, 1], dtype=tf.int32),
        'sign_seeds': tf.constant([1, 1], dtype=tf.int32),
    }

  @parameterized.product(
      seed=[2, 8],
      method=[
          count_sketching_utils.DecodeMethod.MEAN,
          count_sketching_utils.DecodeMethod.MEDIAN
      ],
      gradient_length=[10, 20],
  )
  def test_encode_decode_no_collision(self, seed, method, gradient_length):
    """With a very wide sketch (no collisions) reconstruction is exact."""
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(1, tf.int32)
    # Width 100x the gradient length makes hash collisions very unlikely.
    kwargs['width'] = tf.constant(gradient_length * 100, tf.int32)
    rng = np.random.default_rng(seed=seed)
    n_hot = int(gradient_length * 0.2)
    indices = rng.choice(np.arange(gradient_length), size=[n_hot, 1])
    updates = tf.ones([n_hot])
    gradient = tf.scatter_nd(indices, updates, shape=[gradient_length])
    gradient_estimate = perform_encode_decode(gradient, gradient_length, kwargs,
                                              method)
    self.assertAllClose(
        gradient_estimate,
        gradient,
        msg=f'Exact gradient reconstruction using method: `{method}` failed.')

  @parameterized.named_parameters([
      ('seed_2_length_10', 2, 10),
      ('seed_2_length_20', 2, 20),
      ('seed_3_length_10', 3, 10),
      ('seed_3_length_20', 3, 20),
      ('seed_8_length_10', 8, 10),
      ('seed_8_length_20', 8, 20),
  ])
  def test_encode_decode_approximate_under_collision_mean(
      self, seed, gradient_length):
    """Under collisions, mean decoding approximates a dense gradient."""
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(gradient_length * 30, tf.int32)
    kwargs['width'] = tf.constant(gradient_length * 5, tf.int32)
    rng = np.random.default_rng(seed=seed)
    gradient = tf.constant(rng.normal(size=[gradient_length]))
    gradient_estimate = perform_encode_decode(
        gradient, gradient_length, kwargs,
        count_sketching_utils.DecodeMethod.MEAN)
    self.assertAllClose(
        gradient_estimate,
        gradient,
        atol=0.15,
        msg='Approximate gradient reconstruction using the `mean` method did '
        'not approach the original gradient.')

  @parameterized.named_parameters([
      ('seed_2_length_21', 2, 21),
      ('seed_2_length_20', 2, 20),
      ('seed_3_length_21', 3, 21),
      ('seed_3_length_20', 3, 20),
      ('seed_8_length_21', 8, 21),
      ('seed_8_length_20', 8, 20),
  ])
  def test_encode_decode_approximate_under_collision_median(
      self, seed, gradient_length):
    """Under collisions, median decoding approximates a sparse gradient."""
    kwargs = self.default_sketching_kwargs
    kwargs['length'] = tf.constant(gradient_length, tf.int32)
    kwargs['width'] = tf.constant(gradient_length, tf.int32)
    rng = np.random.default_rng(seed=seed)
    n_hot = int(0.3 * gradient_length)
    indices = rng.choice(np.arange(gradient_length), size=[n_hot, 1])
    updates = rng.normal(size=[n_hot])
    gradient = tf.scatter_nd(indices, updates, shape=[gradient_length])
    gradient_estimate = perform_encode_decode(
        gradient, gradient_length, kwargs,
        count_sketching_utils.DecodeMethod.MEDIAN)
    self.assertAllClose(
        gradient_estimate,
        gradient,
        atol=0.02,
        msg='Approximate gradient reconstruction using the `median` method did '
        'not approach the original gradient.')

  @parameterized.product(
      chunk_size=[1, 5, 10],
      gradient_length=[10, 20],
      sketch_length=[5, 6],
      seed=[3, 8])
  def test_encode_decode_batch_esimate_exact_correct_no_collision(
      self, chunk_size, gradient_length, sketch_length, seed):
    """_get_batch_estimate recovers the first chunk when collisions are rare."""
    kwargs = self.default_sketching_kwargs
    sketch_length = tf.constant(sketch_length, tf.int32)
    kwargs['length'] = sketch_length
    kwargs['width'] = tf.constant(int(gradient_length * 5), tf.int32)
    chunk_size = tf.cast(chunk_size, tf.int32)
    rng = np.random.default_rng(seed=seed)
    n_hot = int(gradient_length * 0.3)
    indices = rng.choice(np.arange(gradient_length), size=[n_hot, 1])
    updates = tf.ones([n_hot])
    gradient = tf.scatter_nd(indices, updates, shape=[gradient_length])
    sketch = count_sketching_utils.encode(gradient, **kwargs)
    # Bind everything except the batch id, mirroring internal decode usage.
    batch_estimate = functools.partial(
        count_sketching_utils._get_batch_estimate,
        sketch=sketch,
        chunk_size=chunk_size,
        padded_gradient_length=chunk_size,
        index_seeds=kwargs['index_seeds'],
        sign_seeds=kwargs['sign_seeds'])
    batch_id = tf.constant(0, tf.int32)
    gradient_estimate = batch_estimate(batch_id)
    self.assertAllClose(
        gradient_estimate,
        gradient[:chunk_size],
        atol=0.02,
        msg='Approximate parital gradient reconstruction using '
        '`_get_batch_estimate` did not approach the original gradient.')

  @parameterized.product(
      chunk_size=[1, 3, 5], sketch_length=[10, 11, 5], seed=[5, 8])
  def test_encode_decode_batch_esimate_approximately_correct(
      self, chunk_size, sketch_length, seed):
    """_get_batch_estimate approximates the first chunk under collisions."""
    kwargs = self.default_sketching_kwargs
    gradient_length = 10
    kwargs['length'] = tf.constant(sketch_length, tf.int32)
    kwargs['width'] = tf.constant(gradient_length * 5, tf.int32)
    chunk_size = tf.cast(chunk_size, tf.int32)
    rng = np.random.default_rng(seed=seed)
    n_hot = int(0.3 * gradient_length)
    indices = rng.choice(np.arange(gradient_length), size=[n_hot, 1])
    updates = rng.normal(size=[n_hot])
    gradient = tf.scatter_nd(indices, updates, shape=[gradient_length])
    sketch = count_sketching_utils.encode(gradient, **kwargs)
    # Bind everything except the batch id, mirroring internal decode usage.
    batch_estimate = functools.partial(
        count_sketching_utils._get_batch_estimate,
        sketch=sketch,
        chunk_size=chunk_size,
        padded_gradient_length=chunk_size,
        index_seeds=kwargs['index_seeds'],
        sign_seeds=kwargs['sign_seeds'])
    batch_id = tf.constant(0, tf.int32)
    gradient_estimate = batch_estimate(batch_id)
    self.assertAllClose(
        gradient_estimate,
        gradient[:chunk_size],
        atol=0.02,
        msg='Approximate parital gradient reconstruction using '
        '`_get_batch_estimate` did not approach the original gradient.')
if __name__ == '__main__':
  tf.test.main()  # Discover and run all test cases in this module.
|
{
"content_hash": "087287012d545adcfa905bee94aad0e9",
"timestamp": "",
"source": "github",
"line_count": 453,
"max_line_length": 80,
"avg_line_length": 40.21412803532009,
"alnum_prop": 0.6556513147060438,
"repo_name": "google-research/federated",
"id": "73264bb41f3fe8eaa13646e4668b5304824834b2",
"size": "18793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "private_linear_compression/count_sketching_utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "76424"
},
{
"name": "Python",
"bytes": "4122952"
},
{
"name": "Shell",
"bytes": "7089"
},
{
"name": "Starlark",
"bytes": "97189"
}
],
"symlink_target": ""
}
|
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class coreos_vagrant(ShutItModule):
	"""ShutIt module that provisions a three-node CoreOS cluster locally
	using Vagrant + VirtualBox and leaves the user ssh'd into core-01.
	See http://shutit.tk for the ShutIt API used here.
	"""

	def build(self, shutit):
		"""Interactive build: checks prerequisites (memory, virtualbox, git,
		curl, vagrant), clones coreos-vagrant, configures an etcd discovery
		token plus a 3-instance cluster, boots it and logs into core-01.
		Returns True on success (ShutIt convention).
		"""
		vagrant_dir = shutit.cfg[self.module_id]['vagrant_dir']
		# Offer to destroy any coreos-vagrant VM that is already running.
		if shutit.send_and_get_output('''VBoxManage list runningvms | grep coreos-vagrant | grep -v 'not created' | awk '{print $1}' ''') != '':
			if shutit.get_input('Clean up your VMs first, as there appears to be a running coreos-vagrant VM in existence. Want me to clean them up for you (y/n)?',boolean=True):
				shutit.multisend('(cd coreos-vagrant && vagrant destroy)',{'y/N':'y'})
		# Three VMs need roughly 3.5GB of free memory.
		memavail = shutit.get_memory()
		if memavail < 3500000:
			if not shutit.get_input('Memory available appears to be: ' + str(memavail) + 'kB, need 3500000kB available to run.\nIf you want to continue, input "y", else "n"',boolean=True):
				shutit.fail('insufficient memory')
		shutit.send('cd')
		# Ensure the required host commands exist, offering to install any
		# that are missing with a user-supplied package-manager command.
		for c in ('virtualbox','git','curl'):
			if not shutit.command_available(c):
				if shutit.get_input(c + ' apparently not installed. Would you like me to install it for you?',boolean=True):
					pw = shutit.get_input('Please input your sudo password in case it is needed.',ispass=True)
					command = shutit.get_input('Please input your install command, eg "apt-get install -y", or "yum install -y"')
					shutit.multisend('sudo ' + command + ' ' + c,{'assword':pw})
		# NOTE(review): the .deb download/install assumes a Debian-family
		# host with dpkg available - confirm before relying on this path.
		if not shutit.command_available('vagrant'):
			shutit.send('wget -qO- https://dl.bintray.com/mitchellh/vagrant/vagrant_1.7.2_x86_64.deb > /tmp/vagrant.deb',note='Downloading vagrant and installing')
			shutit.send('dpkg -i /tmp/vagrant.deb')
			shutit.send('rm /tmp/vagrant.deb')
		shutit.send('mkdir -p ' + vagrant_dir)
		shutit.send('cd ' + vagrant_dir)
		shutit.send('cd')
		# Always start from a fresh checkout of the upstream repo.
		shutit.send('rm -rf coreos-vagrant')
		shutit.send('git clone https://github.com/coreos/coreos-vagrant.git',note='Get the coreos-vagrant github repo')
		shutit.send('cd coreos-vagrant')
		# Get coreos id discovery token
		token = shutit.send_and_get_output('curl https://discovery.etcd.io/new')
		shutit.send('cp user-data.sample user-data')
		shutit.replace_text(''' discovery: ''' + token,'user-data','.*#discovery:.*')
		# update with token
		shutit.send('cp config.rb.sample config.rb')
		# Run a three-node cluster.
		shutit.replace_text('$num_instances=3','config.rb','^.num_instances=.*$')
		shutit.send('vagrant up')
		# Poll until every instance reports 'running'.
		shutit.send_until('vagrant status','core-01.*running')
		shutit.send_until('vagrant status','core-02.*running')
		shutit.send_until('vagrant status','core-03.*running')
		shutit.login(command='vagrant ssh core-01')
		shutit.pause_point('You are now in your coreos cluster! Enjoy!\n\nIf you want to start again, ctrl-d once to get out of this coreos machine, run "vagrant destroy" and then re-run.')
		shutit.logout()
		return True

	def get_config(self, shutit):
		"""Declare module configuration: 'vagrant_dir', the working directory
		used for the checkout (default /tmp/vagrant_dir).
		"""
		shutit.get_config(self.module_id, 'vagrant_dir', '/tmp/vagrant_dir')
		return True

	def test(self, shutit):
		# For test cycle part of the ShutIt build.
		return True

	def finalize(self, shutit):
		# Any cleanup required at the end.
		return True

	def is_installed(self, shutit):
		# Always report not installed so the cluster is rebuilt each run.
		return False
def module():
	"""Factory used by ShutIt to instantiate this module."""
	return coreos_vagrant(
		'shutit.coreos_vagrant.coreos_vagrant.coreos_vagrant', 1308628950.00,
		description='',
		maintainer='',
		# Bug fix: ('bash') is just the string 'bash' (which iterates as
		# characters); a one-element tuple needs a trailing comma.
		delivery_methods = ('bash',),
		depends=['shutit.tk.setup']
	)
|
{
"content_hash": "e0da22cd565494becefd375cb5a1bce1",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 183,
"avg_line_length": 52.580882352941174,
"alnum_prop": 0.6306810236330583,
"repo_name": "ianmiell/shutit-coreos-vagrant",
"id": "314907853bd5f96a24b95439a12d2f30afa54c93",
"size": "7151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coreos_vagrant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7151"
},
{
"name": "Shell",
"bytes": "336"
}
],
"symlink_target": ""
}
|
"""Shared helper functions for BigQuery API classes."""
import base64
from collections import OrderedDict
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _date_from_iso8601_date
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _RFC3339_NO_FRACTION
from google.cloud._helpers import _time_from_iso8601_time_naive
from google.cloud._helpers import _to_bytes
def _not_null(value, field):
"""Check whether 'value' should be coerced to 'field' type."""
return value is not None or field.mode != 'NULLABLE'
def _int_from_json(value, field):
"""Coerce 'value' to an int, if set or not nullable."""
if _not_null(value, field):
return int(value)
def _float_from_json(value, field):
"""Coerce 'value' to a float, if set or not nullable."""
if _not_null(value, field):
return float(value)
def _bool_from_json(value, field):
"""Coerce 'value' to a bool, if set or not nullable."""
if _not_null(value, field):
return value.lower() in ['t', 'true', '1']
def _string_from_json(value, _):
"""NOOP string -> string coercion"""
return value
def _bytes_from_json(value, field):
    """Base64-decode 'value' to bytes, if set or not nullable; else None.

    Bug fix: ``base64.decodestring`` is a deprecated alias removed in
    Python 3.9; ``b64decode`` is the supported equivalent (it likewise
    ignores embedded newlines by default).
    """
    if _not_null(value, field):
        return base64.b64decode(_to_bytes(value))
def _timestamp_from_json(value, field):
    """Coerce 'value' to a UTC datetime, if set or not nullable; else None."""
    if value is None and field.mode == 'NULLABLE':
        return None
    # The API sends seconds as a float with microsecond precision, in UTC;
    # scale to microseconds for the shared helper.
    return _datetime_from_microseconds(1e6 * float(value))
def _datetime_from_json(value, field):
    """Coerce 'value' to a naive datetime, if set or not nullable; else None."""
    if value is None and field.mode == 'NULLABLE':
        return None
    # Wire format is 'YYYY-MM-DDTHH:MM:SS' (RFC 3339, no fraction).
    return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)
def _date_from_json(value, field):
    """Coerce 'value' to a date, if set or not nullable; else None."""
    if value is None and field.mode == 'NULLABLE':
        return None
    # Wire format is 'YYYY-MM-DD'.
    return _date_from_iso8601_date(value)
def _time_from_json(value, field):
    """Coerce 'value' to a time, if set or not nullable; else None."""
    if value is None and field.mode == 'NULLABLE':
        return None
    # Wire format is 'HH:MM:SS'.
    return _time_from_iso8601_time_naive(value)
def _record_from_json(value, field):
    """Coerce 'value' to a dict keyed by sub-field name, if set or not
    nullable; else None.  Each cell is converted with the parser registered
    for its sub-field type; REPEATED sub-fields become lists.
    """
    if value is None and field.mode == 'NULLABLE':
        return None
    record = {}
    for subfield, cell in zip(field.fields, value['f']):
        convert = _CELLDATA_FROM_JSON[subfield.field_type]
        if subfield.mode == 'REPEATED':
            converted = [convert(item['v'], subfield) for item in cell['v']]
        else:
            converted = convert(cell['v'], subfield)
        record[subfield.name] = converted
    return record
# Map BigQuery type name -> converter(cell_value, field) used to coerce
# JSON wire values into Python objects when reading rows / parameters.
# Legacy and standard-SQL spellings (e.g. INTEGER / INT64) share a parser.
_CELLDATA_FROM_JSON = {
    'INTEGER': _int_from_json,
    'INT64': _int_from_json,
    'FLOAT': _float_from_json,
    'FLOAT64': _float_from_json,
    'BOOLEAN': _bool_from_json,
    'BOOL': _bool_from_json,
    'STRING': _string_from_json,
    'BYTES': _bytes_from_json,
    'TIMESTAMP': _timestamp_from_json,
    'DATETIME': _datetime_from_json,
    'DATE': _date_from_json,
    'TIME': _time_from_json,
    'RECORD': _record_from_json,
}
def _int_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, int):
value = str(value)
return value
def _float_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
return value
def _bool_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, bool):
value = 'true' if value else 'false'
return value
def _bytes_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, bytes):
value = base64.encodestring(value)
return value
def _timestamp_to_json(value):
    """Coerce 'value' to a JSON-compatible representation.

    Datetimes are rendered as 'YYYY-MM-DD HH:MM:SS+00:00' in UTC; any
    other value passes through unchanged.
    """
    if not isinstance(value, datetime.datetime):
        return value
    if value.tzinfo not in (None, UTC):
        # Normalize to UTC and drop the tzinfo before formatting.
        value = value.replace(tzinfo=None) - value.utcoffset()
    return '%s %s+00:00' % (
        value.date().isoformat(), value.time().isoformat())
def _datetime_to_json(value):
    """Coerce 'value' to a JSON-compatible representation.

    Datetimes are rendered as RFC 3339 strings; any other value passes
    through unchanged.
    """
    if not isinstance(value, datetime.datetime):
        return value
    return _datetime_to_rfc3339(value)
def _date_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, datetime.date):
value = value.isoformat()
return value
def _time_to_json(value):
"""Coerce 'value' to an JSON-compatible representation."""
if isinstance(value, datetime.time):
value = value.isoformat()
return value
# Map BigQuery type name -> serializer(value) used to render Python scalars
# for the JSON API.  STRING has no entry because strings need no coercion;
# callers use .get() and pass the value through when no serializer exists.
_SCALAR_VALUE_TO_JSON = {
    'INTEGER': _int_to_json,
    'INT64': _int_to_json,
    'FLOAT': _float_to_json,
    'FLOAT64': _float_to_json,
    'BOOLEAN': _bool_to_json,
    'BOOL': _bool_to_json,
    'BYTES': _bytes_to_json,
    'TIMESTAMP': _timestamp_to_json,
    'DATETIME': _datetime_to_json,
    'DATE': _date_to_json,
    'TIME': _time_to_json,
}
def _row_from_json(row, schema):
    """Convert one JSON response row to a tuple of native Python values.

    :type row: dict
    :param row: a JSON response row ({'f': [{'v': ...}, ...]}).

    :type schema: tuple
    :param schema: a tuple of
                   :class:`~google.cloud.bigquery.schema.SchemaField`.

    :rtype: tuple
    :returns: the row's cells converted per their schema types.
    """
    cells = []
    for field, cell in zip(schema, row['f']):
        convert = _CELLDATA_FROM_JSON[field.field_type]
        if field.mode == 'REPEATED':
            cells.append([convert(item['v'], field) for item in cell['v']])
        else:
            cells.append(convert(cell['v'], field))
    return tuple(cells)
def _rows_from_json(rows, schema):
    """Convert a sequence of JSON rows into tuples of native values."""
    return [_row_from_json(item, schema) for item in rows]
class _ConfigurationProperty(object):
    """Base property implementation.

    Values will be stored on a `_configuration` helper attribute of the
    property's job instance, under a private name derived from the
    property name (``name`` -> ``_name``).

    :type name: str
    :param name: name of the property
    """

    def __init__(self, name):
        self.name = name
        self._backing_name = '_%s' % (self.name,)

    def __get__(self, instance, owner):
        """Descriptor protocol: accessor"""
        if instance is None:
            # Accessed via the class: return the descriptor itself.
            return self
        return getattr(instance._configuration, self._backing_name)

    def _validate(self, value):
        """Subclasses override to impose validation policy."""
        pass

    def __set__(self, instance, value):
        """Descriptor protocol: mutator"""
        self._validate(value)
        setattr(instance._configuration, self._backing_name, value)

    def __delete__(self, instance):
        """Descriptor protocol: deleter"""
        delattr(instance._configuration, self._backing_name)
class _TypedProperty(_ConfigurationProperty):
    """Property implementation: validates based on value type.

    :type name: str
    :param name: name of the property

    :type property_type: type or sequence of types
    :param property_type: type(s) accepted by ``isinstance`` validation
    """

    def __init__(self, name, property_type):
        super(_TypedProperty, self).__init__(name)
        self.property_type = property_type

    def _validate(self, value):
        """Ensure that 'value' is of the appropriate type.

        :raises: ValueError on a type mismatch.
        """
        if not isinstance(value, self.property_type):
            raise ValueError('Required type: %s' % (self.property_type,))
class _EnumProperty(_ConfigurationProperty):
    """Pseudo-enumeration class.

    Subclasses must define ``ALLOWED`` as a class-level constant: it must
    be a sequence of strings.

    :type name: str
    :param name: name of the property.
    """
    def _validate(self, value):
        """Check that ``value`` is one of the allowed values.

        :raises: ValueError if value is not allowed.
        """
        if value not in self.ALLOWED:
            # Bug fix: the '%' operator was missing, so the two adjacent
            # string literals concatenated into 'Pass one of: %s, ' and
            # were used as the *separator* for join(), producing a
            # garbled error message.
            raise ValueError('Pass one of: %s' % ', '.join(self.ALLOWED))
class UDFResource(object):
    """A single user-defined function (UDF) resource.

    :type udf_type: str
    :param udf_type: the kind of resource: 'inlineCode' or 'resourceUri'.

    :type value: str
    :param value: the inline code, or the resource URI.

    See
    https://cloud.google.com/bigquery/user-defined-functions#api
    """

    def __init__(self, udf_type, value):
        self.udf_type = udf_type
        self.value = value

    def __eq__(self, other):
        same_type = self.udf_type == other.udf_type
        same_value = self.value == other.value
        return same_type and same_value
class UDFResourcesProperty(object):
    """Custom property type, holding :class:`UDFResource` instances.

    Stored internally as a tuple; read back as a fresh list so callers
    cannot mutate the stored value in place.
    """

    def __get__(self, instance, owner):
        """Descriptor protocol: accessor"""
        if instance is None:
            return self
        return list(instance._udf_resources)

    def __set__(self, instance, value):
        """Descriptor protocol: mutator"""
        if not all(isinstance(u, UDFResource) for u in value):
            raise ValueError("udf items must be UDFResource")
        instance._udf_resources = tuple(value)
class AbstractQueryParameter(object):
    """Base class for named / positional query parameters.

    Concrete subclasses must implement both conversion hooks.
    """

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct a parameter from its JSON resource.

        :type resource: dict
        :param resource: JSON mapping of the parameter

        :rtype: :class:`AbstractQueryParameter` subclass instance
        """
        raise NotImplementedError

    def to_api_repr(self):
        """Build the JSON API representation for this parameter.

        :rtype: dict
        """
        raise NotImplementedError
class ScalarQueryParameter(AbstractQueryParameter):
    """Named / positional query parameter holding one scalar value.

    :type name: str or None
    :param name: parameter name, used via ``@name`` syntax.  ``None``
                 means the parameter is positional-only (``?``).

    :type type_: str
    :param type_: BigQuery type name.  One of 'STRING', 'INT64',
                  'FLOAT64', 'BOOL', 'TIMESTAMP', 'DATETIME', or 'DATE'.

    :type value: str, int, float, bool, :class:`datetime.datetime`, or
                 :class:`datetime.date`.
    :param value: the scalar parameter value.
    """

    def __init__(self, name, type_, value):
        self.name = name
        self.type_ = type_
        self.value = value

    @classmethod
    def positional(cls, type_, value):
        """Factory for positional (unnamed) parameters.

        :type type_: str
        :param type_: BigQuery type name (see class docstring).

        :type value: str, int, float, bool, :class:`datetime.datetime`, or
                     :class:`datetime.date`.
        :param value: the scalar parameter value.

        :rtype: :class:`ScalarQueryParameter`
        :returns: instance without a name
        """
        return cls(None, type_, value)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct a parameter from its JSON resource.

        :type resource: dict
        :param resource: JSON mapping of the parameter

        :rtype: :class:`ScalarQueryParameter`
        :returns: instance with the wire value coerced to a Python object
        """
        type_ = resource['parameterType']['type']
        raw = resource['parameterValue']['value']
        # Reuse the row-cell parsers to coerce the wire value.
        converted = _CELLDATA_FROM_JSON[type_](raw, None)
        return cls(resource.get('name'), type_, converted)

    def to_api_repr(self):
        """Build the JSON API representation for this parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        serialize = _SCALAR_VALUE_TO_JSON.get(self.type_)
        # Types without a serializer (e.g. STRING) pass through as-is.
        value = self.value if serialize is None else serialize(self.value)
        resource = {
            'parameterType': {
                'type': self.type_,
            },
            'parameterValue': {
                'value': value,
            },
        }
        if self.name is not None:
            resource['name'] = self.name
        return resource
class ArrayQueryParameter(AbstractQueryParameter):
    """Named / positional query parameter holding an array of scalars.

    :type name: str or None
    :param name: parameter name, used via ``@name`` syntax.  ``None``
                 means the parameter is positional-only (``?``).

    :type array_type: str
    :param array_type:
        BigQuery type name of the array elements.  One of `'STRING'`,
        `'INT64'`, `'FLOAT64'`, `'BOOL'`, `'TIMESTAMP'`, or `'DATE'`.

    :type values: list of the appropriate scalar type.
    :param values: the parameter's array values.
    """

    def __init__(self, name, array_type, values):
        self.name = name
        self.array_type = array_type
        self.values = values

    @classmethod
    def positional(cls, array_type, values):
        """Factory for positional (unnamed) parameters.

        :type array_type: str
        :param array_type: BigQuery element type (see class docstring).

        :type values: list of the appropriate scalar type
        :param values: the parameter's array values.

        :rtype: :class:`ArrayQueryParameter`
        :returns: instance without a name
        """
        return cls(None, array_type, values)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct a parameter from its JSON resource.

        :type resource: dict
        :param resource: JSON mapping of the parameter

        :rtype: :class:`ArrayQueryParameter`
        :returns: instance with each wire value coerced to Python
        """
        array_type = resource['parameterType']['arrayType']['type']
        convert = _CELLDATA_FROM_JSON[array_type]
        converted = [
            convert(entry['value'], None)
            for entry in resource['parameterValue']['arrayValues']
        ]
        return cls(resource.get('name'), array_type, converted)

    def to_api_repr(self):
        """Build the JSON API representation for this parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        serialize = _SCALAR_VALUE_TO_JSON.get(self.array_type)
        values = self.values
        if serialize is not None:
            values = [serialize(item) for item in values]
        resource = {
            'parameterType': {
                'type': 'ARRAY',
                'arrayType': {
                    'type': self.array_type,
                },
            },
            'parameterValue': {
                'arrayValues': [{'value': item} for item in values],
            },
        }
        if self.name is not None:
            resource['name'] = self.name
        return resource
class StructQueryParameter(AbstractQueryParameter):
    """Named / positional query parameters for struct values.

    :type name: str or None
    :param name: Parameter name, used via ``@foo`` syntax. If None, the
                 paramter can only be addressed via position (``?``).

    :type sub_params: tuple of :class:`ScalarQueryParameter`
    :param sub_params: the sub-parameters for the struct
    """
    def __init__(self, name, *sub_params):
        self.name = name
        # OrderedDict preserves declaration order of the struct's fields,
        # which to_api_repr() relies on when emitting 'structTypes'.
        self.struct_types = OrderedDict(
            (sub.name, sub.type_) for sub in sub_params)
        self.struct_values = {sub.name: sub.value for sub in sub_params}

    @classmethod
    def positional(cls, *sub_params):
        """Factory for positional paramters.

        :type sub_params: tuple of :class:`ScalarQueryParameter`
        :param sub_params: the sub-parameters for the struct

        :rtype: :class:`StructQueryParameter`
        :returns: instance without name
        """
        return cls(None, *sub_params)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct paramter from JSON resource.

        :type resource: dict
        :param resource: JSON mapping of parameter

        :rtype: :class:`StructQueryParameter`
        :returns: instance
        """
        name = resource.get('name')
        # Start from an empty struct, then fill the instance's dicts
        # in place.  NOTE: 'types' below ALIASES instance.struct_types --
        # mutating it populates the instance.
        instance = cls(name)
        types = instance.struct_types
        for item in resource['parameterType']['structTypes']:
            types[item['name']] = item['type']['type']
        struct_values = resource['parameterValue']['structValues']
        for key, value in struct_values.items():
            type_ = types[key]
            value = value['value']
            # Reuse the row-cell parsers to coerce each wire value.
            converted = _CELLDATA_FROM_JSON[type_](value, None)
            instance.struct_values[key] = converted
        return instance

    def to_api_repr(self):
        """Construct JSON API representation for the parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        types = [
            {'name': key, 'type': {'type': value}}
            for key, value in self.struct_types.items()
        ]
        values = {}
        for name, value in self.struct_values.items():
            # Types without a serializer (e.g. STRING) pass through as-is.
            converter = _SCALAR_VALUE_TO_JSON.get(self.struct_types[name])
            if converter is not None:
                value = converter(value)
            values[name] = {'value': value}
        resource = {
            'parameterType': {
                'type': 'STRUCT',
                'structTypes': types,
            },
            'parameterValue': {
                'structValues': values,
            },
        }
        if self.name is not None:
            resource['name'] = self.name
        return resource
class QueryParametersProperty(object):
    """Descriptor managing a job's list of query parameters.

    Stored internally as a tuple; read back as a fresh list so callers
    cannot mutate the stored value in place.
    """

    def __get__(self, instance, owner):
        """Descriptor protocol: accessor

        :type instance: :class:`QueryParametersProperty`
        :param instance: instance owning the property (None if accessed
                         via the class).

        :type owner: type
        :param owner: the class owning the property.

        :rtype: list of instances of classes derived from
                :class:`AbstractQueryParameter`.
        :returns: the descriptor, if accessed via the class, or a copy of
                  the instance's query parameters.
        """
        if instance is None:
            return self
        return list(instance._query_parameters)

    def __set__(self, instance, value):
        """Descriptor protocol: mutator

        :type instance: :class:`QueryParametersProperty`
        :param instance: instance owning the property (None if accessed
                         via the class).

        :type value: list of instances of classes derived from
                     :class:`AbstractQueryParameter`.
        :param value: new query parameters for the instance.

        :raises: ValueError if any element is not a query parameter.
        """
        if not all(isinstance(u, AbstractQueryParameter) for u in value):
            raise ValueError(
                "query parameters must be derived from AbstractQueryParameter")
        instance._query_parameters = tuple(value)
|
{
"content_hash": "a9c61e6cf8426a9f0b0d047e0a75e913",
"timestamp": "",
"source": "github",
"line_count": 625,
"max_line_length": 79,
"avg_line_length": 31.3552,
"alnum_prop": 0.6007552176353523,
"repo_name": "axbaretto/beam",
"id": "bbcbae1674c331e189bc0a55886791232f1c0e97",
"size": "20173",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/cloud/bigquery/_helpers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
from pprint import PrettyPrinter
from . import acf, appinfo, manifest
# Command-line entry point: parse a Steam data file and pretty-print it.
parser = ArgumentParser(
    prog="steamfiles",
    description=" Python library for parsing the most common Steam file formats. ",
)
parser.add_argument(
    "type", choices=["acf", "appinfo", "manifest"], help="the type of file"
)
parser.add_argument("file", type=str, help="file to parse")
args = parser.parse_args()
# Map each supported format to its open() mode and parser module.  The
# argparse `choices` constraint guarantees args.type is a valid key.
_DISPATCH = {
    "acf": ("r", acf),
    "appinfo": ("rb", appinfo),
    "manifest": ("rb", manifest),
}
mode, module = _DISPATCH[args.type]
pp = PrettyPrinter()
with open(args.file, mode) as f:
    data = module.load(f)
pp.pprint(data)
|
{
"content_hash": "285f627ffdc50a6fc03a12aad386783b",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 24.655172413793103,
"alnum_prop": 0.6671328671328671,
"repo_name": "leovp/steamfiles",
"id": "2b1c7aebf888dc3e56f291bc72883b2b1cca68eb",
"size": "715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steamfiles/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "749"
},
{
"name": "Python",
"bytes": "25625"
}
],
"symlink_target": ""
}
|
"""
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
# Default polling interval for binary sensor platforms, in seconds.
SCAN_INTERVAL = 30
ENTITY_ID_FORMAT = DOMAIN + '.{}'
# Valid values for a binary sensor's ``sensor_class`` attribute; ``None``
# denotes a generic on/off sensor with no specific semantics.
SENSOR_CLASSES = [
    None,  # Generic on/off
    'cold',  # On means cold (or too cold)
    'connectivity',  # On means connection present, Off = no connection
    'gas',  # CO, CO2, etc.
    'heat',  # On means hot (or too hot)
    'light',  # Lightness threshold
    'moisture',  # Specifically a wetness sensor
    'motion',  # Motion sensor
    'moving',  # On means moving, Off means stopped
    'occupancy',  # On means occupied, Off means not occupied
    'opening',  # Door, window, etc.
    'power',  # Power, over-current, etc
    'safety',  # Generic on=unsafe, off=safe
    'smoke',  # Smoke detector
    'sound',  # On means sound detected, Off means no sound
    'vibration',  # On means vibration detected, Off means no vibration
]
# Voluptuous validator: lower-cases the input, then requires it to be one of
# SENSOR_CLASSES.
SENSOR_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(SENSOR_CLASSES))
@asyncio.coroutine
def async_setup(hass, config):
    """Set up the binary_sensor domain: track states and offer events."""
    logger = logging.getLogger(__name__)
    component = EntityComponent(logger, DOMAIN, hass, SCAN_INTERVAL)
    yield from component.async_setup(config)
    return True
# pylint: disable=no-self-use
class BinarySensorDevice(Entity):
    """Base class for binary sensors: an on/off state plus metadata."""
    @property
    def is_on(self):
        """Return True if the binary sensor is on; platforms override this."""
        return None
    @property
    def state(self):
        """Return STATE_ON or STATE_OFF depending on is_on."""
        if self.is_on:
            return STATE_ON
        return STATE_OFF
    @property
    def sensor_class(self):
        """Return the class of this sensor, from SENSOR_CLASSES."""
        return None
    @property
    def state_attributes(self):
        """Return device specific state attributes."""
        if self.sensor_class is not None:
            return {'sensor_class': self.sensor_class}
        return {}
|
{
"content_hash": "6810c25cf7a8ba7b815965feccaefbfa",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 75,
"avg_line_length": 30.9,
"alnum_prop": 0.6452265372168284,
"repo_name": "oandrew/home-assistant",
"id": "38b08fd32b4eeb3c55b3d4b83398a2c802af16a1",
"size": "2472",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "homeassistant/components/binary_sensor/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1446622"
},
{
"name": "Python",
"bytes": "4200234"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7256"
}
],
"symlink_target": ""
}
|
"""Test op functions against ORACLE."""
from __future__ import with_statement
from tests import op_fixture, capture_context_buffer, \
_no_sql_testing_config, assert_raises_message, staging_env, \
three_rev_fixture, clear_staging_env
from alembic import op, command, util
from sqlalchemy import Integer, Column, ForeignKey, \
UniqueConstraint, Table, MetaData, String
from sqlalchemy.sql import table
from unittest import TestCase
class FullEnvironmentTests(TestCase):
    """Exercise offline ('--sql') upgrades against a staged Oracle config."""
    @classmethod
    def setup_class(cls):
        # staging_env() is invoked for its side effect (creating the staging
        # environment); the original bound the result to an unused local.
        staging_env()
        cls.cfg = cfg = _no_sql_testing_config("oracle")
        cls.a, cls.b, cls.c = three_rev_fixture(cfg)
    @classmethod
    def teardown_class(cls):
        clear_staging_env()
    def test_begin_comit(self):
        # Transactional DDL should emit SET TRANSACTION / COMMIT markers.
        with capture_context_buffer(transactional_ddl=True) as buf:
            command.upgrade(self.cfg, self.a, sql=True)
        assert "SET TRANSACTION READ WRITE\n\n/" in buf.getvalue()
        assert "COMMIT\n\n/" in buf.getvalue()
    def test_batch_separator_default(self):
        # Oracle offline mode uses '/' (not ';') as the batch separator.
        with capture_context_buffer() as buf:
            command.upgrade(self.cfg, self.a, sql=True)
        assert "/" in buf.getvalue()
        assert ";" not in buf.getvalue()
    def test_batch_separator_custom(self):
        # A custom oracle_batch_separator must be honored in the output.
        with capture_context_buffer(oracle_batch_separator="BYE") as buf:
            command.upgrade(self.cfg, self.a, sql=True)
        assert "BYE" in buf.getvalue()
class OpTest(TestCase):
    """Assert the exact DDL emitted by op.* directives for the Oracle
    dialect, using op_fixture to capture statements instead of a database."""
    def test_add_column(self):
        context = op_fixture('oracle')
        op.add_column('t1', Column('c1', Integer, nullable=False))
        context.assert_("ALTER TABLE t1 ADD c1 INTEGER NOT NULL")
    def test_add_column_with_default(self):
        context = op_fixture("oracle")
        op.add_column('t1', Column('c1', Integer, nullable=False, server_default="12"))
        context.assert_("ALTER TABLE t1 ADD c1 INTEGER DEFAULT '12' NOT NULL")
    def test_alter_column_rename_oracle(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", name="x")
        context.assert_(
            "ALTER TABLE t RENAME COLUMN c TO x"
        )
    def test_alter_column_new_type(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", type_=Integer)
        context.assert_(
            'ALTER TABLE t MODIFY c INTEGER'
        )
    def test_drop_index(self):
        context = op_fixture('oracle')
        op.drop_index('my_idx', 'my_table')
        # Substring match only: the full statement may carry extra clauses.
        context.assert_contains("DROP INDEX my_idx")
    def test_drop_column_w_default(self):
        context = op_fixture('oracle')
        op.drop_column('t1', 'c1')
        context.assert_(
            "ALTER TABLE t1 DROP COLUMN c1"
        )
    def test_drop_column_w_check(self):
        context = op_fixture('oracle')
        op.drop_column('t1', 'c1')
        context.assert_(
            "ALTER TABLE t1 DROP COLUMN c1"
        )
    def test_alter_column_nullable_w_existing_type(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", nullable=True, existing_type=Integer)
        context.assert_(
            "ALTER TABLE t MODIFY c NULL"
        )
    def test_alter_column_not_nullable_w_existing_type(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", nullable=False, existing_type=Integer)
        context.assert_(
            "ALTER TABLE t MODIFY c NOT NULL"
        )
    def test_alter_column_nullable_w_new_type(self):
        # Changing nullability and type emits two separate MODIFY statements.
        context = op_fixture('oracle')
        op.alter_column("t", "c", nullable=True, type_=Integer)
        context.assert_(
            "ALTER TABLE t MODIFY c NULL",
            'ALTER TABLE t MODIFY c INTEGER'
        )
    def test_alter_column_not_nullable_w_new_type(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", nullable=False, type_=Integer)
        context.assert_(
            "ALTER TABLE t MODIFY c NOT NULL",
            "ALTER TABLE t MODIFY c INTEGER"
        )
    def test_alter_add_server_default(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", server_default="5")
        context.assert_(
            "ALTER TABLE t MODIFY c DEFAULT '5'"
        )
    def test_alter_replace_server_default(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", server_default="5", existing_server_default="6")
        context.assert_(
            "ALTER TABLE t MODIFY c DEFAULT '5'"
        )
    def test_alter_remove_server_default(self):
        context = op_fixture('oracle')
        op.alter_column("t", "c", server_default=None)
        context.assert_(
            "ALTER TABLE t MODIFY c DEFAULT NULL"
        )
    def test_alter_do_everything(self):
        # Combined alteration: one MODIFY per aspect, plus the RENAME last.
        context = op_fixture('oracle')
        op.alter_column("t", "c", name="c2", nullable=True, type_=Integer, server_default="5")
        context.assert_(
            'ALTER TABLE t MODIFY c NULL',
            "ALTER TABLE t MODIFY c DEFAULT '5'",
            'ALTER TABLE t MODIFY c INTEGER',
            'ALTER TABLE t RENAME COLUMN c TO c2'
        )
    # TODO: when we add schema support
    #def test_alter_column_rename_oracle_schema(self):
    #    context = op_fixture('oracle')
    #    op.alter_column("t", "c", name="x", schema="y")
    #    context.assert_(
    #        'ALTER TABLE y.t RENAME COLUMN c TO c2'
    #    )
|
{
"content_hash": "354522636df7bef98196c258ae905751",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 94,
"avg_line_length": 34.583333333333336,
"alnum_prop": 0.5959221501390176,
"repo_name": "briandailey/alembic",
"id": "a14197d9458e2aca6835e5fb739025fd1917251b",
"size": "5395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_oracle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "285753"
}
],
"symlink_target": ""
}
|
from tgbot.entities import RequestingEntity
class FileEntity(RequestingEntity):
    """Base entity for Telegram file objects (id, size, MIME type)."""
    def __init__(self, props, api):
        """Merge the common file properties into *props* and delegate.
        props -- mapping of attribute name -> (source key, default); mutated
                 in place before being handed to RequestingEntity.
        api   -- API client used for follow-up requests (may be None).
        """
        props.update({
            "id": ("file_id", None),
            "size": ("file_size", None),
            "mime_type": ("mime_type", None)
        })
        RequestingEntity.__init__(self, props, api)
    def download(self, name):
        """Download this file to *name* via the bound API client.
        Raises Exception when no API client or file id is available.
        """
        # PEP 8: compare against None with identity, not equality
        # (the original used ``== None``).
        if self.api is None or self.id is None:
            raise Exception("Can't make requests with this file")
        return self.api.download_file(self.id, name)
class PhotoSize(FileEntity):
    """One size variant of a photo, carrying its pixel dimensions."""
    def __init__(self, api):
        dimensions = {
            "width": ("width", None),
            "height": ("height", None)
        }
        FileEntity.__init__(self, dimensions, api)
class FileEntityWithThumb(FileEntity):
    """File entity that may carry a thumbnail (a PhotoSize)."""
    def _set_props(self, values=None):
        """Populate attributes from *values*, plus an optional thumbnail.
        ``values=None`` replaces the original mutable default argument
        (``values = {}``), a classic Python pitfall where the default dict
        is shared across calls; caller-visible behavior is unchanged.
        """
        if values is None:
            values = {}
        FileEntity._set_props(self, values)
        # PhotoSize.build is presumably inherited from the entity base class
        # (not visible in this module) -- TODO confirm.
        self.thumb = PhotoSize.build(values["thumb"], self.api) if "thumb" in values else None
class Audio(FileEntity):
    """An audio track: duration plus optional performer/title tags."""
    def __init__(self, api):
        audio_props = {
            "duration": ("duration", None),
            "performer": ("performer", None),
            "title": ("title", None)
        }
        FileEntity.__init__(self, audio_props, api)
class Document(FileEntityWithThumb):
    """A generic document with its original file name and optional thumb."""
    def __init__(self, api):
        doc_props = {"name": ("file_name", None)}
        FileEntityWithThumb.__init__(self, doc_props, api)
class Sticker(FileEntityWithThumb):
    """A sticker image: pixel dimensions plus its associated emoji."""
    def __init__(self, api):
        sticker_props = {
            "width": ("width", None),
            "height": ("height", None),
            "emoji": ("emoji", None)
        }
        FileEntityWithThumb.__init__(self, sticker_props, api)
class Video(FileEntityWithThumb):
    """A video clip: pixel dimensions and duration, with optional thumb."""
    def __init__(self, api):
        video_props = {
            "width": ("width", None),
            "height": ("height", None),
            "duration": ("duration", None)
        }
        FileEntityWithThumb.__init__(self, video_props, api)
class Voice(FileEntity):
    """A voice note, described only by its duration."""
    def __init__(self, api):
        voice_props = {"duration": ("duration", None)}
        FileEntity.__init__(self, voice_props, api)
|
{
"content_hash": "8bf05b6bfdef066335ccc46afa3099f2",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 94,
"avg_line_length": 31.153846153846153,
"alnum_prop": 0.5219753086419753,
"repo_name": "JacoRuit/tgbot",
"id": "ef9cb9e758a14e98b7e23255d0e83a9fb476c79b",
"size": "2025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tgbot/entities/files.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53763"
}
],
"symlink_target": ""
}
|
from pandac.PandaModules import OdeJointGroup, OdeWorld, OdeHashSpace
class World:
    """Owns the ODE physics simulation: world, collision space, the set of
    simulated objects, and a fixed-timestep stepping loop driven by a
    Panda3D task."""
    def __init__(self, game):
        self.game = game
        self.world = OdeWorld()
        # Gravity is three times Earth's -9.81 m/s^2 along -Z.
        self.world.setGravity(0, 0, -9.81 * 3)
        # Contact joints produced by auto-collide accumulate in this group
        # and are emptied after every simulation step.
        self.group = OdeJointGroup()
        self.space = OdeHashSpace()
        self.space.setAutoCollideWorld(self.world)
        self.space.setAutoCollideJointGroup(self.group)
        self.setSurfaceTables()
        self.objects = []  # objects currently participating in the simulation
        self.objectsToRemove = []  # queued for deferred removal after a step
        self.postStepTasks = []  # [method, params] pairs run after each step
        self.engineRunning = True
        self.dtAccumulator = 0.0  # unsimulated real time carried across frames
        self.physicsFps = 90  # fixed simulation rate (steps per second)
        self.physicsMinFps = 10  # clamp: at most 1/10 s simulated per frame
        base.taskMgr.doMethodLater(0.1, self.processPhysics, "Physics")
        # "escape" key toggles the physics engine (debug convenience).
        base.accept("escape", self.togglePhysics)
        # Route ODE auto-collision events to onCollision via the event name.
        self.space.setCollisionEvent("odeCollision")
        base.accept("odeCollision", self.onCollision)
    def setSurfaceTables(self):
        """Registers surface ids and per-pair ODE contact parameters."""
        self.surfaces = {}
        self.surfaces["plane"] = 0
        self.surfaces["box"] = 1
        self.surfaces["sphere"] = 2
        self.surfaces["bullet"] = 3
        n = len(self.surfaces)
        self.world.initSurfaceTable(n)
        for i in range(n):
            for j in range(n):
                # id1, id2, mu (0 to inf), bounce (1 = bouncy), min_bounce_vel, erp (1 = hard), cfm (0 = hard), slip, dampen
                # sample: 150, 0.0, 9.1, 0.9, 0.00001, 0.0, 0.002
                self.world.setSurfaceEntry(i, j, 10.0, 1.0, 0.0, 0.9, 0.0, 0.5, 0.001) # Default value for unspecified pairs.
        # Specific overrides for pairs that need different contact behavior.
        self.world.setSurfaceEntry(self.surfaces["box"], self.surfaces["box"],
            200.0, 0.2, 0.3, 1.0, 0.0, 0.0, 0.01)
        self.world.setSurfaceEntry(self.surfaces["plane"], self.surfaces["box"],
            200.0, 0.2, 0.3, 1.0, 0.0, 0.0, 0.01)
        self.world.setSurfaceEntry(self.surfaces["box"], self.surfaces["bullet"],
            10.0, 0.1, 0.5, 1.0, 0.0, 0.0, 0.01)
        self.world.setSurfaceEntry(self.surfaces["plane"], self.surfaces["bullet"],
            100.0, 0.01, 0.1, 1.0, 0.0, 0.0, 1.0)
    def addObject(self, obj):
        """Adds obj to the simulation."""
        self.objects.append(obj)
    def removeObject(self, obj):
        """Queues obj for removal after the current physics step."""
        if obj not in self.objectsToRemove:
            self.objectsToRemove.append(obj)
    def removeDestroyedObjects(self):
        """Drains the removal queue, destroying each queued object."""
        while len(self.objectsToRemove) > 0:
            obj = self.objectsToRemove.pop()
            if obj in self.objects:
                self.objects.remove(obj)
            obj.doDestroy()
    def togglePhysics(self):
        """Pauses/resumes the simulation and discards accumulated time."""
        self.engineRunning = not self.engineRunning
        self.dtAccumulator = 0.0
    def processPhysics(self, task):
        """Panda task: advances the simulation with fixed-size steps.
        Accumulates real elapsed time and runs as many fixed steps as fit,
        clamping the accumulator so a slow frame never simulates more than
        1/physicsMinFps seconds (avoids a runaway catch-up loop).
        """
        stepSize = 1.0 / self.physicsFps
        maxDt = 1.0 / self.physicsMinFps
        if self.engineRunning:
            self.dtAccumulator += globalClock.getDt()
            self.dtAccumulator = min(self.dtAccumulator, maxDt)
            while self.dtAccumulator >= stepSize:
                self.space.autoCollide()
                self.world.quickStep(stepSize)
                for obj in self.objects:
                    obj.updatePosFromPhysics()
                # Contact joints are only valid for a single step.
                self.group.empty()
                self.performPostStepTasks()
                self.removeDestroyedObjects()
                self.dtAccumulator -= stepSize
        return task.cont
    def onCollision(self, entry):
        """Collision event handler: notifies both bodies' owning objects."""
        body1 = entry.getBody1()
        body2 = entry.getBody2()
        if not body1.isEmpty():
            body1.getData().onCollision(body2, entry)
        if not body2.isEmpty():
            body2.getData().onCollision(body1, entry)
    def performAfterStep(self, method, params):
        """Schedules method(*params) to run right after the current step."""
        self.postStepTasks.append([method, params])
    def performPostStepTasks(self):
        """Runs and clears all scheduled post-step callbacks."""
        for task in self.postStepTasks:
            method = task[0]
            params = task[1]
            method(*params)
        self.postStepTasks = []
|
{
"content_hash": "b24223c14ac92c540a69d7cd497b453b",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 113,
"avg_line_length": 32.64356435643565,
"alnum_prop": 0.6800121322414316,
"repo_name": "joaofrancese/heavy-destruction",
"id": "3513ec0cc2e014c36e8a9bf4764934bb6f82a604",
"size": "3297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Panda/src/world.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "43053"
},
{
"name": "Shell",
"bytes": "113"
}
],
"symlink_target": ""
}
|
"""SQLite parser plugin for Google Chrome history database files."""
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import webkit_time as dfdatetime_webkit_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class ChromeHistoryFileDownloadedEventData(events.EventData):
  """Chrome History file downloaded event data.
  Attributes:
    danger_type (int): assessment by Safe Browsing of the danger of the
        downloaded content.
    full_path (str): full path where the file was downloaded to.
    interrupt_reason (int): indication why the download was interrupted.
    offset (str): identifier of the row, from which the event data was
        extracted.
    opened (int): value to indicate if the downloaded file was opened from
        the browser.
    query (str): SQL query that was used to obtain the event data.
    received_bytes (int): number of bytes received while downloading.
    state (int): state of the download, such as finished or cancelled.
    total_bytes (int): total number of bytes to download.
    url (str): URL of the downloaded file.
  """
  DATA_TYPE = 'chrome:history:file_downloaded'
  # All instance attributes start out as None; the parser plugin fills in
  # the values extracted from the database row.
  _ATTRIBUTE_NAMES = (
      'danger_type', 'full_path', 'interrupt_reason', 'offset', 'opened',
      'query', 'received_bytes', 'state', 'total_bytes', 'url')
  def __init__(self):
    """Initializes event data."""
    super(ChromeHistoryFileDownloadedEventData, self).__init__(
        data_type=self.DATA_TYPE)
    for attribute_name in self._ATTRIBUTE_NAMES:
      setattr(self, attribute_name, None)
class ChromeHistoryPageVisitedEventData(events.EventData):
  """Chrome History page visited event data.
  Attributes:
    from_visit (str): URL where the visit originated from.
    offset (str): identifier of the row, from which the event data was
        extracted.
    page_transition_type (int): type of transitions between pages.
    query (str): SQL query that was used to obtain the event data.
    title (str): title of the visited page.
    typed_count (int): number of characters of the URL that were typed.
    url (str): URL of the visited page.
    url_hidden (bool): True if the URL is hidden.
    visit_source (int): source of the page visit.
  """
  DATA_TYPE = 'chrome:history:page_visited'
  # All instance attributes start out as None; the parser plugin fills in
  # the values extracted from the database row.
  _ATTRIBUTE_NAMES = (
      'from_visit', 'offset', 'page_transition_type', 'query', 'title',
      'typed_count', 'url', 'url_hidden', 'visit_source')
  def __init__(self):
    """Initializes event data."""
    super(ChromeHistoryPageVisitedEventData, self).__init__(
        data_type=self.DATA_TYPE)
    for attribute_name in self._ATTRIBUTE_NAMES:
      setattr(self, attribute_name, None)
class BaseGoogleChromeHistoryPlugin(interface.SQLitePlugin):
  """SQLite parser plugin for Google Chrome history database files.
  The Google Chrome history database file is typically stored in:
  Archived History
  History
  Note that the Archived History database does not contain the downloads
  table.
  """
  _SYNC_CACHE_QUERY = 'SELECT id, source FROM visit_source'
  _URL_CACHE_QUERY = (
      'SELECT visits.id AS id, urls.url, urls.title FROM '
      'visits, urls WHERE urls.id = visits.url')
  # Mask that keeps only the core page transition type, discarding the
  # qualifier bits, see:
  # https://cs.chromium.org/chromium/src/ui/base/page_transition_types.h?l=108
  _PAGE_TRANSITION_CORE_MASK = 0xff
  def _GetUrl(self, url, cache, database):
    """Retrieves an URL from a reference to an entry in the from_visit table.
    Args:
      url (str): URL.
      cache (SQLiteCache): cache.
      database (SQLiteDatabase): database.
    Returns:
      str: URL or an empty string if no URL was found.
    """
    if not url:
      return ''
    # Populate the URL lookup cache lazily on first use.
    url_cache_results = cache.GetResults('url')
    if not url_cache_results:
      result_set = database.Query(self._URL_CACHE_QUERY)
      cache.CacheQueryResults(result_set, 'url', 'id', ('url', 'title'))
      url_cache_results = cache.GetResults('url')
    reference_url, reference_title = url_cache_results.get(url, ['', ''])
    if not reference_url:
      return ''
    return '{0:s} ({1:s})'.format(reference_url, reference_title)
  def _GetVisitSource(self, visit_identifier, cache, database):
    """Retrieves a visit source type based on the identifier.
    Args:
      visit_identifier (str): identifier from the visits table for the
          particular record.
      cache (SQLiteCache): cache which contains cached results from querying
          the visit_source table.
      database (SQLiteDatabase): database.
    Returns:
      int: visit source type or None if no visit source type was found for
          the identifier.
    """
    # Populate the visit_source lookup cache lazily on first use.
    sync_cache_results = cache.GetResults('sync')
    if not sync_cache_results:
      result_set = database.Query(self._SYNC_CACHE_QUERY)
      cache.CacheQueryResults(result_set, 'sync', 'id', ('source',))
      sync_cache_results = cache.GetResults('sync')
    if sync_cache_results and visit_identifier:
      results = sync_cache_results.get(visit_identifier, None)
      if results:
        return results[0]
    return None
  def ParseLastVisitedRow(
      self, parser_mediator, query, row, cache=None, database=None,
      **unused_kwargs):
    """Parses a last visited row.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
      cache (SQLiteCache): cache which contains cached results from querying
          the visits and urls tables.
      database (Optional[SQLiteDatabase]): database.
    """
    query_hash = hash(query)
    hidden = self._GetRowValue(query_hash, row, 'hidden')
    transition = self._GetRowValue(query_hash, row, 'transition')
    visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
    from_visit = self._GetRowValue(query_hash, row, 'from_visit')
    event_data = ChromeHistoryPageVisitedEventData()
    event_data.from_visit = self._GetUrl(from_visit, cache, database)
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    # Keep only the core transition type; qualifier flags are masked off.
    event_data.page_transition_type = (
        transition & self._PAGE_TRANSITION_CORE_MASK)
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # NOTE(review): 'hidden' is compared against the string '1' although
    # SQLite typically returns an integer here -- preserved as-is; confirm
    # against upstream before changing.
    event_data.url_hidden = hidden == '1'
    event_data.visit_source = self._GetVisitSource(
        visit_identifier, cache, database)
    # visit_time is interpreted as a WebKit timestamp.
    timestamp = self._GetRowValue(query_hash, row, 'visit_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleChrome8HistoryPlugin(BaseGoogleChromeHistoryPlugin):
  """SQLite parser plugin for Google Chrome 8 - 25 history database files."""
  NAME = 'chrome_8_history'
  DATA_FORMAT = 'Google Chrome 8 - 25 history SQLite database file'
  # Tables and columns that must be present for this plugin to claim the
  # database file.
  REQUIRED_STRUCTURE = {
      'downloads': frozenset([
          'id', 'full_path', 'received_bytes', 'total_bytes', 'url',
          'start_time', 'state']),
      'urls': frozenset([
          'id', 'url', 'title', 'visit_count', 'typed_count',
          'last_visit_time', 'hidden']),
      'visits': frozenset([
          'visit_time', 'from_visit', 'transition', 'id'])}
  # Pairs of (SQL query, name of the callback method that parses its rows).
  QUERIES = [
      (('SELECT urls.id, urls.url, urls.title, urls.visit_count, '
        'urls.typed_count, urls.last_visit_time, urls.hidden, visits.'
        'visit_time, visits.from_visit, visits.transition, visits.id '
        'AS visit_id FROM urls, visits WHERE urls.id = visits.url ORDER '
        'BY visits.visit_time'), 'ParseLastVisitedRow'),
      (('SELECT id, full_path, url, start_time, received_bytes, '
        'total_bytes, state FROM downloads'), 'ParseFileDownloadedRow')]
  _SCHEMA_8 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY '
          'KEY,value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  _SCHEMA_16 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY '
          'KEY,value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  _SCHEMA_19 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  _SCHEMA_20 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
          'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
  # Known schema variants used to match a database file to this plugin.
  SCHEMAS = [_SCHEMA_8, _SCHEMA_16, _SCHEMA_19, _SCHEMA_20]
  def ParseFileDownloadedRow(
      self, parser_mediator, query, row, **unused_kwargs):
    """Parses a file downloaded row.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = ChromeHistoryFileDownloadedEventData()
    event_data.full_path = self._GetRowValue(query_hash, row, 'full_path')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(
        query_hash, row, 'received_bytes')
    event_data.state = self._GetRowValue(query_hash, row, 'state')
    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # In this schema family start_time is interpreted as a POSIX timestamp.
    timestamp = self._GetRowValue(query_hash, row, 'start_time')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleChrome27HistoryPlugin(BaseGoogleChromeHistoryPlugin):
"""SQLite parser plugin for Google Chrome 27+ history database files."""
NAME = 'chrome_27_history'
DATA_FORMAT = 'Google Chrome 27 and later history SQLite database file'
REQUIRED_STRUCTURE = {
'downloads': frozenset([
'id', 'target_path', 'received_bytes', 'total_bytes', 'start_time',
'end_time', 'state', 'danger_type', 'interrupt_reason', 'opened']),
'downloads_url_chains': frozenset([
'id', 'url']),
'urls': frozenset([
'id', 'url', 'title', 'visit_count', 'typed_count',
'last_visit_time', 'hidden']),
'visits': frozenset([
'visit_time', 'from_visit', 'transition', 'id'])}
QUERIES = [
(('SELECT urls.id, urls.url, urls.title, urls.visit_count, '
'urls.typed_count, urls.last_visit_time, urls.hidden, visits.'
'visit_time, visits.from_visit, visits.transition, visits.id '
'AS visit_id FROM urls, visits WHERE urls.id = visits.url ORDER '
'BY visits.visit_time'), 'ParseLastVisitedRow'),
(('SELECT downloads.id AS id, downloads.start_time,'
'downloads.target_path, downloads_url_chains.url, '
'downloads.received_bytes, downloads.total_bytes, '
'downloads.end_time, downloads.state, downloads.danger_type, '
'downloads.interrupt_reason, downloads.opened FROM downloads,'
' downloads_url_chains WHERE downloads.id = '
'downloads_url_chains.id'), 'ParseFileDownloadedRow')]
_SCHEMA_27 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_31 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_37 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_51 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_58 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_59 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,last_access_time INTEGER NOT NULL,transient INTEGER NOT '
'NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT NULL,tab_url '
'VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT NULL,http_method '
'VARCHAR NOT NULL,by_ext_id VARCHAR NOT NULL,by_ext_name VARCHAR '
'NOT NULL,etag VARCHAR NOT NULL,last_modified VARCHAR NOT '
'NULL,mime_type VARCHAR(255) NOT NULL,original_mime_type '
'VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 63.0.3239.108 meta.version 37
_SCHEMA_63 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, last_access_time '
'INTEGER NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 65.0.3325.162
_SCHEMA_65 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, guid VARCHAR NOT '
'NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT X\'\', http_method '
'VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR NOT NULL '
'DEFAULT \'\', tab_referrer_url VARCHAR NOT NULL DEFAULT \'\', '
'site_url VARCHAR NOT NULL DEFAULT \'\', last_access_time INTEGER '
'NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 67.0.3396.62.
_SCHEMA_67 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL, mime_type VARCHAR(255) NOT '
'NULL DEFAULT "", original_mime_type VARCHAR(255) NOT NULL DEFAULT '
'"", guid VARCHAR NOT NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT '
'X\'\', http_method VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR '
'NOT NULL DEFAULT \'\', tab_referrer_url VARCHAR NOT NULL DEFAULT '
'\'\', site_url VARCHAR NOT NULL DEFAULT \'\', last_access_time '
'INTEGER NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Linux Chrome 67.0.3396.99 meta.version 39
_SCHEMA_67_2 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, guid VARCHAR NOT '
'NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT X\'\', http_method '
'VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR NOT NULL DEFAULT '
'\'\', tab_referrer_url VARCHAR NOT NULL DEFAULT \'\', site_url '
'VARCHAR NOT NULL DEFAULT \'\', last_access_time INTEGER NOT NULL '
'DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in MacOS Chrome 67.0.3396.99 meta.version 39
_SCHEMA_67_3 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,last_access_time INTEGER NOT NULL,transient INTEGER NOT '
'NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT NULL,tab_url '
'VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT NULL,http_method '
'VARCHAR NOT NULL,by_ext_id VARCHAR NOT NULL,by_ext_name VARCHAR '
'NOT NULL,etag VARCHAR NOT NULL,last_modified VARCHAR NOT '
'NULL,mime_type VARCHAR(255) NOT NULL,original_mime_type '
'VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
SCHEMAS = [
_SCHEMA_27, _SCHEMA_31, _SCHEMA_37, _SCHEMA_51, _SCHEMA_58, _SCHEMA_59,
_SCHEMA_63, _SCHEMA_65, _SCHEMA_67, _SCHEMA_67_2, _SCHEMA_67_3]
def ParseFileDownloadedRow(
self, parser_mediator, query, row, **unused_kwargs):
"""Parses a file downloaded row.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row.
"""
query_hash = hash(query)
event_data = ChromeHistoryFileDownloadedEventData()
event_data.danger_type = self._GetRowValue(query_hash, row, 'danger_type')
event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')
event_data.offset = self._GetRowValue(query_hash, row, 'id')
event_data.interrupt_reason = self._GetRowValue(
query_hash, row, 'interrupt_reason')
event_data.opened = self._GetRowValue(query_hash, row, 'opened')
event_data.query = query
event_data.received_bytes = self._GetRowValue(
query_hash, row, 'received_bytes')
event_data.state = self._GetRowValue(query_hash, row, 'state')
event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
event_data.url = self._GetRowValue(query_hash, row, 'url')
start_timestamp = self._GetRowValue(query_hash, row, 'start_time')
if start_timestamp:
start_date_time = dfdatetime_webkit_time.WebKitTime(
timestamp=start_timestamp)
start_event = time_events.DateTimeValuesEvent(
start_date_time, definitions.TIME_DESCRIPTION_START)
parser_mediator.ProduceEventWithEventData(start_event, event_data)
end_timestamp = self._GetRowValue(query_hash, row, 'end_time')
if end_timestamp:
end_date_time = dfdatetime_webkit_time.WebKitTime(
timestamp=end_timestamp)
end_event = time_events.DateTimeValuesEvent(
end_date_time, definitions.TIME_DESCRIPTION_END)
parser_mediator.ProduceEventWithEventData(end_event, event_data)
sqlite.SQLiteParser.RegisterPlugins([
GoogleChrome8HistoryPlugin, GoogleChrome27HistoryPlugin])
|
{
"content_hash": "48eccd53a8c30d0589a292f5bafe498d",
"timestamp": "",
"source": "github",
"line_count": 1037,
"max_line_length": 79,
"avg_line_length": 49.4619093539055,
"alnum_prop": 0.6453053107697106,
"repo_name": "kiddinn/plaso",
"id": "0fd756f4de42c6e8adcd7511a26dddf01f1424ec",
"size": "51316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/sqlite_plugins/chrome_history.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1047"
},
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "PowerShell",
"bytes": "9560"
},
{
"name": "Python",
"bytes": "4878625"
},
{
"name": "Ruby",
"bytes": "926"
},
{
"name": "Shell",
"bytes": "26453"
}
],
"symlink_target": ""
}
|
import simplejson as json
import sys
import re
import re
import string
import unicodedata
import os
class TweetParser(object):
"""docstring for TweetParser
Recibe un tweet en bruto y retorna un tweet con una representación más ligera
Se especifican fields: atributos del tweet los cuales pueden ser ['id','created_at','retweet_count','favorite_count']
Se especifica si se desar incluir hashtags, mentions y urls
Informacion de geolocalizacion y del usuario se incluye por defecto
"""
def __init__(self, fields, **kwargs):
super(TweetParser, self).__init__()
self.fields = fields
self.mentionsFlag = kwargs['mentionsFlag']
self.hashtagsFlag = kwargs['hashtagsFlag']
self.urlsFlag = kwargs['urlsFlag']
self.userFlag = kwargs['userFlag']
self.coordinatesFlag = kwargs['coordinatesFlag']
self.placeFlag = kwargs['placeFlag']
def parse(self,rawTweet):
tweet = {}
for field in self.fields:
if(field in rawTweet):
tweet[field] = rawTweet[field]
if self.mentionsFlag:
tweet["mentions"] = self.getMentions(rawTweet)
if self.hashtagsFlag:
tweet["hashtags"] = self.getHashtags(rawTweet)
if self.urlsFlag:
tweet["urls"] = self.getUrls(rawTweet)
if self.userFlag:
tweet["user"] = self.getUser(rawTweet)
if self.coordinatesFlag:
tweet["coordinates"] = self.getCoordinates(rawTweet)
if self.placeFlag:
tweet['place'] = self.getPlace(rawTweet)
return tweet
def getUser(self,rawTweet,fields=['id','screen_name','followers_count','friends_count']):
user = {}
for field in fields:
if(field in rawTweet):
user[field] = rawTweet["user"][field]
return user
def getCoordinates(self,rawTweet):
coordinates = {}
if ("coordinates" in rawTweet):
if(rawTweet["coordinates"]):
coordinates["longitud"] = rawTweet["coordinates"]["coordinates"][0]
coordinates["latitud"] = rawTweet["coordinates"]["coordinates"][1]
return coordinates
def getMentions(self,rawTweet):
mentions = []
mentionsData = rawTweet["entities"]["user_mentions"]
if len(mentionsData)>0:
for mention in mentionsData:
mentions.append(mention["screen_name"])
return mentions
def getHashtags(self,rawTweet):
hashtags = []
hashtagsData = rawTweet["entities"]["hashtags"]
if len(hashtagsData)>0:
for hashtag in hashtagsData:
hashtags.append(hashtag["text"].lower())
return hashtags
def getUrls(self,rawTweet):
urls = []
urlsData = rawTweet["entities"]["urls"]
if len(urlsData)>0:
for url in urlsData:
urls.append(url["url"])
return urls
def getPlace(self, rawTweet):
    """Return {'country', 'bounding_box'} from the tweet's place info,
    or None when the tweet has no place attached."""
    placeData = rawTweet['place']
    if not placeData:
        return None
    return {
        'country': placeData['country'],
        'bounding_box': placeData['bounding_box'],
    }
|
{
"content_hash": "3f2e84b3f1352255a8084617923ef9e0",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 118,
"avg_line_length": 27.26732673267327,
"alnum_prop": 0.7051561365286856,
"repo_name": "edmunoz/aed",
"id": "34d6f8c2553e9ffc71aaeec84140d5fca71601a4",
"size": "2780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter/TweetParser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "47709"
}
],
"symlink_target": ""
}
|
"""Test multi-worker Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import functools
import json
import os
import sys
import threading
from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import keras
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import backend
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import models
from tensorflow.python.keras import optimizer_v1
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import kpl_test_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# pylint: disable=g-direct-tensorflow-import
def _clone_and_build_model(model, strategy):
  """Clone `model` inside `strategy`'s scope and compile the clone identically."""
  # The new "original" model in worker 0; created under the strategy scope so
  # its variables are distributed.
  with strategy.scope():
    replica = models.clone_model(model)

  # Compile and build model.
  if isinstance(model.optimizer, optimizer_v1.TFOptimizer):
    # TODO(yuefengz): figure out why the optimizer here is still a
    # TFOptimizer.
    unwrapped = model.optimizer
    while isinstance(unwrapped, optimizer_v1.TFOptimizer):
      unwrapped = unwrapped.optimizer
    opt = copy.deepcopy(unwrapped)
  else:
    opt = type(model.optimizer).from_config(model.optimizer.get_config())

  replica.compile(
      opt,
      model.loss,
      metrics=metrics_module.clone_metrics(model._compile_metrics),
      loss_weights=model.loss_weights,
      sample_weight_mode=model.sample_weight_mode,
      weighted_metrics=metrics_module.clone_metrics(
          model._compile_weighted_metrics))
  return replica
# TODO(b/123918215): Possibly merge this Callback with keras_test.Counter.
class MultiWorkerVerificationCallback(callbacks.Callback):
  """MultiWorkerVerificationCallback verifies the callbacks in multi-worker scheme.

  This Callback is intended to be used for verifying the callback is indeed
  called the correct number of times in various task types.

  Attributes:
    _task_dict: A nested dictionary storing the number of times a callback has
        been called in specific task type, task index, and method name.
        Look up structure is
        task_name -> task_id -> tracking_method_name -> invoke_count
        For example, a _task_dict of
        {
            'ps': {
                 0: {
                     'on_epoch_begin': 2
                 },
                 1: {
                     'on_epoch_begin': 2
                 }
            },
            'worker': {
                 0: {
                     'on_epoch_begin': 2
                 },
                 1: {
                     'on_epoch_begin': 2
                 }
            }
        }
        indicates the ps task has 'on_epoch_begin' called twice on each
        of the two indices, and likewise for worker task.
  """

  # TODO(rchao): Add other method calls to verify.
  METHODS_TO_VERIFY = ['on_epoch_begin']

  def __init__(self, num_epoch, num_worker):
    """Initialize a MultiWorkerVerificationCallback.

    Args:
      num_epoch: Number of epochs this Callback is expected to be called for.
      num_worker: Number of workers this Callback is expected to be called from.
    """
    super(MultiWorkerVerificationCallback, self).__init__()
    self._num_epoch = num_epoch
    self._num_worker = num_worker
    # Counts per task type / task index / method name; defaultdicts so the
    # first increment needs no initialization.
    self._task_dict = {
        key: collections.defaultdict(lambda: collections.defaultdict(int))
        for key in ['ps', 'worker']
    }
    # Guards concurrent increments when the callback is shared across threads.
    self._lock = threading.Lock()
    self._is_between_graph = None
    self.wrap_methods(self.METHODS_TO_VERIFY)

  @property
  def is_between_graph(self):
    return self._is_between_graph

  @is_between_graph.setter
  def is_between_graph(self, is_between_graph):
    self._is_between_graph = is_between_graph

  def wrap_methods(self, method_names):
    """Wrap methods so that the counts of calls are tracked.

    Args:
      method_names: A list of names of methods to track calls.
    """
    for method_name in method_names:
      method = getattr(self, method_name)

      def wrapped_method(method_to_wrap, name, *arg, **kwargs):
        # Use lock to ensure += operation is thread-safe.
        with self._lock:
          # The worker identity is read from TF_CONFIG at call time.
          task_config = json.loads(os.environ['TF_CONFIG'])['task']
          self._task_dict[task_config['type']][task_config['index']][name] += 1
        method_to_wrap(*arg, **kwargs)

      # functools.partial binds the current `method`/`method_name`, avoiding
      # the late-binding closure pitfall inside this loop.
      setattr(self, method_name,
              functools.partial(wrapped_method, method, method_name))

  def verify(self, test_case):
    # Each tracked method is expected exactly once per epoch.
    method_count_dict = {
        method_name: self._num_epoch for method_name in self.METHODS_TO_VERIFY
    }
    assert self._is_between_graph is not None
    if self._is_between_graph:
      # TODO(b/124171024): In between-graph replication, by default only the
      # chief calls callback. Fix this test to cover that, as well as the rare
      # cases where all workers call.
      worker_call_count = {
          i: method_count_dict for i in range(0, self._num_worker)
      }
    else:
      # If in-graph, only the first worker calls callback methods.
      worker_call_count = {0: method_count_dict}
    test_case.assertDictEqual(
        self._task_dict,
        {
            # PS' callback is not supposed to be called.
            'ps': {},
            # Each of the Worker should be called num_epoch of times.
            'worker': worker_call_count
        })
class KerasMultiWorkerTestIndependentWorker(test_base.IndependentWorkerTestBase,
                                            parameterized.TestCase):
  """Tests Keras training with independent workers simulated as threads."""

  @ds_combinations.generate(
      combinations.combine(
          mode=['graph'],
          strategy_cls=[
              collective_strategy.CollectiveAllReduceStrategy,
          ],
          required_gpus=[0, 1]))
  def testSimpleModelIndependentWorkerSync(self, strategy_cls):
    num_workers = 2
    num_epoch = 2

    cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
    self._barrier = dc._Barrier(2)

    # The verification callback will be shared by multiple threads.
    verification_callback = MultiWorkerVerificationCallback(
        num_epoch=num_epoch, num_worker=num_workers)

    def _independent_worker_fn(*args, **kwargs):  # pylint: disable=unused-argument
      """Simulates an Independent Worker inside of a thread."""
      # The std server is mocked so no real gRPC server is started per worker.
      with test.mock.patch.object(dc, '_run_std_server',
                                  self._make_mock_run_std_server()):
        strategy = strategy_cls()
        verification_callback.is_between_graph = \
            strategy.extended.experimental_between_graph
        batch_size = 64
        steps = 2
        train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
            batch_size, steps)
        with strategy.scope():
          model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
        # Loss before training; used below to check that training improved it.
        orig_loss, _ = model.evaluate(train_ds, steps=steps)
        callbacks_for_fit = nest.flatten(
            kwargs.get('verification_callback', []))
        history = model.fit(
            x=train_ds,
            epochs=num_epoch,
            steps_per_epoch=steps,
            callbacks=callbacks_for_fit)
        self.assertIsInstance(history, keras.callbacks.History)
        trained_loss, _ = model.evaluate(train_ds, steps=steps)
        self.assertLess(trained_loss, orig_loss)

    threads = self.run_multiple_tasks_in_threads(
        _independent_worker_fn,
        cluster_spec,
        verification_callback=verification_callback)

    threads_to_join = []
    # NOTE(review): a fresh strategy instance is created here only to read
    # experimental_between_graph — presumably cheap outside a cluster; confirm.
    strategy = strategy_cls()
    if strategy.extended.experimental_between_graph:
      for ts in threads.values():
        threads_to_join.extend(ts)
    else:
      # In-graph replication: only worker 0 drives training.
      threads_to_join = [threads['worker'][0]]
    self.join_independent_workers(threads_to_join)
    verification_callback.verify(self)
class KPLMultiWorkerTest(test.TestCase,
                         parameterized.TestCase):
  """Tests Keras preprocessing layers (KPL) under multi-worker strategies."""

  @ds_combinations.generate(
      combinations.combine(
          mode=['eager'],
          use_adapt=[False],  # TODO(b/180742437): Add tests for using adapt.
          strategy=[
              strategy_combinations.multi_worker_mirrored_2x1_gpu,
              strategy_combinations.multi_worker_mirrored_2x2_gpu,
          ]))
  def testTrainAndServeWithKPL(self, use_adapt, strategy):
    test_utils_obj = kpl_test_utils.DistributeKplTestUtils()
    # KPLs, model, optimizer and metric must all be created under the
    # strategy scope so their variables are mirrored across workers.
    with strategy.scope():
      feature_mapper, label_mapper = test_utils_obj.define_kpls_for_training(
          use_adapt)
      model = test_utils_obj.define_model()
      optimizer = rmsprop.RMSprop(learning_rate=0.1)
      accuracy = keras.metrics.Accuracy()

    def dataset_fn(_):
      return test_utils_obj.dataset_fn(feature_mapper, label_mapper)

    @def_function.function
    def train_step(iterator):
      """The step function for one training step."""

      def step_fn(inputs):
        """The computation to run on each worker."""
        features, labels = inputs
        with backprop.GradientTape() as tape:
          pred = model(features, training=True)
          loss = keras.losses.binary_crossentropy(labels, pred)
          loss = nn.compute_average_loss(loss)
        grads = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(list(zip(grads, model.trainable_variables)))
        # Binary prediction threshold at 0.5 for the accuracy metric.
        actual_pred = math_ops.cast(math_ops.greater(pred, 0.5), dtypes.int64)
        accuracy.update_state(labels, actual_pred)

      strategy.run(step_fn, args=(next(iterator),))

    distributed_dataset = strategy.distribute_datasets_from_function(
        dataset_fn)
    distributed_iterator = iter(distributed_dataset)
    num_epochs = 4
    num_steps = 7
    for _ in range(num_epochs):
      accuracy.reset_state()
      for _ in range(num_steps):
        train_step(distributed_iterator)

    self.assertGreater(accuracy.result().numpy(), 0.5)
    self.assertEqual(optimizer.iterations.numpy(), num_epochs * num_steps)

    # Test save/load/serving the trained model.
    test_utils_obj.test_save_load_serving_model(
        model, feature_mapper, test_utils_obj.define_reverse_lookup_layer())
if __name__ == '__main__':
  # Enable manual variable initialization to make sure variables are initialized
  # by `init_restore_or_wait_for_variables`.
  backend.manual_variable_initialization(True)
  # sys.exit is patched with os._exit — presumably so worker subprocesses
  # terminate immediately without interpreter cleanup; confirm against
  # multi_process_runner requirements.
  with test.mock.patch.object(sys, 'exit', os._exit):
    multi_process_runner.test_main()
|
{
"content_hash": "63738db256d3fb8d477793f3b8af894f",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 94,
"avg_line_length": 37.782467532467535,
"alnum_prop": 0.6529174185786715,
"repo_name": "petewarden/tensorflow",
"id": "a5597d00a3017529139a8b47d95399fbf3f003f1",
"size": "12326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/distribute/multi_worker_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31796"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "895451"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82100676"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867248"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "984477"
},
{
"name": "Jupyter Notebook",
"bytes": "550862"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1982867"
},
{
"name": "Makefile",
"bytes": "66496"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "317461"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37425809"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700106"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3613406"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
import json
import logging
import random
import signal
from datetime import datetime, timedelta
from enum import IntEnum
from itertools import chain
from threading import Thread
from time import sleep
import six
from django.conf import settings
from django.db import DatabaseError
from django.db import models
from django.db import transaction
from django.db.utils import IntegrityError
from django.utils import timezone
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as __, ugettext_lazy as _
from django_tasker.exceptions import RetryLaterException
logging = logging.getLogger(__name__)
class ChoicesIntEnum(IntEnum):
    """IntEnum extended with Django ``choices`` generation capability."""

    @classmethod
    def choices(cls):
        """Return (value, localized human-readable label) pairs for model fields."""
        pairs = []
        for member in cls:
            label = member.name.replace("_", " ").capitalize()
            pairs.append((member.value, __(label)))
        return pairs

    @classmethod
    def values(cls):
        """Return all raw integer values of the enum, in definition order."""
        return [member.value for member in cls]
class QueueStatus(ChoicesIntEnum):
    # Queue lifecycle states; TaskWorker.run_queues only starts workers for
    # queues in the `enabled` state.
    enabled = 0
    disabled = 1
class TaskWorker(object):
    """Long-running worker that polls a single TaskQueue and processes batches.

    An instance is a callable intended to be used as a thread target; call
    request_stop() from another thread for a warm shutdown.
    """

    def __init__(self, queue):
        self.queue = queue
        self._stop_requested = False
        # Current exponential back-off in seconds; None while the queue is healthy.
        self.back_off_seconds = None
        # Number of completed run_once() iterations; drives periodic cleanup.
        self.run_count = 0
        # retry_busy_timeouts() runs once every `cleanup_rate` iterations.
        self.cleanup_rate = self.queue.rate_limit or 5000

    def __call__(self):
        """Thread entry point: loop run_once() until request_stop() is called."""
        logging.info("Worker booting for queue: %s", self.queue)
        while True:
            if self._stop_requested:
                logging.info('Stopping on request')
                break
            self.run_once()

    def run_once(self):
        """Run one poll/process iteration with error back-off and idle sleep."""
        queue = self.queue
        try:
            logging.debug("run_once: %s", self.queue)
            if self.run_count % self.cleanup_rate == 0:
                queue.retry_busy_timeouts()
            # Renamed from the misspelled `emtpy_run`.
            empty_run = queue.process_batch()
        except Exception as ex:
            self.back_off_seconds = queue.on_error_back_off(self.back_off_seconds, ex)
        else:
            self.back_off_seconds = None
            if empty_run:
                seconds = getattr(settings, 'TASKER_SLEEP_TIME', 60)
                logging.debug("Queue %s had empty run, it will sleep for %s seconds", queue.name, seconds)
                sleep(seconds)
        # Bug fix: run_count was never incremented, so `run_count % cleanup_rate`
        # was always 0 and retry_busy_timeouts() ran on every single iteration.
        self.run_count += 1

    def request_stop(self):
        """Ask the worker loop to exit after the current iteration."""
        self._stop_requested = True

    @classmethod
    def run_queues(cls, queue_names):
        """Start one worker thread per enabled queue (filtered by queue_names)."""
        logging.info("Running workers for queues: %s if they are enabled", queue_names)
        qry = TaskQueue.objects.filter(status=QueueStatus.enabled)
        if queue_names:
            qry = qry.filter(name__in=queue_names)
        workers = [cls(q) for q in qry]
        threads = [Thread(target=w) for w in workers]
        for t in threads:
            t.start()
        cls.setup_signals(workers)

    @classmethod
    def setup_signals(cls, workers):
        """Install SIGINT/SIGTERM handlers that warm-shut-down all workers."""

        def request_workers_stop(signum, frame):
            logging.info("Warm shut down requested: %s", signum)
            for w in workers:
                w.request_stop()

        # TODO: handle signals correctly
        signal.signal(signal.SIGINT, request_workers_stop)
        signal.signal(signal.SIGTERM, request_workers_stop)

        # NOTE(review): diagnostic dump of all installed signal handlers —
        # looks like debug leftover; consider removing or using logging.
        signals_to_names = {}
        for n in dir(signal):
            if n.startswith('SIG') and not n.startswith('SIG_'):
                signals_to_names[getattr(signal, n)] = n
        for s, name in sorted(signals_to_names.items()):
            handler = signal.getsignal(s)
            if handler is signal.SIG_DFL:
                handler = 'SIG_DFL'
            elif handler is signal.SIG_IGN:
                handler = 'SIG_IGN'
            print('%-10s (%2d):' % (name, s), handler)
class TaskQueue(models.Model):
    """A named task queue with rate limiting and error back-off configuration."""

    name = models.CharField(max_length=100, default='default', unique=True)
    # Maximum tasks per hour; falsy disables throttling (see throttle()).
    rate_limit = models.PositiveSmallIntegerField(null=True, blank=True, help_text='Maximum number of tasks to run per hour')
    status = models.PositiveSmallIntegerField(default=QueueStatus.enabled, choices=QueueStatus.choices())
    # Exponential back-off parameters consumed by on_error_back_off().
    back_off_base_seconds = models.PositiveSmallIntegerField(default=60)
    back_off_max_seconds = models.PositiveIntegerField(default=86400)
    back_off_multiplier = models.FloatField(default=4)
    # Tasks stuck in `busy` longer than this get re-queued by retry_busy_timeouts().
    busy_max_seconds = models.PositiveIntegerField(default=3600)

    def __init__(self, *args, **kwargs):
        super(TaskQueue, self).__init__(*args, **kwargs)
        # NOTE(review): time_interval is only defined when rate_limit is truthy;
        # throttle() guards on rate_limit before touching it.
        if self.rate_limit:
            self.time_interval = timedelta(seconds=3600 / self.rate_limit)

    def __str__(self):
        return "TaskQueue:{}:{}.{}".format(self.pk, self.name, self.get_status_display())

    def process_batch(self, limit=100):
        """Run one batch of due tasks; return True when nothing was due."""
        batch = list(self.get_batch(limit))
        # Shuffle so concurrent workers are less likely to fight over the same rows.
        random.shuffle(batch)
        empty_run = True
        for pk in batch:
            empty_run = False
            start = datetime.now()
            # Only throttle when this worker actually claimed and ran the task.
            if TaskInfo.process_one(pk):
                self.throttle(datetime.now() - start)
        return empty_run

    def get_batch(self, limit, flat=True):
        """Chain per-target batches of due task ids (or rows when flat=False)."""
        return chain(*[self._get_one_batch(limit, target_id, flat) for target_id in self.targets])

    def _get_one_batch(self, limit, target_id, flat=True):
        # Due = eta in the past and status queued or retry, for one target.
        logging.debug("limit: %s on target_id = %s", limit, target_id)
        qry = TaskInfo.objects.filter(eta__lte=timezone.now(), status__in=(TaskStatus.queued, TaskStatus.retry), target_id=target_id)
        # qry = qry.order_by('eta') # Ordering seems to introduce performance issues
        if flat:
            qry = qry.values_list('id', flat=True)
        return qry[:limit]

    @property
    def targets(self):
        # Cached list of TaskTarget ids belonging to this queue (per instance).
        if not hasattr(self, '_targets'):
            self._targets = list(TaskTarget.objects.filter(queue=self).values_list('id', flat=True))
        return self._targets

    def throttle(self, duration):
        """Sleep out the remainder of the per-task slot implied by rate_limit."""
        if self.rate_limit:
            wait = self.time_interval - duration
            if wait > timedelta():
                logging.debug("Throttle limiting for seconds: %s", wait.total_seconds())
                sleep(wait.seconds)

    def on_error_back_off(self, seconds, ex):
        """Sleep with exponential back-off after an error; return the new value."""
        if seconds is None:
            seconds = self.back_off_base_seconds
        else:
            seconds *= self.back_off_multiplier
        logging.error("Work failed on %s, backing off for %s seconds", self.name, seconds, exc_info=ex)
        # Sleep is capped, but the returned value keeps growing unbounded.
        sleep(min(seconds, self.back_off_max_seconds))
        return seconds

    def retry_busy_timeouts(self):
        """Re-queue tasks stuck in `busy` longer than busy_max_seconds."""
        logging.debug("retry_busy_timeouts: %s", self)
        when = timezone.now() - timedelta(seconds=self.busy_max_seconds)
        rows = TaskInfo.objects.filter(ts__lte=when, status=TaskStatus.busy, target_id__in=self.targets).update(status=TaskStatus.retry)
        if rows:
            logging.info("Retrying busy %s timeouts in %s queue", rows, self)
        return rows
@six.python_2_unicode_compatible
class TaskTarget(models.Model):
    """A registered task callable bound to a queue with a retry limit."""
    # Dotted-path name of the callable; built by TaskInfo.get_target_name().
    name = models.CharField(max_length=100, unique=True)
    queue = models.ForeignKey(TaskQueue, on_delete=models.CASCADE)
    max_retries = models.PositiveSmallIntegerField(default=5)

    def __str__(self):
        return self.name
class TaskStatus(ChoicesIntEnum):
    """Lifecycle states of a TaskInfo row."""
    created = 0    # instantiated but not yet queued/saved
    queued = 1     # saved and waiting for a worker
    eager = 2      # TASKER_ALWAYS_EAGER: executed synchronously on queue()
    retry = 3      # failed (or busy-timeout) and rescheduled
    busy = 4       # claimed by a worker, execution in progress
    success = 5    # completed without error
    error = 6      # failed after exceeding max_retries (see TaskInfo.error)
    corrupted = 7  # call could not even be resolved/prepared (see TaskInfo.execute)
@six.python_2_unicode_compatible
class TaskInfo(models.Model):
    """One queued invocation of a task target, with retry/state bookkeeping."""

    created = models.DateTimeField(auto_now_add=True)
    # When the task finished successfully (see success()).
    executed = models.DateTimeField(blank=True, null=True)
    ts = models.DateTimeField(auto_now=True, db_index=True)
    retry_count = models.PositiveSmallIntegerField(default=0, db_index=True)
    # Earliest time at which the task may run.
    eta = models.DateTimeField(null=True, blank=True, db_index=True)
    target = models.ForeignKey(TaskTarget, db_index=True, on_delete=models.CASCADE)
    # JSON-encoded call arguments and optional model pk; see _get_payload().
    payload = models.CharField(max_length=300, null=True, blank=True)
    status = models.IntegerField(default=TaskStatus.created, choices=TaskStatus.choices(), db_index=True)
    # NOTE(review): blank=None looks like a typo for blank=True — confirm.
    status_message = models.TextField(default=None, blank=None, null=True)
    # Optional unique name used to de-duplicate tasks at save time.
    name = models.CharField(max_length=300, null=True, blank=True, unique=True)

    class Meta:
        index_together = (
            ('status', 'eta'),  # Used by TaskQueue.get_batch
            ('status', 'eta', 'target'),  # Used by TaskQueue.get_batch
            ('status', 'ts'),  # Used by TaskQueue.retry_busy_timeouts
            ('status', 'eta', 'target', 'id'),
            # ('id', 'eta', 'status'),
            # ('id', 'target'),
            # ('id', 'target', 'status', 'eta'),
            ('target', 'eta'),  # TaskInfo.is_unique?
            ('target', 'status'),
        )

    def __str__(self):
        return "TaskInfo:{}:{}:{}:{}:{}".format(self.pk, self.get_status_display(), self.target, self.retry_count, self.eta)

    @classmethod
    def setup(cls, target, instance, queue='default', rate_limit=None, countdown=0, eta=None, max_retries=5, name=None):
        """Build an unsaved TaskInfo for `target`, lazily creating its queue/target rows."""
        logging.debug("method.__name__: %s", target.__name__)
        now = timezone.now()
        # Explicit eta wins; otherwise schedule `countdown` seconds from now.
        eta = eta or (now + timedelta(seconds=countdown))
        target_name = cls.get_target_name(target, instance)
        target = TaskTarget.objects.filter(name=target_name).first()
        if target is None:
            # First time this callable is queued: create queue and target rows.
            queue, created = TaskQueue.objects.get_or_create(name=queue, defaults={'rate_limit': rate_limit})
            target, created = TaskTarget.objects.get_or_create(name=target_name, defaults={'queue': queue, 'max_retries': max_retries})
        eager = getattr(settings, 'TASKER_ALWAYS_EAGER', None)
        task = cls(target=target, eta=eta, status=TaskStatus.eager if eager else TaskStatus.queued, name=name)
        # Plain attribute (not a model field); consumed later by _get_payload().
        task.instance = instance
        return task

    @staticmethod
    def get_target_name(target, instance):
        """Return the dotted-path name of the callable (including owning class if bound)."""
        instance = instance or getattr(target, '__self__', None)
        # class methods will have __self__ set with class
        if instance and not isinstance(instance, type):
            target_name = '.'.join((instance.__module__, instance.__class__.__name__, target.__name__))
        else:
            target_name = '.'.join((target.__module__, target.__qualname__))
        return target_name

    def queue_once(self, *args, **kwargs):
        """Queue this task only if another similar task does not exist already"""
        payload = self._get_payload(args, kwargs)
        if self.is_unique(payload):
            return self._queue_payload(payload)

    def is_unique(self, payload):
        """Return True when no other task with the same eta/target/payload exists."""
        assert self.pk is None, "Checking for uniques is not supported for saved tasks"
        return not TaskInfo.objects.filter(
            eta=self.eta,
            target=self.target,
            payload=payload,
        ).exists()

    def queue(self, *args, **kwargs):
        """Serialize the call arguments and persist this task for execution."""
        payload = self._get_payload(args, kwargs)
        return self._queue_payload(payload)

    def _queue_payload(self, payload):
        self.payload = payload
        try:
            self.save()
        except IntegrityError as ex:
            # Duplicate unique `name` (SQLite-specific message text): treat as
            # already queued and return without executing.
            if ex.args and ex.args[0] == 'UNIQUE constraint failed: django_tasker_taskinfo.name':
                logging.info("Duplicate task name not saved: %s", self)
                return self
            # NOTE(review): any other IntegrityError is silently swallowed here;
            # confirm this is intentional rather than a missing `raise`.
            pass
        if self.status == TaskStatus.eager:
            # Eager mode runs the task synchronously right after save.
            self.execute()
        return self

    def _get_payload(self, args, kwargs):
        """Return a JSON string of args/kwargs plus the bound instance pk, or None."""
        payload = {}
        if args:
            payload['args'] = args
        if kwargs:
            payload['kwargs'] = kwargs
        if isinstance(self.instance, models.Model):
            assert hasattr(self.instance, 'pk'), "Model instance must have a 'pk' attribute, so task can store it for retrieval before execution"
            pk = getattr(self.instance, 'pk')
            assert pk is not None, "Model instance must be saved and have a 'pk' value, before it's method can be queued. Alternatively you can use queue a classmethod without pk set"
            payload['pk'] = pk
        return json.dumps(payload) if payload else None

    def execute(self):
        """Resolve the target callable and run it, recording the outcome."""
        logging.info("Executing task : %s", self)
        try:
            target, args, kwargs = self.prepare_call()
            self._execute_call(target, args, kwargs)
        except Exception as ex:
            # Failing to even resolve/prepare the call marks the task corrupted.
            logging.warning("{} execution failed".format(str(self)), exc_info=ex)
            self.error(self.get_error_status_message(ex), status=TaskStatus.corrupted)
        logging.info("Executing complete : %s", self)

    def _execute_call(self, target, args, kwargs):
        try:
            # Run inside a transaction so the task's writes roll back on error.
            with transaction.atomic():
                target(*args, **kwargs)
        except RetryLaterException as ex:
            self.retry(ex)
        except Exception as ex:
            self.error(ex)
        else:
            self.success()

    def prepare_call(self):
        """Deserialize payload and resolve the dotted target into a callable plus args."""
        payload = json.loads(self.payload) if self.payload else {}
        args = payload.get('args', [])
        kwargs = payload.get('kwargs', {})
        pk = payload.get('pk', None)
        where, target = self.target.name.rsplit('.', 1)
        where = import_string(where)
        if pk:
            # Instance method: re-fetch the owning model instance by pk.
            where = where.objects.get(pk=pk)
        target = getattr(where, target)
        return target, args, kwargs

    @classmethod
    def process_one(cls, pk):
        """Atomically claim task `pk` (queued/retry -> busy) and execute it.

        Returns the task, or None when another worker already claimed it.
        """
        logging.debug("process_one: %s", pk)
        rows = cls.objects.filter(pk=pk, status__in=(TaskStatus.queued, TaskStatus.retry)).update(status=TaskStatus.busy)
        if rows < 1:
            return
        task = cls.objects.get(pk=pk)
        task.execute()
        return task

    def success(self):
        self.status = TaskStatus.success
        self.executed = timezone.now()
        self.save()

    def retry(self, ex, status=TaskStatus.retry):
        """Reschedule at ex.eta on the task's own request (RetryLaterException)."""
        # This is a task controlled retry, it does not count toward max_retries
        # if tasks wants to retry indefinitely we will not object
        if getattr(settings, 'TASKER_ALWAYS_EAGER', None):
            logging.error("Failing permanently on task in eager mode", exc_info=ex)
            # there is no point in retrying this in eager mode, it fail each time
            return
        logging.warning("Retrying on task request", exc_info=ex)
        self.eta = ex.eta
        self.status_message = self.get_error_status_message(ex)
        self.status = status
        self.save()

    def error(self, ex, status=TaskStatus.error):
        """Record a failure; schedule a back-off retry until max_retries is exceeded."""
        logging.error("{} execution failed".format(str(self)), exc_info=True)
        self.status = status
        self.status_message = self.get_error_status_message(ex)
        self.retry_count += 1
        if self.retry_count <= self.target.max_retries:
            self.status = TaskStatus.retry
            countdown = get_retry_countdown(self.retry_count)
            self.eta = timezone.now() + timedelta(seconds=countdown)
        else:
            logging.error("Exceed max_retries on task %s", self, exc_info=ex)
        self.save()

    # noinspection PyMethodMayBeStatic
    def get_error_status_message(self, ex):
        return str(ex)
def get_retry_countdown(retries):
    """Return the retry back-off delay in seconds for the given attempt number.

    Known attempts 0-3 map to an increasing schedule; anything else
    (including attempts beyond 3) falls back to one hour.
    """
    if retries == 0:
        return 30
    if retries == 1:
        return 60
    if retries == 2:
        return 300
    if retries == 3:
        return 1200
    return 3600
|
{
"content_hash": "e3107bedf3d90219e88aabb0ef9f6db2",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 183,
"avg_line_length": 38.55897435897436,
"alnum_prop": 0.615241388482511,
"repo_name": "wooyek/django-tasker",
"id": "7d2b16d8c8c581e8ee7b435c8c7330a56345e5bc",
"size": "15053",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "django_tasker/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69412"
}
],
"symlink_target": ""
}
|
import json
from thrift.Thrift import TType
class ThriftJSONDecoder(json.JSONDecoder):
    """A decoder that makes python thrift structs JSON deserializable via the standard python json module.

    Pass this decoder when reading json, like this:
    json.loads(str, cls=text.ThriftJSONDecoder, <other kwargs>)

    Note that this is not a full protocol implementation in the thrift sense. This is just a quick-and-easy
    parser for unittests etc.
    """
    ROOT_THRIFT_CLASS = 'root_thrift_class'
    IS_STR = 'is_str'
    # Whether decode() receives a JSON string (True) or an already-parsed dict (False).
    mIsStr = None

    def __init__(self, *args, **kwargs):
        # Pop our custom kwargs before delegating; JSONDecoder rejects unknown ones.
        self.mIsStr = kwargs.pop(ThriftJSONDecoder.IS_STR)
        self.root_thrift_class = kwargs.pop(ThriftJSONDecoder.ROOT_THRIFT_CLASS)
        super(ThriftJSONDecoder, self).__init__(*args, **kwargs)

    def decode(self, json_obj):
        """Decode json_obj (string or parsed dict, per mIsStr) into root_thrift_class."""
        if self.mIsStr:
            parsed = super(ThriftJSONDecoder, self).decode(json_obj)
        else:
            parsed = json_obj
        return self._convert(parsed, TType.STRUCT,
                             (self.root_thrift_class, self.root_thrift_class.thrift_spec))

    def _convert(self, val, ttype, ttype_info):
        """Recursively convert a parsed-JSON value to the thrift type described by ttype/ttype_info."""
        if ttype == TType.STRUCT:
            (thrift_class, thrift_spec) = ttype_info
            ret = thrift_class()
            for field in thrift_spec:
                if field is not None:
                    (tag, field_ttype, field_name, field_ttype_info, dummy) = field
                    if field_name in val:
                        converted_val = self._convert(val[field_name], field_ttype, field_ttype_info)
                        setattr(ret, field_name, converted_val)
        elif ttype == TType.LIST:
            (element_ttype, element_ttype_info) = ttype_info
            ret = [self._convert(x, element_ttype, element_ttype_info) for x in val]
        elif ttype == TType.SET:
            (element_ttype, element_ttype_info) = ttype_info
            # Bug fix: was `element__ttype_info` (double underscore) -> NameError.
            ret = set([self._convert(x, element_ttype, element_ttype_info) for x in val])
        elif ttype == TType.MAP:
            (key_ttype, key_ttype_info, val_ttype, val_ttype_info) = ttype_info
            # Bug fix: was `self._converT` (capital T typo) -> AttributeError.
            ret = dict([(self._convert(k, key_ttype, key_ttype_info),
                         self._convert(v, val_ttype, val_ttype_info)) for (k, v) in val.iteritems()])
        elif ttype == TType.STRING:
            ret = unicode(val)
        elif ttype == TType.DOUBLE:
            ret = float(val)
        elif ttype == TType.I64:
            ret = long(val)
        elif ttype == TType.I32 or ttype == TType.I16 or ttype == TType.BYTE:
            ret = int(val)
        elif ttype == TType.BOOL:
            # Normalize any truthy/falsy JSON value to a real bool.
            ret = not not val
        else:
            # Call form of raise (the old `raise Exception, msg` is py2-only syntax).
            raise Exception('Unrecognized thrift field type: %d' % ttype)
        return ret
def json_to_thrift(json_dict, root_thrift_class):
    """ A utility shortcut function to parse a thrift json object of the specified class."""
    # NOTE(review): passes an already-parsed object through json.loads and relies
    # on the custom decoder's is_str=False path to skip the real JSON parse —
    # confirm on py3, where json.loads rejects non-string input up front.
    return json.loads(json_dict, is_str=False, cls=ThriftJSONDecoder, root_thrift_class=root_thrift_class)
def jsonstr_to_thrift(json_str, root_thrift_class):
    """ A utility shortcut function to parse a thrift json object of the specified class."""
    # is_str=True: the decoder performs a real JSON parse of json_str first.
    return json.loads(json_str, is_str=True, cls=ThriftJSONDecoder, root_thrift_class=root_thrift_class)
# vim: set expandtab ts=4 sw=4:
|
{
"content_hash": "5800afaa4650895739b3ef09aae311f8",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 107,
"avg_line_length": 36.64367816091954,
"alnum_prop": 0.678168130489335,
"repo_name": "duydb2/ZTC",
"id": "424adf70a915d175ca1db4df1955d2b891ab65e1",
"size": "3188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atc/django-atc-api/atc_api/thrift_json_decoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "668"
},
{
"name": "HTML",
"bytes": "11775"
},
{
"name": "JavaScript",
"bytes": "34376"
},
{
"name": "Makefile",
"bytes": "3228"
},
{
"name": "Python",
"bytes": "160618"
},
{
"name": "Ruby",
"bytes": "28413"
},
{
"name": "Shell",
"bytes": "7912"
},
{
"name": "Thrift",
"bytes": "3537"
}
],
"symlink_target": ""
}
|
from .entity_health_state import EntityHealthState
class ServiceHealthState(EntityHealthState):
    """Represents the health state of a service, which contains the service
    identifier and its aggregated health state.

    :param aggregated_health_state: Possible values include: 'Invalid', 'Ok',
     'Warning', 'Error', 'Unknown'
    :type aggregated_health_state: str
    :param service_name: Full name of the service.
    :type service_name: str
    """

    # Maps Python attribute names to their REST wire keys and serialization types.
    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
    }

    def __init__(self, aggregated_health_state=None, service_name=None):
        # The base class stores aggregated_health_state; only service_name is new here.
        super(ServiceHealthState, self).__init__(aggregated_health_state=aggregated_health_state)
        self.service_name = service_name
|
{
"content_hash": "78e9307e89902b610be576380731dc29",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 97,
"avg_line_length": 38.77272727272727,
"alnum_prop": 0.6811254396248535,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "2a60453cb87f0e793f591dce99cdf3b31cc5e5f4",
"size": "1327",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-servicefabric/azure/servicefabric/models/service_health_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
import subprocess, os, sys
from reverseZone_naming import reverseZone_name
from netaddr import *
zone_files_path="/etc/bind/zones"
def remove_forward_record():
host_name_to_be_removed= sys.argv[1]
domain_name= sys.argv[2]
os.chdir(zone_files_path)
forward_zone_file_name="{0}{1}".format("db.",domain_name)
readFiles = open(forward_zone_file_name,'r')
forward_zone_file_content = readFiles.read()
readFiles.close()
readFiles = open(forward_zone_file_name,'r')
lines = readFiles.readlines()
readFiles.close()
if host_name_to_be_removed in forward_zone_file_content:
file_content = open(forward_zone_file_name,'w')
for line in lines:
if not host_name_to_be_removed in line:
file_content.write(line)
file_content.close()
print "\nThe forward record that you entered has been removed!\n"
else:
print "\nThe record you wanted to remove is already absent in the database!\n"
def main():
    """Script entry point: delegate to remove_forward_record()."""
    remove_forward_record()


if __name__ == "__main__":
    # Guard the call so importing this module does not rewrite zone files.
    main()
|
{
"content_hash": "710a9ad277c4d50ae2269ad22f248512",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 27.2,
"alnum_prop": 0.7237394957983193,
"repo_name": "anilveeramalli/cloudify-azure-plugin",
"id": "d9fa065ffdae864cb7d21bd5e168488e9863ef7a",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blueprints/clustered-dns/dns/dns_remove_forward_record.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "241014"
},
{
"name": "Shell",
"bytes": "15254"
}
],
"symlink_target": ""
}
|
class Solution(object):
    def kthSmallest(self, root, k):
        """Return the k-th smallest value in a BST via iterative in-order traversal.

        :type root: TreeNode
        :type k: int
        :rtype: int
        """
        # Seed the stack with the left spine of the root; the next in-order
        # node is always on top of the stack.
        stack = []
        node = root
        while node:
            stack.append(node)
            node = node.left
        while True:
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            # Descend into the right subtree's left spine, if any.
            node = node.right
            while node:
                stack.append(node)
                node = node.left
|
{
"content_hash": "041f4bed63c0f0fcb88b5b06d24251d6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 43,
"avg_line_length": 24.44,
"alnum_prop": 0.41734860883797054,
"repo_name": "daicang/Leetcode-solutions",
"id": "e3f9c650aea6636a3cecbe24e3fbf8f9a51c839e",
"size": "813",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "230-kth-smallest-element-in-bst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91011"
},
{
"name": "Shell",
"bytes": "785"
}
],
"symlink_target": ""
}
|
import theano
import theano.tensor as T
import numpy as np
from util import *
class BaseGRULayer( object ):
    """
    Implements a GRU layer
    """
    def __init__(self, input_width, output_width, activation_shift=0.0, name=None, dropout_keep=1, dropout_input=False, dropout_output=True):
        """
        Params:
            input_width: Width of input
            output_width: Width of the GRU output
            activation_shift: How to shift the biases of the activation
            name: Optional prefix for the shared-variable names
            dropout_keep: Keep probability; 1 disables dropout entirely
            dropout_input: Whether a mask is applied to the layer input
            dropout_output: Whether a mask is applied to the new state
        """
        self._input_width = input_width
        self._output_width = output_width
        prefix = "" if name is None else name + "_"
        # Gate weights act on the concatenation [input, state], hence the
        # (input_width + output_width) fan-in for every W below.
        self._reset_W = theano.shared(init_params([input_width + output_width, output_width]), prefix+"reset_W")
        self._reset_b = theano.shared(init_params([output_width], shift=1.0), prefix+"reset_b")
        self._update_W = theano.shared(init_params([input_width + output_width, output_width]), prefix+"update_W")
        self._update_b = theano.shared(init_params([output_width], shift=1.0), prefix+"update_b")
        self._activation_W = theano.shared(init_params([input_width + output_width, output_width]), prefix+"activation_W")
        self._activation_b = theano.shared(init_params([output_width], shift=activation_shift), prefix+"activation_b")
        self._dropout_keep = dropout_keep
        self._dropout_input = dropout_input
        self._dropout_output = dropout_output
    @property
    def input_width(self):
        return self._input_width
    @property
    def output_width(self):
        return self._output_width
    @property
    def params(self):
        # All trainable shared variables of the layer.
        return [self._reset_W, self._reset_b, self._update_W, self._update_b, self._activation_W, self._activation_b]
    def initial_state(self, batch_size):
        """
        The initial state of the network
        Params:
            batch_size: The batch size to construct the initial state for
        """
        return T.zeros([batch_size, self.output_width])
    def dropout_masks(self, srng, use_output=None):
        """Build the list of dropout masks later consumed by step().

        Order matters: the input mask (if enabled) comes first, then the
        output mask; step() pops them in the same order.
        """
        if self._dropout_keep == 1:
            return []
        else:
            masks = []
            if self._dropout_input:
                masks.append(make_dropout_mask((self._input_width,), self._dropout_keep, srng))
            if self._dropout_output:
                if use_output is not None:
                    # Reuse a caller-supplied output mask instead of a new one.
                    masks.append(use_output)
                else:
                    masks.append(make_dropout_mask((self._output_width,), self._dropout_keep, srng))
            return masks
    def split_dropout_masks(self, dropout_masks):
        """Split a combined mask list into (this layer's masks, the rest)."""
        if dropout_masks is None:
            return [], None
        # Number of masks this layer owns: 0 when dropout is disabled,
        # otherwise one per enabled input/output flag (bools sum as ints).
        idx = (self._dropout_keep != 1) * (self._dropout_input + self._dropout_output)
        return dropout_masks[:idx], dropout_masks[idx:]
    def step(self, ipt, state, dropout_masks=Ellipsis):
        """
        Perform a single step of the network
        Params:
            ipt: The current input. Should be an int tensor of shape (n_batch, self.input_width)
            state: The previous state. Should be a float tensor of shape (n_batch, self.output_width)
            dropout_masks: Masks from get_dropout_masks
        Returns: The next output state
        """
        # Ellipsis is a sentinel: it distinguishes "masks not passed at
        # all" from an explicit None/list, which also requests that the
        # leftover mask list be returned alongside the new state.
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True
        if self._dropout_keep != 1 and self._dropout_input and dropout_masks is not None:
            ipt_masks = dropout_masks[0]
            ipt = apply_dropout(ipt, ipt_masks)
            dropout_masks = dropout_masks[1:]
        cat_ipt_state = T.concatenate([ipt, state], 1)
        # Standard GRU gating: reset and update gates from [input, state].
        reset = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._reset_W, self._reset_b )
        update = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._update_W, self._update_b )
        candidate_act = do_layer( T.tanh, T.concatenate([ipt, (reset * state)], 1),
                            self._activation_W, self._activation_b )
        # Blend previous state and candidate according to the update gate.
        newstate = update * state + (1-update) * candidate_act
        if self._dropout_keep != 1 and self._dropout_output and dropout_masks is not None:
            newstate_masks = dropout_masks[0]
            newstate = apply_dropout(newstate, newstate_masks)
            dropout_masks = dropout_masks[1:]
        if append_masks:
            return newstate, dropout_masks
        else:
            return newstate
|
{
"content_hash": "907725babe2eb4deee7188d9b818da1d",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 141,
"avg_line_length": 38.51282051282051,
"alnum_prop": 0.5876608965823347,
"repo_name": "hexahedria/gated-graph-transformer-network",
"id": "84711d9173c904bb8a1079dfb61c3b8a4bc2e3d1",
"size": "4506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "base_gru.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "14613"
},
{
"name": "Python",
"bytes": "182547"
}
],
"symlink_target": ""
}
|
import pytest
import numpy as np
import nnabla.functions as F
from nbla_test_utils import (
function_tester,
list_ctx_and_func_name)
def ref_broadcast(x, shape):
    """Reference broadcast: replicate ``x`` against an all-ones array of ``shape``."""
    ones = np.ones(shape, dtype=x.dtype)
    return ones * x
def get_combination(n):
    """Return every boolean mask of length ``n``, each paired with ``n``.

    For ``n == 0`` a single empty mask is returned.

    Fixes:
    - ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    - ``np.vstack`` was fed a lazy ``map`` object; modern NumPy requires
      a sequence, so build a list comprehension instead.
    """
    if n == 0:
        return [(n, np.array([], dtype=bool))]
    flat_grids = [g.flatten() for g in np.meshgrid(*[[0, 1] for _ in range(n)])]
    all_comb = np.vstack(flat_grids).T.astype(bool)
    return [(n, comb) for comb in all_comb]
def get_combinations(*N):
    """Concatenate ``get_combination(n)`` results for every ``n`` in ``N``."""
    return [pair for n in N for pair in get_combination(n)]
@pytest.mark.parametrize("seed", [314])
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name(['broadcast']))
@pytest.mark.parametrize("ndim, broadcast_dim", get_combinations(*range(0, 6)))
@pytest.mark.parametrize("align", [True, False])
def test_broadcast_forward_backward(align, ndim, broadcast_dim, seed, fname, ctx, func_name):
    """Forward/backward test of broadcast over axis patterns.

    ``broadcast_dim`` marks which axes of the input have size 1.  With
    ``align=False`` a trailing-broadcast pattern is exercised, e.g.
    inshape (3, 4) against target shape (2, 3, 4).
    """
    func = getattr(F, fname)
    ref_func = eval('ref_' + fname)
    rng = np.random.RandomState(seed)
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1
    if ndim == 0:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn()).astype("float32")]
        function_tester(rng, func, ref_func, inputs, [shape],
                        ctx=ctx, backward=[True], func_name=func_name,
                        atol_b=4e-3)
        return
    if not align:
        # Trailing pattern, e.g., inshape = (3, 4), shape = (2, 3, 4)
        # Bug fix: the old condition ``np.all(x) or not np.all(x)`` was a
        # tautology, so every align=False case was skipped.  Per the skip
        # message, only the degenerate all-true/all-false masks skip.
        if np.all(broadcast_dim) or not np.any(broadcast_dim):
            pytest.skip(
                "All true or all false of broadcast_dim is not needed to test.")
        inshape = inshape[np.logical_not(broadcast_dim)]
        shape1 = shape[broadcast_dim]
        shape0 = shape[np.logical_not(broadcast_dim)]
        # Bug fix: ``shape1 + shape0`` added the ndarrays elementwise;
        # the broadcast axes must be *prepended* to the kept axes.
        shape = np.concatenate([shape1, shape0])
    inputs = [np.array(rng.randn(*inshape)).astype("float32")]
    function_tester(rng, func, ref_func, inputs, [shape],
                    ctx=ctx, backward=[True], func_name=func_name,
                    atol_b=6e-3)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("fname, ctx, func_name", list_ctx_and_func_name(['broadcast']))
@pytest.mark.parametrize("ndim, broadcast_dim", get_combinations(*range(0, 6)))
@pytest.mark.parametrize("align", [True, False])
def test_broadcast_double_backward(align, ndim, broadcast_dim, seed, fname, ctx, func_name):
    """Double-backward (gradient-of-gradient) test of broadcast."""
    from nbla_test_utils import cap_ignore_region, backward_function_tester
    rng = np.random.RandomState(seed)
    shape = rng.randint(2, 5, size=(ndim,))
    inshape = shape.copy()
    inshape[broadcast_dim] = 1
    if ndim == 0:
        # Performing 0-dim array test too.
        inputs = [np.array(rng.randn()).astype("float32")]
        backward_function_tester(rng, F.broadcast,
                                 inputs=inputs,
                                 func_args=[shape], func_kwargs={},
                                 ctx=ctx)
        # Bug fix: return here (as the forward test does) so the 0-dim
        # case is not run a second time by the code below.
        return
    if not align:
        # Trailing pattern, e.g., inshape = (3, 4), shape = (2, 3, 4)
        # Bug fix: the old condition ``np.all(x) or not np.all(x)`` was a
        # tautology, so every align=False case was skipped.
        if np.all(broadcast_dim) or not np.any(broadcast_dim):
            pytest.skip(
                "All true or all false of broadcast_dim is not needed to test.")
        inshape = inshape[np.logical_not(broadcast_dim)]
        shape1 = shape[broadcast_dim]
        shape0 = shape[np.logical_not(broadcast_dim)]
        # Bug fix: prepend broadcast axes via concatenation instead of
        # elementwise ndarray addition.
        shape = np.concatenate([shape1, shape0])
    inputs = [np.array(rng.randn(*inshape)).astype("float32")]
    backward_function_tester(rng, F.broadcast, inputs,
                             func_args=[shape], func_kwargs={},
                             dstep=1e-3,
                             ctx=ctx)
|
{
"content_hash": "1ddc45f8ef05870a8332ce47455e0599",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 93,
"avg_line_length": 36.78217821782178,
"alnum_prop": 0.5911170928667564,
"repo_name": "sony/nnabla",
"id": "efa21b57d52c86a9d323a875a55e162562c3e35e",
"size": "4358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/test/function/test_broadcast.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "25938"
},
{
"name": "C++",
"bytes": "2590231"
},
{
"name": "CMake",
"bytes": "35358"
},
{
"name": "Cython",
"bytes": "180959"
},
{
"name": "Dockerfile",
"bytes": "5431"
},
{
"name": "Jupyter Notebook",
"bytes": "540006"
},
{
"name": "Makefile",
"bytes": "24294"
},
{
"name": "Python",
"bytes": "5311538"
},
{
"name": "Shell",
"bytes": "4750"
}
],
"symlink_target": ""
}
|
import sys
class Terminal(object):
    """An interface for text entry and display."""
    def __init__(self, environ):
        # environ: a mapping (e.g. os.environ) consulted for the PS1/PS2/PS4
        # prompt variables; missing keys fall back to shell-style defaults.
        self.environ = environ
    @property
    def primary_prompt_string(self):
        """The prompt first seen at the command line. Defaults to "$ "."""
        return self.environ.get("PS1", "$ ")
    @property
    def secondary_prompt_string(self):
        """The prompt seen for line continuations. Defaults to "> "."""
        return self.environ.get("PS2", "> ")
    @property
    def quaternary_prompt_string(self):
        """Printed before each command displayed during an execution trace"""
        return self.environ.get("PS4", "+ ")
    def readline(self, continuation=False):
        """Read a line from the terminal.
        A backslash followed by a <newline> is interpreted as a line
        continuation. The backslash and <newline>s are removed before return.
        For example::
            $ uname \
            > -m
            x86_64
        :param continuation:
            True if the line is a continuation. Defaults to False.
        """
        prompt = (self.secondary_prompt_string if continuation else
                  self.primary_prompt_string)
        try:
            line = raw_input(prompt)
            # Keep reading while the user ends the line with a backslash.
            while line.endswith("\\"):
                line = line[:-1] + raw_input(self.secondary_prompt_string)
        except EOFError:
            # Ctrl-D at the prompt terminates the session.
            raise SystemExit()
        else:
            return line
    def readlines(self):
        """Read a command from the terminal.
        Returns a list of tokens containing the user's input.
        """
        # Generator: the first yielded line uses the primary prompt,
        # subsequent ones the continuation prompt.
        continuation = False
        while True:
            yield self.readline(continuation)
            continuation = True
    def __iter__(self):
        return self.readlines()
    def write(self, msg):
        """Output a message.
        :param msg: a string to print to standard out
        """
        sys.stdout.write(msg)
    def debug(self, msg):
        # Trace output: prefix with PS4 like a shell's `set -x` trace.
        print self.quaternary_prompt_string + msg
    def error(self, msg):
        """Output an error.
        :param msg: a string to print to standard error
        """
        sys.stderr.write(msg)
|
{
"content_hash": "35ff71aa394f5500bdb617f203008282",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 28.012820512820515,
"alnum_prop": 0.5725400457665903,
"repo_name": "Ceasar/twosheds",
"id": "fc86b6fee4d3884ff780ea160ef839ffef74dbb5",
"size": "2185",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twosheds/terminal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "573"
},
{
"name": "Python",
"bytes": "34002"
}
],
"symlink_target": ""
}
|
import multiprocessing
from Provider import Provider
from ProviderAgentException import ProviderException
from foundation.FoundationException import FoundationException
import foundation.agent_properties
import MySQLdb
import logging
import datetime
import os
import sys
import re
import inspect
# Module-level logger: provider activity is written to providers_logs.log
# with timestamped, DEBUG-level records.
logger = logging.getLogger('provider_application')
fh = logging.FileHandler('providers_logs.log')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)
def load_classes(list_classes):
    """Discover and register Provider agent classes.

    Scans the directory containing this module for files matching
    ``Provider*.py``, imports each module whose class name appears in
    ``foundation.agent_properties.provider_types`` and stores the class
    object in ``list_classes`` keyed by class name.

    :param list_classes: dict populated in place with {class_name: class}.

    Fixes:
    - ``re.sub(r".py", "", name)`` removed *every* <char>+"py" occurrence
      (e.g. "ProviderSpy.py" -> "Provider"); now anchored as ``\\.py$``.
    - Dead ``black_list`` local removed.
    - Final message now goes through the module's configured ``logger``
      instead of the unconfigured root logger.
    """
    currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    # Make the agents directory importable via __import__ below.
    sys.path.append(currentdir)
    for filename in os.listdir(currentdir):
        # Ignore subfolders; only plain module files are candidates.
        if os.path.isdir(os.path.join(currentdir, filename)):
            continue
        if not re.match(r"Provider.*?\.py$", filename):
            continue
        classname = re.sub(r"\.py$", r"", filename)
        if classname in foundation.agent_properties.provider_types:
            module = __import__(classname)
            list_classes[classname] = getattr(module, classname)
    logger.debug('Load Providers Classes initialized')
def getGeneralConfigurationParameters(cursor):
    """Fetch the global simulation parameters from the database.

    :param cursor: an open DB-API cursor.
    :returns: tuple (bidPeriods, numberOffers, numAccumPeriods) taken
        from the single configuration row.
    """
    sql = "SELECT bid_periods, initial_offer_number, \
               num_periods_market_share \
               FROM simulation_generalparameters limit 1"
    cursor.execute(sql)
    # Unpack only the first row; the LIMIT 1 makes at most one available.
    for bidPeriods, numberOffers, numAccumPeriods in cursor.fetchall():
        break
    return bidPeriods, numberOffers, numAccumPeriods
def getSeed(seed, year, month, day, hour, minute, second, microsecond):
    """Build the datetime used to seed the provider's randomness.

    When ``seed`` equals 1 the explicitly supplied date/time fields are
    used (reproducible runs); otherwise the current wall-clock time is
    returned.
    """
    if seed != 1:
        return datetime.datetime.now()
    return datetime.datetime(year, month, day, hour, minute, second, microsecond)
def create(list_classes, typ, providerName, providerId, serviceId, providerSeed, marketPositon,
            adaptationFactor, monopolistPosition, debug, resources, numberOffers,
            numAccumPeriods, numAncestors, startFromPeriod, sellingAddress, buyingAddress, capacityControl, purchase_service):
    """Instantiate the Provider subclass registered under ``typ``.

    Looks up ``typ`` in ``list_classes`` (populated by load_classes) and
    constructs it with the supplied configuration, or raises
    ProviderException when the class was never loaded.
    """
    print 'In create provider - Class requested:' + str(typ)
    print list_classes
    if typ in list_classes:
        targetClass = list_classes[typ]
        return targetClass(providerName, providerId, serviceId, providerSeed,
                    marketPositon, adaptationFactor, monopolistPosition,
                    debug, resources, numberOffers, numAccumPeriods,
                    numAncestors, startFromPeriod, sellingAddress, buyingAddress, capacityControl, purchase_service)
    else:
        # NOTE(review): message lacks spaces and renders as
        # "Class<typ>not found to be loaded".
        err = 'Class' + typ + 'not found to be loaded'
        raise ProviderException(err)
'''
The ProviderExecution starts the threads for the service provider agents.
'''
if __name__ == '__main__':
    list_classes = {}
    # Load Provider classes
    load_classes(list_classes)
    # Open database connection
    #db = MySQLdb.connect("localhost","root","password","Network_Simulation" )
    db = MySQLdb.connect(foundation.agent_properties.addr_database,foundation.agent_properties.user_database,
                            foundation.agent_properties.user_password,foundation.agent_properties.database_name )
    # prepare a cursor object using cursor() method
    cursor = db.cursor()
    # Brings the general parameters from the database
    bidPeriods, numberOffers, numAccumPeriods = getGeneralConfigurationParameters(cursor)
    # Verifies if they were configured, otherwise brings them from the agent properties.
    if (numberOffers == 0):
        numberOffers = foundation.agent_properties.initial_number_bids
    if (numAccumPeriods == 0):
        numAccumPeriods = foundation.agent_properties.num_periods_market_share
    # Prepare SQL query to SELECT providers from the database.
    sql = "SELECT id, name, market_position, adaptation_factor \
                 , monopolist_position, service_id, num_ancestors, debug \
                 , seed, year, month, day, hour, minute, second \
                 , microsecond, class_name, start_from_period, buying_marketplace_address \
                 , selling_marketplace_address, capacity_controlled_at, purchase_service_id \
            FROM simulation_provider \
            WHERE status = 'A'"
    try:
        providers = []
        # Execute the SQL command
        cursor.execute(sql)
        # Fetch all the rows in a list of lists.
        results = cursor.fetchall()
        i = 1
        lst = []
        # Only provider classes declared in agent_properties are started.
        lst = foundation.agent_properties.provider_types.split(',')
        for row in results:
            # Unpack one provider's configuration row by column position.
            providerId = row[0]
            providerName = row[1]
            marketPositon = row[2]
            adaptationFactor = row[3]
            monopolistPosition = row[4]
            serviceId = str(row[5])
            numAncestors = row[6]
            if (row[7] == 1):
                debug = True
            else:
                debug = False
            seed = row[8]
            year = row[9]
            month = row[10]
            day = row[11]
            hour = row[12]
            minute = row[13]
            second = row[14]
            microsecond = row[15]
            class_name = row[16]
            startFromPeriod = row[17]
            buyingAddress = row[18]
            sellingAddress = row[19]
            capacityControl = row[20]
            purchase_service = row[21]
            providerSeed = getSeed(seed, year, month, day, hour, minute, second, microsecond)
            # Brings resources definition
            cursor2 = db.cursor()
            # NOTE(review): provider_id is interpolated with %; a DB-API
            # parameterized query would be safer, though the value comes
            # from the database itself here.
            sql_resources = "SELECT resource_id, capacity, cost \
                             FROM simulation_provider_resource \
                             WHERE provider_id = '%d'" % (providerId)
            cursor2.execute(sql_resources)
            resourceRows = cursor2.fetchall()
            resources = {}
            for resourceRow in resourceRows:
                resources[str(resourceRow[0])] = {'Capacity': resourceRow[1], 'Cost' : resourceRow[2]}
            print class_name
            print lst
            if (class_name in lst):
                print 'before executing provider public'
                provider = create(list_classes, class_name, providerName + str(providerId), providerId, serviceId,
                          providerSeed, marketPositon, adaptationFactor,
                          monopolistPosition, debug, resources, numberOffers,
                          numAccumPeriods, numAncestors, startFromPeriod,
                          sellingAddress, buyingAddress, capacityControl, purchase_service)
                providers.append(provider)
                i = i + 1
        # start the providers
        for w in providers:
            w.start()
    except FoundationException as e:
        print e.__str__()
    except ProviderException as e:
        print e.__str__()
    except Exception as e:
        print e.__str__()
    finally:
        # disconnect from server
        db.close()
|
{
"content_hash": "cf75ad3f3321412178123a5a49e14970",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 128,
"avg_line_length": 38.60732984293194,
"alnum_prop": 0.6311364252780038,
"repo_name": "lmarent/network_agents_ver2_python",
"id": "e7c199c2c698b54dbcf44aeac84aa4fa0cd98d22",
"size": "7374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/ProviderExecution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2788"
},
{
"name": "Python",
"bytes": "807581"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import sys
from django.test import TestCase, override_settings
from django.utils import six
from django_postgres_pgpfields import proxy
from django_postgres_pgpfields import fields
from .factories import EncryptedModelFactory
from .models import EncryptedModel, EncryptedModelWithoutManager
# Field classes sharing the common public-key PGP behaviour exercised
# by TestPGPMixin below.
PGP_FIELDS = (
    fields.EmailPGPPublicKeyField,
    fields.IntegerPGPPublicKeyField,
    fields.TextPGPPublicKeyField,
    fields.DatePGPPublicKeyField,
)
class TestPGPMixin(TestCase):
    """Checks behaviour shared by every public-key PGP field type."""
    def test_check(self):
        """Each field passes Django's system checks with no errors."""
        for field_cls in PGP_FIELDS:
            self.assertEqual(field_cls(name='field').check(), [])
    def test_max_length(self):
        """A supplied ``max_length`` is discarded by every field."""
        for field_cls in PGP_FIELDS:
            self.assertEqual(field_cls(max_length=42).max_length, None)
    def test_db_type(self):
        """Every field is stored in a ``bytea`` column."""
        for field_cls in PGP_FIELDS:
            self.assertEqual(field_cls().db_type(), 'bytea')
class TestEmailPGPMixin(TestCase):
    """Email-specific behaviour of the PGP email field."""
    def test_max_length_validator(self):
        """No ``MaxLengthValidator`` is attached; a valid address passes."""
        result = fields.EmailPGPPublicKeyField().run_validators(value='value@value.com')
        self.assertEqual(result, None)
class TestEncryptedTextFieldModel(TestCase):
    """Test `EncryptedTextField` can be integrated in a `Django` model.

    Fix: the last test method previously reused the name
    ``test_bypass_unexpected_decrypted_value`` already defined above it,
    silently shadowing that test so it never ran; it is renamed to
    ``test_bypass_field_exception_in_migrations`` to match its setting.
    """
    model = EncryptedModel
    def test_fields(self):
        """Assert fields are representing our model."""
        fields = self.model._meta.get_all_field_names()
        expected = (
            'id',
            'email_pgp_pub_field',
            'integer_pgp_pub_field',
            'pgp_pub_field',
            'pgp_pub_date_field',
            'pgp_pub_null_boolean_field',
        )
        if six.PY2:
            self.assertItemsEqual(fields, expected)
        else:
            self.assertCountEqual(fields, expected)
    def test_value_returned_is_not_bytea(self):
        """Assert value returned is not a memoryview instance."""
        EncryptedModelFactory.create()
        instance = self.model.objects.get()
        self.assertIsInstance(instance.email_pgp_pub_field, six.text_type)
        self.assertIsInstance(instance.integer_pgp_pub_field, int)
        self.assertIsInstance(instance.pgp_pub_field, six.text_type)
    def test_fields_descriptor_is_not_instance(self):
        """`EncryptedProxyField` instance returns itself when accessed from the model."""
        self.assertIsInstance(
            self.model.pgp_pub_field,
            proxy.EncryptedProxyField,
        )
    def test_value_query(self):
        """Assert querying the field's value is making one query."""
        EncryptedModelFactory.create(pgp_pub_field='test')
        instance = self.model.objects.get()
        with self.assertNumQueries(0):
            instance.pgp_pub_field
    def test_value_pgp_pub(self):
        """Assert we can get back the decrypted value."""
        EncryptedModelFactory.create(pgp_pub_field='test')
        instance = self.model.objects.get()
        self.assertEqual(instance.pgp_pub_field, 'test')
    def test_value_pgp_date_pub(self):
        """Assert we can get back the decrypted value."""
        EncryptedModelFactory.create(pgp_pub_date_field=datetime.date.today())
        instance = self.model.objects.get()
        self.assertEqual(instance.pgp_pub_date_field, datetime.date.today())
    def test_value_pgp_date_pub_null(self):
        """Assert we can get back the decrypted value."""
        EncryptedModelFactory.create(pgp_pub_date_field=None)
        instance = self.model.objects.get()
        self.assertIsNone(instance.pgp_pub_date_field)
    def test_value_pgp_null_boolean_pub(self):
        """Assert we can get back the decrypted values."""
        EncryptedModelFactory.create(pgp_pub_null_boolean_field=None)
        instance = self.model.objects.last()
        self.assertIsNone(instance.pgp_pub_null_boolean_field)
        EncryptedModelFactory.create(pgp_pub_null_boolean_field=True)
        instance = self.model.objects.last()
        self.assertTrue(instance.pgp_pub_null_boolean_field)
        EncryptedModelFactory.create(pgp_pub_null_boolean_field=False)
        instance = self.model.objects.last()
        self.assertFalse(instance.pgp_pub_null_boolean_field)
    def test_value_pgp_pub_multipe(self):
        """Assert we get back the correct value when the table contains data."""
        EncryptedModelFactory.create(pgp_pub_field='test')
        created = EncryptedModelFactory.create(pgp_pub_field='test2')
        instance = self.model.objects.get(pk=created.pk)
        self.assertEqual(instance.pgp_pub_field, 'test2')
    def test_instance_not_saved(self):
        """Assert not saved instance return the value to be encrypted."""
        instance = EncryptedModelFactory.build(pgp_pub_field='test')
        self.assertEqual(instance.pgp_pub_field, 'test')
        self.assertEqual(instance.pgp_pub_field, 'test')
    def test_update_attribute_pgp_pub_field(self):
        """Assert pgp field can be updated through its attribute on the model."""
        instance = EncryptedModelFactory.create()
        instance.pgp_pub_field = 'testing'
        instance.save()
        updated_instance = self.model.objects.get()
        self.assertEqual(updated_instance.pgp_pub_field, 'testing')
    def test_update_one_attribute(self):
        """Assert value are not overriden when updating one attribute."""
        expected = 'initial value'
        new_value = 'new_value'
        instance = EncryptedModelFactory.create(
            pgp_pub_field=expected,
        )
        instance.pgp_sym_field = new_value
        instance.save()
        updated_instance = self.model.objects.get()
        self.assertEqual(updated_instance.pgp_pub_field, expected)
    def test_pgp_int_public_key_negative_number(self):
        """Assert negative value is saved with an `IntegerPGPPublicKeyField` field."""
        instance = EncryptedModelFactory.create(integer_pgp_pub_field=-1)
        self.assertEqual(instance.integer_pgp_pub_field, -1)
    def test_pgp_int_public_key_null_number(self):
        """Assert negative value is saved with an `IntegerPGPPublicKeyField` field."""
        instance = EncryptedModelFactory.create(integer_pgp_pub_field=None)
        self.assertEqual(instance.integer_pgp_pub_field, None)
    def test_null(self):
        """Assert `NULL` values are saved."""
        instance = EncryptedModel.objects.create()
        fields = self.model._meta.get_all_field_names()
        fields.remove('id')
        for field in fields:
            self.assertEqual(getattr(instance, field), None)
    def test_unexpected_decrypted_value(self):
        """Accessing an encrypted field outside the manager raises ValueError."""
        EncryptedModelWithoutManager.objects.create(pgp_pub_field='test')
        obj = EncryptedModelWithoutManager.objects.get()
        with self.assertRaisesMessage(ValueError, 'Unexpected encrypted field "pgp_pub_field"!'):
            obj.pgp_pub_field
    @override_settings(PGPFIELDS_BYPASS_NON_DECRYPTED_FIELD_EXCEPTION=True)
    def test_bypass_unexpected_decrypted_value(self):
        """With the bypass setting enabled, no ValueError is raised."""
        EncryptedModelWithoutManager.objects.create(pgp_pub_field='test')
        obj = EncryptedModelWithoutManager.objects.get()
        try:
            obj.pgp_pub_field
        except ValueError:
            self.fail('Unexpected exception!')
    @override_settings(PGPFIELDS_BYPASS_FIELD_EXCEPTION_IN_MIGRATIONS=True)
    def test_bypass_field_exception_in_migrations(self):
        """During a migrate command the field exception is bypassed."""
        argv_copy = sys.argv[:]
        try:
            sys.argv = ['python', 'manage.py', 'migrate']
            EncryptedModelWithoutManager.objects.create(pgp_pub_field='test')
            obj = EncryptedModelWithoutManager.objects.get()
            try:
                obj.pgp_pub_field
            except ValueError:
                self.fail('Unexpected exception!')
        finally:
            sys.argv = argv_copy
|
{
"content_hash": "da09c11238e2331556157314621ad351",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 97,
"avg_line_length": 39.661764705882355,
"alnum_prop": 0.6620936843406254,
"repo_name": "coldmind/django-postgres-pgpfields",
"id": "cd80278fa28dd89906808b12a722eda5fdf43da7",
"size": "8091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_fields.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "305"
},
{
"name": "Python",
"bytes": "19591"
}
],
"symlink_target": ""
}
|
"""
Uses the unittest module to test this app with `manage.py test`.
"""
# from django.test import TestCase
from unittest import TestCase, main
import doctest
import pug.nlp.util
class NLPDocTest(TestCase):
    """Runs the doctests of selected pug modules under the unittest runner."""
    def test_module(self, module=None):
        # With no module (as when discovered directly by the runner),
        # this is a no-op; real work happens via the wrappers below.
        if module is not None:
            failure_count, test_count = doctest.testmod(module, raise_on_error=False, verbose=True)
            msg = "Ran {0} tests in {3} and {1} passed ({2} failed)".format(test_count, test_count-failure_count, failure_count, module.__file__)
            print msg
            if failure_count:
                # print "Ignoring {0} doctest failures...".format(__file__)
                self.fail(msg)
        # return failure_count, test_count
    def test_nlp_util(self):
        # Doctest sweep of pug.nlp.util.
        self.test_module(pug.nlp.util)
    # def test_invest_util(self):
    #     self.test_module(pug.invest.util)
# def test_invest_util(self):
# self.test_module(pug.invest.util)
if __name__ == '__main__':
    # Allow running this test module directly via unittest.main().
    main()
|
{
"content_hash": "3564f61185f07190c063c66cb8a54e78",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 145,
"avg_line_length": 30.666666666666668,
"alnum_prop": 0.6152173913043478,
"repo_name": "hobson/pug",
"id": "64ef787b3f3d2ccc9640bf32eebeeae4866029fc",
"size": "942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pug/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "74951"
},
{
"name": "HTML",
"bytes": "31490"
},
{
"name": "JavaScript",
"bytes": "80198"
},
{
"name": "Python",
"bytes": "42362"
},
{
"name": "Shell",
"bytes": "15512"
}
],
"symlink_target": ""
}
|
try:
import usocket as socket, ussl as ssl, uerrno as errno, sys
except:
import socket, ssl, errno, sys, time, select
def test_one(site, opts):
    """Open a non-blocking TLS connection to ``site``:443, send a minimal
    HTTP/1.0 GET and sanity-check that an HTTP response comes back.

    Exercises both the MicroPython (ussl) and CPython (ssl) code paths.
    NOTE(review): ``opts`` (e.g. the "sni" flag from SITES) is currently
    unused here -- confirm whether SNI handling was intended.
    """
    ai = socket.getaddrinfo(site, 443)
    addr = ai[0][-1]
    print(addr)
    # Connect the raw socket
    s = socket.socket()
    s.setblocking(False)
    try:
        s.connect(addr)
        raise OSError(-1, "connect blocks")
    except OSError as e:
        # EINPROGRESS is the expected result of a non-blocking connect.
        if e.errno != errno.EINPROGRESS:
            raise
    if sys.implementation.name != "micropython":
        # in CPython we have to wait, otherwise wrap_socket is not happy
        select.select([], [s], [])
    try:
        # Wrap with SSL
        try:
            if sys.implementation.name == "micropython":
                s = ssl.wrap_socket(s, do_handshake=False)
            else:
                s = ssl.wrap_socket(s, do_handshake_on_connect=False)
        except OSError as e:
            if e.errno != errno.EINPROGRESS:
                raise
        print("wrapped")
        # CPython needs to be told to do the handshake
        if sys.implementation.name != "micropython":
            # Poll until the handshake completes, waiting on whichever
            # direction the TLS layer reports it needs.
            while True:
                try:
                    s.do_handshake()
                    break
                except ssl.SSLError as err:
                    if err.args[0] == ssl.SSL_ERROR_WANT_READ:
                        select.select([s], [], [])
                    elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
                        select.select([], [s], [])
                    else:
                        raise
                time.sleep(0.1)
        # print("shook hands")
        # Write HTTP request
        out = b"GET / HTTP/1.0\r\nHost: %s\r\n\r\n" % bytes(site, "latin")
        while len(out) > 0:
            n = s.write(out)
            if n is None:
                # Non-blocking write not ready yet; retry.
                continue
            if n > 0:
                out = out[n:]
            elif n == 0:
                raise OSError(-1, "unexpected EOF in write")
        print("wrote")
        # Read response
        resp = b""
        while True:
            try:
                b = s.read(128)
            except OSError as err:
                if err.errno == 2:  # 2=ssl.SSL_ERROR_WANT_READ:
                    continue
                raise
            if b is None:
                # Non-blocking read with no data yet; retry.
                continue
            if len(b) > 0:
                # Cap the buffered response at ~1 KiB; we only need the
                # status line for the check below.
                if len(resp) < 1024:
                    resp += b
            elif len(b) == 0:
                break
        print("read")
        if resp[:7] != b"HTTP/1.":
            raise ValueError("response doesn't start with HTTP/1.")
        # print(resp)
    finally:
        s.close()
# Targets for the TLS smoke test.  An entry is either a plain hostname or
# a dict with a "host" key plus extra options (e.g. "sni").
SITES = [
    "google.com",
    {"host": "www.google.com"},
    "micropython.org",
    "pypi.org",
    "api.telegram.org",
    {"host": "api.pushbullet.com", "sni": True},
]
def main():
    """Run the TLS test against every configured site, printing ok/error."""
    for entry in SITES:
        options = {}
        if isinstance(entry, dict):
            options = entry
            entry = options["host"]
        try:
            test_one(entry, options)
            print(entry, "ok")
        except Exception:
            print(entry, "error")
    print("DONE")
main()
|
{
"content_hash": "19175d47421b0273eb7ca25485a45e21",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 74,
"avg_line_length": 26.887931034482758,
"alnum_prop": 0.4597627444693812,
"repo_name": "bvernoux/micropython",
"id": "54abc6966fc05e5cb1cea27a7237291bceca79ce",
"size": "3119",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/net_inet/test_tls_nonblock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "50694"
},
{
"name": "C",
"bytes": "19869126"
},
{
"name": "C++",
"bytes": "2489380"
},
{
"name": "HTML",
"bytes": "84456"
},
{
"name": "Makefile",
"bytes": "49218"
},
{
"name": "Objective-C",
"bytes": "8382"
},
{
"name": "Python",
"bytes": "856777"
},
{
"name": "Shell",
"bytes": "6229"
}
],
"symlink_target": ""
}
|
import gzip
import json
import os
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import EmptyPage
from django.core.paginator import PageNotAnInteger
from django.core.paginator import Paginator
from django.db.models import Avg
from django.db.models import Count
from django.db.models import Max
from django.db.models import Min
from django.db.models import Q
from django.db.models import Sum
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.shortcuts import render
from django.template import RequestContext
from django.utils.text import slugify
from django.views.generic import DeleteView
from individuals.forms import IndividualForm, ComparisonForm, GroupForm, BrowserForm
from individuals.models import Individual, Group
from individuals.tasks import VerifyVCF, AnnotateVariants, PopulateVariants
from variants.models import Variant
def response_mimetype(request):
    """Pick the response content type from the request's Accept header.

    Returns "application/json" when the client accepts JSON, otherwise
    "text/plain".
    """
    accept_header = request.META['HTTP_ACCEPT']
    if "application/json" in accept_header:
        return "application/json"
    return "text/plain"
class JSONResponse(HttpResponse):
    """HttpResponse that serialises ``obj`` to JSON.

    :param obj: value to serialise (must be JSON-encodable).
    :param json_opts: optional keyword arguments forwarded to json.dumps.
    :param mimetype: content type passed through to HttpResponse.
    """
    def __init__(self, obj='', json_opts=None, mimetype="application/json", *args, **kwargs):
        # Fix: avoid a shared mutable default argument for json_opts.
        json_opts = {} if json_opts is None else json_opts
        content = json.dumps(obj, **json_opts)
        super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)
def create(request):
    """Upload view: accepts a VCF file POSTed by the file-upload widget and
    creates an ``Individual`` record for it.

    On success returns a JSON payload describing the uploaded file, in the
    shape the jQuery-File-Upload-style widget expects; on GET renders the
    upload form.
    """
    if request.method == 'POST':
        form = IndividualForm(request.POST, request.FILES)
        if form.is_valid():
            # Anonymous uploads are allowed; they are stored with user=None.
            if request.user.is_authenticated:
                individual = Individual.objects.create(user=request.user, status='new')
            else:
                individual = Individual.objects.create(user=None, status='new')
            individual.vcf_file= request.FILES.get('file')
            print('file')
            print(request.FILES.get('file'))
            # Slugify each dot-separated part of the filename, keeping the dots.
            filename = individual.vcf_file.name.split('.')
            new_filename = []
            for tag in filename:
                new_filename.append(slugify(tag))
            individual.vcf_file.name = ".".join(new_filename)
            print('filename ', filename)
            #get name from inside vcf file
            # Derive a display name by stripping known archive/vcf suffixes.
            individual.name= str(os.path.splitext(individual.vcf_file.name)[0]).replace('.vcf','').replace('.gz','').replace('.rar','').replace('.zip','').replace('._',' ').replace('.',' ')
            # individual.shared_with_groups = form.cleaned_data['shared_with_groups']
            individual.shared_with_groups.set(form.cleaned_data['shared_with_groups'])
            individual.save()
            f = individual.vcf_file
            #fix permissions
            #os.chmod("%s/genomes/%s/" % (settings.BASE_DIR, individual.user), 0777)
            #if request.user.is_authenticated:
            #    os.chmod("%s/genomes/%s/%s" % (settings.BASE_DIR, slugify(individual.user), individual.id), 0o777)
            #else:
            #    os.chmod("%s/genomes/public/%s" % (settings.BASE_DIR, individual.id), 0o777)
            # AnnotateVariants.delay(individual.id)
            # VerifyVCF.delay(individual.id)
            # Payload shape required by the upload widget's "files" contract.
            data = {'files': [{'deleteType': 'DELETE', 'name': individual.name, 'url': '', 'thumbnailUrl': '', 'type': 'image/png', 'deleteUrl': '', 'size': f.size}]}
            response = JSONResponse(data, mimetype=response_mimetype(request))
            response['Content-Disposition'] = 'inline; filename=files.json'
            return response
        else:
            print(form.errors)
    else:
        form = IndividualForm()
    return render(request, 'individuals/create.html', {'form':form})
# Create your views here.
@login_required
def edit(request, individual_id):
    """Edit an existing Individual's metadata via IndividualForm.

    NOTE(review): request.FILES is not passed to the form here, so the VCF
    file itself cannot be replaced from this view -- confirm that is
    intentional.
    """
    individual = get_object_or_404(Individual, pk=individual_id)
    if request.method == 'POST':
        form = IndividualForm(request.POST, instance=individual)
        if form.is_valid():
            form.save()
            return redirect('dashboard')
    # form = IndividualForm(request.POST, request.FILES)
    # if form.is_valid():
    #     individual = form.save(commit=False)
    #     individual.user = request.user
    #     individual.save()
    #     return redirect('dashboard')
    else:
        form = IndividualForm(instance=individual)
    return render(request, 'individuals/individual_form.html', {'form':form})
class IndividualDeleteView(DeleteView):
    """Deletes an Individual: the database record, its stored VCF file and
    its per-user genome working directory."""
    model = Individual

    def delete(self, request, *args, **kwargs):
        """
        Delete the database record plus the on-disk artifacts for this
        individual, then redirect back to the individuals list.
        """
        import shutil

        self.object = self.get_object()
        individual_id = self.object.id
        # Anonymous uploads live under the "public" directory.
        if self.object.user:
            username = self.object.user.username
        else:
            username = 'public'
        # Delete the uploaded VCF (if any) from storage.
        if self.object.vcf_file:
            self.object.vcf_file.delete()
        # Remove the genome working directory.  shutil.rmtree replaces the
        # previous os.system("rm -rf ...") call, which interpolated the
        # username into a shell command (shell-injection risk) and silently
        # ignored failures; ignore_errors keeps the old best-effort behavior.
        shutil.rmtree('%s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id),
                      ignore_errors=True)
        self.object.delete()
        messages.add_message(request, messages.INFO, "Individual deleted with success!")
        return redirect('individuals_list')
def view(request, individual_id):
    """Summary page for one individual: aggregate variant statistics plus a
    paginated variant table (25 rows per page).

    The aggregates are attached directly onto the ``individual`` instance
    so the template can read them as plain attributes.
    """
    individual = get_object_or_404(Individual, pk=individual_id)
    variant_list = Variant.objects.filter(individual=individual)
    # snpeff = SnpeffAnnotation.objects.filter(individual=individual)
    individual.n_variants = variant_list.count()
    # Variants without an rsID ('.') are counted as novel.
    individual.novel_variants = variant_list.filter(variant_id = '.').count()
    individual.summary = []
    #get calculated values from database
    summary_item = {
        'type': 'Total SNVs',
        'total': variant_list.values('genotype').count(),
        'discrete': variant_list.values('genotype').annotate(total=Count('genotype'))
    }
    individual.summary.append(summary_item)
    summary_item = {
        'type': 'Total Gene-associated SNVs',
        'total': variant_list.values('gene').exclude(gene="").count(),
        'discrete': variant_list.exclude(gene="").values('genotype').annotate(total=Count('genotype'))
    }
    individual.summary.append(summary_item)
    # Per-category breakdowns used by the template's charts/tables.
    individual.snp_eff = variant_list.values('snpeff_effect').annotate(Count('snpeff_effect')).order_by('snpeff_effect')
    # print 'individual.snp_eff', individual.snp_eff
    # variant_list.values('snpeff__effect').annotate(Count('snpeff__effect')).order_by('snpeff__effect')
    #
    individual.functional_class = variant_list.values('snpeff_func_class').annotate(Count('snpeff_func_class')).order_by('snpeff_func_class')
    individual.impact_variants = variant_list.values('snpeff_impact').annotate(Count('snpeff_impact')).order_by('snpeff_impact')
    individual.filter_variants = variant_list.values('filter').annotate(Count('filter')).order_by('filter')
    individual.quality = variant_list.aggregate(Avg('qual'), Max('qual'), Min('qual'))
    individual.read_depth = variant_list.aggregate(Avg('read_depth'), Max('read_depth'), Min('read_depth'))
    individual.clinvar_clnsig = variant_list.values('clinvar_clnsig').annotate(total=Count('clinvar_clnsig'))
    individual.chromossome = variant_list.values('chr').annotate(total=Count('chr')).order_by('chr')
    # variants_with_snpid = variant_list.values('variant_id').exclude(variant_id=".")
    #print variants_with_snpid
    # fields = Variant._meta.get_all_field_names()
    paginator = Paginator(variant_list, 25) # Show 25 variants per page
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        variants = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        variants = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        variants = paginator.page(paginator.num_pages)
    #'fields':fields
    return render(request, 'individuals/view.html', {'individual': individual, 'variants':variants})
@login_required
def browse(request, individual_id):
    """Filterable variant browser for one individual.

    Filters (chr, pos, effect) come from the GET query string via
    BrowserForm; results are paginated 25 per page.  The raw query string
    is passed to the template so pagination links preserve the filters.

    NOTE(review): ``form`` and ``variants`` are only assigned inside the
    ``request.method == 'GET'`` branch -- a non-GET request would raise
    NameError at the pagination step.  Confirm this view is only routed
    for GET.
    """
    query_string = request.META['QUERY_STRING']
    individual = get_object_or_404(Individual, pk=individual_id)
    # Keyword arguments accumulated here are applied to the Variant filter.
    query = {}
    # DEFAULT_SORT = 'pk'
    # sort_key = request.GET.get('sort', DEFAULT_SORT)
    # tags = ['genotype', 'snpeffannotation__effect']#, 'func_class', 'impact', 'cln_omim', 'chr'
    # for tag in tags:
    #     criteria = request.GET.get(tag, '')
    #     if criteria:
    #         query[tag] = criteria
    if request.method == 'GET':
        form = BrowserForm(request.GET)
        if form.is_valid():
            print('form is valid')
            #chr
            chr = request.GET.get('chr', '')
            if chr != '':
                query['chr'] = chr
            #pos
            pos = request.GET.get('pos', '')
            if pos != '':
                query['pos'] = pos
            effect = request.GET.get('effect', '')
            if effect != '':
                print('effect', effect)
                query['snpeff_effect'] = effect
            #snp_id
            # snp_id = request.GET.get('snp_id', '')
            # if snp_id != '':
            #     query['variant_id'] = snp_id
            # snp_list = request.GET.get('snp_list', '')
            # snp_list = snp_list.split('\r\n')
            # if snp_list[0] != u'':
            #     query['variant_id__in'] = snp_list
            # snp_eff = request.GET.getlist('effect')
            # if len(snp_eff) > 0:
            #     query['snp_eff__in'] = snp_eff
            # func_class = request.GET.getlist('func_class')
            # if len(func_class) > 0:
            #     query['snp_eff_functional_class__in'] = func_class
            # gene = request.GET.get('gene', '')
            # if gene != '':
            #     query['gene_name'] = gene
            # gene_list = request.GET.get('gene_list', '')
            # gene_list = gene_list.split('\r\n')
            # if gene_list[0] != u'':
            #     query['gene_name__in'] = gene_list
            # cln = request.GET.get('cln_omim', '')
            # print 'clnomim', cln
            # if cln == 'on':
            #     query['cln_omim'] != ''
            variants = Variant.objects.filter(individual=individual, **query)
            # snpeff_annotations = SnpeffAnnotation.objects.filter(variant__in=variants)
            # #b.entry_set.filter(headline__contains='Lennon')
            # print 'snpeff_annotations', len(snpeff_annotations)
            # for variant in variants:
            #     print variant.entry_set.all()
            #     variant.snpeff=
        else:
            # Invalid form: fall back to an unfiltered listing.
            form = BrowserForm(request.GET)
            variants = Variant.objects.filter(individual=individual, **query)
    #Pagination
    paginator = Paginator(variants, 25) # Show 25 variants per page
    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1
    try:
        variants = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        variants = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        variants = paginator.page(paginator.num_pages)
    return render(request, 'variants/variants.html', {'individual': individual, 'variants':variants, 'form':form, 'query_string':query_string})
@login_required
def list(request):
    """Individuals list page plus bulk actions on selected individuals.

    POST applies the action named in ``selectionField`` (Show / Hide /
    Delete / Populate / Annotate / Find_Medical_Conditions_and_Medicines)
    to every checked id, then falls through to render the list.  Staff
    users see all individuals; everyone else sees their own plus those
    shared with them directly or through group membership.

    NOTE(review): this view's name shadows the builtin ``list`` inside
    this module, and Find_Medical_Conditions_and_Medicines is not among
    the imports visible at the top of this file -- confirm it is imported
    elsewhere or that branch raises NameError.
    """
    if request.method == 'POST':
        individuals = request.POST.getlist('individuals')
        print(individuals)
        individuals = [int(x) for x in individuals]
        print(individuals)
        if request.POST['selectionField'] == "Show":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                individual.is_featured = True
                individual.save()
        if request.POST['selectionField'] == "Hide":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                individual.is_featured = False
                individual.save()
        if request.POST['selectionField'] == "Delete":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                individual_id = individual.id
                username = individual.user.username
                #delete files
                if individual.vcf_file:
                    individual.vcf_file.delete()
                # if individual.strs_file:
                #     individual.strs_file.delete()
                # if individual.cnvs_file:
                #     individual.cnvs_file.delete()
                os.system('rm -rf %s/genomes/%s/%s' % (settings.BASE_DIR, username, individual_id))
                individual.delete()
                #os.system('rm -rf mendelmd14/site_media/media/genomes/%s/%s' % (username, individual_id))
        if request.POST['selectionField'] == "Populate":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                PopulateVariants.delay(individual.id)
        if request.POST['selectionField'] == "Annotate":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                AnnotateVariants.delay(individual.id)
        if request.POST['selectionField'] == "Find_Medical_Conditions_and_Medicines":
            for individual_id in individuals:
                individual = get_object_or_404(Individual, pk=individual_id)
                Find_Medical_Conditions_and_Medicines.delay(individual.id)
    # Visibility filter: owner, directly shared, or shared via a group.
    args = []
    # groups = Groups.objects.filter(user=request.user, shared_with_users=).order_by("-id")
    args.append(Q(user=request.user) | Q(shared_with_users=request.user) | Q(shared_with_groups__members=request.user))
    if request.user.is_staff:
        individuals = Individual.objects.all()
    else:
        individuals = Individual.objects.filter(*args).order_by("-id")
    ind_featured = Individual.objects.filter(is_featured= True).order_by("id")
    # paginator = Paginator(individuals, 25) # Show 25 contacts per page
    # try:
    #     page = int(request.GET.get('page', '1'))
    # except ValueError:
    #     page = 1
    # try:
    #     individuals = paginator.page(page)
    # except PageNotAnInteger:
    #     # If page is not an integer, deliver first page.
    #     individuals = paginator.page(1)
    # except EmptyPage:
    #     # If page is out of range (e.g. 9999), deliver last page of results.
    #     individuals = paginator.page(paginator.num_pages)
    groups = Group.objects.all()
    # individuals = Individual.objects.annotate(number_of_variants=Count('variant'))
    return render(request, 'individuals/list.html', {'individuals': individuals, 'groups':groups, 'ind_featured':ind_featured})
@login_required
def annotate(request, individual_id):
    """Reset an individual's annotation state and queue it for verification."""
    ind = get_object_or_404(Individual, pk=individual_id)
    ind.status = 'new'
    ind.n_lines = 0
    VerifyVCF.delay(ind.id)
    ind.save()
    messages.add_message(request, messages.INFO, "Your individual is being annotated.")
    return redirect('dashboard')
@login_required
def populate(request, individual_id):
    """Queue the Celery task that loads this individual's variants."""
    ind = get_object_or_404(Individual, pk=individual_id)
    PopulateVariants.delay(ind.id)
    messages.add_message(request, messages.INFO, "Your individual is being populated.")
    return redirect('dashboard')
@login_required
def populate_mongo(request, individual_id):
    """Queue a Celery task to insert this individual's variants into MongoDB.

    NOTE(review): PopulateMongoVariants is not among the tasks imported at
    the top of this file (only VerifyVCF, AnnotateVariants and
    PopulateVariants are visible) -- confirm it is imported elsewhere or
    this view raises NameError.
    """
    individual = get_object_or_404(Individual, pk=individual_id)
    PopulateMongoVariants.delay(individual.id)
    messages.add_message(request, messages.INFO, "Your individual is being inserted at MongoDB.")
    return redirect('individuals_list')
def download(request, individual_id):
    """Send the individual's original VCF back as a plain-text attachment,
    transparently decompressing ``.gz`` uploads.

    NOTE(review): Content-Length is the on-disk (possibly compressed) size
    while the body is the decompressed content for .gz files -- preserved
    from the original implementation; confirm clients tolerate it.
    """
    individual = get_object_or_404(Individual, pk=individual_id)
    filepath = os.path.dirname(str(individual.vcf_file.name))
    filename = os.path.basename(str(individual.vcf_file.name))
    fullpath = '%s/%s' % (filepath, filename)
    # Open in binary mode in both branches: the original mixed gzip's
    # binary reads with a text-mode open(), which could fail on non-UTF-8
    # VCFs.  Context managers guarantee the handle is closed on any error.
    opener = gzip.open if filename.endswith('.gz') else open
    with opener(fullpath, 'rb') as vcffile:
        content = vcffile.read()
    response = HttpResponse(content, content_type='text/plain')
    response['Content-Disposition'] = 'attachment; filename=%s' % filename
    response['Content-Length'] = os.path.getsize(fullpath)
    return response
def download_annotated(request, individual_id):
    """Send the annotated VCF zip produced by the annotation pipeline."""
    individual = get_object_or_404(Individual, pk=individual_id)
    filepath = os.path.dirname(str(individual.vcf_file.name))
    filename = os.path.basename(str(individual.vcf_file.name))
    basename = filename.split('.vcf')[0]
    fullpath = '%s/annotation.final.vcf.zip' % (filepath)
    # Read inside a context manager: the original passed an open handle to
    # HttpResponse and never closed it (file-descriptor leak).
    with open(fullpath, 'rb') as vcffile:
        content = vcffile.read()
    response = HttpResponse(content, content_type='application/x-zip-compressed')
    response['Content-Disposition'] = 'attachment; filename=%s.annotated.mendelmd.vcf.zip' % basename
    response['Content-Length'] = os.path.getsize(fullpath)
    return response
@login_required
def create_group(request):
    """Create a new Group from GroupForm; re-render the form on GET or on
    validation failure."""
    if request.method != 'POST':
        return render(request, 'groups/create_group.html', {'form': GroupForm()})
    form = GroupForm(request.POST, request.FILES)
    if form.is_valid():
        form.save()
        return redirect('individuals_list')
    return render(request, 'groups/create_group.html', {'form': form})
@login_required
def view_group(request, group_id):
    """Render the detail page for a single Group."""
    return render(request, 'groups/view_group.html',
                  {'group': get_object_or_404(Group, pk=group_id)})
class GroupDeleteView(DeleteView):
    """Deletes a Group record (database row only; no files are touched)."""
    model = Group

    def delete(self, request, *args, **kwargs):
        """Remove the group and bounce back to the individuals list."""
        group = self.get_object()
        self.object = group
        #username = self.object.user.username
        group.delete()
        messages.add_message(request, messages.INFO, "Group deleted with success!")
        return redirect('individuals_list')
def comparison(request):
    """Compare the genotypes of two individuals position-by-position.

    Builds a chr-pos indexed genotype map for each individual, counts the
    genotypes present at shared positions that match ("in common") versus
    mismatch, and reports totals plus the percentage of each individual's
    variants that are shared.

    Fix: the percentage computation now guards against a ZeroDivisionError
    when either individual has zero variants (the original divided by the
    raw counts unconditionally).
    """
    query = {}
    summary = {}
    query_string = request.META['QUERY_STRING']
    if request.method == 'GET':
        form = ComparisonForm(request.user, request.GET, request.FILES)
        if form.is_valid():
            individual_one_id = request.GET.get('individual_one', '')
            individual_two_id = request.GET.get('individual_two', '')
            read_depth = request.GET.get('read_depth', '')
            if read_depth != '':
                query['read_depth__gte'] = float(read_depth)
            if individual_one_id != '' and individual_two_id != '':
                variants_ind_one = Variant.objects.filter(individual__id=individual_one_id, **query).values('chr', 'pos', 'genotype')
                variants_ind_two = Variant.objects.filter(individual__id=individual_two_id, **query).values('chr', 'pos', 'genotype')
                print('Got Variants from Both!')
                genotypes_in_common = 0
                genotypes_not_in_common = 0
                # Map "chr-pos" -> list of genotypes for each individual.
                ind_one = {}
                ind_two = {}
                summary['variants_ind_one'] = variants_ind_one.count()
                for variant in variants_ind_one:
                    key = '%s-%s' % (variant['chr'], variant['pos'])
                    ind_one.setdefault(key, []).append(variant['genotype'])
                summary['variants_ind_two'] = variants_ind_two.count()
                for variant in variants_ind_two:
                    key = '%s-%s' % (variant['chr'], variant['pos'])
                    ind_two.setdefault(key, []).append(variant['genotype'])
                print('Finished creating indexes')
                # Only positions present in BOTH individuals are compared;
                # each genotype at a shared position counts as a match or a
                # mismatch (same as the original logic).
                for pos in ind_one:
                    if pos in ind_two:
                        for genotype in ind_one[pos]:
                            if genotype in ind_two[pos]:
                                genotypes_in_common += 1
                            else:
                                genotypes_not_in_common += 1
                print('genotypes in common: %s' % genotypes_in_common)
                summary['genotypes_in_common'] = genotypes_in_common
                summary['genotypes_not_in_common'] = genotypes_not_in_common
                summary['total_variants'] = genotypes_in_common + genotypes_not_in_common
                # Guard against division by zero when an individual has no
                # variants under the current filter.
                if summary['variants_ind_one']:
                    summary['percent_ind_one'] = round((float(genotypes_in_common)/summary['variants_ind_one'])*100, 2)
                else:
                    summary['percent_ind_one'] = 0.0
                if summary['variants_ind_two']:
                    summary['percent_ind_two'] = round((float(genotypes_in_common)/summary['variants_ind_two'])*100, 2)
                else:
                    summary['percent_ind_two'] = 0.0
                print(summary)
    else:
        form = ComparisonForm(request.user)
    return render(request, 'individuals/comparison.html', {'form':form, 'summary':summary, 'query_string':query_string})
|
{
"content_hash": "03be12aadbb975058c60db648515c6e1",
"timestamp": "",
"source": "github",
"line_count": 605,
"max_line_length": 189,
"avg_line_length": 38.65289256198347,
"alnum_prop": 0.5854180029933718,
"repo_name": "raonyguimaraes/mendelmd",
"id": "187b0de83501a8ff520c60464dc8a280aaaa514f",
"size": "23385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "individuals/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1089"
},
{
"name": "C++",
"bytes": "748"
},
{
"name": "CSS",
"bytes": "159812"
},
{
"name": "Dockerfile",
"bytes": "3164"
},
{
"name": "Go",
"bytes": "7075"
},
{
"name": "HTML",
"bytes": "889609"
},
{
"name": "JavaScript",
"bytes": "471651"
},
{
"name": "PHP",
"bytes": "52678"
},
{
"name": "Python",
"bytes": "754270"
},
{
"name": "Shell",
"bytes": "14174"
}
],
"symlink_target": ""
}
|
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.sdb as sdb
from salt.exceptions import SaltInvocationError
@pytest.fixture
def configure_loader_modules():
    # Loader-module configuration consumed by salt's pytest plumbing: give
    # the sdb execution module an empty set of dunder overrides.
    return {sdb: {}}
def test_get():
    """
    Test if it gets a value from a db, using a uri in the form of
    sdb://<profile>/<key>

    With no sdb profile configured (and strict off), sdb.get returns the
    uri unchanged.
    """
    assert sdb.get("sdb://salt/foo") == "sdb://salt/foo"
def test_get_strict_no_sdb_in_uri():
    """
    Test if SaltInvocationError exception will be raised if we
    don't start uri with sdb://
    """
    msg = 'SDB uri must start with "sdb://"'
    # pytest.raises is used only for the exception/message check; the
    # previously captured ExceptionInfo ("as cm") was never used.
    with pytest.raises(SaltInvocationError, match=msg):
        sdb.get("://salt/foo", strict=True)
def test_get_strict_no_profile():
    """
    Test if SaltInvocationError exception will be raised if we
    don't have a valid profile in the uri
    """
    msg = "SDB uri must have a profile name as a first part of the uri before the /"
    # pytest.raises is used only for the exception/message check; the
    # previously captured ExceptionInfo ("as cm") was never used.
    with pytest.raises(SaltInvocationError, match=msg):
        sdb.get("sdb://salt", strict=True)
def test_get_strict_no_profile_in_config():
    """
    Test if SaltInvocationError exception will be raised if we
    don't have expected profile in the minion config
    """
    # The message text (including the missing apostrophe in "wasnt") must
    # match what salt actually raises -- do not "fix" the spelling here.
    msg = 'SDB profile "salt" wasnt found in the minion configuration'
    # pytest.raises is used only for the exception/message check; the
    # previously captured ExceptionInfo ("as cm") was never used.
    with pytest.raises(SaltInvocationError, match=msg):
        sdb.get("sdb://salt/foo", strict=True)
def test_set():
    """
    Test if it sets a value from a db, using a uri in the form of
    sdb://<profile>/<key>

    With no matching profile configured, sdb.set_ returns a falsy value.
    """
    assert not sdb.set_("sdb://mymemcached/foo", "bar")
|
{
"content_hash": "ea134d63e6c39a884e7fada0b391eebc",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 84,
"avg_line_length": 26.262295081967213,
"alnum_prop": 0.6491885143570537,
"repo_name": "saltstack/salt",
"id": "01d1e1a627333dc6c1ac795ce6a54d32994b6667",
"size": "1602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/unit/modules/test_sdb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
import os
import cherrypy
import json
PATH = os.path.abspath(os.path.dirname(__file__))
class Root(object):
    """Root of the CherryPy object tree.  ``api`` is an empty namespace
    node so REST handlers can be attached under /api/ after class
    definition (see the RestObject mounting further down this file)."""
    class api(object):
        pass
class RestObject(object):
    """Read-only REST handler serving ``data`` as pretty-printed JSON."""
    # Required by CherryPy's MethodDispatcher to make the handler reachable.
    exposed = True
    def __init__(self,data):
        self.data = data
    def GET(self,*args,**kwargs):
        # Announce JSON so clients parse the body correctly.
        cherrypy.response.headers['Content-Type']= 'application/json'
        return json.dumps(self.data,indent=4)
# Static sample data served by the REST endpoint below.
mydata = [
    {'name':'User1','age':'21'},
    {'name':'User2','age':'25'},
    {'name':'User3','age':'26'},
    {'name':'User4','age':'27'},
    ]
# Expose the sample data at /api/users.
Root.api.users = RestObject(mydata)
cherrypy.tree.mount(Root(), '/', config={
        '/': {
            # Serve static files (including index.html) from this script's
            # own directory.
            'tools.staticdir.on': True,
            'tools.staticdir.dir': PATH,
            'tools.staticdir.index': 'index.html',
            # Dispatch HTTP verbs (GET/POST/...) to same-named handler methods.
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
        },
    })
# Start the server and block the main thread until shutdown.
cherrypy.engine.start()
cherrypy.engine.block()
|
{
"content_hash": "566a78d05f4bf542af829206430972a7",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 73,
"avg_line_length": 23.80952380952381,
"alnum_prop": 0.528,
"repo_name": "senormeow/durandal-example",
"id": "edb77c18180a1e177e447d3b16c1fbea92ca4823",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "269288"
},
{
"name": "JavaScript",
"bytes": "1547188"
},
{
"name": "Python",
"bytes": "1000"
}
],
"symlink_target": ""
}
|
from app.models import Tag
def get_all_tags():
    """Return a query of all tags, newest first."""
    query = Tag.query.order_by(Tag.created_on.desc())
    return query
def get_tag_by_id(id):
    """Return the tag with primary key ``id``, aborting with 404 if absent.

    NOTE(review): the parameter name shadows the builtin ``id``; renaming
    would change the keyword interface for callers, so it is left as-is.
    """
    query = Tag.query.filter_by(id=id).first_or_404()
    return query
def search_tags(string):
    """Return a query of tags whose name contains ``string`` (substring match)."""
    query = Tag.query.filter(Tag.name.contains(string))
    return query
|
{
"content_hash": "68f5877413c73f6cb04d503da4bcd191",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 55,
"avg_line_length": 19.75,
"alnum_prop": 0.6740506329113924,
"repo_name": "maxnovais/Flapy_Blog",
"id": "daca8e4d6b32084907e644de68c7a07b2a006d24",
"size": "338",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/services/tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4494"
},
{
"name": "JavaScript",
"bytes": "544"
},
{
"name": "Python",
"bytes": "27772"
}
],
"symlink_target": ""
}
|
from cms.models import CMSPlugin, Placeholder
from cms.models.aliaspluginmodel import AliasPluginModel
from cms.models.placeholderpluginmodel import PlaceholderReference
from cms.plugin_base import CMSPluginBase, PluginMenuItem
from cms.plugin_pool import plugin_pool
from cms.plugin_rendering import render_placeholder
from cms.utils.urlutils import admin_reverse
from django.conf.urls import url
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse
from django.middleware.csrf import get_token
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, get_language
class PlaceholderPlugin(CMSPluginBase):
    """Internal plugin used to reference a placeholder (e.g. on the
    clipboard); it is never rendered and never offered in the plugin
    picker."""
    name = _("Placeholder")
    parent_classes = [0]  # bogus parent id: ensures the plugin can never be added anywhere
    #require_parent = True
    render_plugin = False
    admin_preview = False
    model = PlaceholderReference
plugin_pool.register_plugin(PlaceholderPlugin)
class AliasPlugin(CMSPluginBase):
    """Plugin that mirrors another plugin's subtree or a whole placeholder
    ("alias"), plus the toolbar menu items and the admin endpoint used to
    create aliases."""
    name = _("Alias")
    allow_children = False
    model = AliasPluginModel
    render_template = "cms/plugins/alias.html"
    def render(self, context, instance, placeholder):
        """Expose the aliased plugin subtree, or the aliased placeholder's
        rendered content, to the alias template."""
        from cms.utils.plugins import downcast_plugins, build_plugin_tree
        context['instance'] = instance
        context['placeholder'] = placeholder
        if instance.plugin_id:
            # Rebuild the aliased plugin's subtree: fetch descendants in
            # tree order, downcast to concrete plugin instances and detach
            # the root so build_plugin_tree treats it as top-level.
            plugins = instance.plugin.get_descendants().order_by('placeholder', 'path')
            plugins = [instance.plugin] + list(plugins)
            plugins = downcast_plugins(plugins)
            plugins[0].parent_id = None
            plugins = build_plugin_tree(plugins)
            context['plugins'] = plugins
        if instance.alias_placeholder_id:
            # Placeholder alias: render the whole aliased placeholder.
            content = render_placeholder(instance.alias_placeholder, context)
            context['content'] = mark_safe(content)
        return context
    def get_extra_global_plugin_menu_items(self, request, plugin):
        # "Create Alias" entry in every plugin's context menu.
        return [
            PluginMenuItem(
                _("Create Alias"),
                admin_reverse("cms_create_alias"),
                data={'plugin_id': plugin.pk, 'csrfmiddlewaretoken': get_token(request)},
            )
        ]
    def get_extra_placeholder_menu_items(self, request, placeholder):
        # "Create Alias" entry in every placeholder's context menu.
        return [
            PluginMenuItem(
                _("Create Alias"),
                admin_reverse("cms_create_alias"),
                data={'placeholder_id': placeholder.pk, 'csrfmiddlewaretoken': get_token(request)},
            )
        ]
    def get_plugin_urls(self):
        # Admin endpoint backing the menu items above.
        return [
            url(r'^create_alias/$', self.create_alias, name='cms_create_alias'),
        ]
    def create_alias(self, request):
        """POST endpoint: place an AliasPluginModel pointing at the given
        plugin or placeholder onto the current user's clipboard."""
        if not request.user.is_staff:
            return HttpResponseForbidden("not enough privileges")
        if not 'plugin_id' in request.POST and not 'placeholder_id' in request.POST:
            return HttpResponseBadRequest("plugin_id or placeholder_id POST parameter missing.")
        plugin = None
        placeholder = None
        if 'plugin_id' in request.POST:
            pk = request.POST['plugin_id']
            try:
                plugin = CMSPlugin.objects.get(pk=pk)
            except CMSPlugin.DoesNotExist:
                return HttpResponseBadRequest("plugin with id %s not found." % pk)
        if 'placeholder_id' in request.POST:
            pk = request.POST['placeholder_id']
            try:
                placeholder = Placeholder.objects.get(pk=pk)
            except Placeholder.DoesNotExist:
                return HttpResponseBadRequest("placeholder with id %s not found." % pk)
            # Permission check only applies to placeholder aliases.
            if not placeholder.has_change_permission(request):
                return HttpResponseBadRequest("You do not have enough permission to alias this placeholder.")
        # The clipboard holds a single item: clear it before adding the alias.
        clipboard = request.toolbar.clipboard
        clipboard.cmsplugin_set.all().delete()
        language = get_language()
        if plugin:
            language = plugin.language
        alias = AliasPluginModel(language=language, placeholder=clipboard, plugin_type="AliasPlugin")
        if plugin:
            alias.plugin = plugin
        if placeholder:
            alias.alias_placeholder = placeholder
        alias.save()
        return HttpResponse("ok")
plugin_pool.register_plugin(AliasPlugin)
|
{
"content_hash": "8f419a20dee15607d567209a172e63f2",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 109,
"avg_line_length": 39.72222222222222,
"alnum_prop": 0.6494172494172494,
"repo_name": "wuzhihui1123/django-cms",
"id": "32fc603d84a375cd7b6fbd59a9e66cd498a0a0c2",
"size": "4314",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cms/cms_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "76879"
},
{
"name": "HTML",
"bytes": "88600"
},
{
"name": "JavaScript",
"bytes": "413481"
},
{
"name": "Python",
"bytes": "3344353"
},
{
"name": "Ruby",
"bytes": "990"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
import sys
import time
import stackless
from cogen.core.schedulers import Scheduler
from cogen.core.coroutines import coroutine
from cogen.core import events
@coroutine
def mycoro():
    # Cooperative cogen coroutine: wake up once per second and print a
    # marker so the cogen scheduler's progress is visible alongside the
    # stackless tasklet's output.
    while 1:
        yield events.Sleep(1)
        print '#'
# Build a cogen scheduler, register the coroutine and grab the scheduler's
# step iterator so it can be pumped manually from inside a stackless tasklet.
sched = Scheduler()
sched.add(mycoro)
sched_iter = sched.iter_run()
def tasklet():
    # Ordinary stackless tasklet that interleaves its own work with one
    # step of the cogen scheduler per loop iteration.
    while True:
        # complicated operation with side-effects
        print '.'
        time.sleep(0.1)
        # run a cogen loop
        sched_iter.next()
# start the simple tasklet
stackless.tasklet(tasklet)()
# start the stackless scheduler
stackless.run()
|
{
"content_hash": "5220cd0e0138346157f62ed59630283d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 49,
"avg_line_length": 19.71875,
"alnum_prop": 0.6465927099841522,
"repo_name": "pombredanne/cogen",
"id": "91166973424c6144a3ab806ece2075dbf2b05b02",
"size": "631",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/stackless-integration.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import os
import re
import subprocess
import sys
import yaml
# Global symbol index: maps symbol name -> "library:object" location.
# Populated from the "alldefs_sorted_uniq" file in main().
index = {}
# Improved 'libcache' data importer. Turns .a.cache files into YAML by
# invoking the proprietary parser directly.
def demangle(mangled):
    """Partially demangle a SunPro-mangled C++ name.

    Only the nested-name portion behind the ``__1c`` prefix is decoded
    (joined with ``::``); argument types are not parsed.  Names without
    the SunPro prefix, and prefixes that yield no name components, are
    returned unchanged.
    """
    if not mangled.startswith("__1c"):
        return mangled
    pos = 4
    parts = []
    while pos <= len(mangled) - 1:
        marker = mangled[pos]
        # A length marker strictly after 'A' encodes the field width
        # (marker + name characters); anything else terminates the scan.
        if not (ord("A") < ord(marker) <= ord("Z")):
            break
        span = ord(marker) - ord("A") + 1
        parts.append(mangled[pos + 1:pos + span])
        pos += span
    if not parts:
        print("Zero-length name after demangling " + mangled)
        return mangled
    return "::".join(parts)
class ParseLibParser(object):
    """Parses the text output of the proprietary libcache parser into
    per-object symbol tables and a call graph, and serialises the result
    as a JSON-LD-flavoured YAML document (see getYaml)."""
    def __init__(self, packageName):
        # The section we're currently processing (defs, undefs etc)
        self.symbolType = None
        # For each symbol, an array of the symbols it calls
        self.symbolCalls = {}
        # For each object, an array of the symbols it contains
        self.objectSymbols = {}
        # dataSymbols is a set of all the symbols which are data rather
        # than code (value is meaningless)
        self.dataSymbols = {}
        self.currentObjectName = None
        self.packageName = packageName
    def processPlainSymbol(self, symbol, defType):
        """Record a symbol line that carries no call information.

        Text ('t'/'T') definitions in a "defs" section are registered as
        code symbols of the current object; data ('d'/'D') symbols are
        remembered so call edges involving them can be ignored later."""
        if defType[0].lower() == 't' and self.symbolType == "defs":
            if symbol not in self.symbolCalls:
                self.symbolCalls[symbol] = []
            self.objectSymbols[self.currentObjectName].append(symbol)
        elif defType[0].lower() == 'd': # Data symbol, should be ignored
            self.dataSymbols[symbol] = 1
    def processCallSymbols(self, calledSymbol, callType, callingSymbol):
        """Record a call edge callingSymbol -> calledSymbol.

        'u' calls resolve inside this package; other calls are looked up
        in the global symbol index and fall back to a "NULL:" prefix when
        the target is unknown.  Calls to or from data symbols are skipped."""
        global index
        if callingSymbol in self.dataSymbols or calledSymbol in self.dataSymbols: return
        if callingSymbol not in self.symbolCalls: self.symbolCalls[callingSymbol] = []
        if callType == "u":
            callDest = self.packageName+":"+self.currentObjectName+":"+demangle(calledSymbol)
        elif calledSymbol not in index:
            callDest = "NULL:"+demangle(calledSymbol)
            print("%s not found in index"%calledSymbol)
        else:
            calledPackageName = index[calledSymbol] # calledPackageName includes the object!
            callDest = "id:"+calledPackageName+":"+demangle(calledSymbol)
        self.symbolCalls[callingSymbol].append(callDest)
        if callingSymbol not in self.objectSymbols[self.currentObjectName]:
            self.objectSymbols[self.currentObjectName].append(callingSymbol)
    def parse(self, text):
        """Scan the parser output line by line: track section headers and
        the current object, and dispatch symbol/call lines."""
        for l in text.splitlines():
            m = re.match('^(defs|local defs|undefs|weak defs|local undefs):\s*$',l)
            if(m):
                self.symbolType = m.group(1)
                continue
            m = re.match('^\s+(\S+).o\s*$',l) # Object name
            if m:
                self.currentObjectName = m.group(1)
                if self.currentObjectName not in self.objectSymbols: self.objectSymbols[self.currentObjectName] = []
                continue
            m = re.match('^\s+(\S+) (\S+)$',l) # Plain symbol with no call
            if m:
                self.processPlainSymbol(symbol = m.group(1), defType = m.group(2))
                continue
            m = re.match('^\s+(\S+) (\S+), caller: (\S+)$',l) # A call from a symbol in our object
            if m:
                self.processCallSymbols(calledSymbol = m.group(1),
                                        callType = m.group(2),
                                        callingSymbol = m.group(3))
    def getYaml(self):
        """Build the package/object/symbol document and dump it as YAML."""
        package = {'contains': [], '@id': "id:"+self.packageName,
                   '@type':'sw:Package', 'name': self.packageName }
        yamlRoot = { '@context': ['http://localhost:8000/context.jsonld'],
                     '@graph': package }
        for (objectName,objectContents) in self.objectSymbols.items():
            objectIdentifier = "id:"+self.packageName+":"+objectName
            # Fix: the object identifier key used to be the literal '@id:'
            # (trailing colon), inconsistent with the '@id' key used for
            # package and symbol entries and invalid as a JSON-LD keyword.
            obj = { '@id': objectIdentifier, '@type': 'sw:Object',
                    'name': objectName, 'contains': [] }
            for symbol in objectContents:
                if symbol=="":
                    print("Zero-length symbol found in objectContents for "+objectName)
                    exit(1)
                symbolIdentifier = objectIdentifier+":"+demangle(symbol)
                symbolYaml = { '@id': symbolIdentifier,
                               'name': demangle(symbol),
                               '@type':'sw:Symbol',
                               'calls': list(map(demangle, self.symbolCalls[symbol])) }
                obj['contains'].append(symbolYaml)
            package['contains'].append(obj)
        return yaml.dump(yamlRoot)
def scanFile(directory, filename):
    """Run the external cache parser (``$PARSE_LIB``) on one .a.cache file
    and write the converted YAML into the current working directory.

    The package name is derived from the file name by stripping the
    ``calls.`` prefix and the ``.a.cache`` suffix.
    """
    global index
    print("Scanning file "+os.path.join(directory, filename))
    parserBinary = os.environ['PARSE_LIB']
    packageName = filename
    if packageName.startswith("calls."): packageName = packageName[6:]
    if packageName.endswith(".a.cache"): packageName = packageName[:-8]
    parserResult = subprocess.check_output([parserBinary, os.path.join(directory, filename)], stdin=None).decode("utf-8")
    print("Parser returned %d bytes"%len(parserResult))
    # Distinct names: the original rebound "parser" (path -> object) and
    # used a local "yaml" that shadowed the yaml module.
    libParser = ParseLibParser(packageName)
    libParser.parse(parserResult)
    yamlText = libParser.getYaml()
    # Context manager guarantees the output file is closed even on error.
    with open(packageName+".yaml", "wt") as yamlFile:
        yamlFile.write(yamlText)
def scanDirectory(directory):
    """Process every libcache (*.a.cache) file found in *directory*."""
    print("Scanning %s" % directory, file=sys.stderr)
    for entry in os.listdir(directory):
        if entry.endswith(".a.cache"):
            scanFile(directory, entry)
def main():
    """Entry point: load the symbol index, then scan a file or directory.

    Requires PARSE_LIB in the environment (path to the cache-file parser)
    and an 'alldefs_sorted_uniq' index file in the working directory.
    """
    global index
    if 'PARSE_LIB' not in os.environ or not os.path.exists(os.environ['PARSE_LIB']):
        print("PARSE_LIB must be set to a valid cache file parser.")
        exit(1)
    # Load the index mapping each symbol to "library:object".
    # Use a context manager (the original leaked the file handle) and
    # iterate the file directly instead of readline()-ing until "".
    index = {}
    with open("alldefs_sorted_uniq") as indexfile:
        for l in indexfile:
            (symbol, objectName, libraryName) = l.split(":")
            index[symbol] = "%s:%s" % (libraryName.strip(), objectName.strip())
    if len(sys.argv) > 1:
        if os.path.isdir(sys.argv[1]):
            scanDirectory(sys.argv[1])
        else:
            scanFile(".", sys.argv[1])
    else:
        scanDirectory(".")
# Run the importer when executed as a script.
if __name__=="__main__":
    main()
|
{
"content_hash": "95f06835cdb9c8f224d2af87de78fd72",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 116,
"avg_line_length": 38.49142857142857,
"alnum_prop": 0.580166270783848,
"repo_name": "CodethinkLabs/software-dependency-visualizer",
"id": "68d541fc074a7fe243b5e747806883e08af167ae",
"size": "6760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "import/lib-importer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2172"
},
{
"name": "HTML",
"bytes": "54276"
},
{
"name": "JavaScript",
"bytes": "46260"
},
{
"name": "Makefile",
"bytes": "647"
},
{
"name": "Python",
"bytes": "63577"
},
{
"name": "TypeScript",
"bytes": "65063"
}
],
"symlink_target": ""
}
|
import sys
import re
import json
# internal function, do not call directly
def _jsonquery(J, exprs):
    # Recursive generator behind jsonquery(): yields every node of J matched
    # by the remaining chain of selector expressions in `exprs`.
    try: # pop the first expression
        expr = exprs[0]
        exprs = exprs[1:]
    except IndexError: # or note terminal case
        expr = None
    def recur(Js): # recurrence helper
        for s in _jsonquery(Js, exprs):
            yield s
    # if we have a list, we descend into it,
    # optionally applying some selection criterion
    if isinstance(J, list):
        # first/last selectors
        if expr==':first':
            for s in recur(J[0]): yield s
        elif expr==':last':
            for s in recur(J[-1]): yield s
        # no selector, simply recur
        elif not expr:
            for Ji in J: # for each element in the list
                for s in recur(Ji): yield s
        else: # we have a selector
            try: # see if we've got a key=value selector
                # NOTE(review): this bare except wraps a yielding loop, so it
                # also swallows exceptions raised by consumers while the
                # generator is suspended at a yield -- confirm only the
                # unpack ValueError from re.split is intended here.
                k,v = re.split('=',expr)
                for Ji in J: # for each element in the list
                    # test if it has the key and it matches
                    if k in Ji and str(Ji[k])==v: # if so recur
                        for s in recur(Ji): yield s
            except: # we just have a dict key selector
                pass
            # NOTE(review): this plain-key pass also runs after a successful
            # key=value pass above; it assumes a literal 'k=v' string never
            # occurs as a dict key -- confirm.
            for Ji in J: # iterate over list
                if expr in Ji: # if it has the key
                    # recur on the value of the key
                    for s in recur(Ji[expr]): yield s
    # if we have a dict, then we either have a key selector
    # or we have a solution
    elif isinstance(J, dict):
        if expr in J: # we have a key selector, recur
            for s in recur(J[expr]): yield s
        else: # this must be a solution, yield it
            yield J
    # we have a value that is neither a dict nor a list
    else:
        # so just yield it
        yield J
def jsonquery(J, expr):
    """ query JSON. syntax:
    {key} match a key from a dict
    {key}=value match any dict with key=value
    {key}:first if key refers to a list value, match the first item
    {key}:last if key refers to a list value, match the last item
    these expressions are chained together to descend a structure.
    for example given an IFCB bin JSON here are some meaningful expressions:
    "targets pid" - return a list of target URLs
    "targets:first pid" - return the first target URL
    "targets stitched=1 pid" - return a list of URLs of stitched targets
    "context" - return a list of lines in the context metadata field
    "context:first" - return the first line in the context metadata field
    """
    try:
        J = json.loads(J)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`: json.loads raises TypeError for
        # non-string input and ValueError (JSONDecodeError) for bad JSON; in
        # both cases assume an already-parsed structure was passed in.
        pass
    # Put a space before each ':' so ':first'/':last' become separate tokens.
    expr = re.sub(r'([:])', r' \1', expr)
    exprs = re.split(r'\s+', expr)
    for s in _jsonquery(J, exprs):
        yield s
|
{
"content_hash": "c52b419cd5cfefae67342506fd6509fb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 76,
"avg_line_length": 38.80821917808219,
"alnum_prop": 0.5757147899752912,
"repo_name": "joefutrelle/ladder",
"id": "0bd57171701fb3a67c77e4d1b7f2cf4ba27e2c7d",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonquery.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36882"
}
],
"symlink_target": ""
}
|
assert [foo, bar], "No AssertionError"  # non-empty list literal: always truthy, but not flagged by the check
assert "There is an AssertionError" # [assert-on-string-literal]
assert "" # [assert-on-string-literal]
|
{
"content_hash": "ee3b9556dd524cb4b97c2d9ff8eff5d4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 47.666666666666664,
"alnum_prop": 0.7342657342657343,
"repo_name": "ruchee/vimrc",
"id": "14e7d4c556bc22b20bacb54e3b7d552696ec0718",
"size": "206",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/functional/a/assert_on_string_literal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from PIL import Image
# Use the context manager so the underlying image file handle is closed
# deterministically instead of leaking until garbage collection.
with Image.open("cone.png") as im:
    print(im.format, im.size, im.mode)
|
{
"content_hash": "01c31876ee06894d8dd46ffa54be53f6",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 37,
"avg_line_length": 20.833333333333332,
"alnum_prop": 0.72,
"repo_name": "wolftype/pony",
"id": "a5b925b4e455d7aae2f4b42089892382351ae260",
"size": "125",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old/test.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "28488"
},
{
"name": "Shell",
"bytes": "302"
}
],
"symlink_target": ""
}
|
import logging
from datetime import timedelta
from django import forms
from django.forms import widgets
import happyforms
from tower import ugettext as _
from tower import ugettext_lazy as _lazy
import mkt
from mkt.api.forms import CustomNullBooleanSelect
from mkt.reviewers.models import CannedResponse
from mkt.reviewers.utils import ReviewHelper
from mkt.search.forms import ApiSearchForm, SimpleSearchForm
from mkt.webapps.models import AddonDeviceType
log = logging.getLogger('z.reviewers.forms')

# We set 'any' here since we need to default this field
# to PUBLIC if not specified for consumer pages.
STATUS_CHOICES = [('any', _lazy(u'Any Status'))]
STATUS_CHOICES.extend(
    (mkt.STATUS_CHOICES_API[status], mkt.STATUS_CHOICES[status])
    for status in mkt.WEBAPPS_UNLISTED_STATUSES + mkt.LISTED_STATUSES)

MODERATE_ACTION_FILTERS = (('', ''), ('approved', _lazy(u'Approved reviews')),
                           ('deleted', _lazy(u'Deleted reviews')))
# Maps the filter keywords above onto their activity-log constants.
MODERATE_ACTION_DICT = {'approved': mkt.LOG.APPROVE_REVIEW,
                        'deleted': mkt.LOG.DELETE_REVIEW}

COMBINED_DEVICE_CHOICES = [('', _lazy(u'Any Device'))] + [
    (dev.api_name, dev.name) for dev in mkt.DEVICE_TYPE_LIST]
class ModerateLogForm(happyforms.Form):
    """Date/action filter form for the moderation log page."""
    start = forms.DateField(required=False,
                            label=_lazy(u'View entries between'))
    end = forms.DateField(required=False,
                          label=_lazy(u'and'))
    search = forms.ChoiceField(required=False, choices=MODERATE_ACTION_FILTERS,
                               label=_lazy(u'Filter by type/action'))

    def clean(self):
        cleaned = self.cleaned_data
        # Push the end date one day forward so the range is inclusive.
        if cleaned.get('end'):
            cleaned['end'] += timedelta(days=1)
        # Translate the action keyword into its activity-log constant.
        if cleaned.get('search'):
            cleaned['search'] = MODERATE_ACTION_DICT[cleaned['search']]
        return cleaned
class ModerateLogDetailForm(happyforms.Form):
    # Hidden single-action form: its only field is pre-set to 'undelete',
    # used to restore a deleted review from the moderation log detail page.
    action = forms.CharField(
        required=True,
        widget=forms.HiddenInput(attrs={'value': 'undelete', }))
class ReviewLogForm(happyforms.Form):
    """Date/text filter form for the review log page."""
    start = forms.DateField(required=False,
                            label=_lazy(u'View entries between'))
    end = forms.DateField(required=False, label=_lazy(u'and'))
    search = forms.CharField(required=False, label=_lazy(u'containing'))

    def __init__(self, *args, **kw):
        super(ReviewLogForm, self).__init__(*args, **kw)
        # L10n: start, as in "start date"
        self.fields['start'].widget.attrs = {'placeholder': _('start'),
                                             'size': 10}
        # L10n: end, as in "end date"
        self.fields['end'].widget.attrs = {'size': 10, 'placeholder': _('end')}
        self.fields['search'].widget.attrs = {
            # L10n: Description of what can be searched for.
            'placeholder': _lazy(u'app, reviewer, or comment'),
            'size': 30}

    def clean(self):
        cleaned = self.cleaned_data
        # Push the end date one day forward so the range is inclusive.
        if cleaned.get('end'):
            cleaned['end'] += timedelta(days=1)
        return cleaned
class NonValidatingChoiceField(forms.ChoiceField):
    """A ChoiceField that doesn't validate."""
    def validate(self, value):
        # Accept any submitted value: the declared choices are advisory
        # (e.g. canned responses are free-form text once chosen).
        pass
class TestedOnForm(happyforms.Form):
    """One row of 'tested on' device-type/device/version information."""
    # Iterate .values() directly: the original looped over .items() and
    # discarded the key into `_`, which shadows the ugettext alias `_`
    # inside the class namespace (py2 list comprehensions leak their
    # loop variable into the enclosing scope).
    device_type = NonValidatingChoiceField(
        choices=([('', 'Choose...')] +
                 [(v.name, v.name) for v in mkt.DEVICE_TYPES.values()]),
        label=_lazy(u'Device Type:'), required=False)
    device = forms.CharField(required=False, label=_lazy(u'Device:'))
    version = forms.CharField(required=False, label=_lazy(u'Firefox Version:'))
# Formset so reviewers can record multiple tested-on rows at once.
TestedOnFormSet = forms.formsets.formset_factory(TestedOnForm)
class MOTDForm(happyforms.Form):
    # Message-of-the-day editor for the reviewer dashboard.
    motd = forms.CharField(required=True, widget=widgets.Textarea())
class ReviewAppForm(happyforms.Form):
    """Main reviewer action form: free-form comments, a canned response,
    the chosen review action and per-app overrides (device types, Tarako
    flag, update notification)."""
    comments = forms.CharField(widget=forms.Textarea(),
                               label=_lazy(u'Comments:'))
    canned_response = NonValidatingChoiceField(required=False)
    action = forms.ChoiceField(widget=forms.RadioSelect())
    device_override = forms.TypedMultipleChoiceField(
        choices=[(k, v.name) for k, v in mkt.DEVICE_TYPES.items()],
        coerce=int, label=_lazy(u'Device Type Override:'),
        widget=forms.CheckboxSelectMultiple, required=False)
    notify = forms.BooleanField(
        required=False, label=_lazy(u'Notify me the next time the manifest is '
                                    u'updated. (Subsequent updates will not '
                                    u'generate an email)'))
    is_tarako = forms.BooleanField(
        required=False, label=_lazy(u'This app works on Tarako devices.'))

    def __init__(self, *args, **kw):
        # Required 'helper' keyword: a ReviewHelper supplying the available
        # actions and the app under review; must be popped before the
        # parent Form.__init__ sees the kwargs.
        self.helper = kw.pop('helper')
        super(ReviewAppForm, self).__init__(*args, **kw)

        # We're starting with an empty one, which will be hidden via CSS.
        canned_choices = [['', [('', _('Choose a canned response...'))]]]

        responses = CannedResponse.objects.all()

        # Loop through the actions.
        for k, action in self.helper.actions.iteritems():
            # Responses whose sort_group lists this action's key.
            action_choices = [[c.response, c.name] for c in responses
                              if c.sort_group and k in c.sort_group.split(',')]

            # Add the group of responses to the canned_choices array.
            if action_choices:
                canned_choices.append([action['label'], action_choices])

        # Now, add everything not in a group.
        for r in responses:
            if not r.sort_group:
                canned_choices.append([r.response, r.name])

        self.fields['canned_response'].choices = canned_choices
        self.fields['action'].choices = [(k, v['label']) for k, v
                                         in self.helper.actions.items()]

        # Pre-tick the device override with the app's current device types.
        device_types = AddonDeviceType.objects.filter(
            addon=self.helper.addon).values_list('device_type', flat=True)
        if device_types:
            self.initial['device_override'] = device_types

        self.initial['is_tarako'] = (
            self.helper.addon.tags.filter(tag_text='tarako').exists())

    def is_valid(self):
        # On success, hand the cleaned data to the helper so the chosen
        # review action can be processed later.
        result = super(ReviewAppForm, self).is_valid()
        if result:
            self.helper.set_data(self.cleaned_data)
        return result
def get_review_form(data, files, request=None, addon=None, version=None,
                    attachment_formset=None, testedon_formset=None):
    """Build a ReviewAppForm wired up with a ReviewHelper for *addon*."""
    review_helper = ReviewHelper(request=request, addon=addon, version=version,
                                 attachment_formset=attachment_formset,
                                 testedon_formset=testedon_formset)
    return ReviewAppForm(data=data, files=files, helper=review_helper)
def _search_form_status(cleaned_data):
    """Translate the cleaned 'status' choice into an mkt status constant.

    'any' means no filtering (None); unknown values fall back to PENDING.
    """
    status = cleaned_data['status']
    if status != 'any':
        return mkt.STATUS_CHOICES_API_LOOKUP.get(status, mkt.STATUS_PENDING)
    return None
class ApiReviewersSearchForm(ApiSearchForm):
    """Search form for the reviewer API: adds reviewer-only filters on top
    of the public search form."""
    status = forms.ChoiceField(required=False, choices=STATUS_CHOICES,
                               label=_lazy(u'Status'))
    has_editor_comment = forms.NullBooleanField(
        required=False,
        label=_lazy(u'Has Editor Comment'),
        widget=CustomNullBooleanSelect)
    has_info_request = forms.NullBooleanField(
        required=False,
        label=_lazy(u'More Info Requested'),
        widget=CustomNullBooleanSelect)
    is_escalated = forms.NullBooleanField(
        required=False,
        label=_lazy(u'Escalated'),
        widget=CustomNullBooleanSelect)
    is_tarako = forms.NullBooleanField(
        required=False,
        label=_lazy(u'Tarako-ready'),
        widget=CustomNullBooleanSelect)
    # NOTE(review): the choice value appears to encode 'dev+device' (it is
    # split on '+' in clean() below), but COMBINED_DEVICE_CHOICES only shows
    # plain api_name values here -- confirm against mkt.DEVICE_TYPE_LIST.
    dev_and_device = forms.ChoiceField(
        required=False, choices=COMBINED_DEVICE_CHOICES,
        label=_lazy(u'Device'))

    def __init__(self, *args, **kwargs):
        super(ApiReviewersSearchForm, self).__init__(*args, **kwargs)

        # Mobile form, to render, expects choices from the Django field.
        BOOL_CHOICES = ((u'', _lazy('Unknown')),
                        (u'true', _lazy('Yes')),
                        (u'false', _lazy('No')))
        for field_name, field in self.fields.iteritems():
            if isinstance(field, forms.NullBooleanField):
                self.fields[field_name].choices = BOOL_CHOICES

    def clean_status(self):
        # 'any' -> no filter (None); otherwise the mkt status constant.
        return _search_form_status(self.cleaned_data)

    def clean(self):
        # Transform dev_and_device into the separate dev/device parameters.
        # We then call super() so that it gets transformed into ids that ES
        # will accept.
        dev_and_device = self.cleaned_data.pop('dev_and_device', '').split('+')
        self.cleaned_data['dev'] = dev_and_device[0]
        if len(dev_and_device) > 1:
            self.cleaned_data['device'] = dev_and_device[1]
        return super(ApiReviewersSearchForm, self).clean()
class ReviewersWebsiteSearchForm(SimpleSearchForm):
    # Website review-queue search: same status filtering as the app forms.
    status = forms.ChoiceField(required=False, choices=STATUS_CHOICES,
                               label=_lazy(u'Status'))

    def clean_status(self):
        # 'any' -> no filter (None); otherwise the mkt status constant.
        return _search_form_status(self.cleaned_data)
class ApproveRegionForm(happyforms.Form):
    """TODO: Use a DRF serializer."""
    approve = forms.BooleanField(required=False)

    def __init__(self, *args, **kw):
        # Required keywords: the app being reviewed and the target region.
        self.app = kw.pop('app')
        self.region = kw.pop('region')
        super(ApproveRegionForm, self).__init__(*args, **kw)

    def save(self):
        """Apply the reviewer's approval or rejection for self.region."""
        if self.cleaned_data['approve']:
            status = mkt.STATUS_PUBLIC
            # Make it public in the previously excluded region.
            self.app.addonexcludedregion.filter(
                region=self.region.id).delete()
        else:
            status = mkt.STATUS_REJECTED
        value, changed = self.app.geodata.set_status(
            self.region, status, save=True)
        if changed:
            self.app.save()
|
{
"content_hash": "93f066666f3e544e56eff623e05ded13",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 37.337037037037035,
"alnum_prop": 0.6138279932546374,
"repo_name": "kumar303/zamboni",
"id": "db1fba6ca987d11ddc0ce908f654df9b6d51cac5",
"size": "10081",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mkt/reviewers/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356844"
},
{
"name": "HTML",
"bytes": "2278394"
},
{
"name": "JavaScript",
"bytes": "533051"
},
{
"name": "Makefile",
"bytes": "4284"
},
{
"name": "Python",
"bytes": "4069479"
},
{
"name": "Shell",
"bytes": "11274"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
# Option group under which all retry-engine settings live.
retry_engine_group = cfg.OptGroup('retry_engine',
                                  title='Retry Engine Options',
                                  help='Options under this group allow to '
                                       'configure valid connection '
                                       'for retry engine.')

# The retry-engine options: pause between retries and retry budget.
retry_opts = [
    cfg.IntOpt(name='interval', min=30, default=30,
               advanced=True,
               help='How often should retry happen.'),
    cfg.IntOpt(name='max_attempts', default=5,
               advanced=True,
               help='How many times should retrying be tried.')
]
def register_opts(conf):
    # Register the retry-engine group and its options on the given
    # oslo.config ConfigOpts instance (group must be registered first).
    conf.register_group(retry_engine_group)
    conf.register_opts(retry_opts, group=retry_engine_group)
def list_opts():
    """Expose this module's option groups for oslo-config-generator."""
    return {retry_engine_group: retry_opts}
|
{
"content_hash": "01e6f4cdacf31bc6efdd3fb220a2c0f7",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 31.59259259259259,
"alnum_prop": 0.5392731535756154,
"repo_name": "stackforge/monasca-notification",
"id": "83b3ff9f088bd731172b8c36f98fd25a517a3177",
"size": "1434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "monasca_notification/conf/retry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "111733"
}
],
"symlink_target": ""
}
|
"""BeautifulSoup document parser/page scraper.
Requires:
pip install beautifulsoup4
pip install requests
"""
import requests
from bs4 import BeautifulSoup
html = requests.get('http://gog.com/').text
soup = BeautifulSoup(html)
print "\nCleaned up content:\n%s" % soup.prettify()
print "\nContent of the <title> tag:\n%s" % soup.title
print "\nContent of the <body> tag:\n%s" % soup.body
print "\nLinks from this page and where they are going:"
anchors = soup.find_all('a')
for anchor in anchors:
print "%40s href to -> %s" % (anchor.text, anchor.get("href") or "N/A (no href)")
print "\nAny imported scripts?"
scripts = soup.find_all('script')
for script in scripts:
try:
print "%40s href to -> %s" % (script.text, script["src"])
except KeyError:
print "Script without src, must be an inline script."
print "Contents of inline script:"
print script.text
print "\nIs this document in a certain encoding?"
try:
print "\tYes, it is: %s" % soup.find(lambda el: el.has_attr('charset'))["charset"]
except:
print "\tNo language declared for this document."
|
{
"content_hash": "968fc29fed671c23e1fb5d1f7eb0329d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 86,
"avg_line_length": 27.341463414634145,
"alnum_prop": 0.6717216770740411,
"repo_name": "jeremyosborne/python",
"id": "a05e9a59d34fc10de165e823ceee40315290e60d",
"size": "1121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/beautifulsoup/bsoup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "32330"
},
{
"name": "HTML",
"bytes": "4991"
},
{
"name": "JavaScript",
"bytes": "1260744"
},
{
"name": "Jupyter Notebook",
"bytes": "19274"
},
{
"name": "PLpgSQL",
"bytes": "26121"
},
{
"name": "Python",
"bytes": "271200"
}
],
"symlink_target": ""
}
|
from generator.textutil import indent, dedent
from generator.Enum import Enum
from generator.Message import Message
import re
class File:
    """One .proto file: holds its package/enum/message metadata and
    generates the matching C++ header, implementation and pybind11
    binding sources."""
    def __init__(self, f, base_file):
        # Store relevant information from our class in our object
        self.package = f.package
        self.name = f.name
        self.base_file = base_file
        self.include_path = "{}.h".format(base_file)
        self.fqn = '.{}'.format(self.package)
        self.dependencies = [d for d in f.dependency]
        self.enums = [Enum(e, self) for e in f.enum_type]
        self.messages = [Message(m, self) for m in f.message_type]

    def generate_cpp(self):
        """Return a (header, implementation, python_binding) tuple of C++
        source strings for this file."""
        # Include guard, e.g. 'message/Foo.proto' -> MESSAGE_FOO_H
        # (self.name[:-6] strips the '.proto' suffix).
        define = '{}_H'.format('_'.join([s.upper() for s in self.name[:-6].strip().split('/')]))
        parts = self.package.split('.')
        ns_open = '\n'.join(['namespace {} {{'.format(x) for x in parts])
        ns_close = '\n'.join('}' * len(parts))

        # Generate our enums c++
        enums = [e.generate_cpp() for e in self.enums]
        enum_headers = indent('\n\n'.join([e[0] for e in enums]))
        enum_impls = ('\n\n'.join([e[1] for e in enums]))
        enum_python = ('\n\n'.join([e[2] for e in enums]))

        # Generate our messages c++ (same (header, impl, python) triples)
        messages = [m.generate_cpp() for m in self.messages]
        message_headers = indent('\n\n'.join([m[0] for m in messages]))
        message_impls = ('\n\n'.join([m[1] for m in messages]))
        message_python = ('\n\n'.join([m[2] for m in messages]))

        # By default include some useful headers
        # yapf: disable
        # The leading digit on each entry is a sort priority used to group
        # the includes; it is stripped off ('i[1:]') when emitted below.
        includes = {
            '1<cstdint>',
            '2<string>',
            '2<array>',
            '2<exception>',
            '2<map>',
            '2<memory>',
            '2<vector>',
            '4"{}"'.format(self.name[:-6] + '.pb.h'),
            '5"message/MessageBase.h"'
        }
        # yapf: enable

        # We use a dirty hack here of putting a priority on each header
        # to make the includes be in a better order
        for d in self.dependencies:
            if d in ['Vector.proto', 'Matrix.proto']:
                includes.add('4"message/conversion/proto_matrix.h"')
            elif d in ['Neutron.proto']:
                pass  # We don't need to do anything for these ones
            elif d in ['google/protobuf/timestamp.proto', 'google/protobuf/duration.proto']:
                includes.add('4"message/conversion/proto_time.h"')
            else:
                includes.add('4"{}"'.format(d[:-6] + '.h'))

        # Don't forget to remove the first character
        includes = '\n'.join(['#include {}'.format(i[1:]) for i in sorted(list(includes))])

        header_template = dedent(
            """\
            #ifndef {define}
            #define {define}
            {includes}
            {open_namespace}
            // Enum Definitions
            {enums}
            // Message Definitions
            {messages}
            {close_namespace}
            #endif  // {define}
            """
        )

        impl_template = dedent(
            """\
            {include}
            // Enum Implementations
            {enums}
            // Message Implementations
            {messages}
            """
        )

        # NOTE(review): 'init_(unknown)' looks like a mangled placeholder --
        # a 'filename' argument is passed to .format() below but never used
        # by this template; confirm whether '{filename}' was intended in the
        # function name.
        python_template = dedent(
            """\
            #include <pybind11/pybind11.h>
            #include <pybind11/complex.h>
            #include <pybind11/stl.h>
            #include <pybind11/chrono.h>
            #include <pybind11/operators.h>
            #include <pybind11/eigen.h>
            {include}
            void init_(unknown)(pybind11::module& module) {{
                // Go down to our submodule as required as context
                pybind11::module context = module{submodules};
            {enums}
            {messages}
            }}
            """
        )

        # Chain of .def_submodule() calls descending from the root module
        # to this file's package (skips the leading '' and root part).
        python_submodules = ''.join('.def_submodule("{}")'.format(m) for m in self.fqn.split('.')[2:])

        return header_template.format(
            define=define,
            includes=includes,
            open_namespace=ns_open,
            enums=enum_headers,
            messages=message_headers,
            close_namespace=ns_close
        ), impl_template.format(
            include='#include "{}"'.format(self.name[:-6] + '.h'), enums=enum_impls, messages=message_impls
        ), python_template.format(
            include='#include "{}"'.format(self.name[:-6] + '.h'),
            messages=indent(message_python),
            enums=indent(enum_python),
            filename=re.sub(r'[^A-Za-z0-9]', '_', self.name),
            submodules=python_submodules
        )
|
{
"content_hash": "fedb6a110f528b0e7b5e603d92130489",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 107,
"avg_line_length": 32.58450704225352,
"alnum_prop": 0.5117786902960881,
"repo_name": "Fastcode/NUClearExample",
"id": "b5b0f07543b7e6743206d94ec85ada047677ae3f",
"size": "4651",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nuclear/message/generator/File.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "202371"
},
{
"name": "CMake",
"bytes": "71346"
},
{
"name": "Python",
"bytes": "105361"
}
],
"symlink_target": ""
}
|
"""
Copyright 2011 Dmitry Nikulin
This file is part of Captchure.
Captchure is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Captchure is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Captchure. If not, see <http://www.gnu.org/licenses/>.
"""
import cv, cvext, general
from operator import itemgetter, attrgetter
from numpy import linspace
from types import TupleType, ListType
def unwrap(components):
    """Extract the raw images from *components*.

    Accepts either a sequence of (area, colour, rect, image) tuples/lists
    or a sequence of images; returns None for any other shape.
    """
    outer = type(components[0])
    if outer in (TupleType, ListType):
        inner = type(components[0][3])
        if inner in (cv.iplimage, cv.cvmat):
            return map(itemgetter(3), components)
    elif outer in (cv.iplimage, cv.cvmat):
        return components
def shiftRect(rect, x, y):
    """Return *rect* (left, top, width, height) translated by (x, y)."""
    left, top, width, height = rect
    return (left + x, top + y, width, height)
def findCCs(image, erasecol=0, doContinue=None, doSkip=None, bRange=0, connectivity=8):
    """
    Finds all connected components in the image.

    doContinue is a function applied to the color of every new pixel in the image.
    If it is true, this pixel is ignored. Default: <= 128

    doSkip is a function applied to every new connected component found by the
    function. If it is true, this component will not be included in the result.
    Default: do not skip anything.
    """
    if doContinue is None:
        doContinue = lambda col: col <= 128
    if doSkip is None:
        doSkip = lambda comp: False
    # Flood-fill mask: OpenCV requires it to be 2px larger than the image.
    mask = cv.CreateImage((image.width + 2, image.height + 2), cv.IPL_DEPTH_8U, 1)
    cv.Zero(mask)
    components = []
    for x in range(image.width):
        for y in range(image.height):
            if doContinue(image[y, x]):
                continue
            # MASK_ONLY flag: paint the component into `mask` with value 255
            # (the 255 << 8 part) instead of recolouring the image itself.
            comp = cv.FloodFill(image, (x, y), 0, bRange, bRange, connectivity + cv.CV_FLOODFILL_MASK_ONLY + (255 << 8),
                                mask) # here 3rd argument is ignored
            # Shift by (1, 1) to account for the mask's 1px border.
            region = shiftRect(comp[2], 1, 1)
            if not doSkip(comp):
                seg = cvext.getSubImage(mask, region)
                # Stored as (area, fill value, bounding rect, component image).
                components.append((comp[0], comp[1], comp[2], seg))
            # Erase the found component from the working image so it is not
            # found again, then clear the mask for the next flood fill.
            cv.SetImageROI(image, comp[2])
            cv.SetImageROI(mask, region)
            cv.Set(image, erasecol, mask)
            cv.Zero(mask)
            cv.ResetImageROI(image)
            cv.ResetImageROI(mask)
    return components
def rectsIntersectH(rect1, rect2):
    """True when the rects overlap horizontally by more than a third of
    the narrower rect's width; symmetric in its arguments."""
    slack = min(rect1[2], rect2[2]) / 3
    left1, right1 = rect1[0], rect1[0] + rect1[2]
    left2, right2 = rect2[0], rect2[0] + rect2[2]
    if left2 < right1 - slack and right2 > left1 + slack:
        return True
    return left1 < right2 - slack and right1 > left2 + slack
def distV(rect1, rect2):
    """Vertical pixel gap between two rects; 0 when they overlap."""
    top1, bottom1 = rect1[1], rect1[1] + rect1[3]
    top2, bottom2 = rect2[1], rect2[1] + rect2[3]
    if bottom1 < top2:
        return top2 - bottom1
    elif bottom2 < top1:
        return top1 - bottom2
    return 0
def distH(rect1, rect2):
    """Horizontal pixel gap between two rects; 0 when they overlap."""
    left1, right1 = rect1[0], rect1[0] + rect1[2]
    left2, right2 = rect2[0], rect2[0] + rect2[2]
    if right1 < left2:
        return left2 - right1
    elif right2 < left1:
        return left1 - right2
    return 0
def joinComponents(components):
    """Merge several (area, colour, rect, image) components into one.

    Returns [total area, 255.0, combined bounding rect, OR-combined image].
    """
    rects = [comp[2] for comp in components]
    # `rect` is (left, top, right, bottom) here; it is converted back to
    # (left, top, width, height) form in the return value below.
    rect = (min([rect[0] for rect in rects]), min([rect[1] for rect in rects]), \
            max([rect[0] + rect[2] for rect in rects]), max([rect[1] + rect[3] for rect in rects]))
    resW = rect[2] - rect[0]
    resH = rect[3] - rect[1]
    result = cv.CreateImage((resW, resH), cv.IPL_DEPTH_8U, 1)
    cv.Zero(result)
    for comp in components:
        # OR each component into its place within the combined image.
        region = cv.GetSubRect(result, shiftRect(comp[2], -rect[0], -rect[1]))
        cv.Or(comp[3], region, region, None)
    return [sum([comp[0] for comp in components]), 255.0, \
            (rect[0], rect[1], rect[2] - rect[0], rect[3] - rect[1]), result]
def joinCCs(components, rectsIntersect):
    """Repeatedly merge components whose rects overlap according to
    *rectsIntersect* until no more merges happen; returns the new list."""
    i = 0
    while i < len(components) - 1:
        # Indices of all later components overlapping component i.
        brothers = filter(lambda j: rectsIntersect(components[i][2], components[j][2]), xrange(i + 1, len(components)))
        if brothers == []:
            i += 1
            continue
        brothers.append(i)
        family = joinComponents(map(lambda bro: components[bro], brothers))
        # Drop the merged components and insert the merged result at slot i;
        # i is intentionally NOT advanced so the merged component is
        # re-tested against the remainder of the list.
        components = [components[j] for j in xrange(len(components)) if j not in brothers]
        components[i:i] = [family]
    return components
def splitAt(image, splitters):
    """Cut *image* into vertical strips at the given x coordinates.

    *splitters* must contain the strip boundaries including both ends.
    Returns (segments, regions) where regions are rects in image coords.

    BUG FIX: with empty *splitters* the original returned a bare [image]
    (a single list, not a (segments, regions) pair), so any caller that
    unpacks two values would crash; now the full-image pair is returned.
    """
    if not splitters:
        return [image], [(0, 0, image.width, image.height)]
    segments = []
    regions = []
    for i in xrange(len(splitters) - 1):
        region = (splitters[i], 0, splitters[i + 1] - splitters[i], image.height)
        regions.append(region)
        segments.append(cvext.getSubImage(image, region))
    return segments, regions
def projectDown(image):
    """Sum each column of *image*, producing a horizontal projection."""
    return [cv.Sum(cv.GetCol(image, x)) for x in range(image.width)]
def splitIntoNParts(image, parts, projRadius):
    """Split *image* into *parts* vertical strips of roughly equal width.

    Each interior cut is nudged (within +/- projRadius columns) towards
    the local minimum of the column projection, so cuts tend to fall
    between glyphs rather than through them.
    """
    splitters = linspace(0, image.width - 1, parts + 1)
    projection = projectDown(image)
    def adjust(splitter):
        # Keep the outer borders fixed; move interior cuts to the darkest
        # nearby column.
        if splitter == 0 or splitter == image.width - 1:
            return splitter
        neighborhood = projection[splitter - projRadius: splitter + projRadius + 1]
        return general.argmin(neighborhood) + splitter - projRadius
    splitters = general.amap(adjust, splitters.astype(int))
    segments, regions = splitAt(image, splitters)
    return segments, regions
# Edge selectors for getBound(): which side of the image to scan from.
CAP_BOUND_LEFT = 0
CAP_BOUND_RIGHT = 1
CAP_BOUND_TOP = 2
CAP_BOUND_BOTTOM = 3
def getBound(image, bound, thresh=1, colthresh=0, start=None, stop=None):
    """Scan inward from one edge of *image* and return the index of the
    first row/column with at least *thresh* pixels brighter than
    *colthresh*; returns *start* when no row/column qualifies.

    *bound* is one of the CAP_BOUND_* constants; *start*/*stop* may
    restrict the scanned range (defaults cover the whole image).
    """
    if bound == CAP_BOUND_LEFT or bound == CAP_BOUND_RIGHT:
        # Column scan: left-to-right for LEFT, right-to-left for RIGHT.
        if bound == CAP_BOUND_LEFT:
            if start is None: start = 0
            if stop is None: stop = image.width
            step = 1
        else:
            if start is None: start = image.width - 1
            if stop is None: stop = -1
            step = -1
        for x in xrange(start, stop, step):
            # Count bright pixels in this column.
            n = 0
            for y in xrange(image.height):
                if image[y, x] > colthresh:
                    n += 1
            if n >= thresh:
                return x
        return start
    elif bound == CAP_BOUND_TOP or bound == CAP_BOUND_BOTTOM:
        # Row scan: top-to-bottom for TOP, bottom-to-top for BOTTOM.
        if bound == CAP_BOUND_TOP:
            if start is None: start = 0
            if stop is None: stop = image.height
            step = 1
        else:
            if start is None: start = image.height - 1
            if stop is None: stop = -1
            step = -1
        for y in xrange(start, stop, step):
            # Count bright pixels in this row.
            n = 0
            for x in xrange(image.width):
                if image[y, x] > colthresh:
                    n += 1
            if n >= thresh:
                return y
        return start
    else:
        raise ValueError("Cannot interpret 'bound' argument (%d)" % (bound))
def findNonBlackRect(image, thresh, colthresh=0):
    """Smallest rect covering every row/column that has at least *thresh*
    pixels brighter than *colthresh*."""
    top = getBound(image, CAP_BOUND_TOP, thresh, colthresh)
    bottom = getBound(image, CAP_BOUND_BOTTOM, thresh, colthresh)
    left = getBound(image, CAP_BOUND_LEFT, thresh, colthresh)
    right = getBound(image, CAP_BOUND_RIGHT, thresh, colthresh)
    return (left, top, right - left, bottom - top)
def cutNonBlackImage(image, thresh=1, colthresh=0):
    """Crop *image* to its non-black bounding box; None if it is empty."""
    newRect = findNonBlackRect(image, thresh, colthresh)
    # Zero width or height means nothing exceeded the threshold.
    if newRect[2] == 0 or newRect[3] == 0:
        return None
    region = cvext.getSubImage(image, newRect)
    return region
def cutNonBlack(comp, thresh, colthresh=0):
    """Crop a component tuple to its non-black bounding box.

    Returns a fresh (pixel count, 255.0, rect, image) tuple with the rect
    expressed in the parent image's coordinates, or None when the
    component is entirely black.
    """
    image = comp[3]
    rect = comp[2]
    newRect = findNonBlackRect(image, thresh, colthresh)
    # Zero width or height means nothing exceeded the threshold.
    if newRect[2] == 0 or newRect[3] == 0:
        return None
    region = cvext.getSubImage(image, newRect)
    # Translate the rect back into the parent image's coordinate system.
    newRect = shiftRect(newRect, rect[0], rect[1])
    return (cv.CountNonZero(region), 255.0, newRect, region)
def partsFromSegW(components, segW):
    """Estimate how many characters each component holds, based on its
    width relative to the expected single-character width *segW*."""
    return [comp[3].width / segW + 1 for comp in components]
# Doesn't work, needs thinking on
# expansions = {}
#
#def getExpansion(n):
# if n in expansions:
# return expansions[n]
# cur = [(1, )]
# for i in xrange(1, n):
# a = deque(cur)
# b = deque(cur)
# a.append((1, ) * i)
# b.appendleft((i, ))
# cur = map(lambda x, y: ((x[0] + 1) + x[1:]), a, b)
# expansions[n] = cur
# return cur
# All ordered compositions of the integer 5, i.e. the ways five characters
# can be distributed over adjacent segments; `five3` keeps only the
# three-part compositions (used when exactly three segments were found).
five = \
((5, ), \
 (1, 4), (2, 3), (3, 2), (4, 1), \
 (1, 1, 3), (1, 3, 1), (3, 1, 1), (2, 2, 1), (2, 1, 2), (1, 2, 2), \
 (1, 1, 1, 2), (1, 1, 2, 1), (1, 2, 1, 1), (2, 1, 1, 1), \
 (1, 1, 1, 1, 1))
five3 = filter(lambda exp: len(exp) == 3, five)
def distance(pt1, pt2):
    """Squared Euclidean distance between two equal-dimension points.

    No square root is taken: callers only compare distances, so the
    monotone squared form suffices.
    """
    if len(pt1) != len(pt2):
        raise ValueError("Points must be of the same dimensionality.")
    total = 0
    for a, b in zip(pt1, pt2):
        total += (a - b) ** 2
    return total
def partsFromnSegs5(components):
    """Guess how a 5-character captcha is spread over its segments.

    Returns a tuple of per-segment character counts (summing to 5 for the
    1..5 segment cases), chosen from the segment count and widths.
    """
    segments = unwrap(components)
    if len(segments) == 5:
        return (1, 1, 1, 1, 1)
    elif len(segments) == 4:
        # One segment holds two glyphs: assume it is the widest one.
        widths = map(attrgetter("width"), segments)
        widest = general.argmax(widths)
        result = [1] * 4
        result[widest] = 2
        return result
    elif len(segments) == 3:
        # Normalise widths by the narrowest segment and pick the 3-part
        # composition of 5 whose ratios are closest (squared distance).
        widths = map(attrgetter("width"), segments)
        narrowest = float(min(widths))
        reduced = map(lambda width: width / narrowest, widths)
        distances = map(lambda exp: distance(exp, reduced), five3)
        #print widths, narrowest, reduced, zip(distances, five3)
        return five3[general.argmin(distances)]
    elif len(segments) == 2:
        # Width ratio decides between a 4/1 and a 3/2 split.
        w0, w1 = segments[0].width, segments[1].width
        frac = float(w0) / float(w1)
        if frac < 1.0: frac = 1.0 / frac
        if frac > 2.75:
            return (4, 1) if w0 > w1 else (1, 4)
        else:
            return (3, 2) if w0 > w1 else (2, 3)
    elif len(segments) == 1:
        return (5, )
    else:
        # Fallback for 0 or >5 segments: one character per segment.
        return (1, ) * len(segments) # raise ValueError("Incorrect number of components: %d" % (len(segments)))
def spltCCs(components, allParts, projRadius, thresh=2):
    """Split each connected component into its expected number of parts.

    allParts[i] gives the part count for components[i]; multi-part
    components are replaced in place by their trimmed sub-components.
    Returns the (mutated) components list.
    """
    index = 0
    for parts in allParts:
        assert (parts >= 1)
        comp = components[index]
        seg = comp[3]
        rect = comp[2]
        if parts > 1:
            segments, rects = splitIntoNParts(seg, parts, projRadius)
            # Translate sub-rects back into the parent image's coordinates.
            rects = map(lambda rect1: shiftRect(rect1, rect[0], rect[1]), rects)
            mapper = lambda segIndex: (0.0, 255.0, \
                rects[segIndex], segments[segIndex])
            comp = map(mapper, xrange(parts))
            # Trim non-black borders from each piece; drop pieces that
            # became entirely empty.
            comp = map(lambda comp: cutNonBlack(comp, thresh), comp)
            comp = filter(lambda comp: comp is not None, comp)
            components[index: index + 1] = comp
        # NOTE(review): index advances by the requested part count even if
        # cutNonBlack dropped pieces above -- verify later components are
        # still addressed correctly when that filter removes entries.
        index += parts
    return components
|
{
"content_hash": "91fc19b7504cdc6663467582077ae36a",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 120,
"avg_line_length": 34.19565217391305,
"alnum_prop": 0.5836890382344928,
"repo_name": "Apkawa/simple-captcha-ocr-opencv",
"id": "4b7abf49da28aca5f998a60b65940d1996d18876",
"size": "11011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cap_extra/segment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36293"
}
],
"symlink_target": ""
}
|
# Inclusive bounds of the 32-bit unsigned address space being scanned.
MIN_ADDRESS = 0
MAX_ADDRESS = 4294967295  # 2**32 - 1
def parse_filter(line):
    """Parse one 'start-end' blacklist line into an (int, int) tuple."""
    low, high = (int(part) for part in line.split('-'))
    return (low, high)
def is_allowed(filters, address):
    """Return True if *address* falls outside every blocked range.

    filters must be sorted by range start; this lets the scan stop as
    soon as a range begins past the address.
    """
    for low, high in filters:
        if low > address:
            # Sorted input: no later range can start at or below address.
            return True
        if low <= address <= high:
            return False
    return True
def find_minimum(filters):
    """Return the lowest address in [MIN_ADDRESS, MAX_ADDRESS] not blocked
    by the (sorted) filters, or None if every address is blocked.
    """
    # range() excludes its stop value, so add 1 to actually test
    # MAX_ADDRESS itself (the original skipped the final address).
    for address in range(MIN_ADDRESS, MAX_ADDRESS + 1):
        if is_allowed(filters, address):
            return address
    return None
if __name__ == '__main__':
    # Read one 'start-end' blocked range per line from the puzzle input.
    filters = []
    with open('input.txt') as filter_file:
        for line in filter_file:
            filters.append(parse_filter(line))
    # Order filters
    # Sorting enables is_allowed()'s early exit once a range starts
    # beyond the candidate address.
    filters.sort()
    minimum = find_minimum(filters)
    print('Minimum address: {}'.format(minimum))
|
{
"content_hash": "84194539811483dc5adf73da544114f9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 57,
"avg_line_length": 24.96969696969697,
"alnum_prop": 0.6080097087378641,
"repo_name": "stdgy/adventofcode",
"id": "d71e87e03aaf48df3762403540a1de91ea829577",
"size": "825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2016/days/20/solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6099"
}
],
"symlink_target": ""
}
|
import pip
import xmlrpclib
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
'''
originally based off of:
https://gist.github.com/3555765
which was originally based off of:
http://code.activestate.com/recipes/577708/
'''
class Command(BaseCommand):
    """Report installed distributions whose version differs from the
    latest release on PyPI (Python 2 code: print statement, xmlrpclib).

    NOTE(review): uses PyPI's legacy XML-RPC API, which has since been
    deprecated -- verify it still responds before trusting empty results.
    """
    def handle(self, *args, **options):
        pypi = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')
        # Project name -> version to ignore; the value '*' skips the
        # package entirely (LIBRARYWATCH_IGNORE_VERSIONS in settings).
        ignores = getattr(settings, 'LIBRARYWATCH_IGNORE_VERSIONS', {})
        for dist in pip.get_installed_distributions():
            if ignores.get(dist.project_name, '') != '*': # if it's totally ignored, don't even check.
                available = pypi.package_releases(dist.project_name)
                if not available:
                    # Try to capitalize pkg name
                    available = pypi.package_releases(dist.project_name.capitalize())
                try:
                    # Report unless the newest release matches what is
                    # installed or is explicitly ignored for this package.
                    if available[0] != dist.version and available[0] != ignores.get(dist.project_name, ''):
                        print '{dist.project_name} ({dist.version} != {available})'.format(dist=dist, available=available[0])
                except IndexError:
                    # No releases found under either name casing.
                    print('%s is not available on PyPI.' % dist.project_name)
|
{
"content_hash": "1912e073839ff2e18a32ec7971648a94",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 125,
"avg_line_length": 36.285714285714285,
"alnum_prop": 0.6133858267716535,
"repo_name": "adamfast/django-librarywatch",
"id": "3e8ce9be41bea5a66ad241914cf2f59f153e76fd",
"size": "1270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "librarywatch/management/commands/check_for_updates.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2123"
}
],
"symlink_target": ""
}
|
"""
OpenFlow 1.5 definitions.
"""
from ryu.lib import type_desc
from ryu.ofproto import oxm_fields
from ryu.ofproto import oxs_fields
from struct import calcsize
# enum ofp_port_no
# Port numbering. Ports are numbered starting from 1.
OFPP_MAX = 0xffffff00 # Maximum number of physical and logical
# switch ports.
OFPP_UNSET = 0xfffffff7 # Output port not set in action-set.
# used only in OXM_OF_ACTSET_OUTPUT.
OFPP_IN_PORT = 0xfffffff8 # Send the packet out the input port. This
# reserved port must be explicitly used in
# order to send back out of the input port.
OFPP_TABLE = 0xfffffff9 # Submit the packet to the first flow table
# NB: This destination port can only be used
# in packet-out messages.
OFPP_NORMAL = 0xfffffffa # Forward using non-OpenFlow pipeline.
OFPP_FLOOD = 0xfffffffb # Flood using non-OpenFlow pipeline.
OFPP_ALL = 0xfffffffc # All standard ports except input port.
OFPP_CONTROLLER = 0xfffffffd # Send to controller.
OFPP_LOCAL = 0xfffffffe # Local openflow "port".
OFPP_ANY = 0xffffffff # Special value used in some requests when
# no port is specified (i.e. wildcarded).
# enum ofp_type
# Immutable messages.
OFPT_HELLO = 0 # Symmetric message
OFPT_ERROR = 1 # Symmetric message
OFPT_ECHO_REQUEST = 2 # Symmetric message
OFPT_ECHO_REPLY = 3 # Symmetric message
OFPT_EXPERIMENTER = 4 # Symmetric message
# Switch configuration messages.
OFPT_FEATURES_REQUEST = 5 # Controller/switch message
OFPT_FEATURES_REPLY = 6 # Controller/switch message
OFPT_GET_CONFIG_REQUEST = 7 # Controller/switch message
OFPT_GET_CONFIG_REPLY = 8 # Controller/switch message
OFPT_SET_CONFIG = 9 # Controller/switch message
# Asynchronous messages.
OFPT_PACKET_IN = 10 # Async message
OFPT_FLOW_REMOVED = 11 # Async message
OFPT_PORT_STATUS = 12 # Async message
# Controller command messages.
OFPT_PACKET_OUT = 13 # Controller/switch message
OFPT_FLOW_MOD = 14 # Controller/switch message
OFPT_GROUP_MOD = 15 # Controller/switch message
OFPT_PORT_MOD = 16 # Controller/switch message
OFPT_TABLE_MOD = 17 # Controller/switch message
# Multipart messages.
OFPT_MULTIPART_REQUEST = 18 # Controller/switch message
OFPT_MULTIPART_REPLY = 19 # Controller/switch message
# Barrier messages.
OFPT_BARRIER_REQUEST = 20 # Controller/switch message
OFPT_BARRIER_REPLY = 21 # Controller/switch message
# Controller role change request messages.
OFPT_ROLE_REQUEST = 24 # Controller/switch message
OFPT_ROLE_REPLY = 25 # Controller/switch message
# Asynchronous message configuration.
OFPT_GET_ASYNC_REQUEST = 26 # Controller/switch message
OFPT_GET_ASYNC_REPLY = 27 # Controller/switch message
OFPT_SET_ASYNC = 28 # Controller/switch message
# Meters and rate limiters configuration messages.
OFPT_METER_MOD = 29 # Controller/switch message
# Controller role change event messages.
OFPT_ROLE_STATUS = 30 # Async message
# Asynchronous messages.
OFPT_TABLE_STATUS = 31 # Async message
# Request forwarding by the switch.
OFPT_REQUESTFORWARD = 32 # Async message
# Bundle operations (multiple messages as a single operation).
OFPT_BUNDLE_CONTROL = 33 # Controller/switch message
OFPT_BUNDLE_ADD_MESSAGE = 34 # Controller/switch message
# Controller Status async message.
OFPT_CONTROLLER_STATUS = 35 # Async message
_OFP_HEADER_PACK_STR = 'BBHI'
OFP_HEADER_PACK_STR = '!' + _OFP_HEADER_PACK_STR
OFP_HEADER_SIZE = 8
# struct ofp_hello
OFP_HELLO_HEADER_SIZE = 8
# struct ofp_hello_elem_header
OFP_HELLO_ELEM_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_HEADER_PACK_STR) == OFP_HELLO_ELEM_HEADER_SIZE)
# enum ofp_hello_elem_type
OFPHET_VERSIONBITMAP = 1
# struct ofp_hello_elem_versionbitmap
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR = '!HH'
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE = 4
assert (calcsize(OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_PACK_STR) ==
OFP_HELLO_ELEM_VERSIONBITMAP_HEADER_SIZE)
OFP_DEFAULT_MISS_SEND_LEN = 128
# enum ofp_config_flags
# Handling of IP fragments.
OFPC_FRAG_NORMAL = 0 # No special handling for fragments.
OFPC_FRAG_DROP = 1 << 0 # Drop fragments.
OFPC_FRAG_REASM = 1 << 1 # Reassemble (only if OFPC_IP_REASM set).
OFPC_FRAG_MASK = 3
# struct ofp_switch_config
OFP_SWITCH_CONFIG_PACK_STR = '!HH'
OFP_SWITCH_CONFIG_SIZE = 12
assert (calcsize(OFP_SWITCH_CONFIG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_SWITCH_CONFIG_SIZE)
# enum ofp_table
OFPTT_MAX = 0xfe # Last usable table number.
OFPTT_ALL = 0xff # Wildcard table used for table config, flow stats
# and flow deletes.
# enum ofp_table_config
OFPTC_DEPRECATED_MASK = 3 # Deprecated bits
OFPTC_EVICTION = 1 << 2 # Authorise table to evict flows.
OFPTC_VACANCY_EVENTS = 1 << 3 # Enable vacancy events.
# enum ofp_table_mod_prop_type
OFPTMPT_EVICTION = 0x2 # Eviction property.
OFPTMPT_VACANCY = 0x3 # Vacancy property.
OFPTMPT_EXPERIMENTER = 0xFFFF # Experimenter property.
# enum ofp_table_mod_prop_eviction_flag
OFPTMPEF_OTHER = 1 << 0 # Using other factors.
OFPTMPEF_IMPORTANCE = 1 << 1 # Using flow entry importance.
OFPTMPEF_LIFETIME = 1 << 2 # Using flow entry lifetime.
# struct ofp_table_mod_prop_eviction
OFP_TABLE_MOD_PROP_EVICTION_PACK_STR = '!HHI'
OFP_TABLE_MOD_PROP_EVICTION_SIZE = 8
assert(calcsize(OFP_TABLE_MOD_PROP_EVICTION_PACK_STR) ==
OFP_TABLE_MOD_PROP_EVICTION_SIZE)
# struct ofp_table_mod_prop_vacancy
OFP_TABLE_MOD_PROP_VACANCY_PACK_STR = '!HHBBBx'
OFP_TABLE_MOD_PROP_VACANCY_SIZE = 8
assert(calcsize(OFP_TABLE_MOD_PROP_VACANCY_PACK_STR) ==
OFP_TABLE_MOD_PROP_VACANCY_SIZE)
# struct ofp_table_mod_prop_experimenter
OFP_TABLE_MOD_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_TABLE_MOD_PROP_EXPERIMENTER_SIZE = 12
assert(calcsize(OFP_TABLE_MOD_PROP_EXPERIMENTER_PACK_STR) ==
OFP_TABLE_MOD_PROP_EXPERIMENTER_SIZE)
# struct ofp_table_mod
OFP_TABLE_MOD_PACK_STR = '!B3xI'
OFP_TABLE_MOD_SIZE = 16
assert (calcsize(OFP_TABLE_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_TABLE_MOD_SIZE)
# enum ofp_capabilities
OFPC_FLOW_STATS = 1 << 0 # Flow statistics.
OFPC_TABLE_STATS = 1 << 1 # Table statistics.
OFPC_PORT_STATS = 1 << 2 # Port statistics.
OFPC_GROUP_STATS = 1 << 3 # Group statistics.
OFPC_IP_REASM = 1 << 5 # Can reassemble IP fragments.
OFPC_QUEUE_STATS = 1 << 6 # Queue statistics.
OFPC_PORT_BLOCKED = 1 << 8 # Switch will block looping ports.
OFPC_BUNDLES = 1 << 9 # Switch supports bundles.
OFPC_FLOW_MONITORING = 1 << 10 # Switch supports flow monitoring.
# enum ofp_port_config
OFPPC_PORT_DOWN = 1 << 0 # Port is administratively down.
OFPPC_NO_RECV = 1 << 2 # Drop all packets recieved by port.
OFPPC_NO_FWD = 1 << 5 # Drop packets forwarded to port.
OFPPC_NO_PACKET_IN = 1 << 6 # Do not send packet-in msgs for port.
# enum ofp_port_state
OFPPS_LINK_DOWN = 1 << 0 # No physical link present.
OFPPS_BLOCKED = 1 << 1 # Port is blocked
OFPPS_LIVE = 1 << 2 # Live for Fast Failover Group.
# enum ofp_port_features
OFPPF_10MB_HD = 1 << 0 # 10 Mb half-duplex rate support.
OFPPF_10MB_FD = 1 << 1 # 10 Mb full-duplex rate support.
OFPPF_100MB_HD = 1 << 2 # 100 Mb half-duplex rate support.
OFPPF_100MB_FD = 1 << 3 # 100 Mb full-duplex rate support.
OFPPF_1GB_HD = 1 << 4 # 1 Gb half-duplex rate support.
OFPPF_1GB_FD = 1 << 5 # 1 Gb full-duplex rate support.
OFPPF_10GB_FD = 1 << 6 # 10 Gb full-duplex rate support.
OFPPF_40GB_FD = 1 << 7 # 40 Gb full-duplex rate support.
OFPPF_100GB_FD = 1 << 8 # 100 Gb full-duplex rate support.
OFPPF_1TB_FD = 1 << 9 # 1 Tb full-duplex rate support.
OFPPF_OTHER = 1 << 10 # Other rate, not in the list.
OFPPF_COPPER = 1 << 11 # Copper medium.
OFPPF_FIBER = 1 << 12 # Fiber medium.
OFPPF_AUTONEG = 1 << 13 # Auto-negotiation.
OFPPF_PAUSE = 1 << 14 # Pause.
OFPPF_PAUSE_ASYM = 1 << 15 # Asymmetric pause.
# enum ofp_port_desc_prop_type
OFPPDPT_ETHERNET = 0 # Ethernet property.
OFPPDPT_OPTICAL = 1 # Optical property.
OFPPDPT_PIPELINE_INPUT = 2 # Ingress pipeline fields.
OFPPDPT_PIPELINE_OUTPUT = 3 # Egress pipeline fields.
OFPPDPT_RECIRCULATE = 4 # Recirculation property.
OFPPDPT_EXPERIMENTER = 0xFFFF # Experimenter property.
# struct ofp_port_desc_prop_ethernet
OFP_PORT_DESC_PROP_ETHERNET_PACK_STR = '!HH4xIIIIII'
OFP_PORT_DESC_PROP_ETHERNET_SIZE = 32
assert (calcsize(OFP_PORT_DESC_PROP_ETHERNET_PACK_STR) ==
OFP_PORT_DESC_PROP_ETHERNET_SIZE)
# enum ofp_optical_port_features
OFPOPF_RX_TUNE = 1 << 0 # Receiver is tunable
OFPOPF_TX_TUNE = 1 << 1 # Transmit is tunable
OFPOPF_TX_PWR = 1 << 2 # Power is configurable
OFPOPF_USE_FREQ = 1 << 3 # Use Frequency, not wavelength
# struct ofp_port_desc_prop_optical
OFP_PORT_DESC_PROP_OPTICAL_PACK_STR = '!HH4xIIIIIIIHH'
OFP_PORT_DESC_PROP_OPTICAL_SIZE = 40
assert (calcsize(OFP_PORT_DESC_PROP_OPTICAL_PACK_STR) ==
OFP_PORT_DESC_PROP_OPTICAL_SIZE)
# struct ofp_port_desc_prop_oxm
OFP_PORT_DESC_PROP_OXM_PACK_STR = '!HH'
OFP_PORT_DESC_PROP_OXM_SIZE = 4
assert (calcsize(OFP_PORT_DESC_PROP_OXM_PACK_STR) ==
OFP_PORT_DESC_PROP_OXM_SIZE)
# struct ofp_port_desc_prop_recirculate
OFP_PORT_DESC_PROP_RECIRCULATE_PACK_STR = '!HH'
OFP_PORT_DESC_PROP_RECIRCULATE_SIZE = 4
assert (calcsize(OFP_PORT_DESC_PROP_RECIRCULATE_PACK_STR) ==
OFP_PORT_DESC_PROP_RECIRCULATE_SIZE)
# struct ofp_port_desc_prop_experimenter
OFP_PORT_DESC_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_PORT_DESC_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_PORT_DESC_PROP_EXPERIMENTER_PACK_STR) ==
OFP_PORT_DESC_PROP_EXPERIMENTER_SIZE)
# struct ofp_port
OFP_MAX_PORT_NAME_LEN = 16
OFP_ETH_ALEN = 6
OFP_ETH_ALEN_STR = str(OFP_ETH_ALEN)
_OFP_PORT_PACK_STR = 'IH2x' + OFP_ETH_ALEN_STR + 's' + '2x' + \
str(OFP_MAX_PORT_NAME_LEN) + 's' + 'II'
OFP_PORT_PACK_STR = '!' + _OFP_PORT_PACK_STR
OFP_PORT_SIZE = 40
assert (calcsize(OFP_PORT_PACK_STR) == OFP_PORT_SIZE)
# struct ofp_switch_features
OFP_SWITCH_FEATURES_PACK_STR = '!QIBB2xII'
OFP_SWITCH_FEATURES_SIZE = 32
assert (calcsize(OFP_SWITCH_FEATURES_PACK_STR) + OFP_HEADER_SIZE ==
OFP_SWITCH_FEATURES_SIZE)
# enum ofp_port_reason
OFPPR_ADD = 0 # The port was added.
OFPPR_DELETE = 1 # The port was removed.
OFPPR_MODIFY = 2 # Some attribute of the port has changed.
# struct ofp_port_status
OFP_PORT_STATUS_PACK_STR = '!B7x' + _OFP_PORT_PACK_STR
OFP_PORT_STATUS_SIZE = 56
assert (calcsize(OFP_PORT_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
OFP_PORT_STATUS_SIZE)
# enum ofp_port_mod_prop_type
OFPPMPT_ETHERNET = 0 # Ethernet property.
OFPPMPT_OPTICAL = 1 # Optical property.
OFPPMPT_EXPERIMENTER = 0xFFFF # Experimenter property.
# struct ofp_port_mod_prop_ethernet
OFP_PORT_MOD_PROP_ETHERNET_PACK_STR = '!HHI'
OFP_PORT_MOD_PROP_ETHERNET_SIZE = 8
assert (calcsize(OFP_PORT_MOD_PROP_ETHERNET_PACK_STR) ==
OFP_PORT_MOD_PROP_ETHERNET_SIZE)
# struct ofp_port_mod_prop_optical
OFP_PORT_MOD_PROP_OPTICAL_PACK_STR = '!HHIIIII'
OFP_PORT_MOD_PROP_OPTICAL_SIZE = 24
assert (calcsize(OFP_PORT_MOD_PROP_OPTICAL_PACK_STR) ==
OFP_PORT_MOD_PROP_OPTICAL_SIZE)
# struct ofp_port_mod_prop_experimenter
OFP_PORT_MOD_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_PORT_MOD_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_PORT_MOD_PROP_EXPERIMENTER_PACK_STR) ==
OFP_PORT_MOD_PROP_EXPERIMENTER_SIZE)
# struct ofp_port_mod
OFP_PORT_MOD_PACK_STR = '!I4x' + OFP_ETH_ALEN_STR + 's2xII'
OFP_PORT_MOD_SIZE = 32
assert (calcsize(OFP_PORT_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_PORT_MOD_SIZE)
# enum ofp_header_type_namespaces
OFPHTN_ONF = 0 # ONF namespace.
OFPHTN_ETHERTYPE = 1 # ns_type is an Ethertype.
OFPHTN_IP_PROTO = 2 # ns_type is a IP protocol number.
OFPHTN_UDP_TCP_PORT = 3 # ns_type is a TCP or UDP port.
OFPHTN_IPV4_OPTION = 4 # ns_type is an IPv4 option number.
# enum ofp_header_type_onf
OFPHTO_ETHERNET = 0 # Ethernet (DIX or IEEE 802.3) - default.
OFPHTO_NO_HEADER = 1 # No header, ex. circuit switch.
OFPHTO_OXM_EXPERIMENTER = 0xFFFF # Use Experimenter OXM.
# struct ofp_header_type
OFP_HEADER_TYPE_PACK_STR = '!HH'
OFP_HEADER_TYPE_SIZE = 4
assert (calcsize(OFP_HEADER_TYPE_PACK_STR) ==
OFP_HEADER_TYPE_SIZE)
# enum ofp_match_type
OFPMT_STANDARD = 0 # Deprecated
OFPMT_OXM = 1 # OpenFlow Extensible Match
# struct ofp_match
_OFP_MATCH_PACK_STR = 'HH4x'
OFP_MATCH_PACK_STR = '!' + _OFP_MATCH_PACK_STR
OFP_MATCH_SIZE = 8
assert calcsize(OFP_MATCH_PACK_STR) == OFP_MATCH_SIZE
# enum ofp_oxm_class
OFPXMC_NXM_0 = 0x0000 # Backward compatibility with NXM
OFPXMC_NXM_1 = 0x0001 # Backward compatibility with NXM
OFPXMC_OPENFLOW_BASIC = 0x8000 # Basic class for OpenFlow
OFPXMC_PACKET_REGS = 0x8001 # Packet registers (pipeline fields).
OFPXMC_EXPERIMENTER = 0xFFFF # Experimenter class
# enum ofp_vlan_id
OFPVID_PRESENT = 0x1000 # bit that indicate that a VLAN id is set.
OFPVID_NONE = 0X0000 # No VLAN id was set.
def _oxm_tlv_header(class_, field, hasmask, length):
return (class_ << 16) | (field << 9) | (hasmask << 8) | length
def oxm_tlv_header(field, length):
    # Unmasked OXM header in the OpenFlow basic class (hasmask = 0).
    return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 0, length)
def oxm_tlv_header_w(field, length):
    # Masked ("wildcarded") OXM header: the payload carries value + mask
    # back to back, so the encoded length is doubled.
    return _oxm_tlv_header(OFPXMC_OPENFLOW_BASIC, field, 1, length * 2)
def oxm_tlv_header_extract_hasmask(header):
    """Return 1 if the OXM header's hasmask bit (bit 8) is set, else 0."""
    return 1 if header & 0x100 else 0

def oxm_tlv_header_extract_length(header):
    """Return the field payload length encoded in an OXM TLV header.

    Masked entries store value + mask, so their encoded length is twice
    the field length and is halved here.
    """
    stored = header & 0xff
    if oxm_tlv_header_extract_hasmask(header):
        return stored // 2
    return stored
oxm_types = [
oxm_fields.OpenFlowBasic('in_port', 0, type_desc.Int4),
oxm_fields.OpenFlowBasic('in_phy_port', 1, type_desc.Int4),
oxm_fields.OpenFlowBasic('metadata', 2, type_desc.Int8),
oxm_fields.OpenFlowBasic('eth_dst', 3, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_dst_nxm', 1, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('eth_src', 4, type_desc.MacAddr),
oxm_fields.NiciraExtended0('eth_src_nxm', 2, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('eth_type', 5, type_desc.Int2),
oxm_fields.OpenFlowBasic('vlan_vid', 6, type_desc.Int2),
oxm_fields.OpenFlowBasic('vlan_pcp', 7, type_desc.Int1),
oxm_fields.OpenFlowBasic('ip_dscp', 8, type_desc.Int1),
oxm_fields.OpenFlowBasic('ip_ecn', 9, type_desc.Int1),
oxm_fields.OpenFlowBasic('ip_proto', 10, type_desc.Int1),
oxm_fields.OpenFlowBasic('ipv4_src', 11, type_desc.IPv4Addr),
oxm_fields.OpenFlowBasic('ipv4_dst', 12, type_desc.IPv4Addr),
oxm_fields.OpenFlowBasic('tcp_src', 13, type_desc.Int2),
oxm_fields.OpenFlowBasic('tcp_dst', 14, type_desc.Int2),
oxm_fields.OpenFlowBasic('udp_src', 15, type_desc.Int2),
oxm_fields.OpenFlowBasic('udp_dst', 16, type_desc.Int2),
oxm_fields.OpenFlowBasic('sctp_src', 17, type_desc.Int2),
oxm_fields.OpenFlowBasic('sctp_dst', 18, type_desc.Int2),
oxm_fields.OpenFlowBasic('icmpv4_type', 19, type_desc.Int1),
oxm_fields.OpenFlowBasic('icmpv4_code', 20, type_desc.Int1),
oxm_fields.OpenFlowBasic('arp_op', 21, type_desc.Int2),
oxm_fields.OpenFlowBasic('arp_spa', 22, type_desc.IPv4Addr),
oxm_fields.OpenFlowBasic('arp_tpa', 23, type_desc.IPv4Addr),
oxm_fields.OpenFlowBasic('arp_sha', 24, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('arp_tha', 25, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('ipv6_src', 26, type_desc.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_dst', 27, type_desc.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_flabel', 28, type_desc.Int4),
oxm_fields.OpenFlowBasic('icmpv6_type', 29, type_desc.Int1),
oxm_fields.OpenFlowBasic('icmpv6_code', 30, type_desc.Int1),
oxm_fields.OpenFlowBasic('ipv6_nd_target', 31, type_desc.IPv6Addr),
oxm_fields.OpenFlowBasic('ipv6_nd_sll', 32, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('ipv6_nd_tll', 33, type_desc.MacAddr),
oxm_fields.OpenFlowBasic('mpls_label', 34, type_desc.Int4),
oxm_fields.OpenFlowBasic('mpls_tc', 35, type_desc.Int1),
oxm_fields.OpenFlowBasic('mpls_bos', 36, type_desc.Int1),
oxm_fields.OpenFlowBasic('pbb_isid', 37, type_desc.Int3),
oxm_fields.OpenFlowBasic('tunnel_id', 38, type_desc.Int8),
oxm_fields.NiciraExtended1('tunnel_id_nxm', 16, type_desc.Int8),
oxm_fields.OpenFlowBasic('ipv6_exthdr', 39, type_desc.Int2),
oxm_fields.OpenFlowBasic('pbb_uca', 41, type_desc.Int1),
oxm_fields.OpenFlowBasic('tcp_flags', 42, type_desc.Int2),
oxm_fields.OpenFlowBasic('actset_output', 43, type_desc.Int4),
oxm_fields.OpenFlowBasic('packet_type', 44, type_desc.Int4),
oxm_fields.NiciraExtended1('tun_ipv4_src', 31, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('tun_ipv4_dst', 32, type_desc.IPv4Addr),
oxm_fields.NiciraExtended1('pkt_mark', 33, type_desc.Int4),
oxm_fields.NiciraExtended1('conj_id', 37, type_desc.Int4),
]
oxm_fields.generate(__name__)
# struct ofp_stats
_OFP_STATS_PACK_STR = 'HH4x'
OFP_STATS_PACK_STR = '!' + _OFP_STATS_PACK_STR
OFP_STATS_SIZE = 8
assert calcsize(OFP_STATS_PACK_STR) == OFP_STATS_SIZE
# enum ofp_oxs_class
OFPXSC_OPENFLOW_BASIC = 0x8002 # Basic stats class for OpenFlow
OFPXSC_EXPERIMENTER = 0xFFFF # Experimenter class
def _oxs_tlv_header(class_, field, reserved, length):
return (class_ << 16) | (field << 9) | (reserved << 8) | length
def oxs_tlv_header(field, length):
    # OXS header in the OpenFlow basic stats class (reserved bit = 0).
    return _oxs_tlv_header(OFPXSC_OPENFLOW_BASIC, field, 0, length)
def oxs_tlv_header_extract_length(header):
    """Return the payload length stored in the low byte of an OXS header."""
    low_byte = header & 0xff
    return low_byte
oxs_types = [
oxs_fields.OpenFlowBasic('duration', 0, type_desc.Int4Double),
oxs_fields.OpenFlowBasic('idle_time', 1, type_desc.Int4Double),
oxs_fields.OpenFlowBasic('flow_count', 3, type_desc.Int4),
oxs_fields.OpenFlowBasic('packet_count', 4, type_desc.Int8),
oxs_fields.OpenFlowBasic('byte_count', 5, type_desc.Int8),
]
oxs_fields.generate(__name__)
# enum ofp_action_type
OFPAT_OUTPUT = 0 # Output to switch port.
OFPAT_COPY_TTL_OUT = 11 # Copy TTL "outwards" -- from
# next-to-outermost to outermost
OFPAT_COPY_TTL_IN = 12 # Copy TTL "inwards" -- from outermost to
# next-to-outermost
OFPAT_SET_MPLS_TTL = 15 # MPLS TTL.
OFPAT_DEC_MPLS_TTL = 16 # Decrement MPLS TTL
OFPAT_PUSH_VLAN = 17 # Push a new VLAN tag
OFPAT_POP_VLAN = 18 # Pop the outer VLAN tag
OFPAT_PUSH_MPLS = 19 # Push a new MPLS tag
OFPAT_POP_MPLS = 20 # Pop the outer MPLS tag
OFPAT_SET_QUEUE = 21 # Set queue id when outputting to a port
OFPAT_GROUP = 22 # Apply group
OFPAT_SET_NW_TTL = 23 # IP TTL.
OFPAT_DEC_NW_TTL = 24 # Decrement IP TTL.
OFPAT_SET_FIELD = 25 # Set a header field using OXM TLV format.
OFPAT_PUSH_PBB = 26 # Push a new PBB service tag (I-TAG)
OFPAT_POP_PBB = 27 # Pop the outer PBB service tag (I-TAG)
OFPAT_COPY_FIELD = 28 # Copy value between header and register.
OFPAT_METER = 29 # Apply meter (rate limiter)
OFPAT_EXPERIMENTER = 0xffff
# struct ofp_action_header
OFP_ACTION_HEADER_PACK_STR = '!HH4x'
OFP_ACTION_HEADER_SIZE = 8
assert calcsize(OFP_ACTION_HEADER_PACK_STR) == OFP_ACTION_HEADER_SIZE
# enum ofp_controller_max_len
OFPCML_MAX = 0xffe5 # maximum max_len value which can be used to
# request a specific byte length.
OFPCML_NO_BUFFER = 0xffff # indicates that no buffering should be
# applied and the whole packet is to be
# sent to the controller.
# struct ofp_action_output
OFP_ACTION_OUTPUT_PACK_STR = '!HHIH6x'
OFP_ACTION_OUTPUT_SIZE = 16
assert calcsize(OFP_ACTION_OUTPUT_PACK_STR) == OFP_ACTION_OUTPUT_SIZE
# struct ofp_action_generic
OFP_ACTION_GENERIC_PACK_STR = '!HH4x'
OFP_ACTION_GENERIC_SIZE = 8
assert (calcsize(OFP_ACTION_GENERIC_PACK_STR) == OFP_ACTION_GENERIC_SIZE)
# struct ofp_action_mpls_ttl
OFP_ACTION_MPLS_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_MPLS_TTL_SIZE = 8
assert calcsize(OFP_ACTION_MPLS_TTL_PACK_STR) == OFP_ACTION_MPLS_TTL_SIZE
# struct ofp_action_push
OFP_ACTION_PUSH_PACK_STR = '!HHH2x'
OFP_ACTION_PUSH_SIZE = 8
assert calcsize(OFP_ACTION_PUSH_PACK_STR) == OFP_ACTION_PUSH_SIZE
# struct ofp_action_pop_mpls
OFP_ACTION_POP_MPLS_PACK_STR = '!HHH2x'
OFP_ACTION_POP_MPLS_SIZE = 8
assert calcsize(OFP_ACTION_POP_MPLS_PACK_STR) == OFP_ACTION_POP_MPLS_SIZE
# struct ofp_action_set_queue
OFP_ACTION_SET_QUEUE_PACK_STR = '!HHI'
OFP_ACTION_SET_QUEUE_SIZE = 8
assert calcsize(OFP_ACTION_SET_QUEUE_PACK_STR) == OFP_ACTION_SET_QUEUE_SIZE
# struct ofp_action_group
OFP_ACTION_GROUP_PACK_STR = '!HHI'
OFP_ACTION_GROUP_SIZE = 8
assert calcsize(OFP_ACTION_GROUP_PACK_STR) == OFP_ACTION_GROUP_SIZE
# struct ofp_action_nw_ttl
OFP_ACTION_NW_TTL_PACK_STR = '!HHB3x'
OFP_ACTION_NW_TTL_SIZE = 8
assert calcsize(OFP_ACTION_NW_TTL_PACK_STR) == OFP_ACTION_NW_TTL_SIZE
# struct ofp_action_set_field
OFP_ACTION_SET_FIELD_PACK_STR = '!HH4x'
OFP_ACTION_SET_FIELD_SIZE = 8
assert calcsize(OFP_ACTION_SET_FIELD_PACK_STR) == OFP_ACTION_SET_FIELD_SIZE
# struct ofp_action_copy_field
OFP_ACTION_COPY_FIELD_PACK_STR = '!HHHHH2x'
OFP_ACTION_COPY_FIELD_SIZE = 12
assert calcsize(OFP_ACTION_COPY_FIELD_PACK_STR) == OFP_ACTION_COPY_FIELD_SIZE
# struct ofp_action_meter
OFP_ACTION_METER_PACK_STR = '!HHI'
OFP_ACTION_METER_SIZE = 8
assert calcsize(OFP_ACTION_METER_PACK_STR) == OFP_ACTION_METER_SIZE
# struct ofp_action_experimenter_header
OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR = '!HHI'
OFP_ACTION_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_ACTION_EXPERIMENTER_HEADER_PACK_STR) ==
OFP_ACTION_EXPERIMENTER_HEADER_SIZE)
# enum ofp_instruction_type
OFPIT_GOTO_TABLE = 1 # Setup the next table in the lookup pipeline.
OFPIT_WRITE_METADATA = 2 # Setup the metadata field for use later in
# pipeline.
OFPIT_WRITE_ACTIONS = 3 # Write the action(s) onto the datapath
# action set
OFPIT_APPLY_ACTIONS = 4 # Applies the action(s) immediately
OFPIT_CLEAR_ACTIONS = 5 # Clears all actions from the datapath action
# set
OFPIT_DEPRECATED = 6 # Deprecated (was apply meter)
OFPIT_STAT_TRIGGER = 7 # Statistics triggers
OFPIT_EXPERIMENTER = 0xFFFF # Experimenter instruction
# struct ofp_instruction_goto_table
OFP_INSTRUCTION_GOTO_TABLE_PACK_STR = '!HHB3x'
OFP_INSTRUCTION_GOTO_TABLE_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_GOTO_TABLE_PACK_STR) ==
OFP_INSTRUCTION_GOTO_TABLE_SIZE)
# struct ofp_instruction_write_metadata
OFP_INSTRUCTION_WRITE_METADATA_PACK_STR = '!HH4xQQ'
OFP_INSTRUCTION_WRITE_METADATA_SIZE = 24
assert (calcsize(OFP_INSTRUCTION_WRITE_METADATA_PACK_STR) ==
OFP_INSTRUCTION_WRITE_METADATA_SIZE)
# struct ofp_instruction_actions
OFP_INSTRUCTION_ACTIONS_PACK_STR = '!HH4x'
OFP_INSTRUCTION_ACTIONS_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_ACTIONS_PACK_STR) ==
OFP_INSTRUCTION_ACTIONS_SIZE)
# enum ofp_stat_trigger_flags
OFPSTF_PERIODIC = 1 << 0 # Trigger for all multiples of thresholds.
OFPSTF_ONLY_FIRST = 1 << 1 # Trigger on only first reach threshold.
# struct ofp_instruction_stat_trigger
_OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0 = 'HHI'
OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR = (
'!' + _OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0 + _OFP_STATS_PACK_STR)
OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0 = (
'!' + _OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR0)
OFP_INSTRUCTION_STAT_TRIGGER_PACK_SIZE = 16
assert (calcsize(OFP_INSTRUCTION_STAT_TRIGGER_PACK_STR) ==
OFP_INSTRUCTION_STAT_TRIGGER_PACK_SIZE)
# struct ofp_instruction_experimenter_header
OFP_INSTRUCTION_EXPERIMENTER_HEADER_PACK_STR = '!HHI'
OFP_INSTRUCTION_EXPERIMENTER_HEADER_SIZE = 8
assert (calcsize(OFP_INSTRUCTION_EXPERIMENTER_HEADER_PACK_STR) ==
OFP_INSTRUCTION_EXPERIMENTER_HEADER_SIZE)
# enum ofp_flow_mod_command
OFPFC_ADD = 0 # New flow.
OFPFC_MODIFY = 1 # Modify all matching flows.
OFPFC_MODIFY_STRICT = 2 # Modify entry strictly matching wildcards
OFPFC_DELETE = 3 # Delete all matching flows.
OFPFC_DELETE_STRICT = 4 # Strictly match wildcards and priority.
# Value used in "idle_timeout" and "hard_timeout" to indicate that the
# entry is permanent. */
OFP_FLOW_PERMANENT = 0
# By default, choose a priority in the middle.
OFP_DEFAULT_PRIORITY = 0x8000
# enum ofp_flow_mod_flags
OFPFF_SEND_FLOW_REM = 1 << 0 # Send flow removed message when flow
# expires or is deleted.
OFPFF_CHECK_OVERLAP = 1 << 1 # Check for overlapping entries first.
OFPFF_RESET_COUNTS = 1 << 2 # Reset flow packet and byte counts.
OFPFF_NO_PKT_COUNTS = 1 << 3 # Don't keep track of packet count.
OFPFF_NO_BYT_COUNTS = 1 << 4 # Don't keep track of byte count.
# struct ofp_flow_mod
_OFP_FLOW_MOD_PACK_STR0 = 'QQBBHHHIIIHH'
OFP_FLOW_MOD_PACK_STR = '!' + _OFP_FLOW_MOD_PACK_STR0 + _OFP_MATCH_PACK_STR
OFP_FLOW_MOD_PACK_STR0 = '!' + _OFP_FLOW_MOD_PACK_STR0
OFP_FLOW_MOD_SIZE = 56
assert (calcsize(OFP_FLOW_MOD_PACK_STR) + OFP_HEADER_SIZE ==
OFP_FLOW_MOD_SIZE)
# enum ofp_group
OFPG_MAX = 0xffffff00 # Last usable group number.
OFPG_ALL = 0xfffffffc # Represents all groups for group delete commands.
OFPG_ANY = 0xffffffff # Special wildcard: no group specified.
# enum ofp_group_mod_command
OFPGC_ADD = 0 # New group.
OFPGC_MODIFY = 1 # Modify all matching groups.
OFPGC_DELETE = 2 # Delete all matching groups.
OFPGC_INSERT_BUCKET = 3 # Insert action buckets to the already available
# list of action buckets in a matching group
# OFPGC_??? = 4 # Reserved for future use.
OFPGC_REMOVE_BUCKET = 5 # Remove all action buckets or any specific action
# bucket from matching group
# enum ofp_group_bucket_prop_type
OFPGBPT_WEIGHT = 0 # Select groups only.
OFPGBPT_WATCH_PORT = 1 # Fast failover groups only.
OFPGBPT_WATCH_GROUP = 2 # Fast failover groups only.
OFPGBPT_EXPERIMENTER = 0xFFFF # Experimenter defined.
# struct ofp_group_bucket_prop_header
OFP_GROUP_BUCKET_PROP_HEADER_PACK_STR = '!HH'
OFP_GROUP_BUCKET_PROP_HEADER_SIZE = 4
assert (calcsize(OFP_GROUP_BUCKET_PROP_HEADER_PACK_STR) ==
OFP_GROUP_BUCKET_PROP_HEADER_SIZE)
# struct ofp_group_bucket_prop_weight
OFP_GROUP_BUCKET_PROP_WEIGHT_PACK_STR = '!HHH2x'
OFP_GROUP_BUCKET_PROP_WEIGHT_SIZE = 8
assert (calcsize(OFP_GROUP_BUCKET_PROP_WEIGHT_PACK_STR) ==
OFP_GROUP_BUCKET_PROP_WEIGHT_SIZE)
# struct ofp_group_bucket_prop_watch
OFP_GROUP_BUCKET_PROP_WATCH_PACK_STR = '!HHI'
OFP_GROUP_BUCKET_PROP_WATCH_SIZE = 8
assert (calcsize(OFP_GROUP_BUCKET_PROP_WATCH_PACK_STR) ==
OFP_GROUP_BUCKET_PROP_WATCH_SIZE)
# struct ofp_group_bucket_prop_experimenter
OFP_GROUP_BUCKET_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_GROUP_BUCKET_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_GROUP_BUCKET_PROP_EXPERIMENTER_PACK_STR) ==
OFP_GROUP_BUCKET_PROP_EXPERIMENTER_SIZE)
# struct ofp_bucket
OFP_BUCKET_PACK_STR = '!HHI'
OFP_BUCKET_SIZE = 8
assert calcsize(OFP_BUCKET_PACK_STR) == OFP_BUCKET_SIZE
# enum ofp_group_bucket
OFPG_BUCKET_MAX = 0xffffff00 # Last usable bucket ID.
OFPG_BUCKET_FIRST = 0xfffffffd # First bucket ID in the list of action
# buckets of a group. This is applicable
# for OFPGC_INSERT_BUCKET and
# OFPGC_REMOVE_BUCKET commands.
OFPG_BUCKET_LAST = 0xfffffffe # Last bucket ID in the list of action
# buckets of a group. This is applicable
# for OFPGC_INSERT_BUCKET and
# OFPGC_REMOVE_BUCKET commands.
OFPG_BUCKET_ALL = 0xffffffff # All action buckets in a group,
# This is applicable for
# only OFPGC_REMOVE_BUCKET command.
# Wire-format definitions (struct pack strings + expected sizes) for group
# properties, group mod, packet-out/in, flow-removed, port-status and meter
# messages.  Each assert cross-checks the pack string against the size given
# by the OpenFlow switch specification.
# enum ofp_group_prop_type
OFPGPT_EXPERIMENTER = 0xFFFF  # Experimenter defined.
# struct ofp_group_prop_header
OFP_GROUP_PROP_HEADER_PACK_STR = '!HH'
OFP_GROUP_PROP_HEADER_SIZE = 4
assert (calcsize(OFP_GROUP_PROP_HEADER_PACK_STR) ==
        OFP_GROUP_PROP_HEADER_SIZE)
# struct ofp_group_prop_experimenter
OFP_GROUP_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_GROUP_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_GROUP_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_GROUP_PROP_EXPERIMENTER_SIZE)
# struct ofp_group_mod
OFP_GROUP_MOD_PACK_STR = '!HBxIH2xI'
OFP_GROUP_MOD_SIZE = 24
assert (calcsize(OFP_GROUP_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_GROUP_MOD_SIZE)
# enum ofp_group_type
OFPGT_ALL = 0       # All (multicast/broadcast) group.
OFPGT_SELECT = 1    # Select group.
OFPGT_INDIRECT = 2  # Indirect group.
OFPGT_FF = 3        # Fast failover group.
OFP_NO_BUFFER = 0xffffffff  # Special buffer-id to indicate 'no buffer'
# struct ofp_packet_out
OFP_PACKET_OUT_0_PACK_STR = '!IH2x'
OFP_PACKET_OUT_0_SIZE = 16
OFP_PACKET_OUT_SIZE = 24
assert (calcsize(OFP_PACKET_OUT_0_PACK_STR) + OFP_MATCH_SIZE + OFP_HEADER_SIZE ==
        OFP_PACKET_OUT_SIZE)
# enum ofp_packet_in_reason
OFPR_TABLE_MISS = 0    # No matching flow (table-miss flow entry).
OFPR_APPLY_ACTION = 1  # Output to controller in apply-actions.
OFPR_INVALID_TTL = 2   # Packet has invalid TTL.
OFPR_ACTION_SET = 3    # Output to controller in action set.
OFPR_GROUP = 4         # Output to controller in group bucket.
OFPR_PACKET_OUT = 5    # Output to controller in packet-out.
# struct ofp_packet_in
OFP_PACKET_IN_PACK_STR = '!IHBBQ'
OFP_PACKET_IN_SIZE = 32
assert (calcsize(OFP_PACKET_IN_PACK_STR) + OFP_MATCH_SIZE + OFP_HEADER_SIZE ==
        OFP_PACKET_IN_SIZE)
# enum ofp_flow_removed_reason
OFPRR_IDLE_TIMEOUT = 0  # Flow idle time exceeded idle_timeout.
OFPRR_HARD_TIMEOUT = 1  # Time exceeded hard_timeout.
OFPRR_DELETE = 2        # Evicted by a DELETE flow mod.
OFPRR_GROUP_DELETE = 3  # Group was removed.
OFPRR_METER_DELETE = 4  # Meter was removed.
OFPRR_EVICTION = 5      # Switch eviction to free resources.
# struct ofp_port_status
OFP_PORT_STATUS_PACK_STR = '!B7x' + _OFP_PORT_PACK_STR
OFP_PORT_STATUS_DESC_OFFSET = OFP_HEADER_SIZE + 8
OFP_PORT_STATUS_SIZE = 56
assert (calcsize(OFP_PORT_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_PORT_STATUS_SIZE)
# struct ofp_flow_removed
_OFP_FLOW_REMOVED_PACK_STR0 = 'BBHHHQ'
OFP_FLOW_REMOVED_PACK_STR = '!' + _OFP_FLOW_REMOVED_PACK_STR0 + \
                            _OFP_MATCH_PACK_STR
OFP_FLOW_REMOVED_PACK_STR0 = '!' + _OFP_FLOW_REMOVED_PACK_STR0
OFP_FLOW_REMOVED_SIZE = 32
assert (calcsize(OFP_FLOW_REMOVED_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_FLOW_REMOVED_SIZE)
# enum ofp_meter
OFPM_MAX = 0xffff0000
OFPM_SLOWPATH = 0xfffffffd    # Meter for slow datapath, if any.
OFPM_CONTROLLER = 0xfffffffe  # Meter for controller connection.
OFPM_ALL = 0xffffffff         # Represents all meters for stat requests
                              # commands.
# enum ofp_meter_band_type
OFPMBT_DROP = 1               # Drop packet.
OFPMBT_DSCP_REMARK = 2        # Remark DSCP in the IP header.
OFPMBT_EXPERIMENTER = 0xFFFF  # Experimenter meter band.
# struct ofp_meter_band_drop
OFP_METER_BAND_DROP_PACK_STR = '!HHII4x'
OFP_METER_BAND_DROP_SIZE = 16
assert (calcsize(OFP_METER_BAND_DROP_PACK_STR) ==
        OFP_METER_BAND_DROP_SIZE)
# struct ofp_meter_band_dscp_remark
OFP_METER_BAND_DSCP_REMARK_PACK_STR = '!HHIIB3x'
OFP_METER_BAND_DSCP_REMARK_SIZE = 16
assert (calcsize(OFP_METER_BAND_DSCP_REMARK_PACK_STR) ==
        OFP_METER_BAND_DSCP_REMARK_SIZE)
# struct ofp_meter_band_experimenter
OFP_METER_BAND_EXPERIMENTER_PACK_STR = '!HHIII'
OFP_METER_BAND_EXPERIMENTER_SIZE = 16
assert (calcsize(OFP_METER_BAND_EXPERIMENTER_PACK_STR) ==
        OFP_METER_BAND_EXPERIMENTER_SIZE)
# enum ofp_meter_mod_command
OFPMC_ADD = 0     # New meter.
OFPMC_MODIFY = 1  # Modify specified meter.
OFPMC_DELETE = 2  # Delete specified meter.
# enum ofp_meter_flags
OFPMF_KBPS = 1 << 0   # Rate value in kb/s (kilo-bit per second).
OFPMF_PKTPS = 1 << 1  # Rate value in packet/sec.
OFPMF_BURST = 1 << 2  # Do burst size.
OFPMF_STATS = 1 << 3  # Collect statistics.
# struct ofp_meter_band_header
OFP_METER_BAND_HEADER_PACK_STR = '!HHII'
OFP_METER_BAND_HEADER_SIZE = 12
assert (calcsize(OFP_METER_BAND_HEADER_PACK_STR) ==
        OFP_METER_BAND_HEADER_SIZE)
# struct ofp_meter_mod
OFP_METER_MOD_PACK_STR = '!HHI'
OFP_METER_MOD_SIZE = 16
assert (calcsize(OFP_METER_MOD_PACK_STR) + OFP_HEADER_SIZE ==
        OFP_METER_MOD_SIZE)
# Error message type and per-type sub-code definitions (enum ofp_error_type
# plus the hello/bad-request/bad-action/bad-instruction/bad-match code sets).
# enum ofp_error_type
OFPET_HELLO_FAILED = 0            # Hello protocol failed.
OFPET_BAD_REQUEST = 1             # Request was not understood.
OFPET_BAD_ACTION = 2              # Error in action description.
OFPET_BAD_INSTRUCTION = 3         # Error in instruction list.
OFPET_BAD_MATCH = 4               # Error in match.
OFPET_FLOW_MOD_FAILED = 5         # Problem modifying flow entry.
OFPET_GROUP_MOD_FAILED = 6        # Problem modifying group entry.
OFPET_PORT_MOD_FAILED = 7         # OFPT_PORT_MOD failed.
OFPET_TABLE_MOD_FAILED = 8        # Table mod request failed.
OFPET_QUEUE_OP_FAILED = 9         # Queue operation failed.
OFPET_SWITCH_CONFIG_FAILED = 10   # Switch config request failed.
OFPET_ROLE_REQUEST_FAILED = 11    # Controller Role request failed.
OFPET_METER_MOD_FAILED = 12       # Error in meter.
OFPET_TABLE_FEATURES_FAILED = 13  # Setting table features failed.
OFPET_BAD_PROPERTY = 14           # Some property is invalid.
OFPET_ASYNC_CONFIG_FAILED = 15    # Asynchronous config request failed.
OFPET_FLOW_MONITOR_FAILED = 16    # Setting flow monitor failed.
OFPET_BUNDLE_FAILED = 17          # Bundle operation failed.
OFPET_EXPERIMENTER = 0xffff       # Experimenter error messages.
# enum ofp_hello_failed_code
OFPHFC_INCOMPATIBLE = 0  # No compatible version.
OFPHFC_EPERM = 1         # Permissions error.
# enum ofp_bad_request_code
OFPBRC_BAD_VERSION = 0       # ofp_header.version not supported.
OFPBRC_BAD_TYPE = 1          # ofp_header.type not supported.
OFPBRC_BAD_MULTIPART = 2     # ofp_multipart_request.type not
                             # supported.
OFPBRC_BAD_EXPERIMENTER = 3  # Experimenter id not supported
                             # (in ofp_experimenter_header
                             # or ofp_multipart_request or
                             # ofp_multipart_reply).
OFPBRC_BAD_EXP_TYPE = 4      # Experimenter type not supported.
OFPBRC_EPERM = 5             # Permissions error.
OFPBRC_BAD_LEN = 6           # Wrong request length for type.
OFPBRC_BUFFER_EMPTY = 7      # Specified buffer has already been
                             # used.
OFPBRC_BUFFER_UNKNOWN = 8    # Specified buffer does not exist.
OFPBRC_BAD_TABLE_ID = 9      # Specified table-id invalid or does
                             # not exist.
OFPBRC_IS_SLAVE = 10         # Denied because controller is slave.
OFPBRC_BAD_PORT = 11         # Invalid port or missing port.
OFPBRC_BAD_PACKET = 12       # Invalid packet in packet-out.
OFPBRC_MULTIPART_BUFFER_OVERFLOW = 13   # ofp_multipart_request
                                        # overflowed the assigned buffer.
OFPBRC_MULTIPART_REQUEST_TIMEOUT = 14   # Timeout during multipart request.
OFPBRC_MULTIPART_REPLY_TIMEOUT = 15     # Timeout during multipart reply.
OFPBRC_MULTIPART_BAD_SCHED = 16         # Switch received a
                                        # OFPMP_BUNDLE_FEATURES request and
                                        # failed to update the scheduling
                                        # tolerance.
OFPBRC_PIPELINE_FIELDS_ONLY = 17        # Match fields must include only
                                        # pipeline fields.
OFPBRC_UNKNOWN = 18                     # Unspecified error.
# enum ofp_bad_action_code
OFPBAC_BAD_TYPE = 0             # Unknown or unsupported action type.
OFPBAC_BAD_LEN = 1              # Length problem in actions.
OFPBAC_BAD_EXPERIMENTER = 2     # Unknown experimenter id specified.
OFPBAC_BAD_EXP_TYPE = 3         # Unknown action type for experimenter id.
OFPBAC_BAD_OUT_PORT = 4         # Problem validating output action.
OFPBAC_BAD_ARGUMENT = 5         # Bad action argument.
OFPBAC_EPERM = 6                # Permissions error.
OFPBAC_TOO_MANY = 7             # Can't handle this many actions.
OFPBAC_BAD_QUEUE = 8            # Problem validating output queue.
OFPBAC_BAD_OUT_GROUP = 9        # Invalid group id in forward action.
OFPBAC_MATCH_INCONSISTENT = 10  # Action can't apply for this match,
                                # or Set-Field missing prerequisite.
OFPBAC_UNSUPPORTED_ORDER = 11   # Action order is unsupported for
                                # the action list in an Apply-Actions
                                # instruction.
OFPBAC_BAD_TAG = 12             # Actions uses an unsupported tag/encap.
OFPBAC_BAD_SET_TYPE = 13        # Unsupported type in SET_FIELD action.
OFPBAC_BAD_SET_LEN = 14         # Length problem in SET_FIELD action.
OFPBAC_BAD_SET_ARGUMENT = 15    # Bad argument in SET_FIELD action.
OFPBAC_BAD_SET_MASK = 16        # Bad mask in SET_FIELD action.
# enum ofp_bad_instruction_code
OFPBIC_UNKNOWN_INST = 0         # Unknown instruction.
OFPBIC_UNSUP_INST = 1           # Switch or table does not support
                                # the instruction.
OFPBIC_BAD_TABLE_ID = 2         # Invalid Table-Id specified.
OFPBIC_UNSUP_METADATA = 3       # Metadata value unsupported by datapath.
OFPBIC_UNSUP_METADATA_MASK = 4  # Metadata mask value unsupported by
                                # datapath.
OFPBIC_BAD_EXPERIMENTER = 5     # Unknown experimenter id specified.
OFPBIC_BAD_EXP_TYPE = 6         # Unknown instruction for experimenter id.
OFPBIC_BAD_LEN = 7              # Length problem in instructions.
OFPBIC_EPERM = 8                # Permissions error.
OFPBIC_DUP_INST = 9             # Duplicate instruction.
# enum ofp_bad_match_code
OFPBMC_BAD_TYPE = 0          # Unsupported match type specified by
                             # the match.
OFPBMC_BAD_LEN = 1           # Length problem in match.
OFPBMC_BAD_TAG = 2           # Match uses an unsupported tag/encap.
OFPBMC_BAD_DL_ADDR_MASK = 3  # Unsupported datalink addr mask -
                             # switch does not support arbitrary
                             # datalink address mask.
OFPBMC_BAD_NW_ADDR_MASK = 4  # Unsupported network addr mask -
                             # switch does not support arbitrary
                             # network address mask.
OFPBMC_BAD_WILDCARDS = 5     # Unsupported combination of fields
                             # masked or omitted in the match.
OFPBMC_BAD_FIELD = 6         # Unsupported field type in the match.
OFPBMC_BAD_VALUE = 7         # Unsupported value in a match field.
OFPBMC_BAD_MASK = 8          # Unsupported mask specified in the match.
OFPBMC_BAD_PREREQ = 9        # A prerequisite was not met.
OFPBMC_DUP_FIELD = 10        # A field type was duplicated.
OFPBMC_EPERM = 11            # Permissions error.
# Failure sub-codes for flow/group/port/table/queue/switch-config/role/meter
# modifications, table features, properties, async config, flow monitors and
# bundles.
# enum ofp_flow_mod_failed_code
OFPFMFC_UNKNOWN = 0       # Unspecified error.
OFPFMFC_TABLE_FULL = 1    # Flow not added because table was full.
OFPFMFC_BAD_TABLE_ID = 2  # Table does not exist.
OFPFMFC_OVERLAP = 3       # Attempted to add overlapping flow with
                          # CHECK_OVERLAP flag set.
OFPFMFC_EPERM = 4         # Permissions error.
OFPFMFC_BAD_TIMEOUT = 5   # Flow not added because of unsupported
                          # idle/hard timeout.
OFPFMFC_BAD_COMMAND = 6   # Unsupported or unknown command.
OFPFMFC_BAD_FLAGS = 7     # Unsupported or unknown flags.
OFPFMFC_CANT_SYNC = 8     # Problem in table synchronisation.
OFPFMFC_BAD_PRIORITY = 9  # Unsupported priority value.
OFPFMFC_IS_SYNC = 10      # Synchronised flow entry is read only.
# enum ofp_group_mod_failed_code
OFPGMFC_GROUP_EXISTS = 0          # Group not added because a group ADD
                                  # attempted to replace an already-present
                                  # group.
OFPGMFC_INVALID_GROUP = 1         # Group not added because Group specified
                                  # is invalid.
OFPGMFC_WEIGHT_UNSUPPORTED = 2    # Switch does not support unequal load
                                  # sharing with select groups.
OFPGMFC_OUT_OF_GROUPS = 3         # The group table is full.
OFPGMFC_OUT_OF_BUCKETS = 4        # The maximum number of action buckets
                                  # for a group has been exceeded.
OFPGMFC_CHAINING_UNSUPPORTED = 5  # Switch does not support groups that
                                  # forward to groups.
OFPGMFC_WATCH_UNSUPPORTED = 6     # This group cannot watch the
                                  # watch_port or watch_group specified.
OFPGMFC_LOOP = 7                  # Group entry would cause a loop.
OFPGMFC_UNKNOWN_GROUP = 8         # Group not modified because a group MODIFY
                                  # attempted to modify a non-existent group.
OFPGMFC_CHAINED_GROUP = 9         # Group not deleted because another group
                                  # is forwarding to it.
OFPGMFC_BAD_TYPE = 10             # Unsupported or unknown group type.
OFPGMFC_BAD_COMMAND = 11          # Unsupported or unknown command.
OFPGMFC_BAD_BUCKET = 12           # Error in bucket.
OFPGMFC_BAD_WATCH = 13            # Error in watch port/group.
OFPGMFC_EPERM = 14                # Permissions error.
OFPGMFC_UNKNOWN_BUCKET = 15       # Invalid bucket identifier used in
                                  # INSERT BUCKET or REMOVE BUCKET command.
OFPGMFC_BUCKET_EXISTS = 16        # Can't insert bucket because a bucket
                                  # already exists with that bucket-id.
# enum ofp_port_mod_failed_code
OFPPMFC_BAD_PORT = 0       # Specified port does not exist.
OFPPMFC_BAD_HW_ADDR = 1    # Specified hardware address does not match
                           # the port number.
OFPPMFC_BAD_CONFIG = 2     # Specified config is invalid.
OFPPMFC_BAD_ADVERTISE = 3  # Specified advertise is invalid.
OFPPMFC_EPERM = 4          # Permissions error.
# enum ofp_table_mod_failed_code
OFPTMFC_BAD_TABLE = 0   # Specified table does not exist.
OFPTMFC_BAD_CONFIG = 1  # Specified config is invalid.
OFPTMFC_EPERM = 2       # Permissions error.
# enum ofp_queue_op_failed_code
OFPQOFC_BAD_PORT = 0   # Invalid port (or port does not exist).
OFPQOFC_BAD_QUEUE = 1  # Queue does not exist.
OFPQOFC_EPERM = 2      # Permissions error.
# enum ofp_switch_config_failed_code
OFPSCFC_BAD_FLAGS = 0  # Specified flags is invalid.
OFPSCFC_BAD_LEN = 1    # Specified miss send len is invalid.
OFPSCFC_EPERM = 2      # Permissions error.
# enum ofp_role_request_failed_code
OFPRRFC_STALE = 0      # Stale Message: old generation_id.
OFPRRFC_UNSUP = 1      # Controller role change unsupported.
OFPRRFC_BAD_ROLE = 2   # Invalid role.
OFPRRFC_ID_UNSUP = 3   # Switch doesn't support changing ID.
OFPRRFC_ID_IN_USE = 4  # Requested ID is in use.
# enum ofp_meter_mod_failed_code
OFPMMFC_UNKNOWN = 0         # Unspecified error.
OFPMMFC_METER_EXISTS = 1    # Meter not added because a Meter ADD
                            # attempted to replace an existing Meter.
OFPMMFC_INVALID_METER = 2   # Meter not added because Meter specified
                            # is invalid, or invalid meter in meter action.
OFPMMFC_UNKNOWN_METER = 3   # Meter not modified because a Meter MODIFY
                            # attempted to modify a non-existent Meter,
                            # or bad meter in meter action.
OFPMMFC_BAD_COMMAND = 4     # Unsupported or unknown command.
OFPMMFC_BAD_FLAGS = 5       # Flag configuration unsupported.
OFPMMFC_BAD_RATE = 6        # Rate unsupported.
OFPMMFC_BAD_BURST = 7       # Burst size unsupported.
OFPMMFC_BAD_BAND = 8        # Band unsupported.
OFPMMFC_BAD_BAND_VALUE = 9  # Band value unsupported.
OFPMMFC_OUT_OF_METERS = 10  # No more meters available.
OFPMMFC_OUT_OF_BANDS = 11   # The maximum number of properties for a
                            # meter has been exceeded.
# enum ofp_table_features_failed_code
# NOTE(review): codes 2-4 are skipped here — presumably deprecated/reserved
# in the spec; verify against the OpenFlow specification before adding them.
OFPTFFC_BAD_TABLE = 0     # Specified table does not exist.
OFPTFFC_BAD_METADATA = 1  # Invalid metadata mask.
OFPTFFC_EPERM = 5         # Permissions error.
OFPTFFC_BAD_CAP = 6       # Invalid capability field.
OFPTFFC_BAD_MAX_ENT = 7   # Invalid max_entries field.
OFPTFFC_BAD_FEATURES = 8  # Invalid features field.
OFPTFFC_BAD_COMMAND = 9   # Invalid command.
OFPTFFC_TOO_MANY = 10     # Can't handle this many flow tables.
# enum ofp_bad_property_code
OFPBPC_BAD_TYPE = 0          # Unknown or unsupported property type.
OFPBPC_BAD_LEN = 1           # Length problem in property.
OFPBPC_BAD_VALUE = 2         # Unsupported property value.
OFPBPC_TOO_MANY = 3          # Can't handle this many properties.
OFPBPC_DUP_TYPE = 4          # A property type was duplicated.
OFPBPC_BAD_EXPERIMENTER = 5  # Unknown experimenter id specified.
OFPBPC_BAD_EXP_TYPE = 6      # Unknown exp_type for experimenter id.
OFPBPC_BAD_EXP_VALUE = 7     # Unknown value for experimenter id.
OFPBPC_EPERM = 8             # Permissions error.
# enum ofp_async_config_failed_code
OFPACFC_INVALID = 0      # One mask is invalid.
OFPACFC_UNSUPPORTED = 1  # Requested configuration not supported.
OFPACFC_EPERM = 2        # Permissions error.
# enum ofp_flow_monitor_failed_code
OFPMOFC_UNKNOWN = 0          # Unspecified error.
OFPMOFC_MONITOR_EXISTS = 1   # Monitor not added because a Monitor ADD
                             # attempted to replace an existing
                             # Monitor.
OFPMOFC_INVALID_MONITOR = 2  # Monitor not added because Monitor
                             # specified is invalid.
OFPMOFC_UNKNOWN_MONITOR = 3  # Monitor not modified because a Monitor
                             # MODIFY attempted to modify a non-existent
                             # Monitor.
OFPMOFC_BAD_COMMAND = 4      # Unsupported or unknown command.
OFPMOFC_BAD_FLAGS = 5        # Flag configuration unsupported.
OFPMOFC_BAD_TABLE_ID = 6     # Specified table does not exist.
OFPMOFC_BAD_OUT = 7          # Error in output port/group.
# enum ofp_bundle_failed_code
OFPBFC_UNKNOWN = 0               # Unspecified error.
OFPBFC_EPERM = 1                 # Permissions error.
OFPBFC_BAD_ID = 2                # Bundle ID doesn't exist.
OFPBFC_BUNDLE_EXIST = 3          # Bundle ID already exists.
OFPBFC_BUNDLE_CLOSED = 4         # Bundle ID is closed.
OFPBFC_OUT_OF_BUNDLES = 5        # Too many bundle IDs.
OFPBFC_BAD_TYPE = 6              # Unsupported or unknown message control type.
OFPBFC_BAD_FLAGS = 7             # Unsupported, unknown, or inconsistent flags.
OFPBFC_MSG_BAD_LEN = 8           # Length problem in included message.
OFPBFC_MSG_BAD_XID = 9           # Inconsistent or duplicate XID.
OFPBFC_MSG_UNSUP = 10            # Unsupported message in this bundle.
OFPBFC_MSG_CONFLICT = 11         # Unsupported message combination in this
                                 # bundle.
OFPBFC_MSG_TOO_MANY = 12         # Can't handle this many messages in bundle.
OFPBFC_MSG_FAILED = 13           # One message in bundle failed.
OFPBFC_TIMEOUT = 14              # Bundle is taking too long.
OFPBFC_BUNDLE_IN_PROGRESS = 15   # Bundle is locking the resource.
OFPBFC_SCHED_NOT_SUPPORTED = 16  # Scheduled commit was received and
                                 # scheduling is not supported.
OFPBFC_SCHED_FUTURE = 17         # Scheduled commit time exceeds upper bound.
OFPBFC_SCHED_PAST = 18           # Scheduled commit time exceeds lower bound.
# struct ofp_error_msg
OFP_ERROR_MSG_PACK_STR = '!HH'
OFP_ERROR_MSG_SIZE = 12
assert (calcsize(OFP_ERROR_MSG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ERROR_MSG_SIZE)
# struct ofp_error_experimenter_msg
OFP_ERROR_EXPERIMENTER_MSG_PACK_STR = '!HHI'
OFP_ERROR_EXPERIMENTER_MSG_SIZE = 16
assert (calcsize(OFP_ERROR_EXPERIMENTER_MSG_PACK_STR) +
OFP_HEADER_SIZE) == OFP_ERROR_EXPERIMENTER_MSG_SIZE
# struct ofp_experimenter_header
OFP_EXPERIMENTER_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_HEADER_SIZE = 16
assert (calcsize(OFP_EXPERIMENTER_HEADER_PACK_STR) + OFP_HEADER_SIZE
== OFP_EXPERIMENTER_HEADER_SIZE)
# enum ofp_multipart_type
OFPMP_DESC = 0
OFPMP_FLOW_DESC = 1
OFPMP_AGGREGATE_STATS = 2
OFPMP_TABLE_STATS = 3
OFPMP_PORT_STATS = 4
OFPMP_QUEUE_STATS = 5
OFPMP_GROUP_STATS = 6
OFPMP_GROUP_DESC = 7
OFPMP_GROUP_FEATURES = 8
OFPMP_METER_STATS = 9
OFPMP_METER_DESC = 10
OFPMP_METER_FEATURES = 11
OFPMP_TABLE_FEATURES = 12
OFPMP_PORT_DESC = 13
OFPMP_TABLE_DESC = 14
OFPMP_QUEUE_DESC = 15
OFPMP_FLOW_MONITOR = 16
OFPMP_FLOW_STATS = 17
OFPMP_CONTROLLER_STATUS = 18
OFPMP_BUNDLE_FEATURES = 19
OFPMP_EXPERIMENTER = 0xffff
# struct ofp_multipart_request
OFP_MULTIPART_REQUEST_PACK_STR = '!HH4x'
OFP_MULTIPART_REQUEST_SIZE = 16
assert (calcsize(OFP_MULTIPART_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
OFP_MULTIPART_REQUEST_SIZE)
# enum ofp_multipart_reply_flags
OFPMPF_REPLY_MORE = 1 << 0 # More requests to follow.
# struct ofp_multipart_reply
OFP_MULTIPART_REPLY_PACK_STR = '!HH4x'
OFP_MULTIPART_REPLY_SIZE = 16
assert (calcsize(OFP_MULTIPART_REPLY_PACK_STR) + OFP_HEADER_SIZE ==
OFP_MULTIPART_REPLY_SIZE)
DESC_STR_LEN = 256
DESC_STR_LEN_STR = str(DESC_STR_LEN)
SERIAL_NUM_LEN = 32
SERIAL_NUM_LEN_STR = str(SERIAL_NUM_LEN)
OFP_DESC_PACK_STR = '!' + \
DESC_STR_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's' + \
SERIAL_NUM_LEN_STR + 's' + \
DESC_STR_LEN_STR + 's'
OFP_DESC_SIZE = 1056
assert calcsize(OFP_DESC_PACK_STR) == OFP_DESC_SIZE
# struct ofp_flow_stats_request
_OFP_FLOW_STATS_REQUEST_0_PACK_STR = 'B3xII4xQQ'
OFP_FLOW_STATS_REQUEST_0_PACK_STR = '!' + _OFP_FLOW_STATS_REQUEST_0_PACK_STR
OFP_FLOW_STATS_REQUEST_0_SIZE = 32
assert (calcsize(OFP_FLOW_STATS_REQUEST_0_PACK_STR) ==
OFP_FLOW_STATS_REQUEST_0_SIZE)
OFP_FLOW_STATS_REQUEST_PACK_STR = (OFP_FLOW_STATS_REQUEST_0_PACK_STR +
_OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_FLOW_STATS_REQUEST_PACK_STR) ==
OFP_FLOW_STATS_REQUEST_SIZE)
# struct ofp_flow_desc
_OFP_FLOW_DESC_0_PACK_STR = 'H2xBxHHHHHQ'
OFP_FLOW_DESC_0_PACK_STR = '!' + _OFP_FLOW_DESC_0_PACK_STR
OFP_FLOW_DESC_0_SIZE = 24
assert calcsize(OFP_FLOW_DESC_0_PACK_STR) == OFP_FLOW_DESC_0_SIZE
OFP_FLOW_DESC_PACK_STR = OFP_FLOW_DESC_0_PACK_STR + _OFP_MATCH_PACK_STR
OFP_FLOW_DESC_SIZE = 32
assert calcsize(OFP_FLOW_DESC_PACK_STR) == OFP_FLOW_DESC_SIZE
# enum ofp_flow_stats_reason
OFPFSR_STATS_REQUEST = 0 # Reply to a OFPMP_FLOW_STATS request.
OFPFSR_STAT_TRIGGER = 1 # Status generated by OFPIT_STAT_TRIGGER.
# struct ofp_flow_stats
_OFP_FLOW_STATS_0_PACK_STR = 'H2xBBH'
OFP_FLOW_STATS_0_PACK_STR = '!' + _OFP_FLOW_STATS_0_PACK_STR
OFP_FLOW_STATS_0_SIZE = 8
assert calcsize(OFP_FLOW_STATS_0_PACK_STR) == OFP_FLOW_STATS_0_SIZE
OFP_FLOW_STATS_PACK_STR = (OFP_FLOW_STATS_0_PACK_STR +
_OFP_MATCH_PACK_STR)
OFP_FLOW_STATS_SIZE = 16
assert calcsize(OFP_FLOW_STATS_PACK_STR) == OFP_FLOW_STATS_SIZE
# struct ofp_aggregate_stats_request
OFP_AGGREGATE_STATS_REQUEST_PACK_STR = '!B3xII4xQQ' + _OFP_MATCH_PACK_STR
OFP_AGGREGATE_STATS_REQUEST_SIZE = 40
assert (calcsize(OFP_AGGREGATE_STATS_REQUEST_PACK_STR) ==
OFP_AGGREGATE_STATS_REQUEST_SIZE)
# struct ofp_aggregate_stats_reply
OFP_AGGREGATE_STATS_REPLY_PACK_STR = OFP_STATS_PACK_STR
OFP_AGGREGATE_STATS_REPLY_SIZE = OFP_STATS_SIZE
assert (calcsize(OFP_AGGREGATE_STATS_REPLY_PACK_STR) ==
OFP_AGGREGATE_STATS_REPLY_SIZE)
# Table-features property types/structs, table stats/desc, and per-port
# multipart request and statistics property wire formats.
# enum ofp_table_feature_prop_type
# NOTE(review): gaps at 9, 11 and 17 — presumably reserved *_MISS slots in
# the spec; verify before assigning new values.
OFPTFPT_INSTRUCTIONS = 0
OFPTFPT_INSTRUCTIONS_MISS = 1
OFPTFPT_NEXT_TABLES = 2
OFPTFPT_NEXT_TABLES_MISS = 3
OFPTFPT_WRITE_ACTIONS = 4
OFPTFPT_WRITE_ACTIONS_MISS = 5
OFPTFPT_APPLY_ACTIONS = 6
OFPTFPT_APPLY_ACTIONS_MISS = 7
OFPTFPT_MATCH = 8
OFPTFPT_WILDCARDS = 10
OFPTFPT_WRITE_SETFIELD = 12
OFPTFPT_WRITE_SETFIELD_MISS = 13
OFPTFPT_APPLY_SETFIELD = 14
OFPTFPT_APPLY_SETFIELD_MISS = 15
OFPTFPT_TABLE_SYNC_FROM = 16
OFPTFPT_WRITE_COPYFIELD = 18       # Write Copy-Field property.
OFPTFPT_WRITE_COPYFIELD_MISS = 19  # Write Copy-Field for table-miss.
OFPTFPT_APPLY_COPYFIELD = 20       # Apply Copy-Field property.
OFPTFPT_APPLY_COPYFIELD_MISS = 21  # Apply Copy-Field for table-miss.
OFPTFPT_PACKET_TYPES = 22          # Packet types property.
OFPTFPT_EXPERIMENTER = 0xFFFE
OFPTFPT_EXPERIMENTER_MISS = 0xFFFF
# struct ofp_instruction_id
OFP_INSTRUCTION_ID_PACK_STR = '!HH'
OFP_INSTRUCTION_ID_SIZE = 4
assert calcsize(OFP_INSTRUCTION_ID_PACK_STR) == OFP_INSTRUCTION_ID_SIZE
# struct ofp_table_feature_prop_instructions
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_INSTRUCTIONS_SIZE)
# struct ofp_table_feature_prop_actions
OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_ACTIONS_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_ACTIONS_SIZE)
# struct ofp_table_feature_prop_oxm
OFP_TABLE_FEATURE_PROP_OXM_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_OXM_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_OXM_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_OXM_SIZE)
# struct ofp_table_feature_prop_oxm_values
OFP_TABLE_FEATURE_PROP_OXM_VALUES_PACK_STR = '!HH'
OFP_TABLE_FEATURE_PROP_OXM_VALUES_SIZE = 4
assert (calcsize(OFP_TABLE_FEATURE_PROP_OXM_VALUES_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_OXM_VALUES_SIZE)
# struct ofp_table_feature_prop_experimenter
OFP_TABLE_FEATURE_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_TABLE_FEATURE_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_TABLE_FEATURE_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_TABLE_FEATURE_PROP_EXPERIMENTER_SIZE)
# enum ofp_table_feature_flag
OFPTFF_INGRESS_TABLE = 1 << 0  # Can be configured as ingress table.
OFPTFF_EGRESS_TABLE = 1 << 1   # Can be configured as egress table.
OFPTFF_FIRST_EGRESS = 1 << 4   # Is the first egress table.
# enum ofp_table_features_command
OFPTFC_REPLACE = 0  # Replace full pipeline.
OFPTFC_MODIFY = 1   # Modify flow tables capabilities.
OFPTFC_ENABLE = 2   # Enable flow tables in the pipeline.
OFPTFC_DISABLE = 3  # Disable flow tables in pipeline.
# struct ofp_table_features
OFP_MAX_TABLE_NAME_LEN = 32
OFP_MAX_TABLE_NAME_LEN_STR = str(OFP_MAX_TABLE_NAME_LEN)
OFP_TABLE_FEATURES_PACK_STR = '!HBBI' + OFP_MAX_TABLE_NAME_LEN_STR + \
                              's' + 'QQII'
OFP_TABLE_FEATURES_SIZE = 64
assert (calcsize(OFP_TABLE_FEATURES_PACK_STR) ==
        OFP_TABLE_FEATURES_SIZE)
# struct ofp_table_stats
OFP_TABLE_STATS_PACK_STR = '!B3xIQQ'
OFP_TABLE_STATS_SIZE = 24
assert calcsize(OFP_TABLE_STATS_PACK_STR) == OFP_TABLE_STATS_SIZE
# struct ofp_table_desc
_OFP_TABLE_DESC_PACK_STR = 'HBxI'
OFP_TABLE_DESC_PACK_STR = '!' + _OFP_TABLE_DESC_PACK_STR
OFP_TABLE_DESC_SIZE = 8
assert calcsize(OFP_TABLE_DESC_PACK_STR) == OFP_TABLE_DESC_SIZE
# struct ofp_port_multipart_request
OFP_PORT_MULTIPART_REQUEST_PACK_STR = '!I4x'
OFP_PORT_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_PORT_MULTIPART_REQUEST_PACK_STR) ==
        OFP_PORT_MULTIPART_REQUEST_SIZE)
# enum ofp_port_stats_prop_type
OFPPSPT_ETHERNET = 0           # Ethernet property.
OFPPSPT_OPTICAL = 1            # Optical property.
OFPPSPT_EXPERIMENTER = 0xFFFF  # Experimenter property.
# struct ofp_port_stats_prop_ethernet
OFP_PORT_STATS_PROP_ETHERNET_PACK_STR = '!HH4xQQQQ'
OFP_PORT_STATS_PROP_ETHERNET_SIZE = 40
assert (calcsize(OFP_PORT_STATS_PROP_ETHERNET_PACK_STR) ==
        OFP_PORT_STATS_PROP_ETHERNET_SIZE)
# struct ofp_port_stats_prop_optical
OFP_PORT_STATS_PROP_OPTICAL_PACK_STR = '!HH4xIIIIIIIHHHH'
OFP_PORT_STATS_PROP_OPTICAL_SIZE = 44
assert (calcsize(OFP_PORT_STATS_PROP_OPTICAL_PACK_STR) ==
        OFP_PORT_STATS_PROP_OPTICAL_SIZE)
# enum ofp_port_stats_optical_flags
# NOTE(review): gap at bit 3 — presumably reserved; verify against the spec.
OFPOSF_RX_TUNE = 1 << 0  # Receiver tune info valid
OFPOSF_TX_TUNE = 1 << 1  # Transmit tune info valid
OFPOSF_TX_PWR = 1 << 2   # TX Power is valid
OFPOSF_RX_PWR = 1 << 4   # RX power is valid
OFPOSF_TX_BIAS = 1 << 5  # Transmit bias is valid
OFPOSF_TX_TEMP = 1 << 6  # TX Temp is valid
# struct ofp_port_stats_prop_experimenter
OFP_PORT_STATS_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_PORT_STATS_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_PORT_STATS_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_PORT_STATS_PROP_EXPERIMENTER_SIZE)
# struct ofp_port_stats
OFP_PORT_STATS_PACK_STR = '!H2xIIIQQQQQQQQ'
OFP_PORT_STATS_SIZE = 80
assert calcsize(OFP_PORT_STATS_PACK_STR) == OFP_PORT_STATS_SIZE
# Group, meter and queue statistics/description wire formats.
# struct ofp_group_multipart_request
OFP_GROUP_MULTIPART_REQUEST_PACK_STR = '!I4x'
OFP_GROUP_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_GROUP_MULTIPART_REQUEST_PACK_STR) ==
        OFP_GROUP_MULTIPART_REQUEST_SIZE)
# struct ofp_bucket_counter
OFP_BUCKET_COUNTER_PACK_STR = '!QQ'
OFP_BUCKET_COUNTER_SIZE = 16
assert calcsize(OFP_BUCKET_COUNTER_PACK_STR) == OFP_BUCKET_COUNTER_SIZE
# struct ofp_group_stats
OFP_GROUP_STATS_PACK_STR = '!H2xII4xQQII'
OFP_GROUP_STATS_SIZE = 40
assert calcsize(OFP_GROUP_STATS_PACK_STR) == OFP_GROUP_STATS_SIZE
# struct ofp_group_desc
OFP_GROUP_DESC_PACK_STR = '!HBxIH6x'
OFP_GROUP_DESC_SIZE = 16
assert calcsize(OFP_GROUP_DESC_PACK_STR) == OFP_GROUP_DESC_SIZE
# struct ofp_group_desc_stats
# Backward compatibility with 1.3.1 - avoid breaking the API.
OFP_GROUP_DESC_STATS_PACK_STR = OFP_GROUP_DESC_PACK_STR
OFP_GROUP_DESC_STATS_SIZE = OFP_GROUP_DESC_SIZE
assert calcsize(OFP_GROUP_DESC_STATS_PACK_STR) == OFP_GROUP_DESC_STATS_SIZE
# enum ofp_group_capabilities
OFPGFC_SELECT_WEIGHT = 1 << 0    # Support weight for select groups.
OFPGFC_SELECT_LIVENESS = 1 << 1  # Support liveness for select groups.
OFPGFC_CHAINING = 1 << 2         # Support chaining groups.
OFPGFC_CHAINING_CHECKS = 1 << 3  # Check chaining for loops and delete.
# struct ofp_group_features
OFP_GROUP_FEATURES_PACK_STR = '!II4I4I'
OFP_GROUP_FEATURES_SIZE = 40
assert calcsize(OFP_GROUP_FEATURES_PACK_STR) == OFP_GROUP_FEATURES_SIZE
# struct ofp_meter_multipart_request
OFP_METER_MULTIPART_REQUEST_PACK_STR = '!I4x'
OFP_METER_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_METER_MULTIPART_REQUEST_PACK_STR) ==
        OFP_METER_MULTIPART_REQUEST_SIZE)
# struct ofp_meter_stats
OFP_METER_STATS_PACK_STR = '!IH6xIQQII'
OFP_METER_STATS_SIZE = 40
assert calcsize(OFP_METER_STATS_PACK_STR) == OFP_METER_STATS_SIZE
# struct ofp_meter_band_stats
OFP_METER_BAND_STATS_PACK_STR = '!QQ'
OFP_METER_BAND_STATS_SIZE = 16
assert (calcsize(OFP_METER_BAND_STATS_PACK_STR) ==
        OFP_METER_BAND_STATS_SIZE)
# struct ofp_meter_desc
OFP_METER_DESC_PACK_STR = '!HHI'
OFP_METER_DESC_SIZE = 8
assert calcsize(OFP_METER_DESC_PACK_STR) == OFP_METER_DESC_SIZE
# enum ofp_meter_feature_flags
OFPMFF_ACTION_SET = 1 << 0    # Support meter action in action set.
OFPMFF_ANY_POSITION = 1 << 1  # Support any position in action list.
OFPMFF_MULTI_LIST = 1 << 2    # Support multiple actions in action list.
# struct ofp_meter_features
OFP_METER_FEATURES_PACK_STR = '!IIIBB2xI4x'
OFP_METER_FEATURES_SIZE = 24
assert (calcsize(OFP_METER_FEATURES_PACK_STR) ==
        OFP_METER_FEATURES_SIZE)
# All ones is used to indicate all queues in a port (for stats retrieval).
OFPQ_ALL = 0xffffffff
# Min rate > 1000 means not configured.
OFPQ_MIN_RATE_UNCFG = 0xffff
# Max rate > 1000 means not configured.
OFPQ_MAX_RATE_UNCFG = 0xffff
# enum ofp_queue_desc_prop_type
OFPQDPT_MIN_RATE = 1           # Minimum datarate guaranteed.
OFPQDPT_MAX_RATE = 2           # Maximum datarate.
OFPQDPT_EXPERIMENTER = 0xffff  # Experimenter defined property.
# struct ofp_queue_desc_prop_min_rate
OFP_QUEUE_DESC_PROP_MIN_RATE_PACK_STR = '!HHH2x'
OFP_QUEUE_DESC_PROP_MIN_RATE_SIZE = 8
assert (calcsize(OFP_QUEUE_DESC_PROP_MIN_RATE_PACK_STR) ==
        OFP_QUEUE_DESC_PROP_MIN_RATE_SIZE)
# ofp_queue_desc_prop_max_rate
OFP_QUEUE_DESC_PROP_MAX_RATE_PACK_STR = '!HHH2x'
OFP_QUEUE_DESC_PROP_MAX_RATE_SIZE = 8
assert (calcsize(OFP_QUEUE_DESC_PROP_MAX_RATE_PACK_STR) ==
        OFP_QUEUE_DESC_PROP_MAX_RATE_SIZE)
# struct ofp_queue_desc_prop_experimenter
OFP_QUEUE_DESC_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_QUEUE_DESC_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_QUEUE_DESC_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_QUEUE_DESC_PROP_EXPERIMENTER_SIZE)
# struct ofp_queue_multipart_request
OFP_QUEUE_MULTIPART_REQUEST_PACK_STR = '!II'
OFP_QUEUE_MULTIPART_REQUEST_SIZE = 8
assert (calcsize(OFP_QUEUE_MULTIPART_REQUEST_PACK_STR) ==
        OFP_QUEUE_MULTIPART_REQUEST_SIZE)
# struct ofp_queue_desc
OFP_QUEUE_DESC_PACK_STR = '!IIH6x'
OFP_QUEUE_DESC_SIZE = 16
assert calcsize(OFP_QUEUE_DESC_PACK_STR) == OFP_QUEUE_DESC_SIZE
# enum ofp_queue_stats_prop_type
OFPQSPT_EXPERIMENTER = 0xffff  # Experimenter defined property.
# struct ofp_queue_stats_prop_experimenter
OFP_QUEUE_STATS_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_QUEUE_STATS_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_QUEUE_STATS_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_QUEUE_STATS_PROP_EXPERIMENTER_SIZE)
# struct ofp_queue_stats
OFP_QUEUE_STATS_PACK_STR = '!H6xIIQQQII'
OFP_QUEUE_STATS_SIZE = 48
assert calcsize(OFP_QUEUE_STATS_PACK_STR) == OFP_QUEUE_STATS_SIZE
# Flow monitor requests/updates, controller status, bundle features and
# experimenter multipart wire formats.
# struct ofp_flow_monitor_request
_OFP_FLOW_MONITOR_REQUEST_0_PACK_STR = 'IIIHBB'
OFP_FLOW_MONITOR_REQUEST_0_PACK_STR = ('!' +
                                       _OFP_FLOW_MONITOR_REQUEST_0_PACK_STR)
OFP_FLOW_MONITOR_REQUEST_0_SIZE = 16
OFP_FLOW_MONITOR_REQUEST_PACK_STR = (OFP_FLOW_MONITOR_REQUEST_0_PACK_STR +
                                     _OFP_MATCH_PACK_STR)
OFP_FLOW_MONITOR_REQUEST_SIZE = 24
assert (calcsize(OFP_FLOW_MONITOR_REQUEST_PACK_STR) ==
        OFP_FLOW_MONITOR_REQUEST_SIZE)
# enum ofp_flow_monitor_command
OFPFMC_ADD = 0     # New flow monitor.
OFPFMC_MODIFY = 1  # Modify existing flow monitor.
OFPFMC_DELETE = 2  # Delete/cancel existing flow monitor.
# enum ofp_flow_monitor_flags
# When to send updates.
OFPFMF_INITIAL = 1 << 0       # Initially matching flows.
OFPFMF_ADD = 1 << 1           # New matching flows as they are added.
OFPFMF_REMOVED = 1 << 2       # Old matching flows as they are removed.
OFPFMF_MODIFY = 1 << 3        # Matching flows as they are changed.
# What to include in updates.
OFPFMF_INSTRUCTIONS = 1 << 4  # If set, instructions are included.
OFPFMF_NO_ABBREV = 1 << 5     # If set, include own changes in full.
OFPFMF_ONLY_OWN = 1 << 6      # If set, don't include other controllers.
# struct ofp_flow_update_header
OFP_FLOW_UPDATE_HEADER_PACK_STR = '!HH'
OFP_FLOW_UPDATE_HEADER_SIZE = 4
assert (calcsize(OFP_FLOW_UPDATE_HEADER_PACK_STR) ==
        OFP_FLOW_UPDATE_HEADER_SIZE)
# enum ofp_flow_update_event
# struct ofp_flow_update_full.
OFPFME_INITIAL = 0   # Flow present when flow monitor created.
OFPFME_ADDED = 1     # Flow was added.
OFPFME_REMOVED = 2   # Flow was removed.
OFPFME_MODIFIED = 3  # Flow instructions were changed.
# struct ofp_flow_update_abbrev.
OFPFME_ABBREV = 4    # Abbreviated reply.
# struct ofp_flow_update_header.
OFPFME_PAUSED = 5    # Monitoring paused (out of buffer space).
OFPFME_RESUMED = 6   # Monitoring resumed.
# struct ofp_flow_update_full
_OFP_FLOW_UPDATE_FULL_0_PACK_STR = 'HHBBHHH4xQ'
OFP_FLOW_UPDATE_FULL_0_PACK_STR = '!' + _OFP_FLOW_UPDATE_FULL_0_PACK_STR
OFP_FLOW_UPDATE_FULL_0_SIZE = 24
OFP_FLOW_UPDATE_FULL_PACK_STR = (OFP_FLOW_UPDATE_FULL_0_PACK_STR +
                                 _OFP_MATCH_PACK_STR)
OFP_FLOW_UPDATE_FULL_SIZE = 32
assert (calcsize(OFP_FLOW_UPDATE_FULL_PACK_STR) ==
        OFP_FLOW_UPDATE_FULL_SIZE)
# struct ofp_flow_update_abbrev
OFP_FLOW_UPDATE_ABBREV_PACK_STR = '!HHI'
OFP_FLOW_UPDATE_ABBREV_SIZE = 8
assert (calcsize(OFP_FLOW_UPDATE_ABBREV_PACK_STR) ==
        OFP_FLOW_UPDATE_ABBREV_SIZE)
# struct ofp_flow_update_paused
OFP_FLOW_UPDATE_PAUSED_PACK_STR = '!HH4x'
OFP_FLOW_UPDATE_PAUSED_SIZE = 8
assert (calcsize(OFP_FLOW_UPDATE_PAUSED_PACK_STR) ==
        OFP_FLOW_UPDATE_PAUSED_SIZE)
# enum ofp_controller_status_prop_type
OFPCSPT_URI = 0                # Connection URI property.
OFPCSPT_EXPERIMENTER = 0xFFFF  # Experimenter property.
# struct ofp_controller_status_prop_header
_OFP_CONTROLLER_STATUS_PROP_HEADER_PACK_STR = 'HH'
OFP_CONTROLLER_STATUS_PROP_HEADER_PACK_STR = (
    '!' + _OFP_CONTROLLER_STATUS_PROP_HEADER_PACK_STR)
OFP_CONTROLLER_STATUS_PROP_HEADER_SIZE = 4
assert (calcsize(OFP_CONTROLLER_STATUS_PROP_HEADER_PACK_STR) ==
        OFP_CONTROLLER_STATUS_PROP_HEADER_SIZE)
# struct ofp_controller_status_prop_uri
OFP_CONTROLLER_STATUS_PROP_URI_PACK_STR = '!HH'
OFP_CONTROLLER_STATUS_PROP_URI_SIZE = 4
assert (calcsize(OFP_CONTROLLER_STATUS_PROP_URI_PACK_STR) ==
        OFP_CONTROLLER_STATUS_PROP_URI_SIZE)
# struct ofp_controller_status_prop_experimenter
OFP_CONTROLLER_STATUS_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_CONTROLLER_STATUS_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_CONTROLLER_STATUS_PROP_EXPERIMENTER_PACK_STR) ==
        OFP_CONTROLLER_STATUS_PROP_EXPERIMENTER_SIZE)
# enum ofp_controller_status_reason
OFPCSR_REQUEST = 0             # Controller requested status.
OFPCSR_CHANNEL_STATUS = 1      # Oper status of channel changed.
OFPCSR_ROLE = 2                # Controller role changed.
OFPCSR_CONTROLLER_ADDED = 3    # New controller added.
OFPCSR_CONTROLLER_REMOVED = 4  # Controller removed from config.
OFPCSR_SHORT_ID = 5            # Controller ID changed.
OFPCSR_EXPERIMENTER = 6        # Experimenter data changed.
# struct ofp_controller_status
OFP_CONTROLLER_STATUS_PACK_STR = '!HHIBB6x'
OFP_CONTROLLER_STATUS_SIZE = 16
assert (calcsize(OFP_CONTROLLER_STATUS_PACK_STR) ==
        OFP_CONTROLLER_STATUS_SIZE)
# struct ofp_controller_status_header
OFP_CONTROLLER_STATUS_HEADER_PACK_STR = OFP_CONTROLLER_STATUS_PACK_STR
OFP_CONTROLLER_STATUS_HEADER_SIZE = OFP_CONTROLLER_STATUS_SIZE + OFP_HEADER_SIZE
# enum ofp_control_channel_status
OFPCT_STATUS_UP = 0    # Control channel is operational.
OFPCT_STATUS_DOWN = 1  # Control channel is not operational.
# struct ofp_bundle_features_prop_header
OFP_BUNDLE_FEATURES_PROP_HEADER_PACK_STR = '!HH'
OFP_BUNDLE_FEATURES_PROP_HEADER_SIZE = 4
assert (calcsize(OFP_BUNDLE_FEATURES_PROP_HEADER_PACK_STR) ==
        OFP_BUNDLE_FEATURES_PROP_HEADER_SIZE)
# struct ofp_bundle_features_request
OFP_BUNDLE_FEATURES_REQUEST_PACK_STR = '!I4x'
OFP_BUNDLE_FEATURES_REQUEST_SIZE = 8
assert (calcsize(OFP_BUNDLE_FEATURES_REQUEST_PACK_STR) ==
        OFP_BUNDLE_FEATURES_REQUEST_SIZE)
# enum ofp_bundle_features_prop_type
OFPTMPBF_TIME_CAPABILITY = 0x1  # Time feature property.
OFPTMPBF_EXPERIMENTER = 0xFFFF  # Experimenter property.
# struct ofp_time
_OFP_TIME_PACK_STR = 'QI4x'
OFP_TIME_PACK_STR = '!' + _OFP_TIME_PACK_STR
OFP_TIME_SIZE = 16
assert calcsize(OFP_TIME_PACK_STR) == OFP_TIME_SIZE
# struct ofp_bundle_features_prop_time
# Fixed part followed by four ofp_time structures.
OFP_BUNDLE_FEATURES_PROP_TIME_0_PACK_STR = '!HH4x'
OFP_BUNDLE_FEATURES_PROP_TIME_0_SIZE = 8
OFP_BUNDLE_FEATURES_PROP_TIME_SIZE = 72
assert (calcsize(OFP_BUNDLE_FEATURES_PROP_TIME_0_PACK_STR) +
        OFP_TIME_SIZE * 4 == OFP_BUNDLE_FEATURES_PROP_TIME_SIZE)
# enum ofp_bundle_feature_flags
OFPBF_TIMESTAMP = 1 << 0       # Request includes a timestamp.
OFPBF_TIME_SET_SCHED = 1 << 1  # Request includes the sched_max_future and
                               # sched_max_past parameters.
# struct ofp_bundle_features
OFP_BUNDLE_FEATURES_PACK_STR = '!H6x'
OFP_BUNDLE_FEATURES_SIZE = 8
assert (calcsize(OFP_BUNDLE_FEATURES_PACK_STR) ==
        OFP_BUNDLE_FEATURES_SIZE)
# struct ofp_experimenter_multipart_header
OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR = '!II'
OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE = 8
assert (calcsize(OFP_EXPERIMENTER_MULTIPART_HEADER_PACK_STR) ==
        OFP_EXPERIMENTER_MULTIPART_HEADER_SIZE)
# struct ofp_experimenter_structure
OFP_EXPERIMENTER_STRUCTURE_PACK_STR = '!II'
OFP_EXPERIMENTER_STRUCTURE_SIZE = 8
assert (calcsize(OFP_EXPERIMENTER_STRUCTURE_PACK_STR) ==
        OFP_EXPERIMENTER_STRUCTURE_SIZE)
# struct ofp_error_experimenter_msg
# NOTE(review): OFP_ERROR_EXPERIMENTER_MSG_* is re-assigned here with the
# same values as the earlier definition in this file — confirm the
# duplication is intentional.
OFP_ERROR_EXPERIMENTER_MSG_PACK_STR = '!HHI'
OFP_ERROR_EXPERIMENTER_MSG_SIZE = 16
assert (calcsize(OFP_ERROR_EXPERIMENTER_MSG_PACK_STR) +
OFP_HEADER_SIZE) == OFP_ERROR_EXPERIMENTER_MSG_SIZE
# enum ofp_controller_role
OFPCR_ROLE_NOCHANGE = 0 # Don't change current role.
OFPCR_ROLE_EQUAL = 1 # Default role, full access.
OFPCR_ROLE_MASTER = 2 # Full access, at most one master.
OFPCR_ROLE_SLAVE = 3 # Read-only access.
# If the switch has not requested an identifier, the short_id should be set to
# OFPCID_UNDEFINED.
OFPCID_UNDEFINED = 0
# struct ofp_role_request
OFP_ROLE_REQUEST_PACK_STR = '!IH2xQ'
OFP_ROLE_REQUEST_SIZE = 24
assert (calcsize(OFP_ROLE_REQUEST_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ROLE_REQUEST_SIZE)
# enum ofp_role_prop_type
OFPRPT_EXPERIMENTER = 0xFFFF # Experimenter property.
# struct ofp_role_prop_header
_OFP_ROLE_PROP_HEADER_PACK_STR = 'HH'
OFP_ROLE_PROP_HEADER_PACK_STR = '!' + _OFP_ROLE_PROP_HEADER_PACK_STR
OFP_ROLE_PROP_HEADER_SIZE = 4
assert (calcsize(OFP_ROLE_PROP_HEADER_PACK_STR) ==
OFP_ROLE_PROP_HEADER_SIZE)
# struct ofp_role_prop_experimenter
OFP_ROLE_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_ROLE_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_ROLE_PROP_EXPERIMENTER_PACK_STR) ==
OFP_ROLE_PROP_EXPERIMENTER_SIZE)
# enum ofp_controller_role_reason
OFPCRR_MASTER_REQUEST = 0 # Another controller asked to be master.
OFPCRR_CONFIG = 1 # Configuration changed on the switch.
OFPCRR_EXPERIMENTER = 2 # Experimenter data changed.
# struct ofp_role_status
OFP_ROLE_STATUS_PACK_STR = '!IB3xQ'
OFP_ROLE_STATUS_SIZE = 24
assert (calcsize(OFP_ROLE_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
OFP_ROLE_STATUS_SIZE)
# enum ofp_async_config_prop_type
OFPACPT_PACKET_IN_SLAVE = 0 # Packet-in mask for slave.
OFPACPT_PACKET_IN_MASTER = 1 # Packet-in mask for master.
OFPACPT_PORT_STATUS_SLAVE = 2 # Port-status mask for slave.
OFPACPT_PORT_STATUS_MASTER = 3 # Port-status mask for master.
OFPACPT_FLOW_REMOVED_SLAVE = 4 # Flow removed mask for slave.
OFPACPT_FLOW_REMOVED_MASTER = 5 # Flow removed mask for master.
OFPACPT_ROLE_STATUS_SLAVE = 6 # Role status mask for slave.
OFPACPT_ROLE_STATUS_MASTER = 7 # Role status mask for master.
OFPACPT_TABLE_STATUS_SLAVE = 8 # Table status mask for slave.
OFPACPT_TABLE_STATUS_MASTER = 9 # Table status mask for master.
OFPACPT_REQUESTFORWARD_SLAVE = 10 # RequestForward mask for slave.
OFPACPT_REQUESTFORWARD_MASTER = 11 # RequestForward mask for master.
OFPACPT_FLOW_STATS_SLAVE = 12 # Flow stats mask for slave.
OFPACPT_FLOW_STATS_MASTER = 13 # Flow stats mask for master.
OFPACPT_CONT_STATUS_SLAVE = 14 # Controller status mask for slave.
OFPACPT_CONT_STATUS_MASTER = 15 # Controller status mask for master.
OFPACPT_EXPERIMENTER_SLAVE = 0xFFFE # Experimenter for slave.
OFPACPT_EXPERIMENTER_MASTER = 0xFFFF # Experimenter for master.
# struct ofp_async_config_prop_reasons
OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR = '!HHI'
OFP_ASYNC_CONFIG_PROP_REASONS_SIZE = 8
assert (calcsize(OFP_ASYNC_CONFIG_PROP_REASONS_PACK_STR) ==
OFP_ASYNC_CONFIG_PROP_REASONS_SIZE)
# struct ofp_async_config_prop_experimenter
OFP_ASYNC_CONFIG_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_ASYNC_CONFIG_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_ASYNC_CONFIG_PROP_EXPERIMENTER_PACK_STR) ==
OFP_ASYNC_CONFIG_PROP_EXPERIMENTER_SIZE)
# enum ofp_table_reason
OFPTR_VACANCY_DOWN = 3 # Vacancy down threshold event.
OFPTR_VACANCY_UP = 4 # Vacancy up threshold event.
# struct ofp_table_status
_OFP_TABLE_STATUS_0_PACK_STR = 'B7x'
OFP_TABLE_STATUS_0_PACK_STR = '!' + _OFP_TABLE_STATUS_0_PACK_STR
OFP_TABLE_STATUS_0_SIZE = 16
OFP_TABLE_STATUS_PACK_STR = (OFP_TABLE_STATUS_0_PACK_STR +
_OFP_TABLE_DESC_PACK_STR)
OFP_TABLE_STATUS_SIZE = 24
assert (calcsize(OFP_TABLE_STATUS_PACK_STR) + OFP_HEADER_SIZE ==
OFP_TABLE_STATUS_SIZE)
# enum ofp_requestforward_reason
OFPRFR_GROUP_MOD = 0 # Forward group mod requests.
OFPRFR_METER_MOD = 1 # Forward meter mod requests.
# enum ofp_bundle_prop_type
OFPBPT_TIME = 1 # Time property.
OFPBPT_EXPERIMENTER = 0xFFFF # Experimenter property.
# struct ofp_bundle_prop_experimenter
OFP_BUNDLE_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_BUNDLE_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_BUNDLE_PROP_EXPERIMENTER_PACK_STR) ==
OFP_BUNDLE_PROP_EXPERIMENTER_SIZE)
# struct ofp_bundle_prop_time
OFP_BUNDLE_PROP_TIME_PACK_STR0 = '!HH4x'
OFP_BUNDLE_PROP_TIME_PACK_STR0_SIZE = 8
assert (calcsize(OFP_BUNDLE_PROP_TIME_PACK_STR0) ==
OFP_BUNDLE_PROP_TIME_PACK_STR0_SIZE)
OFP_BUNDLE_PROP_TIME_PACK_STR = (OFP_BUNDLE_PROP_TIME_PACK_STR0 +
_OFP_TIME_PACK_STR)
OFP_BUNDLE_PROP_TIME_PACK_STR_SIZE = 24
assert (calcsize(OFP_BUNDLE_PROP_TIME_PACK_STR) ==
OFP_BUNDLE_PROP_TIME_PACK_STR_SIZE)
# enum ofp_bundle_ctrl_type
OFPBCT_OPEN_REQUEST = 0
OFPBCT_OPEN_REPLY = 1
OFPBCT_CLOSE_REQUEST = 2
OFPBCT_CLOSE_REPLY = 3
OFPBCT_COMMIT_REQUEST = 4
OFPBCT_COMMIT_REPLY = 5
OFPBCT_DISCARD_REQUEST = 6
OFPBCT_DISCARD_REPLY = 7
# enum ofp_bundle_flags
OFPBF_ATOMIC = 1 << 0 # Execute atomically.
OFPBF_ORDERED = 1 << 1 # Execute in specified order.
OFPBF_TIME = 1 << 2 # Execute in specified time.
# struct ofp_bundle_ctrl_msg
OFP_BUNDLE_CTRL_MSG_PACK_STR = '!IHH'
OFP_BUNDLE_CTRL_MSG_SIZE = 16
assert (calcsize(OFP_BUNDLE_CTRL_MSG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_BUNDLE_CTRL_MSG_SIZE)
# struct ofp_bundle_add_msg
_OFP_BUNDLE_ADD_MSG_0_PACK_STR = 'I2xH'
OFP_BUNDLE_ADD_MSG_0_PACK_STR = '!' + _OFP_BUNDLE_ADD_MSG_0_PACK_STR
OFP_BUNDLE_ADD_MSG_PACK_STR = (OFP_BUNDLE_ADD_MSG_0_PACK_STR +
_OFP_HEADER_PACK_STR)
OFP_BUNDLE_ADD_MSG_SIZE = 24
assert (calcsize(OFP_BUNDLE_ADD_MSG_PACK_STR) + OFP_HEADER_SIZE ==
OFP_BUNDLE_ADD_MSG_SIZE)
# Note: struct ofp_prop_experimenter is specific to this implementation.
# It does not have a corresponding structure in the specification.
# This structure defines common structure for ofp_*_prop_experimenter.
# struct ofp_prop_experimenter
OFP_PROP_EXPERIMENTER_PACK_STR = '!HHII'
OFP_PROP_EXPERIMENTER_SIZE = 12
assert (calcsize(OFP_PROP_EXPERIMENTER_PACK_STR) ==
OFP_PROP_EXPERIMENTER_SIZE)
# define constants
OFP_VERSION = 0x06
OFP_TCP_PORT = 6653
OFP_SSL_PORT = 6653
MAX_XID = 0xffffffff
|
{
"content_hash": "ccf1bb755350ed4b02d6b024ead22db4",
"timestamp": "",
"source": "github",
"line_count": 1809,
"max_line_length": 81,
"avg_line_length": 41.89110005527916,
"alnum_prop": 0.6746150090392051,
"repo_name": "elahejalalpour/ELRyu",
"id": "31e5cc05603a312133856dffa5b38cf0ae3b9d6e",
"size": "76463",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/ofproto/ofproto_v1_5.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9800"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "872506"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5321774"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
"""
.. module:: __main__
:platform: linux
:synopsis: Special main entry point.
.. moduleauthor:: Paul Fanelli <paul.fanelli@gmail.com>
.. modulecreated:: 6/28/15
"""
import sys
from planet_alignment.app.app_factory import AppFactory
from planet_alignment.cmd.cmd_parser import CommandParser
def main(argv=None):
    """Entry point: parse CLI arguments, build the app, run it, report.

    :param argv: argument list to parse; defaults to ``sys.argv[1:]``.
    :return: ``None`` (interpreted as exit status 0 by ``sys.exit``).
    """
    args = sys.argv[1:] if argv is None else argv
    parsed_args = CommandParser().parse(args)
    application = AppFactory(parsed_args).create()
    run_results = application.run()
    if run_results:
        application.print_results(run_results)


if __name__ == "__main__":
    sys.exit(main())
|
{
"content_hash": "028f63cf0b22949488c3680eadb301dc",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 57,
"avg_line_length": 21,
"alnum_prop": 0.645320197044335,
"repo_name": "paulfanelli/planet_alignment",
"id": "e53802d1978408768cd39ae7703555bfb6de8f55",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet_alignment/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48726"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core import mail
from django.contrib.auth.models import User
from sentry.models import Project, ProjectKey, Group, Event, Team, \
MessageFilterValue, MessageCountByMinute, FilterValue, PendingTeamMember
from tests.base import TestCase
class ProjectTest(TestCase):
    """Tests for Project.merge_to(): all related rows must move to the target."""
    fixtures = ['tests/fixtures/views.json']
    def setUp(self):
        # project id=1 is provided by the fixture
        self.project = Project.objects.get(id=1)
    def test_migrate(self):
        """Merging project 1 into a new project deletes the source and orphans nothing."""
        project2 = Project.objects.create(name='Test')
        self.project.merge_to(project2)
        # source project is gone and no related rows were left without a project
        self.assertFalse(Project.objects.filter(pk=1).exists())
        self.assertFalse(Group.objects.filter(project__isnull=True).exists())
        self.assertFalse(Event.objects.filter(project__isnull=True).exists())
        self.assertFalse(MessageFilterValue.objects.filter(project__isnull=True).exists())
        self.assertFalse(MessageCountByMinute.objects.filter(project__isnull=True).exists())
        self.assertFalse(FilterValue.objects.filter(project__isnull=True).exists())
        # counts expected after the merge, given the fixture contents
        self.assertEquals(project2.group_set.count(), 4)
        self.assertEquals(project2.event_set.count(), 10)
        self.assertEquals(project2.messagefiltervalue_set.count(), 0)
        self.assertEquals(project2.messagecountbyminute_set.count(), 0)
        self.assertEquals(project2.filtervalue_set.count(), 0)
class ProjectKeyTest(TestCase):
    """Tests for ProjectKey DSN rendering and automatic key creation."""
    fixtures = ['tests/fixtures/views.json']

    def _make_key(self):
        # All DSN tests use the same key material; only the URL prefix varies.
        return ProjectKey(project_id=1, public_key='public', secret_key='secret')

    def test_get_dsn(self):
        key = self._make_key()
        with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
            self.assertEquals(key.get_dsn(), 'http://public:secret@example.com/1')

    def test_get_dsn_with_ssl(self):
        key = self._make_key()
        with self.Settings(SENTRY_URL_PREFIX='https://example.com'):
            self.assertEquals(key.get_dsn(), 'https://public:secret@example.com/1')

    def test_get_dsn_with_port(self):
        key = self._make_key()
        with self.Settings(SENTRY_URL_PREFIX='http://example.com:81'):
            self.assertEquals(key.get_dsn(), 'http://public:secret@example.com:81/1')

    def test_key_is_created_for_project_with_existing_team(self):
        owner = User.objects.create(username='admin')
        team = Team.objects.create(name='Test', slug='test', owner=owner)
        project = Project.objects.create(name='Test', slug='test', owner=owner, team=team)
        self.assertTrue(project.key_set.filter(user=owner).exists())

    def test_key_is_created_for_project_with_new_team(self):
        owner = User.objects.create(username='admin')
        project = Project.objects.create(name='Test', slug='test', owner=owner)
        self.assertTrue(project.key_set.filter(user=owner).exists())
class PendingTeamMemberTest(TestCase):
    """Tests for invite-token generation and invite e-mail delivery."""
    fixtures = ['tests/fixtures/views.json']

    def test_token_generation(self):
        invite = PendingTeamMember(id=1, team_id=1, email='foo@example.com')
        with self.Settings(SENTRY_KEY='a'):
            self.assertEquals(invite.token, 'f3f2aa3e57f4b936dfd4f42c38db003e')

    def test_token_generation_unicode_key(self):
        invite = PendingTeamMember(id=1, team_id=1, email='foo@example.com')
        binary_key = "\xfc]C\x8a\xd2\x93\x04\x00\x81\xeak\x94\x02H\x1d\xcc&P'q\x12\xa2\xc0\xf2v\x7f\xbb*lX"
        with self.Settings(SENTRY_KEY=binary_key):
            self.assertEquals(invite.token, 'df41d9dfd4ba25d745321e654e15b5d0')

    def test_send_invite_email(self):
        team = Team(name='test', slug='test', id=1)
        invite = PendingTeamMember(id=1, team=team, email='foo@example.com')
        with self.Settings(SENTRY_URL_PREFIX='http://example.com'):
            invite.send_invite_email()
            # exactly one message, addressed to the invitee
            self.assertEquals(len(mail.outbox), 1)
            sent = mail.outbox[0]
            self.assertEquals(sent.to, ['foo@example.com'])
{
"content_hash": "9423902752983d17eb494aa572d10a87",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 126,
"avg_line_length": 44.42696629213483,
"alnum_prop": 0.6820940819423369,
"repo_name": "chayapan/django-sentry",
"id": "b767495a14dfe87bc0c5738a20cf69a35e642747",
"size": "3971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/models/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "126130"
},
{
"name": "HTML",
"bytes": "174367"
},
{
"name": "JavaScript",
"bytes": "54696"
},
{
"name": "Makefile",
"bytes": "1867"
},
{
"name": "Python",
"bytes": "1330807"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import itertools
import unittest
from operator import attrgetter
from freezegun import freeze_time
from freezegun.api import FakeDatetime
from tests.mock import MagicMock, Mock
from streamlink.stream.dash_manifest import MPD, MPDParsers, MPDParsingError, utc, Representation
from tests.resources import xml
class TestMPDParsers(unittest.TestCase):
    """Unit tests for the small attribute parsers in MPDParsers."""

    def test_utc(self):
        # tzname()/dst() return values vary with the tzinfo implementation
        self.assertIn(utc.tzname(None), ("UTC", "UTC+00:00"))
        self.assertIn(utc.dst(None), (None, datetime.timedelta(0)))
        self.assertEqual(utc.utcoffset(None), datetime.timedelta(0))

    def test_bool_str(self):
        for text in ("true", "TRUE", "True"):
            self.assertEqual(MPDParsers.bool_str(text), True)
        for text in ("0", "False", "false", "FALSE"):
            self.assertEqual(MPDParsers.bool_str(text), False)

    def test_type(self):
        for presentation_type in ("dynamic", "static"):
            self.assertEqual(MPDParsers.type(presentation_type), presentation_type)
        with self.assertRaises(MPDParsingError):
            MPDParsers.type("other")

    def test_duration(self):
        self.assertEqual(MPDParsers.duration("PT1S"), datetime.timedelta(0, 1))

    def test_datetime(self):
        expected = datetime.datetime(2018, 1, 1, 0, 0, 0, tzinfo=utc)
        self.assertEqual(MPDParsers.datetime("2018-01-01T00:00:00Z"), expected)

    def test_segment_template(self):
        template = MPDParsers.segment_template("$Time$-$Number$-$Other$")
        self.assertEqual(template(Time=1, Number=2, Other=3), "1-2-3")
        self.assertEqual(MPDParsers.segment_template("$Number%05d$")(Number=123),
                         "00123")
        self.assertEqual(MPDParsers.segment_template("$Time%0.02f$")(Time=100.234),
                         "100.23")

    def test_frame_rate(self):
        self.assertAlmostEqual(MPDParsers.frame_rate("1/25"), 1 / 25.0)
        self.assertAlmostEqual(MPDParsers.frame_rate("0.2"), 0.2)

    def test_timedelta(self):
        self.assertEqual(MPDParsers.timedelta(1)(100), datetime.timedelta(0, 100.0))
        self.assertEqual(MPDParsers.timedelta(10)(100), datetime.timedelta(0, 10.0))

    def test_range(self):
        self.assertEqual(MPDParsers.range("100-"), (100, None))
        self.assertEqual(MPDParsers.range("100-199"), (100, 100))
        self.assertRaises(MPDParsingError, MPDParsers.range, "100")
class TestMPDParser(unittest.TestCase):
    """End-to-end tests for MPD manifest parsing and segment URL generation.

    Each test loads a fixture manifest from tests/resources/dash/ and checks
    the init/media segment URLs produced for one representation.
    """
    # show full diffs for the long URL-list comparisons
    maxDiff = None
    def test_segments_number_time(self):
        with xml("dash/test_1.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[0].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/tracks-v3/init-1526842800.g_m4v")
            video_segments = list(map(attrgetter("url"), (itertools.islice(segments, 5))))
            # suggested delay is 11 seconds, each segment is 5 seconds long - so there should be 3
            self.assertSequenceEqual(video_segments,
                                     ['http://test.se/tracks-v3/dvr-1526842800-698.g_m4v?t=3403000',
                                      'http://test.se/tracks-v3/dvr-1526842800-699.g_m4v?t=3408000',
                                      'http://test.se/tracks-v3/dvr-1526842800-700.g_m4v?t=3413000'])
    def test_segments_static_number(self):
        # static manifest: the full (finite) segment list is generated
        with xml("dash/test_2.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[3].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/video/250kbit/init.mp4")
            video_segments = list(map(attrgetter("url"), (itertools.islice(segments, 100000))))
            self.assertEqual(len(video_segments), 444)
            self.assertSequenceEqual(video_segments[:5],
                                     ['http://test.se/video/250kbit/segment_1.m4s',
                                      'http://test.se/video/250kbit/segment_2.m4s',
                                      'http://test.se/video/250kbit/segment_3.m4s',
                                      'http://test.se/video/250kbit/segment_4.m4s',
                                      'http://test.se/video/250kbit/segment_5.m4s'])
    def test_segments_dynamic_time(self):
        with xml("dash/test_3.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[0].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/video-2800000-0.mp4?z32=")
            video_segments = list(map(attrgetter("url"), (itertools.islice(segments, 3))))
            # default suggested delay is 3 seconds, each segment is 4 seconds long - so there should be 1 segment
            self.assertSequenceEqual(video_segments,
                                     ['http://test.se/video-time=1525450872000-2800000-0.m4s?z32='])
    def test_segments_dynamic_number(self):
        # freeze the clock so availability times are deterministic
        with freeze_time(FakeDatetime(2018, 5, 22, 13, 37, 0, tzinfo=utc)) as frozen_datetime:
            with xml("dash/test_4.mpd") as mpd_xml:
                mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
                segments = mpd.periods[0].adaptationSets[0].representations[0].segments()
                init_segment = next(segments)
                self.assertEqual(init_segment.url, "http://test.se/hd-5-init.mp4")
                video_segments = []
                for _ in range(3):
                    seg = next(segments)
                    video_segments.append((seg.url,
                                           seg.available_at))
                # segments become available 5 seconds apart
                self.assertSequenceEqual(video_segments,
                                         [('http://test.se/hd-5_000311235.mp4',
                                           datetime.datetime(2018, 5, 22, 13, 37, 0, tzinfo=utc)),
                                          ('http://test.se/hd-5_000311236.mp4',
                                           datetime.datetime(2018, 5, 22, 13, 37, 5, tzinfo=utc)),
                                          ('http://test.se/hd-5_000311237.mp4',
                                           datetime.datetime(2018, 5, 22, 13, 37, 10, tzinfo=utc))
                                          ])
    def test_segments_static_no_publish_time(self):
        with xml("dash/test_5.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[1].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/dash/150633-video_eng=194000.dash")
            video_segments = [x.url for x in itertools.islice(segments, 3)]
            self.assertSequenceEqual(video_segments,
                                     ['http://test.se/dash/150633-video_eng=194000-0.dash',
                                      'http://test.se/dash/150633-video_eng=194000-2000.dash',
                                      'http://test.se/dash/150633-video_eng=194000-4000.dash',
                                      ])
    def test_segments_list(self):
        # SegmentList-based manifest (explicit per-segment URLs)
        with xml("dash/test_7.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[0].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/chunk_ctvideo_ridp0va0br4332748_cinit_mpd.m4s")
            video_segments = [x.url for x in itertools.islice(segments, 3)]
            self.assertSequenceEqual(video_segments,
                                     ['http://test.se/chunk_ctvideo_ridp0va0br4332748_cn1_mpd.m4s',
                                      'http://test.se/chunk_ctvideo_ridp0va0br4332748_cn2_mpd.m4s',
                                      'http://test.se/chunk_ctvideo_ridp0va0br4332748_cn3_mpd.m4s',
                                      ])
    def test_segments_dynamic_timeline_continue(self):
        # a manifest reload must resume the timeline where the previous
        # manifest stopped (timelines are carried over into the new MPD)
        with xml("dash/test_6_p1.mpd") as mpd_xml_p1:
            with xml("dash/test_6_p2.mpd") as mpd_xml_p2:
                mpd_p1 = MPD(mpd_xml_p1, base_url="http://test.se/", url="http://test.se/manifest.mpd")
                segments_p1 = mpd_p1.periods[0].adaptationSets[0].representations[0].segments()
                init_segment = next(segments_p1)
                self.assertEqual(init_segment.url, "http://test.se/video/init.mp4")
                video_segments_p1 = [x.url for x in itertools.islice(segments_p1, 100)]
                self.assertSequenceEqual(video_segments_p1,
                                         ['http://test.se/video/1006000.mp4',
                                          'http://test.se/video/1007000.mp4',
                                          'http://test.se/video/1008000.mp4',
                                          'http://test.se/video/1009000.mp4',
                                          'http://test.se/video/1010000.mp4'])
                # Continue in the next manifest
                mpd_p2 = MPD(mpd_xml_p2,
                             base_url=mpd_p1.base_url,
                             url=mpd_p1.url,
                             timelines=mpd_p1.timelines)
                segments_p2 = mpd_p2.periods[0].adaptationSets[0].representations[0].segments(init=False)
                video_segments_p2 = [x.url for x in itertools.islice(segments_p2, 100)]
                self.assertSequenceEqual(video_segments_p2,
                                         ['http://test.se/video/1011000.mp4',
                                          'http://test.se/video/1012000.mp4',
                                          'http://test.se/video/1013000.mp4',
                                          'http://test.se/video/1014000.mp4',
                                          'http://test.se/video/1015000.mp4'])
    def test_tsegment_t_is_none_1895(self):
        """
        Verify the fix for https://github.com/streamlink/streamlink/issues/1895
        """
        with xml("dash/test_8.mpd") as mpd_xml:
            mpd = MPD(mpd_xml, base_url="http://test.se/", url="http://test.se/manifest.mpd")
            segments = mpd.periods[0].adaptationSets[0].representations[0].segments()
            init_segment = next(segments)
            self.assertEqual(init_segment.url, "http://test.se/video-2799000-0.mp4?z32=CENSORED_SESSION")
            video_segments = [x.url for x in itertools.islice(segments, 3)]
            self.assertSequenceEqual(video_segments,
                                     ['http://test.se/video-time=0-2799000-0.m4s?z32=CENSORED_SESSION',
                                      'http://test.se/video-time=4000-2799000-0.m4s?z32=CENSORED_SESSION',
                                      'http://test.se/video-time=8000-2799000-0.m4s?z32=CENSORED_SESSION',
                                      ])
    def test_bitrate_rounded(self):
        def mock_rep(bandwidth):
            # minimal Representation node: only the attributes read by
            # bandwidth_rounded are provided
            node = Mock(
                tag="Representation",
                attrib={
                    "id": "test",
                    "bandwidth": bandwidth,
                    "mimeType": "video/mp4"
                }
            )
            node.findall.return_value = []
            return Representation(node)
        self.assertEqual(mock_rep(1.2*1000.0).bandwidth_rounded, 1.2)
        self.assertEqual(mock_rep(45.6*1000.0).bandwidth_rounded, 46.0)
        self.assertEqual(mock_rep(134.0*1000.0).bandwidth_rounded, 130.0)
        self.assertEqual(mock_rep(1324.0*1000.0).bandwidth_rounded, 1300.0)
|
{
"content_hash": "92694ed4268bb2105ddaaf63834194c1",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 113,
"avg_line_length": 51.675,
"alnum_prop": 0.5474923399451701,
"repo_name": "back-to/streamlink",
"id": "9270552abebd86ddb21fb7cca45906f2f9d07a92",
"size": "12402",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/streams/test_dash_parser.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1451380"
},
{
"name": "Shell",
"bytes": "18044"
}
],
"symlink_target": ""
}
|
import scrapy
from opensecrets.items import ComitteeFunds
import logging
import re
class ComitteesSpider(scrapy.Spider):
    """Scrape committee funding profiles from opensecrets.org.

    NOTE: Python 2 code (uses the `print out` statement).
    """
    name = "comittees"
    allowed_domains = ["www.opensecrets.org"]
    start_urls = (
        'http://www.opensecrets.org/cmteprofiles/',
    )
    def parse(self, response):
        """Collect committee profile URLs, then request each for congresses 105-114."""
        # committee pages come from the page's <form> options; rewrite the
        # "overview" URLs to their "profiles" variant
        out=["http://www.opensecrets.org%s"% i for i in response.xpath('//form//option/@value').extract()]
        out=[re.sub('overview','profiles',i) for i in out]
        logging.log(logging.DEBUG, out)
# out=[out[10]]
        print out
        for i in out:
            logging.log(logging.DEBUG, 'sending to pull %s' %i)
            # issue one request per congress number (105th through 114th)
            years=range(105,115)
            for j in years:
                i=re.sub('congno=\d{3}',"congno=%s"%j,i)
                request= scrapy.Request(i,callback=self.pull_numbers)
                request.meta['year']=j
                yield request
    def pull_numbers(self,response):
        """Parse one committee profile page into ComitteeFunds items."""
        year=response.meta['year']
        # member headings read "...to <name>"; keep only the name part
        members=response.xpath("//div[following-sibling::table[1]]/span/text()").extract()
        members=[re.findall('^.+to (.+?$)',i) for i in members]
        comittee=response.xpath('//h1[1]/text()').extract()
        for i in range(len(members)):
            logging.log(logging.DEBUG,'iteration %s'%i)
            member=members[i]
            logging.log(logging.DEBUG,'member: %s'%member)
            # NOTE(review): XPath position() is 1-based while i starts at 0 --
            # confirm the first member's table is not skipped/misaligned.
            industry=response.xpath('//table[preceding-sibling::div[1] and position()=%s]//tr/td[position()=1]/text()'%i).extract()
            funds=response.xpath('//table[preceding-sibling::div[1] and position()=%s]//tr/td[position()=2]/text()'%i).extract()
            scrapy_record=ComitteeFunds()
            scrapy_record['member']=member
            scrapy_record['comittee']=comittee
            scrapy_record['funds']=funds
            scrapy_record['industry']=industry
            scrapy_record['year']=year
            yield scrapy_record
|
{
"content_hash": "6cef14542e6b53543d514041c17f9375",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 131,
"avg_line_length": 39.142857142857146,
"alnum_prop": 0.5855057351407716,
"repo_name": "trcook/open_secrets_scrape",
"id": "c50154be9337a7f1aada41245c0407957536eb93",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opensecrets/spiders/comittees.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103899"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
}
|
import sys
import onnx
from onnx import helper, shape_inference
from onnx import TensorProto
import numpy as np
from onnx import numpy_helper
# --- command-line entry: read the input model path, derive the output path ---
if len(sys.argv) < 2:
    print("Please give model path...")
    # sys.exit instead of exit(): the `exit` builtin is injected by site.py
    # and is not guaranteed to exist (e.g. under `python -S`).
    sys.exit(1)
input_model_name = sys.argv[1]
# strip the ".onnx" suffix (5 chars) and tag the output file name
output_model_name = input_model_name[:-5] + "_optimized.onnx"
model = onnx.load(input_model_name)
def add_name(model):
    """Give every graph node a unique name of the form "<op_type>_<index>"."""
    for idx, node in enumerate(model.graph.node):
        node.name = "%s_%d" % (node.op_type, idx)
def find_input_node(model, arg):
    """Return the node producing output `arg`, or None unless exactly one matches."""
    producers = [node
                 for node in model.graph.node
                 for out in node.output
                 if out == arg]
    return producers[0] if len(producers) == 1 else None
def find_output_node(model, arg):
    """Return the node consuming input `arg`, or None unless exactly one matches."""
    consumers = [node
                 for node in model.graph.node
                 for inp in node.input
                 if inp == arg]
    return consumers[0] if len(consumers) == 1 else None
def find_input(model, arg):
    """Return the first graph initializer named `arg`, or None if absent."""
    return next(
        (init for init in model.graph.initializer if init.name == arg),
        None)
def find_all_fused_nodes(model, concat_node):
    """Breadth-first walk upstream from `concat_node`.

    Returns `concat_node` plus every producer reachable through node
    inputs, in BFS order.  The walk does not continue past Shape nodes
    (their inputs are runtime tensors, not part of the constant subgraph).
    """
    visited = []
    queue = [concat_node]
    while queue:
        current = queue.pop(0)
        visited.append(current)
        if current.op_type == "Shape":
            continue
        for arg in current.input:
            producer = find_input_node(model, arg)
            if producer is not None:
                queue.append(producer)
    return visited
def get_node_index(model, node):
    """Return the index of `node` (by == comparison) in model.graph.node, or None."""
    for idx, candidate in enumerate(model.graph.node):
        if candidate == node:
            return idx
    return None
def add_const(model, name, output, t_value=None, f_value=None):
    """Append a Constant node named `name` producing output `output`.

    Exactly one of `t_value` (a TensorProto) or `f_value` (a float) should
    be supplied; `t_value` takes precedence when both are set.  Returns the
    new node.
    """
    const_node = model.graph.node.add()
    const_node.op_type = "Constant"
    const_node.name = name
    const_node.output.extend([output])
    attr = const_node.attribute.add()
    attr.name = "value"
    if t_value is not None:
        attr.type = 4  # AttributeProto.TENSOR
        attr.t.CopyFrom(t_value)
    else:
        attr.type = 1  # AttributeProto.FLOAT
        attr.f = f_value
    return const_node
def process_concat(model):
    """Fold constant Unsqueeze+Concat shape computations into Reshape nodes.

    For every Concat node, a target shape is reconstructed from its
    Unsqueeze inputs: a Constant input contributes its scalar value, any
    other input contributes 0 (for Reshape, 0 means "copy this dimension
    from the input").  The constant subgraph feeding the Concat is deleted
    and the downstream Reshape is rewired to a single Constant shape tensor.
    """
    new_nodes = {}
    delete_nodes = []
    for node in model.graph.node:
        if node.op_type == "Concat":
            input_nodes = []
            for input in node.input:
                input_nodes.append(find_input_node(model, input))
            # figure out target shape
            shape = []
            for input_node in input_nodes:
                assert input_node.op_type == "Unsqueeze"
                const_input = find_input_node(model, input_node.input[0])
                if const_input.op_type != "Constant":
                    # dynamic dimension: 0 tells Reshape to keep the input dim
                    shape.append(0)
                else:
                    attr = const_input.attribute
                    assert len(attr) == 1
                    assert attr[0].name == "value"
                    assert attr[0].type == 4  # AttributeProto.TENSOR
                    data = numpy_helper.to_array(attr[0].t)
                    # np.asscalar() was deprecated and removed in NumPy 1.23;
                    # ndarray.item() is the supported equivalent.
                    shape.append(data.item())
            print("concat node: %s, new_shape is: %s" % (node.name, shape))
            # find out the nodes need to be deleted.
            fuse_nodes = find_all_fused_nodes(model, node)
            reshape_node = find_output_node(model, node.output[0])
            assert reshape_node.op_type == "Reshape"
            new_nodes[get_node_index(model, reshape_node)] = shape
            for n in fuse_nodes:
                delete_nodes.append(get_node_index(model, n))
    # insert new shape to reshape
    index = 0
    for reshape_node_index in new_nodes:
        shape_tensor = numpy_helper.from_array(np.asarray(new_nodes[reshape_node_index], dtype=np.int64))
        const_node = add_const(model, "concat_shape_node_%d" % index, "concat_shape_%d" % index, shape_tensor)
        index += 1
        reshape_node = model.graph.node[reshape_node_index]
        reshape_node.input[1] = const_node.output[0]
    # delete nodes, highest index first so earlier indices stay valid
    delete_nodes.sort(reverse=True)
    for delete_node in delete_nodes:
        del model.graph.node[delete_node]
def add_cast(model, name, input, output, type):
    """Append a Cast node to the graph and return it.

    `type` is stored in the node's "to" attribute (an INT attribute,
    AttributeProto type 2) and names the target tensor element type.
    """
    cast_node = model.graph.node.add()
    cast_node.op_type = "Cast"
    cast_node.name = name
    cast_node.input.extend([input])
    cast_node.output.extend([output])
    to_attr = cast_node.attribute.add()
    to_attr.name = "to"
    to_attr.type = 2
    to_attr.i = type
    return cast_node
def fix_expand(model):
    """Replace the Expand node's constant data input with a sliced arange.

    Builds Shape -> Cast(float) -> Min(clamp) -> Cast(int64) as the slice
    end, then slices a constant [1, 512] position-id table to the runtime
    length and feeds that into Expand.  Assumes the model contains exactly
    one Expand node whose inputs are a Constant and a Shape node.
    """
    # find expand node
    expand_node = None
    for node in model.graph.node:
        if node.op_type == "Expand":
            expand_node = node
            break
    assert expand_node is not None
    const_expand_input = find_input_node(model, expand_node.input[0])
    assert const_expand_input.op_type == "Constant"
    shape_node = find_input_node(model, expand_node.input[1])
    assert shape_node.op_type == "Shape"
    # insert cast --> min --> cast
    cast_1 = add_cast(model, "new_cast_01", shape_node.output[0], "to_min_01", 1)  # 1 = TensorProto.FLOAT
    min_target = numpy_helper.from_array(np.asarray([1, 9999], dtype=np.float32))
    min_target_node = add_const(model, "op_min_node_10", "op_min_ends_expand_10", min_target)
    min_node = model.graph.node.add()
    min_node.name = "new_min_01"
    min_node.op_type = "Min"
    min_node.input.extend([cast_1.output[0], min_target_node.output[0]])
    min_node.output.extend(["from_min_01"])
    cast_2 = add_cast(model, "new_cast_02", min_node.output[0], "to_slice_01", 7)  # 7 = TensorProto.INT64
    # insert slice: constant position table [[0, 1, ..., 511]]
    position = numpy_helper.from_array(np.expand_dims(np.arange(512, dtype=np.int64), axis=0))
    position_node = add_const(model, "position_01_node", "position_01", position)
    start_extend = numpy_helper.from_array(np.asarray([0, 0], dtype=np.int64), "start_expand_10")
    start_extend_node = add_const(model, "start_expand_10_node", "start_expand_10", start_extend)
    axes = numpy_helper.from_array(np.asarray([0, 1], dtype=np.int64), "axes_expand_10")
    axes_node = add_const(model, "axes_expand_10_node", "axes_expand_10", axes)
    slice_node = model.graph.node.add()
    slice_node.name = "new_slice_01"
    slice_node.op_type = "Slice"
    # Slice inputs: data, starts, ends (the clamped runtime shape), axes
    slice_node.input.extend(
        [position_node.output[0], start_extend_node.output[0], cast_2.output[0], axes_node.output[0]]
    )
    slice_node.output.extend(["from_slice_01"])
    # connect to expand
    expand_node.input[0] = slice_node.output[0]
    # delete the const input
    del model.graph.node[get_node_index(model, const_expand_input)]
def fix_dim(model):
    """Drop all graph inputs after the first three.

    Presumably keeps only the three model inputs the optimized graph
    needs -- TODO confirm which inputs occupy positions 0-2.
    """
    del model.graph.input[3:]
def replace_input_arg(model, arg, new_arg):
    """Rewrite, in place, every node input equal to `arg` to `new_arg`."""
    for node in model.graph.node:
        for idx, current in enumerate(node.input):
            if current == arg:
                node.input[idx] = new_arg
def find_weight_index(model, name):
    """Return the index of the initializer named `name`, or None if absent."""
    for idx, weight in enumerate(model.graph.initializer):
        if weight.name == name:
            return idx
    return None
def fix_transpose(model):
    """Constant-fold Transpose nodes whose input is a graph initializer.

    For every ``Transpose`` node that (a) reads directly from a weight
    initializer, (b) is that weight's only consumer, and (c) permutes a 2-D
    tensor with perm == (1, 0), the weight is transposed offline, the node is
    removed, and consumers are rewired to the new ``*_transposed`` initializer.
    Original initializers that became unused are deleted afterwards.

    Bug fix vs. the original: the fold used the *leaked* loop variable ``perm``
    from the scan loop (i.e. whatever the last scanned node's perm was) when
    transposing each weight. Since perm is asserted to be (1, 0) on a 2-D
    tensor, ``.T`` expresses the same operation without the fragile leak.
    """
    transpose = []
    for node in model.graph.node:
        if node.op_type != "Transpose":
            continue
        weight = find_input(model, node.input[0])
        if weight is None:
            continue
        # Count *input occurrences* of this weight across the graph (the
        # original counted occurrences, not distinct nodes) -- fold only
        # when the Transpose node is the sole consumer.
        result = []
        for n in model.graph.node:
            for input in n.input:
                if input == weight.name:
                    result.append(n)
        if len(result) > 1:
            continue
        perm = node.attribute[0]
        assert perm.name == "perm"
        perm = perm.ints
        assert len(perm) == 2 and perm[0] == 1 and perm[1] == 0
        transpose.append((get_node_index(model, node), weight))
    for t in transpose:
        node = model.graph.node[t[0]]
        weight = numpy_helper.to_array(t[1])
        assert len(weight.shape) == 2
        # perm was asserted to be (1, 0) on a 2-D tensor, so .T is exact.
        weight = weight.T
        new_weight = numpy_helper.from_array(weight, "%s_transposed" % t[1].name)
        model.graph.initializer.extend([new_weight])
        replace_input_arg(model, node.output[0], new_weight.name)
    # Delete folded nodes from the back so earlier indices stay valid.
    transpose.sort(reverse=True)
    for t in transpose:
        del model.graph.node[t[0]]
    # Drop the original weights that no longer feed anything.
    old_ws = []
    for t in transpose:
        if find_output_node(model, t[1].name) is None:
            old_ws.append(find_weight_index(model, t[1].name))
    old_ws.sort(reverse=True)
    for w_i in old_ws:
        del model.graph.initializer[w_i]
def process_dropout(model):
    """Replace every ``Dropout`` node with a ``TrainableDropout`` node.

    TrainableDropout takes the dropout ratio as a second (constant) input
    rather than as a node attribute, which is what ORT training expects.
    The original Dropout nodes are deleted after all replacements are added.

    Fix vs. the original: two leftover debug ``print()`` calls (ratio shape
    and the ratio constant node) were removed.
    """
    dropouts = []
    index = 0
    for node in model.graph.node:
        if node.op_type == "Dropout":
            new_dropout = model.graph.node.add()
            new_dropout.op_type = "TrainableDropout"
            new_dropout.name = "TrainableDropout_%d" % index
            # The ratio moves from attribute[0] into a 1-element float constant.
            ratio = np.asarray([node.attribute[0].f], dtype=np.float32)
            ratio_value = numpy_helper.from_array(ratio)
            ratio_node = add_const(
                model, "dropout_node_ratio_%d" % index, "dropout_node_ratio_%d" % index, t_value=ratio_value
            )
            new_dropout.input.extend([node.input[0], ratio_node.output[0]])
            new_dropout.output.extend(node.output)
            dropouts.append(get_node_index(model, node))
            index += 1
    # Delete from the back so earlier indices stay valid.
    dropouts.sort(reverse=True)
    for d in dropouts:
        del model.graph.node[d]
# NOTE: the value_info name below must be set differently for different
# versions of BERT (e.g. expand_out.name = '412').
def add_expand_shape(model):
    """Register the Expand node's output in graph.value_info, copying its
    type from the first graph input ('74' is the name used by the base model;
    '410' is used by another variant -- adjust per model)."""
    expand_out = model.graph.value_info.add()
    expand_out.name = "74"  # '410' for the other variant; '74' for base model
    expand_out.type.CopyFrom(model.graph.input[0].type)
# --- Apply the graph transformations in order ---
# add a unique name to every node
add_name(model)
# replace gather & concat with reshape
process_concat(model)
# fix the expand with dynamic shape
fix_expand(model)
# use dynamic batch/sequence
fix_dim(model)
# constant fold transpose
fix_transpose(model)
# replace dropout with trainable dropout
process_dropout(model)
# add output shape of expand
add_expand_shape(model)
# set opset version to 10
model.opset_import[0].version = 10

# Write the transformed model. A context manager guarantees the handle is
# closed even if serialization fails (the original leaked it on error).
with open(output_model_name, "wb") as f:
    f.write(model.SerializeToString())

# Use ORT to verify the converted model. Notice that you must use python package from the
# training branch because training requires some extra ops.
import onnxruntime as ort

# We convert model to accept variable-length batch size, so it can be any positive integer.
batch = 3
# This should match --max_seq_length when calling nv_run_pretraining.py.
sq_length = 512
# This should match vocab_size in bert_config.json in DeepLearningExamples/PyTorch/LanguageModeling/BERT.
vocab_size = 30528

# Create a fake data point.
input_ids = np.random.randint(low=0, high=vocab_size, size=(batch, sq_length), dtype=np.int64)
segment_ids = np.random.randint(low=0, high=2, size=(batch, sq_length), dtype=np.int64)
input_mask = np.ones((batch, sq_length), dtype=np.int64)

# Do forward using the original model.
sess = ort.InferenceSession(input_model_name, providers=ort.get_available_providers())
result = sess.run(None, {"input1": input_ids, "input2": segment_ids, "input3": input_mask})

# Do forward using the new model.
new_sess = ort.InferenceSession(output_model_name, providers=ort.get_available_providers())
new_result = new_sess.run(None, {"input1": input_ids, "input2": segment_ids, "input3": input_mask})

# Compare the outcomes from the two models; small norms mean the transform
# preserved the original model's behavior.
print(np.linalg.norm(result[0] - new_result[0]))
print(np.linalg.norm(result[1] - new_result[1]))
|
{
"content_hash": "e38b470f9bea75268a5a5d4119afb5bd",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 110,
"avg_line_length": 34.95180722891566,
"alnum_prop": 0.6131506377111341,
"repo_name": "microsoft/onnxruntime",
"id": "8c0be5b08c04a0f409cd43e7b7a08b11675775bf",
"size": "11604",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "orttraining/tools/scripts/model_transform.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1763425"
},
{
"name": "Batchfile",
"bytes": "17040"
},
{
"name": "C",
"bytes": "955390"
},
{
"name": "C#",
"bytes": "2304597"
},
{
"name": "C++",
"bytes": "39435305"
},
{
"name": "CMake",
"bytes": "514764"
},
{
"name": "CSS",
"bytes": "138431"
},
{
"name": "Cuda",
"bytes": "1104338"
},
{
"name": "Dockerfile",
"bytes": "8089"
},
{
"name": "HLSL",
"bytes": "11234"
},
{
"name": "HTML",
"bytes": "5933"
},
{
"name": "Java",
"bytes": "418665"
},
{
"name": "JavaScript",
"bytes": "212575"
},
{
"name": "Jupyter Notebook",
"bytes": "218327"
},
{
"name": "Kotlin",
"bytes": "4653"
},
{
"name": "Liquid",
"bytes": "5457"
},
{
"name": "NASL",
"bytes": "2628"
},
{
"name": "Objective-C",
"bytes": "151027"
},
{
"name": "Objective-C++",
"bytes": "107084"
},
{
"name": "Pascal",
"bytes": "9597"
},
{
"name": "PowerShell",
"bytes": "16419"
},
{
"name": "Python",
"bytes": "5041661"
},
{
"name": "Roff",
"bytes": "27539"
},
{
"name": "Ruby",
"bytes": "3545"
},
{
"name": "Shell",
"bytes": "116513"
},
{
"name": "Swift",
"bytes": "115"
},
{
"name": "TypeScript",
"bytes": "973087"
}
],
"symlink_target": ""
}
|
from novaclient.v1_1.flavors import *
from novaclient.base import Manager
"""novaclient/v1_1/flavors/FlavorManager"""
class CustomeFlavorManager(FlavorManager):
    """FlavorManager subclass whose ``_data`` helper returns the raw
    response payload instead of wrapping it in resource objects, plus
    convenience endpoints for PCI/VF device statistics."""

    def _data(self, url, response_key, obj_class=None, body=None):
        # POST when a request body is supplied, plain GET otherwise.
        if body:
            _resp, payload = self.api.client.post(url, body=body)
        else:
            _resp, payload = self.api.client.get(url)
        if obj_class is None:
            obj_class = self.resource_class
        data = payload[response_key]
        if isinstance(data, dict):
            # Paginated responses nest the list under 'values'; fall back
            # to the dict itself when that key is absent.
            data = data.get('values', data)
        return data

    def vf_list(self, detailed=True, is_public=True):
        """Get the list of all pci devices"""
        return self._data("/flavors/vf_list", "vf_list")

    def total_pci_devices(self, detailed=True, is_public=True):
        """Get the total pci devices available."""
        return self._data("/flavors/total_pci_devices", "total_pci_devices")

    def allocated_pci_devices(self, detailed=True, is_public=True):
        """Get the total pci devices allocated."""
        return self._data("/flavors/allocated_pci_devices", "allocated_pci_devices")

    def total_vfs_all_nodes(self, detailed=True, is_public=True):
        """Get the total pci devices available for all compute nodes."""
        return self._data("/flavors/total_vfs_all_nodes", "total_vfs_all_nodes")

    def allocated_vfs_all_nodes(self, detailed=True, is_public=True):
        """Get the total pci devices allocated for all compute nodes."""
        return self._data("/flavors/allocated_vfs_all_nodes", "allocated_vfs_all_nodes")
@classmethod
def custome_flavor_new(cls, *args, **kwargs):
    # Always allocate a CustomeFlavorManager, whatever class was asked for.
    # Constructor arguments are deliberately ignored here: __init__ still
    # runs afterwards with the original arguments.
    custome_flavor_manager = object.__new__(CustomeFlavorManager)
    return custome_flavor_manager

# Monkey-patch FlavorManager so every instantiation yields the subclass.
# NOTE(review): assigning a classmethod object as __new__ is unusual
# (__new__ is normally an implicit staticmethod) -- presumably this relies
# on the descriptor binding the class as `cls`; verify against the
# novaclient version in use before touching.
FlavorManager.__new__ = custome_flavor_new
|
{
"content_hash": "345a13ac7178592d4575fa721fac7c19",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 81,
"avg_line_length": 34.08,
"alnum_prop": 0.7007042253521126,
"repo_name": "khandavally/devstack",
"id": "a2aedd623c5e3e140409f9c3cc5b4c3f89bfb5d0",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EPAQA/flavor_manager_patch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "184826"
},
{
"name": "Shell",
"bytes": "147609"
}
],
"symlink_target": ""
}
|
from rest_framework import generics, permissions as drf_permissions
from rest_framework.exceptions import ValidationError, NotFound
from framework.auth.oauth_scopes import CoreScopes
from website.project.model import Q, Node
from api.base import permissions as base_permissions
from api.base.views import JSONAPIBaseView
from api.base.serializers import HideIfRetraction
from api.registrations.serializers import (
RegistrationSerializer,
RegistrationDetailSerializer,
RegistrationContributorsSerializer,
)
from api.nodes.views import (
NodeMixin, ODMFilterMixin, NodeContributorsList, NodeRegistrationsList,
NodeChildrenList, NodeCommentsList, NodeProvidersList, NodeLinksList,
NodeContributorDetail, NodeFilesList, NodeLinksDetail, NodeFileDetail,
NodeAlternativeCitationsList, NodeAlternativeCitationDetail, NodeLogList,
NodeInstitutionDetail, WaterButlerMixin)
from api.registrations.serializers import RegistrationNodeLinksSerializer
from api.nodes.permissions import (
ContributorOrPublic,
ReadOnlyIfRegistration,
)
from api.base.utils import get_object_or_error
class RegistrationMixin(NodeMixin):
    """Mixin with convenience methods for retrieving the current registration based on the
    current URL. By default, fetches the current registration based on the node_id kwarg.
    """

    serializer_class = RegistrationSerializer

    node_lookup_url_kwarg = 'node_id'

    def get_node(self, check_object_permissions=True):
        lookup_value = self.kwargs[self.node_lookup_url_kwarg]
        node = get_object_or_error(Node, lookup_value, display_name='node')
        # Folders/collections are a separate resource, and plain (unregistered)
        # nodes are not visible through registration endpoints -- both 404.
        if node.is_folder or not node.is_registration:
            raise NotFound
        if check_object_permissions:
            # May raise a permission denied
            self.check_object_permissions(self.request, node)
        return node
class RegistrationList(JSONAPIBaseView, generics.ListAPIView, ODMFilterMixin):
    """Node Registrations.

    Registrations are read-only snapshots of a project. This view is a list of all current registrations for which a user
    has access. A retracted registration will display a limited subset of information, namely, title, description,
    date_created, registration, retracted, date_registered, retraction_justification, and registration supplement. All
    other fields will be displayed as null. Additionally, the only relationships permitted to be accessed for a retraction
    are the contributors.

    Each resource contains the full representation of the registration, meaning additional requests to an individual
    registrations's detail view are not necessary. Unregistered nodes cannot be accessed through this endpoint.

    ##Registration Attributes

    Registrations have the "registrations" `type`.

        name                            type               description
        -------------------------------------------------------------------------------------------------------
        title                           string             title of the registered project or component
        description                     string             description of the registered node
        category                        string             node category, must be one of the allowed values
        date_created                    iso8601 timestamp  timestamp that the node was created
        date_modified                   iso8601 timestamp  timestamp when the node was last updated
        tags                            array of strings   list of tags that describe the registered node
        fork                            boolean            is this project a fork?
        registration                    boolean            has this project been registered?
        dashboard                       boolean            is this registered node visible on the user dashboard?
        public                          boolean            has this registration been made publicly-visible?
        retracted                       boolean            has this registration been retracted?
        date_registered                 iso8601 timestamp  timestamp that the registration was created
        retraction_justification        string             reasons for retracting the registration
        pending_retraction              boolean            is this registration pending retraction?
        pending_registration_approval   boolean            is this registration pending approval?
        pending_embargo                 boolean            is this registration pending an embargo?
        registered_meta                 dictionary         registration supplementary information
        registration_supplement         string             registration template

    ##Relationships

    ###Registered from

    The registration is branched from this node.

    ###Registered by

    The registration was initiated by this user.

    ###Other Relationships

    See documentation on registered_from detail view.  A registration has many of the same properties as a node.

    ##Links

    See the [JSON-API spec regarding pagination](http://jsonapi.org/format/1.0/#fetching-pagination).

    #This Request/Response

    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    serializer_class = RegistrationSerializer
    view_category = 'registrations'
    view_name = 'registration-list'

    # overrides ODMFilterMixin
    def get_default_odm_query(self):
        user = self.request.user
        # Anonymous users see only public registrations; logged-in users
        # additionally see registrations they contribute to.
        visibility = Q('is_public', 'eq', True)
        if not user.is_anonymous():
            visibility = visibility | Q('contributors', 'eq', user._id)
        return (
            Q('is_deleted', 'ne', True) &
            Q('is_registration', 'eq', True) &
            visibility
        )

    def is_blacklisted(self, query):
        """True when any filter in *query* targets a field hidden on retractions."""
        declared = self.serializer_class._declared_fields
        for query_param in query.nodes:
            field_name = getattr(query_param, 'attribute', None)
            if field_name and isinstance(declared.get(field_name), HideIfRetraction):
                return True
        return False

    # overrides ListAPIView
    def get_queryset(self):
        query = self.get_query_from_request()
        nodes = Node.find(query)
        if not self.is_blacklisted(query):
            return nodes
        # Filtering on a retraction-hidden field: exclude retractions.
        visible_ids = [node._id for node in nodes if not node.is_retracted]
        return Node.find(Q('_id', 'in', visible_ids))
class RegistrationDetail(JSONAPIBaseView, generics.RetrieveAPIView, RegistrationMixin, WaterButlerMixin):
    """Node Registrations.

    Registrations are read-only snapshots of a project. This view shows details about the given registration.

    Each resource contains the full representation of the registration, meaning additional requests to an individual
    registration's detail view are not necessary. A retracted registration will display a limited subset of information,
    namely, title, description, date_created, registration, retracted, date_registered, retraction_justification, and registration
    supplement. All other fields will be displayed as null. Additionally, the only relationships permitted to be accessed
    for a retracted registration are the contributors.

    ##Registration Attributes

    Registrations have the "registrations" `type`.

        name                            type               description
        -------------------------------------------------------------------------------------------------------
        title                           string             title of the registered project or component
        description                     string             description of the registered node
        category                        string             node category, must be one of the allowed values
        date_created                    iso8601 timestamp  timestamp that the node was created
        date_modified                   iso8601 timestamp  timestamp when the node was last updated
        tags                            array of strings   list of tags that describe the registered node
        fork                            boolean            is this project a fork?
        registration                    boolean            has this project been registered?
        dashboard                       boolean            is this registered node visible on the user dashboard?
        public                          boolean            has this registration been made publicly-visible?
        retracted                       boolean            has this registration been retracted?
        date_registered                 iso8601 timestamp  timestamp that the registration was created
        retraction_justification        string             reasons for retracting the registration
        pending_retraction              boolean            is this registration pending retraction?
        pending_registration_approval   boolean            is this registration pending approval?
        pending_embargo                 boolean            is this registration pending an embargo?
        registered_meta                 dictionary         registration supplementary information
        registration_supplement         string             registration template

    ##Relationships

    ###Registered from

    The registration is branched from this node.

    ###Registered by

    The registration was initiated by this user.

    ###Other Relationships

    See documentation on registered_from detail view.  A registration has many of the same properties as a node.

    ##Links

        self:  the canonical api endpoint of this registration
        html:  this registration's page on the OSF website

    #This Request/Response

    """
    permission_classes = (
        drf_permissions.IsAuthenticatedOrReadOnly,
        ContributorOrPublic,
        ReadOnlyIfRegistration,
        base_permissions.TokenHasScope,
    )

    required_read_scopes = [CoreScopes.NODE_REGISTRATIONS_READ]
    required_write_scopes = [CoreScopes.NODE_REGISTRATIONS_WRITE]

    serializer_class = RegistrationDetailSerializer
    view_category = 'registrations'
    view_name = 'registration-detail'

    # overrides RetrieveAPIView
    def get_object(self):
        # get_node() already 404s for non-registrations; this guard only
        # matters if a subclass relaxes that behavior.
        node = self.get_node()
        if not node.is_registration:
            raise ValidationError('This is not a registration.')
        return node
# Registration flavors of the generic node sub-views. Each subclass only
# rebinds view_category/view_name (and occasionally serializer_class); all
# behavior comes from the inherited node view plus RegistrationMixin.
class RegistrationContributorsList(NodeContributorsList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-contributors'

class RegistrationContributorDetail(NodeContributorDetail, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-contributor-detail'
    serializer_class = RegistrationContributorsSerializer

class RegistrationChildrenList(NodeChildrenList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-children'

class RegistrationCommentsList(NodeCommentsList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-comments'

class RegistrationLogList(NodeLogList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-logs'

class RegistrationProvidersList(NodeProvidersList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-providers'

class RegistrationNodeLinksList(NodeLinksList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-pointers'
    serializer_class = RegistrationNodeLinksSerializer

class RegistrationNodeLinksDetail(NodeLinksDetail, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-pointer-detail'
    serializer_class = RegistrationNodeLinksSerializer

class RegistrationRegistrationsList(NodeRegistrationsList, RegistrationMixin):
    view_category = 'registrations'
    # NOTE(review): this duplicates RegistrationProvidersList's view_name --
    # looks like a copy/paste slip (expected 'registration-registrations'?);
    # confirm against the URL conf before changing, since reverse() lookups
    # depend on it.
    view_name = 'registration-providers'

class RegistrationFilesList(NodeFilesList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-files'

class RegistrationFileDetail(NodeFileDetail, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-file-detail'

class RegistrationAlternativeCitationsList(NodeAlternativeCitationsList, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-alternative-citations'

class RegistrationAlternativeCitationDetail(NodeAlternativeCitationDetail, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-alternative-citation-detail'

class RegistrationInstitutionDetail(NodeInstitutionDetail, RegistrationMixin):
    view_category = 'registrations'
    view_name = 'registration-institution-detail'
|
{
"content_hash": "4c9c5f13465aa880901111a9bdfc0cf9",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 130,
"avg_line_length": 43.23076923076923,
"alnum_prop": 0.6708926453143536,
"repo_name": "brandonPurvis/osf.io",
"id": "04cfb389293f685b46d4feac309c2409b4da8c97",
"size": "13488",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "api/registrations/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "133911"
},
{
"name": "HTML",
"bytes": "68108"
},
{
"name": "JavaScript",
"bytes": "1394041"
},
{
"name": "Mako",
"bytes": "639052"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "4906600"
},
{
"name": "Shell",
"bytes": "2118"
}
],
"symlink_target": ""
}
|
from core import kde
class Restart4(kde.KDE4Action):
    """Restart Yakuake, open terminal instances will get killed."""

    def binary_dependencies(self):
        # The action is only available when the yakuake binary is installed.
        return ['yakuake']

    def execute(self):
        # Quit via the org.kde.yakuake D-Bus service, then relaunch 'yakuake'.
        return kde.restart('org.kde.yakuake', 'yakuake')
class Restart5(kde.KDE5Action):
    """Restart Yakuake, open terminal instances will get killed."""
    # Identical to Restart4 except for the KDE5 action base class.

    def binary_dependencies(self):
        return ['yakuake']

    def execute(self):
        return kde.restart('org.kde.yakuake', 'yakuake')
|
{
"content_hash": "5377d876ecc8d071ee2792d464bf8047",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 67,
"avg_line_length": 26.894736842105264,
"alnum_prop": 0.6673189823874756,
"repo_name": "nielsvm/kde4-profiles",
"id": "c6f2e779c8e28d1ff16485fd3b19a75abeb64e5c",
"size": "511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/yakuake/restart.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15999"
}
],
"symlink_target": ""
}
|
import unittest
from airflow.contrib.operators.gcs_download_operator \
import GoogleCloudStorageDownloadOperator
from tests.compat import mock
TASK_ID = 'test-gcs-download-operator'
TEST_BUCKET = 'test-bucket'
TEST_OBJECT = 'dir1/test-object'
LOCAL_FILE_PATH = '/home/airflow/gcp/test-object'
class GoogleCloudStorageDownloadOperatorTest(unittest.TestCase):

    @mock.patch('airflow.contrib.operators.gcs_download_operator.GoogleCloudStorageHook')
    def test_execute(self, mock_hook):
        """execute() should delegate the download to the GCS hook."""
        op = GoogleCloudStorageDownloadOperator(
            task_id=TASK_ID,
            bucket=TEST_BUCKET,
            object=TEST_OBJECT,
            filename=LOCAL_FILE_PATH,
        )
        op.execute(None)
        # The hook instance (mock_hook.return_value) must receive exactly one
        # download call carrying the configured bucket/object/target path.
        mock_hook.return_value.download.assert_called_once_with(
            bucket=TEST_BUCKET, object=TEST_OBJECT, filename=LOCAL_FILE_PATH
        )
|
{
"content_hash": "1bcd8ffc4d2cabc9faf9b25744259a30",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 89,
"avg_line_length": 39.16,
"alnum_prop": 0.6322778345250255,
"repo_name": "owlabs/incubator-airflow",
"id": "c366afed162eaf49507141fa9154bcaa1f529b4c",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/contrib/operators/test_gcs_download_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
}
|
from mozi.layers.activation import RELU, Softmax
from mozi.layers.normalization import LRN
from mozi.layers.convolution import Convolution2D, Pooling2D
from mozi.layers.linear import Linear
from mozi.layers.noise import Dropout
from mozi.layers.misc import Flatten
from mozi.layers.template import Template
class Alexnet(Template):

    def __init__(self, input_shape, output_dim):
        '''
        AlexNet built from mozi layers.

        FIELDS:
            self.params: any params from the layer that needs to be updated
                         by backpropagation can be put inside self.params

        PARAMS:
            input_shape: tuple
                shape of the input image with format (channel, height, width)
            output_dim: int
                the output dimension of the model
        '''
        assert len(input_shape) == 3, 'input_shape must be a tuple or list of dim (channel, height, width)'
        c, h, w = input_shape

        # Output spatial size after a conv/pool with the given kernel/stride.
        # Floor division keeps sizes integral on both Python 2 and 3 (the
        # original '/' yields floats under Python 3, breaking Linear dims).
        valid = lambda x, y, kernel, stride : ((x-kernel)//stride + 1, (y-kernel)//stride + 1)
        full = lambda x, y, kernel, stride : ((x+kernel)//stride - 1, (y+kernel)//stride - 1)

        self.layers = []

        # Conv block 1. The channel count now follows input_shape instead of
        # being hard-coded to 3 (the original ignored `c`), so non-RGB inputs
        # work too; RGB callers see identical behavior.
        self.layers.append(Convolution2D(input_channels=c, filters=96, kernel_size=(11,11),
                                         stride=(4,4), border_mode='valid'))
        nh, nw = valid(h, w, 11, 4)
        self.layers.append(RELU())
        self.layers.append(LRN())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)

        # Conv block 2
        self.layers.append(Convolution2D(input_channels=96, filters=256, kernel_size=(5,5),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 5, 1)
        self.layers.append(RELU())
        self.layers.append(LRN())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)

        # Conv blocks 3-5
        self.layers.append(Convolution2D(input_channels=256, filters=384, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Convolution2D(input_channels=384, filters=384, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Convolution2D(input_channels=384, filters=256, kernel_size=(3,3),
                                         stride=(1,1), border_mode='full'))
        nh, nw = full(nh, nw, 3, 1)
        self.layers.append(RELU())
        self.layers.append(Pooling2D(poolsize=(3,3), stride=(2,2), mode='max'))
        nh, nw = valid(nh, nw, 3, 2)

        # Classifier head: FC(4096) x2 with dropout, then softmax output.
        self.layers.append(Flatten())
        self.layers.append(Linear(256*nh*nw,4096))
        self.layers.append(RELU())
        self.layers.append(Dropout(0.5))

        self.layers.append(Linear(4096,4096))
        self.layers.append(RELU())
        self.layers.append(Dropout(0.5))

        self.layers.append(Linear(4096,output_dim))
        self.layers.append(Softmax())

        # Collect every trainable parameter for the optimizer.
        self.params = []
        for layer in self.layers:
            self.params += layer.params

    def _test_fprop(self, state_below):
        """Forward pass in inference mode (e.g. dropout disabled)."""
        for layer in self.layers:
            state_below = layer._test_fprop(state_below)
        return state_below

    def _train_fprop(self, state_below):
        """Forward pass in training mode."""
        for layer in self.layers:
            state_below = layer._train_fprop(state_below)
        return state_below
|
{
"content_hash": "4746eaf35748abadb0b3ffdef93b357a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 107,
"avg_line_length": 43.95061728395062,
"alnum_prop": 0.573876404494382,
"repo_name": "hycis/Mozi",
"id": "21ce1b4f651d53a87c8089f589d30031686cfb2e",
"size": "3561",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mozi/layers/alexnet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171403"
}
],
"symlink_target": ""
}
|
# Parse selected metadata fields out of a GenBank flat-file record given on
# the command line and accumulate them into a dated CSV report.
# NOTE: Python 2 script (print statements).
import sys
file=sys.argv[1]  # NOTE(review): shadows the builtin `file` (Python 2)

genbank_record=open(file, 'r')  # NOTE(review): handle is never closed
n = 0
l = []
# Scan line by line; each matching field keeps only the value portion.
for line in genbank_record:
    # print line
    if line.startswith("SOURCE"):
        newline=line.strip()
        source_divided=(newline.split(None, 1))
        print source_divided[1]
    if "ORGANISM" in line:
        newline=line.strip()
        organism_divided=(newline.split(None, 1))
        print organism_divided[1]
    # Feature qualifiers are of the form /key="value"; split on '='.
    if "country" in line:
        newline=line.strip()
        country_divided=(newline.split("=", 1))
        print country_divided[1]
    if "isolate" in line:
        newline=line.strip()
        isolate_divided=(newline.split("=", 1))
        print isolate_divided[1]
    if "isolation_source" in line:
        newline=line.strip()
        isolation_source_divided=(newline.split("=", 1))
        print isolation_source_divided[1]
    if "collection_date" in line:
        newline=line.strip()
        collection_date_divided=(newline.split("=", 1))
        print collection_date_divided[1]

# This is to deal with records that do not have these specific fields.
# I should make these into functions so that it is more efficient
# The string is divided into 2 because the 2nd item is called when I go to print out the file.
# (Each fallback is a 2-tuple of empty strings so index [1] below is safe.)
try:
    country_divided
except NameError:
    country_divided = "" , ""

try:
    collection_date_divided
except NameError:
    collection_date_divided = "" , ""
try:
    isolate_divided
except NameError:
    isolate_divided = "" , ""

# NOTE(review): no fallback exists for source/organism/isolation_source --
# a record missing those raises NameError at the Genbank_data line below.
import csv
Header = "Filename","Source", "Organism","Isolate","Isolation_source", "Location","Date"
Genbank_data = file,source_divided[1], organism_divided[1], country_divided[1], isolate_divided[1], isolation_source_divided[1],collection_date_divided[1]

# Date-stamped output name, e.g. Genbank_data_2015_01_31.csv
import time
timestr = time.strftime("%Y_%m_%d")

genbank_out="Genbank_data_"+timestr+".csv"
def addToFile(file, what):
    """Append the row *what* to the module-level CSV report ``genbank_out``.

    NOTE(review): the *file* parameter is ignored -- the function always
    appends to ``genbank_out``, which matches every existing call site, so
    the quirk is preserved rather than changed. Fix vs. the original: the
    file handle was never closed; ``with`` guarantees it is closed even if
    the write raises.
    """
    with open(genbank_out, "a") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(what)
# If the report already exists, append this record; otherwise ("Oh dear.")
# create it with a header row first. (Python 2: `with open(...)` here only
# probes for existence -- IOError means the file is missing.)
try:
   with open(genbank_out):
       addToFile(genbank_out, Genbank_data)
except IOError:
   print 'Oh dear.'
   outfile = open(genbank_out, "w" )
   writer = csv.writer(outfile)
   writer.writerow(Header)
   writer.writerow(Genbank_data)
   outfile.close()
|
{
"content_hash": "b1552bf1887d65809e65638b61bfbbbf",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 155,
"avg_line_length": 29.61038961038961,
"alnum_prop": 0.6355263157894737,
"repo_name": "jooolia/phylo_temporal_jericho",
"id": "19560b32348933469aba3eea31d2cdaba2bd0fcc",
"size": "2603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sequence_processing/parsing_genbank.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "20165"
},
{
"name": "Python",
"bytes": "79016"
},
{
"name": "R",
"bytes": "622719"
},
{
"name": "Shell",
"bytes": "122197"
}
],
"symlink_target": ""
}
|
import os.path
from datetime import date
from sqlite3 import dbapi2 as sqlite
import simplejson as json
import xbmcgui
import common
import connection
import database_common as db_common
def create():
    """Create the series/season/episode schema on the module connection.

    series 1-* season 1-* episode, linked by *_content_id foreign keys with
    ON DELETE CASCADE so removing a series removes its seasons and episodes.
    """
    c = _database.cursor()

    c.execute('''CREATE TABLE series
                 (content_id INTEGER PRIMARY KEY,
                  title TEXT,
                  plot TEXT,
                  trailer TEXT,
                  year INTEGER,
                  studio TEXT,
                  directors TEXT,
                  actors TEXT,
                  genres TEXT,
                  popularity INTEGER,
                  favor BOOLEAN DEFAULT 0,
                  in_last_update BOOLEAN DEFAULT 1);''')

    c.execute('''CREATE TABLE season
                 (content_id INTEGER PRIMARY KEY,
                  series_content_id INTEGER,
                  order_rank INTEGER,
                  title TEXT,
                  studio TEXT,
                  directors TEXT,
                  actors TEXT,
                  genres TEXT,
                  popularity INTEGER,
                  FOREIGN KEY(series_content_id) REFERENCES series(content_id) ON DELETE CASCADE);''')

    c.execute('''CREATE TABLE episode
                 (content_id INTEGER PRIMARY KEY,
                  season_content_id INTEGER,
                  media_id TEXT,
                  order_rank INTEGER,
                  title TEXT,
                  title_sort TEXT,
                  plot TEXT,
                  duration INTEGER,
                  year INTEGER,
                  studio TEXT,
                  mpaa TEXT,
                  directors TEXT,
                  actors TEXT,
                  genres TEXT,
                  popularity INTEGER,
                  added_date timestamp,
                  cc_available BOOLEAN,
                  is_hd BOOLEAN,
                  audio_type TEXT,
                  playcount INTEGER DEFAULT 0,
                  FOREIGN KEY(season_content_id) REFERENCES season(content_id) ON DELETE CASCADE);''')

    _database.commit()
    c.close()
def insert_series(content_id, title=None, plot=None, trailer=None, studio=None, directors=None, actors=None,
                  genres=None, popularity=None):
    """Insert or refresh a series row.

    Uses INSERT OR REPLACE keyed on content_id; the user's `favor` flag is
    preserved across refreshes via the SELECT subquery, and in_last_update
    is set so stale rows can be detected after a catalog sync.
    """
    c = _database.cursor()
    c.execute('''INSERT OR REPLACE INTO series (
                     content_id,
                     title,
                     plot,
                     trailer,
                     studio,
                     directors,
                     actors,
                     genres,
                     popularity,
                     favor,
                     in_last_update) VALUES (
                     :content_id,
                     :title,
                     :plot,
                     :trailer,
                     :studio,
                     :directors,
                     :actors,
                     :genres,
                     :popularity,
                     (SELECT favor FROM series WHERE content_id = :content_id),
                     :in_last_update)''', {
        'content_id': content_id,
        'title': title,
        'plot': plot,
        'trailer': trailer,
        'studio': studio,
        'directors': directors,
        'actors': actors,
        'genres': genres,
        'popularity': popularity,
        'in_last_update': True
    })
    _database.commit()
    c.close()
def insert_season(content_id, series_content_id, order_rank=None, title=None, studio=None, directors=None,
                  actors=None, genres=None, popularity=None):
    """Insert or refresh a season row linked to its parent series.

    order_rank is the season's ordering within the series listing.
    """
    c = _database.cursor()
    c.execute('''INSERT OR REPLACE INTO season (
                     content_id,
                     series_content_id,
                     order_rank,
                     title,
                     studio,
                     directors,
                     actors,
                     genres,
                     popularity) VALUES (
                     :content_id,
                     :series_content_id,
                     :order_rank,
                     :title,
                     :studio,
                     :directors,
                     :actors,
                     :genres,
                     :popularity)''', {
        'content_id': content_id,
        'series_content_id': series_content_id,
        'order_rank': order_rank,
        'title': title,
        'studio': studio,
        'directors': directors,
        'actors': actors,
        'genres': genres,
        'popularity': popularity
    })
    _database.commit()
    c.close()
def insert_episode(content_id, season_content_id, order_rank=None, title=None, title_sort=None, plot=None,
                   duration=None,
                   studio=None, mpaa=None, directors=None, actors=None, genres=None, popularity=None, added_date=None,
                   cc_available=None, is_hd=None, audio_type=None, year=None):
    """Insert or refresh an episode row linked to its parent season.

    The user's watch state (`playcount`) survives refreshes via the SELECT
    subquery, mirroring how insert_series preserves `favor`.
    """
    c = _database.cursor()
    c.execute('''INSERT OR REPLACE INTO episode (
                     content_id,
                     season_content_id,
                     order_rank,
                     title,
                     title_sort,
                     plot,
                     duration,
                     year,
                     studio,
                     mpaa,
                     directors,
                     actors,
                     genres,
                     popularity,
                     added_date,
                     cc_available,
                     is_hd,
                     audio_type,
                     playcount) VALUES (
                     :content_id,
                     :season_content_id,
                     :order_rank,
                     :title,
                     :title_sort,
                     :plot,
                     :duration,
                     :year,
                     :studio,
                     :mpaa,
                     :directors,
                     :actors,
                     :genres,
                     :popularity,
                     :added_date,
                     :cc_available,
                     :is_hd,
                     :audio_type,
                     (SELECT playcount FROM episode WHERE content_id = :content_id))''', {
        'content_id': content_id,
        'season_content_id': season_content_id,
        'order_rank': order_rank,
        'title': title,
        'title_sort': title_sort,
        'plot': plot,
        'duration': duration,
        'year': year,
        'studio': studio,
        'mpaa': mpaa,
        'directors': directors,
        'actors': actors,
        'genres': genres,
        'popularity': popularity,
        'added_date': added_date,
        'cc_available': cc_available,
        'is_hd': is_hd,
        'audio_type': audio_type
    })
    _database.commit()
    c.close()
def lookup_series(content_id):
c = _database.cursor()
return c.execute('SELECT DISTINCT * FROM series WHERE content_id = (?)', (content_id,))
def lookup_season(content_id):
    """Return a cursor over the season row matching ``content_id``."""
    cursor = _database.cursor()
    query = 'SELECT DISTINCT * FROM season WHERE content_id = (?)'
    return cursor.execute(query, (content_id,))
def lookup_episode(content_id):
    """Return a cursor over the episode row matching ``content_id``."""
    cursor = _database.cursor()
    query = 'SELECT DISTINCT * FROM episode WHERE content_id = (?)'
    return cursor.execute(query, (content_id,))
def delete_series(content_id):
    """Delete the series row matching ``content_id``.

    Fix: the DELETE was never committed, so it could be rolled back when the
    connection closed; commit explicitly like every other mutator here.
    """
    c = _database.cursor()
    c.execute('DELETE FROM series WHERE content_id = (?)', (content_id,))
    _database.commit()
    c.close()
def watch_episode(content_id):
    """Increment an episode's play count; return the affected row count.

    Implements the old TODO: previously the playcount was hard-coded to 1,
    so repeat views never accumulated. COALESCE guards a NULL playcount on
    rows that have never been watched.
    """
    c = _database.cursor()
    c.execute("UPDATE episode SET playcount = COALESCE(playcount, 0) + 1 WHERE content_id = (?)",
              (content_id,))
    _database.commit()
    affected = c.rowcount  # capture before close for clarity
    c.close()
    return affected
def unwatch_episode(content_id):
    """Reset an episode's play count to zero; return the affected row count."""
    cursor = _database.cursor()
    cursor.execute("UPDATE episode SET playcount=? WHERE content_id = (?)", (0, content_id))
    _database.commit()
    cursor.close()
    return cursor.rowcount
def favor_series(content_id):
    """Mark a series as a favourite; return the affected row count."""
    cursor = _database.cursor()
    cursor.execute("UPDATE series SET favor=? WHERE content_id=?", (True, content_id))
    _database.commit()
    cursor.close()
    return cursor.rowcount
def unfavor_series(content_id):
    """Clear a series' favourite flag; return the affected row count."""
    cursor = _database.cursor()
    cursor.execute("UPDATE series SET favor=? WHERE content_id=?", (False, content_id))
    _database.commit()
    cursor.close()
    return cursor.rowcount
def get_series(mpaafilter=False, genrefilter=False, yearfilter=False, directorfilter=False,
               watchedfilter=False, favorfilter=False, actorfilter=False, alphafilter=False, studiofilter=False):
    """Return a cursor over series rows, narrowed by at most one filter.

    Filters are evaluated in a fixed priority order (genre, mpaa, actor,
    director, studio, year, watched, favor, alpha); the first truthy one
    wins and the rest are ignored. With no filter, every series is returned.
    """
    c = _database.cursor()
    if genrefilter:
        pattern = '%' + genrefilter + '%'
        return c.execute('SELECT DISTINCT * FROM series WHERE genres LIKE (?)',
                         (pattern,))
    if mpaafilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE mpaa = (?)', (mpaafilter,))
    if actorfilter:
        pattern = '%' + actorfilter + '%'
        return c.execute('SELECT DISTINCT * FROM series WHERE actors LIKE (?)',
                         (pattern,))
    if directorfilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE directors LIKE (?)',
                         (directorfilter,))
    if studiofilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE studio = (?)', (studiofilter,))
    if yearfilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE year = (?)', (int(yearfilter),))
    if watchedfilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE playcount > 0')
    if favorfilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE favor = 1')
    if alphafilter:
        return c.execute('SELECT DISTINCT * FROM series WHERE title REGEXP (?)',
                         (alphafilter + '*',))
    return c.execute('SELECT DISTINCT * FROM series')
def get_series_season_count(series_id):
    """Return how many seasons a series has (0 when the series is unknown)."""
    cursor = _database.cursor()
    row = cursor.execute('''SELECT COUNT(sea.content_id) AS total_seasons
        FROM season AS sea
        JOIN series AS ser ON ser.content_id = sea.series_content_id
        WHERE ser.content_id = (?)
        GROUP BY ser.content_id''', (series_id,)).fetchone()
    cursor.close()
    return row['total_seasons'] if row else 0
def get_series_episode_count(series_id, filter=None):
    """Count a series' episodes; with filter='watched', only played ones."""
    cursor = _database.cursor()
    if filter == 'watched':
        query = '''SELECT COUNT(e.content_id) AS total_episodes
            FROM episode AS e
            JOIN season AS sea ON sea.content_id = e.season_content_id
            JOIN series AS ser ON ser.content_id = sea.series_content_id
            WHERE ser.content_id = (?) AND e.playcount > 0
            GROUP BY ser.content_id'''
    else:
        query = '''SELECT COUNT(e.content_id) AS total_episodes
            FROM episode AS e
            JOIN season AS sea ON sea.content_id = e.season_content_id
            JOIN series AS ser ON ser.content_id = sea.series_content_id
            WHERE ser.content_id = (?)
            GROUP BY ser.content_id'''
    row = cursor.execute(query, (series_id,)).fetchone()
    cursor.close()
    return row['total_episodes'] if row else 0
def get_series_year(series_id):
    """Return the earliest episode year of a series, or None if unknown."""
    cursor = _database.cursor()
    row = cursor.execute('''SELECT e.year FROM episode AS e
        JOIN season AS sea ON sea.content_id = e.season_content_id
        JOIN series AS ser ON ser.content_id = sea.series_content_id
        WHERE ser.content_id = (?)
        ORDER BY e.year ASC LIMIT 1''', (series_id,)).fetchone()
    cursor.close()
    return row['year'] if row else None
def get_seasons(series_id):
    """Return a cursor over all seasons of a series (with the series title)."""
    cursor = _database.cursor()
    query = '''SELECT DISTINCT sea.*,ser.title AS series_title
        FROM season AS sea
        JOIN series AS ser ON ser.content_id = sea.series_content_id
        WHERE series_content_id = (?)'''
    return cursor.execute(query, (series_id,))
def get_season_episode_count(season_id, filter=None):
    """Count a season's episodes; with filter='watched', only played ones."""
    cursor = _database.cursor()
    if filter == 'watched':
        query = '''SELECT COUNT(e.content_id) AS total_episodes
            FROM episode AS e
            JOIN season AS sea ON sea.content_id = e.season_content_id
            WHERE sea.content_id = (?) AND e.playcount > 0
            GROUP BY sea.content_id'''
    else:
        query = '''SELECT COUNT(e.content_id) AS total_episodes
            FROM episode AS e
            JOIN season AS sea ON sea.content_id = e.season_content_id
            WHERE sea.content_id = (?)
            GROUP BY sea.content_id'''
    row = cursor.execute(query, (season_id,)).fetchone()
    cursor.close()
    return row['total_episodes'] if row else 0
def get_season_year(season_id):
    """Return the earliest episode year within a season, or None."""
    cursor = _database.cursor()
    row = cursor.execute('''SELECT e.year FROM episode AS e
        JOIN season AS sea ON sea.content_id = e.season_content_id
        WHERE sea.content_id = (?)
        ORDER BY e.year ASC LIMIT 1''', (season_id,)).fetchone()
    cursor.close()
    return row['year'] if row else None
def get_episodes(season_id):
    """Return a cursor over a season's episodes.

    NOTE(review): the query aliases the *season* title and content_id as
    series_title/series_id — looks intentional for the UI layer, but verify.
    """
    cursor = _database.cursor()
    query = '''SELECT DISTINCT e.*, sea.order_rank AS season_num, sea.title AS series_title, sea.content_id AS series_id
        FROM episode AS e
        JOIN season AS sea ON sea.content_id = e.season_content_id
        JOIN series AS ser ON ser.content_id = sea.series_content_id
        WHERE season_content_id = (?)'''
    return cursor.execute(query, (season_id,))
def get_types(col):
    """Collect the distinct, de-duplicated values of one series column.

    String cells are either taken up to the word 'for' (MPAA-style
    "Rated X for ..." values) or split on commas (multi-value fields);
    numeric cells are stringified. Fixes: shadowed builtin ``list``,
    deprecated Python 2 ``<>`` operator, and ``type(x) == type(str())``
    in place of ``isinstance``.

    NOTE(review): ``col`` is interpolated directly into the SQL; parameter
    binding cannot quote identifiers, so callers must pass a trusted
    column name only.
    """
    c = _database.cursor()
    rows = c.execute('select distinct %s from series' % col)
    values = []
    excluded = ('', 0, 'Inc.', 'LLC.')
    for row in rows:
        data = row[0]
        if isinstance(data, str):
            if 'Rated' in data:
                item = data.split('for')[0]
                if item not in values and item not in excluded:
                    values.append(item)
            else:
                # Round-trip validates the bytes decode as UTF-8 before splitting
                # (kept from the original; raises on malformed input).
                parts = data.decode('utf-8').encode('utf-8').split(',')
                for item in parts:
                    item = item.replace('& ', '').strip()
                    if item not in values and item not in excluded:
                        values.append(item)
        elif data != 0:
            if data is not None:
                values.append(str(data))
    c.close()
    return values
def update_tv(force=False):
    """Refresh the local TV database from the remote metadata service.

    Skips work when the database was refreshed recently, unless ``force``
    is set. Series that disappear from the service are purged at the end.
    """
    if not force and not _needs_update():
        return
    dialog = xbmcgui.DialogProgress()
    dialog.create('Refreshing TV Database')
    dialog.update(0, 'Initializing TV Scan')
    json_url = '{0}/metadata-service/play/content/partner/Web_{1}.json?contentType=Series%20with%20Season'.format(db_common.API_DOMAIN, db_common.SERVICE)
    data = connection.get_url(json_url)
    tv_json = json.loads(data)['playContentArray']['playContents']
    # Mark every series as unfound; anything still flagged after the scan
    # is no longer on the service and gets deleted below.
    c = _database.cursor()
    c.execute("UPDATE series SET in_last_update = 0")
    _database.commit()
    c.close()
    total = len(tv_json)
    count = 0
    for series in tv_json:
        count += 1
        # Report real progress instead of a permanently-zero percentage.
        dialog.update(count * 100 / total, 'Scanned {0} of {1} TV series'.format(count, total))
        actors = ','.join(actor['fullName'] for actor in series['actors'])
        directors = ','.join(director['fullName'] for director in series['directors'])
        trailer = None
        # Fix: the key probed and the key read must match ('spotURL'); the
        # old "'sportURL' in series" test meant trailer was never populated.
        if 'spotURL' in series:
            trailer = series['spotURL']
        insert_series(content_id=series['contentId'], title=series['title'], plot=series['logLine'],
                      trailer=trailer, studio=series['studio'], directors=directors, actors=actors,
                      popularity=series['popularity'])
        # Recurse into the season children, which in turn insert episodes.
        if 'childContent' in series:
            _json_process_seasons(series['childContent'], series['contentId'])
    _set_last_update()
    # Purge series not seen in this scan; commit so the delete persists
    # (the original never committed this statement).
    c = _database.cursor()
    c.execute("DELETE FROM series WHERE in_last_update = 0")
    _database.commit()
    c.close()
    dialog.close()
def _json_process_seasons(season_data, series_content_id):
    """Upsert every season under a series, then recurse into its episodes."""
    for season in season_data:
        actor_names = [actor['fullName'] for actor in season['actors']]
        director_names = [director['fullName'] for director in season['directors']]
        insert_season(content_id=season['contentId'], series_content_id=series_content_id,
                      order_rank=season['order'], title=season['title'], studio=season['studio'],
                      directors=','.join(director_names), actors=','.join(actor_names),
                      popularity=season['popularity'])
        if 'childContent' in season:
            _json_process_episodes(season['childContent'], season['contentId'])
def _json_process_episodes(episode_data, season_content_id):
    """Upsert every episode belonging to one season."""
    for episode in episode_data:
        duration = int(episode['runtime'] / 60)  # service reports seconds; store minutes
        mpaa = db_common.parse_mpaa(episode['mpaaRating'])
        actor_names = [actor['fullName'] for actor in episode['actors']]
        director_names = [director['fullName'] for director in episode['directors']]
        # startDate carries a time component; keep only the YYYY-MM-DD part.
        added_date = common.parse_date(episode['startDate'][:10], '%Y-%m-%d')
        insert_episode(content_id=episode['contentId'], season_content_id=season_content_id,
                       order_rank=episode['order'], title=episode['properCaseTitle'],
                       title_sort=episode['titleSort'], plot=episode['logLine'], duration=duration,
                       studio=episode['studio'], mpaa=mpaa, directors=','.join(director_names),
                       actors=','.join(actor_names), popularity=episode['popularity'],
                       added_date=added_date, cc_available=episode['closedCaption'],
                       is_hd=episode['hd'], audio_type=episode['audioType'],
                       year=episode['releaseYear'])
def _needs_update():
    """True when the last refresh is unknown or older than 15 days."""
    if 'last_update' not in _database_meta:
        return True
    last_update = common.parse_date(_database_meta['last_update'], '%Y-%m-%d')
    return (date.today() - last_update.date()).days > 15
def _set_last_update():
    """Stamp today's date into the metadata sidecar file."""
    today = date.today().strftime('%Y-%m-%d')
    _database_meta['last_update'] = today
    _write_meta_file()
def _write_meta_file():
    """Persist the in-memory metadata dict to DB_META_FILE.

    Fix: use a ``with`` block so the file handle is closed even if
    ``json.dump`` raises (the original leaked the handle on error).
    """
    with open(DB_META_FILE, 'w') as f:
        json.dump(_database_meta, f)
# Sidecar file holding refresh bookkeeping (e.g. the 'last_update' stamp).
DB_META_FILE = os.path.join(common.__addonprofile__, 'tv.meta')
_database_meta = False
if os.path.exists(DB_META_FILE):
    f = open(DB_META_FILE, 'r')
    _database_meta = json.load(f)
    f.close()
else:
    _database_meta = {}
# SQLite database backing the series/season/episode tables.
DB_FILE = os.path.join(common.__addonprofile__, 'tv.db')
if not os.path.exists(DB_FILE):
    # First run: connect() creates the file, then create() builds the schema.
    _database = sqlite.connect(DB_FILE)
    _database.text_factory = str
    _database.row_factory = sqlite.Row
    create()
else:
    _database = sqlite.connect(DB_FILE)
    _database.text_factory = str
    _database.row_factory = sqlite.Row
|
{
"content_hash": "57d9b4298317dce2ebf8440b67600efc",
"timestamp": "",
"source": "github",
"line_count": 576,
"max_line_length": 154,
"avg_line_length": 33.3125,
"alnum_prop": 0.542995622263915,
"repo_name": "bradyemerson/plugin.video.starzplay",
"id": "787351278de65dda85178c0131a556b51789ad36",
"size": "19234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resources/lib/database_tv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "74984"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod

# Build a single pipeline combination: Logit transform, Lag1Trend,
# month-of-year seasonality, MLP model.
testmod.build_model(['Logit'], ['Lag1Trend'], ['Seasonal_MonthOfYear'], ['MLP'])
|
{
"content_hash": "5844c585d52563a586535f4ea62eb59a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 86,
"avg_line_length": 39.75,
"alnum_prop": 0.710691823899371,
"repo_name": "antoinecarme/pyaf",
"id": "eb3366505a489519f6abeab6ca20e499073751fb",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_Lag1Trend_Seasonal_MonthOfYear_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models.deletion import ProtectedError
from .testament import Testament
from django_rv_apps.apps.believe_his_prophets.models.language import Language
class Book(models.Model):
    """A book ordered within its testament, with per-language names
    attached through the BookLanguage join table."""

    id = models.AutoField(primary_key=True, editable=False)
    testament = models.ForeignKey(
        Testament,
        db_column='testament_id',
        blank=False,
        null=False,
        on_delete=models.PROTECT,
    )
    language = models.ManyToManyField(
        Language,
        through='BookLanguage',
        blank=True,
        related_name='book_language_set',
    )
    book_order = models.IntegerField(blank=False, null=False)

    class Meta:
        db_table = 'believe_book'
        verbose_name = 'Book'
        verbose_name_plural = 'Book'

    def __str__(self):
        return str(self.book_order)
class BookLanguage(models.Model):
    """Join table giving a Book its name and abbreviation in one Language."""

    id = models.AutoField(primary_key=True, editable=False)
    name = models.CharField(max_length=100, blank=False, null=False)
    abrev = models.CharField(max_length=100, blank=True, null=True)
    book = models.ForeignKey(
        'Book',
        db_column='book_id',
        related_name='book_language_book_set',
        blank=False,
        null=False,
        on_delete=models.PROTECT,
    )
    language = models.ForeignKey(
        Language,
        db_column='language_id',
        related_name='book_language_language_set',
        blank=False,
        null=False,
        on_delete=models.PROTECT,
    )

    class Meta:
        db_table = 'believe_book_language'
        verbose_name = 'BookLanguage'
        verbose_name_plural = 'BookLanguage'
|
{
"content_hash": "df0bef4e0336e6cf7fe4eb3fdd46f8fe",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 77,
"avg_line_length": 28.87719298245614,
"alnum_prop": 0.6336573511543135,
"repo_name": "davrv93/creed-en-sus-profetas-backend",
"id": "d7ef1df145f189254ce508f5cf4292eff111f550",
"size": "1647",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rv_apps/apps/believe_his_prophets/models/book.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "160568"
}
],
"symlink_target": ""
}
|
# Machine-generated protobuf bindings for
# mixer/adapter/model/v1beta1/info.proto (package
# istio.mixer.adapter.model.v1beta1). Do not hand-edit the descriptor
# tables below; regenerate from the .proto file instead.
import sys
# _b: identity on Python 2, latin1-encode on Python 3 (bytes-literal helper).
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


# File-level descriptor carrying the serialized FileDescriptorProto.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='mixer/adapter/model/v1beta1/info.proto',
  package='istio.mixer.adapter.model.v1beta1',
  syntax='proto3',
  serialized_options=_b('Z(istio.io/api/mixer/adapter/model/v1beta1'),
  serialized_pb=_b('\n&mixer/adapter/model/v1beta1/info.proto\x12!istio.mixer.adapter.model.v1beta1\"c\n\x04Info\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x11\n\ttemplates\x18\x03 \x03(\t\x12\x0e\n\x06\x63onfig\x18\x04 \x01(\t\x12\x15\n\rsession_based\x18\x05 \x01(\x08\x42*Z(istio.io/api/mixer/adapter/model/v1beta1b\x06proto3')
)


# Message descriptor for Info (fields: name, description, templates,
# config, session_based).
_INFO = _descriptor.Descriptor(
  name='Info',
  full_name='istio.mixer.adapter.model.v1beta1.Info',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='name', full_name='istio.mixer.adapter.model.v1beta1.Info.name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='description', full_name='istio.mixer.adapter.model.v1beta1.Info.description', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='templates', full_name='istio.mixer.adapter.model.v1beta1.Info.templates', index=2,
      number=3, type=9, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='config', full_name='istio.mixer.adapter.model.v1beta1.Info.config', index=3,
      number=4, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='session_based', full_name='istio.mixer.adapter.model.v1beta1.Info.session_based', index=4,
      number=5, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=77,
  serialized_end=176,
)

DESCRIPTOR.message_types_by_name['Info'] = _INFO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class built from the descriptor via reflection.
Info = _reflection.GeneratedProtocolMessageType('Info', (_message.Message,), dict(
  DESCRIPTOR = _INFO,
  __module__ = 'mixer.adapter.model.v1beta1.info_pb2'
  # @@protoc_insertion_point(class_scope:istio.mixer.adapter.model.v1beta1.Info)
  ))
_sym_db.RegisterMessage(Info)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
{
"content_hash": "52295f534f7ae5c202b7b60ddc1b4101",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 372,
"avg_line_length": 39.88421052631579,
"alnum_prop": 0.7120612298759568,
"repo_name": "rshriram/api",
"id": "28e678d3e58bb5f3474b1f63e0c402fe3e566dba",
"size": "3898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/istio_api/mixer/adapter/model/v1beta1/info_pb2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "14734"
},
{
"name": "Makefile",
"bytes": "17284"
},
{
"name": "Python",
"bytes": "962"
},
{
"name": "Shell",
"bytes": "12513"
}
],
"symlink_target": ""
}
|
from unittest import mock
import os
import platform
import requests
import tempfile
import shutil
import unittest
import yaml
from knack import CLI
from azure.cli.core._config import GLOBAL_CONFIG_DIR, ENV_VAR_PREFIX
from azure.cli.core.cloud import get_active_cloud
from azure.cli.core.profiles import get_sdk, ResourceType, supported_api_version
from msrestazure.azure_exceptions import CloudError
from azure.cli.command_modules.acs._params import (regions_in_preview,
regions_in_prod)
from azure.cli.command_modules.acs.custom import (merge_kubernetes_configurations, list_acs_locations,
_acs_browse_internal, _add_role_assignment, _get_default_dns_prefix,
create_application, _update_addons,
k8s_install_kubectl, k8s_install_kubelogin)
from azure.cli.command_modules.acs.addonconfiguration import ensure_container_insights_for_monitoring
from azure.mgmt.containerservice.models import (ContainerServiceOrchestratorTypes,
ContainerService,
ContainerServiceOrchestratorProfile)
from azure.mgmt.containerservice.v2020_11_01.models import ManagedClusterAddonProfile
from azure.cli.core.util import CLIError
from azure.cli.command_modules.acs._consts import (CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_AZURE_POLICY_ADDON_NAME)
class MockCLI(CLI):
    """Minimal CLI context wired to the global config dir and active cloud."""

    def __init__(self):
        super().__init__(cli_name='mock_cli', config_dir=GLOBAL_CONFIG_DIR,
                         config_env_var_prefix=ENV_VAR_PREFIX,
                         commands_loader_cls=MockLoader)
        self.cloud = get_active_cloud(self)
class MockLoader(object):
    """Stand-in command loader that resolves models through the SDK profile."""

    def __init__(self, ctx):
        self.ctx = ctx

    def get_models(self, *attr_args, **_):
        from azure.cli.core.profiles import get_sdk
        return get_sdk(self.ctx, ResourceType.MGMT_CONTAINERSERVICE,
                       'ManagedClusterAddonProfile', mod='models',
                       operation_group='managed_clusters')
class MockCmd(object):
    """Stand-in for an AzCliCommand carrying a CLI context and argument dict."""

    def __init__(self, ctx, arguments=None):
        # Fix: the old signature used a mutable default (``arguments={}``),
        # which is shared by every instance constructed without the argument.
        self.cli_ctx = ctx
        self.loader = MockLoader(self.cli_ctx)
        self.arguments = {} if arguments is None else arguments

    def get_models(self, *attr_args, **kwargs):
        return get_sdk(self.cli_ctx, ResourceType.MGMT_CONTAINERSERVICE, 'ManagedClusterAddonProfile',
                       mod='models', operation_group='managed_clusters')
class AcsCustomCommandTest(unittest.TestCase):
    def setUp(self):
        # Build a fresh mock CLI context for each test case.
        self.cli = MockCLI()
def test_list_acs_locations(self):
client, cmd = mock.MagicMock(), mock.MagicMock()
regions = list_acs_locations(client, cmd)
prodregions = regions["productionRegions"]
previewregions = regions["previewRegions"]
self.assertListEqual(prodregions, regions_in_prod, "Production regions doesn't match")
self.assertListEqual(previewregions, regions_in_preview, "Preview regions doesn't match")
def test_get_default_dns_prefix(self):
name = 'test5678910'
resource_group_name = 'resource_group_with_underscore'
sub_id = '123456789'
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, sub_id)
self.assertEqual(dns_name_prefix, "test567891-resourcegroupwit-123456")
name = '1test5678910'
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, sub_id)
self.assertEqual(dns_name_prefix, "a1test5678-resourcegroupwit-123456")
def test_add_role_assignment_basic(self):
role = 'Owner'
sp = '1234567'
cli_ctx = mock.MagicMock()
with mock.patch(
'azure.cli.command_modules.acs.custom.create_role_assignment') as create_role_assignment:
ok = _add_role_assignment(cli_ctx, role, sp, delay=0)
create_role_assignment.assert_called_with(cli_ctx, role, sp, True, scope=None)
self.assertTrue(ok, 'Expected _add_role_assignment to succeed')
def test_add_role_assignment_msi_basic(self):
role = 'Owner'
sp = '1234567'
cli_ctx = mock.MagicMock()
with mock.patch(
'azure.cli.command_modules.acs.custom.create_role_assignment') as create_role_assignment:
ok = _add_role_assignment(cli_ctx, role, sp, False, delay=0)
create_role_assignment.assert_called_with(cli_ctx, role, sp, False, scope=None)
self.assertTrue(ok, 'Expected _add_role_assignment with msi to succeed')
def test_add_role_assignment_exists(self):
role = 'Owner'
sp = '1234567'
cli_ctx = mock.MagicMock()
with mock.patch(
'azure.cli.command_modules.acs.custom.create_role_assignment') as create_role_assignment:
resp = requests.Response()
resp.status_code = 409
resp._content = b'Conflict'
err = CloudError(resp)
err.message = 'The role assignment already exists.'
create_role_assignment.side_effect = err
ok = _add_role_assignment(cli_ctx, role, sp, delay=0)
create_role_assignment.assert_called_with(cli_ctx, role, sp, True, scope=None)
self.assertTrue(ok, 'Expected _add_role_assignment to succeed')
def test_add_role_assignment_fails(self):
role = 'Owner'
sp = '1234567'
cli_ctx = mock.MagicMock()
with mock.patch(
'azure.cli.command_modules.acs.custom.create_role_assignment') as create_role_assignment:
resp = requests.Response()
resp.status_code = 500
resp._content = b'Internal Error'
err = CloudError(resp)
err.message = 'Internal Error'
create_role_assignment.side_effect = err
ok = _add_role_assignment(cli_ctx, role, sp, delay=0)
create_role_assignment.assert_called_with(cli_ctx, role, sp, True, scope=None)
self.assertFalse(ok, 'Expected _add_role_assignment to fail')
@mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
def test_browse_k8s(self, get_subscription_id):
acs_info = ContainerService(location="location", orchestrator_profile={}, master_profile={}, linux_profile={})
acs_info.orchestrator_profile = ContainerServiceOrchestratorProfile(
orchestrator_type=ContainerServiceOrchestratorTypes.kubernetes)
client, cmd = mock.MagicMock(), mock.MagicMock()
with mock.patch('azure.cli.command_modules.acs.custom._get_acs_info',
return_value=acs_info) as get_acs_info:
with mock.patch(
'azure.cli.command_modules.acs.custom._k8s_browse_internal') as k8s_browse:
_acs_browse_internal(client, cmd, acs_info, 'resource-group', 'name', False, 'ssh/key/file')
get_acs_info.assert_called_once()
k8s_browse.assert_called_with('name', acs_info, False, 'ssh/key/file')
@mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
def test_browse_dcos(self, get_subscription_id):
acs_info = ContainerService(location="location", orchestrator_profile={}, master_profile={}, linux_profile={})
acs_info.orchestrator_profile = ContainerServiceOrchestratorProfile(
orchestrator_type=ContainerServiceOrchestratorTypes.dcos)
client, cmd = mock.MagicMock(), mock.MagicMock()
with mock.patch(
'azure.cli.command_modules.acs.custom._dcos_browse_internal') as dcos_browse:
_acs_browse_internal(client, cmd, acs_info, 'resource-group', 'name', False, 'ssh/key/file')
dcos_browse.assert_called_with(acs_info, False, 'ssh/key/file')
    def test_merge_credentials_non_existent(self):
        # Merging from/into paths that don't exist must raise CLIError
        # rather than silently creating files.
        self.assertRaises(CLIError, merge_kubernetes_configurations, 'non', 'existent', False)
def test_merge_credentials_broken_yaml(self):
existing = tempfile.NamedTemporaryFile(delete=False)
existing.close()
addition = tempfile.NamedTemporaryFile(delete=False)
addition.close()
with open(existing.name, 'w+') as stream:
stream.write('{ broken')
self.addCleanup(os.remove, existing.name)
obj2 = {
'clusters': [
'cluster2'
],
'contexts': [
'context2'
],
'users': [
'user2'
],
'current-context': 'cluster2',
}
with open(addition.name, 'w+') as stream:
yaml.safe_dump(obj2, stream)
self.addCleanup(os.remove, addition.name)
self.assertRaises(CLIError, merge_kubernetes_configurations, existing.name, addition.name, False)
    def test_merge_credentials(self):
        # Two kubeconfigs with disjoint clusters/contexts/users merge
        # additively, and the addition's current-context wins.
        existing = tempfile.NamedTemporaryFile(delete=False)
        existing.close()
        addition = tempfile.NamedTemporaryFile(delete=False)
        addition.close()
        obj1 = {
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster1'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterUser_aztest_aztest'
                    },
                    'name': 'context1'
                }
            ],
            'current-context': 'context1',
            'kind': 'Config',
            'preferences': {},
            'users': [
                {
                    'name': 'user1',
                    'user': {
                        'client-certificate-data': 'clientcertificatedata1',
                        'client-key-data': 'clientkeydata1',
                        'token': 'token1'
                    }
                }
            ]
        }
        with open(existing.name, 'w+') as stream:
            yaml.safe_dump(obj1, stream)
        self.addCleanup(os.remove, existing.name)
        obj2 = {
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster2'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterUser_aztest_aztest'
                    },
                    'name': 'context2'
                }
            ],
            'current-context': 'aztest',
            'kind': 'Config',
            'preferences': {},
            'users': [
                {
                    'name': 'user2',
                    'user': {
                        'client-certificate-data': 'clientcertificatedata1',
                        'client-key-data': 'clientkeydata1',
                        'token': 'token1'
                    }
                }
            ]
        }
        with open(addition.name, 'w+') as stream:
            yaml.safe_dump(obj2, stream)
        self.addCleanup(os.remove, addition.name)
        merge_kubernetes_configurations(existing.name, addition.name, False)
        with open(existing.name, 'r') as stream:
            merged = yaml.safe_load(stream)
        # Entries from both sources survive, in order, with nothing dropped.
        self.assertEqual(len(merged['clusters']), 2)
        self.assertEqual(merged['clusters'], [obj1['clusters'][0], obj2['clusters'][0]])
        self.assertEqual(len(merged['contexts']), 2)
        self.assertEqual(merged['contexts'], [obj1['contexts'][0], obj2['contexts'][0]])
        self.assertEqual(len(merged['users']), 2)
        self.assertEqual(merged['users'], [obj1['users'][0], obj2['users'][0]])
        self.assertEqual(merged['current-context'], obj2['current-context'])
    def test_merge_admin_credentials(self):
        # Same cluster in both files: clusters are deduplicated, and the
        # admin context is renamed with an '-admin' suffix instead of
        # colliding, then becomes the current context.
        existing = tempfile.NamedTemporaryFile(delete=False)
        existing.close()
        addition = tempfile.NamedTemporaryFile(delete=False)
        addition.close()
        obj1 = {
            'apiVersion': 'v1',
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'aztest'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterUser_aztest_aztest'
                    },
                    'name': 'aztest'
                }
            ],
            'current-context': 'aztest',
            'kind': 'Config',
            'preferences': {},
            'users': [
                {
                    'name': 'clusterUser_aztest_aztest',
                    'user': {
                        'client-certificate-data': 'clientcertificatedata1',
                        'client-key-data': 'clientkeydata1',
                        'token': 'token1'
                    }
                }
            ]
        }
        with open(existing.name, 'w+') as stream:
            yaml.safe_dump(obj1, stream)
        self.addCleanup(os.remove, existing.name)
        obj2 = {
            'apiVersion': 'v1',
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'aztest'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterAdmin_aztest_aztest'
                    },
                    'name': 'aztest'
                }
            ],
            'current-context': 'aztest',
            'kind': 'Config',
            'preferences': {},
            'users': [
                {
                    'name': 'clusterAdmin_aztest_aztest',
                    'user': {
                        'client-certificate-data': 'someclientcertificatedata2',
                        'client-key-data': 'someclientkeydata2',
                        'token': 'token2'
                    }
                }
            ]
        }
        with open(addition.name, 'w+') as stream:
            yaml.safe_dump(obj2, stream)
        self.addCleanup(os.remove, addition.name)
        merge_kubernetes_configurations(existing.name, addition.name, False)
        with open(existing.name, 'r') as stream:
            merged = yaml.safe_load(stream)
        # Identical cluster entries collapse into one.
        self.assertEqual(len(merged['clusters']), 1)
        self.assertEqual([c['cluster'] for c in merged['clusters']],
                         [{'certificate-authority-data': 'certificateauthoritydata1',
                           'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'}])
        self.assertEqual(len(merged['contexts']), 2)
        self.assertEqual(merged['contexts'],
                         [{'context': {'cluster': 'aztest', 'user': 'clusterUser_aztest_aztest'},
                           'name': 'aztest'},
                          {'context': {'cluster': 'aztest', 'user': 'clusterAdmin_aztest_aztest'},
                           'name': 'aztest-admin'}])
        self.assertEqual(len(merged['users']), 2)
        self.assertEqual([u['name'] for u in merged['users']],
                         ['clusterUser_aztest_aztest', 'clusterAdmin_aztest_aztest'])
        self.assertEqual(merged['current-context'], 'aztest-admin')
    def test_merge_credentials_missing(self):
        # A None section on either side is treated as empty: the other
        # file's entries are kept verbatim instead of raising.
        existing = tempfile.NamedTemporaryFile(delete=False)
        existing.close()
        addition = tempfile.NamedTemporaryFile(delete=False)
        addition.close()
        obj1 = {
            'clusters': None,
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterUser_aztest_aztest'
                    },
                    'name': 'context1'
                }
            ],
            'current-context': 'context1',
            'kind': 'Config',
            'preferences': {},
            'users': [
                {
                    'name': 'user1',
                    'user': {
                        'client-certificate-data': 'clientcertificatedata1',
                        'client-key-data': 'clientkeydata1',
                        'token': 'token1'
                    }
                }
            ]
        }
        with open(existing.name, 'w+') as stream:
            yaml.safe_dump(obj1, stream)
        self.addCleanup(os.remove, existing.name)
        obj2 = {
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://aztest-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster2'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'aztest',
                        'user': 'clusterUser_aztest_aztest'
                    },
                    'name': 'context2'
                }
            ],
            'current-context': 'context2',
            'kind': 'Config',
            'preferences': {},
            'users': None
        }
        with open(addition.name, 'w+') as stream:
            yaml.safe_dump(obj2, stream)
        self.addCleanup(os.remove, addition.name)
        merge_kubernetes_configurations(existing.name, addition.name, False)
        with open(existing.name, 'r') as stream:
            merged = yaml.safe_load(stream)
        self.assertEqual(len(merged['clusters']), 1)
        self.assertEqual(merged['clusters'], [obj2['clusters'][0]])
        self.assertEqual(len(merged['contexts']), 2)
        self.assertEqual(merged['contexts'], [obj1['contexts'][0], obj2['contexts'][0]])
        self.assertEqual(len(merged['users']), 1)
        self.assertEqual(merged['users'], [obj1['users'][0]])
        self.assertEqual(merged['current-context'], obj2['current-context'])
    def test_merge_credentials_already_present(self):
        """Merge two kubeconfigs that share an entry name ('cluster2').

        Without the overwrite flag the merge must raise CLIError; with it,
        the addition's same-named entries replace the existing ones while
        non-conflicting entries from both sides are kept.
        """
        # delete=False so the files can be reopened by name after close().
        existing = tempfile.NamedTemporaryFile(delete=False)
        existing.close()
        addition = tempfile.NamedTemporaryFile(delete=False)
        addition.close()
        # Existing config: two clusters (cluster1, cluster2), two contexts,
        # two users.
        obj1 = {
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://cluster1-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster1'
                },
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://cluster2-aztest-abc123-abcd1234.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster2'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'cluster1',
                        'user': 'cluster1User_aztest_aztest'
                    },
                    'name': 'context1'
                },
                {
                    'context': {
                        'cluster': 'cluster1',
                        'user': 'cluster1User_aztest_aztest'
                    },
                    'name': 'context2'
                }
            ],
            'users': [
                {
                    'name': 'cluster1User_aztest_aztest',
                    'user': {
                        'client-certificate-data': 'someclientcertificatedata2',
                        'client-key-data': 'someclientkeydata2',
                        'token': 'token2'
                    }
                },
                {
                    'name': 'cluster2User_aztest_aztest',
                    'user': {
                        'client-certificate-data': 'someclientcertificatedata2',
                        'client-key-data': 'someclientkeydata2',
                        'token': 'token2'
                    }
                }
            ],
            'current-context': 'context1',
        }
        with open(existing.name, 'w+') as stream:
            yaml.safe_dump(obj1, stream)
        # Addition config: reuses the names 'cluster2', 'context2' and
        # 'cluster2User_aztest_aztest' with different contents.
        obj2 = {
            'clusters': [
                {
                    'cluster': {
                        'certificate-authority-data': 'certificateauthoritydata1',
                        'server': 'https://other2-aztest-abc456-abcd4567.hcp.eastus.azmk8s.io:443'
                    },
                    'name': 'cluster2'
                }
            ],
            'contexts': [
                {
                    'context': {
                        'cluster': 'cluster2',
                        'user': 'cluster1_aztest_aztest'
                    },
                    'name': 'context2'
                }
            ],
            'users': [
                {
                    'name': 'cluster2User_aztest_aztest',
                    'user': {
                        'client-certificate-data': 'someclientcertificatedata2',
                        'client-key-data': 'someclientkeydata2',
                        'token': 'token3'
                    }
                }
            ],
            'current-context': 'some-context',
        }
        with open(addition.name, 'w+') as stream:
            yaml.safe_dump(obj2, stream)
        # Conflicting names without overwrite → CLIError, file untouched.
        with self.assertRaises(CLIError):
            merge_kubernetes_configurations(existing.name, addition.name, False)
        # With overwrite=True the merge succeeds.
        merge_kubernetes_configurations(existing.name, addition.name, True)
        self.addCleanup(os.remove, addition.name)
        with open(existing.name, 'r') as stream:
            merged = yaml.safe_load(stream)
        self.addCleanup(os.remove, existing.name)
        # The duplicate-named entries now carry the addition's contents;
        # unique entries from the existing config are preserved.
        self.assertEqual(len(merged['clusters']), 2)
        expected_clusters = [
            obj1['clusters'][0],
            obj2['clusters'][0]
        ]
        self.assertEqual(merged['clusters'], expected_clusters)
        self.assertEqual(len(merged['contexts']), 2)
        expected_contexts = [
            obj1['contexts'][0],
            obj2['contexts'][0]
        ]
        self.assertEqual(merged['contexts'], expected_contexts)
        self.assertEqual(len(merged['users']), 2)
        expected_users = [
            obj1['users'][0],
            obj2['users'][0]
        ]
        self.assertEqual(merged['users'], expected_users)
        self.assertEqual(merged['current-context'], obj2['current-context'])
    @mock.patch('azure.cli.command_modules.acs.addonconfiguration.get_rg_location', return_value='eastus')
    @mock.patch('azure.cli.command_modules.acs.addonconfiguration.get_resource_groups_client', autospec=True)
    @mock.patch('azure.cli.command_modules.acs.addonconfiguration.get_resources_client', autospec=True)
    def test_update_addons(self, rg_def, get_resource_groups_client, get_resources_client):
        """Walk _update_addons through enable/disable transitions per addon.

        Covers http_application_routing, monitoring, azure-policy,
        kube-dashboard (including pre-existing lower-cased profile keys),
        virtual-node and ingress-appgw, and checks that enabling monitoring
        twice in a row raises CLIError.
        """
        # http_application_routing enabled
        instance = mock.MagicMock()
        instance.addon_profiles = None  # _update_addons must tolerate no prior profiles
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'http_application_routing', enable=True)
        self.assertIn(CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME, instance.addon_profiles)
        addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertTrue(addon_profile.enabled)
        # http_application_routing disabled
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'http_application_routing', enable=False)
        addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertFalse(addon_profile.enabled)
        # monitoring added
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'monitoring', enable=True)
        monitoring_addon_profile = instance.addon_profiles[CONST_MONITORING_ADDON_NAME]
        self.assertTrue(monitoring_addon_profile.enabled)
        # toggling monitoring must not re-enable the disabled routing addon
        routing_addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertFalse(routing_addon_profile.enabled)
        # monitoring disabled, routing enabled
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'monitoring', enable=False)
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'http_application_routing', enable=True)
        monitoring_addon_profile = instance.addon_profiles[CONST_MONITORING_ADDON_NAME]
        self.assertFalse(monitoring_addon_profile.enabled)
        routing_addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertTrue(routing_addon_profile.enabled)
        self.assertEqual(sorted(list(instance.addon_profiles)), [CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME, CONST_MONITORING_ADDON_NAME])
        # azurepolicy added
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'azure-policy', enable=True)
        azurepolicy_addon_profile = instance.addon_profiles[CONST_AZURE_POLICY_ADDON_NAME]
        self.assertTrue(azurepolicy_addon_profile.enabled)
        routing_addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertTrue(routing_addon_profile.enabled)
        monitoring_addon_profile = instance.addon_profiles[CONST_MONITORING_ADDON_NAME]
        self.assertFalse(monitoring_addon_profile.enabled)
        # azurepolicy disabled, routing enabled
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'azure-policy', enable=False)
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'http_application_routing', enable=True)
        azurepolicy_addon_profile = instance.addon_profiles[CONST_AZURE_POLICY_ADDON_NAME]
        self.assertFalse(azurepolicy_addon_profile.enabled)
        monitoring_addon_profile = instance.addon_profiles[CONST_MONITORING_ADDON_NAME]
        self.assertFalse(monitoring_addon_profile.enabled)
        routing_addon_profile = instance.addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME]
        self.assertTrue(routing_addon_profile.enabled)
        self.assertEqual(sorted(list(instance.addon_profiles)), [CONST_AZURE_POLICY_ADDON_NAME, CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME, CONST_MONITORING_ADDON_NAME])
        # kube-dashboard disabled, no existing dashboard addon profile
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'kube-dashboard', enable=False)
        dashboard_addon_profile = instance.addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME]
        self.assertFalse(dashboard_addon_profile.enabled)
        # kube-dashboard enabled, no existing dashboard addon profile
        instance.addon_profiles.pop(CONST_KUBE_DASHBOARD_ADDON_NAME, None)
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'kube-dashboard', enable=True)
        dashboard_addon_profile = instance.addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME]
        self.assertTrue(dashboard_addon_profile.enabled)
        # kube-dashboard disabled, there's existing dashboard addon profile
        instance.addon_profiles.pop(CONST_KUBE_DASHBOARD_ADDON_NAME, None)
        # test lower cased key name — _update_addons must match profile keys
        # case-insensitively and normalize to the canonical constant
        instance.addon_profiles['kubedashboard'] = ManagedClusterAddonProfile(enabled=True)
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'kube-dashboard', enable=False)
        dashboard_addon_profile = instance.addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME]
        self.assertFalse(dashboard_addon_profile.enabled)
        # kube-dashboard enabled, there's existing dashboard addon profile
        instance.addon_profiles.pop(CONST_KUBE_DASHBOARD_ADDON_NAME, None)
        # test lower cased key name
        instance.addon_profiles['kubedashboard'] = ManagedClusterAddonProfile(enabled=False)
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'kube-dashboard', enable=True)
        dashboard_addon_profile = instance.addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME]
        self.assertTrue(dashboard_addon_profile.enabled)
        # monitoring enabled and then enabled again should error
        instance = mock.Mock()
        instance.addon_profiles = None
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'monitoring', enable=True)
        with self.assertRaises(CLIError):
            instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                      'clitest000001', 'clitest000001', 'monitoring', enable=True)
        # virtual-node enabled
        instance = mock.MagicMock()
        instance.addon_profiles = None
        cmd = mock.MagicMock()  # NOTE(review): unused local — candidate for removal
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'virtual-node', enable=True, subnet_name='foo')
        self.assertIn('aciConnectorLinux', instance.addon_profiles)
        addon_profile = instance.addon_profiles['aciConnectorLinux']
        self.assertTrue(addon_profile.enabled)
        # virtual-node disabled
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'virtual-node', enable=False)
        addon_profile = instance.addon_profiles['aciConnectorLinux']
        self.assertFalse(addon_profile.enabled)
        # ingress-appgw enabled
        instance = mock.MagicMock()
        instance.addon_profiles = None
        cmd = mock.MagicMock()  # NOTE(review): unused local — candidate for removal
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'ingress-appgw', enable=True, appgw_subnet_cidr='10.2.0.0/16')
        self.assertIn('ingressApplicationGateway', instance.addon_profiles)
        addon_profile = instance.addon_profiles['ingressApplicationGateway']
        self.assertTrue(addon_profile.enabled)
        # ingress-appgw disabled
        instance = _update_addons(MockCmd(self.cli), instance, '00000000-0000-0000-0000-000000000000',
                                  'clitest000001', 'clitest000001', 'ingress-appgw', enable=False)
        addon_profile = instance.addon_profiles['ingressApplicationGateway']
        self.assertFalse(addon_profile.enabled)
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubectl_emit_warnings(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = lambda _, install_location: open(install_location, 'a').close()
try:
temp_dir = tempfile.mkdtemp() # tempfile.TemporaryDirectory() is no available on 2.7
test_location = os.path.join(temp_dir, 'kubectl')
k8s_install_kubectl(mock.MagicMock(), client_version='1.2.3', install_location=test_location)
self.assertEqual(mock_url_retrieve.call_count, 1)
# 2 warnings, 1st for download result; 2nd for updating PATH
self.assertEqual(logger_mock.warning.call_count, 2) # 2 warnings, one for download result
finally:
shutil.rmtree(temp_dir)
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubectl_create_installation_dir(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = lambda _, install_location: open(install_location, 'a').close()
try:
temp_dir = tempfile.mkdtemp() # tempfile.TemporaryDirectory() is no available on 2.7
test_location = os.path.join(temp_dir, 'foo', 'kubectl')
k8s_install_kubectl(mock.MagicMock(), client_version='1.2.3', install_location=test_location)
self.assertTrue(os.path.exists(test_location))
finally:
shutil.rmtree(temp_dir)
@unittest.skip('Update api version')
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubelogin_emit_warnings(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = create_kubelogin_zip
try:
temp_dir = os.path.realpath(tempfile.mkdtemp()) # tempfile.TemporaryDirectory() is no available on 2.7
test_location = os.path.join(temp_dir, 'kubelogin')
k8s_install_kubelogin(mock.MagicMock(), client_version='0.0.4', install_location=test_location)
self.assertEqual(mock_url_retrieve.call_count, 1)
# 2 warnings, 1st for download result; 2nd for updating PATH
self.assertEqual(logger_mock.warning.call_count, 2) # 2 warnings, one for download result
finally:
shutil.rmtree(temp_dir)
@unittest.skip('Update api version')
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubelogin_create_installation_dir(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = create_kubelogin_zip
try:
temp_dir = tempfile.mkdtemp() # tempfile.TemporaryDirectory() is no available on 2.7
test_location = os.path.join(temp_dir, 'foo', 'kubelogin')
k8s_install_kubelogin(mock.MagicMock(), client_version='0.0.4', install_location=test_location)
self.assertTrue(os.path.exists(test_location))
finally:
shutil.rmtree(temp_dir)
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubectl_with_custom_source_url(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = lambda _, install_location: open(install_location, 'a').close()
try:
temp_dir = tempfile.mkdtemp()
test_location = os.path.join(temp_dir, 'foo', 'kubectl')
test_ver = '1.2.5'
test_source_url = 'http://url1'
k8s_install_kubectl(mock.MagicMock(), client_version=test_ver, install_location=test_location, source_url=test_source_url)
mock_url_retrieve.assert_called_with(mockUrlretrieveUrlValidator(test_source_url, test_ver), mock.ANY)
finally:
shutil.rmtree(temp_dir)
@unittest.skip('No such file or directory')
@mock.patch('azure.cli.command_modules.acs.custom._urlretrieve')
@mock.patch('azure.cli.command_modules.acs.custom.logger')
def test_k8s_install_kubelogin_with_custom_source_url(self, logger_mock, mock_url_retrieve):
mock_url_retrieve.side_effect = create_kubelogin_zip
try:
temp_dir = tempfile.mkdtemp()
test_location = os.path.join(temp_dir, 'foo', 'kubelogin')
test_ver = '1.2.6'
test_source_url = 'http://url2'
k8s_install_kubelogin(mock.MagicMock(), client_version=test_ver, install_location=test_location, source_url=test_source_url)
mock_url_retrieve.assert_called_with(mockUrlretrieveUrlValidator(test_source_url, test_ver), mock.ANY)
finally:
shutil.rmtree(temp_dir)
class mockUrlretrieveUrlValidator(object):
    """Equality matcher for mock ``assert_called_with``.

    Compares equal to any string that starts with the configured base URL
    and mentions the configured version somewhere in it.
    """

    def __init__(self, url, version):
        self.url = url
        self.version = version

    def __eq__(self, other):
        matches_base = other.startswith(self.url)
        mentions_version = self.version in other
        return matches_base and mentions_version
def create_kubelogin_zip(file_url, download_path):
    """Fake ``_urlretrieve`` side effect: write a minimal kubelogin release
    archive to *download_path*.

    The archive mirrors the real release layout: a single (empty)
    ``bin/<platform>_amd64/kubelogin`` entry. *file_url* is accepted only to
    satisfy the ``_urlretrieve(url, path)`` signature and is ignored.

    Unlike the previous implementation this does not ``os.chdir`` into the
    temp dir (chdir is process-global and would also make a *relative*
    ``download_path`` land inside the temp dir and be deleted with it);
    the relative archive entry name is produced via ``arcname`` instead.
    """
    import zipfile
    temp_dir = os.path.realpath(tempfile.mkdtemp())
    try:
        bin_dir = 'bin'
        system = platform.system()
        if system == 'Windows':
            bin_dir += '/windows_amd64'
        elif system == 'Linux':
            bin_dir += '/linux_amd64'
        elif system == 'Darwin':
            bin_dir += '/darwin_amd64'
        os.makedirs(os.path.join(temp_dir, bin_dir))
        # Relative path used as the archive entry name (zipfile normalizes
        # os.sep to '/' in arcnames).
        bin_location = os.path.join(bin_dir, 'kubelogin')
        open(os.path.join(temp_dir, bin_location), 'a').close()
        with zipfile.ZipFile(download_path, 'w', zipfile.ZIP_DEFLATED) as outZipFile:
            outZipFile.write(os.path.join(temp_dir, bin_location), arcname=bin_location)
    finally:
        shutil.rmtree(temp_dir)
|
{
"content_hash": "ec9af0eae556a0ba56d28b4487448bc1",
"timestamp": "",
"source": "github",
"line_count": 841,
"max_line_length": 168,
"avg_line_length": 46.30083234244947,
"alnum_prop": 0.5673746115719459,
"repo_name": "yugangw-msft/azure-cli",
"id": "ae84b8174fef5e7efff8d9941ec5a13f8c708d69",
"size": "39305",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/acs/tests/hybrid_2020_09_01/test_custom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
}
|
"""Tests for object_detection.meta_architectures.faster_rcnn_meta_arch."""
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch_test_lib
class FasterRCNNMetaArchTest(
    faster_rcnn_meta_arch_test_lib.FasterRCNNMetaArchTestBase,
    parameterized.TestCase):
  """Tests for FasterRCNNMetaArch postprocess/predict shapes and values.

  Bug fix relative to the previous version: the mask range checks used
  ``np.amax(masks <= 1.0)`` / ``np.amin(masks >= 0.0)``, which reduce a
  *boolean* array and therefore only asserted that *some* element was in
  range. They now compare the reduced value itself
  (``np.amax(masks) <= 1.0``), asserting that *all* elements are in range.
  """

  def test_postprocess_second_stage_only_inference_mode_with_masks(self):
    """Postprocess with mask predictions: padded proposals beyond
    num_proposals get zero score/mask; output masks lie in [0, 1]."""
    model = self._build_model(
        is_training=False, number_of_stages=2, second_stage_batch_size=6)
    batch_size = 2
    total_num_padded_proposals = batch_size * model.max_num_proposals
    proposal_boxes = tf.constant(
        [[[1, 1, 2, 3],
          [0, 0, 1, 1],
          [.5, .5, .6, .6],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
         [[2, 3, 6, 8],
          [1, 2, 5, 3],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)
    num_proposals = tf.constant([3, 2], dtype=tf.int32)
    refined_box_encodings = tf.zeros(
        [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32)
    class_predictions_with_background = tf.ones(
        [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)
    image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)
    mask_height = 2
    mask_width = 2
    # Large positive logits (30.) so valid output masks saturate to 1.
    mask_predictions = 30. * tf.ones(
        [total_num_padded_proposals, model.num_classes,
         mask_height, mask_width], dtype=tf.float32)
    exp_detection_masks = np.array([[[[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]]],
                                    [[[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[0, 0], [0, 0]]]])
    _, true_image_shapes = model.preprocess(tf.zeros(image_shape))
    detections = model.postprocess({
        'refined_box_encodings': refined_box_encodings,
        'class_predictions_with_background': class_predictions_with_background,
        'num_proposals': num_proposals,
        'proposal_boxes': proposal_boxes,
        'image_shape': image_shape,
        'mask_predictions': mask_predictions
    }, true_image_shapes)
    with self.test_session() as sess:
      detections_out = sess.run(detections)
      self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
      self.assertAllClose(detections_out['detection_scores'],
                          [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
      self.assertAllClose(detections_out['detection_classes'],
                          [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
      self.assertAllClose(detections_out['num_detections'], [5, 4])
      self.assertAllClose(detections_out['detection_masks'],
                          exp_detection_masks)
      # All mask values must lie in [0, 1] (fixed boolean-reduction bug).
      self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
      self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)

  def test_postprocess_second_stage_only_inference_mode_with_calibration(self):
    """With a constant calibration mapping of 0.5, all surviving detection
    scores become 0.5; pruned padding keeps score 0."""
    model = self._build_model(
        is_training=False, number_of_stages=2, second_stage_batch_size=6,
        calibration_mapping_value=0.5)
    batch_size = 2
    total_num_padded_proposals = batch_size * model.max_num_proposals
    proposal_boxes = tf.constant(
        [[[1, 1, 2, 3],
          [0, 0, 1, 1],
          [.5, .5, .6, .6],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
         [[2, 3, 6, 8],
          [1, 2, 5, 3],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)
    num_proposals = tf.constant([3, 2], dtype=tf.int32)
    refined_box_encodings = tf.zeros(
        [total_num_padded_proposals, model.num_classes, 4], dtype=tf.float32)
    class_predictions_with_background = tf.ones(
        [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)
    image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)
    mask_height = 2
    mask_width = 2
    mask_predictions = 30. * tf.ones(
        [total_num_padded_proposals, model.num_classes,
         mask_height, mask_width], dtype=tf.float32)
    exp_detection_masks = np.array([[[[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]]],
                                    [[[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[1, 1], [1, 1]],
                                     [[0, 0], [0, 0]]]])
    _, true_image_shapes = model.preprocess(tf.zeros(image_shape))
    detections = model.postprocess({
        'refined_box_encodings': refined_box_encodings,
        'class_predictions_with_background': class_predictions_with_background,
        'num_proposals': num_proposals,
        'proposal_boxes': proposal_boxes,
        'image_shape': image_shape,
        'mask_predictions': mask_predictions
    }, true_image_shapes)
    with self.test_session() as sess:
      detections_out = sess.run(detections)
      self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
      # All scores map to 0.5, except for the final one, which is pruned.
      self.assertAllClose(detections_out['detection_scores'],
                          [[0.5, 0.5, 0.5, 0.5, 0.5],
                           [0.5, 0.5, 0.5, 0.5, 0.0]])
      self.assertAllClose(detections_out['detection_classes'],
                          [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
      self.assertAllClose(detections_out['num_detections'], [5, 4])
      self.assertAllClose(detections_out['detection_masks'],
                          exp_detection_masks)
      # All mask values must lie in [0, 1] (fixed boolean-reduction bug).
      self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
      self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)

  def test_postprocess_second_stage_only_inference_mode_with_shared_boxes(self):
    """Postprocess when box regression shares a single box across classes
    (second dim of refined_box_encodings is 1 instead of num_classes)."""
    model = self._build_model(
        is_training=False, number_of_stages=2, second_stage_batch_size=6)
    batch_size = 2
    total_num_padded_proposals = batch_size * model.max_num_proposals
    proposal_boxes = tf.constant(
        [[[1, 1, 2, 3],
          [0, 0, 1, 1],
          [.5, .5, .6, .6],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
         [[2, 3, 6, 8],
          [1, 2, 5, 3],
          4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]], dtype=tf.float32)
    num_proposals = tf.constant([3, 2], dtype=tf.int32)
    # This has 1 box instead of one for each class.
    refined_box_encodings = tf.zeros(
        [total_num_padded_proposals, 1, 4], dtype=tf.float32)
    class_predictions_with_background = tf.ones(
        [total_num_padded_proposals, model.num_classes+1], dtype=tf.float32)
    image_shape = tf.constant([batch_size, 36, 48, 3], dtype=tf.int32)
    _, true_image_shapes = model.preprocess(tf.zeros(image_shape))
    detections = model.postprocess({
        'refined_box_encodings': refined_box_encodings,
        'class_predictions_with_background': class_predictions_with_background,
        'num_proposals': num_proposals,
        'proposal_boxes': proposal_boxes,
        'image_shape': image_shape,
    }, true_image_shapes)
    with self.test_session() as sess:
      detections_out = sess.run(detections)
      self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
      self.assertAllClose(detections_out['detection_scores'],
                          [[1, 1, 1, 1, 1], [1, 1, 1, 1, 0]])
      self.assertAllClose(detections_out['detection_classes'],
                          [[0, 0, 0, 1, 1], [0, 0, 1, 1, 0]])
      self.assertAllClose(detections_out['num_detections'], [5, 4])

  @parameterized.parameters(
      {'masks_are_class_agnostic': False},
      {'masks_are_class_agnostic': True},
  )
  def test_predict_correct_shapes_in_inference_mode_three_stages_with_masks(
      self, masks_are_class_agnostic):
    """predict() with three stages + masks yields the expected tensor set
    and shapes for several static/dynamic input-shape combinations."""
    batch_size = 2
    image_size = 10
    max_num_proposals = 8
    initial_crop_size = 3
    maxpool_stride = 1
    # Mix of fully static, partially known, and fully dynamic input shapes.
    input_shapes = [(batch_size, image_size, image_size, 3),
                    (None, image_size, image_size, 3),
                    (batch_size, None, None, 3),
                    (None, None, None, 3)]
    expected_num_anchors = image_size * image_size * 3 * 3
    expected_shapes = {
        'rpn_box_predictor_features':
            (2, image_size, image_size, 512),
        'rpn_features_to_crop': (2, image_size, image_size, 3),
        'image_shape': (4,),
        'rpn_box_encodings': (2, expected_num_anchors, 4),
        'rpn_objectness_predictions_with_background':
            (2, expected_num_anchors, 2),
        'anchors': (expected_num_anchors, 4),
        'refined_box_encodings': (2 * max_num_proposals, 2, 4),
        'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
        'num_proposals': (2,),
        'proposal_boxes': (2, max_num_proposals, 4),
        'proposal_boxes_normalized': (2, max_num_proposals, 4),
        'box_classifier_features':
            self._get_box_classifier_features_shape(image_size,
                                                    batch_size,
                                                    max_num_proposals,
                                                    initial_crop_size,
                                                    maxpool_stride,
                                                    3)
    }
    for input_shape in input_shapes:
      test_graph = tf.Graph()
      with test_graph.as_default():
        model = self._build_model(
            is_training=False,
            number_of_stages=3,
            second_stage_batch_size=2,
            predict_masks=True,
            masks_are_class_agnostic=masks_are_class_agnostic)
        preprocessed_inputs = tf.placeholder(tf.float32, shape=input_shape)
        _, true_image_shapes = model.preprocess(preprocessed_inputs)
        result_tensor_dict = model.predict(preprocessed_inputs,
                                           true_image_shapes)
        init_op = tf.global_variables_initializer()
      with self.test_session(graph=test_graph) as sess:
        sess.run(init_op)
        tensor_dict_out = sess.run(result_tensor_dict, feed_dict={
            preprocessed_inputs:
                np.zeros((batch_size, image_size, image_size, 3))})
      self.assertEqual(
          set(tensor_dict_out.keys()),
          set(expected_shapes.keys()).union(
              set([
                  'detection_boxes', 'detection_scores', 'detection_classes',
                  'detection_masks', 'num_detections', 'mask_predictions',
                  'raw_detection_boxes', 'raw_detection_scores'
              ])))
      for key in expected_shapes:
        self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
      self.assertAllEqual(tensor_dict_out['detection_boxes'].shape, [2, 5, 4])
      self.assertAllEqual(tensor_dict_out['detection_masks'].shape,
                          [2, 5, 14, 14])
      self.assertAllEqual(tensor_dict_out['detection_classes'].shape, [2, 5])
      self.assertAllEqual(tensor_dict_out['detection_scores'].shape, [2, 5])
      self.assertAllEqual(tensor_dict_out['num_detections'].shape, [2])
      # Class-agnostic masks collapse the class dimension to 1.
      num_classes = 1 if masks_are_class_agnostic else 2
      self.assertAllEqual(tensor_dict_out['mask_predictions'].shape,
                          [10, num_classes, 14, 14])

  @parameterized.parameters(
      {'masks_are_class_agnostic': False},
      {'masks_are_class_agnostic': True},
  )
  def test_predict_gives_correct_shapes_in_train_mode_both_stages_with_masks(
      self, masks_are_class_agnostic):
    """predict() in training mode (with groundtruth provided) yields the
    expected tensor set and shapes, including mask predictions."""
    test_graph = tf.Graph()
    with test_graph.as_default():
      model = self._build_model(
          is_training=True,
          number_of_stages=3,
          second_stage_batch_size=7,
          predict_masks=True,
          masks_are_class_agnostic=masks_are_class_agnostic)
      batch_size = 2
      image_size = 10
      max_num_proposals = 7
      initial_crop_size = 3
      maxpool_stride = 1
      image_shape = (batch_size, image_size, image_size, 3)
      preprocessed_inputs = tf.zeros(image_shape, dtype=tf.float32)
      groundtruth_boxes_list = [
          tf.constant([[0, 0, .5, .5], [.5, .5, 1, 1]], dtype=tf.float32),
          tf.constant([[0, .5, .5, 1], [.5, 0, 1, .5]], dtype=tf.float32)
      ]
      groundtruth_classes_list = [
          tf.constant([[1, 0], [0, 1]], dtype=tf.float32),
          tf.constant([[1, 0], [1, 0]], dtype=tf.float32)
      ]
      groundtruth_weights_list = [
          tf.constant([1, 1], dtype=tf.float32),
          tf.constant([1, 1], dtype=tf.float32)]
      _, true_image_shapes = model.preprocess(tf.zeros(image_shape))
      model.provide_groundtruth(
          groundtruth_boxes_list,
          groundtruth_classes_list,
          groundtruth_weights_list=groundtruth_weights_list)
      result_tensor_dict = model.predict(preprocessed_inputs, true_image_shapes)
      mask_shape_1 = 1 if masks_are_class_agnostic else model._num_classes
      expected_shapes = {
          'rpn_box_predictor_features': (2, image_size, image_size, 512),
          'rpn_features_to_crop': (2, image_size, image_size, 3),
          'image_shape': (4,),
          'refined_box_encodings': (2 * max_num_proposals, 2, 4),
          'class_predictions_with_background': (2 * max_num_proposals, 2 + 1),
          'num_proposals': (2,),
          'proposal_boxes': (2, max_num_proposals, 4),
          'proposal_boxes_normalized': (2, max_num_proposals, 4),
          'box_classifier_features':
              self._get_box_classifier_features_shape(
                  image_size, batch_size, max_num_proposals, initial_crop_size,
                  maxpool_stride, 3),
          'mask_predictions': (2 * max_num_proposals, mask_shape_1, 14, 14)
      }
      init_op = tf.global_variables_initializer()
      with self.test_session(graph=test_graph) as sess:
        sess.run(init_op)
        tensor_dict_out = sess.run(result_tensor_dict)
        self.assertEqual(
            set(tensor_dict_out.keys()),
            set(expected_shapes.keys()).union(
                set([
                    'rpn_box_encodings',
                    'rpn_objectness_predictions_with_background',
                    'anchors',
                ])))
        for key in expected_shapes:
          self.assertAllEqual(tensor_dict_out[key].shape, expected_shapes[key])
        # Anchor count is dynamic; only check rank/last-dim consistency and
        # that the RPN outputs agree with it.
        anchors_shape_out = tensor_dict_out['anchors'].shape
        self.assertLen(anchors_shape_out, 2)
        self.assertEqual(4, anchors_shape_out[1])
        num_anchors_out = anchors_shape_out[0]
        self.assertAllEqual(tensor_dict_out['rpn_box_encodings'].shape,
                            (2, num_anchors_out, 4))
        self.assertAllEqual(
            tensor_dict_out['rpn_objectness_predictions_with_background'].shape,
            (2, num_anchors_out, 2))

  def test_postprocess_third_stage_only_inference_mode(self):
    """Third-stage postprocess accepts both fully static and fully dynamic
    placeholder shapes and produces identically shaped detections."""
    num_proposals_shapes = [(2), (None)]
    refined_box_encodings_shapes = [(16, 2, 4), (None, 2, 4)]
    class_predictions_with_background_shapes = [(16, 3), (None, 3)]
    proposal_boxes_shapes = [(2, 8, 4), (None, 8, 4)]
    batch_size = 2
    image_shape = np.array((2, 36, 48, 3), dtype=np.int32)
    for (num_proposals_shape, refined_box_encoding_shape,
         class_predictions_with_background_shape,
         proposal_boxes_shape) in zip(num_proposals_shapes,
                                      refined_box_encodings_shapes,
                                      class_predictions_with_background_shapes,
                                      proposal_boxes_shapes):
      tf_graph = tf.Graph()
      with tf_graph.as_default():
        model = self._build_model(
            is_training=False, number_of_stages=3,
            second_stage_batch_size=6, predict_masks=True)
        total_num_padded_proposals = batch_size * model.max_num_proposals
        proposal_boxes = np.array(
            [[[1, 1, 2, 3],
              [0, 0, 1, 1],
              [.5, .5, .6, .6],
              4*[0], 4*[0], 4*[0], 4*[0], 4*[0]],
             [[2, 3, 6, 8],
              [1, 2, 5, 3],
              4*[0], 4*[0], 4*[0], 4*[0], 4*[0], 4*[0]]])
        num_proposals = np.array([3, 2], dtype=np.int32)
        refined_box_encodings = np.zeros(
            [total_num_padded_proposals, model.num_classes, 4])
        class_predictions_with_background = np.ones(
            [total_num_padded_proposals, model.num_classes+1])
        num_proposals_placeholder = tf.placeholder(tf.int32,
                                                   shape=num_proposals_shape)
        refined_box_encodings_placeholder = tf.placeholder(
            tf.float32, shape=refined_box_encoding_shape)
        class_predictions_with_background_placeholder = tf.placeholder(
            tf.float32, shape=class_predictions_with_background_shape)
        proposal_boxes_placeholder = tf.placeholder(
            tf.float32, shape=proposal_boxes_shape)
        image_shape_placeholder = tf.placeholder(tf.int32, shape=(4))
        _, true_image_shapes = model.preprocess(
            tf.zeros(image_shape_placeholder))
        detections = model.postprocess({
            'refined_box_encodings': refined_box_encodings_placeholder,
            'class_predictions_with_background':
                class_predictions_with_background_placeholder,
            'num_proposals': num_proposals_placeholder,
            'proposal_boxes': proposal_boxes_placeholder,
            'image_shape': image_shape_placeholder,
            'detection_boxes': tf.zeros([2, 5, 4]),
            'detection_masks': tf.zeros([2, 5, 14, 14]),
            'detection_scores': tf.zeros([2, 5]),
            'detection_classes': tf.zeros([2, 5]),
            'num_detections': tf.zeros([2]),
        }, true_image_shapes)
      with self.test_session(graph=tf_graph) as sess:
        detections_out = sess.run(
            detections,
            feed_dict={
                refined_box_encodings_placeholder: refined_box_encodings,
                class_predictions_with_background_placeholder:
                    class_predictions_with_background,
                num_proposals_placeholder: num_proposals,
                proposal_boxes_placeholder: proposal_boxes,
                image_shape_placeholder: image_shape
            })
      self.assertAllEqual(detections_out['detection_boxes'].shape, [2, 5, 4])
      self.assertAllEqual(detections_out['detection_masks'].shape,
                          [2, 5, 14, 14])
      self.assertAllClose(detections_out['detection_scores'].shape, [2, 5])
      self.assertAllClose(detections_out['detection_classes'].shape, [2, 5])
      self.assertAllClose(detections_out['num_detections'].shape, [2])
      # All mask values must lie in [0, 1] (fixed boolean-reduction bug).
      self.assertTrue(np.amax(detections_out['detection_masks']) <= 1.0)
      self.assertTrue(np.amin(detections_out['detection_masks']) >= 0.0)

  def _get_box_classifier_features_shape(self,
                                         image_size,
                                         batch_size,
                                         max_num_proposals,
                                         initial_crop_size,
                                         maxpool_stride,
                                         num_features):
    """Expected shape of the cropped-and-pooled box classifier features.

    Uses floor division so the result is an int tuple on both Python 2
    and Python 3 (true division previously yielded floats on py3).
    """
    return (batch_size * max_num_proposals,
            initial_crop_size // maxpool_stride,
            initial_crop_size // maxpool_stride,
            num_features)
if __name__ == '__main__':
  # Discover and run all test cases in this module via the TF test runner.
  tf.test.main()
|
{
"content_hash": "4b3b2aa40df48a668415026bf9d6f089",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 80,
"avg_line_length": 46.413145539906104,
"alnum_prop": 0.550070807202104,
"repo_name": "derekjchow/models",
"id": "810d65a232484c2259c95a51bc6d553193c184c9",
"size": "20462",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "research/object_detection/meta_architectures/faster_rcnn_meta_arch_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
class Migration(migrations.Migration):
    """Initial migration for the workshops app: creates the Workshop model."""

    initial = True

    dependencies = [
        ('cfp', '0014_auto_20170605_1545'),
        ('events', '0004_auto_20170516_1156'),
    ]

    operations = [
        migrations.CreateModel(
            name='Workshop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=80)),
                # Slug is auto-generated from the title and must be unique.
                ('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from='title', unique=True)),
                ('about', models.TextField()),
                ('abstract', models.TextField()),
                ('venue', models.TextField()),
                ('starts_at', models.DateTimeField()),
                # Deleting an applicant cascades to their workshops; events and
                # skill levels are protected while referenced.
                ('applicant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='workshops', to='cfp.Applicant')),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='workshops', to='events.Event')),
                ('skill_level', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='cfp.AudienceSkillLevel')),
            ],
        ),
    ]
|
{
"content_hash": "237739b2f64d597c83b05f10488f14fa",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 140,
"avg_line_length": 41.63636363636363,
"alnum_prop": 0.6069868995633187,
"repo_name": "WebCampZg/conference-web",
"id": "2c2ede86f030b40d5b44af544beac5dfd79e69e8",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workshops/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "131971"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "268738"
},
{
"name": "SCSS",
"bytes": "41619"
}
],
"symlink_target": ""
}
|
from google.cloud import speech_v1p1beta1 as speech
def sample_recognize(storage_uri):
    """Perform synchronous speech recognition on an audio file.

    Args:
        storage_uri: URI for audio file in Cloud Storage,
            e.g. gs://[BUCKET]/[FILE]

    Returns:
        The RecognizeResponse returned by the Cloud Speech API.
    """
    client = speech.SpeechClient()

    # storage_uri = 'gs://cloud-samples-data/speech/brooklyn_bridge.mp3'

    # The language of the supplied audio
    language_code = "en-US"

    # Sample rate in Hertz of the audio data sent
    sample_rate_hertz = 44100

    # Encoding of audio data sent. This sample sets this explicitly.
    # This field is optional for FLAC and WAV audio formats.
    encoding = speech.RecognitionConfig.AudioEncoding.MP3
    config = {
        "language_code": language_code,
        "sample_rate_hertz": sample_rate_hertz,
        "encoding": encoding,
    }
    audio = {"uri": storage_uri}

    response = client.recognize(config=config, audio=audio)
    for result in response.results:
        # First alternative is the most probable result
        alternative = result.alternatives[0]
        print(u"Transcript: {}".format(alternative.transcript))
    # [END speech_quickstart_beta]
    return response
def main():
    """Parse command-line options and run the recognition sample."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--storage_uri",
        type=str,
        default="gs://cloud-samples-data/speech/brooklyn_bridge.mp3",
    )
    options = arg_parser.parse_args()

    sample_recognize(options.storage_uri)
if __name__ == "__main__":
    # Allow invoking the sample directly from the command line.
    main()
|
{
"content_hash": "17e1efae933d787939ae36a11fb77977",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 80,
"avg_line_length": 26.344827586206897,
"alnum_prop": 0.6557591623036649,
"repo_name": "googleapis/python-speech",
"id": "d40e6d32f1c89bdc3d97a23504e6ea3e393529fd",
"size": "2611",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/snippets/speech_quickstart_beta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2077065"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
}
|
# Copyright (c) 2016 Croatian Meteor Network
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import ephem
import datetime
import time
import serial
import numpy as np
import threading, Queue
import logging
import os
import sys
import errno
import shutil
from subprocess import call
from nightAnalyzer import analyze
def mkdirp(path):
    """ Makes a directory (including parents) and handles all errors.

    An already-existing directory is accepted silently; any other OS
    error is reported and re-raised.
    """
    try:
        os.makedirs(path)
    except OSError, exc:
        # Directory already exists - nothing to do.
        if exc.errno == errno.EEXIST:
            pass
        else:
            print 'An error occured:'
            raise
def wordDayDuration(lat, lon, elevation):
    """Calculate when night recording should start and for how long.

    Required inputs: latitude, longitude, elevation (meters).
    Returns a (start_time, duration) tuple:
      - start_time is True when recording should start immediately,
        otherwise the datetime of the next sunset.
      - duration is the recording length in hours, rounded to 4 decimals.
    """

    o=ephem.Observer()
    o.lat=str(lat)
    o.long=str(lon)
    o.elevation=elevation

    o.horizon = '-5:26' #Correction for 30-minute later start/ending

    s=ephem.Sun()
    s.compute()

    next_rise=ephem.localtime(o.next_rising(s))
    next_set=ephem.localtime(o.next_setting(s))

    current_time=datetime.datetime.now()

    #Should we start recording now? (sun is already down when the next
    #sunset comes after the next sunrise)
    if next_set>next_rise:
        start_time=True
    else:
        start_time=next_set

    #For how long should we record?
    if start_time==True:
        duration=next_rise-current_time
    else:
        duration=next_rise-next_set

    # Recalculate the recording time to hours and round to 4 decimals
    duration=round(duration.total_seconds()/60/60, 4)

    return (start_time, duration)
def currentMicroTime():
    """ Returns the number of microseconds since epoch.

    (The previous docstring said milliseconds, but time.time() is
    multiplied by 1e6 - microseconds - which matches the function name
    and the /1000000.0 conversions done by callers.)
    """
    return int(round(time.time() * 1000000))
def waitResponse(ser, char):
    """ Wait for response from Arduino while toggling iris.

    Repeatedly writes *char* to the serial port every 0.2 s until a line
    containing "Response" is read back.
    """
    while 1:
        print 'Waiting...'
        logging.info('Waiting...')
        time.sleep(0.2)
        ser.write(char)
        if "Response" in ser.readline():
            break
def recordData(ser):
    """ Run radiometer data recording for one data block.

    Reads 2-byte little-endian ADC samples from serial port *ser* for
    data_block_time seconds (module-level setting). Returns a
    (time_start, times_list, data_list) tuple: a UTC timestamp string,
    per-sample time offsets in seconds, and the raw ADC values.
    """

    data_list = []
    times_list = []

    first_read = True

    # Flush input buffer
    ser.flushInput()

    # NOTE(review): `count` is never used below.
    count = 0
    while True:

        # Stop once one data block worth of time has elapsed (timer starts
        # on the first successfully read sample).
        if not first_read:
            if (currentMicroTime() - record_start >= (data_block_time*1000000)):
                break

        try:
            b1 = ord(ser.read(1))
            b2 = ord(ser.read(1))
            #ser.flushInput()
        except:
            print "Couldn't get data!"
            logging.info("Couldn't get data!")
            continue

        # Calculate ADC value from 2 bytes transfered via COM interface
        serial_value = b1 + b2*256

        # NOTE(review): serial_value is an int here, so this comparison is
        # always true; presumably a leftover from a string-based protocol.
        if serial_value != '':

            if first_read:
                first_read = False
                record_start = currentMicroTime()
                time_start = datetime.datetime.utcnow().strftime("%Y-%m-%d_%H:%M:%S.%f")
                first = currentMicroTime()
                print 'Start', time_start
                logging.info('Start ' + str(time_start))

            #print serial_value

            # Add value to data list
            try:
                # Add the timestamp to list
                times_list.append(round((currentMicroTime() - first)/1000000.0, 6))
                # Add the measurement to list
                data_list.append(serial_value)
            except:
                print 'Error adding data to list!'
                logging.info('Error adding data to list!')
                print serial_value
                logging.info(str(serial_value))
                break

    run_duration = currentMicroTime() - record_start

    # Calculate samples per second
    sps = len(data_list) / (run_duration / 1000000.00)
    print 'Calculated SPS: ', sps
    logging.info('Calculated SPS: ' + str(sps))

    return time_start, times_list, data_list
def initConnection(port):
    """ Initialize connection to Arduino.

    Opens the serial port at 115200 baud, commands the iris to open and
    returns the ready serial handle.
    """

    # Initalize serial connection, block read until new data is recieved
    ser = serial.Serial(port, baudrate=115200, bytesize=8, parity='N', stopbits=1, timeout=None)

    # Wait for initialization
    time.sleep(2)

    # Open iris ("1" is the open command)
    waitResponse(ser, "1")

    # Wait for the iris to open
    time.sleep(2)

    print 'Connected!'
    logging.info('Connected!')

    # Flush input buffer
    ser.flushInput()

    return ser
def closeConnection(ser):
    """ Close connection to Arduino: shut the iris, then the serial port. """

    print 'Ending data capture'
    logging.info('Ending data capture')

    # Close iris ("0" is the close command)
    waitResponse(ser, '0')

    # Close serial communication
    ser.close()
def writeData():
    """ Write data to a file.

    Worker loop for the saving thread: polls the module-level queue `q`
    for (time_start, times, data, night_folder) tuples and saves each
    block as a two-column (time, value) CSV file.
    """

    while 1:

        if not q.empty():
            data = q.get()

            night_folder = data[3]
            # File name derived from the block start timestamp
            # (':' is not allowed in Windows file names).
            file_name = str(data[0]).replace(':', '.') + '.csv'

            times_array = data[1]
            data_array = data[2]

            times_array = np.array(times_array)
            data_array = np.array(data_array)

            output = np.column_stack((times_array.flatten(), data_array.flatten()))

            np.savetxt(night_folder + os.sep + file_name, output, fmt = ['%s', '%s'], delimiter = ',')

        time.sleep(1)
if __name__ == '__main__':

    # Define the used Arduino port
    arduino_port = 'COM4'

    # Geographical coordinates of the radiometric station
    latitude = 44.869509 # N
    longitude = 13.853925 # E
    elevation = 23 # meters

    # Duration of a single time block in seconds
    data_block_time = 10.24 #s

    # Initiate logging
    logging.basicConfig(filename='record_log.log', level=logging.INFO, format='%(asctime)s %(message)s', datefmt='%Y/%m/%d-%H:%M:%S')

    # Set a thread which writes data to files
    q = Queue.Queue()
    thread1 = threading.Thread(target=writeData)

    print 'Data block duration: ', data_block_time, 's'
    logging.info('Data block duration: ' + str(data_block_time) + 's')

    # Start the saving thread
    thread1.start()

    # counter == 0 marks the start of a new night: compute the recording
    # window, wait for sunset, then record `duration` worth of blocks.
    counter = 0
    while True:

        if counter == 0:
            start_time, duration = wordDayDuration(latitude, longitude, elevation)

            # Recalculate the duration to seconds from hours
            duration = duration*60*60

            # Wait if it is not yet time to start recording
            if start_time != True:
                # Calculate seconds to wait until recording
                seconds_waiting = int((start_time - datetime.datetime.now()).total_seconds()) + 1
                print 'Waiting ' + str(seconds_waiting) + ' seconds.'
                logging.info('Waiting ' + str(seconds_waiting) + ' seconds.')
                time.sleep(seconds_waiting)

            # Generate the night folder name (YYYYMMDD + next day's DD)
            night_folder = datetime.datetime.now().strftime("%Y%m%d") + (datetime.datetime.now() + datetime.timedelta(days=1)).strftime("%d")

            # Make a now night folder
            mkdirp(night_folder)

            counter = int(duration/data_block_time) + 1

            # Initialize connection
            ser = initConnection(arduino_port)
        else:
            counter -= 1

        # Run data recording
        time_start, times_array, data_array = recordData(ser)

        # Put files that are to be written into a file in queue so the saving thread can pick them up
        q.put((time_start, times_array, data_array, night_folder))

        # Close conection if this is the last recording run
        if counter == 0:
            closeConnection(ser)

            # Wait a bit before analyzing
            time.sleep(5)

            print 'Running the night statistics...'

            # Run the night statistics analysis
            analyze(night_folder)

            print 'Statistics done!'

            # Wait a bit before recalculating the new start time
            time.sleep(5*60)
|
{
"content_hash": "7c5c1e1875ac6c35a24181f68fbd0408",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 141,
"avg_line_length": 29.830188679245282,
"alnum_prop": 0.5829643685431162,
"repo_name": "CroatianMeteorNetwork/MeteorRadiometer",
"id": "0b420bad545146f0af129d61a8a56532f5154fa6",
"size": "9511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/radiometerRecord.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "8634"
},
{
"name": "Processing",
"bytes": "454"
},
{
"name": "Python",
"bytes": "34107"
}
],
"symlink_target": ""
}
|
from django import template
from webquills.core.models import Image
# https://docs.djangoproject.com/en/3.2/howto/custom-template-tags/
# Tag library instance that Django's template engine discovers for this module.
register = template.Library()
@register.simple_tag
def image_url(instance: Image, op: str = "", **kwargs):
    """Return the URL for an image, optionally for a derived thumbnail.

    Args:
        instance: the Image whose URL is wanted.
        op: thumbnail operation name; when given, the URL of the
            corresponding thumb is returned and kwargs are passed through.

    Raises:
        ValueError: if kwargs are supplied without an op (the common
            mistake is an unquoted operation name in the template).
    """
    if op:
        return instance.get_thumb(op, **kwargs).url
    if kwargs:
        # Fixed typo in the error message: "for get" -> "forget".
        raise ValueError(
            "image_url called without op. Did you forget to quote the operation name?"
        )
    return instance.file.url
@register.simple_tag(takes_context=True)
def menu_active(context, menuitem: str):
    """Return the CSS classes that mark *menuitem* active for the current path."""
    active_classes = "btn btn-outline-primary"
    current_path = str(context["request"].path)
    # The root path matches only itself (every URL starts with "/");
    # all other menu items match by path prefix.
    if menuitem == "/":
        return active_classes if current_path == "/" else ""
    return active_classes if current_path.startswith(menuitem) else ""
|
{
"content_hash": "3418feec0d4ec9033c6f06bc149beea1",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 87,
"avg_line_length": 29.15625,
"alnum_prop": 0.654876741693462,
"repo_name": "veselosky/webquills",
"id": "4ea727dd991ef8637a75ceb170e63a40334fa8f0",
"size": "933",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "webquills/core/templatetags/webquills.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21709"
},
{
"name": "HTML",
"bytes": "12296"
},
{
"name": "Python",
"bytes": "135964"
},
{
"name": "Shell",
"bytes": "3242"
}
],
"symlink_target": ""
}
|
import numpy
from numpy.testing import assert_raises
from fuel import config
from fuel.datasets import CIFAR100
from fuel.streams import DataStream
from fuel.schemes import SequentialScheme
def test_cifar100():
    """Smoke-test the CIFAR100 wrapper: shapes, dtypes, streaming, splits."""
    # Train split: 50k examples; read the last 10 lazily from disk.
    train = CIFAR100('train', load_in_memory=False)
    assert train.num_examples == 50000
    handle = train.open()
    coarse_labels, features, fine_labels = train.get_data(handle,
                                                          slice(49990, 50000))
    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)
    train.close(handle)

    # Test split: raw data is uint8 before any transformation.
    test = CIFAR100('test', load_in_memory=False)
    handle = test.open()
    coarse_labels, features, fine_labels = test.get_data(handle,
                                                         slice(0, 10))
    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)
    assert features.dtype == numpy.uint8
    assert coarse_labels.dtype == numpy.uint8
    assert fine_labels.dtype == numpy.uint8
    test.close(handle)

    # The default stream rescales features into [0, 1] floats.
    stream = DataStream.default_stream(
        test, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[1]
    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX

    # Only 'train' and 'test' splits exist.
    assert_raises(ValueError, CIFAR100, 'valid')
|
{
"content_hash": "1a07d1e13583529351ced7d8ed7d3dbb",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 32.06818181818182,
"alnum_prop": 0.6265060240963856,
"repo_name": "EderSantana/fuel",
"id": "689a7b7e2ce44b7b77f8c413a3a8c8f9e54b89ce",
"size": "1411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cifar100.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "248958"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
}
|
import main
import robocup
import constants
import math
#
# Returns an array of their robots that we can chip over based on the current ball position
# this only operates on the distances from the ball to each robot and the Chip distances in constants.py
#
def chippable_robots():
    """Return opponents the ball can currently be chipped over.

    A robot qualifies when its distance from the ball lies strictly
    between OurChipping.MIN_CARRY and OurChipping.MAX_CARRY.
    """
    bp = main.ball().pos
    chippable = []
    for rob in main.system_state().their_robots:
        # Compute the ball-to-robot distance once (previously evaluated twice).
        dist = (rob.pos - bp).mag()
        if constants.OurChipping.MIN_CARRY < dist < constants.OurChipping.MAX_CARRY:
            chippable.append(rob)
    return chippable
|
{
"content_hash": "6441159ce22b4d26a2070f1fd129c6e2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 104,
"avg_line_length": 30.705882352941178,
"alnum_prop": 0.6934865900383141,
"repo_name": "JNeiger/robocup-software",
"id": "448b55009dd37360da0174705c275bcd2c185fac",
"size": "522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soccer/gameplay/evaluation/chipping.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2990"
},
{
"name": "C++",
"bytes": "1083792"
},
{
"name": "CMake",
"bytes": "112437"
},
{
"name": "Dockerfile",
"bytes": "2872"
},
{
"name": "MATLAB",
"bytes": "31229"
},
{
"name": "Makefile",
"bytes": "5816"
},
{
"name": "Python",
"bytes": "735005"
},
{
"name": "Shell",
"bytes": "21468"
}
],
"symlink_target": ""
}
|
import json
import sys
from nltk import RegexpTokenizer, OrderedDict
from nltk.stem.snowball import RussianStemmer
# VK (vk.com) audio genre ids mapped to human-readable genre names;
# unknown ids fall back to "Other" via genres.get(..., "Other") below.
genres = {
    1: "Rock",
    2: "Pop",
    3: "Rap & Hip - Hop",
    4: "Easy Listening",
    5: "Dance & House",
    6: "Instrumental",
    7: "Metal",
    21: "Alternative",
    8: "Dubstep",
    9: "Jazz & Blues",
    10: "Drum & Bass",
    11: "Trance",
    12: "Chanson",
    13: "Ethnic",
    14: "Acoustic & Vocal",
    15: "Reggae",
    16: "Classical",
    17: "Indie Pop",
    19: "Speech",
    22: "Electropop & Disco",
    18: "Other"
}
def dictWithoutOneKey(d, key):
    """Return a shallow copy of *d* with *key* removed.

    Raises KeyError when *key* is absent, matching dict deletion semantics.
    """
    remaining = dict(d)
    del remaining[key]
    return remaining
if __name__ == '__main__':
    # Usage: audio-analyzer.py <music dump file> <output json file>
    musicFileName = sys.argv[1]
    destFileName = sys.argv[2]

    # Tokenizer keeps only Latin/Cyrillic words; the stemmer normalizes
    # Russian word forms so title variants collapse together.
    tokenizer = RegexpTokenizer(r"[A-Za-zА-Яа-я]+")
    stemmer = RussianStemmer()

    audioStats = dict()
    with open(musicFileName, "r", encoding="utf8") as f_music:
        for line in f_music:
            jsonData = json.loads(line, encoding="utf8")
            for song in list(jsonData.values())[0]:
                songName = "{} - {}".format(song["artist"], song["title"])
                # Normalized dedup key: stemmed lower-case words joined.
                filteredSongName = "".join(
                    [stemmer.stem(token).lower() for token in tokenizer.tokenize(songName)]
                )
                if len(filteredSongName) > 1:
                    audioStatsItem = audioStats.get(filteredSongName, {
                        "name": songName,
                        "url": song["url"],
                        "genre": genres.get(song["genre_id"], "Other"),
                        "count": 0
                    })
                    audioStatsItem["count"] += 1
                    audioStats[filteredSongName] = audioStatsItem

    with open(destFileName, "w", encoding="utf-8") as f_out:
        # Order songs by play count, most popular first.
        sortedSongs = [item[1] for item in sorted(audioStats.items(), key=lambda item: item[1]["count"], reverse=True)]
        data = OrderedDict([(item["name"], dictWithoutOneKey(item, "name")) for item in sortedSongs])
        f_out.write(json.dumps(data, ensure_ascii=False, indent=4))
|
{
"content_hash": "4fadc238a4b28cb45276cb3732328030",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 119,
"avg_line_length": 31.567164179104477,
"alnum_prop": 0.5314420803782506,
"repo_name": "AlekseyLobanov/gotohack",
"id": "97f5189cce7f06aa2a35e12ea78380e454f342d5",
"size": "2164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio-analyzer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "554393"
},
{
"name": "Python",
"bytes": "12246"
}
],
"symlink_target": ""
}
|
"""
Auto-generated class for AYSStep
"""
from .Job import Job
from . import client_support
class AYSStep(object):
    """
    auto-generated. don't touch.
    """

    @staticmethod
    def create(jobs, number):
        """
        :type jobs: list[Job]
        :type number: int
        :rtype: AYSStep
        """
        return AYSStep(
            jobs=jobs,
            number=number,
        )

    def __init__(self, json=None, **kwargs):
        # Accepts either a parsed JSON dict or keyword arguments; at least
        # one source of data is required.
        if not json and not kwargs:
            raise ValueError('No data or kwargs present')

        class_name = 'AYSStep'
        create_error = '{cls}: unable to create {prop} from value: {val}: {err}'
        required_error = '{cls}: missing required property {prop}'

        data = json or kwargs

        # Required property 'jobs': a list of Job instances.
        property_name = 'jobs'
        val = data.get(property_name)
        if val is not None:
            datatypes = [Job]
            try:
                self.jobs = client_support.list_factory(val, datatypes)
            except ValueError as err:
                raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
        else:
            raise ValueError(required_error.format(cls=class_name, prop=property_name))

        # Required property 'number': the step's sequence number.
        property_name = 'number'
        val = data.get(property_name)
        if val is not None:
            datatypes = [int]
            try:
                self.number = client_support.val_factory(val, datatypes)
            except ValueError as err:
                raise ValueError(create_error.format(cls=class_name, prop=property_name, val=val, err=err))
        else:
            raise ValueError(required_error.format(cls=class_name, prop=property_name))

    def __str__(self):
        return self.as_json(indent=4)

    def as_json(self, indent=0):
        return client_support.to_json(self, indent=indent)

    def as_dict(self):
        return client_support.to_dict(self)
|
{
"content_hash": "dab86e705613d9bd574ac0750a356c9d",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 107,
"avg_line_length": 28.954545454545453,
"alnum_prop": 0.5724751439037153,
"repo_name": "Jumpscale/ays9",
"id": "971a157506bba1668bb31bfc4fe4cbe2b06bba6e",
"size": "1911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JumpScale9AYS/clients/sync_client/AYSStep.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235840"
},
{
"name": "Cap'n Proto",
"bytes": "20377"
},
{
"name": "HTML",
"bytes": "1974"
},
{
"name": "JavaScript",
"bytes": "4324209"
},
{
"name": "Python",
"bytes": "691623"
},
{
"name": "RAML",
"bytes": "3933753"
},
{
"name": "Shell",
"bytes": "3824"
}
],
"symlink_target": ""
}
|
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer from open source:
pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory:
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
DV360 API Insert From BigQuery
Insert DV360 API Endpoints.
Specify the name of the dataset and table.
Rows will be read and applied as a insert to DV360.
'''
from starthinker_airflow.factory import DAG_Factory
# Add the following credentials to your Airflow configuration.
USER_CONN_ID = "starthinker_user" # The connection to use for user authentication.
GCP_CONN_ID = "starthinker_service" # The connection to use for service authentication.

# Recipe field values supplied by the operator of this DAG.
INPUTS = {
  'auth_write': 'user',  # Credentials used for writing data.
  'insert': '',
  'auth_read': 'service',  # Credentials used for reading data.
  'dataset': '',  # Google BigQuery dataset to create tables in.
  'table': '',  # Google BigQuery dataset to create tables in.
}

# StarThinker recipe: read rows from BigQuery and insert them via the
# selected DV360 API endpoint.
TASKS = [
  {
    'dv360_api': {
      'auth': {
        'field': {
          'name': 'auth_write',
          'kind': 'authentication',
          'order': 0,
          'default': 'user',
          'description': 'Credentials used for writing data.'
        }
      },
      'insert': {
        'field': {
          'name': 'insert',
          'kind': 'choice',
          'choices': [
            'advertisers',
            'advertisers.campaigns',
            'advertisers.channels',
            'advertisers.channels.sites',
            'advertisers.creatives',
            'advertisers.insertionOrders',
            'advertisers.lineItems',
            'advertisers.locationLists',
            'advertisers.locationLists.assignedLocations',
            'advertisers.negativeKeywordLists',
            'advertisers.negativeKeywordLists.negativeKeywords',
            'floodlightGroups',
            'inventorySourceGroups',
            'partners.channels',
            'users'
          ],
          'default': ''
        }
      },
      'bigquery': {
        'auth': {
          'field': {
            'name': 'auth_read',
            'kind': 'authentication',
            'order': 1,
            'default': 'service',
            'description': 'Credentials used for reading data.'
          }
        },
        'dataset': {
          'field': {
            'name': 'dataset',
            'kind': 'string',
            'order': 2,
            'default': '',
            'description': 'Google BigQuery dataset to create tables in.'
          }
        },
        'table': {
          'field': {
            'name': 'table',
            'kind': 'string',
            'order': 3,
            'default': '',
            'description': 'Google BigQuery dataset to create tables in.'
          }
        },
        'as_object': True
      }
    }
  }
]

# Build the Airflow DAG from the recipe above.
# NOTE(review): "apply_credentails" (sic) matches the factory's API spelling.
DAG_FACTORY = DAG_Factory('dv360_api_insert_from_bigquery', { 'tasks':TASKS }, INPUTS)
DAG_FACTORY.apply_credentails(USER_CONN_ID, GCP_CONN_ID)
DAG = DAG_FACTORY.execute()

if __name__ == "__main__":
  DAG_FACTORY.print_commandline()
|
{
"content_hash": "2b71a4efde3c6ff08387550c43ca8b76",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 87,
"avg_line_length": 27.96551724137931,
"alnum_prop": 0.5283600493218249,
"repo_name": "google/starthinker",
"id": "94e36910830b90c8c903b959014af75ee54331af",
"size": "3986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dags/dv360_api_insert_from_bigquery_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "89775"
},
{
"name": "Jupyter Notebook",
"bytes": "1088964"
},
{
"name": "Python",
"bytes": "2356647"
},
{
"name": "Shell",
"bytes": "89492"
}
],
"symlink_target": ""
}
|
import logging
import re
import uuid
import random
from urllib.parse import urlparse
from multiprocessing import Process
import requests
from lxml import etree
from twisted.internet import reactor
from scrapy.crawler import CrawlerRunner
from scrapy.utils.log import configure_logging
from scrapy.utils.project import get_project_settings
from mydm.spiderfactory import SpiderFactory
from mydm.model import (
save_spider_settings, save_feed, is_exists_spider, get_stats
)
from mydm.util import is_url
logger = logging.getLogger(__name__)

# Minimal scrapy settings for one-off test crawls: only the stats extension
# is enabled, no pipelines or middlewares, and logging is kept quiet.
TEST_SETTINGS = {
    'EXTENSIONS': {
        'mydm.extensions.ExtensionStats': 900,
        'scrapy.extensions.logstats.LogStats': None,
        'scrapy.extensions.spiderstate.SpiderState': None,
        'scrapy.extensions.telnet.TelnetConsole': None,
    },
    'ITEM_PIPELINES': {},
    'DOWNLOADER_MIDDLEWARES': {},
    'BOT_NAME': 'TestSpider',
    'WEBSERVICE_ENABLED': False,
    'TELNETCONSOLE_ENABLED': False,
    'LOG_LEVEL': 'WARNING',
    'LOG_FORMAT': '%(asctime)s-%(levelname)s: %(message)s',
    'LOG_DATEFORMAT': '%Y-%m-%d %H:%M:%S'
}
def get_feed_name(url):
    """Derive a CamelCase spider name from the hostname of *url*.

    Non-alphabetic characters are stripped from each hostname label; for
    multi-label hostnames the TLD and any leading 'www' are dropped and
    the remaining labels are capitalized and concatenated.
    """
    def clean(label):
        return re.sub(r'[^a-zA-Z]', '', label).lower().capitalize()

    labels = urlparse(url).hostname.split('.')
    if len(labels) == 1:
        return clean(labels[0])
    return ''.join(
        clean(label)
        for label in labels[:-1]
        if label.lower() != 'www'
    )
def _run_feed_spider(url, feed):
    """Run a one-off test crawl for *feed* in the current process.

    Saves the feed URL, crawls it with a freshly generated spider and, if
    at least one article was fetched and no spider exists for the URL yet,
    persists the spider settings. Raises on any failure so the wrapping
    process's exit code reflects success (see dry_run_feed_spider).
    """
    spid = str(uuid.uuid4())
    feed['_id'] = spid
    configure_logging(TEST_SETTINGS, install_root_handler=False)
    logging.getLogger('scrapy').setLevel(logging.WARNING)
    save_feed(url)
    cls = SpiderFactory.create_spider(feed)
    runner = CrawlerRunner(TEST_SETTINGS)
    d = runner.crawl(cls)
    d.addBoth(lambda _: reactor.stop())
    reactor.run(installSignalHandlers=False)
    # The spider must have crawled at least one article to be accepted.
    n = get_stats([spid])[spid]
    if n == 0:
        raise Exception(f'feed spider crawled 0 articles')
    if is_exists_spider(url):
        raise Exception(f'feed[{url}] existed')
    del feed['_id']
    save_spider_settings(feed)
def dry_run_feed_spider(url, feed):
    """Test-crawl *feed* in a child process; return True on success.

    A separate process is used because a twisted reactor cannot be
    restarted within the same process.
    """
    p = Process(target=_run_feed_spider, args=(url, feed))
    p.start()
    p.join()
    return p.exitcode == 0
def validate_rss_feed(feed):
    """Normalize an RSS feed config dict in place.

    Raises when the URL is invalid; trims optional xpath fields and drops
    them entirely when they are empty after trimming.
    """
    url = feed['url']
    if not is_url(url):
        raise Exception(f'invalid url value[{url}]')

    content_xpath = feed['item_content_xpath'].strip('\r\n\t ')
    if content_xpath:
        feed['item_content_xpath'] = content_xpath
    else:
        feed.pop('item_content_xpath')

    trimmed_nodes = [node.strip('\r\n\t ') for node in feed['removed_xpath_nodes']]
    trimmed_nodes = [node for node in trimmed_nodes if node]
    if trimmed_nodes:
        feed['removed_xpath_nodes'] = trimmed_nodes
    else:
        feed.pop('removed_xpath_nodes')
def submit_rss_feed(feed):
    """Validate an RSS/XML feed config, fetch the feed to derive its title,
    then test-crawl it and register the spider.

    Raises on connection errors, non-200 responses, XML parse failures or
    a failed dry-run crawl.
    """
    validate_rss_feed(feed)
    url = feed.pop('url')
    settings = get_project_settings()
    headers = settings['DEFAULT_REQUEST_HEADERS'].copy()
    headers['User-Agent'] = settings['USER_AGENT']
    try:
        r = requests.get(url, headers=headers)
    except requests.exceptions.ConnectionError:
        logger.error('rss spider connect %s failed', url)
        raise
    if r.status_code != 200:
        logger.error(
            'rss spider got bad response[%s, status=%d]',
            url,
            r.status_code
        )
        raise Exception(f'bad response for rss feed[{url}]')
    parser = etree.XMLParser(
        encoding=r.encoding,
        ns_clean=True,
        remove_blank_text=True,
        dtd_validation=False,
        load_dtd=True
    )
    try:
        root = etree.XML(r.content, parser)
    except Exception:
        logger.error('rss feed[%s] parse failed', url)
        raise
    # Descend through single-child wrapper elements (e.g. <rss><channel>)
    # until reaching the node whose children include the feed <title>.
    while len(root) == 1:
        root = root[0]
    for e in root:
        try:
            en = etree.QName(e.tag).localname.lower()
        except ValueError:
            # Tag without a resolvable qualified name - skip it.
            continue
        else:
            if en != 'title':
                continue
            # Strip leading/trailing whitespace from the feed title.
            feed['title'] = re.sub(
                r'^(\r|\n|\s)+|(\r|\n|\s)+$',
                '',
                e.text
            )
    feed['name'] = get_feed_name(url)
    if 'title' not in feed:
        feed['title'] = feed['name']
    feed['type'] = 'xml'
    feed['start_urls'] = [url]
    if not dry_run_feed_spider(url, feed):
        raise Exception('feed spider dry run failed')
def validate_blog_feed(feed):
    """Normalize a blog feed config dict in place.

    Raises when the URL is invalid or any required xpath/category field is
    empty after trimming; trims optional removed_xpath_nodes and drops the
    key entirely when nothing remains.
    """
    url = feed['url']
    if not is_url(url):
        raise Exception(f'invalid url value[{url}]')

    # Required text fields, validated in the original declaration order.
    for field in ('category', 'entry_xpath', 'item_title_xpath',
                  'item_link_xpath', 'item_content_xpath'):
        value = feed[field].strip('\r\n\t ')
        if not value:
            raise Exception(f'{field} value is empty')
        feed[field] = value

    trimmed_nodes = [node.strip('\r\n\t ') for node in feed['removed_xpath_nodes']]
    trimmed_nodes = [node for node in trimmed_nodes if node]
    if trimmed_nodes:
        feed['removed_xpath_nodes'] = trimmed_nodes
    else:
        feed.pop('removed_xpath_nodes')
def submit_blog_feed(feed):
    """Validate a blog feed config, test-crawl it and register the spider.

    Raises if validation or the dry-run crawl fails.
    """
    validate_blog_feed(feed)
    url = feed.pop('url')
    feed['name'] = get_feed_name(url)
    feed['title'] = feed['name']
    feed['type'] = 'blog'
    feed['start_urls'] = [url]
    if not dry_run_feed_spider(url, feed):
        raise Exception('feed spider dry run failed')
def crawl_articles(spids):
    """Crawl articles with the given spider ids ('all' selects every spider).

    Unknown ids are silently ignored. All spiders run in one twisted
    reactor; the call blocks until every crawl finishes.
    """
    settings = get_project_settings()
    configure_logging(settings, install_root_handler=False)
    logging.getLogger('scrapy').setLevel(logging.WARNING)
    runner = CrawlerRunner(settings)
    loader = runner.spider_loader
    if 'all' in spids:
        spids = loader.list()
    spiders = [
        loader.load(spid)
        for spid in spids
        if spid in loader.list()
    ]
    if not spiders:
        return
    # Shuffle so no single site is always crawled first.
    random.shuffle(spiders)
    for spider in spiders:
        runner.crawl(spider)
    d = runner.join()
    d.addBoth(lambda _: reactor.stop())
    logger.info('crawl job starting...')
    try:
        reactor.run()
    except Exception:
        logger.exception('crawl job got exception:')
    logger.info('crawl job finished')
|
{
"content_hash": "6b37f849bcef45df9cc2e12947df46ce",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 68,
"avg_line_length": 30.406639004149376,
"alnum_prop": 0.5989355895196506,
"repo_name": "hack4code/BlogSpider",
"id": "66aae574d48f5626cfaff8848dfec6b0bd819ce5",
"size": "7354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "spider/src/task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5725"
},
{
"name": "Dockerfile",
"bytes": "893"
},
{
"name": "HTML",
"bytes": "3530"
},
{
"name": "JavaScript",
"bytes": "38921"
},
{
"name": "Python",
"bytes": "93265"
},
{
"name": "Shell",
"bytes": "767"
}
],
"symlink_target": ""
}
|
""" Python 'uu_codec' Codec - UU content transfer encoding
Unlike most of the other codecs which target Unicode, this codec
will return Python string objects for both encode and decode.
Written by Marc-Andre Lemburg (mal@lemburg.com). Some details were
adapted from uu.py which was written by Lance Ellinghouse and
modified by Jack Jansen and Fredrik Lundh.
"""
import codecs, binascii
### Codec APIs
def uu_encode(input,errors='strict',filename='<data>',mode=0666):
    """ Encodes the object input and returns a tuple (output
        object, length consumed).

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.
    """
    assert errors == 'strict'
    from cStringIO import StringIO
    from binascii import b2a_uu
    infile = StringIO(input)
    outfile = StringIO()
    # Bind the bound methods once to avoid attribute lookups in the loop.
    read = infile.read
    write = outfile.write
    # Encode
    # uuencode header line; the mode is masked to the permission bits only.
    write('begin %o %s\n' % (mode & 0777, filename))
    # b2a_uu accepts at most 45 bytes per call (one 60-character output line).
    chunk = read(45)
    while chunk:
        write(b2a_uu(chunk))
        chunk = read(45)
    # Trailer: a line holding a single space (an empty chunk) followed by 'end'.
    write(' \nend\n')
    return (outfile.getvalue(), len(input))
def uu_decode(input,errors='strict'):
    """ Decodes the object input and returns a tuple (output
        object, length consumed).

        input must be an object which provides the bf_getreadbuf
        buffer slot. Python strings, buffer objects and memory
        mapped files are examples of objects providing this slot.

        errors defines the error handling to apply. It defaults to
        'strict' handling which is the only currently supported
        error handling for this codec.

        Note: filename and file mode information in the input data is
        ignored.
    """
    assert errors == 'strict'
    from cStringIO import StringIO
    from binascii import a2b_uu
    infile = StringIO(input)
    outfile = StringIO()
    readline = infile.readline
    write = outfile.write
    # Find start of encoded data: skip everything up to the 'begin' line.
    while 1:
        s = readline()
        if not s:
            raise ValueError, 'Missing "begin" line in input data'
        if s[:5] == 'begin':
            break
    # Decode line by line until the 'end' marker or EOF.
    while 1:
        s = readline()
        if not s or \
           s == 'end\n':
            break
        try:
            data = a2b_uu(s)
        except binascii.Error, v:
            # Workaround for broken uuencoders by /Fredrik Lundh:
            # derive the payload length from the line's length byte
            # (first char, offset by 32) and decode only that many
            # encoded characters, ignoring any trailing garbage.
            nbytes = (((ord(s[0])-32) & 63) * 4 + 5) / 3
            data = a2b_uu(s[:nbytes])
            #sys.stderr.write("Warning: %s\n" % str(v))
        write(data)
    if not s:
        # The loop above ended on EOF rather than on the 'end' marker.
        raise ValueError, 'Truncated input data'
    return (outfile.getvalue(), len(input))
class Codec(codecs.Codec):
    """Stateless codec delegating to the uu_encode/uu_decode helpers."""

    def encode(self, input, errors='strict'):
        return uu_encode(input, errors)

    def decode(self, input, errors='strict'):
        return uu_decode(input, errors)
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer composed from Codec; adds no state of its own."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader composed from Codec; adds no state of its own."""
### encodings module API

def getregentry():
    """Return the (encode, decode, StreamReader, StreamWriter) registration
    tuple expected by the encodings package."""
    return (uu_encode, uu_decode, StreamReader, StreamWriter)
|
{
"content_hash": "188c9a2f4eab9cc59f849886f0897245",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 70,
"avg_line_length": 28.285714285714285,
"alnum_prop": 0.6300505050505051,
"repo_name": "neopoly/rubyfox-server",
"id": "6ef8369d53c615b5e4b4ea7dd41990fa707806fe",
"size": "3168",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/rubyfox/server/data/lib/Lib/encodings/uu_codec.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "36798"
},
{
"name": "CSS",
"bytes": "3443"
},
{
"name": "HTML",
"bytes": "43018"
},
{
"name": "Java",
"bytes": "41664"
},
{
"name": "JavaScript",
"bytes": "168340"
},
{
"name": "Python",
"bytes": "4026158"
},
{
"name": "Ruby",
"bytes": "5433"
},
{
"name": "Shell",
"bytes": "13948"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.