"""Test the onboarding views."""
import asyncio
import os
from unittest.mock import patch
import pytest
from homeassistant.components import onboarding
from homeassistant.components.onboarding import const, views
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.helpers import area_registry as ar
from homeassistant.setup import async_setup_component
from . import mock_storage
from tests.common import CLIENT_ID, CLIENT_REDIRECT_URI, register_auth_provider
from tests.components.met.conftest import mock_weather # noqa: F401
@pytest.fixture(autouse=True)
def always_mock_weather(mock_weather): # noqa: F811
"""Mock the Met weather provider."""
@pytest.fixture(autouse=True)
def auth_active(hass):
"""Ensure auth is always active."""
hass.loop.run_until_complete(
register_auth_provider(hass, {"type": "homeassistant"})
)
@pytest.fixture(name="rpi")
async def rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "raspberrypi3"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="no_rpi")
async def no_rpi_fixture(hass, aioclient_mock, mock_supervisor):
"""Mock core info with rpi."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "machine": "odroid-n2"},
},
)
assert await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
@pytest.fixture(name="mock_supervisor")
async def mock_supervisor_fixture(hass, aioclient_mock):
"""Mock supervisor."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=True,
), patch(
"homeassistant.components.hassio.HassIO.get_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_host_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_store",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value={"diagnostics": True},
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value={},
), patch(
"homeassistant.components.hassio.HassIO.get_ingress_panels",
return_value={"panels": {}},
), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
yield
async def test_onboarding_progress(hass, hass_storage, hass_client_no_auth):
"""Test fetching progress."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
with patch.object(views, "STEPS", ["hello", "world"]):
resp = await client.get("/api/onboarding")
assert resp.status == 200
data = await resp.json()
assert len(data) == 2
assert data[0] == {"step": "hello", "done": True}
assert data[1] == {"step": "world", "done": False}
async def test_onboarding_user_already_done(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user when user step already done."""
mock_storage(hass_storage, {"done": [views.STEP_USER]})
with patch.object(onboarding, "STEPS", ["hello", "world"]):
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == HTTP_FORBIDDEN
async def test_onboarding_user(hass, hass_storage, hass_client_no_auth):
"""Test creating a new user."""
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test Name",
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 200
assert const.STEP_USER in hass_storage[const.DOMAIN]["data"]["done"]
data = await resp.json()
assert "auth_code" in data
users = await hass.auth.async_get_users()
assert len(users) == 1
user = users[0]
assert user.name == "Test Name"
assert len(user.credentials) == 1
assert user.credentials[0].data["username"] == "test-user"
assert len(hass.data["person"][1].async_items()) == 1
# Validate refresh token 1
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Validate created areas
area_registry = ar.async_get(hass)
assert len(area_registry.areas) == 3
assert sorted(area.name for area in area_registry.async_list_areas()) == [
"Bedroom",
"Kitchen",
"Living Room",
]
async def test_onboarding_user_invalid_name(hass, hass_storage, hass_client_no_auth):
"""Test not providing name."""
mock_storage(hass_storage, {"done": []})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"username": "test-user",
"password": "test-pass",
"language": "en",
},
)
assert resp.status == 400
async def test_onboarding_user_race(hass, hass_storage, hass_client_no_auth):
"""Test race condition on creating new user."""
mock_storage(hass_storage, {"done": ["hello"]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp1 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 1",
"username": "1-user",
"password": "1-pass",
"language": "en",
},
)
resp2 = client.post(
"/api/onboarding/users",
json={
"client_id": CLIENT_ID,
"name": "Test 2",
"username": "2-user",
"password": "2-pass",
"language": "es",
},
)
res1, res2 = await asyncio.gather(resp1, resp2)
assert sorted([res1.status, res2.status]) == [200, HTTP_FORBIDDEN]
async def test_onboarding_integration(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 200
data = await resp.json()
assert "auth_code" in data
# Validate refresh token
resp = await client.post(
"/auth/token",
data={
"client_id": CLIENT_ID,
"grant_type": "authorization_code",
"code": data["auth_code"],
},
)
assert resp.status == 200
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
tokens = await resp.json()
assert (
await hass.auth.async_validate_access_token(tokens["access_token"]) is not None
)
# Onboarding refresh token and new refresh token
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 2, user
async def test_onboarding_integration_missing_credential(
hass, hass_storage, hass_client, hass_access_token
):
"""Test that we fail integration step if user is missing credentials."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
refresh_token = await hass.auth.async_validate_access_token(hass_access_token)
refresh_token.credential = None
client = await hass_client()
resp = await client.post(
"/api/onboarding/integration",
json={"client_id": CLIENT_ID, "redirect_uri": CLIENT_REDIRECT_URI},
)
assert resp.status == 403
async def test_onboarding_integration_invalid_redirect_uri(
hass, hass_storage, hass_client
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.auth.indieauth.fetch_redirect_uris", return_value=[]
):
resp = await client.post(
"/api/onboarding/integration",
json={
"client_id": CLIENT_ID,
"redirect_uri": "http://invalid-redirect.uri",
},
)
assert resp.status == 400
# We will still mark the last step as done because there is nothing left.
assert const.STEP_INTEGRATION in hass_storage[const.DOMAIN]["data"]["done"]
# Only refresh token from onboarding should be there
for user in await hass.auth.async_get_users():
assert len(user.refresh_tokens) == 1, user
async def test_onboarding_integration_requires_auth(
hass, hass_storage, hass_client_no_auth
):
"""Test finishing integration step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client_no_auth()
resp = await client.post(
"/api/onboarding/integration", json={"client_id": CLIENT_ID}
)
assert resp.status == 401
async def test_onboarding_core_sets_up_met(hass, hass_storage, hass_client):
"""Test finishing the core step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("weather")) == 1
async def test_onboarding_core_sets_up_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, rpi
):
"""Test that the core step sets up rpi_power on RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert rpi_power_state
async def test_onboarding_core_no_rpi_power(
hass, hass_storage, hass_client, aioclient_mock, no_rpi
):
"""Test that the core step do not set up rpi_power on non RPi."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.rpi_power.config_flow.new_under_voltage"
), patch("homeassistant.components.rpi_power.binary_sensor.new_under_voltage"):
resp = await client.post("/api/onboarding/core_config")
assert resp.status == 200
await hass.async_block_till_done()
rpi_power_state = hass.states.get("binary_sensor.rpi_power_status")
assert not rpi_power_state
async def test_onboarding_analytics(hass, hass_storage, hass_client, hass_admin_user):
"""Test finishing analytics step."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 200
assert const.STEP_ANALYTICS in hass_storage[const.DOMAIN]["data"]["done"]
resp = await client.post("/api/onboarding/analytics")
assert resp.status == 403
async def test_onboarding_installation_type(hass, hass_storage, hass_client):
"""Test returning installation type during onboarding."""
mock_storage(hass_storage, {"done": []})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
with patch(
"homeassistant.components.onboarding.views.async_get_system_info",
return_value={"installation_type": "Home Assistant Core"},
):
resp = await client.get("/api/onboarding/installation_type")
assert resp.status == 200
resp_content = await resp.json()
assert resp_content["installation_type"] == "Home Assistant Core"
async def test_onboarding_installation_type_after_done(hass, hass_storage, hass_client):
"""Test raising for installation type after onboarding."""
mock_storage(hass_storage, {"done": [const.STEP_USER]})
assert await async_setup_component(hass, "onboarding", {})
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/onboarding/installation_type")
assert resp.status == 401
# --- meta: repo=lukas-hetzenecker/home-assistant, path=tests/components/onboarding/test_views.py, license=apache-2.0 ---
'''
Created on 18 September 2015
@author: arxit
'''
import json
import os.path
import sys
from collections import OrderedDict
from pyspatialite import dbapi2 as db
from qgis.core import *
from PyQt4.QtCore import (QFileInfo, QVariant, QObject, pyqtSignal,
                          QSettings, QCoreApplication)
from PyQt4.QtGui import QMessageBox
import main
from PagLuxembourg.schema import *
from PagLuxembourg.widgets.stylize.stylize import *
from PagLuxembourg.widgets.topology.topology import *
FILENAME = 'project.qgs'
DATABASE = 'database.sqlite'
PK = 'OGC_FID'
IMPORT_ID = 'ImportId'
class Project(QObject):
'''
    A class which represents a PAG project
'''
ready = pyqtSignal()
def __init__(self):
'''
Constructor
'''
super(Project, self).__init__()
self.creation_mode = False
def open(self):
'''
Called when a QGIS project is opened
'''
        # Signal QgsInterface.projectRead seems to be emitted twice
if QgsProject is None:
return
# QGIS emits projectRead when creating a new project
if self.creation_mode:
return
# Setting
filename = QgsProject.instance().fileName()
self.folder = os.path.normpath(os.path.dirname(filename))
self.filename = os.path.normpath(filename)
self.database = os.path.join(self.folder, DATABASE)
# If not PAG project return
if not self.isPagProject():
self.ready.emit()
return
# Update database
self._updateDatabase()
# Update map layers
self._updateMapLayers()
# Topological settings
self._setupTopologicalSettings()
# Activate the auto Show feature form on feature creation
self._activateAutoShowForm()
QgsProject.instance().write()
self.ready.emit()
def create(self, folder, name):
'''
        Creates a new project and loads it in the interface
:param folder: Folder path which will contain the new project folder
:type folder: str, QString
:param name: Project name, will be the project folder name
:type name: str, QString
'''
self.creation_mode = True
# Create project path
self.folder = os.path.normpath(os.path.join(folder,name))
if not os.path.exists(self.folder):
os.makedirs(self.folder)
# Create project filename
self.filename = os.path.join(self.folder, FILENAME)
main.qgis_interface.newProject(True)
main.qgis_interface.mapCanvas().setDestinationCrs(QgsCoordinateReferenceSystem(2169, QgsCoordinateReferenceSystem.EpsgCrsId)) # SRS 2169
QgsProject.instance().setFileName(self.filename) # Project filename
# Flag PAG project
QgsProject.instance().writeEntry('PAG', '/ProjetPAG', True)
QgsProject.instance().write()
# Database
self.database = os.path.join(self.folder, DATABASE)
self._updateDatabase()
# Update map layers
self._updateMapLayers()
QgsProject.instance().write()
# Topological settings
self._setupTopologicalSettings()
# Activate the auto Show feature form on feature creation
self._activateAutoShowForm()
self.creation_mode = False
# Save project and add to recent projects
main.qgis_interface.actionSaveProject().trigger()
# Notify project is ready
self.ready.emit()
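    # Illustrative call sequence (a sketch; folder and name are hypothetical):
    #   project = Project()
    #   project.create('C:/pag_projects', 'MyCommune')
    # This creates the project folder and database, loads the layers in the
    # interface, then emits the ready signal.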
def isPagProject(self):
'''
Indicates whether this is a PAG project
'''
result, dummy = QgsProject.instance().readBoolEntry('PAG', '/ProjetPAG', False)
return result
def getLayer(self, type):
'''
Get the map layer corresponding to the type
:param type: XSD schema type
:type type: PAGType
'''
# Map layers in the TOC
maplayers = QgsMapLayerRegistry.instance().mapLayers()
# Iterates through XSD types
uri = self.getTypeUri(type)
# Check whether a layer with type data source exists in the map
for k,v in maplayers.iteritems():
if self.compareURIs(v.source(), uri):
return v
return None
def isPagLayer(self, layer):
'''
Checks if a layer is a PAG layer
:param layer: Layer to check
:type layer: QgsVectorLayer
'''
for type in main.xsd_schema.types:
uri = self.getTypeUri(type)
if self.compareURIs(layer.source(), uri):
return True
return False
def getLayerTableName(self, layer):
'''
Returns the table name of the layer, only if it is a PAG layer
:param layer: Layer to check
:type layer: QgsVectorLayer
'''
if layer is None:
return None
if not self.isPagLayer(layer):
return None
return self.getUriInfos(layer.source())[1]
    def getImportLogLayer(self):
        '''
        Gets the import log layer, or None if the table is not available
        '''
logimport_table = PAGType()
logimport_table.name = 'ImportLog'
uri = self.getTypeUri(logimport_table)
layer = QgsVectorLayer(uri, logimport_table.friendlyName(), 'spatialite')
if not layer.isValid():
return None
return layer
    def getModificationPagLayer(self):
        '''
        Gets the layer corresponding to the PAG.MODIFICATION_PAG type
        '''
return self.getLayer(main.xsd_schema.getTypeFromTableName('PAG.MODIFICATION_PAG'))
def getNativeFields(self, type):
'''
Gets the native fields with type from database
:param type: XSD schema type
:type type: PAGType
'''
conn = db.connect(self.database)
cursor = conn.cursor()
rs = cursor.execute("PRAGMA table_info('{}')".format(type.name))
for i in range(len(rs.description)):
if rs.description[i][0] == 'name':
name_index = i
if rs.description[i][0] == 'type':
type_index = i
fields =[]
for row in rs:
fields.append((row[name_index],row[type_index]))
cursor.close()
del cursor
conn.close()
del conn
return fields
def _setupTopologicalSettings(self):
# Topological editing
QgsProject.instance().setTopologicalEditing(True)
# Update snapping settings
QgsProject.instance().writeEntry('Digitizing', '/SnappingMode', 'current_layer')
QgsProject.instance().writeEntry('Digitizing', '/DefaultSnapType', 'to vertex and segment' )
QgsProject.instance().writeEntry('Digitizing', '/DefaultSnapTolerance', 10.0)
QgsProject.instance().writeEntry('Digitizing', '/DefaultSnapToleranceUnit', QgsTolerance.Pixels)
QgsProject.instance().snapSettingsChanged.emit()
def _activateAutoShowForm(self):
settings = QSettings()
settings.setValue("/Map/identifyAutoFeatureForm", True)
def _updateDatabase(self):
'''
Updates the project database
'''
xsd_schema = main.xsd_schema
createdb = not os.path.isfile(self.database)
conn = db.connect(self.database)
# Create database if not exist
if createdb:
cursor=conn.cursor()
cursor.execute("SELECT InitSpatialMetadata()")
del cursor
# Check and update tables
for type in xsd_schema.types:
uri = self.getTypeUri(type)
layer = QgsVectorLayer(uri, type.friendlyName(), 'spatialite')
# Create layer if not valid
if not layer.isValid():
self._createTable(conn, type)
layer = QgsVectorLayer(uri, type.friendlyName(), 'spatialite')
self._updateTable(type, layer, True)
# Check and update the import log table
self._updateImportLogTable(conn)
conn.close()
del conn
def getTypeUri(self, type):
'''
Gets a uri to the table according to the XSD
:param type: XSD schema type
:type type: PAGType
'''
uri = QgsDataSourceURI()
uri.setDatabase(self.database)
geom_column = 'GEOMETRY' if type.geometry_type is not None else ''
uri.setDataSource('', type.name, geom_column,'',PK)
return uri.uri()
def _createTable(self, conn, type):
'''
Creates a new table in the spatialite database according to the XSD
:param conn: The database connection
:type conn: Connection
:param type: XSD schema type
:type type: PAGType
'''
# Create table
query="CREATE TABLE '%s' (%s integer primary key autoincrement,"%(type.name,PK)
# Geometry column
if type.geometry_type is not None:
query+="'GEOMETRY' %s,"%type.geometry_type
query=query[:-1]+")"
cursor=conn.cursor()
cursor.execute(query)
conn.commit()
cursor.close()
del cursor
# Register geometry column
if type.geometry_type is not None:
query="SELECT RecoverGeometryColumn('%s','GEOMETRY',2169,'%s',2)"%(type.name,type.geometry_type)
cursor=conn.cursor()
cursor.execute(query)
rep=cursor.fetchall()
if rep[0][0]==0:
conn.rollback()
else:
conn.commit()
cursor.close()
del cursor
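    # For an illustrative type named 'PAG.ZONE' with geometry_type
    # 'LINESTRING', the SQL generated above would be:
    #   CREATE TABLE 'PAG.ZONE' (OGC_FID integer primary key autoincrement,'GEOMETRY' LINESTRING)
    #   SELECT RecoverGeometryColumn('PAG.ZONE','GEOMETRY',2169,'LINESTRING',2)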
def _updateImportLogTable(self, conn):
'''
        Updates the import log table
:param conn: The database connection
:type conn: Connection
'''
# Log import table
logimport_table = PAGType()
logimport_table.name = 'ImportLog'
# Import ID field
field = PAGField()
field.name = IMPORT_ID
field.type = DataType.STRING
field.nullable = False
logimport_table.fields.append(field)
# Date field
field = PAGField()
field.name = 'Date'
field.type = DataType.STRING
field.nullable = False
logimport_table.fields.append(field)
        # Filename field
field = PAGField()
field.name = 'Filename'
field.type = DataType.STRING
field.nullable = False
logimport_table.fields.append(field)
# Layers field
field = PAGField()
field.name = 'Layers'
field.type = DataType.STRING
field.nullable = True
logimport_table.fields.append(field)
uri = self.getTypeUri(logimport_table)
layer = QgsVectorLayer(uri, logimport_table.friendlyName(), 'spatialite')
# Create table if not valid
if not layer.isValid():
self._createTable(conn, logimport_table)
layer = QgsVectorLayer(uri, logimport_table.friendlyName(), 'spatialite')
# Update fields
self._updateTable(logimport_table, layer)
def _updateTable(self, type, layer, add_importid = False):
'''
Updates the layer's table according to the XSD
:param type: XSD schema type
:type type: PAGType
:param layer: the QGIS vector layer object
:type layer: QgsVectorLayer
'''
for field in type.fields:
if layer.fieldNameIndex(field.name)<0:
layer.dataProvider().addAttributes([self._getField(field)])
# Add import id field
if add_importid:
field = PAGField()
field.name = IMPORT_ID
field.type = DataType.STRING
field.nullable = True
if layer.fieldNameIndex(field.name)<0:
layer.dataProvider().addAttributes([self._getField(field)])
layer.updateFields()
# Mapping between XSD datatype and QGIS datatype
datatypeMap = XSD_QGIS_DATATYPE_MAP
def _getField(self, pagfield):
'''
Creates a QGIS Field according to the XSD
:param pagfield: XSD schema field
:type pagfield: PAGField
:returns: The corresponding QGIS Field
:rtype: QgsField
'''
return QgsField(pagfield.name,
self.datatypeMap[pagfield.type],
pagfield.type,
int(pagfield.length) if pagfield.length is not None else 0)
def _updateMapLayers(self):
'''
Update layers attributes editors and add missing layers to the TOC
'''
# Get rules config
        config_path = os.path.join(main.plugin_dir,
                                   'assets',
                                   'LayerTree.json')
        with open(config_path, 'r') as f:
            config = json.loads(f.read())
main.qgis_interface.messageBar().clearWidgets()
# Process root node tree
self._updateLayerTreeNode(config, config)
# Add WMS basemap layer
self._addOrthoBasemap()
# Add topology rules
TopologyChecker(None).updateProjectRules()
def _updateLayerTreeNode(self, node, parentnode):
'''
        Updates a layer tree node (a layer group)
        :param node: The current node to update
        :type node: dict
        :param parentnode: The parent node of the current node
        :type parentnode: dict
'''
parent = QgsProject.instance().layerTreeRoot()
if parentnode['Name'] != 'Root':
parent = parent.findGroup(parentnode['Name'])
treenode = parent.findGroup(node['Name']) if node['Name'] != 'Root' else QgsProject.instance().layerTreeRoot()
if treenode is None:
treenode = parent.addGroup(node['Name'])
stylize = StylizeProject()
for child in node['Nodes']:
if child['IsGroup']:
self._updateLayerTreeNode(child, node)
else:
xsd_type = main.xsd_schema.getTypeFromTableName(child['TableName'])
# Type not found in XSD
if xsd_type is None:
                main.qgis_interface.messageBar().pushCritical(QCoreApplication.translate('Project','Error'),
                                                              QCoreApplication.translate('Project','Type not found in XSD : {}').format(child['TableName']))
continue
layer = self.getLayer(xsd_type)
            # Layer is not in the TOC yet, add it
if layer is None:
uri = self.getTypeUri(xsd_type)
layer = QgsVectorLayer(uri, child['Name'], 'spatialite')
QgsMapLayerRegistry.instance().addMapLayer(layer, False)
treenode.addLayer(layer)
# Updates layers style
stylize.stylizeLayer(layer, xsd_type)
# Update attributes editors
self._updateLayerEditors(layer, xsd_type)
# Activate the auto Show feature form on feature creation
layer.setFeatureFormSuppress(QgsVectorLayer.SuppressOff)
def _addOrthoBasemap(self):
ortho_url = 'url=http://wmts1.geoportail.lu/opendata/service&SLegend=0&crs=EPSG:2169&dpiMode=7&featureCount=10&format=image/jpeg&layers=ortho_latest&styles='
ortho_found = False
for k,v in QgsMapLayerRegistry.instance().mapLayers().iteritems():
if v.source() == ortho_url:
ortho_found = True
break
if not ortho_found:
ortho_layer = QgsRasterLayer(ortho_url, 'Ortho 2013', 'wms')
QgsMapLayerRegistry.instance().addMapLayer(ortho_layer, False)
QgsProject.instance().layerTreeRoot().addLayer(ortho_layer)
main.qgis_interface.mapCanvas().setExtent(ortho_layer.extent())
def getUriInfos(self, uri):
'''
Gets the database and table name from uri
:param uri: URI
:type uri: QString
:returns: Database and table name
:rtype: tuple(QString, QString)
'''
db=''
table=''
split = uri.split(' ')
for kv in split:
if kv.startswith('dbname'):
db = os.path.normpath(kv[8:-1])
if kv.startswith('table'):
table = kv[7:-1]
return db, table
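    # Illustrative input/output for getUriInfos(), assuming a typical
    # spatialite source string (values are hypothetical):
    #   dbname='/tmp/database.sqlite' table="PAG.ZONE" (GEOMETRY) key='OGC_FID'
    #   -> ('/tmp/database.sqlite', 'PAG.ZONE')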
def compareURIs(self, uri1, uri2):
'''
Compares 2 URIs
:param uri1: URI 1
:type uri1: QString
:param uri2: URI 2
:type uri2: QString
        :returns: True if the URIs point to the same table
:rtype: Boolean
'''
# URI 1
info1 = self.getUriInfos(uri1)
# URI 2
info2 = self.getUriInfos(uri2)
return info1 == info2
def _updateLayerEditors(self, layer, type):
'''
Update the layers attributes editors
:param layer: The layer to update
:type layer: QgsVectorLayer
:param type: XSD schema type
:type type: PAGType
'''
# Hide fields
hidden = [PK, IMPORT_ID]
for field in layer.pendingFields():
if field.name() == IMPORT_ID:
layer.setEditorWidgetV2(layer.fieldNameIndex(field.name()),'TextEdit')
        # Workaround for bug http://hub.qgis.org/issues/14235
for field in layer.pendingFields():
if field.name() in hidden:
layer.setEditorWidgetV2(layer.fieldNameIndex(field.name()),'Hidden')
# Editors
for field in type.fields:
self._setupFieldEditor(field, layer)
fileFields = ['NOM_FICHIER','NOM_EC','NOM_GR']
def _setupFieldEditor(self, field, layer):
'''
Update the field editor
        :param field: XSD schema field
        :type field: PAGField
:param layer: The layer to update
:type layer: QgsVectorLayer
'''
fieldIndex = layer.fieldNameIndex(field.name)
if fieldIndex == -1:
return
config = dict()
# String
if field.type == DataType.STRING:
# Simple text
editor = 'TextEdit'
# File
for fileField in self.fileFields:
if field.name.startswith(fileField):
editor = 'SimpleFilename'
# Enumeration
if field.listofvalues is not None:
editor = 'ValueMap'
# Invert key, value of currentConfig
currentConfig = layer.editorWidgetV2Config(fieldIndex) if layer.editorWidgetV2(fieldIndex) == 'ValueMap' else OrderedDict()
currentConfig = OrderedDict((v, k) for k, v in currentConfig.iteritems())
# Keep current values and add new ones
for element in field.listofvalues:
if element in currentConfig:
                        config[currentConfig[element]] = element  # config maps description -> value
else:
config[element]=element
# Integer
elif field.type == DataType.INTEGER:
editor = 'PreciseRange'
config['Min'] = int(field.minvalue) if field.minvalue is not None else -sys.maxint-1
config['Max'] = int(field.maxvalue) if field.maxvalue is not None else sys.maxint
config['Step'] = 1
config['AllowNull'] = field.nullable
# Double
elif field.type == DataType.DOUBLE:
editor = 'PreciseRange'
config['Min'] = float(field.minvalue) if field.minvalue is not None else -sys.maxint-1
config['Max'] = float(field.maxvalue) if field.maxvalue is not None else sys.maxint
mindecimal = len(field.minvalue.split('.')[1]) if field.minvalue is not None and len(field.minvalue.split('.'))==2 else 0
maxdecimal = len(field.maxvalue.split('.')[1]) if field.maxvalue is not None and len(field.maxvalue.split('.'))==2 else 0
config['Step'] = 1.0/pow(10,max(mindecimal,maxdecimal))
config['AllowNull'] = field.nullable
# Date
elif field.type == DataType.DATE:
editor = 'DateTime'
config['field_format'] = 'yyyy-MM-dd'
config['display_format'] = 'yyyy-MM-dd'
config['calendar_popup'] = True
config['allow_null'] = field.nullable
# Other
else:
raise NotImplementedError('Unknown datatype')
layer.setEditorWidgetV2(fieldIndex,editor)
layer.setEditorWidgetV2Config(fieldIndex, config)
# --- meta: repo=Geoportail-Luxembourg/qgis-pag-plugin, path=project.py, license=mit ---
"""
Component to interface with garage doors that can be controlled remotely.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/garage_door/
"""
import logging
import os
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.const import (
STATE_CLOSED, STATE_OPEN, STATE_UNKNOWN, SERVICE_CLOSE, SERVICE_OPEN,
ATTR_ENTITY_ID)
from homeassistant.components import group
DOMAIN = 'garage_door'
SCAN_INTERVAL = 30
GROUP_NAME_ALL_GARAGE_DOORS = 'all garage doors'
ENTITY_ID_ALL_GARAGE_DOORS = group.ENTITY_ID_FORMAT.format('all_garage_doors')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
GARAGE_DOOR_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
_LOGGER = logging.getLogger(__name__)
def is_closed(hass, entity_id=None):
"""Return if the garage door is closed based on the statemachine."""
entity_id = entity_id or ENTITY_ID_ALL_GARAGE_DOORS
return hass.states.is_state(entity_id, STATE_CLOSED)
def close_door(hass, entity_id=None):
"""Close all or a specified garage door."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_CLOSE, data)
def open_door(hass, entity_id=None):
"""Open all or specified garage door."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
hass.services.call(DOMAIN, SERVICE_OPEN, data)
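# Hedged usage sketch (the entity id is hypothetical): from other components,
# these helpers wrap the underlying service calls, e.g.
#   open_door(hass, 'garage_door.main')
#   is_closed(hass, 'garage_door.main')  # True once the door reports closed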
def setup(hass, config):
"""Track states and offer events for garage door."""
_LOGGER.warning('This component has been deprecated in favour of the '
'"cover" component and will be removed in the future.'
' Please upgrade.')
component = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_GARAGE_DOORS)
component.setup(config)
def handle_garage_door_service(service):
"""Handle calls to the garage door services."""
        target_doors = component.extract_from_service(service)
        for item in target_doors:
if service.service == SERVICE_CLOSE:
item.close_door()
else:
item.open_door()
if item.should_poll:
item.update_ha_state(True)
descriptions = load_yaml_config_file(
os.path.join(os.path.dirname(__file__), 'services.yaml'))
hass.services.register(DOMAIN, SERVICE_OPEN, handle_garage_door_service,
descriptions.get(SERVICE_OPEN),
schema=GARAGE_DOOR_SERVICE_SCHEMA)
hass.services.register(DOMAIN, SERVICE_CLOSE, handle_garage_door_service,
descriptions.get(SERVICE_CLOSE),
schema=GARAGE_DOOR_SERVICE_SCHEMA)
return True
class GarageDoorDevice(Entity):
"""Representation of a garage door."""
# pylint: disable=no-self-use
@property
def is_closed(self):
"""Return true if door is closed."""
return None
def close_door(self):
"""Close the garage door."""
raise NotImplementedError()
def open_door(self):
"""Open the garage door."""
raise NotImplementedError()
@property
def state(self):
"""Return the state of the garage door."""
closed = self.is_closed
if closed is None:
return STATE_UNKNOWN
return STATE_CLOSED if closed else STATE_OPEN
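# A minimal platform sketch (not part of this component): how a platform
# might subclass GarageDoorDevice. All names below are illustrative.
class DemoGarageDoor(GarageDoorDevice):
    """In-memory garage door with no real hardware behind it."""

    def __init__(self, name):
        self._name = name
        self._closed = True

    @property
    def name(self):
        """Return the name of the demo door."""
        return self._name

    @property
    def is_closed(self):
        """Return true if the door is closed."""
        return self._closed

    def close_door(self):
        """Close the door."""
        self._closed = True

    def open_door(self):
        """Open the door."""
        self._closed = False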
# --- meta: repo=varunr047/homefile, path=homeassistant/components/garage_door/__init__.py, license=mit ---
"""
Common functions used by different CLI interfaces.
"""
from __future__ import print_function
import argparse
import traceback
from oslo_log import log as logging
import six
import nova.conf
import nova.db.api
from nova import exception
from nova.i18n import _, _LE
from nova import utils
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def block_db_access(service_name):
"""Blocks Nova DB access."""
class NoDB(object):
def __getattr__(self, attr):
return self
def __call__(self, *args, **kwargs):
stacktrace = "".join(traceback.format_stack())
LOG.error(_LE('No db access allowed in %(service_name)s: '
'%(stacktrace)s'),
dict(service_name=service_name, stacktrace=stacktrace))
raise exception.DBNotAllowed(service_name)
nova.db.api.IMPL = NoDB()
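# Illustrative call, as a service entry point might make it (sketch):
#   block_db_access('nova-compute')
# Any later call made through nova.db.api.IMPL then logs the stack trace
# and raises DBNotAllowed.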
# Decorators for actions
def args(*args, **kwargs):
"""Decorator which adds the given args and kwargs to the args list of
the desired func's __dict__.
"""
def _decorator(func):
func.__dict__.setdefault('args', []).insert(0, (args, kwargs))
return func
return _decorator
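# Hedged usage sketch: a hypothetical command category whose action method
# declares its CLI options via the @args decorator above. The decorator
# stores the (args, kwargs) tuples on the function itself, e.g.
# DiskCommands.scan.args == [(('--path',), {'metavar': '<path>', ...})].
class DiskCommands(object):
    """Illustrative category; not part of nova."""

    description = 'Disk maintenance commands.'

    @args('--path', metavar='<path>', help='Target path')
    def scan(self, path=None):
        print('scanning %s' % path)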
def methods_of(obj):
"""Get all callable methods of an object that don't start with underscore
returns a list of tuples of the form (method_name, method)
"""
result = []
for i in dir(obj):
if callable(getattr(obj, i)) and not i.startswith('_'):
result.append((i, getattr(obj, i)))
return result
def add_command_parsers(subparsers, categories):
"""Adds command parsers to the given subparsers.
Adds version and bash-completion parsers.
Adds a parser with subparsers for each category in the categories dict
given.
"""
parser = subparsers.add_parser('version')
parser = subparsers.add_parser('bash-completion')
parser.add_argument('query_category', nargs='?')
for category in categories:
command_object = categories[category]()
desc = getattr(command_object, 'description', None)
parser = subparsers.add_parser(category, description=desc)
parser.set_defaults(command_object=command_object)
category_subparsers = parser.add_subparsers(dest='action')
for (action, action_fn) in methods_of(command_object):
parser = category_subparsers.add_parser(action, description=desc)
action_kwargs = []
for args, kwargs in getattr(action_fn, 'args', []):
# FIXME(markmc): hack to assume dest is the arg name without
# the leading hyphens if no dest is supplied
kwargs.setdefault('dest', args[0][2:])
if kwargs['dest'].startswith('action_kwarg_'):
action_kwargs.append(kwargs['dest'][len('action_kwarg_'):])
else:
action_kwargs.append(kwargs['dest'])
kwargs['dest'] = 'action_kwarg_' + kwargs['dest']
parser.add_argument(*args, **kwargs)
parser.set_defaults(action_fn=action_fn)
parser.set_defaults(action_kwargs=action_kwargs)
parser.add_argument('action_args', nargs='*',
help=argparse.SUPPRESS)
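# Hedged, self-contained wiring of add_command_parsers() with a plain
# argparse parser, reusing the hypothetical DiskCommands category above.
def _demo_add_command_parsers():
    top = argparse.ArgumentParser(prog='demo-manage')
    add_command_parsers(top.add_subparsers(dest='category'),
                        {'disk': DiskCommands})
    ns = top.parse_args(['disk', 'scan', '--path', '/tmp'])
    # '--path' was rewritten above to dest 'action_kwarg_path'
    assert ns.action == 'scan' and ns.action_kwarg_path == '/tmp'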
def print_bash_completion(categories):
if not CONF.category.query_category:
print(" ".join(categories.keys()))
elif CONF.category.query_category in categories:
fn = categories[CONF.category.query_category]
command_object = fn()
actions = methods_of(command_object)
print(" ".join([k for (k, v) in actions]))
def get_action_fn():
fn = CONF.category.action_fn
fn_args = []
for arg in CONF.category.action_args:
if isinstance(arg, six.binary_type):
arg = arg.decode('utf-8')
fn_args.append(arg)
fn_kwargs = {}
for k in CONF.category.action_kwargs:
v = getattr(CONF.category, 'action_kwarg_' + k)
if v is None:
continue
if isinstance(v, six.binary_type):
v = v.decode('utf-8')
fn_kwargs[k] = v
# call the action with the remaining arguments
# check arguments
missing = utils.validate_args(fn, *fn_args, **fn_kwargs)
if missing:
# NOTE(mikal): this isn't the most helpful error message ever. It is
# long, and tells you a lot of things you probably don't want to know
# if you just got a single arg wrong.
print(fn.__doc__)
CONF.print_help()
raise exception.Invalid(
_("Missing arguments: %s") % ", ".join(missing))
return fn, fn_args, fn_kwargs
# --- meta: repo=cloudbase/nova, path=nova/cmd/common.py, license=apache-2.0 ---
from datetime import timedelta as td
import json
from unittest.mock import patch
from django.core import mail
from django.utils.timezone import now
from hc.api.models import Channel, Check, Notification
from hc.test import BaseTestCase
class NotifyWhatsAppTestCase(BaseTestCase):
def _setup_data(self, notify_up=True, notify_down=True):
self.check = Check(project=self.project)
self.check.status = "down"
self.check.last_ping = now() - td(minutes=61)
self.check.save()
definition = {"value": "+1234567890", "up": notify_up, "down": notify_down}
self.channel = Channel(project=self.project, kind="whatsapp")
self.channel.value = json.dumps(definition)
self.channel.save()
self.channel.checks.add(self.check)
@patch("hc.api.transports.requests.request")
def test_it_works(self, mock_post):
mock_post.return_value.status_code = 200
self._setup_data()
self.channel.notify(self.check)
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertEqual(payload["To"], "whatsapp:+1234567890")
n = Notification.objects.get()
callback_path = f"/api/v1/notifications/{n.code}/status"
self.assertTrue(payload["StatusCallback"].endswith(callback_path))
# sent SMS counter should go up
self.profile.refresh_from_db()
self.assertEqual(self.profile.sms_sent, 1)
@patch("hc.api.transports.requests.request")
def test_it_obeys_up_down_flags(self, mock_post):
self._setup_data(notify_down=False)
self.check.last_ping = now() - td(hours=2)
self.channel.notify(self.check)
self.assertEqual(Notification.objects.count(), 0)
self.assertFalse(mock_post.called)
@patch("hc.api.transports.requests.request")
def test_it_enforces_limit(self, mock_post):
# At limit already:
self.profile.last_sms_date = now()
self.profile.sms_sent = 50
self.profile.save()
self._setup_data()
self.channel.notify(self.check)
self.assertFalse(mock_post.called)
n = Notification.objects.get()
self.assertTrue("Monthly message limit exceeded" in n.error)
# And email should have been sent
self.assertEqual(len(mail.outbox), 1)
email = mail.outbox[0]
self.assertEqual(email.to[0], "alice@example.org")
self.assertEqual(email.subject, "Monthly WhatsApp Limit Reached")
@patch("hc.api.transports.requests.request")
def test_it_does_not_escape_special_characters(self, mock_post):
self._setup_data()
self.check.name = "Foo > Bar & Co"
mock_post.return_value.status_code = 200
self.channel.notify(self.check)
args, kwargs = mock_post.call_args
payload = kwargs["data"]
self.assertIn("Foo > Bar & Co", payload["Body"])
# --- meta: repo=iphoting/healthchecks, path=hc/api/tests/test_notify_whatsapp.py, license=bsd-3-clause ---
import tempfile
from django.conf import settings
from django.core.files.uploadedfile import InMemoryUploadedFile # noqa
from django.core.urlresolvers import reverse
from django.forms.widgets import HiddenInput # noqa
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from horizon import tables as horizon_tables
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.images.images import forms
from openstack_dashboard.dashboards.project.images.images import tables
IMAGES_INDEX_URL = reverse('horizon:project:images:index')
class CreateImageFormTests(test.TestCase):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_no_location_or_file(self):
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA({}), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA({}), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
post = {
'name': u'Ubuntu 11.10',
'source_type': u'file',
'description': u'Login with admin/admin',
'disk_format': u'qcow2',
'architecture': u'x86-64',
'minimum_disk': 15,
'minimum_ram': 512,
'is_public': 1}
files = {}
form = forms.CreateImageForm(post, files)
self.assertEqual(form.is_valid(), False)
@override_settings(HORIZON_IMAGES_ALLOW_UPLOAD=False)
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_image_upload_disabled(self):
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA({}), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA({}), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
form = forms.CreateImageForm({})
self.assertEqual(
isinstance(form.fields['image_file'].widget, HiddenInput), True)
source_type_dict = dict(form.fields['source_type'].choices)
self.assertNotIn('file', source_type_dict)
def test_create_image_metadata_docker(self):
form_data = {
'name': u'Docker image',
'description': u'Docker image test',
'source_type': u'url',
'image_url': u'/',
'disk_format': u'docker',
'architecture': u'x86-64',
'minimum_disk': 15,
'minimum_ram': 512,
'is_public': False,
'protected': False,
'is_copying': False
}
meta = forms.create_image_metadata(form_data)
self.assertEqual(meta['disk_format'], 'raw')
self.assertEqual(meta['container_format'], 'docker')
self.assertIn('properties', meta)
self.assertNotIn('description', meta)
self.assertNotIn('architecture', meta)
self.assertEqual(meta['properties']['description'],
form_data['description'])
self.assertEqual(meta['properties']['architecture'],
form_data['architecture'])
class UpdateImageFormTests(test.TestCase):
def test_is_format_field_editable(self):
form = forms.UpdateImageForm({})
disk_format = form.fields['disk_format']
self.assertFalse(disk_format.widget.attrs.get('readonly', False))
@test.create_stubs({api.glance: ('image_get',)})
def test_image_update(self):
image = self.images.first()
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
self.mox.ReplayAll()
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.get(url)
self.assertNoFormErrors(res)
self.assertEqual(res.context['image'].disk_format,
image.disk_format)
@test.create_stubs({api.glance: ('image_update', 'image_get')})
def test_image_update_post(self):
image = self.images.first()
data = {
'name': u'Ubuntu 11.10',
'image_id': str(image.id),
'description': u'Login with admin/admin',
'source_type': u'url',
'image_url': u'http://cloud-images.ubuntu.com/releases/'
u'oneiric/release/ubuntu-11.10-server-cloudimg'
u'-amd64-disk1.img',
'disk_format': u'qcow2',
'architecture': u'x86-64',
'minimum_disk': 15,
'minimum_ram': 512,
'is_public': False,
'protected': False,
'method': 'UpdateImageForm'}
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
api.glance.image_update(IsA(http.HttpRequest),
image.id,
is_public=data['is_public'],
protected=data['protected'],
disk_format=data['disk_format'],
container_format="bare",
name=data['name'],
min_ram=data['minimum_ram'],
min_disk=data['minimum_disk'],
properties={'description': data['description'],
'architecture':
data['architecture']},
purge_props=False).AndReturn(image)
self.mox.ReplayAll()
url = reverse('horizon:project:images:images:update',
args=[image.id])
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
class ImageViewTests(test.TestCase):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_image_create_get(self):
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
url = reverse('horizon:project:images:images:create')
res = self.client.get(url)
self.assertTemplateUsed(res,
'project/images/images/create.html')
@test.create_stubs({api.glance: ('image_create',)})
def test_image_create_post_copy_from(self):
data = {
'source_type': u'url',
'image_url': u'http://cloud-images.ubuntu.com/releases/'
u'oneiric/release/ubuntu-11.10-server-cloudimg'
u'-amd64-disk1.img',
'is_copying': True}
api_data = {'copy_from': data['image_url']}
self._test_image_create(data, api_data)
@test.create_stubs({api.glance: ('image_create',)})
def test_image_create_post_location(self):
data = {
'source_type': u'url',
'image_url': u'http://cloud-images.ubuntu.com/releases/'
u'oneiric/release/ubuntu-11.10-server-cloudimg'
u'-amd64-disk1.img',
'is_copying': False}
api_data = {'location': data['image_url']}
self._test_image_create(data, api_data)
@test.create_stubs({api.glance: ('image_create',)})
def test_image_create_post_upload(self):
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {'source_type': u'file',
'image_file': temp_file}
api_data = {'data': IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@test.create_stubs({api.glance: ('image_create',)})
def test_image_create_post_with_kernel_ramdisk(self):
temp_file = tempfile.NamedTemporaryFile()
temp_file.write(b'123')
temp_file.flush()
temp_file.seek(0)
data = {
'source_type': u'file',
'image_file': temp_file,
'kernel_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482e',
'ramdisk_id': '007e7d55-fe1e-4c5c-bf08-44b4a496482a'
}
api_data = {'data': IsA(InMemoryUploadedFile)}
self._test_image_create(data, api_data)
@test.create_stubs({api.glance: ('image_list_detailed',)})
def _test_image_create(self, extra_form_data, extra_api_data):
data = {
'name': u'Ubuntu 11.10',
'description': u'Login with admin/admin',
'disk_format': u'qcow2',
'architecture': u'x86-64',
'minimum_disk': 15,
'minimum_ram': 512,
'is_public': True,
'protected': False,
'method': 'CreateImageForm'}
data.update(extra_form_data)
api_data = {'container_format': 'bare',
'disk_format': data['disk_format'],
'is_public': True,
'protected': False,
'min_disk': data['minimum_disk'],
'min_ram': data['minimum_ram'],
'properties': {
'description': data['description'],
'architecture': data['architecture']},
'name': data['name']}
api_data.update(extra_api_data)
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
api.glance.image_create(
IsA(http.HttpRequest),
**api_data).AndReturn(self.images.first())
self.mox.ReplayAll()
url = reverse('horizon:project:images:images:create')
res = self.client.post(url, data)
self.assertNoFormErrors(res)
self.assertEqual(res.status_code, 302)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_detail_get(self):
image = self.images.first()
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(self.images.first())
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].name, image.name)
self.assertEqual(res.context['image'].protected, image.protected)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_detail_custom_props_get(self):
image = self.images.list()[8]
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
self.mox.ReplayAll()
res = self.client.get(reverse('horizon:project:images:images:detail',
args=[image.id]))
image_props = res.context['image_props']
# Test description property not displayed
image_keys = [prop[0] for prop in image_props]
self.assertNotIn(('description'), image_keys)
# Test custom properties are sorted
self.assertEqual(image_props[0], ('bar', 'bar', 'bar val'))
self.assertEqual(image_props[1], ('foo', 'foo', 'foo val'))
# Test all custom properties appear in template
self.assertContains(res, '<dt title="bar">bar</dt>')
self.assertContains(res, '<dd>bar val</dd>')
self.assertContains(res, '<dt title="foo">foo</dt>')
self.assertContains(res, '<dd>foo val</dd>')
@test.create_stubs({api.glance: ('image_get',)})
def test_protected_image_detail_get(self):
image = self.images.list()[2]
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:project:images:images:detail',
args=[image.id]))
self.assertTemplateUsed(res,
'horizon/common/_detail.html')
self.assertEqual(res.context['image'].protected, image.protected)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_detail_get_with_exception(self):
image = self.images.first()
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndRaise(self.exceptions.glance)
self.mox.ReplayAll()
url = reverse('horizon:project:images:images:detail',
args=[image.id])
res = self.client.get(url)
self.assertRedirectsNoFollow(res, IMAGES_INDEX_URL)
@test.create_stubs({api.glance: ('image_get',)})
def test_image_update_get(self):
image = self.images.first()
image.disk_format = "ami"
image.is_public = True
api.glance.image_get(IsA(http.HttpRequest), str(image.id)) \
.AndReturn(image)
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:project:images:images:update',
args=[image.id]))
self.assertTemplateUsed(res,
'project/images/images/_update.html')
self.assertEqual(res.context['image'].name, image.name)
# Bug 1076216 - is_public checkbox not being set correctly
self.assertContains(res, "<input type='checkbox' id='id_public'"
" name='public' checked='checked'>",
html=True,
msg_prefix="The is_public checkbox is not checked")
class OwnerFilterTests(test.TestCase):
def setUp(self):
super(OwnerFilterTests, self).setUp()
self.table = self.mox.CreateMock(horizon_tables.DataTable)
self.table.request = self.request
@override_settings(IMAGES_LIST_FILTER_TENANTS=[{'name': 'Official',
'tenant': 'officialtenant',
'icon': 'fa-check'}])
def test_filter(self):
self.mox.ReplayAll()
all_images = self.images.list()
table = self.table
self.filter_tenants = settings.IMAGES_LIST_FILTER_TENANTS
filter_ = tables.OwnerFilter()
images = filter_.filter(table, all_images, 'project')
self.assertEqual(images, self._expected('project'))
images = filter_.filter(table, all_images, 'public')
self.assertEqual(images, self._expected('public'))
images = filter_.filter(table, all_images, 'shared')
self.assertEqual(images, self._expected('shared'))
images = filter_.filter(table, all_images, 'officialtenant')
self.assertEqual(images, self._expected('officialtenant'))
def _expected(self, filter_string):
my_tenant_id = self.request.user.tenant_id
images = self.images.list()
special = map(lambda t: t['tenant'], self.filter_tenants)
if filter_string == 'public':
return [im for im in images if im.is_public]
if filter_string == 'shared':
return [im for im in images
if (not im.is_public and
im.owner != my_tenant_id and
im.owner not in special)]
if filter_string == 'project':
filter_string = my_tenant_id
return [im for im in images if im.owner == filter_string]
# --- meta: repo=Tesora/tesora-horizon, path=openstack_dashboard/dashboards/project/images/images/tests.py, license=apache-2.0 ---
#!/usr/bin/env python
import os
thisdir = os.path.abspath(os.path.dirname(__file__))
sourcedir = os.path.join(thisdir,'source')
libdir = os.path.join(thisdir,'lib')
__version__ = '1.3'
def build(sources,libs,out,minified_out,header=None):
sources = [os.path.join(sourcedir,s) for s in sources]
libs = [os.path.join(libdir,s) for s in libs]
out = os.path.join(thisdir,out)
with open(out,'w') as outfile:
if header:
outfile.write(header)
# include libfiles
for lib in libs:
with open(lib) as libfile:
outfile.write('\n')
outfile.write(libfile.read())
outfile.write('\n')
# include sources
for source in sources:
with open(source) as sourcefile:
outfile.write('\n')
outfile.write(sourcefile.read())
outfile.write('\n')
    # Minification via slimit is disabled because of a problem with the package
"""
try:
from slimit import minify
with open(out) as outfile:
with open(minified_out,'w') as minified:
minified.write(minify(outfile.read(), mangle=False, mangle_toplevel=False))
except ImportError:
print('Slimit package was not available')
"""
    print('The file "%s" was generated successfully.' % out)
out_header="""
/*!
joop: javascript OOP toolkit
author: Vahid Mardani
version: %s
*/
""" % __version__
if __name__ == '__main__':
out_filename = 'build/joop-%s.js' % __version__
minified_out_filename = 'joop-%s.min.js' % __version__
build([ # Source files
'joop.ie.compat.js',
'joop.helpers.js',
'joop.namespace.js',
'joop.init.js',
'joop.class.js',
'joop.singleton.js',
'joop.object.js',
],
[ # Libraries to insert before sources
'sprintf/sprintf.js',
],
# Output files
out_filename,minified_out_filename,header=out_header)
# --- meta: repo=pylover/joop, path=build.py, license=mit ---
import xcffib
import struct
import io
_events = {}
_errors = {}
class ClientMessageData(xcffib.Union):
def __init__(self, unpacker):
if isinstance(unpacker, xcffib.Protobj):
unpacker = xcffib.MemoryUnpacker(unpacker.pack())
xcffib.Union.__init__(self, unpacker)
self.data8 = xcffib.List(unpacker.copy(), "B", 20)
self.data16 = xcffib.List(unpacker.copy(), "H", 10)
self.data32 = xcffib.List(unpacker.copy(), "I", 5)
    def pack(self):
        # For a union, packing any one view emits the full 20-byte payload.
        buf = io.BytesIO()
        buf.write(xcffib.pack_list(self.data8, "B"))
        return buf.getvalue()
xcffib._add_ext(key, unionExtension, _events, _errors)
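Here `key` and `unionExtension` are assumed to be defined elsewhere in the full generated module; this fragment exercises only the Union plumbing. A small sketch of that plumbing, assuming xcffib.List supports iteration and a little-endian unpack: the three views decode the same 20 bytes at different widths.

data = ClientMessageData(xcffib.MemoryUnpacker(b'\x01\x00\x00\x00' * 5))
print(list(data.data32))                       # expected: [1, 1, 1, 1, 1]
print(data.pack() == b'\x01\x00\x00\x00' * 5)  # expected: True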
|
{
"content_hash": "5058c11325145ad1ad7b327df2c90315",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 36.5,
"alnum_prop": 0.6270928462709284,
"repo_name": "tych0/xcffib",
"id": "d07b49261889b7719f3d99d104628471e41fc788",
"size": "657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/generator/union.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Haskell",
"bytes": "45429"
},
{
"name": "Makefile",
"bytes": "2562"
},
{
"name": "Python",
"bytes": "90210"
}
],
"symlink_target": ""
}
|
import json
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django_qbe.forms import QueryByExampleFormSet, DATABASES
from django_qbe.utils import (autocomplete_graph, qbe_models, formats,
pickle_encode, pickle_decode, get_query_hash,
admin_site)
from django_qbe.settings import (
QBE_ACCESS_FOR,
QBE_GROUP_BY,
QBE_SHOW_ROW_NUMBER,
QBE_ADMIN,
QBE_ALIASES,
QBE_SAVED_QUERIES
)
qbe_access_for = QBE_ACCESS_FOR
def get_apps():
try:
from django.apps import apps
return [app.models_module for app in apps.get_app_configs() if app.models_module]
except ImportError:
# Backward compatibility for Django prior to 1.7
from django.db import models
return models.get_apps()
@user_passes_test(qbe_access_for)
def qbe_form(request, query_hash=None):
query_key = "qbe_query_%s" % query_hash
db_alias = request.session.get("qbe_database", "default")
formset = QueryByExampleFormSet(using=db_alias)
json_data = None
if query_key in request.session:
data = request.session[query_key]
db_alias = data.get("database_alias", "default")
formset = QueryByExampleFormSet(data=data, using=db_alias)
if not formset.is_valid():
formset = QueryByExampleFormSet(using=db_alias)
else:
json_data = json.dumps(data)
apps = get_apps()
models = qbe_models(admin_site=admin_site, only_admin_models=False)
json_models = qbe_models(admin_site=admin_site, json=True)
title_url = reverse("qbe_form")
saved_query = None
if QBE_SAVED_QUERIES:
title_url = reverse("admin:app_list", args=["savedqueries"])
from django_qbe.savedqueries.models import SavedQuery
saved_queries = SavedQuery.objects.filter(query_hash=query_hash)
saved_query = saved_queries.first()
context = {
'apps': apps,
'models': models,
'formset': formset,
'databases': DATABASES,
'database_alias': db_alias,
'title': _(u"Query by Example"),
'title_url': title_url,
'saved_query': saved_query,
'json_models': json_models,
'json_data': json_data,
'query_hash': query_hash,
'savedqueries_installed': QBE_SAVED_QUERIES,
'aliases_enabled': QBE_ALIASES,
'group_by_enabled': QBE_GROUP_BY
}
return render(request, 'qbe.html', context)
@user_passes_test(qbe_access_for)
def qbe_proxy(request):
if request.POST:
data = request.POST.copy()
db_alias = request.session.get("qbe_database", "default")
formset = QueryByExampleFormSet(data=data, using=db_alias)
if formset.is_valid():
pickled = pickle_encode(data)
query_hash = get_query_hash(pickled)
query_key = "qbe_query_%s" % query_hash
request.session[query_key] = data
return redirect("qbe_results", query_hash=query_hash)
return redirect("qbe_form")
@user_passes_test(qbe_access_for)
def qbe_results(request, query_hash):
query_key = "qbe_query_%s" % (query_hash or "")
if query_key in request.session:
query_key = "qbe_query_%s" % query_hash
data = request.session[query_key]
else:
return redirect("qbe_form")
db_alias = data.get("database_alias", "default")
if db_alias in DATABASES:
request.session["qbe_database"] = db_alias
else:
db_alias = request.session.get("qbe_database", "default")
formset = QueryByExampleFormSet(data=data, using=db_alias)
if formset.is_valid():
row_number = QBE_SHOW_ROW_NUMBER
admin_name = QBE_ADMIN
aliases = QBE_ALIASES
labels = formset.get_labels(row_number=row_number, aliases=aliases)
count = formset.get_count()
limit = count
try:
page = int(request.GET.get("p", 0))
except ValueError:
page = 0
if not request.GET.get("show", None):
try:
limit = int(data.get("limit", 100))
except ValueError:
limit = 100
offset = limit * page
results = formset.get_results(limit=limit, offset=offset,
admin_name=admin_name,
row_number=row_number)
query = formset.get_raw_query(add_params=True)
pickled = pickle_encode(data)
title_url = reverse("qbe_form")
saved_query = None
if QBE_SAVED_QUERIES:
title_url = reverse("admin:app_list", args=["savedqueries"])
from django_qbe.savedqueries.models import SavedQuery
saved_queries = SavedQuery.objects.filter(query_hash=query_hash)
saved_query = saved_queries.first()
context = {
'formset': formset,
'title': _(u"Query by Example"),
'title_url': title_url,
'saved_query': saved_query,
'results': results,
'labels': labels,
'query': query,
'count': count,
'limit': limit,
'page': page,
'offset': offset + 1,
'offset_limit': offset + limit,
'pickled': pickled,
'query_hash': query_hash,
'admin_urls': admin_name is not None and formset.has_admin_urls(),
'formats': formats,
'savedqueries_installed': QBE_SAVED_QUERIES,
'aliases_enabled': QBE_ALIASES,
'group_by_enabled': QBE_GROUP_BY
}
return render(request, 'qbe_results.html', context)
return redirect("qbe_form")
@user_passes_test(qbe_access_for)
def qbe_bookmark(request):
data = request.GET.get("data", None)
if data:
query_hash = get_query_hash(data)
query_key = "qbe_query_%s" % query_hash
request.session[query_key] = pickle_decode(data)
return redirect("qbe_results", query_hash)
else:
return redirect("qbe_form")
@user_passes_test(qbe_access_for)
def qbe_export(request, query_hash, format):
query_key = "qbe_query_%s" % query_hash
if format in formats and query_key in request.session:
data = request.session[query_key]
db_alias = request.session.get("qbe_database", "default")
formset = QueryByExampleFormSet(data=data, using=db_alias)
if formset.is_valid():
aliases = QBE_ALIASES
labels = formset.get_labels(aliases=aliases)
query = formset.get_raw_query()
results = formset.get_results(query)
return formats[format](labels, results)
return redirect("qbe_form")
# @user_passes_test(qbe_access_for)
def qbe_js(request):
user_passed_test = request.user and qbe_access_for(request.user)
return HttpResponse(render_to_string('qbe_index.js', {
'qbe_url': reverse("qbe_form"),
'reports_label': _(u"Reports"),
'qbe_label': _(u"Query by Example"),
'user_passes_test': user_passed_test,
    }), content_type="text/javascript")  # "mimetype" was removed in Django 1.7
@user_passes_test(qbe_access_for)
def qbe_autocomplete(request):
nodes = None
if request.is_ajax() and request.POST:
        models = request.POST.get('models', "").split(",")  # "" default; [] has no .split()
        nodes = autocomplete_graph(admin_site, models)
    # A Django view must return a response; serialize whatever was computed.
    return HttpResponse(json.dumps(nodes), content_type="application/json")
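A hedged sketch of how these views could be wired up. The URL names match the reverse()/redirect() calls above; the patterns themselves are illustrative (django_qbe ships its own urls module):

from django.conf.urls import url

urlpatterns = [
    url(r'^$', qbe_form, name='qbe_form'),
    url(r'^results/(?P<query_hash>\w+)/$', qbe_results, name='qbe_results'),
    url(r'^export/(?P<query_hash>\w+)\.(?P<format>\w+)$', qbe_export,
        name='qbe_export'),
]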
|
{
"content_hash": "79e84e05f34a3c747b2a6be3535d725d",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 89,
"avg_line_length": 36.90731707317073,
"alnum_prop": 0.6086439333862014,
"repo_name": "versae/qbe",
"id": "d0736042a5c52e2fe9b2b03f5f20c1c45f495f9c",
"size": "7590",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_qbe/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6108"
},
{
"name": "HTML",
"bytes": "9655"
},
{
"name": "JavaScript",
"bytes": "72707"
},
{
"name": "Python",
"bytes": "64194"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('lexicon', '0008_auto_20151202_1146'),
]
operations = [
migrations.CreateModel(
name='LanguageBranches',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False,
auto_created=True, primary_key=True)),
('family_ix', models.IntegerField(blank=True)),
('level1_branch_ix', models.IntegerField(blank=True)),
('level1_branch_name', models.TextField(
unique=True, blank=True)),
],
),
]
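For reference, a sketch of the model state this migration creates (Meta options omitted; the canonical definition lives in the app's models module):

class LanguageBranches(models.Model):
    family_ix = models.IntegerField(blank=True)
    level1_branch_ix = models.IntegerField(blank=True)
    level1_branch_name = models.TextField(unique=True, blank=True)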
|
{
"content_hash": "912ed6fcf4bf9e7c28d67f9fad5a8e02",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 78,
"avg_line_length": 30.625,
"alnum_prop": 0.5346938775510204,
"repo_name": "lingdb/CoBL-public",
"id": "15d7cbb511a75563d242cb93c608081e5584f075",
"size": "759",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ielex/lexicon/migrations/0009_languagebranches.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "76222"
},
{
"name": "HTML",
"bytes": "558967"
},
{
"name": "JavaScript",
"bytes": "189642"
},
{
"name": "Python",
"bytes": "858438"
},
{
"name": "Shell",
"bytes": "1258"
},
{
"name": "TeX",
"bytes": "119143"
},
{
"name": "Vim script",
"bytes": "870"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import os
import platform
from . import option_list
if 'DIGITS_SERVER_NAME' in os.environ:
value = os.environ['DIGITS_SERVER_NAME']
else:
value = platform.node()
option_list['server_name'] = value
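Since the value is read once at import time, overriding it means setting the environment variable before this module is imported; a minimal sketch:

import os
os.environ['DIGITS_SERVER_NAME'] = 'my-digits-box'  # illustrative name
# Importing the module after this point stores 'my-digits-box'
# in option_list['server_name'] instead of platform.node().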
|
{
"content_hash": "ed818b237e52334a6dd5e6811e77bdd3",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 44,
"avg_line_length": 19.076923076923077,
"alnum_prop": 0.7137096774193549,
"repo_name": "ethantang95/DIGITS-GAN",
"id": "16946588304089dc400d9582e33688aff0935cb1",
"size": "317",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "digits/config/server_name.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4386"
},
{
"name": "HTML",
"bytes": "2638563"
},
{
"name": "JavaScript",
"bytes": "53916"
},
{
"name": "Lua",
"bytes": "110600"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "1235552"
},
{
"name": "Shell",
"bytes": "13054"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class Provider(Model):
"""Resource provider information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The provider ID.
:vartype id: str
:param namespace: The namespace of the resource provider.
:type namespace: str
:ivar registration_state: The registration state of the provider.
:vartype registration_state: str
:ivar resource_types: The collection of provider resource types.
:vartype resource_types: list of :class:`ProviderResourceType
<azure.mgmt.resource.resources.v2017_05_10.models.ProviderResourceType>`
"""
_validation = {
'id': {'readonly': True},
'registration_state': {'readonly': True},
'resource_types': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'namespace': {'key': 'namespace', 'type': 'str'},
'registration_state': {'key': 'registrationState', 'type': 'str'},
'resource_types': {'key': 'resourceTypes', 'type': '[ProviderResourceType]'},
}
def __init__(self, namespace=None):
self.id = None
self.namespace = namespace
self.registration_state = None
self.resource_types = None
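A short usage sketch: only namespace is settable client-side, while the readonly fields remain None until the server populates them during deserialization:

p = Provider(namespace='Microsoft.Communication')  # illustrative namespace
assert p.id is None and p.registration_state is None and p.resource_types is None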
|
{
"content_hash": "32a06c999d71fbf73a35bf94c60f67c4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 33.94736842105263,
"alnum_prop": 0.6364341085271318,
"repo_name": "v-iam/azure-sdk-for-python",
"id": "6f24dc41953bfa71aecdd9322fe320d5fa2d08eb",
"size": "1764",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-resource/azure/mgmt/resource/resources/v2017_05_10/models/provider.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19856874"
}
],
"symlink_target": ""
}
|
from cookies.resources.helpers import makeCookieHeader, setNoCacheAndCORSHeaders
from wptserve.utils import isomorphic_encode
def main(request, response):
"""Respond to `/cookies/resources/setSameSiteNone.py?{value}` by setting two cookies:
1. `samesite_none_insecure={value};SameSite=None;path=/`
2. `samesite_none_secure={value};SameSite=None;Secure;path=/`
"""
headers = setNoCacheAndCORSHeaders(request, response)
value = isomorphic_encode(request.url_parts.query)
headers.append(makeCookieHeader(b"samesite_none_insecure", value, {b"SameSite":b"None", b"path":b"/"}))
headers.append(makeCookieHeader(b"samesite_none_secure", value, {b"SameSite":b"None", b"Secure":b"", b"path":b"/"}))
return headers, b'{"success": true}'
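Assuming makeCookieHeader emits one Set-Cookie header per call, a request to /cookies/resources/setSameSiteNone.py?abc should yield roughly:

Set-Cookie: samesite_none_insecure=abc; SameSite=None; path=/
Set-Cookie: samesite_none_secure=abc; SameSite=None; Secure; path=/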
|
{
"content_hash": "a2950cd4aef93318c34d15052e3feea9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 120,
"avg_line_length": 47.9375,
"alnum_prop": 0.7249022164276402,
"repo_name": "ric2b/Vivaldi-browser",
"id": "446c75eb44735f328d13982d943fa6b1046e2fe8",
"size": "767",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "chromium/third_party/blink/web_tests/external/wpt/cookies/resources/setSameSiteNone.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from pysnmp.proto import error
from pysnmp.proto.secmod.rfc3826.priv import aes
from pysnmp.proto.secmod.rfc3414.auth import hmacmd5, hmacsha
from pysnmp.proto.secmod.rfc3414 import localkey
class Aes256(aes.Aes):
serviceID = (1, 3, 6, 1, 4, 1, 9, 12, 6, 1, 2) # cusmAESCfb256PrivProtocol
keySize = 32
# 3.1.2.1
def localizeKey(self, authProtocol, privKey, snmpEngineID):
if authProtocol == hmacmd5.HmacMd5.serviceID:
localPrivKey = localkey.localizeKeyMD5(privKey, snmpEngineID)
localPrivKey = localPrivKey + localkey.localizeKeyMD5(
localPrivKey, snmpEngineID
)
elif authProtocol == hmacsha.HmacSha.serviceID:
localPrivKey = localkey.localizeKeySHA(privKey, snmpEngineID)
localPrivKey = localPrivKey + localkey.localizeKeySHA(
localPrivKey, snmpEngineID
)
else:
raise error.ProtocolError(
'Unknown auth protocol %s' % (authProtocol,)
)
return localPrivKey[:32]
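A sketch of the key-extension step for the HMAC-SHA case: SHA-1 localization yields 20 bytes, so a second localization pass is concatenated before truncating to Aes256.keySize (32). The inputs below are illustrative stand-ins:

master_key = b'\x00' * 20                    # stand-in for the input private key
engine_id = b'\x80\x00\x00\x00test-engine'   # stand-in SNMP engine ID
local = localkey.localizeKeySHA(master_key, engine_id)
local = local + localkey.localizeKeySHA(local, engine_id)
print(len(local[:32]))                       # 32, the AES-256 key size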
|
{
"content_hash": "c6f37eb25f9f26b711cab3af0026f42d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 41.56,
"alnum_prop": 0.6361886429258903,
"repo_name": "xfguo/pysnmp",
"id": "874c83a58f2ac0d0be4d3f91e44dc3f1d433caaf",
"size": "1142",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysnmp/proto/secmod/eso/priv/aes256.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "804395"
}
],
"symlink_target": ""
}
|
"""
@file copy_make_border.py
@brief Sample code that shows the functionality of copyMakeBorder
"""
import sys
from random import randint
import cv2 as cv
def main(argv):
## [variables]
# First we declare the variables we are going to use
borderType = cv.BORDER_CONSTANT
window_name = "copyMakeBorder Demo"
## [variables]
## [load]
imageName = argv[0] if len(argv) > 0 else 'lena.jpg'
# Loads an image
src = cv.imread(cv.samples.findFile(imageName), cv.IMREAD_COLOR)
# Check if image is loaded fine
if src is None:
print ('Error opening image!')
print ('Usage: copy_make_border.py [image_name -- default lena.jpg] \n')
return -1
## [load]
# Brief how-to for this program
print ('\n'
'\t copyMakeBorder Demo: \n'
' -------------------- \n'
' ** Press \'c\' to set the border to a random constant value \n'
' ** Press \'r\' to set the border to be replicated \n'
' ** Press \'ESC\' to exit the program ')
## [create_window]
cv.namedWindow(window_name, cv.WINDOW_AUTOSIZE)
## [create_window]
## [init_arguments]
# Initialize arguments for the filter
top = int(0.05 * src.shape[0]) # shape[0] = rows
bottom = top
left = int(0.05 * src.shape[1]) # shape[1] = cols
right = left
## [init_arguments]
while 1:
## [update_value]
value = [randint(0, 255), randint(0, 255), randint(0, 255)]
## [update_value]
## [copymakeborder]
dst = cv.copyMakeBorder(src, top, bottom, left, right, borderType, None, value)
## [copymakeborder]
## [display]
cv.imshow(window_name, dst)
## [display]
## [check_keypress]
c = cv.waitKey(500)
if c == 27:
break
elif c == 99: # 99 = ord('c')
borderType = cv.BORDER_CONSTANT
elif c == 114: # 114 = ord('r')
borderType = cv.BORDER_REPLICATE
## [check_keypress]
return 0
if __name__ == "__main__":
main(sys.argv[1:])
|
{
"content_hash": "488130a1cfac70dc35cc87545b8fb845",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 87,
"avg_line_length": 30.231884057971016,
"alnum_prop": 0.5556088207094918,
"repo_name": "opencv/opencv",
"id": "ff1f3669c7472d2bfe707a03f94345eb1874f5ce",
"size": "2086",
"binary": false,
"copies": "2",
"ref": "refs/heads/4.x",
"path": "samples/python/tutorial_code/ImgTrans/MakeBorder/copy_make_border.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AIDL",
"bytes": "1986"
},
{
"name": "Batchfile",
"bytes": "1498"
},
{
"name": "C",
"bytes": "1543870"
},
{
"name": "C++",
"bytes": "35975082"
},
{
"name": "CMake",
"bytes": "1010867"
},
{
"name": "Cuda",
"bytes": "333437"
},
{
"name": "Dockerfile",
"bytes": "309"
},
{
"name": "HTML",
"bytes": "40027"
},
{
"name": "Java",
"bytes": "774232"
},
{
"name": "JavaScript",
"bytes": "233673"
},
{
"name": "Kotlin",
"bytes": "5204"
},
{
"name": "Objective-C",
"bytes": "100731"
},
{
"name": "Objective-C++",
"bytes": "392600"
},
{
"name": "Perl",
"bytes": "15865"
},
{
"name": "PowerShell",
"bytes": "14591"
},
{
"name": "Prolog",
"bytes": "843"
},
{
"name": "Python",
"bytes": "1038154"
},
{
"name": "Shell",
"bytes": "22738"
},
{
"name": "Swift",
"bytes": "301765"
},
{
"name": "TeX",
"bytes": "3530"
}
],
"symlink_target": ""
}
|
"""General implementation of walking commits and their contents."""
from collections import defaultdict
import collections
import heapq
from itertools import chain
from dulwich.diff_tree import (
RENAME_CHANGE_TYPES,
tree_changes,
tree_changes_for_merge,
RenameDetector,
)
from dulwich.errors import (
MissingCommitError,
)
ORDER_DATE = 'date'
ORDER_TOPO = 'topo'
ALL_ORDERS = (ORDER_DATE, ORDER_TOPO)
# Maximum number of commits to walk past a commit time boundary.
_MAX_EXTRA_COMMITS = 5
class WalkEntry(object):
"""Object encapsulating a single result from a walk."""
def __init__(self, walker, commit):
self.commit = commit
self._store = walker.store
self._get_parents = walker.get_parents
self._changes = None
self._rename_detector = walker.rename_detector
def changes(self):
"""Get the tree changes for this entry.
:return: For commits with up to one parent, a list of TreeChange
objects; if the commit has no parents, these will be relative to the
empty tree. For merge commits, a list of lists of TreeChange
        objects; see dulwich.diff_tree.tree_changes_for_merge.
"""
if self._changes is None:
commit = self.commit
if not self._get_parents(commit):
changes_func = tree_changes
parent = None
elif len(self._get_parents(commit)) == 1:
changes_func = tree_changes
parent = self._store[self._get_parents(commit)[0]].tree
else:
changes_func = tree_changes_for_merge
parent = [self._store[p].tree for p in self._get_parents(commit)]
self._changes = list(changes_func(
self._store, parent, commit.tree,
rename_detector=self._rename_detector))
return self._changes
def __repr__(self):
return '<WalkEntry commit=%s, changes=%r>' % (
self.commit.id, self.changes())
class _CommitTimeQueue(object):
"""Priority queue of WalkEntry objects by commit time."""
def __init__(self, walker):
self._walker = walker
self._store = walker.store
self._get_parents = walker.get_parents
self._excluded = walker.excluded
self._pq = []
self._pq_set = set()
self._seen = set()
self._done = set()
self._min_time = walker.since
self._last = None
self._extra_commits_left = _MAX_EXTRA_COMMITS
self._is_finished = False
for commit_id in chain(walker.include, walker.excluded):
self._push(commit_id)
def _push(self, commit_id):
try:
commit = self._store[commit_id]
except KeyError:
raise MissingCommitError(commit_id)
if commit_id not in self._pq_set and commit_id not in self._done:
heapq.heappush(self._pq, (-commit.commit_time, commit))
self._pq_set.add(commit_id)
self._seen.add(commit_id)
def _exclude_parents(self, commit):
excluded = self._excluded
seen = self._seen
todo = [commit]
while todo:
commit = todo.pop()
for parent in self._get_parents(commit):
if parent not in excluded and parent in seen:
# TODO: This is inefficient unless the object store does
# some caching (which DiskObjectStore currently does not).
# We could either add caching in this class or pass around
# parsed queue entry objects instead of commits.
todo.append(self._store[parent])
excluded.add(parent)
def next(self):
if self._is_finished:
return None
while self._pq:
_, commit = heapq.heappop(self._pq)
sha = commit.id
self._pq_set.remove(sha)
if sha in self._done:
continue
self._done.add(sha)
for parent_id in self._get_parents(commit):
self._push(parent_id)
reset_extra_commits = True
is_excluded = sha in self._excluded
if is_excluded:
self._exclude_parents(commit)
if self._pq and all(c.id in self._excluded
for _, c in self._pq):
_, n = self._pq[0]
if self._last and n.commit_time >= self._last.commit_time:
# If the next commit is newer than the last one, we need
# to keep walking in case its parents (which we may not
# have seen yet) are excluded. This gives the excluded
# set a chance to "catch up" while the commit is still
# in the Walker's output queue.
reset_extra_commits = True
else:
reset_extra_commits = False
if (self._min_time is not None and
commit.commit_time < self._min_time):
# We want to stop walking at min_time, but commits at the
# boundary may be out of order with respect to their parents. So
# we walk _MAX_EXTRA_COMMITS more commits once we hit this
# boundary.
reset_extra_commits = False
if reset_extra_commits:
# We're not at a boundary, so reset the counter.
self._extra_commits_left = _MAX_EXTRA_COMMITS
else:
self._extra_commits_left -= 1
if not self._extra_commits_left:
break
if not is_excluded:
self._last = commit
return WalkEntry(self._walker, commit)
self._is_finished = True
return None
__next__ = next
class Walker(object):
"""Object for performing a walk of commits in a store.
Walker objects are initialized with a store and other options and can then
be treated as iterators of Commit objects.
"""
def __init__(self, store, include, exclude=None, order=ORDER_DATE,
reverse=False, max_entries=None, paths=None,
rename_detector=None, follow=False, since=None, until=None,
get_parents=lambda commit: commit.parents,
queue_cls=_CommitTimeQueue):
"""Constructor.
:param store: ObjectStore instance for looking up objects.
:param include: Iterable of SHAs of commits to include along with their
ancestors.
:param exclude: Iterable of SHAs of commits to exclude along with their
ancestors, overriding includes.
:param order: ORDER_* constant specifying the order of results. Anything
other than ORDER_DATE may result in O(n) memory usage.
:param reverse: If True, reverse the order of output, requiring O(n)
memory.
:param max_entries: The maximum number of entries to yield, or None for
no limit.
:param paths: Iterable of file or subtree paths to show entries for.
:param rename_detector: diff.RenameDetector object for detecting
renames.
:param follow: If True, follow path across renames/copies. Forces a
default rename_detector.
:param since: Timestamp to list commits after.
:param until: Timestamp to list commits before.
:param get_parents: Method to retrieve the parents of a commit
:param queue_cls: A class to use for a queue of commits, supporting the
iterator protocol. The constructor takes a single argument, the
Walker.
"""
# Note: when adding arguments to this method, please also update
# dulwich.repo.BaseRepo.get_walker
if order not in ALL_ORDERS:
raise ValueError('Unknown walk order %s' % order)
self.store = store
if not isinstance(include, list):
include = [include]
self.include = include
self.excluded = set(exclude or [])
self.order = order
self.reverse = reverse
self.max_entries = max_entries
        self.paths = set(paths) if paths else None
if follow and not rename_detector:
rename_detector = RenameDetector(store)
self.rename_detector = rename_detector
self.get_parents = get_parents
self.follow = follow
self.since = since
self.until = until
self._num_entries = 0
self._queue = queue_cls(self)
self._out_queue = collections.deque()
def _path_matches(self, changed_path):
if changed_path is None:
return False
for followed_path in self.paths:
if changed_path == followed_path:
return True
if (changed_path.startswith(followed_path) and
changed_path[len(followed_path)] == b'/'[0]):
return True
return False
def _change_matches(self, change):
if not change:
return False
old_path = change.old.path
new_path = change.new.path
if self._path_matches(new_path):
if self.follow and change.type in RENAME_CHANGE_TYPES:
self.paths.add(old_path)
self.paths.remove(new_path)
return True
elif self._path_matches(old_path):
return True
return False
def _should_return(self, entry):
"""Determine if a walk entry should be returned..
:param entry: The WalkEntry to consider.
:return: True if the WalkEntry should be returned by this walk, or False
otherwise (e.g. if it doesn't match any requested paths).
"""
commit = entry.commit
if self.since is not None and commit.commit_time < self.since:
return False
if self.until is not None and commit.commit_time > self.until:
return False
if commit.id in self.excluded:
return False
if self.paths is None:
return True
if len(self.get_parents(commit)) > 1:
for path_changes in entry.changes():
# For merge commits, only include changes with conflicts for
# this path. Since a rename conflict may include different
# old.paths, we have to check all of them.
for change in path_changes:
if self._change_matches(change):
return True
else:
for change in entry.changes():
if self._change_matches(change):
return True
        return False  # per the docstring: True or False, not None
def _next(self):
max_entries = self.max_entries
while max_entries is None or self._num_entries < max_entries:
entry = next(self._queue)
if entry is not None:
self._out_queue.append(entry)
if entry is None or len(self._out_queue) > _MAX_EXTRA_COMMITS:
if not self._out_queue:
return None
entry = self._out_queue.popleft()
if self._should_return(entry):
self._num_entries += 1
return entry
return None
def _reorder(self, results):
"""Possibly reorder a results iterator.
:param results: An iterator of WalkEntry objects, in the order returned
from the queue_cls.
:return: An iterator or list of WalkEntry objects, in the order required
by the Walker.
"""
if self.order == ORDER_TOPO:
results = _topo_reorder(results, self.get_parents)
if self.reverse:
results = reversed(list(results))
return results
def __iter__(self):
return iter(self._reorder(iter(self._next, None)))
def _topo_reorder(entries, get_parents=lambda commit: commit.parents):
"""Reorder an iterable of entries topologically.
This works best assuming the entries are already in almost-topological
order, e.g. in commit time order.
:param entries: An iterable of WalkEntry objects.
:param get_parents: Optional function for getting the parents of a commit.
:return: iterator over WalkEntry objects from entries in FIFO order, except
where a parent would be yielded before any of its children.
"""
todo = collections.deque()
pending = {}
num_children = defaultdict(int)
for entry in entries:
todo.append(entry)
for p in get_parents(entry.commit):
num_children[p] += 1
while todo:
entry = todo.popleft()
commit = entry.commit
commit_id = commit.id
if num_children[commit_id]:
pending[commit_id] = entry
continue
for parent_id in get_parents(commit):
num_children[parent_id] -= 1
if not num_children[parent_id]:
parent_entry = pending.pop(parent_id, None)
if parent_entry:
todo.appendleft(parent_entry)
yield entry
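A hedged usage sketch: walking the ten most recent commits of a local repository (Repo comes from dulwich.repo and is not imported by this module):

from dulwich.repo import Repo

repo = Repo('.')
for entry in Walker(repo.object_store, include=[repo.head()], max_entries=10):
    print(entry.commit.id, entry.commit.message.splitlines()[0])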
|
{
"content_hash": "83eca600c0318c552c0e69946851e38e",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 81,
"avg_line_length": 37.41242937853107,
"alnum_prop": 0.5738447598912715,
"repo_name": "KaranToor/MA450",
"id": "b5997db75a32c820107ce517c9ab1103dcac0d17",
"size": "14056",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/third_party/dulwich/walk.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
}
|
from os import fstat
from datetime import datetime
from os.path import join, exists, abspath
from six.moves.urllib.parse import unquote
from tornado.concurrent import return_future
from thumbor.loaders import LoaderResult
@return_future
def load(context, path, callback):
file_path = join(
context.config.FILE_LOADER_ROOT_PATH.rstrip('/'), path.lstrip('/'))
file_path = abspath(file_path)
inside_root_path = file_path.startswith(
abspath(context.config.FILE_LOADER_ROOT_PATH))
result = LoaderResult()
if not inside_root_path:
result.error = LoaderResult.ERROR_NOT_FOUND
result.successful = False
callback(result)
return
# keep backwards compatibility, try the actual path first
# if not found, unquote it and try again
if not exists(file_path):
file_path = unquote(file_path)
if exists(file_path):
        with open(file_path, 'rb') as f:  # binary mode: the buffer carries raw image bytes
stats = fstat(f.fileno())
result.successful = True
result.buffer = f.read()
result.metadata.update(
size=stats.st_size,
updated_at=datetime.utcfromtimestamp(stats.st_mtime))
else:
result.error = LoaderResult.ERROR_NOT_FOUND
result.successful = False
callback(result)
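A minimal sketch exercising the loader with stubbed-out objects, assuming FILE_LOADER_ROOT_PATH points at a directory containing image.jpg (the stub classes are illustrative, not thumbor API):

class _Config(object):
    FILE_LOADER_ROOT_PATH = '/var/images'

class _Context(object):
    config = _Config()

def on_loaded(result):
    print(result.successful, result.metadata.get('size'))

load(_Context(), 'image.jpg', callback=on_loaded)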
|
{
"content_hash": "c393d0666f456c40852b35f12967b2f4",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 75,
"avg_line_length": 28.52173913043478,
"alnum_prop": 0.6516768292682927,
"repo_name": "okor/thumbor",
"id": "88bc5e1db0bcd764e3a3ee1907a95f8874b74246",
"size": "1564",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thumbor/loaders/file_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "62441"
},
{
"name": "JavaScript",
"bytes": "409"
},
{
"name": "Makefile",
"bytes": "10255"
},
{
"name": "Python",
"bytes": "594069"
}
],
"symlink_target": ""
}
|
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar, Union
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._domains_operations import build_cancel_verification_request_initial, build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_initiate_verification_request_initial, build_list_by_email_service_resource_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DomainsOperations:
"""DomainsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.communication.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
**kwargs: Any
) -> "_models.DomainResource":
"""Get.
Get the Domains resource and its properties.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DomainResource, or the result of cls(response)
:rtype: ~azure.mgmt.communication.models.DomainResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.DomainResource",
**kwargs: Any
) -> "_models.DomainResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'DomainResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('DomainResource', pipeline_response)
if response.status_code == 201:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('DomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.DomainResource",
**kwargs: Any
) -> AsyncLROPoller["_models.DomainResource"]:
"""Create Or Update.
Add a new Domains resource under the parent EmailService resource or update an existing Domains
resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:param parameters: Parameters for the create or update operation.
:type parameters: ~azure.mgmt.communication.models.DomainResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DomainResource or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.communication.models.DomainResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
async def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 202:
response_headers['location']=self._deserialize('str', response.headers.get('location'))
if cls:
return cls(pipeline_response, None, response_headers)
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
@distributed_trace_async
async def begin_delete( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete.
Operation to delete a Domains resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.UpdateDomainRequestParameters",
**kwargs: Any
) -> "_models.DomainResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'UpdateDomainRequestParameters')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
if response.status_code == 200:
deserialized = self._deserialize('DomainResource', pipeline_response)
if response.status_code == 201:
response_headers['Azure-AsyncOperation']=self._deserialize('str', response.headers.get('Azure-AsyncOperation'))
deserialized = self._deserialize('DomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.UpdateDomainRequestParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.DomainResource"]:
"""Update.
Operation to update an existing Domains resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:param parameters: Parameters for the update operation.
:type parameters: ~azure.mgmt.communication.models.UpdateDomainRequestParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DomainResource or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.communication.models.DomainResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResource"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('DomainResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}"} # type: ignore
@distributed_trace
def list_by_email_service_resource(
self,
resource_group_name: str,
email_service_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DomainResourceList"]:
"""List by EmailService.
Handles requests to list all Domains resources under the parent EmailServices resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DomainResourceList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.communication.models.DomainResourceList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
cls = kwargs.pop('cls', None) # type: ClsType["_models.DomainResourceList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_email_service_resource_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
api_version=api_version,
template_url=self.list_by_email_service_resource.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_email_service_resource_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
api_version=api_version,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DomainResourceList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_email_service_resource.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains"} # type: ignore
async def _initiate_verification_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.VerificationParameter",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VerificationParameter')
request = build_initiate_verification_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._initiate_verification_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
response_headers['location']=self._deserialize('str', response.headers.get('location'))
if cls:
return cls(pipeline_response, None, response_headers)
_initiate_verification_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}/initiateVerification"} # type: ignore
@distributed_trace_async
async def begin_initiate_verification( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.VerificationParameter",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Initiate Verification.
Initiate verification of DNS record.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:param parameters: Type of verification to be initiated.
:type parameters: ~azure.mgmt.communication.models.VerificationParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._initiate_verification_initial(
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_initiate_verification.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}/initiateVerification"} # type: ignore
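    # Hedged usage sketch (illustrative, not part of the generated client): it
    # assumes azure-identity is installed and that the async
    # CommunicationServiceManagementClient exposes this operations group as
    # ``client.domains``; the subscription, resource names and verification
    # type below are placeholders.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.communication.aio import CommunicationServiceManagementClient
    #   from azure.mgmt.communication import models
    #
    #   async with CommunicationServiceManagementClient(
    #           DefaultAzureCredential(), "<subscription-id>") as client:
    #       poller = await client.domains.begin_initiate_verification(
    #           resource_group_name="<resource-group>",
    #           email_service_name="<email-service>",
    #           domain_name="<domain>",
    #           parameters=models.VerificationParameter(verification_type="SPF"),
    #       )
    #       await poller.result()  # returns None once the LRO completes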
async def _cancel_verification_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.VerificationParameter",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'VerificationParameter')
request = build_cancel_verification_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._cancel_verification_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
response_headers = {}
        response_headers['location'] = self._deserialize('str', response.headers.get('location'))
if cls:
return cls(pipeline_response, None, response_headers)
_cancel_verification_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}/cancelVerification"} # type: ignore
@distributed_trace_async
async def begin_cancel_verification( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
email_service_name: str,
domain_name: str,
parameters: "_models.VerificationParameter",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Cancel Verification.
Cancel verification of DNS record.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param email_service_name: The name of the EmailService resource.
:type email_service_name: str
:param domain_name: The name of the Domains resource.
:type domain_name: str
:param parameters: Type of verification to be canceled.
:type parameters: ~azure.mgmt.communication.models.VerificationParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
api_version = kwargs.pop('api_version', "2021-10-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._cancel_verification_initial(
resource_group_name=resource_group_name,
email_service_name=email_service_name,
domain_name=domain_name,
parameters=parameters,
api_version=api_version,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_cancel_verification.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Communication/emailServices/{emailServiceName}/domains/{domainName}/cancelVerification"} # type: ignore
|
{
"content_hash": "cc40fbbb394910ad498f55648b9734be",
"timestamp": "",
"source": "github",
"line_count": 838,
"max_line_length": 297,
"avg_line_length": 46.68615751789976,
"alnum_prop": 0.6471896326968791,
"repo_name": "Azure/azure-sdk-for-python",
"id": "b5571bdd111c157681d305fc49d16082c327ffd3",
"size": "39623",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/communication/azure-mgmt-communication/azure/mgmt/communication/aio/operations/_domains_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""Utilities to setup a mingw based build setup for Python projects."""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: MIT
from __future__ import print_function
import os.path as op
import os
import sys
from subprocess import check_output
import time
import tarfile
import shutil
try:
from urllib.request import urlretrieve
except ImportError:
# Python 2 compat
from urllib import urlretrieve
WINEPREFIX_PATTERN = 'wine-py{version}-{arch}'
PYTHON_MSI_PATTERN = "python-{version}{arch_marker}.msi"
PYTHON_URL_PATTERN = ("https://www.python.org/ftp/python/{version}/"
+ PYTHON_MSI_PATTERN)
GET_PIP_SCRIPT = 'get-pip.py'
GET_PIP_URL = "https://bootstrap.pypa.io/get-pip.py"
GCC_VERSION = '4.9.2'
MINGW_FILE_PATTERN = "mingw{arch}static-{version}.tar.xz"
MINGW_URL_PATTERN = ("https://bitbucket.org/carlkl/mingw-w64-for-python/"
"downloads/" + MINGW_FILE_PATTERN)
ENV_REGISTRY_KEY = b"[HKEY_CURRENT_USER\\Environment]"
DISTUTILS_CFG_CONTENT = u"""\
[build]
compiler=mingw32
"""
DISTUTILS_CFG_ISSUE_4709_CONTENT = u"""\
[build_ext]
define=MS_WIN64
"""
def run(command, prepend_wine='auto', *args, **kwargs):
"""Execute a windows command (using wine under Linux)"""
if ((prepend_wine == 'auto' and sys.platform != 'win32')
or prepend_wine is True):
command = ['wine'] + command
# Use the windows shell under Windows to get the PATH-based command
# resolution. Under Linux, stick to wine and its fake registry.
shell = sys.platform == 'win32'
print("=> " + " ".join(command), flush=True)
return check_output(command, *args, stderr=sys.stdout, shell=shell,
**kwargs)
def unix_path(path, env=None):
if sys.platform == 'win32':
# Nothing to do under Windows
return path
# Under Linux, compute the Linux path from the virtual windows path
return run(['winepath', '--unix', path], env=env).decode('utf-8').strip()
def windows_path(path, env=None):
if sys.platform == 'win32':
# Nothing to do under Windows
return path
# Under Linux, compute the virtual Windows path from the concrete Linux
# path
return run(['winepath', '--windows', path],
env=env).decode('utf-8').strip()
def set_env_in_registry(attribute, value, env=None):
"""Edit the wine registry to configure an environment variable"""
print("Setting '%s'='%s'" % (attribute, value))
# Prepare a '.reg' file with the new parameters
filename = '_custom_path.reg'
value_line = u'"%s"="%s"' % (attribute, value.replace(u'\\', u'\\\\'))
value_ascii = value_line.encode('ascii')
with open(filename, 'wb') as f:
f.write(ENV_REGISTRY_KEY)
f.write(b'\r\n')
f.write(value_ascii)
f.write(b'\r\n')
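    # For reference, the generated '_custom_path.reg' ends up looking like the
    # sketch below (illustrative PATH value; the backslashes are doubled by
    # the code above):
    #
    #   [HKEY_CURRENT_USER\Environment]
    #   "PATH"="C:\\Python34;C:\\Python34\\Scripts;C:\\mingw-static\\bin"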
# Use regedit to load the new configuration
command = ['regedit', '/s', filename]
run(command, env=env, prepend_wine=False)
if sys.platform != 'win32':
# XXX [hackish]: Wait for regedit to apply those updates under wine
print("Waiting for registry to get updated...")
if env is None or 'WINEPREFIX' not in env:
user_reg = op.expanduser(op.join('~', '.wine', 'user.reg'))
else:
user_reg = op.join(env['WINEPREFIX'], 'user.reg')
for i in range(100):
if op.exists(user_reg):
with open(user_reg, 'rb') as f:
if value_ascii in f.read():
print('registry updated')
break
print('.', end='', flush=True)
sys.stdout.flush()
time.sleep(1)
def make_path(python_home, mingw_home):
python_path = "{python_home};{python_home}\\Scripts".format(**locals())
mingw_path = "{mingw_home}\\bin".format(**locals())
return ";".join([python_path, mingw_path])
def download_python(version, arch, download_folder='.', env=None):
if arch == "32":
arch_marker = ""
elif arch == "64":
arch_marker = ".amd64"
else:
raise ValueError("Unsupported windows architecture: %s" % arch)
url = PYTHON_URL_PATTERN.format(version=version, arch_marker=arch_marker)
filename = PYTHON_MSI_PATTERN.format(
version=version, arch_marker=arch_marker)
if not op.exists(download_folder):
os.makedirs(download_folder)
filepath = op.abspath(op.join(download_folder, filename))
if not op.exists(filepath):
print("Downloading %s to %s" % (url, filepath), flush=True)
urlretrieve(url, filepath)
return filepath
def install_python(python_home, version, arch, download_folder='.', env=None):
local_python_folder = unix_path(python_home, env=env)
if not op.exists(local_python_folder):
python_msi_filepath = download_python(version, arch,
download_folder=download_folder)
# Install the Python MSI
print('Installing Python %s (%s bit) to %s' % (
version, arch, python_home))
python_msi_filepath = windows_path(python_msi_filepath, env=env)
command = ['msiexec', '/qn', '/i', python_msi_filepath,
'/log', 'msi_install.log', 'TARGETDIR=%s' % python_home]
run(command, env=env)
# Install / upgrade pip
if not op.exists(download_folder):
os.makedirs(download_folder)
getpip_filepath = op.abspath(op.join(download_folder, GET_PIP_SCRIPT))
if not op.exists(getpip_filepath):
print("Downloading %s to %s" % (GET_PIP_URL, getpip_filepath),
flush=True)
urlretrieve(GET_PIP_URL, getpip_filepath)
getpip_filepath = windows_path(getpip_filepath, env=env)
run([python_home + '\\python', getpip_filepath], env=env)
run([python_home + '\\python', '-m', 'pip', 'install', '--upgrade',
'pip'], env=env)
def download_mingw(mingw_version="2014-11", arch="64", download_folder='.'):
filename = MINGW_FILE_PATTERN.format(arch=arch, version=mingw_version)
url = MINGW_URL_PATTERN.format(arch=arch, version=mingw_version)
if not op.exists(download_folder):
os.makedirs(download_folder)
filepath = op.abspath(op.join(download_folder, filename))
if not op.exists(filepath):
print("Downloading %s to %s" % (url, filepath), flush=True)
urlretrieve(url, filepath)
return filepath
def install_mingw(mingw_home, mingw_version="2014-11", arch="64",
download_folder='.', env=None):
# XXX: This function only works under Python 3.3+ that has native support
# for extracting .tar.xz archives with the LZMA compression library.
mingw_home_path = unix_path(mingw_home, env=env)
if not op.exists(mingw_home_path):
mingw_filepath = download_mingw(
mingw_version=mingw_version, arch=arch,
download_folder=download_folder)
tmp_mingw_folder = op.join(download_folder, 'mingw%sstatic' % arch)
if not op.exists(tmp_mingw_folder):
print("Extracting %s..." % mingw_filepath, flush=True)
with tarfile.open(mingw_filepath) as f:
f.extractall(download_folder)
print("Installing mingw to %s..." % mingw_home, flush=True)
shutil.move(tmp_mingw_folder, mingw_home_path)
def configure_mingw(mingw_home, python_home, python_version, arch, env=None):
mingw_home_path = unix_path(mingw_home, env=env)
python_home_path = unix_path(python_home, env=env)
v_major, v_minor = tuple(int(x) for x in python_version.split('.')[:2])
mingw_bin = mingw_home + "\\bin\\"
# Generate the libpythonXX.dll.a archive
dlla_name = 'libpython%d%d.dll.a' % (v_major, v_minor)
dlla_path = op.join(python_home_path, 'libs', dlla_name)
if not op.exists(dlla_path):
print('Generating %s from %s' % (dlla_name, python_home))
dll_name = 'python%d%d.dll' % (v_major, v_minor)
def_name = 'python%d%d.def' % (v_major, v_minor)
# Look for the Python dll in the Python folder.
dll_win_path = python_home + '\\' + dll_name
if not op.exists(unix_path(dll_win_path, env=env)):
print('Python dll not found in %s' % dll_win_path)
# Look for a copy of the Python dll installed in the system folder:
            if arch == '64':
                # On a 64 bit system, 64 bit DLLs are stored in System32...
                dll_win_path = 'C:\\Windows\\System32\\' + dll_name
            else:
                # ...while 32 bit DLLs are redirected to SysWoW64
                dll_win_path = 'C:\\Windows\\SysWoW64\\' + dll_name
if not op.exists(unix_path(dll_win_path, env=env)):
raise RuntimeError("Could not find %s" % dll_win_path)
run([mingw_bin + 'gendef', dll_win_path], env=env)
run([mingw_bin + 'dlltool', '-D', dll_win_path, '-d', def_name, '-l',
dlla_name], env=env)
print("Moving %s to %s" % (dlla_name, dlla_path), flush=True)
shutil.move(dlla_name, dlla_path)
    # Install a distutils.cfg file to select mingw as the default compiler
    # (useful for pip in particular)
distutils_cfg = op.join(python_home_path, 'Lib', 'distutils',
'distutils.cfg')
print("Setting mingw as the default compiler in %s" % distutils_cfg,
flush=True)
with open(distutils_cfg, 'w') as f:
f.write(DISTUTILS_CFG_CONTENT)
# Use the correct MSVC runtime depending on the arch and the Python version
if arch == '64':
arch_folder = 'x86_64-w64-mingw32'
elif arch == '32':
arch_folder = 'i686-w64-mingw32'
else:
raise ValueError("Unsupported architecture: %s" % arch)
vc_tag = '100' if v_major == 3 else '90'
libmsvcr = 'libmsvcr%s.a' % vc_tag
specs = 'specs%s' % vc_tag
# Copy the msvc runtime library
libmsvcr_path = op.join(mingw_home_path, arch_folder, 'lib', libmsvcr)
libs_folder = op.join(python_home_path, 'libs')
print('Copying %s to %s' % (libmsvcr_path, libs_folder),
flush=True)
shutil.copy2(libmsvcr_path, libs_folder)
# Configure the msvc runtime specs file
specs_folder = op.join(mingw_home_path, 'lib', 'gcc', arch_folder,
GCC_VERSION)
specs_source_path = op.join(specs_folder, specs)
specs_target_path = op.join(specs_folder, 'specs')
print('Copying %s to %s' % (specs_source_path, specs_target_path),
flush=True)
shutil.copy2(specs_source_path, specs_target_path)
def fix_issue_4709(python_home, python_version, arch, env=None):
# http://bugs.python.org/issue4709
if arch == "64" and python_version.startswith('2.'):
python_home_path = unix_path(python_home, env=env)
distutils_cfg = op.join(python_home_path, 'Lib', 'distutils',
'distutils.cfg')
print("Setting workaround for issue 4709 in %s" % distutils_cfg,
flush=True)
with open(distutils_cfg, 'a') as f:
f.write(DISTUTILS_CFG_ISSUE_4709_CONTENT)
def make_wine_env(python_version, python_arch, wine_prefix_root=None):
"""Set the wineprefix environment"""
env = os.environ.copy()
if sys.platform == 'win32':
# Do nothing under Windows
return env
if wine_prefix_root is not None:
wine_prefix_root = op.abspath(wine_prefix_root)
if not op.exists(wine_prefix_root):
os.makedirs(wine_prefix_root)
wine_prefix = WINEPREFIX_PATTERN.format(
version=python_version, arch=python_arch)
env['WINEPREFIX'] = op.join(wine_prefix_root, wine_prefix)
if python_arch == '32':
        # wine 64 has many bugs when running 32 bit apps; force the creation
        # of a 32 bit wine prefix instead
env['WINEARCH'] = 'win32'
return env
def setup_wine_env(python_home, python_version, python_arch, mingw_home,
wine_prefix_root=None, download_folder='downloads'):
env = make_wine_env(python_version, python_arch,
wine_prefix_root=wine_prefix_root)
install_python(python_home, python_version, python_arch,
download_folder=download_folder, env=env)
install_mingw(mingw_home, arch=python_arch,
download_folder=download_folder, env=env)
custom_path = make_path(python_home, mingw_home)
if sys.platform == 'win32':
# Under Windows, prepend the existing PATH with the new folders in
# in the current process environment
env['PATH'] = custom_path + ";" + env['PATH']
else:
# Under wine: use the registry to setup the path
set_env_in_registry(u'PATH', custom_path, env=env)
configure_mingw(mingw_home, python_home, python_version, python_arch,
env=env)
fix_issue_4709(python_home, python_version, python_arch, env=env)
# Sanity check to make sure that python and gcc are in the PATH
run(['python', '--version'], env=env)
run(['gcc', '--version'], env=env)
def setup_configure_from_yaml(config_filename):
import yaml
with open(config_filename) as f:
        config = yaml.safe_load(f)
wine_prefix_root = config.get('wine_prefix_root', 'wine')
environments = config.get('matrix', ())
for environment in environments:
python_home = environment['python_home']
python_version = environment['python_version']
python_arch = environment['python_arch']
mingw_home = environment['mingw_home']
download_folder = environment.get('DOWNLOAD_FOLDER', '.')
setup_wine_env(python_home, python_version, python_arch, mingw_home,
wine_prefix_root=wine_prefix_root,
download_folder=download_folder)
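# Hedged sketch (not used by the script): the YAML consumed by
# setup_configure_from_yaml() maps onto a dict like the one below; the keys
# are the ones read in the loop above, the values are placeholders.
_EXAMPLE_CONFIG = {
    'wine_prefix_root': 'wine',
    'matrix': [
        {
            'python_home': 'C:\\Python34-x64',
            'python_version': '3.4.2',
            'python_arch': '64',
            'mingw_home': 'C:\\mingw-static',
            'DOWNLOAD_FOLDER': 'downloads',
        },
    ],
}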
if __name__ == "__main__":
if len(sys.argv) > 1:
# Setup one isolated WINEPREFIX install per configuration
# provided in a YAML formatted specification.
setup_configure_from_yaml(sys.argv[1])
else:
# Perform one setup using environment variables. The WINEPREFIX
# environment variable should be defined externally if needed.
try:
python_home = os.environ['PYTHON_HOME']
python_version = os.environ['PYTHON_VERSION']
python_arch = os.environ['ARCH']
except KeyError as e:
print("pywinbuilder require configuration as"
" environment variable: %s" % e)
sys.exit(1)
mingw_home = os.environ.get('MINGW_HOME', 'C:\\mingw-static')
download_folder = os.environ.get('DOWNLOAD_FOLDER', '.')
setup_wine_env(python_home, python_version, python_arch, mingw_home,
download_folder=download_folder)
|
{
"content_hash": "5e321c03b443d97d9ef60e95bb7a0ea8",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 40.49175824175824,
"alnum_prop": 0.6174095935952235,
"repo_name": "ogrisel/python-winbuilder",
"id": "933c56746b03f1cb47b326e720f8a114e09afc03",
"size": "14739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pywinbuilder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15816"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from simple_parsing import ArgumentParser, subgroups
@dataclass
class ModelConfig:
...
@dataclass
class DatasetConfig:
...
@dataclass
class ModelAConfig(ModelConfig):
lr: float = 3e-4
optimizer: str = "Adam"
betas: tuple[float, float] = (0.9, 0.999)
@dataclass
class ModelBConfig(ModelConfig):
lr: float = 1e-3
optimizer: str = "SGD"
momentum: float = 1.234
@dataclass
class Dataset1Config(DatasetConfig):
data_dir: str | Path = "data/foo"
foo: bool = False
@dataclass
class Dataset2Config(DatasetConfig):
data_dir: str | Path = "data/bar"
bar: float = 1.2
@dataclass
class Config:
# Which model to use
model: ModelConfig = subgroups(
{"model_a": ModelAConfig, "model_b": ModelBConfig},
default=ModelAConfig(),
)
# Which dataset to use
dataset: DatasetConfig = subgroups(
{"dataset_1": Dataset1Config, "dataset_2": Dataset2Config},
default=Dataset2Config(),
)
parser = ArgumentParser()
parser.add_arguments(Config, dest="config")
args = parser.parse_args()
config: Config = args.config
print(config)
expected = """
Config(model=ModelAConfig(lr=0.0003, optimizer='Adam', betas=(0.9, 0.999)), dataset=Dataset2Config(data_dir='data/bar', bar=1.2))
"""
parser.print_help()
expected += """
usage: subgroups_example.py [-h] [--model {model_a,model_b}] [--dataset {dataset_1,dataset_2}] [--model.lr float] [--model.optimizer str] [--model.betas float float]
[--dataset.data_dir str|Path] [--dataset.bar float]
options:
-h, --help show this help message and exit
Config ['config']:
Config(model: 'ModelConfig' = ModelAConfig(lr=0.0003, optimizer='Adam', betas=(0.9, 0.999)), dataset: 'DatasetConfig' = Dataset2Config(data_dir='data/bar', bar=1.2))
--model {model_a,model_b}
Which model to use (default: ModelAConfig(lr=0.0003, optimizer='Adam', betas=(0.9, 0.999)))
--dataset {dataset_1,dataset_2}
Which dataset to use (default: Dataset2Config(data_dir='data/bar', bar=1.2))
ModelAConfig ['config.model']:
ModelAConfig(lr: 'float' = 0.0003, optimizer: 'str' = 'Adam', betas: 'tuple[float, float]' = (0.9, 0.999))
--model.lr float (default: 0.0003)
--model.optimizer str
(default: Adam)
--model.betas float float
(default: (0.9, 0.999))
Dataset2Config ['config.dataset']:
Dataset2Config(data_dir: 'str | Path' = 'data/bar', bar: 'float' = 1.2)
--dataset.data_dir str|Path
(default: data/bar)
--dataset.bar float (default: 1.2)
"""
|
{
"content_hash": "43f935f3a78ae1092f1406b1e0881b9f",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 167,
"avg_line_length": 26.58252427184466,
"alnum_prop": 0.6322132943754566,
"repo_name": "lebrice/SimpleParsing",
"id": "a5c75d576abce410cf30f4febd0f5a103e519e60",
"size": "2738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/subgroups/subgroups_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "607221"
}
],
"symlink_target": ""
}
|
import pytest
import sys
# no graph-tool on win32 and no package available for osx built with python2
if (sys.platform != 'win32'
        and not (sys.platform == 'darwin' and sys.version_info[0] == 2)):
from skmultilearn.cluster import GraphToolLabelGraphClusterer
from skmultilearn.cluster.base import LabelCooccurrenceGraphBuilder
from skmultilearn.cluster.graphtool import StochasticBlockModel
from skmultilearn.tests.example import EXAMPLE_X, EXAMPLE_y
    import scipy.sparse as sparse
def get_graphtool_partitioners():
for nested in [True, False]:
for degree_correlation in [True, False]:
for weight_model in [None, 'real-exponential', 'real-normal',
'discrete-geometric', 'discrete-binomial',
'discrete-poisson']:
sbm = StochasticBlockModel(nested, degree_correlation, False, weight_model)
bld = LabelCooccurrenceGraphBuilder(weighted=weight_model is not None,
include_self_edges=False,
normalize_self_edges=False)
clf = GraphToolLabelGraphClusterer(graph_builder=bld, model=sbm)
yield clf
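# Hedged sketch: the generator above can be consumed directly for a quick
# smoke test of every partitioner configuration (EXAMPLE_X / EXAMPLE_y as in
# the test suite):
#
#   for clf in get_graphtool_partitioners():
#       clf.fit_predict(sparse.csr_matrix(EXAMPLE_X), sparse.csr_matrix(EXAMPLE_y))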
@pytest.mark.skipif(sys.platform == 'win32', reason="does not _run on windows")
@pytest.mark.parametrize("nested,degree_correlation,allow_overlap,weight_model", [
(True, True, True, None),
(True, True, True, 'real-exponential'),
(True, True, True, 'real-normal'),
(True, True, True, 'discrete-geometric'),
(True, True, True, 'discrete-binomial'),
(True, True, True, 'discrete-poisson'),
(True, True, False, None),
(True, True, False, 'real-exponential'),
(True, True, False, 'real-normal'),
(True, True, False, 'discrete-geometric'),
(True, True, False, 'discrete-binomial'),
(True, True, False, 'discrete-poisson'),
(True, False, False, None),
(True, False, False, 'real-exponential'),
(True, False, False, 'real-normal'),
(True, False, False, 'discrete-geometric'),
(True, False, False, 'discrete-binomial'),
(True, False, False, 'discrete-poisson'),
(False, False, False, None),
(False, False, False, 'real-exponential'),
(False, False, False, 'real-normal'),
(False, False, False, 'discrete-geometric'),
(False, False, False, 'discrete-binomial'),
(False, False, False, 'discrete-poisson')
])
def test_that_graph_tool_clusterer_works(nested, degree_correlation, allow_overlap, weight_model):
sbm = StochasticBlockModel(nested, degree_correlation, allow_overlap, weight_model)
bld = LabelCooccurrenceGraphBuilder(weighted=True, include_self_edges=False, normalize_self_edges=False)
clf = GraphToolLabelGraphClusterer(graph_builder=bld, model=sbm)
X, y = sparse.csr_matrix(EXAMPLE_X), sparse.csr_matrix(EXAMPLE_y)
division = clf.fit_predict(X, y)
for label in range(y.shape[1]):
assert any(label in partition for partition in division)
|
{
"content_hash": "dd56e2f7f996c5ffef6bac01880e8ebe",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 112,
"avg_line_length": 52.98360655737705,
"alnum_prop": 0.614480198019802,
"repo_name": "scikit-multilearn/scikit-multilearn",
"id": "41db9b58504b86073a900efc933c855dd2f65bc8",
"size": "3232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skmultilearn/cluster/tests/test_graphtool.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "Jupyter Notebook",
"bytes": "14158"
},
{
"name": "PowerShell",
"bytes": "7194"
},
{
"name": "Python",
"bytes": "347123"
}
],
"symlink_target": ""
}
|
from inits import *
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
"""Dropout for sparse tensors."""
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape)
dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
pre_out = tf.sparse_retain(x, dropout_mask)
return pre_out * (1./keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
class Layer(object):
"""Base layer class. Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg in kwargs.keys():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
class Dense(Layer):
"""Dense layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
act=tf.nn.relu, bias=False, featureless=False, **kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self, input_dim, output_dim, placeholders, dropout=0.,
sparse_inputs=False, act=tf.nn.relu, bias=False,
featureless=False, **kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
for i in range(len(self.support)):
self.vars['weights_' + str(i)] = glorot([input_dim, output_dim],
name='weights_' + str(i))
if self.bias:
self.vars['bias'] = zeros([output_dim], name='bias')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1-self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1-self.dropout)
# convolve
supports = list()
for i in range(len(self.support)):
if not self.featureless:
pre_sup = dot(x, self.vars['weights_' + str(i)],
sparse=self.sparse_inputs)
else:
pre_sup = self.vars['weights_' + str(i)]
support = dot(self.support[i], pre_sup, sparse=True)
supports.append(support)
output = tf.add_n(supports)
# bias
if self.bias:
output += self.vars['bias']
return self.act(output)
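# Hedged usage sketch: wiring a single GraphConvolution layer from the
# placeholders this module expects (TF1-style graph mode; the dimensions are
# placeholders chosen for illustration):
#
#   placeholders = {
#       'support': [tf.sparse_placeholder(tf.float32)],
#       'dropout': tf.placeholder_with_default(0., shape=()),
#       'num_features_nonzero': tf.placeholder(tf.int32),
#   }
#   layer = GraphConvolution(input_dim=1433, output_dim=16,
#                            placeholders=placeholders, sparse_inputs=True,
#                            act=tf.nn.relu)
#   features = tf.sparse_placeholder(tf.float32, shape=(None, 1433))
#   hidden = layer(features)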
|
{
"content_hash": "a84768518644417d165673b9c555f934",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 92,
"avg_line_length": 31.28723404255319,
"alnum_prop": 0.5583134988099286,
"repo_name": "yashchandak/GNN",
"id": "0035cbbc5085ae301594a6a64d5a01e3daf37eb7",
"size": "5882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Sample_Run/gcn-multi/layers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1036599"
},
{
"name": "Shell",
"bytes": "14131"
}
],
"symlink_target": ""
}
|
import unittest
class UnitsTestCase(unittest.TestCase):
def test_create_destroy(self):
from libcellml import Units
x = Units()
del x
y = Units("mine")
self.assertEqual("mine", y.name())
del y
def test_inheritance(self):
import libcellml
from libcellml import Units
x = Units()
self.assertIsInstance(x, libcellml.importedentity.ImportedEntity)
self.assertIsInstance(x, libcellml.namedentity.NamedEntity)
self.assertIsInstance(x, libcellml.entity.Entity)
# Test access to inherited methods
x = Units()
idx = 'test'
self.assertEqual(x.id(), '')
x.setId(idx)
self.assertEqual(x.id(), idx)
def test_create_imported_entity(self):
from libcellml.importedentity import ImportedEntity
self.assertRaises(AttributeError, ImportedEntity)
def test_standard_unit(self):
from libcellml import Units
u = Units()
u.addUnit(Units.StandardUnit.AMPERE)
u.addUnit(Units.StandardUnit.BECQUEREL)
u.addUnit(Units.StandardUnit.CANDELA)
u.addUnit(Units.StandardUnit.COULOMB)
u.addUnit(Units.StandardUnit.DIMENSIONLESS)
u.addUnit(Units.StandardUnit.FARAD)
u.addUnit(Units.StandardUnit.GRAM)
u.addUnit(Units.StandardUnit.GRAY)
u.addUnit(Units.StandardUnit.HENRY)
u.addUnit(Units.StandardUnit.HERTZ)
u.addUnit(Units.StandardUnit.JOULE)
u.addUnit(Units.StandardUnit.KATAL)
u.addUnit(Units.StandardUnit.KELVIN)
u.addUnit(Units.StandardUnit.KILOGRAM)
u.addUnit(Units.StandardUnit.LITRE)
u.addUnit(Units.StandardUnit.LUMEN)
u.addUnit(Units.StandardUnit.LUX)
u.addUnit(Units.StandardUnit.METRE)
u.addUnit(Units.StandardUnit.MOLE)
u.addUnit(Units.StandardUnit.NEWTON)
u.addUnit(Units.StandardUnit.OHM)
u.addUnit(Units.StandardUnit.PASCAL)
u.addUnit(Units.StandardUnit.RADIAN)
u.addUnit(Units.StandardUnit.SECOND)
u.addUnit(Units.StandardUnit.SIEMENS)
u.addUnit(Units.StandardUnit.SIEVERT)
u.addUnit(Units.StandardUnit.STERADIAN)
u.addUnit(Units.StandardUnit.TESLA)
u.addUnit(Units.StandardUnit.VOLT)
u.addUnit(Units.StandardUnit.WATT)
u.addUnit(Units.StandardUnit.WEBER)
self.assertRaises(
RuntimeError, u.addUnit, Units.StandardUnit.AMPERE - 1)
self.assertRaises(
RuntimeError, u.addUnit, Units.StandardUnit.WEBER + 1)
def test_is_base_unit(self):
from libcellml import Units
# bool isBaseUnit()
u = Units()
self.assertTrue(u.isBaseUnit())
u.addUnit(Units.StandardUnit.NEWTON)
self.assertFalse(u.isBaseUnit())
def test_add_unit(self):
from libcellml import Units
# void addUnit(const std::string &reference, const std::string &prefix,
# double exponent=1.0, double multiplier=1.0)
u = Units()
u.addUnit('a', 'b')
u.addUnit('a', 'b', 2.0)
u.addUnit('a', 'b', -1)
u.addUnit('a', 'b', 0)
u.addUnit('a', 'b', 3, 3)
u.addUnit('a', 'b', 0.1, -1.2)
del u
# void addUnit(const std::string &reference, int prefix,
# double exponent, double multiplier=1.0)
u = Units()
u.addUnit('a', 1, -1)
u.addUnit('a', 1, 2.3)
u.addUnit('a', 1, -1, 3)
u.addUnit('a', 1, -1, 2.3)
u.addUnit('a', 1, 1.2, 3.4)
u.addUnit('a', -1, -1)
u.addUnit('a', -1, 2.3)
u.addUnit('a', -1, -1, 3)
u.addUnit('a', -1, -1, 2.3)
u.addUnit('a', -1, 1.2, 3.4)
del u
# void addUnit(const std::string &reference, double exponent)
u = Units()
u.addUnit('a', 1.0)
# TODO Ints get converted to Prefix enum, not to double!
# u.addUnit('a', -1)
del u
# void addUnit(const std::string &reference)
u = Units()
u.addUnit('')
u.addUnit('a')
del u
# void addUnit(StandardUnit standardRef, const std::string &prefix,
# double exponent=1.0, double multiplier=1.0)
u = Units()
u.addUnit(Units.StandardUnit.KATAL, 'pico')
u.addUnit(Units.StandardUnit.KATAL, 'pico', 1.0)
u.addUnit(Units.StandardUnit.KATAL, 'pico', -1)
u.addUnit(Units.StandardUnit.KATAL, 'pico', 1.0, 2.0)
u.addUnit(Units.StandardUnit.KATAL, 'pico', 1, 2.0)
u.addUnit(Units.StandardUnit.KATAL, 'pico', -1, 2)
del u
# void addUnit(StandardUnit standardRef, int prefix,
# double exponent, double multiplier=1.0)
u = Units()
u.addUnit(Units.StandardUnit.KATAL, 1, 1.0)
u.addUnit(Units.StandardUnit.KATAL, -1, -1.0)
u.addUnit(Units.StandardUnit.KATAL, 1, 1.0, 1.0)
u.addUnit(Units.StandardUnit.KATAL, -1, -1.0, 1.0, 'id')
del u
# void addUnit(StandardUnit standardRef, double exponent)
# Hidden to avoid confusion with addUnit(StandardUnit, Prefix, double,
# double)
# void addUnit(StandardUnit standardRef)
u = Units()
u.addUnit(Units.StandardUnit.KATAL)
del u
def test_unit_attributes(self):
from libcellml import Units
# void unitAttributes(size_t index, std::string &reference,
# std::string &prefix, double &exponent, double &multiplier, std::string &id)
u = Units()
x = u.unitAttributes(0)
self.assertIsInstance(x, list)
self.assertEqual(x, ['', '', 1.0, 1.0, ''])
u.addUnit('blabla', 'hello', 1.2, 3.4, 'unitid')
x = u.unitAttributes(0)
self.assertIsInstance(x, list)
self.assertEqual(x, ['blabla', 'hello', 1.2, 3.4, 'unitid'])
x = u.unitAttributes(1)
self.assertIsInstance(x, list)
self.assertEqual(x, ['', '', 1.0, 1.0, ''])
del [u, x]
# void unitAttributes(const std::string &reference,
# std::string &prefix, double &exponent, double &multiplier) const;
u = Units()
x = u.unitAttributes('newton')
self.assertIsInstance(x, list)
self.assertEqual(x, ['newton', '', 1.0, 1.0, ''])
u.addUnit('few', 'bars', 4.3, 2.1, 'job')
x = u.unitAttributes('newton')
self.assertIsInstance(x, list)
self.assertEqual(x, ['newton', '', 1.0, 1.0, ''])
x = u.unitAttributes('few')
self.assertIsInstance(x, list)
self.assertEqual(x, ['few', 'bars', 4.3, 2.1, 'job'])
del [u, x]
# This method conflicts with unitAttributes(size_t, ...)
# void unitAttributes(StandardUnit standardRef, std::string &prefix,
# double &exponent, double &multiplier) const;
def test_remove_unit(self):
from libcellml import Units
# bool removeUnit(size_t index)
u = Units()
self.assertFalse(u.removeUnit(0))
self.assertFalse(u.removeUnit(1))
self.assertFalse(u.removeUnit(-1))
u.addUnit('hello')
self.assertFalse(u.removeUnit(1))
self.assertFalse(u.removeUnit(-1))
self.assertTrue(u.removeUnit(0))
self.assertFalse(u.removeUnit(0))
del [u]
# bool removeUnit(const std::string &reference)
u = Units()
self.assertFalse(u.removeUnit('hello'))
u.addUnit('hello')
self.assertFalse(u.removeUnit('hi'))
self.assertTrue(u.removeUnit('hello'))
self.assertFalse(u.removeUnit('hello'))
del [u]
# This method conflicts with removeUnit(size_t)
# bool removeUnit(StandardUnit standardRef)
def test_unit_count(self):
from libcellml import Units
# size_t unitCount()
u = Units()
self.assertEqual(u.unitCount(), 0)
u.addUnit('')
self.assertEqual(u.unitCount(), 1)
u.addUnit('')
self.assertEqual(u.unitCount(), 2)
def test_remove_all_units(self):
from libcellml import Units
# void removeAllUnits()
u = Units()
self.assertEqual(u.unitCount(), 0)
u.addUnit('')
self.assertEqual(u.unitCount(), 1)
u.addUnit('')
self.assertEqual(u.unitCount(), 2)
u.removeAllUnits()
self.assertEqual(u.unitCount(), 0)
def test_set_source_units(self):
from libcellml import Units, ImportSource
# void setSourceUnits(const ImportPtr &imp, const std::string &name)
i = ImportSource()
u = Units()
u.setSourceUnits(i, 'hello')
self.assertTrue(u.isImport())
def test_import_units(self):
from libcellml import Units, ImportSource
i = ImportSource()
u = Units()
u.setImportSource(i)
u.setImportReference("Volt")
self.assertTrue(u.isImport())
def test_requires_imports(self):
from libcellml import Units
u = Units("Volt")
self.assertFalse(u.requiresImports())
def test_scaling_factor(self):
from libcellml import Units
from libcellml.units import Units_scalingFactor
u1 = Units("BigVolts")
u1.addUnit("volt", 0, 1, 1000)
u2 = Units("LittleVolts")
u2.addUnit("volt")
self.assertEqual(0.001, Units.scalingFactor(u1, u2))
self.assertEqual(0.001, Units_scalingFactor(u1, u2))
def test_compatible(self):
from libcellml import Units
from libcellml.units import Units_compatible
u1 = Units("BigVolts")
u1.addUnit("volt", 0, 1, 1000)
u2 = Units("LittleVolts")
u2.addUnit("volt")
self.assertTrue(Units.compatible(u1, u2))
self.assertTrue(Units_compatible(u1, u2))
def test_equivalent(self):
from libcellml import Units
from libcellml.units import Units_equivalent
u1 = Units("BigVolts")
u1.addUnit("volt", 0, 1, 1000)
u2 = Units("LittleVolts")
u2.addUnit("volt")
self.assertFalse(Units.equivalent(u1, u2))
self.assertFalse(Units_equivalent(u1, u2))
def test_clone(self):
from libcellml import Units
u = Units("BigVolts")
u.addUnit("volt", 0, 1, 1000)
uCloned = u.clone()
self.assertEqual(1, uCloned.unitCount())
def test_unit_id(self):
from libcellml import Units
u = Units("BigVolts")
u.addUnit("volt", 0, 1, 1000)
self.assertEqual("", u.unitId(0))
u.setUnitId(0, "dangerous")
self.assertEqual("dangerous", u.unitId(0))
def test_unit_attributes_single(self):
from libcellml import Units
u = Units("Best")
u.addUnit('coolio', 'hello', 1.2, 3.4)
self.assertEqual('coolio', u.unitAttributeReference(0))
self.assertEqual('hello', u.unitAttributePrefix(0))
self.assertEqual(1.2, u.unitAttributeExponent(0))
self.assertEqual(3.4, u.unitAttributeMultiplier(0))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "90f074c6841c8979509a9e8891fd8d27",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 87,
"avg_line_length": 32.08985507246377,
"alnum_prop": 0.5928100442597778,
"repo_name": "hsorby/libcellml",
"id": "79d93f22bbeb3fc35f3ec58a1938ae277ffe9405",
"size": "11108",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "tests/bindings/python/test_units.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "271668"
},
{
"name": "C++",
"bytes": "4283033"
},
{
"name": "CMake",
"bytes": "301904"
},
{
"name": "JavaScript",
"bytes": "140236"
},
{
"name": "Python",
"bytes": "476134"
},
{
"name": "SWIG",
"bytes": "120696"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: checkpoint_access_rule_facts
short_description: Get access rules objects facts on Checkpoint over Web Services API
description:
- Get access rules objects facts on Checkpoint devices.
All operations are performed over Web Services API.
version_added: "2.8"
author: "Ansible by Red Hat (@rcarrillocruz)"
options:
name:
description:
- Name of the access rule. If not provided, UID is required.
type: str
uid:
description:
- UID of the access rule. If not provided, name is required.
type: str
layer:
description:
- Layer the access rule is attached to.
required: True
type: str
"""
EXAMPLES = """
- name: Get access rule facts
checkpoint_access_rule_facts:
layer: Network
name: "Drop attacker"
"""
RETURN = """
ansible_facts:
description: The checkpoint access rule object facts.
returned: always.
type: list
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.six.moves.urllib.error import HTTPError
import json
def get_access_rule(module, connection):
name = module.params['name']
uid = module.params['uid']
layer = module.params['layer']
if uid:
payload = {'uid': uid, 'layer': layer}
elif name:
payload = {'name': name, 'layer': layer}
code, response = connection.send_request('/web_api/show-access-rule', payload)
return code, response
def main():
argument_spec = dict(
name=dict(type='str'),
uid=dict(type='str'),
layer=dict(type='str', required=True),
)
    module = AnsibleModule(argument_spec=argument_spec,
                           required_one_of=[['name', 'uid']])
connection = Connection(module._socket_path)
code, response = get_access_rule(module, connection)
if code == 200:
module.exit_json(ansible_facts=dict(checkpoint_access_rules=response))
else:
module.fail_json(msg='Checkpoint device returned error {0} with message {1}'.format(code, response))
if __name__ == '__main__':
main()
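# Hedged playbook sketch mirroring the EXAMPLES block above, but looking the
# rule up by UID instead of name (the UID value is a placeholder):
#
#   - name: Get access rule facts by UID
#     checkpoint_access_rule_facts:
#       layer: Network
#       uid: "<access-rule-uid>"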
|
{
"content_hash": "a9ac01928057d9bb0851ddff2d3cc1e9",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 108,
"avg_line_length": 26,
"alnum_prop": 0.6630244755244755,
"repo_name": "SergeyCherepanov/ansible",
"id": "fa8c4c2f669e59dd4931ee9e660ddfd5c6bd752a",
"size": "2964",
"binary": false,
"copies": "30",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/checkpoint/checkpoint_access_rule_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):
def __init__(
self, plotly_name="title", parent_name="scatterpolar.marker.colorbar", **kwargs
):
super(TitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Title"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
""",
),
**kwargs
)
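# Hedged note: this validator is not called directly; it runs when the
# corresponding attribute is assigned through the graph_objects API, e.g.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scatterpolar(
#       marker=dict(colorbar=dict(title=dict(text="level", side="right")))))
#
# where the nested `title` dict is validated by the class above.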
|
{
"content_hash": "694441e52b4b98dda562a1a75bf1e21d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 87,
"avg_line_length": 38.121212121212125,
"alnum_prop": 0.5405405405405406,
"repo_name": "plotly/python-api",
"id": "3ef83a4a52c6546973c52ddd8acb3040153687f2",
"size": "1258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatterpolar/marker/colorbar/_title.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""Test for snapshoter"""
import collections
from ggrc import db
import ggrc.models as models
from ggrc.snapshotter.rules import Types
from integration.ggrc.snapshotter import SnapshotterBaseTestCase
from integration.ggrc.snapshotter import snapshot_identity
class TestSnapshoting(SnapshotterBaseTestCase):
"""Test cases for Snapshoter module"""
# pylint: disable=invalid-name
def test_snapshot_create(self):
"""Test simple snapshot creation with a simple change"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
assessment = self.create_object(models.Assessment, {
"title": "Test Assessment Snapshot 1"
})
self.create_mapping(program, control)
self.create_mapping(program, assessment)
control = self.refresh_object(control)
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 EDIT 1"
})
self.create_object(models.Audit, {
"title": "Snapshotable audit",
"program": {"id": program.id},
"status": "Planned",
"snapshots": {
"operation": "create"
}
})
self.assertEqual(
db.session.query(models.Audit).filter(
models.Audit.title.like(
"%Snapshotable audit%")).count(), 1)
snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_id == control.id,
models.Snapshot.child_type == "Control",
)
self.assertEqual(snapshot.count(), 1)
self.assertEqual(
snapshot.first().revision.content["title"],
"Test Control Snapshot 1 EDIT 1")
snapshot_revision = db.session.query(
models.Revision.resource_type,
models.Revision.resource_id,
models.Revision.content
).filter(
models.Revision.resource_type == "Snapshot",
models.Revision.resource_id == snapshot.first().id,
)
self.assertEqual(snapshot_revision.count(), 1)
snapshot_revision_content = snapshot_revision.first()[2]
self.assertEqual(snapshot_revision_content["child_type"], "Control")
self.assertEqual(snapshot_revision_content["child_id"], control.id)
relationship_columns = db.session.query(models.Relationship)
relationship = relationship_columns.filter(
models.Relationship.source_type == "Control",
models.Relationship.source_id == control.id,
models.Relationship.destination_type == "Snapshot",
models.Relationship.destination_id == snapshot.first().id
).union(
relationship_columns.filter(
models.Relationship.source_type == "Snapshot",
models.Relationship.source_id == snapshot.first().id,
models.Relationship.destination_type == "Control",
models.Relationship.destination_id == control.id
)
)
self.assertEqual(relationship.count(), 1)
relationship_revision = db.session.query(
models.Revision.resource_type,
models.Revision.resource_id,
models.Revision.content,
).filter(
models.Revision.resource_type == "Relationship",
models.Revision.resource_id == relationship.first().id,
)
self.assertEqual(relationship_revision.count(), 1)
def test_snapshot_update(self):
"""Test simple snapshot creation with a simple change"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
self.create_mapping(program, control)
control = self.refresh_object(control)
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 EDIT 1"
})
self.create_audit(program)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
control_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_id == control.id,
models.Snapshot.child_type == "Control",
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id)
self.assertEqual(control_snapshot.count(), 1)
self.assertEqual(control_snapshot.first().revision.content["title"],
"Test Control Snapshot 1 EDIT 1")
# Create a new objective, add it to program and edit control to detect
# update.
objective = self.create_object(models.Objective, {
"title": "Test Objective Snapshot UNEDITED"
})
self.create_mapping(program, objective)
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 Edit 2 AFTER initial snapshot"
})
audit = self.refresh_object(audit)
# Initiate update operation
self.api.modify_object(audit, {
"snapshots": {
"operation": "upsert"
}
})
objective_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Objective",
models.Snapshot.child_id == objective.id,
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id
)
self.assertEqual(objective_snapshot.count(), 1)
self.assertEqual(
objective_snapshot.first().revision.content["title"],
"Test Objective Snapshot UNEDITED")
control_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Control",
models.Snapshot.child_id == control.id,
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id
)
self.assertEqual(control_snapshot.count(), 1)
self.assertEqual(control_snapshot.first().revision.content["title"],
"Test Control Snapshot 1 Edit 2 AFTER initial snapshot")
control_revisions = db.session.query(models.Revision).filter(
models.Revision.resource_type == control.type,
models.Revision.resource_id == control.id)
self.assertEqual(
control_revisions.count(), 3,
"There were 3 edits made at the time")
self.assertEqual(
control_revisions.order_by(models.Revision.id.desc()).first().id,
control_snapshot.one().revision_id)
def test_update_to_specific_version(self):
"""Test global update and selecting a specific revision for one object"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
objective = self.create_object(models.Objective, {
"title": "Test Objective Snapshot 1"
})
self.create_mapping(program, control)
self.create_mapping(program, objective)
control = self.refresh_object(control)
for x in xrange(1, 4):
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 EDIT {}".format(x)
})
self.create_object(models.Audit, {
"title": "Snapshotable audit",
"program": {"id": program.id},
"status": "Planned",
"snapshots": {
"operation": "create"
}
})
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
revision = db.session.query(
models.Revision.id,
models.Revision.resource_type,
models.Revision.resource_id,
models.Revision.content,
).filter(
models.Revision.resource_type == control.type,
models.Revision.resource_id == control.id,
models.Revision.content.like("%Test Control Snapshot 1 EDIT 2%"),
).one()
audit = self.refresh_object(audit)
self.api.modify_object(audit, {
"snapshots": {
"operation": "upsert",
"revisions": [{
"parent": self.objgen.create_stub(audit),
"child": self.objgen.create_stub(control),
"revision_id": revision[0]
}]
}
})
control_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == control.type,
models.Snapshot.child_id == control.id,
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id
)
self.assertEqual(control_snapshot.count(), 1)
self.assertEqual(control_snapshot.first().revision.content["title"],
"Test Control Snapshot 1 EDIT 2")
def test_snapshot_creation_with_custom_attribute_values(self):
pass
def test_creation_of_snapshots_for_multiple_parent_objects(self):
pass
def test_individual_update(self):
"""Test update of individual snapshot
1. Create program with mapped control and data asset.
2. Create audit, verify there are snapshot for control and data asset
3. Update control and data asset title
4. Run refresh on control's snapshot object
5. Verify control's title is changed and data assets NOT
"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
data_asset = self.create_object(models.DataAsset, {
"title": "Test DataAsset Snapshot 1"
})
self.create_mapping(program, control)
self.create_mapping(program, data_asset)
control = self.refresh_object(control)
data_asset = self.refresh_object(data_asset)
self.create_audit(program)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
self.assertEqual(
db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id).count(),
2)
control = self.refresh_object(control)
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 EDIT 1"
})
data_asset = self.refresh_object(data_asset)
self.api.modify_object(data_asset, {
"title": "Test Data Asset Snapshot 1 EDIT 1"
})
control_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Control",
models.Snapshot.child_id == control.id).first()
self.assertEqual(
control_snapshot.revision.content["title"],
"Test Control Snapshot 1")
self.api.modify_object(control_snapshot, {
"update_revision": "latest"
})
expected = [
(control, "Test Control Snapshot 1 EDIT 1"),
(data_asset, "Test DataAsset Snapshot 1"),
]
for obj, expected_title in expected:
snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == obj.__class__.__name__,
models.Snapshot.child_id == obj.id).first()
      self.assertEqual(
snapshot.revision.content["title"],
expected_title)
control_snapshot_event = db.session.query(models.Event).filter(
models.Event.resource_type == "Snapshot",
models.Event.resource_id == control_snapshot.id,
models.Event.action == "PUT"
)
self.assertEqual(control_snapshot_event.count(), 1)
control_snapshot_revisions = db.session.query(models.Revision).filter(
models.Revision.resource_type == "Snapshot",
models.Revision.resource_id == control_snapshot.id
)
self.assertEqual(control_snapshot_revisions.count(), 2)
def test_snapshot_put_operation(self):
"""Test that performing PUT operation on snapshot does not change any values
"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
self.create_mapping(program, control)
control = self.refresh_object(control)
self.create_audit(program)
control = self.refresh_object(control)
self.api.modify_object(control, {
"title": "Test Control Snapshot 1 EDIT 1"
})
control_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Control",
models.Snapshot.child_id == control.id).first()
self.assertEqual(
control_snapshot.revision.content["title"],
"Test Control Snapshot 1")
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
update_data = {
"parent_id": audit.id + 123,
"parent_type": "DataAsset",
"child_id": control_snapshot.id + 123,
"child_type": "Regulation",
"revision_id": control_snapshot.revision_id + 123,
}
self.api.modify_object(control_snapshot, update_data)
control_snapshot_updated = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == control.__class__.__name__,
models.Snapshot.child_id == control.id).one()
for field in update_data.keys():
self.assertEqual(
getattr(control_snapshot, field),
getattr(control_snapshot_updated, field)
)
def test_update_when_mapped_objects_are_deleted(self):
"""Test global update when object got deleted or unmapped"""
pass
def test_snapshoting_of_objects(self):
"""Test that all object types that should be snapshotted are snapshotted
It is expected that all objects will be triplets.
"""
self._import_file("snapshotter_create.csv")
# Verify that all objects got imported correctly.
for _type in Types.all:
self.assertEqual(
db.session.query(getattr(models.all_models, _type)).count(),
3)
program = db.session.query(models.Program).filter(
models.Program.slug == "Prog-13211"
).one()
self.create_audit(program)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).first()
snapshots = db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id,
)
self.assertEqual(snapshots.count(), len(Types.all) * 3)
type_count = collections.defaultdict(int)
for snapshot in snapshots:
type_count[snapshot.child_type] += 1
missing_types = set()
for snapshottable_type in Types.all:
if type_count[snapshottable_type] != 3:
missing_types.add(snapshottable_type)
self.assertEqual(missing_types, set())
def test_snapshot_update_is_idempotent(self):
"""Test that nothing has changed if there's nothing to update"""
self._import_file("snapshotter_create.csv")
program = db.session.query(models.Program).filter(
models.Program.slug == "Prog-13211"
).one()
self.create_audit(program)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).first()
snapshots = db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id,
)
self.assertEqual(snapshots.count(), len(Types.all) * 3)
audit = self.refresh_object(audit)
self.api.modify_object(audit, {
"snapshots": {
"operation": "upsert"
}
})
old_snapshots = {s.id: s for s in snapshots}
snapshots = db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id,
)
new_snapshots = {s.id: s for s in snapshots}
for _id, snapshot in new_snapshots.items():
      self.assertTrue(snapshot_identity(old_snapshots[_id], snapshot))
def test_audit_creation_if_nothing_in_program_scope(self):
"""Test audit creation if there's nothing in prog scope"""
program_title = "empty program"
audit_title = "Audit for empty program"
self.create_object(models.Program, {
"title": program_title,
})
program = db.session.query(models.Program).filter(
models.Program.title == "empty program"
).one()
self.create_object(models.Audit, {
"title": "Audit for empty program",
"program": {"id": program.id},
"status": "Planned",
"snapshots": {
"operation": "create",
}
})
audit = db.session.query(models.Audit).filter(
models.Audit.title == audit_title).one()
self.assertEqual(
db.session.query(models.Audit).filter(
models.Audit.title == audit_title).count(), 1)
snapshots = db.session.query(models.Snapshot).filter(
models.Snapshot.parent_type == "Audit",
models.Snapshot.parent_id == audit.id,
)
self.assertEqual(snapshots.count(), 0)
def test_snapshot_post_api(self):
"""Test snapshot creation when object is in program scope already"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
self.create_audit(program)
objective = self.create_object(models.Objective, {
"title": "Test Objective Snapshot UNEDITED"
})
self.create_mapping(program, objective)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
self.api.post(models.Snapshot, [
{
"snapshot": {
"parent": {
"id": audit.id,
"type": "Audit",
"href": "/api/audits/{}".format(audit.id)
},
"child_type": "Objective",
"child_id": objective.id,
"update_revision": "new",
"context": {
"id": audit.context_id,
"type": "Context",
"href": "/api/contexts/{}".format(audit.context_id)
}
}
},
{
"snapshot": {
"parent": {
"id": audit.id,
"type": "Audit",
"href": "/api/audits/{}".format(audit.id)
},
"child_type": "Control",
"child_id": control.id,
"update_revision": "new",
"context": {
"id": audit.context_id,
"type": "Context",
"href": "/api/contexts/{}".format(audit.context_id)
}
}
}
])
objective_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Objective",
models.Snapshot.child_id == objective.id
)
objective_revision = db.session.query(models.Revision).filter(
models.Revision.resource_type == "Objective",
models.Revision.resource_id == objective.id
).one()
    self.assertEqual(objective_snapshot.count(), 1)
    self.assertEqual(objective_snapshot.first().revision_id,
                     objective_revision.id)
self.assertIsNotNone(models.Relationship.find_related(program, objective))
self.assertIsNotNone(models.Relationship.find_related(program, control))
def test_relationship_post_api(self):
"""Test snapshot creation when creating relationships to Audit"""
program = self.create_object(models.Program, {
"title": "Test Program Snapshot 1"
})
control = self.create_object(models.Control, {
"title": "Test Control Snapshot 1"
})
self.create_audit(program)
objective = self.create_object(models.Objective, {
"title": "Test Objective Snapshot UNEDITED"
})
self.create_mapping(program, objective)
audit = db.session.query(models.Audit).filter(
models.Audit.title.like("%Snapshotable audit%")).one()
self.create_mapping(audit, objective)
self.create_mapping(audit, control)
objective_snapshot = db.session.query(models.Snapshot).filter(
models.Snapshot.child_type == "Objective",
models.Snapshot.child_id == objective.id
)
objective_revision = db.session.query(models.Revision).filter(
models.Revision.resource_type == "Objective",
models.Revision.resource_id == objective.id
).one()
    self.assertEqual(objective_snapshot.count(), 1)
    self.assertEqual(objective_snapshot.first().revision_id,
                     objective_revision.id)
self.assertIsNotNone(models.Relationship.find_related(program, objective))
self.assertIsNotNone(models.Relationship.find_related(program, control))
|
{
"content_hash": "b91186ca9c819e01a0b63e01076de16e",
"timestamp": "",
"source": "github",
"line_count": 630,
"max_line_length": 80,
"avg_line_length": 32.7047619047619,
"alnum_prop": 0.6276451174529217,
"repo_name": "josthkko/ggrc-core",
"id": "7fcdca7d2063ff4b1e9c9672f32e74ddf1d7f540",
"size": "20717",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "test/integration/ggrc/snapshotter/test_snapshoting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057288"
},
{
"name": "JavaScript",
"bytes": "1492054"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2148568"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.cz.forms import (CZPostalCodeField, CZRegionSelect,
CZBirthNumberField, CZICNumberField)
class CZLocalFlavorTests(SimpleTestCase):
def test_CZRegionSelect(self):
f = CZRegionSelect()
out = '''<select name="regions">
<option value="PR">Prague</option>
<option value="CE">Central Bohemian Region</option>
<option value="SO">South Bohemian Region</option>
<option value="PI">Pilsen Region</option>
<option value="CA">Carlsbad Region</option>
<option value="US">Usti Region</option>
<option value="LB">Liberec Region</option>
<option value="HK">Hradec Region</option>
<option value="PA">Pardubice Region</option>
<option value="VY">Vysocina Region</option>
<option value="SM">South Moravian Region</option>
<option value="OL">Olomouc Region</option>
<option value="ZL">Zlin Region</option>
<option value="MS">Moravian-Silesian Region</option>
</select>'''
self.assertHTMLEqual(f.render('regions', 'TT'), out)
def test_CZPostalCodeField(self):
error_format = ['Enter a postal code in the format XXXXX or XXX XX.']
valid = {
'91909': '91909',
'917 01': '91701',
'12345': '12345',
}
invalid = {
'84545x': error_format,
'123456': error_format,
'1234': error_format,
'123 4': error_format,
}
self.assertFieldOutput(CZPostalCodeField, valid, invalid)
def test_CZBirthNumberField(self):
error_format = ['Enter a birth number in the format XXXXXX/XXXX or XXXXXXXXXX.']
error_invalid = ['Enter a valid birth number.']
valid = {
'880523/1237': '880523/1237',
'8805231237': '8805231237',
'880523/000': '880523/000',
'880523000': '880523000',
'882101/0011': '882101/0011',
}
invalid = {
'123456/12': error_format,
'123456/12345': error_format,
'12345612': error_format,
'12345612345': error_format,
'880523/1239': error_invalid,
'8805231239': error_invalid,
'990101/0011': error_invalid,
}
self.assertFieldOutput(CZBirthNumberField, valid, invalid)
def test_CZICNumberField(self):
error_invalid = ['Enter a valid IC number.']
valid = {
'12345679': '12345679',
'12345601': '12345601',
'12345661': '12345661',
'12345610': '12345610',
}
invalid = {
'1234567': error_invalid,
'12345660': error_invalid,
'12345600': error_invalid,
}
self.assertFieldOutput(CZICNumberField, valid, invalid)
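# Note (added for context, not part of the original file): assertFieldOutput
# comes from Django's SimpleTestCase. It instantiates the field class, asserts
# field.clean(input) equals the expected value for each `valid` pair, and
# asserts that each `invalid` input raises ValidationError with exactly the
# listed error messages.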
|
{
"content_hash": "863f1affe0294d77a3745882477cf072",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 88,
"avg_line_length": 35.55696202531646,
"alnum_prop": 0.5941616233535066,
"repo_name": "M157q/django-localflavor",
"id": "7738ed577f392ed137845f3004f5621a3e5afa47",
"size": "2809",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_cz.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "766740"
}
],
"symlink_target": ""
}
|
from argparse import ArgumentParser
import os
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst.directives import flag, unchanged
from sphinx.util.compat import Directive
from sphinx.util.nodes import nested_parse_with_titles
from sphinxarg.parser import parse_parser, parser_navigate
def map_nested_definitions(nested_content):
if nested_content is None:
raise Exception('Nested content should be iterable, not null')
# build definition dictionary
definitions = {}
for item in nested_content:
if not isinstance(item, nodes.definition_list):
continue
for subitem in item:
if not isinstance(subitem, nodes.definition_list_item):
continue
if not len(subitem.children) > 0:
continue
classifier = '@after'
idx = subitem.first_child_matching_class(nodes.classifier)
if idx is not None:
ci = subitem[idx]
if len(ci.children) > 0:
classifier = ci.children[0].astext()
if classifier is not None and classifier not in (
'@replace', '@before', '@after'):
raise Exception('Unknown classifier: %s' % classifier)
idx = subitem.first_child_matching_class(nodes.term)
if idx is not None:
ch = subitem[idx]
if len(ch.children) > 0:
term = ch.children[0].astext()
idx = subitem.first_child_matching_class(nodes.definition)
if idx is not None:
def_node = subitem[idx]
def_node.attributes['classifier'] = classifier
definitions[term] = def_node
return definitions
def print_arg_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'args' in data:
for arg in data['args']:
my_def = [nodes.paragraph(text=arg['help'])] if arg['help'] else []
name = arg['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in arg['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', nodes.option_string(text=name)),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_opt_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'options' in data:
for opt in data['options']:
names = []
my_def = [nodes.paragraph(text=opt['help'])] if opt['help'] else []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
my_def.append(nodes.paragraph(
text=('Possible choices: %s' % ', '.join([str(c) for c in opt['choices']]))))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *my_def)))
return nodes.option_list('', *items) if items else None
def print_command_args_and_opts(arg_list, opt_list, sub_list=None):
items = []
if arg_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Positional arguments:'),
nodes.definition('', arg_list)))
if opt_list:
items.append(nodes.definition_list_item(
'', nodes.term(text='Options:'),
nodes.definition('', opt_list)))
if sub_list and len(sub_list):
items.append(nodes.definition_list_item(
'', nodes.term(text='Sub-commands:'),
nodes.definition('', sub_list)))
return nodes.definition_list('', *items)
def apply_definition(definitions, my_def, name):
if name in definitions:
definition = definitions[name]
classifier = definition['classifier']
if classifier == '@replace':
return definition.children
if classifier == '@after':
return my_def + definition.children
if classifier == '@before':
return definition.children + my_def
raise Exception('Unknown classifier: %s' % classifier)
return my_def
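# Illustrative directive content consumed by map_nested_definitions /
# apply_definition (the option name is hypothetical). A definition-list item
# whose classifier is '@replace' swaps out the generated help text, '@before'
# prepends the definition body, and '@after' (the default) appends it:
#
#   --verbose : @replace
#       Hand-written help text that replaces the argparse-generated help.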
def print_subcommand_list(data, nested_content):
definitions = map_nested_definitions(nested_content)
items = []
if 'children' in data:
for child in data['children']:
my_def = [nodes.paragraph(
text=child['help'])] if child['help'] else []
name = child['name']
my_def = apply_definition(definitions, my_def, name)
if len(my_def) == 0:
my_def.append(nodes.paragraph(text='Undocumented'))
if 'description' in child:
my_def.append(nodes.paragraph(text=child['description']))
my_def.append(nodes.literal_block(text=child['usage']))
my_def.append(print_command_args_and_opts(
print_arg_list(child, nested_content),
print_opt_list(child, nested_content),
print_subcommand_list(child, nested_content)
))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(text=name)),
nodes.definition('', *my_def)
)
)
return nodes.definition_list('', *items)
class ArgParseDirective(Directive):
has_content = True
option_spec = dict(module=unchanged, func=unchanged, ref=unchanged,
prog=unchanged, path=unchanged, nodefault=flag,
manpage=unchanged, nosubcommands=unchanged, passparser=flag)
def _construct_manpage_specific_structure(self, parser_info):
"""
Construct a typical man page consisting of the following elements:
NAME (automatically generated, out of our control)
SYNOPSIS
DESCRIPTION
OPTIONS
FILES
SEE ALSO
BUGS
"""
# SYNOPSIS section
synopsis_section = nodes.section(
'',
nodes.title(text='Synopsis'),
nodes.literal_block(text=parser_info["bare_usage"]),
ids=['synopsis-section'])
# DESCRIPTION section
description_section = nodes.section(
'',
nodes.title(text='Description'),
nodes.paragraph(text=parser_info.get(
'description', parser_info.get(
'help', "undocumented").capitalize())),
ids=['description-section'])
nested_parse_with_titles(
self.state, self.content, description_section)
if parser_info.get('epilog'):
# TODO: do whatever sphinx does to understand ReST inside
# docstrings magically imported from other places. The nested
            # parse method invoked above seems to be able to do this but
# I haven't found a way to do it for arbitrary text
description_section += nodes.paragraph(
text=parser_info['epilog'])
# OPTIONS section
options_section = nodes.section(
'',
nodes.title(text='Options'),
ids=['options-section'])
if 'args' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Positional arguments:')
options_section += self._format_positional_arguments(parser_info)
if 'options' in parser_info:
options_section += nodes.paragraph()
options_section += nodes.subtitle(text='Optional arguments:')
options_section += self._format_optional_arguments(parser_info)
items = [
# NOTE: we cannot generate NAME ourselves. It is generated by
# docutils.writers.manpage
synopsis_section,
description_section,
# TODO: files
# TODO: see also
# TODO: bugs
]
if len(options_section.children) > 1:
items.append(options_section)
if 'nosubcommands' not in self.options:
# SUBCOMMANDS section (non-standard)
subcommands_section = nodes.section(
'',
nodes.title(text='Sub-Commands'),
ids=['subcommands-section'])
if 'children' in parser_info:
subcommands_section += self._format_subcommands(parser_info)
if len(subcommands_section) > 1:
items.append(subcommands_section)
if os.getenv("INCLUDE_DEBUG_SECTION"):
import json
# DEBUG section (non-standard)
debug_section = nodes.section(
'',
nodes.title(text="Argparse + Sphinx Debugging"),
nodes.literal_block(text=json.dumps(parser_info, indent=' ')),
ids=['debug-section'])
items.append(debug_section)
return items
def _format_positional_arguments(self, parser_info):
assert 'args' in parser_info
items = []
for arg in parser_info['args']:
arg_items = []
if arg['help']:
arg_items.append(nodes.paragraph(text=arg['help']))
else:
arg_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in arg:
arg_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(arg['choices'])))
items.append(
nodes.option_list_item(
'',
nodes.option_group(
'', nodes.option(
'', nodes.option_string(text=arg['metavar'])
)
),
nodes.description('', *arg_items)))
return nodes.option_list('', *items)
def _format_optional_arguments(self, parser_info):
assert 'options' in parser_info
items = []
for opt in parser_info['options']:
names = []
opt_items = []
for name in opt['name']:
option_declaration = [nodes.option_string(text=name)]
if opt['default'] is not None \
and opt['default'] != '==SUPPRESS==':
option_declaration += nodes.option_argument(
'', text='=' + str(opt['default']))
names.append(nodes.option('', *option_declaration))
if opt['help']:
opt_items.append(nodes.paragraph(text=opt['help']))
else:
opt_items.append(nodes.paragraph(text='Undocumented'))
if 'choices' in opt:
opt_items.append(
nodes.paragraph(
text='Possible choices: ' + ', '.join(opt['choices'])))
items.append(
nodes.option_list_item(
'', nodes.option_group('', *names),
nodes.description('', *opt_items)))
return nodes.option_list('', *items)
def _format_subcommands(self, parser_info):
assert 'children' in parser_info
items = []
for subcmd in parser_info['children']:
subcmd_items = []
if subcmd['help']:
subcmd_items.append(nodes.paragraph(text=subcmd['help']))
else:
subcmd_items.append(nodes.paragraph(text='Undocumented'))
items.append(
nodes.definition_list_item(
'',
nodes.term('', '', nodes.strong(
text=subcmd['bare_usage'])),
nodes.definition('', *subcmd_items)))
return nodes.definition_list('', *items)
def _nested_parse_paragraph(self, text):
content = nodes.paragraph()
self.state.nested_parse(StringList(text.split("\n")), 0, content)
return content
def run(self):
if 'module' in self.options and 'func' in self.options:
module_name = self.options['module']
attr_name = self.options['func']
elif 'ref' in self.options:
_parts = self.options['ref'].split('.')
module_name = '.'.join(_parts[0:-1])
attr_name = _parts[-1]
else:
raise self.error(
':module: and :func: should be specified, or :ref:')
mod = __import__(module_name, globals(), locals(), [attr_name])
if not hasattr(mod, attr_name):
raise self.error((
'Module "%s" has no attribute "%s"\n'
'Incorrect argparse :module: or :func: values?'
) % (module_name, attr_name))
func = getattr(mod, attr_name)
if isinstance(func, ArgumentParser):
parser = func
elif 'passparser' in self.options:
parser = ArgumentParser()
func(parser)
else:
parser = func()
if 'path' not in self.options:
self.options['path'] = ''
path = str(self.options['path'])
if 'prog' in self.options:
parser.prog = self.options['prog']
result = parse_parser(
parser, skip_default_values='nodefault' in self.options)
result = parser_navigate(result, path)
if 'manpage' in self.options:
return self._construct_manpage_specific_structure(result)
nested_content = nodes.paragraph()
self.state.nested_parse(
self.content, self.content_offset, nested_content)
nested_content = nested_content.children
items = []
        # add the common content (everything except definition-list overrides) first
for item in nested_content:
if not isinstance(item, nodes.definition_list):
items.append(item)
if 'description' in result:
items.append(self._nested_parse_paragraph(result['description']))
items.append(nodes.literal_block(text=result['usage']))
items.append(print_command_args_and_opts(
print_arg_list(result, nested_content),
print_opt_list(result, nested_content),
print_subcommand_list(result, nested_content)
))
if 'epilog' in result:
items.append(self._nested_parse_paragraph(result['epilog']))
return items
def setup(app):
app.add_directive('argparse', ArgParseDirective)
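# Typical usage from a .rst document (module and function names are
# hypothetical, shown only to illustrate the option_spec above):
#
#   .. argparse::
#      :module: mypackage.cli
#      :func: build_parser
#      :prog: mytool
#
# The directive imports mypackage.cli, calls build_parser() to obtain an
# ArgumentParser, and renders its usage, arguments, options and sub-commands.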
|
{
"content_hash": "86a633864b337ce0485fa5c2efa848ab",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 97,
"avg_line_length": 41.509433962264154,
"alnum_prop": 0.5423376623376623,
"repo_name": "aarontuor/antk",
"id": "6b7fc4d23eeba155b97fd4c3cdc2dbc4173cd283",
"size": "15400",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "docs/sphinxext/sphinxarg/ext.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "47995"
},
{
"name": "Python",
"bytes": "332504"
}
],
"symlink_target": ""
}
|
class Config:
BUNDLE_ERRORS = True
# see: http://flask-restful-cn.readthedocs.org/en/0.3.4/reqparse.html#error-handling
@classmethod
def init_app(cls, app):
pass
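# Note (added for context): with BUNDLE_ERRORS enabled, flask-restful's
# reqparse reports all failing arguments in a single error response instead
# of aborting on the first invalid one, as described at the URL above.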
|
{
"content_hash": "0a3402e827fb6c59ada3520ad6a50355",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 88,
"avg_line_length": 26.714285714285715,
"alnum_prop": 0.6577540106951871,
"repo_name": "py-in-the-sky/gae-flask-redux-react-starter-kit",
"id": "cd6dcd49965cf3d6e3b6ccb4bdb98fb60b956324",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gae/config/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2278"
},
{
"name": "JavaScript",
"bytes": "95719"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "19621"
}
],
"symlink_target": ""
}
|
"""
Django settings for shadowsocks project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q4n16g)+f%(pwd&_wgwi2ei120-ymd4xu=9c_5f#)^c)cu-7()'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ['shadowsocks.net', 'www.shadowsocks.net']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'shadowsocks.web',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'shadowsocks.urls'
WSGI_APPLICATION = 'shadowsocks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'shadowsocks.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'zh-CN'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/var/www/shadowsocks.net/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = '/var/www/shadowsocks.net/media/'
|
{
"content_hash": "21d9f43a141327bb08e7c7d2b85b79d3",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 71,
"avg_line_length": 24.988505747126435,
"alnum_prop": 0.7221711131554738,
"repo_name": "imlonghao/ShadowSocks.NET",
"id": "68fa42cf4f66b1aab4dd540c07b4d93955778b5d",
"size": "2174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shadowsocks/settings.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1410"
},
{
"name": "Python",
"bytes": "19602"
}
],
"symlink_target": ""
}
|
from requests.structures import CaseInsensitiveDict
from opentelemetry.propagators.aws.aws_xray_propagator import (
TRACE_HEADER_KEY,
AwsXRayPropagator,
)
XRAY_PROPAGATOR = AwsXRayPropagator()
def test_extract_single_header(benchmark):
benchmark(
XRAY_PROPAGATOR.extract,
{
TRACE_HEADER_KEY: "bdb5b63237ed38aea578af665aa5aa60-00000000000000000c32d953d73ad225"
},
)
def test_inject_empty_context(benchmark):
benchmark(
XRAY_PROPAGATOR.inject, {}, setter=CaseInsensitiveDict.__setitem__
)
|
{
"content_hash": "206a5f48195671ff5ebcbb6c49bb6760",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 97,
"avg_line_length": 24.391304347826086,
"alnum_prop": 0.714795008912656,
"repo_name": "open-telemetry/opentelemetry-python-contrib",
"id": "485dd253ac0fa941abf15d83579e10eb08a21abf",
"size": "1146",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "propagator/opentelemetry-propagator-aws-xray/tests/performance/benchmarks/test_benchmark_aws_xray_propagator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "118"
},
{
"name": "Python",
"bytes": "1960979"
},
{
"name": "Shell",
"bytes": "7256"
}
],
"symlink_target": ""
}
|
from DefineSyntax import *
from GetAllClass import *
#A function for storing user's tags
def DefineCreat(US,FS,DictHash,DefineSyntaxPath='DefineSyntax'):
"""
    Create all definitions
"""
Cls=GetAllClass(DefineSyntaxPath,DefineSyntax)
for c in Cls:
obj=c()
DictHash=obj.Creat(obj.GetFlag(),US,FS,DictHash)
return DictHash
|
{
"content_hash": "915f9f7d3ed98f498f110699ad4f14e4",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 64,
"avg_line_length": 25.23076923076923,
"alnum_prop": 0.7652439024390244,
"repo_name": "dtysky/Gal2Renpy",
"id": "3f9660196fa13714092f4fc419c3bf43fa41b9c5",
"size": "436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Gal2Renpy/G2R/DefineCreat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3678"
},
{
"name": "Python",
"bytes": "73641"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
import urllib3
from sdklib.compat import cache # noqa: F401
def disable_warnings():
urllib3.disable_warnings()
|
{
"content_hash": "d880b95f17cfb0c348670d7cb77496be",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 45,
"avg_line_length": 19.666666666666668,
"alnum_prop": 0.7457627118644068,
"repo_name": "ivanprjcts/sdklib",
"id": "a8217ec0b80585c5cf2534906c0fe75aa8372424",
"size": "118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdklib/shortcuts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Gherkin",
"bytes": "2956"
},
{
"name": "HTML",
"bytes": "71420"
},
{
"name": "Python",
"bytes": "186133"
}
],
"symlink_target": ""
}
|
"""
Django settings for avapi project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
import os.path
from os import environ
ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', '..'))
SRC_ROOT = os.path.realpath(os.path.join(os.path.dirname(__file__), '..'))
PROJECT_CONFIG_ROOT = os.path.dirname(os.path.realpath(__file__))
SITE_ROOT = os.path.abspath(os.path.dirname(__name__))
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
CONFIG_JSON_FILE = open('%s/config.json' % ROOT)
CONFIG = json.load(CONFIG_JSON_FILE)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = CONFIG['secret_key']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'avapi.urls'
WSGI_APPLICATION = 'avapi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "03504cbef693d353e87576d94c5d1407",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 76,
"avg_line_length": 25.5531914893617,
"alnum_prop": 0.7148209825145712,
"repo_name": "kahnjw/avapi",
"id": "2b5d3cc83a726db3224b98b15473c877fa0469b2",
"size": "2402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avapi/avapi/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3764"
}
],
"symlink_target": ""
}
|
from typing import List
class Solution:
def maxProfit(self, inventory: List[int], orders: int) -> int:
inventory=sorted(inventory, reverse=True)
inventory.append(0)
answer=0
for i in range(1, len(inventory)):
if orders==0 or inventory[i-1]==0:
return answer%1000000007
diff=inventory[i-1]-inventory[i]
if diff==0:
continue
if orders>=(diff*i):
answer+=((inventory[i]+1+inventory[i-1])*diff//2)*i
orders-=(diff*i)
else:
pick=orders//i
answer+=(2*inventory[i-1]-pick+1)*(pick)*i//2
answer+=(orders-pick*i)*(inventory[i-1]-pick)
orders=0
return answer%1000000007
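# How the bulk step above works (explanatory note, not part of the original):
# with i piles dropping from height inventory[i-1] to inventory[i], each pile
# sells diff = inventory[i-1]-inventory[i] layers priced inventory[i-1],
# inventory[i-1]-1, ..., inventory[i]+1, an arithmetic series summing to
# (inventory[i]+1+inventory[i-1])*diff//2 per pile, times i piles.
# e.g. inventory=[3, 3], orders=4 sells layers priced 3, 3, 2, 2 -> 10.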
|
{
"content_hash": "7e017b1cec3f8ae428e9726283ec174e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 67,
"avg_line_length": 39.3,
"alnum_prop": 0.49236641221374045,
"repo_name": "Magic07/online-judge-solutions",
"id": "22d502aa6212d17adcb217d5f63d96b8493cfbdf",
"size": "786",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leetcode/1771-sell-diminishing-valued-colored-balls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34617"
}
],
"symlink_target": ""
}
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print __doc__
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD Style.
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_square_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print "======================="
print "Round %d %d" % (i, j)
print "n_features:", n_features
print "n_samples:", n_train
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print "- benching ElasticNet"
clf = ElasticNet(alpha=alpha, rho=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_square_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print "- benching SGD"
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_square_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print "- benching RidgeRegression"
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_square_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure(figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
{
"content_hash": "f9387dd7f3435ad89976535d5025243f",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 76,
"avg_line_length": 34.61538461538461,
"alnum_prop": 0.5106666666666667,
"repo_name": "joshbohde/scikit-learn",
"id": "df84c2d1aa90455fbad69a393705779c4ce83c7c",
"size": "4500",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "benchmarks/bench_sgd_regression.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
A simple python representation for AMQP content.
"""
def default(val, defval):
  if val is None:
return defval
else:
return val
class Content:
def __init__(self, body = "", children = None, properties = None):
self.body = body
self.children = default(children, [])
self.properties = default(properties, {})
def size(self):
return len(self.body)
def weight(self):
return len(self.children)
def __getitem__(self, name):
return self.properties[name]
def __setitem__(self, name, value):
self.properties[name] = value
def __delitem__(self, name):
del self.properties[name]
def __str__(self):
if self.children:
return "%s [%s] %s" % (self.properties,
", ".join(map(str, self.children)),
self.body)
else:
return "%s %s" % (self.properties, self.body)
|
{
"content_hash": "698bffd0ba780f155330afc0f699c030",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 68,
"avg_line_length": 22.846153846153847,
"alnum_prop": 0.5802469135802469,
"repo_name": "SVADemoAPP/AmqpCode",
"id": "9391f4f1a8064cdcbd6c95f4e8a67ca5d03f605b",
"size": "1683",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/qpid/content.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8477"
},
{
"name": "C++",
"bytes": "70433"
},
{
"name": "Java",
"bytes": "12732"
},
{
"name": "Makefile",
"bytes": "580"
},
{
"name": "Python",
"bytes": "489612"
}
],
"symlink_target": ""
}
|
"""Tests for string_split_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class StringSplitOpTest(tf.test.TestCase):
def testStringSplit(self):
strings = ["pigs on the wing", "animals"]
with self.test_session() as sess:
tokens = tf.string_split(strings)
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [1, 0]])
self.assertAllEqual(values, [b"pigs", b"on", b"the", b"wing", b"animals"])
self.assertAllEqual(shape, [2, 4])
def testStringSplitEmptyDelimiter(self):
strings = ["hello", "hola", b"\xF0\x9F\x98\x8E"] # Last string is U+1F60E
with self.test_session() as sess:
tokens = tf.string_split(strings, delimiter="")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [0, 3], [0, 4],
[1, 0], [1, 1], [1, 2], [1, 3],
[2, 0], [2, 1], [2, 2], [2, 3]])
expected = np.array(
['h', 'e', 'l', 'l', 'o', 'h', 'o', 'l',
'a', b'\xf0', b'\x9f', b'\x98', b'\x8e'], dtype='|S1')
self.assertAllEqual(values.tolist(), expected)
self.assertAllEqual(shape, [3, 5])
def testStringSplitEmptyToken(self):
strings = [" hello ", "", "world "]
with self.test_session() as sess:
tokens = tf.string_split(strings)
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [2, 0]])
self.assertAllEqual(values, [b"hello", b"world"])
self.assertAllEqual(shape, [3, 1])
def testStringSplitWithDelimiter(self):
strings = ["hello|world", "hello world"]
with self.test_session() as sess:
self.assertRaises(
ValueError, tf.string_split, strings, delimiter=["|", ""])
self.assertRaises(ValueError, tf.string_split, strings, delimiter=["a"])
tokens = tf.string_split(strings, delimiter="|")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
tokens = tf.string_split(strings, delimiter="| ")
indices, values, shape = sess.run(tokens)
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0], [1, 1]])
self.assertAllEqual(values, [b"hello", b"world", b"hello", b"world"])
self.assertAllEqual(shape, [2, 2])
def testStringSplitWithDelimiterTensor(self):
strings = ["hello|world", "hello world"]
with self.test_session() as sess:
delimiter = tf.placeholder(tf.string)
tokens = tf.string_split(strings, delimiter=delimiter)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: "|"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [1, 0]])
self.assertAllEqual(values, [b"hello", b"world", b"hello world"])
self.assertAllEqual(shape, [2, 2])
def testStringSplitWithDelimitersTensor(self):
strings = ["hello.cruel,world", "hello cruel world"]
with self.test_session() as sess:
delimiter = tf.placeholder(tf.string)
tokens = tf.string_split(strings, delimiter=delimiter)
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a", "b"]})
with self.assertRaises(tf.errors.InvalidArgumentError):
sess.run(tokens, feed_dict={delimiter: ["a"]})
indices, values, shape = sess.run(tokens, feed_dict={delimiter: ".,"})
self.assertAllEqual(indices, [[0, 0], [0, 1], [0, 2], [1, 0]])
self.assertAllEqual(values, [b"hello", b"cruel", b"world",
b"hello cruel world"])
self.assertAllEqual(shape, [2, 3])
if __name__ == "__main__":
tf.test.main()
|
{
"content_hash": "a186b2386c197405ce71e31a72301b89",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 80,
"avg_line_length": 38.822429906542055,
"alnum_prop": 0.6037554164660568,
"repo_name": "ppries/tensorflow",
"id": "5aa1390a9ac9458d291f7e2a20acc53c25620afe",
"size": "4843",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/string_split_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6963"
},
{
"name": "C",
"bytes": "118101"
},
{
"name": "C++",
"bytes": "14610065"
},
{
"name": "CMake",
"bytes": "110931"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "96398"
},
{
"name": "HTML",
"bytes": "533840"
},
{
"name": "Java",
"bytes": "179112"
},
{
"name": "JavaScript",
"bytes": "13406"
},
{
"name": "Jupyter Notebook",
"bytes": "1833491"
},
{
"name": "Makefile",
"bytes": "23553"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "151841"
},
{
"name": "Python",
"bytes": "14778281"
},
{
"name": "Shell",
"bytes": "310226"
},
{
"name": "TypeScript",
"bytes": "757225"
}
],
"symlink_target": ""
}
|
import json
from django.conf import settings
from django.http import Http404
from . import models
from accounts.models import UserSettings
from accounts.payment_plans import FEATURE_MIN_COMMENT_BOX, FEATURE_MIN_SITE_FAVICON, minimum
from podcasts.models import Podcast, PodcastEpisode
from pinecast.helpers import get_object_or_404, reverse
def gets_podcast(func):
def wrapped(req, podcast_slug, *args, **kwargs):
if isinstance(podcast_slug, Podcast):
return func(req, podcast_slug, *args, **kwargs)
pod = get_object_or_404(Podcast, slug=podcast_slug, select_related='owner')
return func(req, pod, *args, **kwargs)
return wrapped
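# The decorator lets views accept either a slug or an already-resolved Podcast,
# so e.g. site_home(req, 'my-show') and site_home(req, podcast_obj) both end up
# calling the wrapped view with a Podcast instance (slug value is hypothetical).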
@gets_podcast
def site_home(req, pod):
us = UserSettings.get_from_user(pod.owner)
site = get_object_or_404(models.Site, podcast=pod)
return {
'site': {
'canonical_url': site.get_domain(),
'cover_image_url': site.get_cover_image_url(),
'favicon_url': site.favicon.get_url() if site.favicon else None,
'logo_url': site.logo.get_url() if site.logo else None,
'itunes_url': site.itunes_url,
'google_play_url': site.google_play_url,
'stitcher_url': site.stitcher_url,
'analytics_id': site.analytics_id,
'itunes_banner': site.get_banner_id(),
'legacy_theme': site.theme,
'theme': json.loads(site.theme_data) if site.theme_data else {'$theme': site.theme},
},
'podcast': {
'slug': pod.slug,
'cover_image': pod.get_cover_image_url(),
'description': pod.description,
'name': pod.name,
'copyright': pod.copyright,
'subtitle': pod.subtitle,
'language': pod.language,
'author_name': pod.author_name,
},
'features': {
'comment_box': minimum(us.plan, FEATURE_MIN_COMMENT_BOX),
'favicon': minimum(us.plan, FEATURE_MIN_SITE_FAVICON),
},
'links': site.get_site_links(),
'pages': {
page.slug: _format_page(page) for
page in
site.sitepage_set.all()
},
}
def _format_page(page):
return {
'title': page.title,
'slug': page.slug,
'page_type': page.page_type,
'created': page.created.isoformat(),
'body': page.body if page.page_type == 'markdown' else json.loads(page.body),
}
def _format_ep(episode):
base_url = 'https://pinecast.com%s' if not settings.DEBUG else 'http://localhost:8000%s'
return {
'id': str(episode.id),
'title': episode.title,
'subtitle': episode.subtitle,
'image_url': episode.get_image_url(),
'audio_type': episode.audio.content_type,
'publish': episode.publish.isoformat(),
'description': episode.get_html_description(),
'description_raw': episode.description[:300].replace('\n', ' '),
'player_url': base_url % reverse('player', episode_id=str(episode.id))
}
@gets_podcast
def site_episode(req, pod, episode_id):
episode = get_object_or_404(PodcastEpisode, podcast=pod, id=episode_id)
if episode.check_is_private():
raise Http404()
return _format_ep(episode)
@gets_podcast
def site_episodes(req, pod):
episodes = pod.get_episodes(select_related='podcast')
total = len(episodes)
offset = int(req.GET.get('offset', 0))
if offset < 0 or (offset >= total and offset):
raise Http404()
count = int(req.GET.get('count', 10))
if count < 0 or count > 100:
raise Http404()
has_previous = offset > 0
has_next = count + offset < total
return {
'offset': offset,
'count': count,
'has_previous': has_previous,
'has_next': has_next,
'items': [_format_ep(episode) for episode in episodes[offset:offset + count]],
}
|
{
"content_hash": "115a440bdad810dfc45fb3ae559ab8c8",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 96,
"avg_line_length": 33.452991452991455,
"alnum_prop": 0.5955544200306592,
"repo_name": "AlmostBetterNetwork/pinecast",
"id": "bd4ccfe95b5183f5082dc157e21b25bacb17c5d7",
"size": "3914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sites/data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59857"
},
{
"name": "HTML",
"bytes": "130751"
},
{
"name": "JavaScript",
"bytes": "46479"
},
{
"name": "Python",
"bytes": "200422"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import, division
from armet.connectors.flask.http import Request, Response
__all__ = [
'Request',
'Response'
]
|
{
"content_hash": "e7879786bfb7cf901d0ad0c0b880f2d1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 66,
"avg_line_length": 24.285714285714285,
"alnum_prop": 0.7058823529411765,
"repo_name": "concordusapps/alchemist-armet",
"id": "58961fda1fb56d31604c52d7477838193638b979",
"size": "194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alchemist_armet/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3764"
}
],
"symlink_target": ""
}
|
class TypeRegistry(object):
def __init__(self):
self._registry = {}
self._registry_types = ()
def Register(self, base_type, registered):
if base_type in self._registry:
return
self._registry[base_type] = registered
self._registry_types += (base_type,)
def LookUp(self, instance):
for base_type, registered in self._registry.items():
if isinstance(instance, base_type):
return registered
def IsInstanceOfAny(self, instance):
return isinstance(instance, self._registry_types)
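# Minimal usage sketch (the registered values are hypothetical):
#   registry = TypeRegistry()
#   registry.Register(int, "integer handler")
#   registry.Register(str, "string handler")
#   registry.LookUp(True)           # -> "integer handler" (bool subclasses int)
#   registry.IsInstanceOfAny(3.14)  # -> False, float was never registered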
|
{
"content_hash": "faa169299dadb062077c657a92e53f19",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 29.61111111111111,
"alnum_prop": 0.6697936210131332,
"repo_name": "chainreactionmfg/cara",
"id": "6f6527c90d992a9efa63766e74dd677f00a2bb52",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cara/type_registry.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "33264"
},
{
"name": "Cap'n Proto",
"bytes": "2556"
},
{
"name": "Python",
"bytes": "70043"
},
{
"name": "Shell",
"bytes": "763"
}
],
"symlink_target": ""
}
|
import os
import pickle
from flask import Flask, render_template, request
from google.cloud import ndb
import redis
app = Flask(__name__)
ds_client = ndb.Client()
HOUR = 3600
REDIS_HOST = os.environ.get('REDIS_HOST', 'localhost')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS = redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
class Visit(ndb.Model):
'Visit entity registers visitor IP address & timestamp'
visitor = ndb.StringProperty()
timestamp = ndb.DateTimeProperty(auto_now_add=True)
def store_visit(remote_addr, user_agent):
'create new Visit entity in Datastore'
with ds_client.context():
Visit(visitor='{}: {}'.format(remote_addr, user_agent)).put()
def fetch_visits(limit):
'get most recent visits'
with ds_client.context():
return Visit.query().order(-Visit.timestamp).fetch(limit)
@app.route('/')
def root():
'main application (GET) handler'
# check for (hour-)cached visits
ip_addr, usr_agt = request.remote_addr, request.user_agent
visitor = '{}: {}'.format(ip_addr, usr_agt)
rsp = REDIS.get('visits')
visits = pickle.loads(rsp) if rsp else None
# register visit & run DB query if cache empty or new visitor
if not visits or visits[0].visitor != visitor:
store_visit(ip_addr, usr_agt)
visits = list(fetch_visits(10))
REDIS.set('visits', pickle.dumps(visits), ex=HOUR)
return render_template('index.html', visits=visits)
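# Request flow, for illustration: the first hit misses the 'visits' key, writes
# the visit to Datastore, then caches the pickled query result in Redis for an
# hour; repeat hits from the same visitor within that hour skip Datastore.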
|
{
"content_hash": "9be0fc0058bd280162c0aab2b86c3329",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 69,
"avg_line_length": 33,
"alnum_prop": 0.6804407713498623,
"repo_name": "googlecodelabs/migrate-python2-appengine",
"id": "fb567e836ec60d5c212c4df4c7d2f80ba5595b2f",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mod13b-memorystore/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "252"
},
{
"name": "HTML",
"bytes": "13449"
},
{
"name": "Procfile",
"bytes": "38"
},
{
"name": "Python",
"bytes": "72484"
}
],
"symlink_target": ""
}
|
import os
from yaffsfs import *
import sys
import ctypes
dir_in_snapshot=[]
files_in_snapshot=[]
symlinks_in_snapshot=[]
unknown_in_snapshot=[]
is_mount_in_snapshot=[]
def check_for_yaffs_errors(output):
if output<0:
##error has happened
error=ctypes.c_int()
error=yaffs_get_error()
debug_message("error######################################",0)
debug_message(("error code", error), 0)
error_message=ctypes.c_char_p()
error_message.value=yaffs_error_to_str(error);
print "error message", error_message.value
def debug_message(message, debug_level):
"""note: that debug level 0 will always be printed unless debug_level is set to -1"""
"""level 0 error messages"""
"""level 1 basic tasks are shown(creating, deleating,ect)"""
"""level 2 all process are shown"""
"""level 3 shows minor tasks such as join_paths, ect"""
"""level 4 is used for bug hunting and shows each step in detail"""
if current_debug_level>=debug_level:
# for i in range(0, len(message)):
# print message,
# print"\n \n \n"
print message
def join_paths(path1, path2):
new_path=path1
if path1[len(path1)-1]=="/"and path2[0]=="/":
new_path+=path2[1:]
elif path1[len(path1)-1]!="/"and path2[0]!="/":
new_path+="/"
new_path+=path2
else:
new_path+=path2
debug_message(("adding path ", path1, " to ", path2, " resulting path: ", new_path), 3)
return new_path
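# For illustration (hypothetical paths):
#   join_paths("/yaffs2/", "/dir/file") -> "/yaffs2/dir/file"
#   join_paths("/yaffs2", "dir/file")   -> "/yaffs2/dir/file"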
def subtract_paths(path1, path2):
if len(path1)>len(path2):
if path1[len(path1)-1]!="/":
path1 +="/"
if path2[len(path2)-1]!="/":
path2 += "/"
debug_message("the two path1 is longer than path2 and can therefore be subtracted.", 4)
##if the two paths are diretly subtractable
if path1[0:len (path2)-1]==path2:
debug_message("the two paths are direcly subtractable", 4)
new_path=path1[len(path2):]
elif path1[1:len (path2)-1]==path2:
debug_message("the path1 has one more charecter at the begining. assuming that the first chareter is a slash", 4)##fix this assumption.
new_path=path1[len(path2)+1:]
elif path1[1:len (path2)]==path2[1:]:
debug_message("the path2 has one more charecter at the begining. assuming that the first chareter is a slash", 4)##fix this assumption.
new_path=path1[len(path2)-1:]
else :
debug_message("error:could not subtract paths", 0)
debug_message( ("paths do not match:"+ path1+ " "+path2), 0)
return 0
else :
debug_message( ("cannot subtract path2(:", path2, ") from path1(", path1, ")because path 2 is too long"), 0)
return 0
debug_message(("subtracting paths ", path2, " from ", path1, " resulting path: ", new_path), 3)
return new_path
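# For illustration (hypothetical paths): trailing slashes are appended before
# comparing, so subtract_paths("/yaffs2/dir/file", "/yaffs2") -> "/dir/file/".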
def create_file(file):
# freespace=ctypes.c_longlong
# freespace.value=yaffs_freespace(yaffs_root_dir_path)
# print "yaffs free space:", freespace.value
debug_message( "\n \n \n", 2)
file_path= join_paths(yaffs_root_dir_path, file["path"][len(path):])
debug_message( ("creating file:", file_path), 2)
debug_message (("mode", file["mode"]), 2)
debug_message("opening file",2)
# yaffs_ls(file["path"])
    ##if there is already a file in yaffs then remove the file. this is to prevent yaffs from opening and writing to a read-only file
if yaffs_access(file_path, 0)==0:##the 0 means does it exist.
debug_message ("file already exists in yaffs", 2)
output=yaffs_unlink(file_path)
debug_message(("unlinking", file_path, output), 2)
check_for_yaffs_errors(output)
current_handle=yaffs_open(file_path, yaffs_O_CREAT | yaffs_O_TRUNC| yaffs_O_RDWR, yaffs_S_IREAD | yaffs_S_IWRITE) ##opens a file with mode set to write
debug_message(("current_handle", current_handle), 2)
data_file=open(file["path"], "r")
output=yaffs_lseek(current_handle, 0, 0)
if output==-1:
##if there is no more space in the emfile then this is where it will show up.
        freespace=ctypes.c_longlong()
        freespace.value=yaffs_freespace(yaffs_root_dir_path)
print "yaffs free space:", freespace.value
if freespace.value==0:
#print "yaffs free space:", yaffs_freespace(yaffs_root_dir_path)
print "yaffs is out of space exiting program"
#sys.exit()
debug_message("error with yaffs lseeking", 2)
check_for_yaffs_errors(output)
data_to_be_written= data_file.read()
length_of_file=len(data_to_be_written)
debug_message (("length of data to be written",length_of_file), 3)
output=yaffs_write(current_handle,data_to_be_written , length_of_file)
if output>=0:
debug_message(( "writing to ", file_path," ", output), 1)
else :
debug_message(( "error writing file:", output), 0)
check_for_yaffs_errors(output)
output=yaffs_ftruncate(current_handle, length_of_file)
if output>=0:
debug_message(( "truncating file:", output), 2)
else :
debug_message(( "error truncating file:", output), 0)
check_for_yaffs_errors(output)
output=yaffs_close(current_handle)
if output>=0:
debug_message(( "closing file:", output), 2)
else :
debug_message(( "error closing file:", output), 0)
check_for_yaffs_errors(output)
##changes the mode of the yaffs file to be the same as the scanned file
yaffs_chmod(file_path, file["mode"]);
if output>=0:
debug_message(( "chmoding file:", output), 2)
else :
debug_message(( "error chmoding file:", output), 0)
check_for_yaffs_errors(output)
def create_dir(dir, scanned_path, yaffs_path):
debug_message( "\n \n \n", 2)
absolute_dir_path=join_paths(yaffs_path, subtract_paths(dir["path"],scanned_path))
debug_message( ("creating dir:", absolute_dir_path), 2)
debug_message (("mode(in octal", oct(dir["mode"])), 2)
##this is a bug in yaffs which will not make a dir if there is a slash on the end
if absolute_dir_path[len(absolute_dir_path)-1]=="/":
absolute_dir_path=absolute_dir_path[0:len(absolute_dir_path)-1]
debug_message (("path has slash on the end. removing slash new path is:",absolute_dir_path) , 4)
##if there is already a dir in yaffs then remove the dir . this is to clean the yaffs folder if it already exists.
##in yaffs all of the files in the dir to be removed must be empty.
##need to create a reverse ls to delete all of the files first.
# if yaffs_access(absolute_dir_path, 0)==0:##the 0 means does it exist.
# debug_message ("folder already exists in yaffs", 2)
# output=yaffs_rmdir(absolute_dir_path)
# debug_message(("unlinking", absolute_dir_path, output), 2)
# check_for_yaffs_errors(output)
output=yaffs_mkdir(absolute_dir_path, dir["mode"] )
if output>=0:
debug_message(( "created dir:", absolute_dir_path," ", output), 1)
else :
debug_message(( "error creating dir ", absolute_dir_path, " ", output), 0)
check_for_yaffs_errors(output)
if output==17:
printf("the directory already exists")
def remove_file_from_path(path):
slash_id=[]
for i in range(0, len(path)):
if path[i]=="/":
slash_id.append(i)
new_path=path[:slash_id[len(slash_id)-1]]
debug_message( ("removed file from path", new_path), 2)
return new_path
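# For illustration (hypothetical path):
#   remove_file_from_path("/a/b/file.txt") -> "/a/b"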
def is_dir_hidden(dir):
"""this code tests if a directory is hidden (has a ./<name> format) and returns true if it is hidden"""
slash_id=[]
for i in range(0, len(dir)):
if dir[i]=="/":
slash_id.append(i)
if dir[slash_id[len(slash_id)-1]+1]==".":
return True
else :
return False
def scan_dir(path, search_hidden_directories=True):
    """this function scans all of the files and directories in a directory. The function then calls itself on any of the directories it finds, building up a tree of all the files and directories."""
global files_in_snapshot
global symlinks_in_snapshot
global dir_in_snapshot
dir_in_current_dir=[]
global unknown_in_snapshot
# files_in_snapshot=[]
# symlinks_in_snapshot=[]
# dir_in_snapshot=[]
# dir_in_current_dir=[]
# unknown_in_snapshot=[]
if os.path.exists(path)==False:
debug_message ("error#############################",0)
debug_message (("path:", path, " doesnot exist"), 0)
return 0
dir_snapshot=os.listdir(path)
for i in range(0, len(dir_snapshot)):
current_snapshot=os.path.join(path, dir_snapshot[i])
##debug_message (("current snapshot:", current_snapshot), 2)
isDir=os.path.isdir(current_snapshot)
isFile=os.path.isfile(current_snapshot)
isLink=os.path.islink(current_snapshot)
isMount=os.path.ismount(current_snapshot)
stat=os.lstat(current_snapshot)
        ##note: the order of these if and elif statements is important since a file can be both a symbolic link and a file
if isDir:
if search_hidden_directories or not is_dir_hidden(current_snapshot):
# st_mode ##mode of the folder read/write ect
dir_in_snapshot.append({"path":current_snapshot, "mode":stat.st_mode})
dir_in_current_dir.append(current_snapshot)
else :
debug_message( ("file is hidden so it is ingored", current_snapshot,), 1)
elif isLink:
##os.readlink gives the target relative to the directory containing the symbolic link, so change this into an absolute path
x=current_snapshot
x=remove_file_from_path(x)
target=join_paths(x,os.readlink(current_snapshot) )
symlinks_in_snapshot.append({"path":current_snapshot, "target":target})
elif isFile:
# stat.st_ino ##inode number
# st_nlink ##number of hard links to this file
# st_size ##size of file
files_in_snapshot.append({"path":current_snapshot, "inode": stat.st_ino, "size":stat.st_size, "num_of_hardlinks":stat.st_nlink, "mode":stat.st_mode})
# elif isMount:
# is_mount_in_snapshot.append(current_snapshot)
else:
unknown_in_snapshot.append(current_snapshot)
for i in range(0, len(dir_in_current_dir)):
debug_message(("scanning dir", dir_in_current_dir[i]) , 2)
scan_dir(dir_in_current_dir[i], search_hidden_directories)
# #debug_message(("data 0", data[0][0]), 2)
# if len(files)
# files_in_snapshot.append(data[0][0])
# dir_in_snapshot.append(data[1][0])
# symlinks_in_snapshot.append(data[2][0])
# unknown_in_snapshot.append(data[3][0])
return (files_in_snapshot, dir_in_snapshot, symlinks_in_snapshot, unknown_in_snapshot)
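##a minimal usage sketch of scan_dir (the path below is an assumption for illustration,
##not part of this module):
##files, dirs, symlinks, unknown = scan_dir("/tmp/example")
##for f in files:
##    print "found file:", f["path"], "size:", f["size"]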
##
##def print_scanned_dir_list():
## global files_in_snapshot
## global symlinks_in_snapshot
## print( "scanning dir", 2)
##
##
## for i in range(0, len(files_in_snapshot)):
## if files_in_snapshot[i]["num_of_hardlinks"]>1:
## print "inode",files_in_snapshot[i]["inode"],"size",files_in_snapshot[i]["size"],"path:", files_in_snapshot[i]["path"], " num of hard links", files_in_snapshot[i]["num_of_hardlinks"]
##
## else :
## print "inode",files_in_snapshot[i]["inode"],"size",files_in_snapshot[i]["size"],"path:", files_in_snapshot[i]["path"]
### current_open_file=open(files_in_snapshot[i], "r")
### #current_open_file.f.read(3)
### lines_in_file=current_open_file.readlines()
### #use for loop to write code into yaffs file
### print "number of line of code:", len(lines_in_file)
### print current_open_file
## for i in range(0, len(symlinks_in_snapshot)):
## print "symlinks in snapshot:", symlinks_in_snapshot[i]
## for i in range(0, len(dir_in_snapshot)):
## print "directories in snapshot:", dir_in_snapshot[i]
## for i in range(0, len(unknown_in_snapshot)):
## print "unknown objects in snapshot:", unknown_in_snapshot[i]
##
def copy_scanned_files_into_yaffs(files_in_snapshot, dir_in_snapshot, symlinks_in_snapshot, unknown_in_snapshot, path, yaffs_root_dir_path="/yaffs2/", yaffs_mount_point_path="/yaffs2/" ):
#files_in_snapshot, dir_in_snapshot, symlinks_in_snapshot, unknown_in_snapshot
#########################################copy directories into yaffs so the files can be created in these directories
debug_message("making directories in yaffs", 1)
if yaffs_root_dir_path!=yaffs_mount_point_path:
slash_id=[]
debug_message("making directories to the place in yaffs where the directories will copied to", 2)
root_branch_path=subtract_paths(yaffs_root_dir_path, yaffs_mount_point_path)
for i in range(0, len(root_branch_path)):
if root_branch_path[i]=="/" and i != 0:
slash_id.append(i)
debug_message(("slash_id", slash_id),4)
for i in range(0, len(slash_id)):
create_dir({"path":root_branch_path[:slash_id[i]], "mode": yaffs_S_IREAD | yaffs_S_IWRITE}, "/", yaffs_mount_point_path)
for i in range(0, len(dir_in_snapshot)):
create_dir(dir_in_snapshot[i], path, yaffs_root_dir_path)
#########################################copy file into yaffs
debug_message("copying scanned files into yaffs", 1)
list=[]
inode_blacklist=[]
debug_message("files to be copyied into yaffs", 2)
for a in range(0, len(files_in_snapshot)):
debug_message(files_in_snapshot[a], 2)
debug_message("\n\n\n", 2)
for i in range(0, len(files_in_snapshot)):
list=[]
if files_in_snapshot[i]["num_of_hardlinks"]>1 and files_in_snapshot[i]["inode"] not in inode_blacklist :
debug_message("found a hard link", 2)
debug_message(("inode",files_in_snapshot[i]["inode"],"size",files_in_snapshot[i]["size"],"path:", files_in_snapshot[i]["path"], " num of hard links", files_in_snapshot[i]["num_of_hardlinks"] ), 2)
for a in range(0, len(files_in_snapshot) ) :
if files_in_snapshot[a]["inode"] ==files_in_snapshot[i]["inode"] :
##and os.path.isfile(files_in_snapshot[i])
debug_message(("found this file which matches inode",files_in_snapshot[a]), 2)
list.append(files_in_snapshot[a])
debug_message(("length of list", len(list)), 2)
if len(list)==files_in_snapshot[i]["num_of_hardlinks"]:
break
for a in range(0, len(list)):
debug_message(list[a], 2)
##add inode to blacklist. all of the inodes in the list should be the same.
inode_blacklist.append(list[0]["inode"])
##create a file from the first hardlink.
create_file(list[0])
target_path=yaffs_root_dir_path+list[0]["path"][len(path):]
for i in range(1, len(list)):
debug_message("creating_symlink", 2)
debug_message(("target path", target_path), 2)
hardlink_path=yaffs_root_dir_path+list[i]["path"][len(path):]
debug_message(("hardlink path", hardlink_path), 2)
output=yaffs_link(target_path,hardlink_path)
debug_message(("creating hardlink:", list[i]["path"], "output:", output), 1)
elif files_in_snapshot[i]["inode"] not in inode_blacklist :
create_file(files_in_snapshot[i])
############################copy symlinks into yaffs
for i in range(0, len(symlinks_in_snapshot)):
debug_message(("symlinks in snapshot:", symlinks_in_snapshot[i]), 2)
target_path=join_paths(yaffs_root_dir_path, subtract_paths(symlinks_in_snapshot[i]["target"], path))
new_path=join_paths(yaffs_root_dir_path, subtract_paths(symlinks_in_snapshot[i]["path"], path))
output=yaffs_symlink(target_path, new_path)
debug_message(("created symlink",new_path , " > ", target_path, " output:", output), 1)
##yaffs_symlink(const YCHAR *oldpath, const YCHAR *newpath);
for i in range(0, len(unknown_in_snapshot)):
debug_message( ("unknown object in snapshot:", unknown_in_snapshot[i]), 0)
def import_into_yaffs(file_path, yaffs_path="/yaffs2/", debug_level=1, copy_hidden_dir=True ,new_yaffs_trace_val=-1 ):
# global current_debug_level
# global search_hidden_directories
# global yaffs_root_dir_path
# global path
# current_debug_level=debug_level
# search_hidden_directories=copy_hidden_dir
# yaffs_root_dir_path=yaffs_path
# path=file_path
old_yaffs_trace_val=yaffs_get_trace()
if new_yaffs_trace_val!=-1:
yaffs_set_trace(new_yaffs_trace_val)
data=scan_dir(file_path, copy_hidden_dir)
copy_scanned_files_into_yaffs(data[0], data[1], data[2], data[3],file_path, yaffs_path)
yaffs_set_trace(old_yaffs_trace_val)
if __name__=="__main__":
yaffs_start_up()
yaffs_mount("/yaffs2/")
#yaffs_set_trace(0)
# absolute_path = os.path.abspath(os.path.curdir)
#print "absolute path:", absolute_path
current_debug_level=1
search_hidden_directories=True
yaffs_root_dir_path="/yaffs2/"
yaffs_trace=-1
#print sys.argv
path=sys.argv[1]
for i in range(2, len(sys.argv)):
if sys.argv[i]=="-d":
current_debug_level=int( sys.argv[i+1])
if sys.argv[i]=="-ignore_hidden_directories":
search_hidden_directories=False
if sys.argv[i]=="-o":
yaffs_root_dir_path=sys.argv[i+1]
if sys.argv[i]=="-yaffs_trace":
yaffs_trace=int(sys.argv[i+1])
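##example invocation of this script (paths and values are illustrative):
##python yaffs_importer.py /tmp/example -d 2 -o /yaffs2/out/ -ignore_hidden_directories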
#
#
# path="/home/timothy/work/yaffs/git/yaffs2"
# path="/home/timothy/my_stuff/old_laptop/timothy/programming_lejos/"
import_into_yaffs(path, yaffs_root_dir_path, current_debug_level, search_hidden_directories, yaffs_trace )
# scan_dir(path)
# copy_scanned_files_into_yaffs()
#print_scanned_dir_list()
print"unmounting yaffs:", yaffs_unmount("/yaffs2/")
|
{
"content_hash": "88b0729983ac1a36e6a468bcb6ddd95c",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 220,
"avg_line_length": 43.57783018867924,
"alnum_prop": 0.6104345943605564,
"repo_name": "One-ders/CEC-gw",
"id": "86f65e996930543312fd8ca5911c6e86a181e83a",
"size": "18847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bl2/fs/yaffs2/test-framework/python/yaffs_importer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "48170"
},
{
"name": "C",
"bytes": "2707773"
},
{
"name": "C++",
"bytes": "101788"
},
{
"name": "GDB",
"bytes": "657"
},
{
"name": "Makefile",
"bytes": "62124"
},
{
"name": "Objective-C",
"bytes": "151"
},
{
"name": "Python",
"bytes": "52047"
},
{
"name": "Shell",
"bytes": "9066"
}
],
"symlink_target": ""
}
|
import sys
import pytest
def pytest_addoption(parser):
parser.addoption("--with-speedups", action="store_true", default=False,
help="Run tests with speedups.")
def pytest_runtest_setup(item):
if item.config.getoption("--with-speedups"):
import shapely.speedups
if not shapely.speedups.available:
print("Speedups have been demanded but are unavailable")
sys.exit(1)
shapely.speedups.enable()
print("Speedups enabled for %s." % item.name)
|
{
"content_hash": "113ed743535d3fe3db87cc4b8c516fce",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 75,
"avg_line_length": 32,
"alnum_prop": 0.66015625,
"repo_name": "johanvdw/python-shapely",
"id": "3fe9d4ecbc774ba97b446518fff2792971553a2a",
"size": "512",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "319559"
},
{
"name": "Shell",
"bytes": "1467"
}
],
"symlink_target": ""
}
|
import smtplib
import os
class Emailer:
def __init__(self, email_address):
self.FROM_EMAIL = "websitepolling@gmail.com"
self.PASSWORD = "ipollyoursites"
self.email_address = email_address
self.init_server()
def init_server(self):
self.server = smtplib.SMTP("smtp.gmail.com", 587)
self.server.starttls()
self.server.login(self.FROM_EMAIL, self.PASSWORD)
def send_message(self, message="\nYou've got mail!"):
email = "Subject: %s\n\n%s" % ("Your reminder from Website Polling", message)
self.server.sendmail(self.FROM_EMAIL, self.email_address, email)
def notify(self, title, subtitle, message, open):
t = '-title {!r}'.format(title)
s = '-subtitle {!r}'.format(subtitle)
m = '-message {!r}'.format(message)
o = '-open {!r}'.format(open)
os.system('terminal-notifier {}'.format(' '.join([m, t, s, o])))
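# a minimal usage sketch (the address and message are illustrative, and sending
# assumes the hard-coded Gmail credentials above are still valid):
# emailer = Emailer("recipient@example.com")
# emailer.send_message("Your site is back up!")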
|
{
"content_hash": "1705f8f42e3e4e626db9e2d7c88f0f0a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 32.23076923076923,
"alnum_prop": 0.6778042959427207,
"repo_name": "varunrau/WebsitePolling",
"id": "179e36a22b2461c11accff5ff286bfe99d8849e9",
"size": "838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "emailer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2642"
}
],
"symlink_target": ""
}
|
import settings
import importlib
class BotManager:
def __init__(self):
self.bots = []
registered_bots = self.load_registered_bots()
self.initialize_bots(registered_bots)
self.current_active_bot_id = None
def load_registered_bots(self):
registered_bots = settings.REGISTERED_BOTS
bots = []
for bot in registered_bots:
*module, classname = bot.split(".")
bots.append((".".join(module), classname))
return bots
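# a worked example of the starred unpacking above (the dotted path is illustrative):
# "bots.weather.WeatherBot".split(".") yields module "bots.weather" and
# classname "WeatherBot"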
def initialize_bots(self, bots):
id = 0
for module, classname in bots:
module = importlib.import_module(module)
bot_class = getattr(module, classname)
self.bots.append(bot_class(id))
id += 1
def run_action(self, intent):
# TODO: RUN THE REQUIRED BOT
if self.current_active_bot_id is not None:
for bot in self.bots:
if self.current_active_bot_id == bot.id:
try:
return self.activate_bot(bot, intent)
except Exception as e:
raise(e)
action = intent.action
for bot in self.bots:
if action in bot.actions:
try:
return self.activate_bot(bot, intent)
except Exception as e:
raise(e)
# TODO: Raise exception if no bot can process this intent
def activate_bot(self, bot, intent):
self.current_active_bot_id = bot.id
bot.extract_attr(intent)
if bot.has_missing_attr():
try:
return bot.request_missing_attr()
except Exception as e:
raise(e)
else:
try:
print('ID is ',bot.id)
if not bot.is_long_running:
self.current_active_bot_id = None
return bot.execute()
except Exception as e:
raise(e)
def resume_bot(self, command_index):
if self.current_active_bot_id is not None:
for bot in self.bots:
if self.current_active_bot_id == bot.id:
return bot.run_command(command_index)
def terminate_bot(self):
if self.current_active_bot_id is not None:
for bot in self.bots:
if self.current_active_bot_id == bot.id:
bot.terminate()
self.current_active_bot_id = None
return
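# a minimal usage sketch (the intent object is an assumption for illustration;
# it only needs an `action` attribute matching one of a registered bot's actions):
# manager = BotManager()
# response = manager.run_action(intent)
# ...
# manager.terminate_bot()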
|
{
"content_hash": "f7a3140227ce3ce8d629cddea2728723",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 65,
"avg_line_length": 32.86842105263158,
"alnum_prop": 0.5156124899919936,
"repo_name": "brainbots/assistant",
"id": "82204ccab714b9243af6e813fc14180554eeea45",
"size": "2498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assistant/bots/bots_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64028"
}
],
"symlink_target": ""
}
|
from gamecology.app import create_app
from gamecology.settings import ProdConfig, DevConfig
def test_production_config():
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
assert app.config['ASSETS_DEBUG'] is False
def test_dev_config():
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
assert app.config['ASSETS_DEBUG'] is True
|
{
"content_hash": "77c5de9b9412318095734f3480ccb5bc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 32,
"alnum_prop": 0.69921875,
"repo_name": "karimtabet/gamecology",
"id": "d9b0497a35f0a1ce3860d647d097dfa2f4638b2a",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "207932"
},
{
"name": "JavaScript",
"bytes": "238863"
},
{
"name": "Python",
"bytes": "28891"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django import http
from django.utils.http import urlencode
from mox3.mox import IsA
import six
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.usage import quotas
from horizon.workflows import views
INDEX_URL = reverse('horizon:project:floating_ips:index')
NAMESPACE = "horizon:project:floating_ips"
class FloatingIpViewTests(test.TestCase):
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
url = reverse('%s:associate' % NAMESPACE)
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertNotIn(self.floating_ips.first(), choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'floating_ip_target_get_by_instance',
'tenant_floating_ip_list',)})
def test_associate_with_instance_id(self):
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_target_get_by_instance(
IsA(http.HttpRequest), 'TEST-ID', self.servers.list()) \
.AndReturn('TEST-ID')
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'instance_id': 'TEST-ID'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertNotIn(self.floating_ips.first(), choices)
@test.create_stubs({api.network: ('floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_with_port_id(self):
targets = [api.nova.FloatingIpTarget(s) for s in self.servers.list()]
targets[0].port_id = '101'
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(targets)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
self.mox.ReplayAll()
base_url = reverse('%s:associate' % NAMESPACE)
params = urlencode({'port_id': '101'})
url = '?'.join([base_url, params])
res = self.client.get(url)
self.assertTemplateUsed(res, views.WorkflowView.template_name)
workflow = res.context['workflow']
choices = dict(workflow.steps[0].action.fields['ip_id'].choices)
# Verify that our "associated" floating IP isn't in the choices list.
self.assertNotIn(self.floating_ips.first(), choices)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_redirect(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id)
self.mox.ReplayAll()
next = reverse("horizon:project:instances:index")
form_data = {'instance_id': server.id,
'next': next,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, next)
@test.create_stubs({api.network: ('floating_ip_associate',
'floating_ip_target_list',
'tenant_floating_ip_list',)})
def test_associate_post_with_exception(self):
floating_ip = self.floating_ips.list()[1]
server = self.servers.first()
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.network.floating_ip_target_list(IsA(http.HttpRequest)) \
.AndReturn(self.servers.list())
api.network.floating_ip_associate(IsA(http.HttpRequest),
floating_ip.id,
server.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {'instance_id': server.id,
'ip_id': floating_ip.id}
url = reverse('%s:associate' % NAMESPACE)
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest), detailed=False) \
.AndReturn([self.servers.list(), False])
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertMessageCount(success=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.nova: ('server_list',),
api.network: ('floating_ip_disassociate',
'tenant_floating_ip_get',
'tenant_floating_ip_list',),
api.neutron: ('is_extension_supported',)})
def test_disassociate_post_with_exception(self):
floating_ip = self.floating_ips.first()
api.nova.server_list(IsA(http.HttpRequest), detailed=False) \
.AndReturn([self.servers.list(), False])
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.AndReturn(self.floating_ips.list())
api.neutron.is_extension_supported(IsA(http.HttpRequest),
'subnet_allocation')\
.AndReturn(True)
api.network.floating_ip_disassociate(IsA(http.HttpRequest),
floating_ip.id) \
.AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
action = "floating_ips__disassociate__%s" % floating_ip.id
res = self.client.post(INDEX_URL, {"action": action})
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('tenant_floating_ip_list',
'floating_ip_pools_list',),
api.nova: ('server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_attributes(self):
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 10
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.server_list(
IsA(http.HttpRequest), detailed=False) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertEqual(set(['ajax-modal']), set(allocate_action.classes))
self.assertEqual('Allocate IP To Project',
six.text_type(allocate_action.verbose_name))
self.assertIsNone(allocate_action.policy_rules)
url = 'horizon:project:floating_ips:allocate'
self.assertEqual(url, allocate_action.url)
@test.create_stubs({api.network: ('tenant_floating_ip_list',
'floating_ip_pools_list',),
api.nova: ('server_list',),
quotas: ('tenant_quota_usages',),
api.base: ('is_service_enabled',)})
def test_allocate_button_disabled_when_quota_exceeded(self):
floating_ips = self.floating_ips.list()
floating_pools = self.pools.list()
quota_data = self.quota_usages.first()
quota_data['floating_ips']['available'] = 0
api.network.tenant_floating_ip_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_ips)
api.network.floating_ip_pools_list(
IsA(http.HttpRequest)) \
.AndReturn(floating_pools)
api.nova.server_list(
IsA(http.HttpRequest), detailed=False) \
.AndReturn([self.servers.list(), False])
quotas.tenant_quota_usages(
IsA(http.HttpRequest)).MultipleTimes() \
.AndReturn(quota_data)
api.base.is_service_enabled(
IsA(http.HttpRequest),
'network').MultipleTimes() \
.AndReturn(True)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
allocate_action = self.getAndAssertTableAction(res, 'floating_ips',
'allocate')
self.assertIn('disabled', allocate_action.classes,
'The create button should be disabled')
self.assertEqual('Allocate IP To Project (Quota exceeded)',
six.text_type(allocate_action.verbose_name))
class FloatingIpNeutronViewTests(FloatingIpViewTests):
def setUp(self):
super(FloatingIpViewTests, self).setUp()
self._floating_ips_orig = self.floating_ips
self.floating_ips = self.floating_ips_uuid
def tearDown(self):
self.floating_ips = self._floating_ips_orig
super(FloatingIpViewTests, self).tearDown()
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'is_router_enabled',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'compute') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.is_router_enabled(IsA(http.HttpRequest)) \
.AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(self.subnets.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(self.networks.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
@test.create_stubs({api.nova: ('tenant_quota_get', 'flavor_list',
'server_list'),
api.network: ('floating_ip_pools_list',
'floating_ip_supported',
'security_group_list',
'tenant_floating_ip_list'),
api.neutron: ('is_extension_supported',
'is_router_enabled',
'tenant_quota_get',
'network_list',
'router_list',
'subnet_list'),
api.base: ('is_service_enabled',),
api.cinder: ('is_volume_service_enabled',)})
@test.update_settings(OPENSTACK_NEUTRON_NETWORK={'enable_quotas': True})
def test_correct_quotas_displayed_shared_networks(self):
servers = [s for s in self.servers.list()
if s.tenant_id == self.request.user.tenant_id]
api.cinder.is_volume_service_enabled(IsA(http.HttpRequest)) \
.AndReturn(False)
api.base.is_service_enabled(IsA(http.HttpRequest), 'network') \
.MultipleTimes().AndReturn(True)
api.base.is_service_enabled(IsA(http.HttpRequest), 'compute') \
.MultipleTimes().AndReturn(True)
api.nova.tenant_quota_get(IsA(http.HttpRequest), '1') \
.AndReturn(self.quotas.first())
api.nova.flavor_list(IsA(http.HttpRequest)) \
.AndReturn(self.flavors.list())
search_opts = {'tenant_id': self.request.user.tenant_id}
api.nova.server_list(IsA(http.HttpRequest), search_opts=search_opts) \
.AndReturn([servers, False])
api.neutron.is_extension_supported(
IsA(http.HttpRequest), 'security-group').AndReturn(True)
api.neutron.is_extension_supported(IsA(http.HttpRequest), 'quotas') \
.AndReturn(True)
api.neutron.is_router_enabled(
IsA(http.HttpRequest)).AndReturn(True)
api.neutron.tenant_quota_get(IsA(http.HttpRequest), self.tenant.id) \
.AndReturn(self.neutron_quotas.first())
api.neutron.router_list(IsA(http.HttpRequest)) \
.AndReturn(self.routers.list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(list())
api.neutron.subnet_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(self.subnets.list())
api.neutron.network_list(IsA(http.HttpRequest), shared=False) \
.AndReturn(list())
api.neutron.network_list(IsA(http.HttpRequest), shared=True) \
.AndReturn(self.networks.list())
api.network.floating_ip_supported(IsA(http.HttpRequest)) \
.AndReturn(True)
api.network.tenant_floating_ip_list(IsA(http.HttpRequest)) \
.MultipleTimes().AndReturn(self.floating_ips.list())
api.network.floating_ip_pools_list(IsA(http.HttpRequest)) \
.AndReturn(self.pools.list())
api.network.security_group_list(IsA(http.HttpRequest)) \
.AndReturn(self.security_groups.list())
self.mox.ReplayAll()
url = reverse('%s:allocate' % NAMESPACE)
res = self.client.get(url)
self.assertEqual(res.context['usages']['floating_ips']['quota'],
self.neutron_quotas.first().get('floatingip').limit)
|
{
"content_hash": "78d0fd4a0dde6b5f2dee353158b94ed5",
"timestamp": "",
"source": "github",
"line_count": 418,
"max_line_length": 78,
"avg_line_length": 47.66028708133971,
"alnum_prop": 0.5658066459190845,
"repo_name": "kogotko/carburetor",
"id": "5ae49f825d267a514f7143e565966425ecd62cea",
"size": "20748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/floating_ips/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9097503"
},
{
"name": "HTML",
"bytes": "1650202"
},
{
"name": "JavaScript",
"bytes": "4712562"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "5086985"
},
{
"name": "Shell",
"bytes": "18571"
}
],
"symlink_target": ""
}
|
"""Test CppProperties flags generation."""
import imp
import platform
from os import path, environ
from unittest import TestCase
from EasyClangComplete.plugin.flags_sources import CppProperties
from EasyClangComplete.plugin.utils import tools
from EasyClangComplete.plugin.utils import flag
from EasyClangComplete.plugin.utils import file
from EasyClangComplete.plugin.utils import search_scope
imp.reload(CppProperties)
imp.reload(tools)
imp.reload(flag)
imp.reload(file)
imp.reload(search_scope)
CppProperties = CppProperties.CppProperties
SearchScope = search_scope.TreeSearchScope
Flag = flag.Flag
File = file.File
def _get_test_folder(name):
return path.join(path.dirname(__file__), 'CppProperties_files', name)
class TestCppProperties(TestCase):
"""Test generating flags with a 'CppProperties.json' file."""
def test_get_all_flags(self):
"""Test if CppProperties.json is found."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = [
Flag('-I', path.normpath('/folder/include/path')),
Flag('-I', path.normpath('/another/file/path')),
]
scope = SearchScope(from_folder=_get_test_folder('simple'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_expand_environment_variables(self):
"""Test environment variables are expanded."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
environ['TEST_VARIABLE_TO_EXPAND'] = '/lib_include_dir'
expected = [
Flag('-I', path.normpath('/lib_include_dir')),
]
scope = SearchScope(from_folder=_get_test_folder('environment'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
def test_no_db_in_folder(self):
"""Test if no json is found."""
if platform.system() == "Darwin":
# This test is disabled as the current path is trying to reach a
# network resource on MacOS. I guess we have to deal with this at
# some point later.
return
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = None
self.assertEqual(expected, db.get_flags(
File.canonical_path('/home/user/dummy_main.cpp')))
def test_empty_include_and_defines(self):
"""Test that empty fields are handled correctly."""
include_prefixes = ['-I']
db = CppProperties(include_prefixes)
expected = []
scope = SearchScope(from_folder=_get_test_folder('empty'))
self.assertEqual(expected, db.get_flags(search_scope=scope))
|
{
"content_hash": "a49bc9e64f6ad55a0a06a89f9c5f3ebb",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 77,
"avg_line_length": 32.81481481481482,
"alnum_prop": 0.6610233258088789,
"repo_name": "niosus/EasyClangComplete",
"id": "4d9e5709b642e4112742b358388abe1fcda263f8",
"size": "2658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_CppProperties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "469"
},
{
"name": "C++",
"bytes": "4969"
},
{
"name": "CMake",
"bytes": "1160"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "Makefile",
"bytes": "444"
},
{
"name": "Objective-C",
"bytes": "4185"
},
{
"name": "Objective-C++",
"bytes": "87"
},
{
"name": "Python",
"bytes": "1615297"
},
{
"name": "Starlark",
"bytes": "105"
}
],
"symlink_target": ""
}
|
"""!stock <search term> return a stock photo for <search term>"""
from random import shuffle
import re
try:
from urllib import quote
except ImportError:
from urllib.parse import quote
import requests
from bs4 import BeautifulSoup
def stock(searchterm):
searchterm = quote(searchterm)
url = "http://www.shutterstock.com/cat.mhtml?searchterm={0}&search_group=&lang=en&language=en&search_source=search_form&version=llv1".format(searchterm)
r = requests.get(url)
soup = BeautifulSoup(r.text, "html5lib")
images = [x["src"] for x in soup.select(".gc_clip img")]
shuffle(images)
return images[0] if images else ""
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"!stock (.*)", text)
if not match:
return
return stock(match[0].encode("utf8"))
on_bot_message = on_message
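# a worked example of the trigger regex above (the message text is illustrative):
# re.findall(r"!stock (.*)", "!stock mountain lake") == ["mountain lake"]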
|
{
"content_hash": "096acbbd6214c77d98b0fa5190c75414",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 156,
"avg_line_length": 27.774193548387096,
"alnum_prop": 0.6840882694541232,
"repo_name": "TetraEtc/limbo",
"id": "aa701a80f5fcf8f1645cb8cfff84b0222dabf1de",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limbo/plugins/stockphoto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1035"
},
{
"name": "Python",
"bytes": "138692"
},
{
"name": "Shell",
"bytes": "196"
}
],
"symlink_target": ""
}
|
"""Tests for tfx.dsl.compiler.node_inputs_compiler."""
from typing import List, Type
import tensorflow as tf
from tfx import types
from tfx.dsl.compiler import compiler_context
from tfx.dsl.compiler import node_inputs_compiler
from tfx.dsl.components.base import base_component
from tfx.dsl.components.base import base_node
from tfx.dsl.components.base import executor_spec
from tfx.dsl.control_flow import for_each
from tfx.dsl.experimental.conditionals import conditional
from tfx.dsl.input_resolution import resolver_function
from tfx.dsl.input_resolution import resolver_op
from tfx.orchestration import pipeline
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import channel as channel_types
from tfx.types import component_spec
from tfx.types import standard_artifacts
from google.protobuf import text_format
class DummyArtifact(types.Artifact):
TYPE_NAME = 'Dummy'
PROPERTIES = {}
class DummyNode(base_node.BaseNode):
def __init__(self, id: str, inputs=None, exec_properties=None): # pylint: disable=redefined-builtin
super().__init__()
self.with_id(id)
self._inputs = inputs or {}
self._exec_properties = exec_properties or {}
self._outputs = {}
def output(self, key: str, artifact_type=DummyArtifact):
if key not in self._outputs:
self._outputs[key] = channel_types.OutputChannel(artifact_type, self, key)
return self._outputs[key]
@property
def inputs(self) -> ...:
return self._inputs
@property
def exec_properties(self) -> ...:
return self._exec_properties
@property
def outputs(self) -> ...:
return self._outputs
class DummyArtifactList(
resolver_op.ResolverOp,
canonical_name='testing.DummyArtifactList',
arg_data_types=(),
return_data_type=resolver_op.DataType.ARTIFACT_LIST):
def apply(self):
return []
class DummyDict(
resolver_op.ResolverOp,
canonical_name='testing.DummyDict',
arg_data_types=(),
return_data_type=resolver_op.DataType.ARTIFACT_MULTIMAP):
def apply(self):
return {'x': []}
class DummyDictList(
resolver_op.ResolverOp,
canonical_name='testing.DummyDictList',
arg_data_types=(),
return_data_type=resolver_op.DataType.ARTIFACT_MULTIMAP_LIST):
def apply(self):
return []
@resolver_function.resolver_function
def dummy_artifact_list():
return DummyArtifactList()
@resolver_function.resolver_function
def dummy_dict():
return DummyDict()
@resolver_function.resolver_function
def dummy_dict_list():
return DummyDictList()
class NodeInputsCompilerTest(tf.test.TestCase):
pipeline_name = 'dummy-pipeline'
def _prepare_pipeline(
self, components: List[base_node.BaseNode]) -> pipeline.Pipeline:
return pipeline.Pipeline(
pipeline_name=self.pipeline_name,
components=components)
def _compile_node_inputs(
self, node, components=None) -> pipeline_pb2.NodeInputs:
if not components:
components = [node]
p = self._prepare_pipeline(components)
ctx = compiler_context.PipelineContext(p)
result = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, node, result)
return result
def _get_channel_pb(
self, artifact_type: Type[types.Artifact] = DummyArtifact,
pipeline_name: str = '', node_id: str = '',
output_key: str = '') -> pipeline_pb2.InputSpec.Channel:
result = pipeline_pb2.InputSpec.Channel()
node_inputs_compiler._compile_channel_pb(
artifact_type=artifact_type,
pipeline_name=pipeline_name or self.pipeline_name,
node_id=node_id,
output_key=output_key,
result=result)
return result
def testCompileAlreadyCompiledInputs(self):
producer = DummyNode('MyProducer')
consumer = DummyNode('MyConsumer', inputs={'x': producer.output('x')})
p = self._prepare_pipeline([producer, consumer])
ctx = compiler_context.PipelineContext(p)
fake_channel_pb = text_format.Parse("""
context_queries {
type {
name: "Foo"
}
name {
field_value {
string_value: "foo"
}
}
}
artifact_query {
type {
name: "Dummy"
}
}
output_key: "x"
""", pipeline_pb2.InputSpec.Channel())
ctx.channels[producer.output('x')] = fake_channel_pb
result = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, consumer, result)
self.assertProtoEquals(
result.inputs['x'].channels[0],
fake_channel_pb)
def testCompileChannel(self):
channel = channel_types.Channel(
type=DummyArtifact,
producer_component_id='MyProducer',
output_key='my_output_key')
node = DummyNode('MyNode', inputs={'x': channel})
result = self._compile_node_inputs(node)
self.assertLen(result.inputs['x'].channels, 1)
self.assertProtoEquals(
result.inputs['x'].channels[0],
self._get_channel_pb(
node_id='MyProducer', output_key='my_output_key'))
def testCompileExternalPipelineOutputChannel(self):
a = DummyNode('A')
p1 = pipeline.Pipeline(
pipeline_name='p1',
components=[a],
outputs={'x': a.output('x')})
p2_inputs = pipeline.PipelineInputs({'x': p1.outputs['x']})
p2 = pipeline.Pipeline(pipeline_name='p2', inputs=p2_inputs)
ctx = compiler_context.PipelineContext(p2)
result = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, p2, result)
self.assertLen(result.inputs, 1)
self.assertLen(result.inputs['x'].channels, 1)
self.assertProtoEquals(
result.inputs['x'].channels[0],
self._get_channel_pb(pipeline_name='p1', node_id='A', output_key='x'))
def testCompileUnionChannel(self):
producer = DummyNode('MyProducer')
consumer = DummyNode('MyConsumer', inputs={
'x': channel_types.union([
producer.output('x'),
producer.output('y'),
channel_types.Channel(
type=DummyArtifact,
producer_component_id='Z',
output_key='z'),
])
})
result = self._compile_node_inputs(
consumer, components=[producer, consumer])
self.assertLen(result.inputs, 4)
self.assertEqual(
result.inputs['x'].mixed_inputs.method,
pipeline_pb2.InputSpec.Mixed.Method.UNION)
dep_input_keys = list(result.inputs['x'].mixed_inputs.input_keys)
self.assertLen(dep_input_keys, 3)
def testCompileForEach(self):
producer = DummyNode('MyProducer')
with for_each.ForEach(producer.output('xs')) as x:
consumer = DummyNode('MyConsumer', inputs={'x': x})
result = self._compile_node_inputs(
consumer, components=[producer, consumer])
self.assertLen(result.inputs, 2)
other_input_key = [
input_key for input_key in result.inputs
if input_key != 'x'][0]
graph_id = result.inputs['x'].input_graph_ref.graph_id
self.assertNotEmpty(graph_id)
self.assertLen(result.input_graphs, 1)
for graph_node in result.input_graphs[graph_id].nodes.values():
if graph_node.WhichOneof('kind') == 'op_node':
self.assertEqual(graph_node.op_node.op_type, 'tfx.internal.Unnest')
elif graph_node.WhichOneof('kind') == 'input_node':
self.assertEqual(
graph_node.input_node.input_key, other_input_key)
def testCompileInputGraph(self):
channel = dummy_artifact_list.with_output_type(DummyArtifact)()
node = DummyNode('MyNode', inputs={'x': channel})
p = self._prepare_pipeline([node])
ctx = compiler_context.PipelineContext(p)
result = pipeline_pb2.NodeInputs()
with self.subTest('First compilation'):
input_graph_id = node_inputs_compiler._compile_input_graph(
ctx, node, channel, result)
self.assertLen(result.input_graphs, 1)
self.assertProtoEquals("""
nodes {
key: "op_1"
value: {
output_data_type: ARTIFACT_LIST
op_node {
op_type: "testing.DummyArtifactList"
}
}
}
result_node: "op_1"
""", result.input_graphs[input_graph_id])
with self.subTest('Second compilation'):
second_input_graph_id = node_inputs_compiler._compile_input_graph(
ctx, node, channel, result)
self.assertEqual(input_graph_id, second_input_graph_id)
def testCompileInputGraphRef(self):
x1 = dummy_artifact_list.with_output_type(DummyArtifact)()
x2 = dummy_dict.with_output_type({'x': DummyArtifact})()['x']
dict_list = dummy_dict_list.with_output_type({'x': DummyArtifact})()
with for_each.ForEach(dict_list) as each_dict:
x3 = each_dict['x']
node = DummyNode('MyNode', inputs={'x1': x1, 'x2': x2, 'x3': x3})
result = self._compile_node_inputs(node)
self.assertNotEmpty(result.inputs['x1'].input_graph_ref.graph_id)
self.assertEmpty(result.inputs['x1'].input_graph_ref.key)
self.assertNotEmpty(result.inputs['x2'].input_graph_ref.graph_id)
self.assertEqual(result.inputs['x2'].input_graph_ref.key, 'x')
self.assertNotEmpty(result.inputs['x3'].input_graph_ref.graph_id)
self.assertEqual(result.inputs['x3'].input_graph_ref.key, 'x')
def testCompileConditionals(self):
cond_node = DummyNode('CondNode')
with conditional.Cond(
cond_node.output('x').future().value == 42):
inner_node = DummyNode('InnerNode')
result = self._compile_node_inputs(
inner_node, components=[cond_node, inner_node])
self.assertLen(result.inputs, 1)
cond_input_key = list(result.inputs)[0]
self.assertFalse(result.inputs[cond_input_key].hidden)
self.assertEqual(result.inputs[cond_input_key].min_count, 1)
self.assertLen(result.conditionals, 1)
cond = list(result.conditionals.values())[0]
self.assertProtoEquals("""
operator {
compare_op {
op: EQUAL
rhs {
value {
int_value: 42
}
}
lhs {
operator {
artifact_value_op {
expression {
operator {
index_op {
expression {
placeholder {
key: "%s"
}
}
}
}
}
}
}
}
}
}
""" % cond_input_key, cond.placeholder_expression)
def testCompileInputsForDynamicProperties(self):
producer = DummyNode('Producer')
consumer = DummyNode('Consumer', exec_properties={
'x': producer.output('x', standard_artifacts.Integer).future().value
})
result = self._compile_node_inputs(
consumer, components=[producer, consumer])
self.assertLen(result.inputs, 1)
dynamic_prop_input_key = list(result.inputs)[0]
self.assertFalse(result.inputs[dynamic_prop_input_key].hidden)
self.assertEqual(result.inputs[dynamic_prop_input_key].min_count, 1)
def testCompileMinCount(self):
class DummyComponentSpec(component_spec.ComponentSpec):
INPUTS = {
'required': component_spec.ChannelParameter(
DummyArtifact, optional=False),
'optional_but_not_allow_empty': component_spec.ChannelParameter(
DummyArtifact, optional=True, allow_empty=False),
'optional_and_allow_empty': component_spec.ChannelParameter(
DummyArtifact, optional=True, allow_empty=True),
}
OUTPUTS = {}
PARAMETERS = {}
class DummyComponent(base_component.BaseComponent):
SPEC_CLASS = DummyComponentSpec
EXECUTOR_SPEC = executor_spec.ExecutorSpec()
def __init__(self, **inputs):
super().__init__(DummyComponentSpec(**inputs))
producer = DummyNode('Producer')
c1 = DummyComponent(
required=producer.output('x'),
).with_id('Consumer1')
c2 = DummyComponent(
required=producer.output('x'),
optional_but_not_allow_empty=producer.output('x'),
).with_id('Consumer2')
c3 = DummyComponent(
required=producer.output('x'),
optional_and_allow_empty=producer.output('x'),
).with_id('Consumer3')
p = self._prepare_pipeline([producer, c1, c2, c3])
ctx = compiler_context.PipelineContext(p)
r1 = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, c1, r1)
r2 = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, c2, r2)
r3 = pipeline_pb2.NodeInputs()
node_inputs_compiler.compile_node_inputs(ctx, c3, r3)
self.assertEqual(r1.inputs['required'].min_count, 1)
self.assertEqual(r2.inputs['optional_but_not_allow_empty'].min_count, 1)
self.assertEqual(r2.inputs['optional_and_allow_empty'].min_count, 0)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "980054e7d9c0c7df87168c99f2684c87",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 102,
"avg_line_length": 32.08457711442786,
"alnum_prop": 0.6421150565979221,
"repo_name": "tensorflow/tfx",
"id": "037a9fdbc4e6920768306ad1d7b3975f8ad0dd04",
"size": "13494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tfx/dsl/compiler/node_inputs_compiler_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7405"
},
{
"name": "Jupyter Notebook",
"bytes": "38579"
},
{
"name": "Python",
"bytes": "6009050"
},
{
"name": "Shell",
"bytes": "34056"
},
{
"name": "Starlark",
"bytes": "20324"
}
],
"symlink_target": ""
}
|
from daemon.tests.base_test import TestCase
from daemon import settings, create_app
from daemon.database import db
from daemon.models import Threshold
from daemon.utils import generate_threshold_config
class UtilitiesTestCase(TestCase):
"""
This class performs testing upon `utils.py` module containing:
* filtering metrics function
* converting configuration to / from XML functions
"""
def create_app(self):
settings.settings["SQLALCHEMY_DATABASE_URI"] = "sqlite://"
settings.settings["TESTING"] = True
return create_app(settings.settings)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_generate_threshold_config(self):
"""
Test: generate_threshold_config, utility function
* generates exact config from a given set of thresholds
"""
obj1 = Threshold(type="counter", warning_min=0, warning_max=1000,
failure_min=0, failure_max=1200, invert=False, persist=False,
type_instance="some_instance")
obj2 = Threshold(type="df", warning_max=90, percentage=True)
obj3 = Threshold(type="load", datasource="midterm", warning_max=1,
hysteresis=0.3)
obj4 = Threshold(type="cpu", type_instance="user", warning_max=85,
hits=6)
obj5 = Threshold(plugin="interface", plugin_instance="eth0",
type="if_octets", datasource="rx", failure_max=10000000)
obj6 = Threshold(host="hostname", type="cpu", type_instance="idle",
failure_min=10)
obj7 = Threshold(host="hostname", plugin="memory", type="memory",
type_instance="cached", warning_min=100000000)
for obj in [obj1, obj2, obj3, obj4, obj5, obj6, obj7]:
db.session.add(obj)
db.session.commit()
self.assertEqual(Threshold.query.count(), 7)
result_set = Threshold.query.order_by(Threshold.host). \
order_by(Threshold.plugin).order_by(Threshold.type). \
order_by(Threshold.id)
with open(self.app.config["collectd_threshold_file"], "r") as f:
self.assertEqual(f.read(), generate_threshold_config(result_set))
|
{
"content_hash": "abe84c0998d275a360c83252d3fd59c0",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 77,
"avg_line_length": 39.206896551724135,
"alnum_prop": 0.6292875989445911,
"repo_name": "pbanaszkiewicz/collectd-webdaemon",
"id": "b78220393e2274867e595e0c3599afc11612cf32",
"size": "2291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "daemon/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49563"
}
],
"symlink_target": ""
}
|
"""Unit tests for datastore_utils.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import itertools
import math
import mock
from six.moves import range
from google.appengine.api import datastore_errors
from google.appengine.ext import ndb
from google.appengine.ext.ndb import polymodel
from upvote.gae.datastore import utils as datastore_utils
from upvote.gae.lib.testing import basetest
from upvote.shared import constants
class CopyEntityTest(basetest.UpvoteTestCase):
def setUp(self):
super(CopyEntityTest, self).setUp()
class A(ndb.Model):
a = ndb.StringProperty()
self.default_model = A
def testUpdateProperties(self):
inst = self.default_model(a='abc')
inst.put()
new = datastore_utils.CopyEntity(inst, a='xyz')
new.put()
self.assertEqual('abc', inst.a)
self.assertEqual('xyz', new.a)
self.assertNotEqual(new.key, inst.key)
def testFailToSet_AutoNowProperty(self):
class A(ndb.Model):
a = ndb.DateTimeProperty(auto_now=True)
inst = A()
inst.put()
with self.assertRaises(datastore_utils.PropertyError):
datastore_utils.CopyEntity(
inst, a=datetime.datetime.utcnow())
def testFailToSet_ComputedProperty(self):
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.ComputedProperty(lambda self: self.a[0])
inst = A(a='xyz')
inst.put()
self.assertEqual('x', inst.b)
with self.assertRaises(datastore_utils.PropertyError):
datastore_utils.CopyEntity(inst, b='a')
def testModelWithComputedProperty(self):
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.ComputedProperty(lambda self: self.a[0])
inst = A(a='xyz')
inst.put()
self.assertEqual('x', inst.b)
new = datastore_utils.CopyEntity(inst, a='abc')
new.put()
self.assertEqual('a', new.b)
def testPolyModel(self):
class A(datastore_utils.polymodel.PolyModel):
a = ndb.StringProperty()
class B(A):
pass
inst = B(a='abc')
inst.put()
new = datastore_utils.CopyEntity(inst, a='xyz')
new.put()
self.assertEqual('xyz', new.a)
self.assertIsInstance(new, B)
def testPolyModel_NoClass(self):
class A(datastore_utils.polymodel.PolyModel):
a = ndb.StringProperty()
class B(A):
pass
inst = B(a='abc')
a_copy = datastore_utils.CopyEntity(inst, a='xyz')
a_copy.put()
inst.put()
self.assertEqual('xyz', a_copy.a)
self.assertEqual('abc', inst.a)
def testNewId(self):
inst = self.default_model(a='abc')
inst.put()
new = datastore_utils.CopyEntity(inst, id='an_id')
new.put()
self.assertEqual('abc', new.a)
self.assertEqual('an_id', new.key.id())
def testNewIdWithParent(self):
inst = self.default_model(a='abc')
inst.put()
parent = ndb.Key('C', 'c', 'B', 'b')
expected = ndb.Key('C', 'c', 'B', 'b', 'A', 'an_id')
new = datastore_utils.CopyEntity(
inst, new_parent=parent, id='an_id')
new.put()
self.assertEqual(expected, new.key)
def testIdWithKey(self):
inst = self.default_model(a='abc')
inst.put()
with self.assertRaises(datastore_errors.BadArgumentError):
datastore_utils.CopyEntity(
inst, new_key=ndb.Key('A', 'a_key'), id='an_id')
def testParentWithKey(self):
inst = self.default_model(a='abc')
inst.put()
parent = ndb.Key('C', 'c', 'B', 'b')
with self.assertRaises(datastore_errors.BadArgumentError):
datastore_utils.CopyEntity(
inst, new_key=ndb.Key('A', 'a_key'), new_parent=parent)
def testUnknownProperty(self):
inst = self.default_model(a='abc')
inst.put()
with self.assertRaises(datastore_utils.PropertyError):
datastore_utils.CopyEntity(inst, not_a_property='a')
def testDeletedProperty(self):
inst = self.default_model(a='abc')
inst.put()
class A(ndb.Model): # pylint: disable=unused-variable
b = ndb.StringProperty()
inst = inst.key.get(use_cache=False)
copy = datastore_utils.CopyEntity(inst)
self.assertFalse(hasattr(copy, 'a'))
class DeletePropertyTest(basetest.UpvoteTestCase):
def setUp(self):
super(DeletePropertyTest, self).setUp()
def testSameSchema(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty()
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone :)
self.assertIsNone(inst.b)
def testSameSchema_DoesntDeleteProperty(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty()
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
# Create a new instance and verify that the 'b' hasn't disappeared
new = A(a='abc', b='def')
new.put()
self.assertTrue(datastore_utils.HasProperty(new, 'b'))
def testSameSchema_RepeatedProperty(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty(repeated=True)
# Create an entity using the initial schema
inst = A(a='abc', b=['def'])
inst.put()
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is...kinda gone :|
self.assertEqual([], inst.b)
def testChangeSchema(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty()
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
# Revised schema
class A(ndb.Model): # pylint: disable=function-redefined
a = ndb.StringProperty()
# Retrieve and save the old instance
inst = A.get_by_id(inst.key.id())
inst.put()
# The old data is still there :(
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone :)
self.assertIsNone(inst.b)
def testChangeSchema_RequiredField(self):
# Initial schema but this time with a required property
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty(required=True)
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
# Revised schema without the required property
class A(ndb.Model): # pylint: disable=function-redefined
a = ndb.StringProperty()
# Retrieve and save the old instance
inst = A.get_by_id(inst.key.id())
inst.put()
# The old data is still there :(
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone :)
self.assertIsNone(inst.b)
def testUnknownProperty(self):
class A(ndb.Model):
a = ndb.StringProperty()
inst = A(a='abc')
inst.put()
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
self.assertIsNotNone(inst.a)
def testChangeSchema_PolyModel(self):
# Initial schema
class Base(polymodel.PolyModel):
a = ndb.StringProperty()
b = ndb.StringProperty(required=True)
class A(Base):
pass
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
# Revised schema
class Base(polymodel.PolyModel): # pylint: disable=function-redefined
a = ndb.StringProperty()
class A(Base): # pylint: disable=function-redefined
pass
# Retrieve and save the old instance
inst = A.get_by_id(inst.key.id())
inst.put()
# The old data is still there :(
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeleteProperty(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone :)
self.assertIsNone(inst.b)
class DeletePropertyValueTest(basetest.UpvoteTestCase):
def testDeleteValue(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty()
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeletePropertyValue(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone :)
self.assertIsNone(inst.b)
def testDatetimeAutoNowAdd(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.DateTimeProperty(auto_now_add=True)
# Create an entity using the initial schema
inst = A(a='abc')
inst.put()
# Delete the property and save the entity
datastore_utils.DeletePropertyValue(inst, 'b')
inst.put()
self.assertTrue(datastore_utils.HasProperty(inst, 'b'))
self.assertIsNotNone(inst.b)
def testRepeatedProperty(self):
# Initial schema
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty(repeated=True)
# Create an entity using the initial schema
inst = A(a='abc', b=['def'])
inst.put()
self.assertIsNotNone(inst.b)
# Delete the property and save the entity
datastore_utils.DeletePropertyValue(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
# The old data is gone
self.assertEqual([], inst.b)
def testRequiredField(self):
# Initial schema but this time with a required property
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.StringProperty(required=True)
# Create an entity using the initial schema
inst = A(a='abc', b='def')
inst.put()
# Delete the property and save the entity
datastore_utils.DeletePropertyValue(inst, 'b')
# Property required but no longer has a value.
with self.assertRaises(Exception):
inst.put()
def testUnknownProperty(self):
class A(ndb.Model):
a = ndb.StringProperty()
inst = A(a='abc')
inst.put()
datastore_utils.DeletePropertyValue(inst, 'b')
inst.put()
inst = A.get_by_id(inst.key.id())
self.assertIsNotNone(inst.a)
class HasValueTest(basetest.UpvoteTestCase):
def testHasValue(self):
class Foo(ndb.Model):
a = ndb.ComputedProperty(lambda self: 'a')
b = ndb.StringProperty()
foo = Foo()
self.assertFalse(datastore_utils.HasValue(foo, 'a'))
self.assertFalse(datastore_utils.HasValue(foo, 'b'))
foo.b = 'b'
self.assertFalse(datastore_utils.HasValue(foo, 'a'))
self.assertTrue(datastore_utils.HasValue(foo, 'b'))
foo.put()
self.assertTrue(datastore_utils.HasValue(foo, 'a'))
self.assertTrue(datastore_utils.HasValue(foo, 'b'))
class GetLocalComputedPropertyValueTest(basetest.UpvoteTestCase):
def setUp(self):
super(GetLocalComputedPropertyValueTest, self).setUp()
class A(ndb.Model):
a = ndb.StringProperty()
b = ndb.ComputedProperty(lambda self: self.a[0])
self.inst = A(a='xyz')
def testNormal(self):
self.assertIsNone(
datastore_utils.GetLocalComputedPropertyValue(self.inst, 'b'))
self.inst.put()
self.assertEqual(
'x', datastore_utils.GetLocalComputedPropertyValue(self.inst, 'b'))
self.inst.a = 'cdg'
self.assertEqual(
'x', datastore_utils.GetLocalComputedPropertyValue(self.inst, 'b'))
self.inst.put()
self.assertEqual(
'c', datastore_utils.GetLocalComputedPropertyValue(self.inst, 'b'))
def testUnknownProperty(self):
with self.assertRaises(datastore_utils.PropertyError):
datastore_utils.GetLocalComputedPropertyValue(
self.inst, 'NotARealProperty')
def testNotComputedProperty(self):
with self.assertRaises(datastore_utils.PropertyError):
datastore_utils.GetLocalComputedPropertyValue(self.inst, 'a')
class KeyHasAncestorTest(basetest.UpvoteTestCase):
def testKeyHasAncestor(self):
self.assertFalse(
datastore_utils.KeyHasAncestor(ndb.Key('A', 1), ndb.Key('A', 1)))
self.assertTrue(
datastore_utils.KeyHasAncestor(
ndb.Key('A', 1, 'B', 2), ndb.Key('A', 1)))
self.assertFalse(
datastore_utils.KeyHasAncestor(
ndb.Key('A', 1, 'B', 2), ndb.Key('A', 2)))
self.assertFalse(
datastore_utils.KeyHasAncestor(
ndb.Key('A', 1, 'B', 2), ndb.Key('A', 1, 'B', 2)))
self.assertTrue(
datastore_utils.KeyHasAncestor(
ndb.Key('A', 1, 'B', 2, 'C', 3), ndb.Key('A', 1, 'B', 2)))
class ConcatenateKeysTest(basetest.UpvoteTestCase):
def testSuccess(self):
keys = [ndb.Key('A', 1, 'B', 2), ndb.Key('C', 3)]
self.assertEqual(
ndb.Key('A', 1, 'B', 2, 'C', 3), datastore_utils.ConcatenateKeys(*keys))
def testEmpty(self):
self.assertIsNone(datastore_utils.ConcatenateKeys())
class GetKeyFromUrlsafeTest(basetest.UpvoteTestCase):
def testSuccess(self):
key = ndb.Key('A', 'a', 'B', 'b')
self.assertEqual(key, datastore_utils.GetKeyFromUrlsafe(key.urlsafe()))
def testError(self):
self.assertIsNone(
datastore_utils.GetKeyFromUrlsafe('not a real ndb key string'))
class FutureFactoryTest(basetest.UpvoteTestCase):
def testInTxn(self):
def AssertInTxn():
self.assertTrue(ndb.in_transaction())
def RunAssert():
fut = datastore_utils.GetNoOpFuture()
fut.add_callback(AssertInTxn)
fut.add_immediate_callback(AssertInTxn)
fut.get_result()
ndb.transaction(RunAssert)
class GetNoOpFutureTest(basetest.UpvoteTestCase):
def testNone(self):
future = datastore_utils.GetNoOpFuture()
self.assertTrue(future.done())
self.assertIsNone(future.get_result())
def testResult(self):
result = 'foobar'
future = datastore_utils.GetNoOpFuture(result)
self.assertTrue(future.done())
self.assertEqual(result, future.get_result())
class GetMultiFutureTest(basetest.UpvoteTestCase):
def testNoInput(self):
mf = datastore_utils.GetMultiFuture([])
self.assertTrue(mf.done())
def testSingleFuture(self):
f = ndb.Future()
mf = datastore_utils.GetMultiFuture([f])
self.assertFalse(f.done())
self.assertFalse(mf.done())
f.set_result(None)
self.assertTrue(f.done())
self.assertFalse(mf.done())
# Event loop must run for the MultiFuture to be marked as done.
mf.wait()
self.assertTrue(mf.done())
def testManyFutures(self):
futures = [ndb.Future() for _ in range(3)]
mf = datastore_utils.GetMultiFuture(futures)
self.assertFalse(any(f.done() for f in futures))
self.assertFalse(mf.done())
for f in futures:
f.set_result(None)
self.assertTrue(all(f.done() for f in futures))
self.assertFalse(mf.done())
# Event loop must run for the MultiFuture to be marked as done.
mf.wait()
self.assertTrue(mf.done())
def testCantModifyResult(self):
f = ndb.Future()
mf = datastore_utils.GetMultiFuture([f])
with self.assertRaises(RuntimeError):
mf.add_dependent(ndb.Future())
class TestModel(ndb.Model):
foo = ndb.StringProperty()
bar = ndb.IntegerProperty()
def CreateEntity(foo='foo', bar=0):
entity = TestModel(foo=foo, bar=bar)
entity.put()
return entity
def CreateEntities(count, **kwargs):
return [CreateEntity(**kwargs) for _ in range(count)]
_GLOBAL_CBK_MOCK = mock.MagicMock()
def CallMock(*args, **kwargs):
_GLOBAL_CBK_MOCK(*args, **kwargs)
def GetKey(key):
return key.get()
def ReturnFoo(entity):
return entity.foo
def ReturnBar(entity):
return entity.bar
class PaginateTest(basetest.UpvoteTestCase):
def testSuccess(self):
page_size = 10
for entity_count in range(50):
# Create some number of entities.
CreateEntities(entity_count)
# Verify that we get the expected number of pages.
pages = list(
datastore_utils.Paginate(TestModel.query(), page_size=page_size))
expected_page_count = int(math.ceil(float(entity_count) / page_size))
self.assertLen(pages, expected_page_count)
# Verify that we get the expected number of entities.
entities = list(itertools.chain(*pages))
self.assertLen(entities, entity_count)
# Delete everything.
for entity in entities:
entity.key.delete()
class QueuedPaginatedBatchApply(basetest.UpvoteTestCase):
def tearDown(self):
super(QueuedPaginatedBatchApply, self).tearDown()
_GLOBAL_CBK_MOCK.reset_mock()
def testSuccess(self):
entities = CreateEntities(3)
datastore_utils.QueuedPaginatedBatchApply(
TestModel.query(), CallMock, page_size=2)
for _ in range(3):
self.assertTaskCount(constants.TASK_QUEUE.DEFAULT, 1)
self.RunDeferredTasks()
self.assertTaskCount(constants.TASK_QUEUE.DEFAULT, 0)
    # MagicMock's called_with() is not a real assertion method (it just
    # returns a truthy child mock), so use assert_any_call() instead.
    _GLOBAL_CBK_MOCK.assert_any_call(entities[:2])
    _GLOBAL_CBK_MOCK.assert_any_call(entities[2:])
self.assertEqual(2, _GLOBAL_CBK_MOCK.call_count)
def testExtraArgs(self):
entities = CreateEntities(1)
datastore_utils.QueuedPaginatedBatchApply(
TestModel.query(), CallMock, extra_args=['a', 'b'],
extra_kwargs={'c': 'c'})
for _ in range(2):
self.assertTaskCount(constants.TASK_QUEUE.DEFAULT, 1)
self.RunDeferredTasks()
self.assertTaskCount(constants.TASK_QUEUE.DEFAULT, 0)
    _GLOBAL_CBK_MOCK.assert_called_with(entities, 'a', 'b', c='c')
if __name__ == '__main__':
basetest.main()
|
{
"content_hash": "0dcc2fb0f4c9b55a737067981ccf9679",
"timestamp": "",
"source": "github",
"line_count": 712,
"max_line_length": 80,
"avg_line_length": 25.323033707865168,
"alnum_prop": 0.658014420410427,
"repo_name": "google/upvote_py2",
"id": "d0b7d747b9c74b2dc5e77c52003b569f60e577c6",
"size": "18627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "upvote/gae/datastore/utils_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9963"
},
{
"name": "HTML",
"bytes": "85316"
},
{
"name": "JavaScript",
"bytes": "375357"
},
{
"name": "Python",
"bytes": "1242301"
},
{
"name": "Shell",
"bytes": "2846"
},
{
"name": "Starlark",
"bytes": "78267"
}
],
"symlink_target": ""
}
|
"""
Commands for pipsi-tool
Module defines helpers for searching for commands and aliases for them.
"""
from .upgrade_all import UpgradeAll
from .reinstall_all import ReinstallAll
COMMANDS = {
'upgrade-all': (UpgradeAll, 'upgradeall', ),
'reinstall-all': (ReinstallAll, 'reinstallall', ),
}
COMMAND_MAPPER = {}
COMMAND_ALIASES = {}
def _manage_commands():
"""
Build COMMAND_MAPPER and COMMAND_ALIASES dictionaries using COMMANDS
@return:None
"""
for name, (command, *aliases) in COMMANDS.items():
COMMAND_MAPPER[name] = command
for alias in aliases:
COMMAND_MAPPER[alias] = command
COMMAND_ALIASES[name] = aliases
_manage_commands()
del _manage_commands
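# The builder above runs once at import time and is then deleted, so the
# module's public surface stays limited to the lookup tables and accessors.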
def get(name):
"""
Find command class for given command name
@param name:str
@return: commands.Command
"""
return COMMAND_MAPPER.get(name)
def get_aliases_for(name):
"""
Find aliases for given command name
@param name: str
    @return: list[str]
"""
return COMMAND_ALIASES[name]
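# Illustrative usage (a sketch, not part of the module itself): the canonical
# name and its alias resolve to the same command class.
#
#   from pipsi_tool import commands
#   assert commands.get('upgrade-all') is commands.get('upgradeall')
#   assert commands.get_aliases_for('upgrade-all') == ['upgradeall']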
|
{
"content_hash": "65a22d20a7562a9611b37987d9445813",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 72,
"avg_line_length": 22.717391304347824,
"alnum_prop": 0.6593301435406699,
"repo_name": "msztolcman/pipsi-tool",
"id": "d2e9830fe4dcd6090fd302fc448d4619d45bb6d0",
"size": "1045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipsi_tool/commands/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "318"
},
{
"name": "Python",
"bytes": "7625"
}
],
"symlink_target": ""
}
|
from numba.core import types, errors, ir, sigutils, ir_utils
from numba.core.typing.typeof import typeof_impl
from numba.core.transforms import find_region_inout_vars
class WithContext(object):
"""A dummy object for use as contextmanager.
This can be used as a contextmanager.
"""
is_callable = False
def __enter__(self):
pass
def __exit__(self, typ, val, tb):
pass
def mutate_with_body(self, func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory, extra):
"""Mutate the *blocks* to implement this contextmanager.
Parameters
----------
func_ir : FunctionIR
blocks : dict[ir.Block]
blk_start, blk_end : int
labels of the starting and ending block of the context-manager.
        body_blocks : sequence[int]
            A sequence of ints representing labels of the with-body
dispatcher_factory : callable
A callable that takes a `FunctionIR` and returns a `Dispatcher`.
"""
raise NotImplementedError
@typeof_impl.register(WithContext)
def typeof_contextmanager(val, c):
return types.ContextManager(val)
def _get_var_parent(name):
"""Get parent of the variable given its name
"""
# If not a temporary variable
if not name.startswith('$'):
# Return the base component of the name
        return name.split('.')[0]
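    # Names starting with '$' are compiler temporaries with no user-visible
    # parent, so the function falls through and returns None for them.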
def _clear_blocks(blocks, to_clear):
"""Remove keys in *to_clear* from *blocks*.
"""
for b in to_clear:
del blocks[b]
class _ByPassContextType(WithContext):
"""A simple context-manager that tells the compiler to bypass the body
of the with-block.
"""
def mutate_with_body(self, func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory, extra):
assert extra is None
# Determine variables that need forwarding
vlt = func_ir.variable_lifetime
inmap = {_get_var_parent(k): k for k in vlt.livemap[blk_start]}
outmap = {_get_var_parent(k): k for k in vlt.livemap[blk_end]}
forwardvars = {inmap[k]: outmap[k] for k in filter(bool, outmap)}
# Transform the block
_bypass_with_context(blocks, blk_start, blk_end, forwardvars)
_clear_blocks(blocks, body_blocks)
bypass_context = _ByPassContextType()
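# Illustrative sketch of how bypass_context behaves inside a jitted function
# (hedged example; this instance is mainly exercised by numba's own tests):
#
#   from numba import njit
#   from numba.core.withcontexts import bypass_context
#
#   @njit
#   def f(x):
#       with bypass_context:
#           x += 100  # compiled away: this body never executes
#       return x
#
#   # f(1) returns 1, because the with-body is removed during compilation.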
class _CallContextType(WithContext):
"""A simple context-manager that tells the compiler to lift the body of the
with-block as another function.
"""
def mutate_with_body(self, func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory, extra):
assert extra is None
vlt = func_ir.variable_lifetime
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=vlt.livemap,
callfrom=blk_start,
returnto=blk_end,
body_block_ids=set(body_blocks),
)
lifted_blks = {k: blocks[k] for k in body_blocks}
_mutate_with_block_callee(lifted_blks, blk_start, blk_end,
inputs, outputs)
# XXX: transform body-blocks to return the output variables
lifted_ir = func_ir.derive(
blocks=lifted_blks,
arg_names=tuple(inputs),
arg_count=len(inputs),
force_non_generator=True,
)
dispatcher = dispatcher_factory(lifted_ir)
newblk = _mutate_with_block_caller(
dispatcher, blocks, blk_start, blk_end, inputs, outputs,
)
blocks[blk_start] = newblk
_clear_blocks(blocks, body_blocks)
return dispatcher
call_context = _CallContextType()
class _ObjModeContextType(WithContext):
"""Creates a contextmanager to be used inside jitted functions to enter
*object-mode* for using interpreter features. The body of the with-context
is lifted into a function that is compiled in *object-mode*. This
transformation process is limited and cannot process all possible
Python code. However, users can wrap complicated logic in another
Python function, which will then be executed by the interpreter.
Use this as a function that takes keyword arguments only.
The argument names must correspond to the output variables from the
with-block. Their respective values are strings representing the expected
types. When exiting the with-context, the output variables are cast
to the expected nopython types according to the annotation. This process
is the same as passing Python objects into arguments of a nopython
function.
Example::
import numpy as np
from numba import njit, objmode
def bar(x):
# This code is executed by the interpreter.
return np.asarray(list(reversed(x.tolist())))
@njit
def foo():
x = np.arange(5)
y = np.zeros_like(x)
with objmode(y='intp[:]'): # annotate return type
# this region is executed by object-mode.
y += bar(x)
return y
.. note:: Known limitations:
- with-block cannot use incoming list objects.
- with-block cannot use incoming function objects.
- with-block cannot ``yield``, ``break``, ``return`` or ``raise`` \
such that the execution will leave the with-block immediately.
- with-block cannot contain `with` statements.
- random number generator states do not synchronize; i.e. \
nopython-mode and object-mode uses different RNG states.
.. note:: When used outside of no-python mode, the context-manager has no
effect.
.. warning:: This feature is experimental. The supported features may
change with or without notice.
"""
is_callable = True
def _legalize_args(self, extra, loc):
"""
Legalize arguments to the context-manager
"""
if extra is None:
return {}
if len(extra['args']) != 0:
raise errors.CompilerError(
"objectmode context doesn't take any positional arguments",
)
callkwargs = extra['kwargs']
typeanns = {}
for k, v in callkwargs.items():
if not isinstance(v, ir.Const) or not isinstance(v.value, str):
raise errors.CompilerError(
"objectmode context requires constants string for "
"type annotation",
)
typeanns[k] = sigutils._parse_signature_string(v.value)
return typeanns
def mutate_with_body(self, func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory, extra):
typeanns = self._legalize_args(extra, loc=blocks[blk_start].loc)
vlt = func_ir.variable_lifetime
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=vlt.livemap,
callfrom=blk_start,
returnto=blk_end,
body_block_ids=set(body_blocks),
)
# Determine types in the output tuple
def strip_var_ver(x):
return x.split('.', 1)[0]
stripped_outs = list(map(strip_var_ver, outputs))
# Verify that only outputs are annotated
extra_annotated = set(typeanns) - set(stripped_outs)
if extra_annotated:
msg = (
                'Invalid type annotation on non-outgoing variables: {}. '
                'Suggestion: remove annotation of the listed variables'
)
raise errors.TypingError(msg.format(extra_annotated))
# Verify that all outputs are annotated
not_annotated = set(stripped_outs) - set(typeanns)
if not_annotated:
msg = (
'Missing type annotation on outgoing variable(s): {0}\n\n'
'Example code: with objmode({1}=\'<'
'add_type_as_string_here>\')\n'
)
stable_ann = sorted(not_annotated)
raise errors.TypingError(msg.format(stable_ann, stable_ann[0]))
# Get output types
outtup = types.Tuple([typeanns[v] for v in stripped_outs])
lifted_blks = {k: blocks[k] for k in body_blocks}
_mutate_with_block_callee(lifted_blks, blk_start, blk_end,
inputs, outputs)
lifted_ir = func_ir.derive(
blocks=lifted_blks,
arg_names=tuple(inputs),
arg_count=len(inputs),
force_non_generator=True,
)
dispatcher = dispatcher_factory(lifted_ir, objectmode=True,
output_types=outtup)
newblk = _mutate_with_block_caller(
dispatcher, blocks, blk_start, blk_end, inputs, outputs,
)
blocks[blk_start] = newblk
_clear_blocks(blocks, body_blocks)
return dispatcher
def __call__(self, *args, **kwargs):
# No effect when used in pure-python
return self
objmode_context = _ObjModeContextType()
def _bypass_with_context(blocks, blk_start, blk_end, forwardvars):
"""Given the starting and ending block of the with-context,
replaces the head block with a new block that jumps to the end.
*blocks* is modified inplace.
"""
sblk = blocks[blk_start]
scope = sblk.scope
loc = sblk.loc
newblk = ir.Block(scope=scope, loc=loc)
for k, v in forwardvars.items():
newblk.append(ir.Assign(value=scope.get_exact(k),
target=scope.get_exact(v),
loc=loc))
newblk.append(ir.Jump(target=blk_end, loc=loc))
blocks[blk_start] = newblk
def _mutate_with_block_caller(dispatcher, blocks, blk_start, blk_end,
inputs, outputs):
"""Make a new block that calls into the lifeted with-context.
Parameters
----------
dispatcher : Dispatcher
blocks : dict[ir.Block]
blk_start, blk_end : int
labels of the starting and ending block of the context-manager.
inputs: sequence[str]
Input variable names
outputs: sequence[str]
Output variable names
"""
sblk = blocks[blk_start]
scope = sblk.scope
loc = sblk.loc
newblock = ir.Block(scope=scope, loc=loc)
ir_utils.fill_block_with_call(
newblock=newblock,
callee=dispatcher,
label_next=blk_end,
inputs=inputs,
outputs=outputs,
)
return newblock
def _mutate_with_block_callee(blocks, blk_start, blk_end, inputs, outputs):
"""Mutate *blocks* for the callee of a with-context.
Parameters
----------
blocks : dict[ir.Block]
blk_start, blk_end : int
labels of the starting and ending block of the context-manager.
inputs: sequence[str]
Input variable names
outputs: sequence[str]
Output variable names
"""
head_blk = min(blocks)
temp_blk = blocks[head_blk]
scope = temp_blk.scope
loc = temp_blk.loc
blocks[blk_start] = ir_utils.fill_callee_prologue(
block=ir.Block(scope=scope, loc=loc),
inputs=inputs,
label_next=head_blk,
)
blocks[blk_end] = ir_utils.fill_callee_epilogue(
block=ir.Block(scope=scope, loc=loc),
outputs=outputs,
)
|
{
"content_hash": "dde1396f9a4a0a839bcaf5785147600d",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 79,
"avg_line_length": 33.122093023255815,
"alnum_prop": 0.6007547832192381,
"repo_name": "sklam/numba",
"id": "5a21ab57afe5a40eefe25393a025dafacb1e6716",
"size": "11394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/core/withcontexts.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6783"
},
{
"name": "C",
"bytes": "638283"
},
{
"name": "C++",
"bytes": "52741"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "7918676"
},
{
"name": "Shell",
"bytes": "7823"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from .models import Tag
admin.site.register(Tag)
|
{
"content_hash": "49ceb0a0a5afa6041e422cdee1696925",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 14.166666666666666,
"alnum_prop": 0.788235294117647,
"repo_name": "hellsgate1001/waypoints",
"id": "85ae0d72f3135b2d541ab3afc4b8f61e53f06b6f",
"size": "85",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waypoints/wptags/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "135765"
},
{
"name": "HTML",
"bytes": "6888"
},
{
"name": "JavaScript",
"bytes": "256360"
},
{
"name": "Python",
"bytes": "22111"
}
],
"symlink_target": ""
}
|
"""
Django settings for GoogleNews_Scraper project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '=y#0)e-))9v@cktd$up3gsjctg&@_)d89j#1+(=1m15hd^nzll'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost']
# Application definition
INSTALLED_APPS = [
'news.apps.NewsConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'GoogleNews_Scraper.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'GoogleNews_Scraper.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'google_news_scrapper',
'USER': 'admin_gnews',
'PASSWORD': 'prestige',
'HOST': 'localhost',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATIC_URL = '/static/'
|
{
"content_hash": "e9b823bd6d65942ed26825f2346f8ca6",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 91,
"avg_line_length": 25.977099236641223,
"alnum_prop": 0.6758742286218042,
"repo_name": "andresmtz98/GoogleNews_Scraper_Django",
"id": "b476250f1143cca21d901d793c8ec810bd428885",
"size": "3403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GoogleNews_Scraper/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "29399"
},
{
"name": "HTML",
"bytes": "22016"
},
{
"name": "Python",
"bytes": "10742"
}
],
"symlink_target": ""
}
|
import kivy
kivy.require('1.9.0')
import math
from kivy.uix.togglebutton import ToggleButton
from kivy.graphics import Line, Color
from comicwidgets import StickMan, DraggableWidget
class ToolButton(ToggleButton):
def on_touch_down(self, touch):
ds = self.parent.drawing_space
if self.state == 'down' and ds.parent.collide_point(touch.x,touch.y):
(x,y) = ds.to_widget(touch.x, touch.y)
self.draw(ds,x,y)
return True
return super(ToolButton, self).on_touch_down(touch)
def draw(self, ds, x, y):
pass
class ToolStickman(ToolButton):
def draw(self, ds, x, y):
sm = StickMan(width=48, height=48)
sm.center = (x,y)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
sm.canvas.before.add(Color(*color_picker.color))
ds.add_widget(sm)
class ToolFigure(ToolButton):
def draw(self, ds, x, y):
(self.ix, self.iy) = (x,y)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
with ds.canvas:
Color(*color_picker.color)
self.figure=self.create_figure(x,y,x+1,y+1)
ds.bind(on_touch_move=self.update_figure)
ds.bind(on_touch_up=self.end_figure)
def update_figure(self, ds, touch):
ds.canvas.remove(self.figure)
with ds.canvas:
self.figure = self.create_figure(self.ix, self.iy,touch.x,touch.y)
def end_figure(self, ds, touch):
ds.unbind(on_touch_move=self.update_figure)
ds.unbind(on_touch_up=self.end_figure)
ds.canvas.remove(self.figure)
self.widgetize(ds,self.ix,self.iy,touch.x,touch.y)
def widgetize(self,ds,ix,iy,fx,fy):
widget = self.create_widget(ix,iy,fx,fy)
(ix,iy) = widget.to_local(ix,iy,relative=True)
(fx,fy) = widget.to_local(fx,fy,relative=True)
screen_manager = self.parent.comic_creator.manager
color_picker = screen_manager.color_picker
widget.canvas.add(Color(*color_picker.color))
widget.canvas.add(self.create_figure(ix,iy,fx,fy))
ds.add_widget(widget)
def create_figure(self,ix,iy,fx,fy):
pass
def create_widget(self,ix,iy,fx,fy):
pass
class ToolLine(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(points=[ix, iy, fx, fy])
def create_widget(self,ix,iy,fx,fy):
pos = (min(ix, fx), min(iy, fy))
size = (abs(fx-ix), abs(fy-iy))
return DraggableWidget(pos = pos, size = size)
class ToolCircle(ToolFigure):
def create_figure(self,ix,iy,fx,fy):
return Line(circle=[ix,iy,math.hypot(ix-fx,iy-fy)])
def create_widget(self,ix,iy,fx,fy):
r = math.hypot(ix-fx, iy-fy)
pos = (ix-r, iy-r)
size = (2*r, 2*r)
return DraggableWidget(pos = pos, size = size)
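# Geometry note: both tools size the DraggableWidget to the figure's bounding
# box -- ToolLine from the axis-aligned extent of the two touch endpoints,
# ToolCircle centered on the initial touch with side 2*r, where r is the
# distance dragged.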
|
{
"content_hash": "db92ec67cd41aa3b9833134274b8d883",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 34.70238095238095,
"alnum_prop": 0.6229845626072041,
"repo_name": "pimier15/PyGUI",
"id": "308002505275c9d1d08643d9031cdc22f0283cf6",
"size": "2939",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Kivy/Kivy/Bk_Interractive/My/C4/multi_touch_samplebase/toolbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "636598"
}
],
"symlink_target": ""
}
|
import MySQLdb
with open('input.txt', 'r') as f:
lines = f.readlines()
primeCustomers = []
for line in lines:
words = line.split()
primeCustomers.append({"id": int(words[0]), "newRep": words[1]})
db = None
cur = None
try:
db = MySQLdb.connect('localhost', 'root', '', 'sports2000')
cur = db.cursor()
# Get a list of all the customers we are going to impact
    # This section is analogous to the ABL code:
# FOR EACH primeCustomer, FIRST Customer
# WHERE Customer.CustNum = primeCustomer.id BY primeCustomer.id:
# DISPLAY Customer.Name primeCustomer.id.
# END.
    # Build one placeholder per customer id so the IN clause stays
    # parameterized (joining the raw ints into a single string would also
    # raise a TypeError in str.join).
    placeholders = ", ".join(["%s"] * len(primeCustomers))
    sql = ('SELECT Name, custNum FROM customers'
           ' WHERE custNum IN ({0})'.format(placeholders))
    cur.execute(sql, [p['id'] for p in primeCustomers])
row = cur.fetchone()
while row is not None:
print ", ".join([str(c) for c in row])
row = cur.fetchone()
# Get a list of all the new salesReps for Massachusetts customers
    # This is analogous to the ABL code:
# FOR EACH Customer WHERE Customer.State = "MA",
# EACH primeCustomer WHERE primeCustomer.id = Customer.CustNum:
# DISPLAY Customer.Name primeCustomer.newRep.
# END.
sql = "SELECT Name, custNum from customers where State = 'MA'"
cur.execute(sql)
row = cur.fetchone()
while row is not None:
for p in primeCustomers:
if row[1] == p['id']:
print row[0], p['newRep']
row = cur.fetchone()
# Go through each customer and assign them the sales rep
# with the given name
#
    # This is analogous to the ABL code:
# FOR EACH prime:
# FIND Customer WHERE Customer.CustNum = primeCustomer.customerNumber.
# FIND SalesRep WHERE SalesRep.RepName = primeCustomer.newRep.
# Customer.SalesRep = SalesRep.SalesRep.
# END.
for p in primeCustomers:
        # Parameterized subquery: look up the rep's id by name, then assign
        # it, using the same table/column names as the SELECTs above.
        sql = ('UPDATE customers '
               'SET salesRep = '
               '(SELECT salesRep FROM SalesRep WHERE repName = %s LIMIT 1) '
               'WHERE custNum = %s')
        cur.execute(sql, (p['newRep'], p['id']))
db.commit()
except Exception as e:
db.rollback()
print e
finally:
if cur:
cur.close()
if db:
db.close()
|
{
"content_hash": "09e42f00072f22c9bce9115f8cba2365",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 78,
"avg_line_length": 30.358974358974358,
"alnum_prop": 0.6110641891891891,
"repo_name": "progress/WhyABL",
"id": "4251102144b637b96ff97a1929de20927efc62b2",
"size": "2477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DbAndStructures/example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C#",
"bytes": "2229"
},
{
"name": "Java",
"bytes": "18965"
},
{
"name": "OpenEdge ABL",
"bytes": "7121"
},
{
"name": "Python",
"bytes": "10545"
}
],
"symlink_target": ""
}
|
"""Tests for learned_optimizers.tasks.fixed.vit."""
from absl.testing import absltest
from absl.testing import parameterized
from learned_optimization.tasks import test_utils
from learned_optimization.tasks.fixed import mlp_mixer
tasks = [
'MLPMixer_Cifar100_bs256_tiny16',
'MLPMixer_Cifar100_small16',
'MLPMixer_Cifar100_tiny16',
'MLPMixer_Food101_64_bs256_tiny16',
'MLPMixer_Food101_64_small16',
'MLPMixer_Food101_64_tiny16',
'MLPMixer_ImageNet64_bs256_tiny16',
'MLPMixer_ImageNet64_small16',
'MLPMixer_ImageNet64_tiny16',
]
class MLPMixerTest(parameterized.TestCase):
@parameterized.parameters(tasks)
def test_tasks(self, task_name):
task = getattr(mlp_mixer, task_name)()
test_utils.smoketest_task(task)
if __name__ == '__main__':
absltest.main()
|
{
"content_hash": "86cadbdbc1cd1d6ce471644a84a837b9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 54,
"avg_line_length": 26.933333333333334,
"alnum_prop": 0.7252475247524752,
"repo_name": "google/learned_optimization",
"id": "96e0e265917146cc5fd5de6236b1749063f48cc7",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "learned_optimization/tasks/fixed/mlp_mixer_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "177493"
},
{
"name": "Python",
"bytes": "1290675"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import print_function
import re
import operator
import six
from six.moves import range
from collections import Counter
debug = False
test = False
def is_number(s):
try:
float(s) if '.' in s else int(s)
return True
except ValueError:
return False
def load_stop_words(stop_word_file):
"""
Utility function to load stop words from a file and return as a list of words
@param stop_word_file Path and file name of a file containing stop words.
@return list A list of stop words.
"""
stop_words = []
for line in open(stop_word_file):
if line.strip()[0:1] != "#":
for word in line.split(): # in case more than one per line
stop_words.append(word)
return stop_words
def separate_words(text, min_word_return_size):
"""
    Utility function to return a list of all words that have a length greater than a specified number of characters.
@param text The text that must be split in to words.
@param min_word_return_size The minimum no of characters a word must have to be included.
"""
splitter = re.compile('[^a-zA-Z0-9_\\+\\-/]')
words = []
for single_word in splitter.split(text):
current_word = single_word.strip().lower()
# leave numbers in phrase, but don't count as words, since they tend to invalidate scores of their phrases
if len(current_word) > min_word_return_size and current_word != '' and not is_number(current_word):
words.append(current_word)
return words
def split_sentences(text):
"""
Utility function to return a list of sentences.
@param text The text that must be split in to sentences.
"""
sentence_delimiters = re.compile(u'[\\[\\]\n.!?,;:\t\\-\\"\\(\\)\\\'\u2019\u2013]')
sentences = sentence_delimiters.split(text)
return sentences
def build_stop_word_regex(stop_word_list):
stop_word_regex_list = []
for word in stop_word_list:
word_regex = '\\b' + word + '\\b'
stop_word_regex_list.append(word_regex)
stop_word_pattern = re.compile('|'.join(stop_word_regex_list), re.IGNORECASE)
return stop_word_pattern
#
# Function that extracts the adjoined candidates from a list of sentences and filters them by frequency
#
def extract_adjoined_candidates(sentence_list, stoplist, min_keywords, max_keywords, min_freq):
adjoined_candidates = []
for s in sentence_list:
# Extracts the candidates from each single sentence and adds them to the list
adjoined_candidates += adjoined_candidates_from_sentence(s, stoplist, min_keywords, max_keywords)
# Filters the candidates and returns them
return filter_adjoined_candidates(adjoined_candidates, min_freq)
# return adjoined_candidates
#
# Function that extracts the adjoined candidates from a single sentence
#
def adjoined_candidates_from_sentence(s, stoplist, min_keywords, max_keywords):
# Initializes the candidate list to empty
candidates = []
# Splits the sentence to get a list of lowercase words
sl = s.lower().split()
# For each possible length of the adjoined candidate
for num_keywords in range(min_keywords, max_keywords + 1):
        # Scan every start position that leaves room for a full candidate
for i in range(0, len(sl) - num_keywords):
# Position i marks the first word of the candidate. Proceeds only if it's not a stopword
if sl[i] not in stoplist:
candidate = sl[i]
# Initializes j (the pointer to the next word) to 1
j = 1
                # Initializes the word counter. This counts the non-stopword words in the candidate
keyword_counter = 1
contains_stopword = False
# Until the word count reaches the maximum number of keywords or the end is reached
while keyword_counter < num_keywords and i + j < len(sl):
# Adds the next word to the candidate
candidate = candidate + ' ' + sl[i + j]
# If it's not a stopword, increase the word counter. If it is, turn on the flag
if sl[i + j] not in stoplist:
keyword_counter += 1
else:
contains_stopword = True
# Next position
j += 1
# Adds the candidate to the list only if:
                # 1) it contains at least one stopword (if it doesn't, it's already been considered)
# AND
# 2) the last word is not a stopword
# AND
# 3) the adjoined candidate keyphrase contains exactly the correct number of keywords (to avoid doubles)
if contains_stopword and candidate.split()[-1] not in stoplist and keyword_counter == num_keywords:
candidates.append(candidate)
return candidates
#
# Function that filters the adjoined candidates to keep only those that appear with a certain frequency
#
def filter_adjoined_candidates(candidates, min_freq):
# Creates a dictionary where the key is the candidate and the value is the frequency of the candidate
candidates_freq = Counter(candidates)
filtered_candidates = []
# Uses the dictionary to filter the candidates
for candidate in candidates:
freq = candidates_freq[candidate]
if freq >= min_freq:
filtered_candidates.append(candidate)
return filtered_candidates
def generate_candidate_keywords(sentence_list, stopword_pattern, stop_word_list, min_char_length=1, max_words_length=5,
min_words_length_adj=1, max_words_length_adj=1, min_phrase_freq_adj=2):
phrase_list = []
for s in sentence_list:
tmp = re.sub(stopword_pattern, '|', s.strip())
phrases = tmp.split("|")
for phrase in phrases:
phrase = phrase.strip().lower()
if phrase != "" and is_acceptable(phrase, min_char_length, max_words_length):
phrase_list.append(phrase)
phrase_list += extract_adjoined_candidates(sentence_list, stop_word_list, min_words_length_adj,
max_words_length_adj, min_phrase_freq_adj)
return phrase_list
def is_acceptable(phrase, min_char_length, max_words_length):
# a phrase must have a min length in characters
if len(phrase) < min_char_length:
return 0
# a phrase must have a max number of words
words = phrase.split()
if len(words) > max_words_length:
return 0
digits = 0
alpha = 0
for i in range(0, len(phrase)):
if phrase[i].isdigit():
digits += 1
elif phrase[i].isalpha():
alpha += 1
# a phrase must have at least one alpha character
if alpha == 0:
return 0
# a phrase must have more alpha than digits characters
if digits > alpha:
return 0
return 1
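# Worked examples of the rules above (assuming min_char_length=1,
# max_words_length=5):
#   is_acceptable('linear constraints', 1, 5)  -> 1
#   is_acceptable('42', 1, 5)                  -> 0  (no alpha characters)
#   is_acceptable('12 34 ab', 1, 5)            -> 0  (more digits than letters)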
def calculate_word_scores(phraseList):
word_frequency = {}
word_degree = {}
for phrase in phraseList:
word_list = separate_words(phrase, 0)
word_list_length = len(word_list)
word_list_degree = word_list_length - 1
# if word_list_degree > 3: word_list_degree = 3 #exp.
for word in word_list:
word_frequency.setdefault(word, 0)
word_frequency[word] += 1
word_degree.setdefault(word, 0)
word_degree[word] += word_list_degree # orig.
# word_degree[word] += 1/(word_list_length*1.0) #exp.
for item in word_frequency:
word_degree[item] = word_degree[item] + word_frequency[item]
    # Calculate Word scores = deg(w)/freq(w)
word_score = {}
for item in word_frequency:
word_score.setdefault(item, 0)
word_score[item] = word_degree[item] / (word_frequency[item] * 1.0) # orig.
# word_score[item] = word_frequency[item]/(word_degree[item] * 1.0) #exp.
return word_score
def generate_candidate_keyword_scores(phrase_list, word_score, min_keyword_frequency=1):
keyword_candidates = {}
for phrase in phrase_list:
if min_keyword_frequency > 1:
if phrase_list.count(phrase) < min_keyword_frequency:
continue
keyword_candidates.setdefault(phrase, 0)
word_list = separate_words(phrase, 0)
candidate_score = 0
for word in word_list:
candidate_score += word_score[word]
keyword_candidates[phrase] = candidate_score
return keyword_candidates
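# Worked example of the scoring pipeline above, for
# phrase_list = ['deep learning', 'learning']:
#   freq: deep=1, learning=2
#   deg:  deep=1+1=2, learning=1+2=3  (co-occurrence degree + own frequency)
#   word scores:   deep=2/1=2.0, learning=3/2=1.5
#   phrase scores: 'deep learning'=2.0+1.5=3.5, 'learning'=1.5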
class Rake(object):
def __init__(self, stop_words_path, min_char_length=1, max_words_length=5, min_keyword_frequency=1,
min_words_length_adj=1, max_words_length_adj=1, min_phrase_freq_adj=2):
self.__stop_words_path = stop_words_path
self.__stop_words_list = load_stop_words(stop_words_path)
self.__min_char_length = min_char_length
self.__max_words_length = max_words_length
self.__min_keyword_frequency = min_keyword_frequency
self.__min_words_length_adj = min_words_length_adj
self.__max_words_length_adj = max_words_length_adj
self.__min_phrase_freq_adj = min_phrase_freq_adj
def run(self, text):
sentence_list = split_sentences(text)
stop_words_pattern = build_stop_word_regex(self.__stop_words_list)
phrase_list = generate_candidate_keywords(sentence_list, stop_words_pattern, self.__stop_words_list,
self.__min_char_length, self.__max_words_length,
self.__min_words_length_adj, self.__max_words_length_adj,
self.__min_phrase_freq_adj)
word_scores = calculate_word_scores(phrase_list)
keyword_candidates = generate_candidate_keyword_scores(phrase_list, word_scores, self.__min_keyword_frequency)
sorted_keywords = sorted(six.iteritems(keyword_candidates), key=operator.itemgetter(1), reverse=True)
return sorted_keywords
if test and __name__ == '__main__':
text = "Compatibility of systems of linear constraints over the set of natural numbers. Criteria of compatibility of a system of linear Diophantine equations, strict inequations, and nonstrict inequations are considered. Upper bounds for components of a minimal set of solutions and algorithms of construction of minimal generating sets of solutions for all types of systems are given. These criteria and the corresponding algorithms for constructing a minimal supporting set of solutions can be used in solving all the considered types of systems and systems of mixed types."
# Split text into sentences
sentenceList = split_sentences(text)
# stoppath = "FoxStoplist.txt" #Fox stoplist contains "numbers", so it will not find "natural numbers" like in Table 1.1
stoppath = "data/stoplists/SmartStoplist.txt" # SMART stoplist misses some of the lower-scoring keywords in Figure 1.5, which means that the top 1/3 cuts off one of the 4.0 score words in Table 1.1
    # build_stop_word_regex expects the stop word list itself, not the path
    stopwords = load_stop_words(stoppath)
    stopwordpattern = build_stop_word_regex(stopwords)
    # generate candidate keywords
    phraseList = generate_candidate_keywords(sentenceList, stopwordpattern, stopwords)
# calculate individual word scores
wordscores = calculate_word_scores(phraseList)
# generate candidate keyword scores
keywordcandidates = generate_candidate_keyword_scores(phraseList, wordscores)
if debug: print(keywordcandidates)
sortedKeywords = sorted(six.iteritems(keywordcandidates), key=operator.itemgetter(1), reverse=True)
if debug: print(sortedKeywords)
totalKeywords = len(sortedKeywords)
if debug: print(totalKeywords)
print(sortedKeywords[0:(totalKeywords // 3)])
rake = Rake("data/stoplists/SmartStoplist.txt")
keywords = rake.run(text)
print(keywords)
|
{
"content_hash": "903c3ab6d4b8db6555f41ae4c2a464a2",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 580,
"avg_line_length": 42.26855123674912,
"alnum_prop": 0.6444574485871928,
"repo_name": "zelandiya/RAKE-tutorial",
"id": "4e047e0257dd7cdde9147d227954cac033695ef7",
"size": "13016",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rake.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22042"
}
],
"symlink_target": ""
}
|
import math
import numpy as np
from scipy.stats import norm
from . import common_args
from ..util import read_param_file, ResultDict
def analyze(
problem,
Y,
M=4,
num_resamples=100,
conf_level=0.95,
print_to_console=False,
seed=None,
):
"""Perform extended Fourier Amplitude Sensitivity Test on model outputs
Returns a dictionary with keys 'S1' and 'ST', where each entry is a list of
size D (the number of parameters) containing the indices in the same order
as the parameter file.
Notes
-----
Compatible with:
`fast_sampler` : :func:`SALib.sample.fast_sampler.sample`
Examples
--------
>>> X = fast_sampler.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = fast.analyze(problem, Y, print_to_console=False)
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
M : int
The interference parameter, i.e., the number of harmonics to sum in
the Fourier series decomposition (default 4)
print_to_console : bool
Print results directly to console (default False)
seed : int
Seed to generate a random number
References
----------
.. [1] Cukier, R. I., C. M. Fortuin, K. E. Shuler, A. G. Petschek,
and J. H. Schaibly (1973).
"Study of the sensitivity of coupled reaction
systems to uncertainties in rate coefficients." J. Chem. Phys.,
59(8):3873-3878, doi:10.1063/1.1680571.
.. [2] Saltelli, A., S. Tarantola, and K. P.-S. Chan (1999). "A
Quantitative Model-Independent Method for Global Sensitivity
Analysis of Model Output." Technometrics, 41(1):39-56,
doi:10.1080/00401706.1999.10485594.
.. [3] Pujol, G. (2006)
fast99 - R `sensitivity` package
https://github.com/cran/sensitivity/blob/master/R/fast99.R
"""
if seed:
np.random.seed(seed)
D = problem["num_vars"]
    if Y.size % D == 0:
N = int(Y.size / D)
else:
msg = """
Error: Number of samples in model output file must be a multiple of D,
where D is the number of parameters.
"""
raise ValueError(msg)
# Recreate the vector omega used in the sampling
omega_0 = math.floor((N - 1) / (2 * M))
# Calculate and Output the First and Total Order Values
Si = ResultDict((k, [None] * D) for k in ["S1", "ST", "S1_conf", "ST_conf"])
Si["names"] = problem["names"]
for i in range(D):
z = np.arange(i * N, (i + 1) * N)
Y_l = Y[z]
S1, ST = compute_orders(Y_l, N, M, omega_0)
Si["S1"][i] = S1
Si["ST"][i] = ST
S1_d_conf, ST_d_conf = bootstrap(Y_l, M, num_resamples, conf_level)
Si["S1_conf"][i] = S1_d_conf
Si["ST_conf"][i] = ST_d_conf
if print_to_console:
print(Si.to_df())
return Si
def compute_orders(outputs: np.ndarray, N: int, M: int, omega: int):
f = np.fft.fft(outputs)
Sp = np.power(np.absolute(f[np.arange(1, math.ceil(N / 2))]) / N, 2)
V = 2.0 * np.sum(Sp)
# Calculate first and total order
D1 = 2.0 * np.sum(Sp[np.arange(1, M + 1) * omega - 1])
Dt = 2.0 * np.sum(Sp[np.arange(math.floor(omega / 2.0))])
return (D1 / V), (1.0 - Dt / V)
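# Reading compute_orders above: Sp[k-1] is the spectral power at integer
# frequency k. D1 sums the power at the driver frequency omega and its first
# M harmonics (the variance attributable to the parameter of interest), while
# Dt sums the power below omega/2, which belongs to all *other* parameters --
# hence S1 = D1/V and ST = 1 - Dt/V.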
def bootstrap(Y: np.ndarray, M: int, resamples: int, conf_level: float):
"""Compute CIs.
Infers ``N`` from results of sub-sample ``Y`` and re-estimates omega (ω)
for the above ``N``.
"""
# Use half of available data each time
T_data = Y.shape[0]
n_size = math.ceil(T_data * 0.5)
res_S1 = np.zeros(resamples)
res_ST = np.zeros(resamples)
for i in range(resamples):
sample_idx = np.random.choice(T_data, replace=True, size=n_size)
Y_rs = Y[sample_idx]
N = len(Y_rs)
omega = math.floor((N - 1) / (2 * M))
S1, ST = compute_orders(Y_rs, N, M, omega)
res_S1[i] = S1
res_ST[i] = ST
bnd = norm.ppf(0.5 + conf_level / 2.0)
S1_conf = bnd * res_S1.std(ddof=1)
ST_conf = bnd * res_ST.std(ddof=1)
return S1_conf, ST_conf
# No additional arguments required for FAST
def cli_parse(parser):
"""Add method specific options to CLI parser.
Parameters
----------
parser : argparse object
Returns
----------
Updated argparse object
"""
parser.add_argument(
"-M", "--M", type=int, required=False, default=4, help="Inference parameter"
)
parser.add_argument(
"-r",
"--resamples",
type=int,
required=False,
default=100,
help="Number of bootstrap resamples for Sobol " "confidence intervals",
)
return parser
def cli_action(args):
problem = read_param_file(args.paramfile)
Y = np.loadtxt(
args.model_output_file, delimiter=args.delimiter, usecols=(args.column,)
)
analyze(
problem,
Y,
M=args.M,
num_resamples=args.resamples,
print_to_console=True,
seed=args.seed,
)
if __name__ == "__main__":
common_args.run_cli(cli_parse, cli_action)
|
{
"content_hash": "d5b29dba60a59fc3f6b25fc26e164a17",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 84,
"avg_line_length": 26.871134020618555,
"alnum_prop": 0.574717053520046,
"repo_name": "SALib/SALib",
"id": "c37a690009c7c72798774bcfb153d1f0c6aab6dd",
"size": "5214",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/SALib/analyze/fast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "142"
},
{
"name": "Python",
"bytes": "2362843"
},
{
"name": "TeX",
"bytes": "40231"
}
],
"symlink_target": ""
}
|
"""Performs (incremental) backups of activities for a given Garmin Connect
account.
The activities are stored in a local directory on the user's computer.
The backups are incremental, meaning that only activities that aren't already
stored in the backup directory will be downloaded.
"""
import argparse
import getpass
from garminexport.garminclient import GarminClient
import garminexport.backup
from garminexport.backup import export_formats
import logging
import os
import re
import sys
import traceback
logging.basicConfig(
level=logging.INFO, format="%(asctime)-15s [%(levelname)s] %(message)s")
log = logging.getLogger(__name__)
LOG_LEVELS = {
"DEBUG": logging.DEBUG,
"INFO": logging.INFO,
"WARNING": logging.WARNING,
"ERROR": logging.ERROR
}
"""Command-line (string-based) log-level mapping to logging module levels."""
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
"Performs incremental backups of activities for a "
"given Garmin Connect account. Only activities that "
"aren't already stored in the backup directory will "
"be downloaded."))
# positional args
parser.add_argument(
"username", metavar="<username>", type=str, help="Account user name.")
# optional args
parser.add_argument(
"--password", type=str, help="Account password.")
parser.add_argument(
"--backup-dir", metavar="DIR", type=str,
help=("Destination directory for downloaded activities. Default: "
"./activities/"), default=os.path.join(".", "activities"))
parser.add_argument(
"--log-level", metavar="LEVEL", type=str,
help=("Desired log output level (DEBUG, INFO, WARNING, ERROR). "
"Default: INFO."), default="INFO")
parser.add_argument(
"-f", "--format", choices=export_formats,
default=None, action='append',
help=("Desired output formats ("+', '.join(export_formats)+"). "
"Default: ALL."))
parser.add_argument(
"-E", "--ignore-errors", action='store_true',
help="Ignore errors and keep going. Default: FALSE")
args = parser.parse_args()
    if args.log_level not in LOG_LEVELS:
raise ValueError("Illegal log-level: {}".format(args.log_level))
# if no --format was specified, all formats are to be backed up
args.format = args.format if args.format else export_formats
log.info("backing up formats: %s", ", ".join(args.format))
logging.root.setLevel(LOG_LEVELS[args.log_level])
try:
if not os.path.isdir(args.backup_dir):
os.makedirs(args.backup_dir)
if not args.password:
args.password = getpass.getpass("Enter password: ")
with GarminClient(args.username, args.password) as client:
# get all activity ids and timestamps from Garmin account
log.info("retrieving activities for {} ...".format(args.username))
activities = set(client.list_activities())
log.info("account has a total of {} activities.".format(
len(activities)))
missing_activities = garminexport.backup.need_backup(
activities, args.backup_dir, args.format)
backed_up = activities - missing_activities
log.info("{} contains {} backed up activities.".format(
args.backup_dir, len(backed_up)))
log.info("activities that aren't backed up: {}".format(
len(missing_activities)))
for index, activity in enumerate(missing_activities):
id, start, stationary = activity
log.info("backing up %sactivity %d from %s (%d out of %d) ..." %
("stationary " if stationary else "", id, start, index+1, len(missing_activities)))
try:
garminexport.backup.download(
client, activity, args.backup_dir, args.format)
except Exception as e:
log.error(u"failed with exception: %s", e)
if not args.ignore_errors:
raise
except Exception as e:
        log.error(u"failed with exception: %s", e)
        log.error(traceback.format_exc())
raise
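# Example invocation (a sketch; available --format values depend on the
# installed garminexport version):
#   python garminbackup.py <username> --backup-dir=./activities --format=gpx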
|
{
"content_hash": "a793fae6917338f6a86244b58fecce08",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 108,
"avg_line_length": 40.111111111111114,
"alnum_prop": 0.6168051708217913,
"repo_name": "dlenski/garminexport",
"id": "b8512805458ddeb4beb38c1966b7982cf42dfa3a",
"size": "4355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garminbackup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "151"
},
{
"name": "Python",
"bytes": "36013"
}
],
"symlink_target": ""
}
|
__author__ = 'xsank'
import json
class BaseData(object):
def __init__(self, data=""):
self.from_json(data)
def from_json(self, data=""):
self.__dict__ = json.loads(data)
def to_json(self):
return json.dumps(self)
def get_type(self):
return self.tp
class ClientData(BaseData):
def __init__(self, data=""):
super(ClientData, self).__init__(data)
class ServerData(BaseData):
def __init__(self, data=""):
self.tp = 'server'
self.data = data
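# Illustrative round trip (sketch; assumes the client sends a JSON payload
# with 'tp' and 'data' fields, as BaseData.from_json expects):
#
#   cd = ClientData('{"tp": "client", "data": "ls"}')
#   cd.get_type()                  # -> 'client'
#   ServerData('hello').to_json()  # -> e.g. '{"tp": "server", "data": "hello"}'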
|
{
"content_hash": "ae8e6533b882a93bf329fe3a2cf91317",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 46,
"avg_line_length": 17.096774193548388,
"alnum_prop": 0.5584905660377358,
"repo_name": "Saberlion/docker-webssh",
"id": "aad3d238f6ec3305f699424623238f31996f3d3b",
"size": "530",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "app/data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "536"
},
{
"name": "HTML",
"bytes": "4403"
},
{
"name": "JavaScript",
"bytes": "164374"
},
{
"name": "Python",
"bytes": "7915"
},
{
"name": "Shell",
"bytes": "87"
}
],
"symlink_target": ""
}
|
from models import Employee
def create_employee(
username='johndoe',
department='Engineering',
first_name='John',
last_name='Doe',
photo_url=None,
office=None,
):
if photo_url is None:
photo_url = 'http://example.com/photos/{0}.jpg'.format(username)
return Employee.create_from_dict({
'username': username,
'department': department,
'first_name': first_name,
'last_name': last_name,
'photo_url': photo_url,
'office': office,
})
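# Example (sketch): create_employee(username='alice') returns an Employee in
# 'Engineering' with a derived photo_url of
# 'http://example.com/photos/alice.jpg', unless photo_url is passed explicitly.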
|
{
"content_hash": "f502823334712e18c1eb5ae80596fcd0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 22.782608695652176,
"alnum_prop": 0.5935114503816794,
"repo_name": "Yelp/love",
"id": "6d5611365a64276e265fe93e5863ed9892752195",
"size": "548",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testing/factories/employee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5902"
},
{
"name": "HTML",
"bytes": "43114"
},
{
"name": "JavaScript",
"bytes": "836"
},
{
"name": "Makefile",
"bytes": "1241"
},
{
"name": "Python",
"bytes": "118945"
}
],
"symlink_target": ""
}
|
""" """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import numpy as np
from numpy import cos, sin
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates.angles import rotation_matrix
from astropy.coordinates.builtin_frames.galactocentric import _ROLL0 as ROLL0
from .propermotion import pm_gal_to_icrs, pm_icrs_to_gal
__all__ = ["vgsr_to_vhel", "vhel_to_vgsr", "vgal_to_hel", "vhel_to_gal", "poincare_polar"]
# This is the default circular velocity and LSR peculiar velocity of the Sun
# TODO: make this a config item?
VCIRC = 220.*u.km/u.s
VLSR = [10., 5.25, 7.17]*u.km/u.s
def vgsr_to_vhel(coordinate, vgsr, vcirc=VCIRC, vlsr=VLSR):
"""
Convert a radial velocity in the Galactic standard of rest (GSR) to
a barycentric radial velocity.
Parameters
----------
coordinate : :class:`~astropy.coordinates.SkyCoord`
        An Astropy SkyCoord object or any object that can be passed
to the SkyCoord initializer.
vgsr : :class:`~astropy.units.Quantity`
GSR line-of-sight velocity.
vcirc : :class:`~astropy.units.Quantity`
Circular velocity of the Sun.
vlsr : :class:`~astropy.units.Quantity`
Velocity of the Sun relative to the local standard
of rest (LSR).
Returns
-------
vhel : :class:`~astropy.units.Quantity`
Radial velocity in a barycentric rest frame.
"""
c = coord.SkyCoord(coordinate)
g = c.galactic
l,b = g.l, g.b
if not isinstance(vgsr, u.Quantity):
raise TypeError("vgsr must be a Quantity subclass")
# compute the velocity relative to the LSR
lsr = vgsr - vcirc*sin(l)*cos(b)
# velocity correction for Sun relative to LSR
v_correct = vlsr[0]*cos(b)*cos(l) + \
vlsr[1]*cos(b)*sin(l) + \
vlsr[2]*sin(b)
vhel = lsr - v_correct
return vhel
def vhel_to_vgsr(coordinate, vhel, vcirc=VCIRC, vlsr=VLSR):
""" Convert a velocity from a heliocentric radial velocity to
the Galactic standard of rest (GSR).
Parameters
----------
coordinate : :class:`~astropy.coordinates.SkyCoord`
        An Astropy SkyCoord object or any object that can be passed
to the SkyCoord initializer.
vhel : :class:`~astropy.units.Quantity`
Barycentric line-of-sight velocity.
vcirc : :class:`~astropy.units.Quantity`
Circular velocity of the Sun.
vlsr : :class:`~astropy.units.Quantity`
Velocity of the Sun relative to the local standard
of rest (LSR).
Returns
-------
vgsr : :class:`~astropy.units.Quantity`
Radial velocity in a galactocentric rest frame.
"""
c = coord.SkyCoord(coordinate)
g = c.galactic
l,b = g.l, g.b
if not isinstance(vhel, u.Quantity):
raise TypeError("vhel must be a Quantity subclass")
lsr = vhel + vcirc*sin(l)*cos(b)
# velocity correction for Sun relative to LSR
v_correct = vlsr[0]*cos(b)*cos(l) + \
vlsr[1]*cos(b)*sin(l) + \
vlsr[2]*sin(b)
vgsr = lsr + v_correct
return vgsr
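# Example sketch (values illustrative only; doctest deliberately skipped):
#
#   >>> import astropy.units as u
#   >>> import astropy.coordinates as coord
#   >>> c = coord.SkyCoord(l=120*u.deg, b=30*u.deg, frame='galactic')
#   >>> vhel_to_vgsr(c, vhel=110*u.km/u.s)  # doctest: +SKIP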
def _icrs_gctc_velocity_matrix(galactocentric_frame):
""" Construct a transformation matrix to go from heliocentric ICRS to a galactocentric
frame. This is just a rotation and tilt which makes it approximately the same
as transforming to Galactic coordinates. This only works for velocity because there
is no shift due to the position of the Sun.
"""
# define rotation matrix to align x(ICRS) with the vector to the Galactic center
M1 = rotation_matrix(-galactocentric_frame.galcen_dec, 'y')
M2 = rotation_matrix(galactocentric_frame.galcen_ra, 'z')
# extra roll away from the Galactic x-z plane
M3 = rotation_matrix(ROLL0 - galactocentric_frame.roll, 'x')
# rotate about y' to account for tilt due to Sun's height above the plane
z_d = (galactocentric_frame.z_sun / galactocentric_frame.galcen_distance).decompose()
M4 = rotation_matrix(-np.arcsin(z_d), 'y')
return M4*M3*M1*M2 # this is right: 4,3,1,2
def vgal_to_hel(coordinate, vxyz, vcirc=VCIRC, vlsr=VLSR, galactocentric_frame=None):
r"""
Convert a Galactocentric, cartesian velocity to a Heliocentric velocity in
spherical coordinates (e.g., proper motion and radial velocity).
The frame of the input coordinate determines the output frame of the proper motions.
For example, if the input coordinate is in the ICRS frame, the proper motions
    returned will be :math:`(\mu_\alpha\cos\delta,\mu_\delta)`. This function also
handles array inputs (see examples below).
Examples
--------
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.Galactocentric(x=15.*u.kpc, y=13.*u.kpc, z=2.*u.kpc)
>>> vxyz = [-115., 100., 95.]*u.km/u.s
>>> icrs = c.transform_to(coord.ICRS)
>>> vgal_to_hel(icrs, vxyz)
(<Quantity -0.876885123328934 mas / yr>, <Quantity 0.024501209459030334 mas / yr>, <Quantity -163.24449462243052 km / s>)
>>> c = coord.Galactocentric([[15.,11.],[13,21.],[2.,-7]]*u.kpc)
>>> vxyz = [[-115.,11.], [100.,-21.], [95.,103]]*u.km/u.s
>>> icrs = c.transform_to(coord.ICRS)
>>> vgal_to_hel(icrs, vxyz)
(<Quantity [-0.87688512,-0.91157482] mas / yr>, <Quantity [ 0.02450121,-0.86124895] mas / yr>, <Quantity [-163.24449462,-198.31241148] km / s>)
Parameters
----------
coordinate : :class:`~astropy.coordinates.SkyCoord`, :class:`~astropy.coordinates.BaseCoordinateFrame`
This is most commonly a :class:`~astropy.coordinates.SkyCoord` object, but
alternatively, it can be any coordinate frame object that is transformable to the
Galactocentric frame.
vxyz : :class:`~astropy.units.Quantity`, iterable
Cartesian velocity components :math:`(v_x,v_y,v_z)`. This should either be a single
:class:`~astropy.units.Quantity` object with shape (3,N), or an iterable
object with 3 :class:`~astropy.units.Quantity` objects as elements.
vcirc : :class:`~astropy.units.Quantity` (optional)
Circular velocity of the Sun.
vlsr : :class:`~astropy.units.Quantity` (optional)
Velocity of the Sun relative to the local standard
of rest (LSR).
galactocentric_frame : :class:`~astropy.coordinates.Galactocentric` (optional)
An instantiated :class:`~astropy.coordinates.Galactocentric` frame object with
custom parameters for the Galactocentric coordinates. For example, if you want
to set your own position of the Galactic center, you can pass in a frame with
custom `galcen_ra` and `galcen_dec`.
Returns
-------
pmv : tuple
A tuple containing the proper motions (in Galactic coordinates) and
radial velocity, all as :class:`~astropy.units.Quantity` objects.
"""
if galactocentric_frame is None:
        galactocentric_frame = coord.Galactocentric()
# so I don't accidentally modify in place
vxyz = vxyz.copy()
# make sure this is a coordinate and get the frame for later use
c = coord.SkyCoord(coordinate)
coord_frame = c.frame
R = _icrs_gctc_velocity_matrix(galactocentric_frame)
# remove circular and LSR velocities
vxyz[1] = vxyz[1] - vcirc
for i in range(3):
vxyz[i] = vxyz[i] - vlsr[i]
orig_shape = vxyz.shape
v_icrs = np.linalg.inv(R).dot(vxyz.reshape(vxyz.shape[0], np.prod(vxyz.shape[1:]))).reshape(orig_shape)
# get cartesian galactocentric
x_icrs = c.icrs.cartesian.xyz
d = np.sqrt(np.sum(x_icrs**2, axis=0))
dxy = np.sqrt(x_icrs[0]**2 + x_icrs[1]**2)
vr = np.sum(x_icrs * v_icrs, axis=0) / d
with u.set_enabled_equivalencies(u.dimensionless_angles()):
mua = ((x_icrs[0]*v_icrs[1] - v_icrs[0]*x_icrs[1]) / dxy**2).to(u.mas/u.yr)
mua_cosd = (mua * dxy / d).to(u.mas/u.yr)
mud = (-(x_icrs[2]*(x_icrs[0]*v_icrs[0] + x_icrs[1]*v_icrs[1]) - dxy**2*v_icrs[2]) / d**2 / dxy).to(u.mas/u.yr)
pm_radec = (mua_cosd, mud)
if coord_frame.name == 'icrs':
        pm = u.Quantity(list(map(np.atleast_1d, pm_radec)))
elif coord_frame.name == 'galactic':
# transform to ICRS proper motions
pm = pm_icrs_to_gal(c, pm_radec)
else:
raise NotImplementedError("Proper motions in the {} system are not "
"currently supported.".format(coord_frame.name))
if c.isscalar:
vr = vr.reshape(())
pm = (pm[0].reshape(()), pm[1].reshape(()))
return tuple(pm) + (vr,)
def vhel_to_gal(coordinate, pm, rv, vcirc=VCIRC, vlsr=VLSR, galactocentric_frame=None):
r"""
Convert a Heliocentric velocity in spherical coordinates (e.g., proper motion
and radial velocity) in the ICRS or Galactic frame to a Galactocentric, cartesian
velocity.
The frame of the input coordinate determines how to interpret the given
proper motions. For example, if the input coordinate is in the ICRS frame, the
proper motions are assumed to be :math:`(\mu_\alpha\cos\delta,\mu_\delta)`. This
function also handles array inputs (see examples below).
Examples
--------
>>> import astropy.units as u
>>> import astropy.coordinates as coord
>>> c = coord.SkyCoord(ra=196.5*u.degree, dec=-10.33*u.deg, distance=16.2*u.kpc)
>>> pm = [-1.53, 3.5]*u.mas/u.yr
>>> rv = 161.4*u.km/u.s
>>> vhel_to_gal(c, pm=pm, rv=rv)
<Quantity [-137.29984564, 262.64052249, 305.50786499] km / s>
>>> c = coord.SkyCoord(ra=[196.5,51.3]*u.degree, dec=[-10.33,2.1]*u.deg, distance=[16.2,11.]*u.kpc)
>>> pm = [[-1.53,4.5], [3.5,10.9]]*u.mas/u.yr
>>> rv = [161.4,-210.2]*u.km/u.s
>>> vhel_to_gal(c, pm=pm, rv=rv)
<Quantity [[-137.29984564,-212.10415701],
[ 262.64052249, 496.85687803],
[ 305.50786499, 554.16562628]] km / s>
Parameters
----------
coordinate : :class:`~astropy.coordinates.SkyCoord`, :class:`~astropy.coordinates.BaseCoordinateFrame`
This is most commonly a :class:`~astropy.coordinates.SkyCoord` object, but
alternatively, it can be any coordinate frame object that is transformable to the
Galactocentric frame.
pm : :class:`~astropy.units.Quantity` or iterable of :class:`~astropy.units.Quantity` objects
Proper motion in the same frame as the coordinate. For example, if your input
coordinate is in :class:`~astropy.coordinates.ICRS`, then the proper motion is
assumed to be in this frame as well. The order of elements should always be
proper motion in (longitude, latitude), and should have shape (2,N). The longitude
component is assumed to have the cosine of the latitude already multiplied in, so
that in ICRS, for example, this would be :math:`\mu_\alpha\cos\delta`.
rv : :class:`~astropy.units.Quantity`
Barycentric radial velocity. Should have shape (1,N) or (N,).
vcirc : :class:`~astropy.units.Quantity` (optional)
Circular velocity of the Sun.
vlsr : :class:`~astropy.units.Quantity` (optional)
Velocity of the Sun relative to the local standard
of rest (LSR).
galactocentric_frame : :class:`~astropy.coordinates.Galactocentric` (optional)
An instantiated :class:`~astropy.coordinates.Galactocentric` frame object with
custom parameters for the Galactocentric coordinates. For example, if you want
to set your own position of the Galactic center, you can pass in a frame with
custom `galcen_ra` and `galcen_dec`.
Returns
-------
    vxyz : :class:`~astropy.units.Quantity`
Cartesian velocity components (U,V,W). A :class:`~astropy.units.Quantity`
object with shape (3,N).
"""
if galactocentric_frame is None:
        galactocentric_frame = coord.Galactocentric()
# make sure this is a coordinate and get the frame for later use
c = coord.SkyCoord(coordinate)
coord_frame = c.frame
if coord_frame.name == 'icrs':
        pm_radec = u.Quantity(list(map(np.atleast_1d, pm)))
elif coord_frame.name == 'galactic':
# transform to ICRS proper motions
pm_radec = pm_gal_to_icrs(c, pm)
else:
raise NotImplementedError("Proper motions in the {} system are not "
"currently supported.".format(coord_frame.name))
# proper motion components: longitude, latitude
mura_cosdec, mudec = pm_radec
    # unpack the ICRS position angles and the distance
a,d,D = c.icrs.ra, c.icrs.dec, c.distance
with u.set_enabled_equivalencies(u.dimensionless_angles()):
v_icrs = [rv*np.cos(a)*np.cos(d) - D*np.sin(a)*mura_cosdec - D*np.cos(a)*np.sin(d)*mudec,
rv*np.sin(a)*np.cos(d) + D*np.cos(a)*mura_cosdec - D*np.sin(a)*np.sin(d)*mudec,
rv*np.sin(d) + D*np.cos(d)*mudec]
v_icrs = np.array([v.to(u.km/u.s).value for v in v_icrs]) * u.km/u.s
R = _icrs_gctc_velocity_matrix(galactocentric_frame)
orig_shape = v_icrs.shape
v_gc = R.dot(v_icrs.reshape(v_icrs.shape[0], np.prod(v_icrs.shape[1:]))).reshape(orig_shape)
# remove circular and LSR velocities
v_gc[1] = v_gc[1] + vcirc
for i in range(3):
v_gc[i] = v_gc[i] + vlsr[i]
if c.isscalar:
return v_gc.reshape((3,))
else:
return v_gc
def poincare_polar(w):
""" TODO: this needs a home """
R = np.sqrt(w[...,0]**2 + w[...,1]**2)
    # NOTE: the argument order here is deliberately swapped relative to
    # the conventional phi = np.arctan2(w[...,1], w[...,0])
    phi = np.arctan2(w[...,0], w[...,1])
vR = (w[...,0]*w[...,0+3] + w[...,1]*w[...,1+3]) / R
vPhi = w[...,0]*w[...,1+3] - w[...,1]*w[...,0+3]
new_w = np.vstack((R.T,phi.T,w[...,2].T, vR.T,vPhi.T,w[...,2+3].T)).T
return new_w
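# Usage sketch for poincare_polar (assumes an (N, 6) array of cartesian
# phase-space coordinates; values are illustrative):
#     >>> import numpy as np
#     >>> w = np.array([[8., 0., 0., 0., 220., 0.]])  # (x,y,z,vx,vy,vz)
#     >>> poincare_polar(w)   # columns: R, phi, z, vR, x*vy - y*vx, vz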
|
{
"content_hash": "a229387fb657a284d8a900cdf256c209",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 151,
"avg_line_length": 39.39660056657224,
"alnum_prop": 0.6293233623355144,
"repo_name": "abonaca/gary",
"id": "e041121215c0df5c83cdeb17e304b9de2a407b06",
"size": "13924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gary/coordinates/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "67332"
},
{
"name": "C++",
"bytes": "7004"
},
{
"name": "Python",
"bytes": "490956"
}
],
"symlink_target": ""
}
|
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
from litex.gen import LiteXModule
from litex.build.io import DDROutput
from litex_boards.platforms import mist
from litex.soc.cores.clock import CycloneIVPLL
from litex.soc.integration.soc import SoCRegion
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.video import VideoVGAPHY
from litex.soc.cores.led import LedChaser
from litedram.modules import MT48LC16M16
from litedram.phy import GENSDRPHY
# CRG ----------------------------------------------------------------------------------------------
class _CRG(LiteXModule):
def __init__(self, platform, sys_clk_freq):
self.rst = Signal()
self.cd_sys = ClockDomain()
self.cd_sys_ps = ClockDomain()
self.cd_vga = ClockDomain()
# # #
# Clk / Rst
clk27 = platform.request("clk27")
# PLL
self.pll = pll = CycloneIVPLL(speedgrade="-8")
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(clk27, 27e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys_ps, sys_clk_freq, phase=90)
pll.create_clkout(self.cd_vga, 40e6)
# SDRAM clock
self.specials += DDROutput(1, 0, platform.request("sdram_clock"), ClockSignal("sys_ps"))
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=50e6, with_led_chaser=True, with_video_terminal=False, **kwargs):
platform = mist.Platform()
# CRG --------------------------------------------------------------------------------------
self.crg = _CRG(platform, sys_clk_freq)
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq, ident="LiteX SoC on MIST", **kwargs)
# SDR SDRAM --------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.sdrphy = GENSDRPHY(platform.request("sdram"), sys_clk_freq)
self.add_sdram("sdram",
phy = self.sdrphy,
module = MT48LC16M16(sys_clk_freq, "1:1"),
l2_cache_size = kwargs.get("l2_size", 8192)
)
# Video Terminal ---------------------------------------------------------------------------
if with_video_terminal:
self.videophy = VideoVGAPHY(platform.request("vga"), clock_domain="vga")
self.add_video_terminal(phy=self.videophy, timings="800x600@60Hz", clock_domain="vga")
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
from litex.build.parser import LiteXArgumentParser
parser = LiteXArgumentParser(platform=mist.Platform, description="LiteX SoC on MIST.")
parser.add_target_argument("--sys-clk-freq", default=50e6, type=float, help="System clock frequency.")
parser.add_target_argument("--with-video-terminal", action="store_true", help="Enable Video Terminal (VGA).")
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = args.sys_clk_freq,
with_video_terminal = args.with_video_terminal,
**parser.soc_argdict
)
builder = Builder(soc, **parser.builder_argdict)
if args.build:
builder.build(**parser.toolchain_argdict)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))
if __name__ == "__main__":
main()
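# Typical invocation (a sketch; --build/--load are the standard flags added
# by LiteXArgumentParser above):
#   python3 -m litex_boards.targets.mist --build
#   python3 -m litex_boards.targets.mist --load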
|
{
"content_hash": "8ed94483d46083974241f22286b3bda5",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 118,
"avg_line_length": 39.82178217821782,
"alnum_prop": 0.5181501740427648,
"repo_name": "litex-hub/litex-boards",
"id": "b985caf09ba5b5bd0677ae0950d2b9994ad13e43",
"size": "4188",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "litex_boards/targets/mist.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1813530"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from caffe.models import Caffe
from employees.models import Employee
from .forms import CashReportForm, CompanyForm, ExpenseForm, FullExpenseForm
from .models import Company, Expense
class CompanyFormTest(TestCase):
"""Tests Company form."""
def setUp(self):
"""Test data setup."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
def test_company(self):
"""Check validation."""
bad_company = CompanyForm(
{'name': ''},
caffe=self.caffe
)
self.assertFalse(bad_company.is_valid())
good_company = CompanyForm(
{'name': 'very good'},
caffe=self.caffe
)
self.assertTrue(good_company.is_valid())
good_company.save()
copycat_company = CompanyForm(
{'name': 'very good'},
caffe=self.caffe
)
self.assertFalse(copycat_company.is_valid())
with self.assertRaises(Exception):
CompanyForm({'bakery'})
def test_name_validation(self):
"""Check name validation."""
# same name, different caffes
Company.objects.create(name='Bakery', caffe=self.filtry)
form_correct = CompanyForm(
{'name': 'Bakery'},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
# same name, same caffes
Company.objects.create(name='Bakery', caffe=self.caffe)
form_incorrect = CompanyForm(
{'name': 'Bakery'},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
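# The per-caffe uniqueness exercised above implies a check along these lines
# inside CompanyForm (a sketch only, not the actual forms.py implementation):
#
#     def clean_name(self):
#         name = self.cleaned_data['name']
#         if Company.objects.filter(name=name, caffe=self.caffe).exists():
#             raise forms.ValidationError('Company already exists.')
#         return name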
class ExpenseFormTest(TestCase):
"""Tests Expense form."""
def setUp(self):
"""Prepare objects for tests."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
self.company = Company.objects.create(
name='GoodCake',
caffe=self.caffe
)
self.company_f = Company.objects.create(
name='GoodCake',
caffe=self.filtry
)
def test_validation(self):
"""Check validation."""
empty_name_expense = ExpenseForm(
{'name': '', 'company': self.company},
caffe=self.caffe
)
self.assertFalse(empty_name_expense.is_valid())
empty_company_expense1 = ExpenseForm(
{'name': 'newspapers'},
caffe=self.caffe
)
self.assertTrue(empty_company_expense1.is_valid())
empty_company_expense2 = ExpenseForm(
{'name': 'newspapers', 'company': None},
caffe=self.caffe
)
self.assertTrue(empty_company_expense2.is_valid())
empty_company_expense3 = ExpenseForm(
{'name': 'newspapers', 'company': ''},
caffe=self.caffe
)
self.assertTrue(empty_company_expense3.is_valid())
complete_expense = ExpenseForm(
{'name': 'cakes', 'company': self.company.pk},
caffe=self.caffe
)
self.assertTrue(complete_expense.is_valid())
different_caffe = ExpenseForm(
{'name': 'cakes', 'company': self.company.pk},
caffe=self.filtry
)
with self.assertRaises(Exception):
different_caffe.save()
with self.assertRaises(Exception):
ExpenseForm({'name': 'cakes', 'company': self.company.pk})
def test_name_validation(self):
"""Check name validation."""
Expense.objects.create(
name='cakes',
company=self.company_f,
caffe=self.filtry
)
form_correct = ExpenseForm(
{'name': 'cakes', 'company': self.company.pk},
caffe=self.caffe
)
self.assertTrue(form_correct.is_valid())
# invalid name
Expense.objects.create(
name='cakes',
company=self.company,
caffe=self.caffe
)
form_incorrect = ExpenseForm(
{'name': 'cakes', 'company': self.company.pk},
caffe=self.caffe
)
self.assertFalse(form_incorrect.is_valid())
class FullExpenseFormTest(TestCase):
"""Tests FullExpense form."""
def setUp(self):
"""Prepare objects for tests."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
self.company = Company.objects.create(
name='GoodCake',
caffe=self.caffe
)
self.expense = Expense.objects.create(
name='cakes',
company=self.company,
caffe=self.caffe
)
def test_validation(self):
"""Test validation."""
cakes_for_10 = FullExpenseForm(
{'expense': self.expense.pk, 'amount': 10},
caffe=self.caffe
)
self.assertTrue(cakes_for_10.is_valid())
cakes_for_nothing = FullExpenseForm(
{'expense': self.expense.pk, 'amount': None},
caffe=self.caffe
)
self.assertFalse(cakes_for_nothing.is_valid())
money_for_nothing = FullExpenseForm(
{'expense': None, 'amount': 10},
caffe=self.caffe
)
self.assertFalse(money_for_nothing.is_valid())
different_caffe = FullExpenseForm(
{'expense': self.expense.pk, 'amount': 1},
caffe=self.filtry
)
with self.assertRaises(Exception):
different_caffe.save()
class CashReportFormTest(TestCase):
"""Tests CashReport form."""
def setUp(self):
"""Test data setup."""
self.caffe = Caffe.objects.create(
name='kafo',
city='Gliwice',
street='Wieczorka',
house_number='14',
postal_code='44-100'
)
self.filtry = Caffe.objects.create(
name='filtry',
city='Warszawa',
street='Filry',
house_number='14',
postal_code='44-100'
)
self.kate = Employee.objects.create(
username='KateT',
first_name='Kate',
last_name='Tempest',
telephone_number='12345678',
email='kate@tempest.com',
favorite_coffee='flat white',
caffe=self.caffe
)
def test_validation(self):
"""Test validation of CashReport form."""
no_cash_report = CashReportForm(
{'card_payments': 500, 'amount_due': 1700},
creator=self.kate,
caffe=self.caffe,
)
self.assertFalse(no_cash_report.is_valid())
no_cards_report = CashReportForm(
{
'cash_before_shift': 1000,
'cash_after_shift': 2000,
'amount_due': 1700
},
creator=self.kate,
caffe=self.caffe
)
self.assertFalse(no_cards_report.is_valid())
no_due_report = CashReportForm(
{
'card_payments': 500,
'cash_before_shift': 1000,
'cash_after_shift': 2000,
},
creator=self.kate,
caffe=self.caffe
)
self.assertFalse(no_due_report.is_valid())
perfectly_fine_report = CashReportForm(
{
'card_payments': 500,
'cash_before_shift': 1000,
'cash_after_shift': 2000,
'amount_due': 1700
},
creator=self.kate,
caffe=self.caffe
)
self.assertTrue(perfectly_fine_report.is_valid())
different_caffe = CashReportForm(
{
'card_payments': 500,
'cash_before_shift': 1000,
'cash_after_shift': 2000,
'amount_due': 1700
},
creator=self.kate,
caffe=self.filtry
)
with self.assertRaises(Exception):
different_caffe.save()
with self.assertRaises(Exception):
CashReportForm(
{
'card_payments': 500,
'cash_before_shift': 1000,
'cash_after_shift': 2000,
'amount_due': 1700
},
creator=self.kate
)
with self.assertRaises(Exception):
CashReportForm(
{
'card_payments': 500,
'cash_before_shift': 1000,
'cash_after_shift': 2000,
'amount_due': 1700
},
caffe=self.caffe
)
|
{
"content_hash": "5d77c616a420c52f55378ab495e10bf0",
"timestamp": "",
"source": "github",
"line_count": 353,
"max_line_length": 76,
"avg_line_length": 27.362606232294617,
"alnum_prop": 0.5051247541153329,
"repo_name": "VirrageS/io-kawiarnie",
"id": "d93e5080f81d2231a73da837a41fcb71c3ee9e9f",
"size": "9686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "caffe/cash/test_forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42462"
},
{
"name": "HTML",
"bytes": "57136"
},
{
"name": "JavaScript",
"bytes": "49334"
},
{
"name": "Python",
"bytes": "344245"
},
{
"name": "Shell",
"bytes": "199"
}
],
"symlink_target": ""
}
|
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import RendererBase
from matplotlib.testing.decorators import image_comparison, cleanup
import matplotlib.pyplot as plt
import matplotlib.transforms as transforms
import matplotlib.path as path
from nose.tools import assert_equal
import numpy as np
import os
import shutil
import tempfile
def test_uses_per_path():
id = transforms.Affine2D()
paths = [path.Path.unit_regular_polygon(i) for i in range(3, 7)]
tforms = [id.rotate(i) for i in range(1, 5)]
offsets = np.arange(20).reshape((10, 2))
facecolors = ['red', 'green']
edgecolors = ['red', 'green']
def check(master_transform, paths, all_transforms,
offsets, facecolors, edgecolors):
rb = RendererBase()
raw_paths = list(rb._iter_collection_raw_paths(master_transform,
paths, all_transforms))
gc = rb.new_gc()
ids = [path_id for xo, yo, path_id, gc0, rgbFace in
rb._iter_collection(gc, master_transform, all_transforms,
range(len(raw_paths)), offsets,
transforms.IdentityTransform(),
facecolors, edgecolors, [], [], [False],
[], 'data')]
uses = rb._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
seen = [0] * len(raw_paths)
for i in ids:
seen[i] += 1
for n in seen:
assert n in (uses-1, uses)
check(id, paths, tforms, offsets, facecolors, edgecolors)
check(id, paths[0:1], tforms, offsets, facecolors, edgecolors)
check(id, [], tforms, offsets, facecolors, edgecolors)
check(id, paths, tforms[0:1], offsets, facecolors, edgecolors)
check(id, paths, [], offsets, facecolors, edgecolors)
for n in range(0, offsets.shape[0]):
check(id, paths, tforms, offsets[0:n, :], facecolors, edgecolors)
check(id, paths, tforms, offsets, [], edgecolors)
check(id, paths, tforms, offsets, facecolors, [])
check(id, paths, tforms, offsets, [], [])
check(id, paths, tforms, offsets, facecolors[0:1], edgecolors)
@cleanup
def test_get_default_filename():
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
filename = canvas.get_default_filename()
assert_equal(filename, 'image.png')
finally:
shutil.rmtree(test_dir)
@cleanup
def test_get_default_filename_already_exists():
# From #3068: Suggest non-existing default filename
try:
test_dir = tempfile.mkdtemp()
plt.rcParams['savefig.directory'] = test_dir
fig = plt.figure()
canvas = FigureCanvasBase(fig)
# create 'image.png' in figure's save dir
open(os.path.join(test_dir, 'image.png'), 'w').close()
filename = canvas.get_default_filename()
assert_equal(filename, 'image-1.png')
finally:
shutil.rmtree(test_dir)
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
{
"content_hash": "0dc7fcddf71de7b20391266ffec7abc6",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 36.32222222222222,
"alnum_prop": 0.6072193331293974,
"repo_name": "yuanagain/seniorthesis",
"id": "780c34335891de3df2672f94d0e138d909cfa422",
"size": "3269",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/matplotlib/tests/test_backend_bases.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "246695"
},
{
"name": "C++",
"bytes": "3399079"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "Fortran",
"bytes": "9868"
},
{
"name": "HTML",
"bytes": "128332"
},
{
"name": "JavaScript",
"bytes": "23881"
},
{
"name": "Jupyter Notebook",
"bytes": "86661"
},
{
"name": "Makefile",
"bytes": "76057"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "22981564"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
}
|
import fudge
from contextlib import contextmanager
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase, Client
from nose.plugins.skip import SkipTest
from ignite.tests.decorators import ignite_skip
def get_users(filter_f=None):
users = User.objects.all()
assert len(users) > 0
if not filter_f:
return users
filtered = filter(filter_f, users)
assert len(filtered) > 0
return filtered
@contextmanager
def user_with_generated_id(fake_auth, **kwargs):
users = get_users(lambda u: not u.get_profile().has_chosen_identifier)
fake_auth.expects_call().returns(users[0])
yield
@contextmanager
def user_with_chosen_id(fake_auth, **kwargs):
users = get_users(lambda u: u.get_profile().has_chosen_identifier)
fake_auth.expects_call().returns(users[0])
yield
class MiddlewareTests(TestCase):
"""
Test `ProfileMiddleware`. Should ensure users have at least chosen a
display name.
"""
fixtures = ['users.json']
def _get_and_follow_redirect(self, client, path):
response = client.get(path)
if response.status_code != 301:
return response
location = response.get('location', None)
return client.get(location)
@fudge.patch('django_browserid.auth.BrowserIDBackend.authenticate')
def test_unsafe_paths(self, fake):
with user_with_generated_id(fake):
client = Client()
client.login()
response = self._get_and_follow_redirect(client, '/')
assert response.status_code == 302
location = response.get('location', None)
assert location.endswith(reverse('users_edit'))
response = self._get_and_follow_redirect(client, '/projects/')
assert response.status_code == 302
location = response.get('location', None)
assert location.endswith(reverse('users_edit'))
@fudge.patch('django_browserid.auth.BrowserIDBackend.authenticate')
def test_safe_paths(self, fake):
# This test was erroneously passing due to the 404 handler responding
# with a 200 status code. I don't currently have enough knowledge about
# how the profile code works to either fix the code or fix the test
raise SkipTest()
with user_with_generated_id(fake):
client = Client()
client.login()
expect_ok = (
self._get_and_follow_redirect(client, reverse('users_edit')),
self._get_and_follow_redirect(
client, reverse('django.views.static.serve', kwargs={
'path': '/media/css/innovate/main.css'
})
),
self._get_and_follow_redirect(
client, reverse('users_profile_add_link')),
self._get_and_follow_redirect(
client, reverse('users_profile_links')),
self._get_and_follow_redirect(
client, reverse('users_profile_delete_link', kwargs={
'id': 1,
})
)
)
for response in expect_ok:
self.assertEqual(200, response.status_code)
# users_signout is expected to 302 regardless.
response = self._get_and_follow_redirect(
client, reverse('users_signout'))
self.assertEqual(302, response.status_code)
location = response.get('location', None)
self.assertEqual('http://testserver/', location)
@ignite_skip
@fudge.patch('django_browserid.auth.BrowserIDBackend.authenticate')
def test_user_with_chosen_id(self, fake):
with user_with_chosen_id(fake):
client = Client()
client.login()
response = self._get_and_follow_redirect(client, '/')
assert response.status_code == 200
response = self._get_and_follow_redirect(client, '/projects/')
assert response.status_code == 200
|
{
"content_hash": "6334a068f4c01ca999673293bd4e4b71",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 36.785714285714285,
"alnum_prop": 0.6021844660194174,
"repo_name": "mozilla/mozilla-ignite",
"id": "729ca0a375004dcc42db213c77c0d4c29a86dbb5",
"size": "4120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/innovate/tests/test_middleware.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "230222"
},
{
"name": "JavaScript",
"bytes": "457971"
},
{
"name": "Puppet",
"bytes": "11448"
},
{
"name": "Python",
"bytes": "4064774"
},
{
"name": "SQL",
"bytes": "71"
},
{
"name": "Shell",
"bytes": "1462"
},
{
"name": "TeX",
"bytes": "19491"
}
],
"symlink_target": ""
}
|
import sys
from django.core.management.base import BaseCommand
from apps.bulk.models import SovereigntyHolder, Sovereignty
from utils import connection
# execute api tasks
class Command(BaseCommand):
    # handle() is what actually gets executed
def handle(self, *args, **options):
systems = connection.api_request("Sovereignty").solarSystems
for sov in systems:
system, created = Sovereignty.objects.get_or_create(
solarsystemid=sov.solarSystemID,
solarsystemname=sov.solarSystemName,
)
try:
holder = SovereigntyHolder.objects.filter(
sovereignty=system
).order_by("-last_refresh")[0]
if holder.allianceid != int(sov.allianceID) or \
holder.factionid != int(sov.factionID) or \
holder.corporationid != int(sov.corporationID):
self.create_holder(sov, system)
except IndexError:
self.create_holder(sov, system)
            except Exception:
                self.stdout.write(
                    "Unexpected error updating sovereignty: %s"
                    % str(sys.exc_info()[1])
                )
    # create a new sovereignty holder record
    def create_holder(self, sov, system):
try:
SovereigntyHolder.objects.create(
sovereignty=system,
allianceid=int(sov.allianceID),
corporationid=int(sov.corporationID),
factionid=int(sov.factionID),
)
        except Exception:
            self.stdout.write(
                "Unexpected error creating sovereignty: %s"
                % str(sys.exc_info()[1])
            )
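# Invocation sketch: as a standard Django management command (named after
# this module's file), run with:
#   python manage.py update_sovereignty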
|
{
"content_hash": "545cfb51a23492592d764a4022a05546",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 71,
"avg_line_length": 34.38,
"alnum_prop": 0.5567190226876091,
"repo_name": "Sult/evetool",
"id": "097493b12a9cf54fc836389d0b6a5990244e7f6e",
"size": "1719",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tasks/management/commands/update_sovereignty.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7241"
},
{
"name": "HTML",
"bytes": "40759"
},
{
"name": "JavaScript",
"bytes": "546"
},
{
"name": "Python",
"bytes": "277072"
}
],
"symlink_target": ""
}
|
"""
=============================================
Compute MxNE with time-frequency sparse prior
=============================================
The TF-MxNE solver is a distributed inverse method (like dSPM or sLORETA)
that promotes focal (sparse) sources (such as dipole fitting techniques)
[1]_ [2]_. The benefits of this approach are that:
- it is spatio-temporal without assuming stationarity (sources properties
can vary over time)
- activations are localized in space, time and frequency in one step.
- with a built-in filtering process based on a short time Fourier
transform (STFT), data does not need to be low passed (just high pass
to make the signals zero mean).
- the solver solves a convex optimization problem, hence cannot be
trapped in local minima.
References
----------
.. [1] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
"Time-Frequency Mixed-Norm Estimates: Sparse M/EEG imaging with
non-stationary source activations",
Neuroimage, Volume 70, pp. 410-422, 15 April 2013.
DOI: 10.1016/j.neuroimage.2012.12.051
.. [2] A. Gramfort, D. Strohmeier, J. Haueisen, M. Hamalainen, M. Kowalski
"Functional Brain Imaging with M/EEG Using Structured Sparsity in
Time-Frequency Dictionaries",
Proceedings Information Processing in Medical Imaging
Lecture Notes in Computer Science, Volume 6801/2011, pp. 600-611, 2011.
DOI: 10.1007/978-3-642-22092-0_49
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
#
# License: BSD (3-clause)
import numpy as np
import mne
from mne.datasets import sample
from mne.minimum_norm import make_inverse_operator, apply_inverse
from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles
from mne.viz import (plot_sparse_source_estimates,
plot_dipole_locations, plot_dipole_amplitudes)
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path + '/subjects'
fwd_fname = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
ave_fname = data_path + '/MEG/sample/sample_audvis-no-filter-ave.fif'
cov_fname = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
# Read noise covariance matrix
cov = mne.read_cov(cov_fname)
# Handling average file
condition = 'Left visual'
evoked = mne.read_evokeds(ave_fname, condition=condition, baseline=(None, 0))
evoked = mne.pick_channels_evoked(evoked)
# We make the window slightly larger than what you'll eventually be interested
# in ([-0.05, 0.3]) to avoid edge effects.
evoked.crop(tmin=-0.1, tmax=0.4)
# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
###############################################################################
# Run solver
# alpha_space regularization parameter is between 0 and 100 (100 is high)
alpha_space = 30. # spatial regularization parameter
# alpha_time parameter promotes temporal smoothness
# (0 means no temporal regularization)
alpha_time = 1. # temporal regularization parameter
loose, depth = 0.2, 0.9 # loose orientation & depth weighting
# Compute dSPM solution to be used as weights in MxNE
inverse_operator = make_inverse_operator(evoked.info, forward, cov,
loose=loose, depth=depth)
stc_dspm = apply_inverse(evoked, inverse_operator, lambda2=1. / 9.,
method='dSPM')
# Compute TF-MxNE inverse solution with dipole output
dipoles, residual = tf_mixed_norm(
evoked, forward, cov, alpha_space, alpha_time, loose=loose, depth=depth,
maxit=200, tol=1e-6, weights=stc_dspm, weights_min=8., debias=True,
wsize=16, tstep=4, window=0.05, return_as_dipoles=True,
return_residual=True)
# Crop to remove edges
for dip in dipoles:
dip.crop(tmin=-0.05, tmax=0.3)
evoked.crop(tmin=-0.05, tmax=0.3)
residual.crop(tmin=-0.05, tmax=0.3)
###############################################################################
# Plot dipole activations
plot_dipole_amplitudes(dipoles)
# Plot dipole location of the strongest dipole with MRI slices
idx = np.argmax([np.max(np.abs(dip.amplitude)) for dip in dipoles])
plot_dipole_locations(dipoles[idx], forward['mri_head_t'], 'sample',
subjects_dir=subjects_dir, mode='orthoview',
idx='amplitude')
# # Plot dipole locations of all dipoles with MRI slices
# for dip in dipoles:
# plot_dipole_locations(dip, forward['mri_head_t'], 'sample',
# subjects_dir=subjects_dir, mode='orthoview',
# idx='amplitude')
###############################################################################
# Show the evoked response and the residual for gradiometers
ylim = dict(grad=[-120, 120])
evoked.pick_types(meg='grad', exclude='bads')
evoked.plot(titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim,
proj=True)
residual.pick_types(meg='grad', exclude='bads')
residual.plot(titles=dict(grad='Residuals: Gradiometers'), ylim=ylim,
proj=True)
###############################################################################
# Generate stc from dipoles
stc = make_stc_from_dipoles(dipoles, forward['src'])
###############################################################################
# View in 2D and 3D ("glass" brain like 3D plot)
plot_sparse_source_estimates(forward['src'], stc, bgcolor=(1, 1, 1),
opacity=0.1, fig_name="TF-MxNE (cond %s)"
% condition, modes=['sphere'], scale_factors=[1.])
time_label = 'TF-MxNE time=%0.2f ms'
clim = dict(kind='value', lims=[10e-9, 15e-9, 20e-9])
brain = stc.plot('sample', 'inflated', 'rh', views='medial',
clim=clim, time_label=time_label, smoothing_steps=5,
subjects_dir=subjects_dir, initial_time=150, time_unit='ms')
brain.add_label("V1", color="yellow", scalar_thresh=.5, borders=True)
brain.add_label("V2", color="red", scalar_thresh=.5, borders=True)
|
{
"content_hash": "114d4569ebe06d79d43cd4d66b977563",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 79,
"avg_line_length": 41.95804195804196,
"alnum_prop": 0.6366666666666667,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "80be86f0c3eda3c0dd965acd2af82918f7fe2779",
"size": "6000",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "0.15/_downloads/plot_time_frequency_mixed_norm_inverse.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|
from BaseHTTPServer import BaseHTTPRequestHandler
import cgi
import json
import httplib
class HttpMsgHandler(BaseHTTPRequestHandler):
m_data = []
def do_GET(self):
try:
print(self.path)
print(self.headers.headers[0])
message = json.dumps(self.m_data)
self.send_response(httplib.OK)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(message)
except Exception as e:
print(e)
finally:
print("===============================================================")
def do_POST(self):
ctype, pdict = cgi.parse_header(self.headers['content-type'])
if(ctype != 'application/json'):
self.send_error(httplib.UNSUPPORTED_MEDIA_TYPE, "Only json data is supported")
return
length = int(self.headers['content-length'])
post_values = self.rfile.read(length)
try:
print("recv post data: %s" % post_values)
parse_values = json.loads(post_values)
self.m_data.append(parse_values)
self.send_response(httplib.OK)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(post_values)
except Exception as e:
print(e)
else:
print("handler post data ok")
finally:
print("===============================================================")
def start_http_server( ip, port ):
try:
from BaseHTTPServer import HTTPServer
http_server = HTTPServer((ip, port), HttpMsgHandler)
print("simple http server started ...")
http_server.serve_forever()
except Exception as e:
print(e)
if(__name__ == "__main__"):
    # Test command: curl -H "Content-type: application/json;charset=UTF-8" localhost:8890 -d "{\"key\":\"value\"}"
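    # To read back everything stored so far: curl localhost:8890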
start_http_server( "localhost", 8890 )
|
{
"content_hash": "2516d31410f5d50d798f0735c027d7a5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 107,
"avg_line_length": 34.41379310344828,
"alnum_prop": 0.5420841683366734,
"repo_name": "lyx003288/python_test",
"id": "353cbd505c43332d186f865de83bddcaff7c4239",
"size": "2046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_http/simple_http_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22759"
}
],
"symlink_target": ""
}
|
"""
XSLT processing engine
"""
import os, sys, operator, cStringIO, warnings
from gettext import gettext as _
DEFAULT_ENCODING = 'UTF-8'
#from amara import DEFAULT_ENCODING
from amara import ReaderError, tree
from amara.lib import iri, inputsource
from amara.xpath import XPathError
from amara.xslt import XsltError
from amara.xslt import xsltcontext
from amara.xslt.reader import stylesheet_reader
from amara.xslt.result import stringresult
# For builtin extension elements/functions
#from amara.xslt import exslt
#from amara.xslt.extensions import builtins
# Media types that signal that an xml-stylesheet PI points to an XSLT
# document, when the PI contains a type pseudo-attribute.
#
# Note: RFC 3023 suggests application/xslt+xml, and says the +xml
# suffix is not required (but is a SHOULD). If you want to use the
# 'text/xsl' convention, do Processor.XSLT_IMT.append('text/xsl')
# after import, but before instantiating Processor.Processor.
#
XSLT_IMT = ['application/xslt+xml', 'application/xslt',
'text/xml', 'application/xml']
# for xsl:message output
MESSAGE_TEMPLATE = _('STYLESHEET MESSAGE:\n%s\nEND STYLESHEET MESSAGE\n')
class processor(object):
"""
An XSLT processing engine (4XSLT).
Typical usage:
        from amara.lib import inputsource
        # this is just one of several ways to create input sources
        STY = inputsource('/absolute/path/to/stylesheet.xslt')
        SRC = inputsource('/absolute/path/to/doc.xml')
        proc = processor()
        proc.append_transform(STY)
        result = proc.run(SRC)
See the run() and runNode() methods for additional runtime options.
The ignore_pis flag, if true, will cause xml-stylesheet
processing instructions in the source document to be ignored.
Important instance attributes:
.extension_parameters: a dictionary that allows one to attach
additional metadata to a processor instance. We use this
to make invocation-specific data like HTTP query args and
logfile handles available to XSLT extension functions & elements
when invoking the processor via the repository's HTTP server.
.media_descriptors: the preferred/target media, for the purpose of
picking from multiple xml-stylesheet processing instructions.
Defaults to None. If set to a string, xml-stylesheet PIs
without that string in their 'media' pseudo-attribute will be
ignored.
.message_template: format string for `xsl:message` output.
.transform: the complete transformation tree.
"""
# defaults for ExtendedProcessingElements.ExtendedProcessor
_4xslt_debug = False
_4xslt_profile = False
_4xslt_trace = False
_suppress_messages = False
# has the "built-in template invoked with params" warning been issued?
_builtInWarningGiven = False
def __init__(self, ignore_pis=False, content_types=None,
media_descriptors=None, extension_parameters=None,
message_stream=None, message_template=None):
self.ignore_pis = ignore_pis
if content_types is None:
content_types = set(XSLT_IMT)
self.content_types = content_types
        # Although not in the DTD, the prose for the HTML LINK element states
        # that the default value for the media attribute is "screen".
if media_descriptors is None:
media_descriptors = set(['screen'])
self.media_descriptors = media_descriptors
if extension_parameters is None:
extension_parameters = {}
self.extension_parameters = extension_parameters
if message_stream is None:
message_stream = sys.stderr
self.message_stream = message_stream
if message_template is None:
message_template = MESSAGE_TEMPLATE
self.message_template = message_template
        self.transform = None
        self._documents = {} # document cache used by append_transform()
        self._extfunctions = {} #Cache ext functions to give to the context
        self._extelements = {}
#self._extelements.update(exslt.ExtElements)
#self._extelements.update(builtins.ExtElements)
self._reader = stylesheet_reader()
return
def getStripElements(self):
if self.transform:
return self.transform.space_rules
else:
return ()
def registerExtensionModules(self, modules):
"""
Registers a list of Python modules that have public ExtFunctions
and/or ExtElements dictionaries.
In a Python module that contains extension implementations,
define a dictionary named ExtFunctions that, for each extension
function or element, maps a (namespace-URI, xpath-function-name)
tuple to a direct reference to the Python function that
implements the extension. To make the function available to the
Processor, call this method, passing in ['your.module.name'].
See Ft.Xml.Xslt.Exslt.*, Ft.Xml.Xslt.BuiltInExtFunctions and
BuiltInExtElements for working examples of extension modules.
"""
for module in modules:
if module:
module = __import__(module, {}, {}, ['ExtFunctions'])
if hasattr(module, 'ExtFunctions'):
                    self._extfunctions.update(module.ExtFunctions)
if hasattr(module, 'ExtElements'):
elements = module.ExtElements
self._extelements.update(elements)
self._reader.addExtensionElementMapping(elements)
return
def registerExtensionFunction(self, namespace, localName, function):
"""
Register a single extension function.
For example, implement your own extension function as a Python
function that takes an Ft.Xml.XPath.Context.Context instance as
its first argument. Then, to make the function available to the
Processor, call this method, passing in the namespace URI and
local name of the function, and a direct reference to the Python
function that implements the extension.
See also registerExtensionModules().
"""
self._extfunctions[namespace, localName] = function
return
def registerExtensionElement(self, namespace, localName, klass):
"""
Register a single extension element.
For example, implement your own extension element as a subclass
of Ft.Xml.Xslt.xsltelement. To make the element available to the
Processor, call this method, passing in the namespace URI and
local name of the element, and a direct reference to the class
that implements the extension.
See also registerExtensionModules().
"""
self._extelements[namespace, localName] = klass
mapping = { (namespace, localName) : klass }
self._reader.addExtensionElementMapping(mapping)
return
def append_transform(self, source, uri=None):
"""
Add an XSL transformation document to the processor.
uri - optional override document URI.
This method establishes the transformation that the processor will use
to transform a source tree into a result tree. If a transform has
already been appended, then this method is equivalent to having, in an
outer "shell" document, an `xsl:import` for the most recently added
transform followed by an `xsl:import` for the document accessible via
the given `transform`.
"""
if isinstance(source, tree.node):
document = source.xml_root
if not uri:
try:
uri = document.xml_base
except AttributeError:
raise ValueError('base-uri required for %s' % document)
self._documents[uri] = document
self.transform = self._reader.parse(document)
else:
if not isinstance(source, inputsource):
source = inputsource(source, uri)
self.transform = self._reader.parse(source)
return
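    # Usage sketch (paths illustrative): appending two transforms makes the
    # second effectively import the first:
    #     proc = processor()
    #     proc.append_transform(inputsource('/path/to/base.xslt'))
    #     proc.append_transform(inputsource('/path/to/site-overrides.xslt'))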
def run(self, source, parameters=None, result=None):
"""
Transform a source document as given via an InputSource.
Assumes that either the Processor instance has already had
stylesheets appended (via appendStylesheet(), for example), or
the source document contains xml-stylesheet processing
instructions that are not being ignored.
The `parameters` argument is an optional dictionary of
stylesheet parameters, the keys of which may be given as
strings if they have no namespace, or as (uri, localname)
tuples otherwise.
        The optional `result` argument is a result object (see
        `amara.xslt.result`) that supplies the output writer and
        destination; if omitted, a `stringresult` is created and its
        clone is returned.
"""
try:
document = tree.parse(source)
except ReaderError, e:
raise XsltError(XsltError.SOURCE_PARSE_ERROR,
uri=(source.uri or '<Python string>'), text=e)
if self.__checkStylesheetPis(document, source):
            #Do it again with updated WS strip lists
            #NOTE: There is a case where this will produce the wrong results:
            #if previous stylesheets defined whitespace-stripping rules and the
            #processing instruction referenced a stylesheet that overrode some
            #of them, the originally trimmed space is already lost.
            #Regardless, we need to remove any new whitespace defined in the PI
self._stripElements(document)
return self._run(document, parameters, result)
def runNode(self, node, sourceUri=None, parameters=None, result=None,
preserveSrc=0, docInputSource=None):
"""
Transform a source document as given via a Domlette document
node.
Use Ft.Xml.Domlette.ConvertDocument() to create a Domlette
from some other type of DOM.
Assumes that either the Processor instance has already had
stylesheets appended (via appendStylesheet(), for example), or
the source document contains xml-stylesheet processing
instructions that are not being ignored.
sourceUri - The absolute URI of the document
entity that the node represents, and should be explicitly
provided, even if it is available from the node itself.
`parameters` - optional dictionary of
stylesheet parameters, the keys of which may be given as
strings if they have no namespace, or as (uri, localname)
tuples otherwise.
        result - optional result object that supplies the output writer
        and destination; if omitted, a stringresult is created and its
        clone is returned.
preserveSrc - (flag) If set signals that the source DOM should not be
mutated, as would normally happen when honoring XSLT whitespace
stripping requirements. Setting preserveSrc results in the
creation of a copy of the source DOM.
isrc - optional input source used strictly for further resolution
relative the given DOM
"""
        if not isinstance(node, tree.entity):
            # NOTE: MessageSource is from the legacy Ft.Xml package and is
            # not imported here; this lookup will raise NameError until it
            # is ported.
            raise ValueError(MessageSource.g_errorMessages[
                XsltError.CANNOT_TRANSFORM_FRAGMENT])
# A base URI must be absolute, but DOM L3 Load & Save allows
# implementation-dependent behavior if the URI is actually
# relative, empty or missing. We'll generate a URN for the
# InputSource's benefit if the base URI is empty/missing.
# Relative URIs can pass through; the resolvers will handle
# them appropriately (we hope).
        if not sourceUri:
            # (Uri refers to the legacy Ft.Lib.Uri module, not imported here)
            sourceUri = node.xml_base or Uri.BASIC_RESOLVER.generate()
if preserveSrc:
# preserve the node's baseURI so our DOM is a true copy
entity = tree.entity(node.xml_base)
for child in node:
entity.xml_append(entity.importNode(child, 1))
node = entity
self._stripElements(node)
if not docInputSource:
#Create a dummy iSrc
docInputSource = inputsource.input_source(
None, sourceUri, processIncludes=1,
stripElements=self.getStripElements(),
factory=self.inputSourceFactory)
if self.__checkStylesheetPis(node, docInputSource):
#Do it again with updated WS strip lists
            #NOTE: There is a case where this will produce the wrong results:
            #if previous stylesheets defined whitespace-stripping rules and the
            #processing instruction referenced a stylesheet that overrode some
            #of them, the originally trimmed space is already lost.
            #Regardless, we need to remove any new whitespace defined in the PI
self._stripElements(node)
return self._run(node, parameters, result)
def __cmp_stys(self, a, b):
"""
Internal function to assist in sorting xml-stylesheet
processing instructions. See __checkStylesheetPis().
"""
# sort by priority (natural order)
return cmp(a[0], b[0])
##
## For future reference, to support more advanced
## preferences, such as having an ordered list of
## preferred target media values rather than just one,
## and using the Internet media type list in a similar
## fashion, we can sort on multiple pseudo-attrs like
## this:
##
## sort by priority (natural order)
#if cmp(a[0], b[0]):
# return cmp(a[0], b[0])
## then media (natural order)
#elif cmp(a[1], b[1]):
# return cmp(a[1], b[1])
## then type (XSLT_IMT order)
#else:
# for imt in XSLT_IMT:
# if a[2] == imt:
# return b[2] != imt
# else:
# return -(b[2] == imt)
def __checkStylesheetPis(self, node, inputSource):
"""
Looks for xml-stylesheet processing instructions that are
children of the given node's root node, and calls
        append_transform() for each one, unless it does not have an
RFC 3023 compliant 'type' pseudo-attribute or does not have
a 'media' pseudo-attribute that matches the preferred media
        type that was set in `media_descriptors`. Uses the given
InputSource to resolve the 'href' pseudo-attribute. If the
instruction has an alternate="yes" pseudo-attribute, it is
treated as a candidate for the first stylesheet only.
"""
# relevant links:
# http://www.w3.org/TR/xml-stylesheet/
# http://lists.fourthought.com/pipermail/4suite/2001-January/001283.html
# http://lists.fourthought.com/pipermail/4suite/2003-February/005088.html
# http://lists.fourthought.com/pipermail/4suite/2003-February/005108.html
#
        # The xml-stylesheet spec defers to HTML 4.0's LINK element
# for semantics. It is not clear in HTML how the user-agent
# should interpret multiple LINK elements with rel="stylesheet"
# and without alternate="yes". In XSLT processing, we, like
# Saxon, choose to treat such subsequent non-alternates as
# imports (i.e. each non-alternate stylesheet is imported by
# the previous one).
#
# Given that alternates can appear before or after the
# non-alternate, there's no way to know whether they apply
# to the preceding or following non-alternate. So we choose
# to just treat alternates as only applying to the selection
# of the first stylesheet.
#
# Also, the absence of processing guidelines means we can't
# know whether to treat the absence of a 'media' pseudo-attr
# as implying that this is a default stylesheet (e.g. when the
# preferred media is "foo" and there is no "foo", you use
# this stylesheet), or whether to treat it as only being the
# appropriate stylesheet when no media preference is given to
# the processor.
#
# Furthermore, if more than one candidate for the first
# stylesheet is a match on the 'media' preference (or lack
# thereof), it's not clear what to do. Do we give preference
# to the one with a 'type' that is considered more favorable
# due to its position in the XSLT_IMT list? Do we just use the
# first one? The last one? For now, if there's one that does
# not have alternate="yes", we use that one; otherwise we use
# the first one. Thus, given
# <?xml-stylesheet type="application/xslt+xml" href="sty0"?>
# <?xml-stylesheet type="application/xslt+xml" href="sty1"
# alternate="yes"?>
# sty0 is used, even if the PIs are swapped; whereas if the
# only choices are
# <?xml-stylesheet type="application/xslt+xml" href="sty1"
# alternate="yes"?>
# <?xml-stylesheet type="application/xslt+xml" href="sty2"
# alternate="yes"?>
# then sty1 is used because it comes first.
root = node.xml_root
c = 1 # count of alternates, +1
found_nonalt = 0
stys = []
for node in root:
# only look at prolog, not anything that comes after it
if isinstance(node, tree.element): break
# build dict of pseudo-attrs for the xml-stylesheet PIs
if not (isinstance(node, tree.processing_instruction)
and node.xml_target == 'xml-stylesheet'):
continue
pseudo_attrs = {}
for attdecl in node.xml_data.split():
try:
name, value = attdecl.split('=', 1)
except ValueError:
pass
else:
pseudo_attrs[name] = value[1:-1]
# PI must have both href, type pseudo-attributes;
# type pseudo-attr must match valid XSLT types;
# media pseudo-attr must match preferred media
# (which can be None)
if 'href' in pseudo_attrs and 'type' in pseudo_attrs:
href = pseudo_attrs['href']
imt = pseudo_attrs['type']
media = pseudo_attrs.get('media') # defaults to None
if media in self.media_descriptors and imt in XSLT_IMT:
if ('alternate' in pseudo_attrs
and pseudo_attrs['alternate'] == 'yes'):
stys.append((1, media, imt, href))
elif found_nonalt:
c += 1
stys.append((c, media, imt, href))
else:
stys.append((0, media, imt, href))
found_nonalt = 1
stys.sort(self.__cmp_stys)
# Assume stylesheets for irrelevant media and disallowed IMTs
# are filtered out. Assume stylesheets are in ascending order
# by level. Now just use first stylesheet at each level, but
# treat levels 0 and 1 the same. Meaning of the levels:
# level 0 is first without alternate="yes"
# level 1 is all with alternate="yes"
# levels 2 and up are the others without alternate="yes"
hrefs = []
last_level = -1
#print "stys=",repr(stys)
for sty in stys:
level = sty[0]
if level == 1 and last_level == 0:
# we want to ignore level 1s if we had a level 0
last_level = 1
if level == last_level:
# proceed to next level (effectively, we only use
# the first stylesheet at each level)
continue
last_level = level
hrefs.append(sty[3])
if hrefs:
for href in hrefs:
# Resolve the PI with the InputSource for the document
# containing the PI, so relative hrefs work correctly
new_source = inputSource.resolve(href,
hint='xml-stylesheet PI')
                self.append_transform(new_source)
# Return true if any xml-stylesheet PIs were processed
# (i.e., the stylesheets they reference are going to be used)
return not not hrefs
def _run(self, node, parameters=None, result=None):
"""
Runs the stylesheet processor against the given XML DOM node with the
stylesheets that have been registered. It does not mutate the source.
        If `result` is given, its writer is used in place of the default
        output-method decisions for choosing the proper writer.
"""
#QUESTION: What about ws stripping?
#ANSWER: Whitespace stripping happens only in the run*() interfaces.
# This method is use-at-your-own-risk. The XSLT conformance of the
# source is maintained by the caller. This exists as a performance
# hook.
parameters = parameters or {}
self.attributeSets = {}
self.keys = {}
#See f:chain-to extension element
self.chainTo = None
self.chainParams = None
if not self.transform:
raise XsltError(XsltError.NO_STYLESHEET)
# Use an internal result to gather the output only if the caller
# didn't supply other means of retrieving it.
if result is None:
result = stringresult()
result.parameters = self.transform.output_parameters
assert result.writer
# Initialize any stylesheet parameters
initial_variables = parameters.copy()
for name in parameters:
if name not in self.transform.parameters:
del initial_variables[name]
# Prepare the stylesheet for processing
context = xsltcontext.xsltcontext(node,
variables=initial_variables,
transform=self.transform,
processor=self,
extfunctions=self._extfunctions,
output_parameters=result.parameters)
context.add_document(node, node.xml_base)
context.push_writer(result.writer)
self.transform.root.prime(context)
# Process the document
try:
self.transform.apply_templates(context, [node])
        except XPathError, e:
            # Annotate the error with the position of the failing
            # instruction before re-raising.
            instruction = context.instruction
            strerror = str(e)
            e.message = MessageSource.EXPRESSION_POSITION_INFO % (
                instruction.baseUri, instruction.lineNumber,
                instruction.columnNumber, instruction.nodeName, strerror)
            raise
        except XsltError:
            raise
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            # Wrap any other exception with position info plus the
            # lower-level traceback to ease debugging.
            import traceback
            sio = cStringIO.StringIO()
            sio.write("Lower-level traceback:\n")
            traceback.print_exc(None, sio)
            instruction = context.instruction
            strerror = sio.getvalue()
            raise RuntimeError(MessageSource.EXPRESSION_POSITION_INFO % (
                instruction.baseUri, instruction.lineNumber,
                instruction.columnNumber, instruction.nodeName, strerror))
writer = context.pop_writer()
assert writer is result.writer
# Perform cleanup
self.transform.root.teardown()
if isinstance(result, stringresult):
return result.clone()
return result
def message_control(self, suppress):
"""
Controls whether the processor emits warnings and xsl:message
messages. Call with suppress=1 to suppress such output.
"""
self._suppress_messages = not not suppress
return
def message(self, message):
"""
Intended to be used by XSLT instruction implementations only.
        Used by xsl:message to emit a message to the message stream, unless
        such messages are suppressed (see message_control()). The message is
        formatted with the message_template instance attribute.
"""
message = self.message_template % (message,)
if not self._suppress_messages:
self.message_stream.write(message)
self.message_stream.flush()
return
def warning(self, message):
"""
Emits a warning via Python's warnings framework, unless warnings
        are suppressed (see message_control()).
Used, for example, to announce that built-in templates are being
invoked with params.
"""
if not self._suppress_messages:
            # Using stacklevel=2 to show the caller's frame where the warning occurred.
warnings.warn(message, stacklevel=2)
return
def addHandler(self, outputParams, stream):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with an output writer wrapper that first
determines which XSLT output method is going to be used (i.e.,
by looking at the output parameters or waiting to see if an
'html' element is the first new node generated), then replaces
itself with the appropriate writer instance.
outputParams is an Ft.Xml.Xslt.OutputParameters instance.
stream will be passed on to the constructor of the real writer.
"""
handler = OutputHandler.OutputHandler(outputParams, stream)
self.writers.append(handler)
handler.startDocument()
return
def removeHandler(self):
"""
Intended to be used by XSLT instruction implementations only.
Deletes the most recently added output writer.
"""
self.writers[-1].endDocument()
del self.writers[-1]
return
def pushResultTree(self, baseUri, implementation=None):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with a new output writer that produces
a separate document. The new document will have the given
baseUri as its URI. This is used to generate result tree
fragments.
        The implementation argument is accepted for backwards compatibility
        but is currently ignored.
"""
writer = RtfWriter.RtfWriter(self.outputParams, baseUri)
self.writers.append(writer)
return writer
def pushResultString(self):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with an output writer that buffers the text
from text events and keeps track of whether non-text events
occurred. This is used by the implementations of XSLT
instructions such as xsl:attribute.
"""
writer = StringWriter.StringWriter(self.outputParams)
self.writers.append(writer)
return
def pushResult(self, handler=None):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with a new output writer (the given handler
of SAX-like output events).
"""
if handler is None:
warnings.warn("Use pushResultTree(uri) to create RTFs",
DeprecationWarning, stacklevel=2)
handler = RtfWriter.RtfWriter(self.outputParams,
self.stylesheet.baseUri)
self.writers.append(handler)
handler.startDocument()
return
def popResult(self):
"""
Intended to be used by XSLT instruction implementations only.
Ends temporary output writing that was started with
pushResultString(), pushResultTree(), or pushResult(), and
returns the result.
"""
handler = self.writers[-1]
del self.writers[-1]
handler.endDocument()
return handler.getResult()
def writer(self):
"""
Intended to be used by XSLT instruction implementations only.
Returns the current output writer.
"""
return self.writers[-1]
writer = property(writer)
def _strip_elements(self, node):
stripElements = self.getStripElements()
if stripElements:
StripElements.StripElements(node, stripElements)
return
def reset(self):
"""
Returns the processor to a state where it can be used to do a
new transformation with a new stylesheet. Deletes the current
stylesheet tree, and may do other cleanup.
"""
self.stylesheet = None
self.getStylesheetReader().reset()
return
|
{
"content_hash": "8bf72d7e8d9d7d728b76c8b82a9db162",
"timestamp": "",
"source": "github",
"line_count": 728,
"max_line_length": 94,
"avg_line_length": 41.01510989010989,
"alnum_prop": 0.6267122140728089,
"repo_name": "zepheira/amara",
"id": "785ee799ed42402ae80e6ac80e20f73c050d02f8",
"size": "29958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/xslt/processor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1830216"
},
{
"name": "C++",
"bytes": "82201"
},
{
"name": "GLSL",
"bytes": "5081"
},
{
"name": "HTML",
"bytes": "578831"
},
{
"name": "JavaScript",
"bytes": "18734"
},
{
"name": "Logos",
"bytes": "175"
},
{
"name": "Objective-C",
"bytes": "26041"
},
{
"name": "Python",
"bytes": "1507578"
},
{
"name": "Shell",
"bytes": "2497"
},
{
"name": "XSLT",
"bytes": "398316"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import TestSuite
def get_test_suite_from_test_cases(test_cases):
suite = TestSuite()
for test_class in test_cases:
tests = unittest.TestLoader().loadTestsFromTestCase(test_class)
suite.addTests(tests)
return suite
def get_test_suite_from_test_suites(suites):
return unittest.TestSuite(suites)
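# Example usage (hypothetical test case classes):
#   suite = get_test_suite_from_test_cases([FooTestCase, BarTestCase])
#   unittest.TextTestRunner().run(suite)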
|
{
"content_hash": "daf29d9b9e2bb35b7aa5646affb03036",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 25.642857142857142,
"alnum_prop": 0.7270194986072424,
"repo_name": "marcoprado17/flask-bone",
"id": "e1c18e5fbd22a5cc0488f26fcae7964b15bf72e9",
"size": "860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/flask_bombril/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3196"
},
{
"name": "HTML",
"bytes": "10430"
},
{
"name": "JavaScript",
"bytes": "3983"
},
{
"name": "Python",
"bytes": "96101"
},
{
"name": "Shell",
"bytes": "2801"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import abc
import logging
import os
import sys
from optparse import OptionGroup
from optparse import OptionParser
from uge.config.config_manager import ConfigManager
from uge.exceptions.invalid_argument import InvalidArgument
from uge.exceptions.qconf_exception import QconfException
from uge.log.log_manager import LogManager
# compatible with Python 2 *and* 3:
ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
class QconfCli(ABC):
""" Base qconf command line interface class. """
def __init__(self, valid_arg_count=0):
"""
Class constructor.
:param valid_arg_count: Number of allowed positional arguments (default: 0).
:type valid_arg_count: int
"""
self.logger = LogManager.get_instance().get_logger(self.__class__.__name__)
self.parser = OptionParser(add_help_option=False)
self.options = {}
self.args = []
self.valid_arg_count = valid_arg_count
self.option_group_dict = {}
common_group = 'Common Options'
self.add_option_group(common_group, None)
self.add_option_to_group(common_group, '-h', '--help', action='help', help='Show this help message and exit.')
self.add_option_to_group(common_group, '-?', '', action='help', help='Show this help message and exit.')
self.add_option_to_group(common_group, '-v', '', action='store_true', dest='cmd_version', default=False,
help='Print version and exit.')
self.add_option_to_group(common_group, '-d', '--debug', dest='console_log_level',
help='Set debug level; valid values are: critical, error, warning, info, debug')
def add_option(self, *args, **kwargs):
"""
Add CLI option.
"""
self.parser.add_option(*args, **kwargs)
def add_option_to_group(self, group_name, *args, **kwargs):
"""
Add option to the given group.
Group should be created using add_option_group().
:param group_name: Group name.
:type group_name: str
"""
group = self.option_group_dict.get(group_name)
group.add_option(*args, **kwargs)
def add_option_group(self, group_name, desc):
"""
Add option group.
:param group_name: Group name.
:type group_name: str
"""
group = OptionGroup(self.parser, group_name, desc)
self.parser.add_option_group(group)
self.option_group_dict[group_name] = group
def parse_args(self, usage=None):
"""
Parse command arguments.
:param usage: Command usage.
:type usage: str
"""
if usage:
self.parser.usage = usage
try:
(self.options, self.args) = self.parser.parse_args()
except SystemExit as rc:
sys.stdout.flush()
sys.stderr.flush()
os._exit(int(str(rc)))
if self.valid_arg_count < len(self.args):
            # More positional args were supplied than are allowed.
msg = "Invalid Argument(s):"
for arg in self.args[self.valid_arg_count:]:
msg += " " + arg
raise InvalidArgument(msg)
opt_dict = self.options.__dict__
if opt_dict.get('cmd_version'):
print('%s version: %s' % (os.path.basename(sys.argv[0]), ConfigManager.get_instance().get_version()))
os._exit(0)
# Log level.
console_log_level = opt_dict.get('console_log_level', None)
if console_log_level:
LogManager.get_instance().set_console_log_level(console_log_level)
# Check input arguments.
self.check_input_args()
return (self.options, self.args)
def usage(self, s=None):
""" Print the help provided by optparse. """
if s: print('Error:', s, '\n', file=sys.stderr)
self.parser.print_help()
os._exit(1)
def get_options(self):
""" Returns the command line options. """
return self.options
def get_n_args(self):
""" Returns the number of command line arguments. """
return len(self.args)
def get_args(self):
""" Returns the command line argument list. """
return self.args
def get_arg(self, i):
""" Returns the i-th command line argument. """
return self.args[i]
@abc.abstractmethod
def run_command(self):
""" This method must be implemented by the derived class. """
pass
def check_input_args(self):
"""
This method should verify required arguments in the derived class.
"""
pass
def run(self):
"""
Run command. This method simply invokes run_command() and handles
any exceptions.
"""
try:
self.run_command()
except QconfException as ex:
if self.logger.level < logging.INFO:
self.logger.exception('%s' % ex)
print('%s' % ex.get_error_message())
raise SystemExit(ex.get_error_code())
except SystemExit as ex:
raise
except Exception as ex:
self.logger.exception('%s' % ex)
print('%s' % ex)
raise SystemExit(-1)
#############################################################################
# Testing.
if __name__ == '__main__':
    # QconfCli is abstract (run_command is an abstractmethod), so use a
    # trivial concrete subclass for this demo; allow up to 3 positional args.
    class _DemoCli(QconfCli):
        def run_command(self):
            pass
    cli = _DemoCli(valid_arg_count=3)
cli.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
cli.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = cli.parse_args()
print('OPTIONS: ', options)
print('ARGS: ', args)
print('OPTIONS: ', cli.get_options())
print('ARGS: ', cli.get_args())
print('options.filename', options.filename)
print('cli.getOptions().filename', cli.get_options().filename)
o = cli.get_options()
print('o.filename', o.filename)
print('cli.get_args()', cli.get_args())
print('len(cli.get_args())', len(cli.get_args()))
for a in cli.get_args():
print('arg', a)
first_arg = cli.get_arg(0)
print('first_arg', first_arg)
second_arg = cli.get_arg(1)
print('second_arg', second_arg)
try:
third_arg = cli.get_arg(2)
print('third_arg', third_arg)
except:
print('no third arg')
|
{
"content_hash": "d01547e51e9e0daee8a95e8ffb59b218",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 118,
"avg_line_length": 32.38308457711443,
"alnum_prop": 0.5667537256106929,
"repo_name": "gridengine/config-api",
"id": "d0977050cc49e89d275e03b49f311d4c629109c6",
"size": "7353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "uge/cli/qconf_cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "7063"
},
{
"name": "Python",
"bytes": "660511"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
from datetime import datetime
import copy
import decimal
import logging
import os
import re
import threading
import time
import warnings
try:
import sqlite3
except ImportError:
sqlite3 = None
try:
import psycopg2
except ImportError:
psycopg2 = None
try:
import MySQLdb as mysql
except ImportError:
mysql = None
__all__ = [
'ImproperlyConfigured', 'SqliteDatabase', 'MySQLDatabase', 'PostgresqlDatabase',
'asc', 'desc', 'Count', 'Max', 'Min', 'Sum', 'Q', 'Field', 'CharField', 'TextField',
'DateTimeField', 'BooleanField', 'DecimalField', 'FloatField', 'IntegerField',
'PrimaryKeyField', 'ForeignKeyField', 'DoubleField', 'BigIntegerField', 'Model',
'filter_query', 'annotate_query', 'F', 'R',
]
class ImproperlyConfigured(Exception):
pass
if sqlite3 is None and psycopg2 is None and mysql is None:
raise ImproperlyConfigured('Either sqlite3, psycopg2 or MySQLdb must be installed')
if sqlite3:
sqlite3.register_adapter(decimal.Decimal, lambda v: str(v))
sqlite3.register_converter('decimal', lambda v: decimal.Decimal(v))
if psycopg2:
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
DATABASE_NAME = os.environ.get('PEEWEE_DATABASE', 'peewee.db')
logger = logging.getLogger('peewee.logger')
class BaseAdapter(object):
"""
The various subclasses of `BaseAdapter` provide a bridge between the high-
level `Database` abstraction and the underlying python libraries like
psycopg2. It also provides a way to unify the pythonic field types with
the underlying column types used by the database engine.
The `BaseAdapter` provides two types of mappings:
- mapping between filter operations and their database equivalents
- mapping between basic field types and their database column types
The `BaseAdapter` also is the mechanism used by the `Database` class to:
- handle connections with the database
- extract information from the database cursor
"""
operations = {'eq': '= %s'}
interpolation = '%s'
sequence_support = False
for_update_support = False
subquery_delete_same_table = True
reserved_tables = []
quote_char = '"'
def get_field_types(self):
field_types = {
'integer': 'INTEGER',
'bigint': 'INTEGER',
'float': 'REAL',
'decimal': 'DECIMAL',
'double': 'REAL',
'string': 'VARCHAR',
'text': 'TEXT',
'datetime': 'DATETIME',
'primary_key': 'INTEGER',
'primary_key_with_sequence': 'INTEGER',
'foreign_key': 'INTEGER',
'boolean': 'SMALLINT',
'blob': 'BLOB',
}
field_types.update(self.get_field_overrides())
return field_types
def get_field_overrides(self):
return {}
def connect(self, database, **kwargs):
raise NotImplementedError
def close(self, conn):
conn.close()
def lookup_cast(self, lookup, value):
"""
When a lookup is being performed as a part of a WHERE clause, provides
a way to alter the incoming value that is passed to the database driver
as part of the list of parameters
"""
if lookup in ('contains', 'icontains'):
return '%%%s%%' % value
elif lookup in ('startswith', 'istartswith'):
return '%s%%' % value
return value
def last_insert_id(self, cursor, model):
return cursor.lastrowid
def rows_affected(self, cursor):
return cursor.rowcount
class SqliteAdapter(BaseAdapter):
# note the sqlite library uses a non-standard interpolation string
operations = {
'lt': '< %s',
'lte': '<= %s',
'gt': '> %s',
'gte': '>= %s',
'eq': '= %s',
'ne': '!= %s', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS %s',
'isnull': 'IS NULL',
'between': 'BETWEEN %s AND %s',
'icontains': "LIKE %s ESCAPE '\\'", # surround param with %'s
'contains': "GLOB %s", # surround param with *'s
'istartswith': "LIKE %s ESCAPE '\\'",
'startswith': "GLOB %s",
}
interpolation = '?'
def connect(self, database, **kwargs):
if not sqlite3:
raise ImproperlyConfigured('sqlite3 must be installed on the system')
return sqlite3.connect(database, **kwargs)
def lookup_cast(self, lookup, value):
if lookup == 'contains':
return '*%s*' % value
elif lookup == 'icontains':
return '%%%s%%' % value
elif lookup == 'startswith':
return '%s*' % value
elif lookup == 'istartswith':
return '%s%%' % value
return value
class PostgresqlAdapter(BaseAdapter):
operations = {
'lt': '< %s',
'lte': '<= %s',
'gt': '> %s',
'gte': '>= %s',
'eq': '= %s',
'ne': '!= %s', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS %s',
'isnull': 'IS NULL',
'between': 'BETWEEN %s AND %s',
'icontains': 'ILIKE %s', # surround param with %'s
        'contains': 'LIKE %s', # surround param with %'s
'istartswith': 'ILIKE %s',
'startswith': 'LIKE %s',
}
reserved_tables = ['user']
sequence_support = True
for_update_support = True
def connect(self, database, **kwargs):
if not psycopg2:
raise ImproperlyConfigured('psycopg2 must be installed on the system')
return psycopg2.connect(database=database, **kwargs)
def get_field_overrides(self):
return {
'primary_key': 'SERIAL',
'primary_key_with_sequence': 'INTEGER',
'datetime': 'TIMESTAMP',
'decimal': 'NUMERIC',
'double': 'DOUBLE PRECISION',
'bigint': 'BIGINT',
'boolean': 'BOOLEAN',
'blob': 'BYTEA',
}
def last_insert_id(self, cursor, model):
if model._meta.pk_sequence:
cursor.execute("SELECT CURRVAL('\"%s\"')" % (
model._meta.pk_sequence))
else:
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (
model._meta.db_table, model._meta.pk_name))
return cursor.fetchone()[0]
class MySQLAdapter(BaseAdapter):
operations = {
'lt': '< %s',
'lte': '<= %s',
'gt': '> %s',
'gte': '>= %s',
'eq': '= %s',
'ne': '!= %s', # watch yourself with this one
'in': 'IN (%s)', # special-case to list q-marks
'is': 'IS %s',
'isnull': 'IS NULL',
'between': 'BETWEEN %s AND %s',
'icontains': 'LIKE %s', # surround param with %'s
        'contains': 'LIKE BINARY %s', # surround param with %'s
'istartswith': 'LIKE %s',
'startswith': 'LIKE BINARY %s',
}
quote_char = '`'
for_update_support = True
subquery_delete_same_table = False
def connect(self, database, **kwargs):
if not mysql:
raise ImproperlyConfigured('MySQLdb must be installed on the system')
conn_kwargs = {
'charset': 'utf8',
'use_unicode': True,
}
conn_kwargs.update(kwargs)
return mysql.connect(db=database, **conn_kwargs)
def get_field_overrides(self):
return {
'primary_key': 'integer AUTO_INCREMENT',
'boolean': 'bool',
'float': 'float',
'double': 'double precision',
'bigint': 'bigint',
'text': 'longtext',
'decimal': 'numeric',
}
class Database(object):
"""
A high-level api for working with the supported database engines. `Database`
provides a wrapper around some of the functions performed by the `Adapter`,
in addition providing support for:
- execution of SQL queries
- creating and dropping tables and indexes
"""
def require_sequence_support(func):
def inner(self, *args, **kwargs):
if not self.adapter.sequence_support:
raise ValueError('%s adapter does not support sequences' % (self.adapter))
return func(self, *args, **kwargs)
return inner
def __init__(self, adapter, database, threadlocals=False, autocommit=True, **connect_kwargs):
self.adapter = adapter
self.database = database
self.connect_kwargs = connect_kwargs
if threadlocals:
self.__local = threading.local()
else:
self.__local = type('DummyLocal', (object,), {})
self._conn_lock = threading.Lock()
self.autocommit = autocommit
def connect(self):
with self._conn_lock:
self.__local.conn = self.adapter.connect(self.database, **self.connect_kwargs)
self.__local.closed = False
def close(self):
with self._conn_lock:
self.adapter.close(self.__local.conn)
self.__local.closed = True
def get_conn(self):
if not hasattr(self.__local, 'closed') or self.__local.closed:
self.connect()
return self.__local.conn
def get_cursor(self):
return self.get_conn().cursor()
def execute(self, sql, params=None):
cursor = self.get_cursor()
res = cursor.execute(sql, params or ())
if self.get_autocommit():
self.commit()
logger.debug((sql, params))
return cursor
def commit(self):
self.get_conn().commit()
def rollback(self):
self.get_conn().rollback()
def set_autocommit(self, autocommit):
self.__local.autocommit = autocommit
def get_autocommit(self):
if not hasattr(self.__local, 'autocommit'):
self.set_autocommit(self.autocommit)
return self.__local.autocommit
def commit_on_success(self, func):
def inner(*args, **kwargs):
orig = self.get_autocommit()
self.set_autocommit(False)
try:
res = func(*args, **kwargs)
self.commit()
except:
self.rollback()
raise
else:
return res
finally:
self.set_autocommit(orig)
return inner
def last_insert_id(self, cursor, model):
if model._meta.auto_increment:
return self.adapter.last_insert_id(cursor, model)
def rows_affected(self, cursor):
return self.adapter.rows_affected(cursor)
def quote_name(self, name):
return ''.join((self.adapter.quote_char, name, self.adapter.quote_char))
def column_for_field(self, field):
return self.column_for_field_type(field.get_db_field())
def column_for_field_type(self, db_field_type):
try:
return self.adapter.get_field_types()[db_field_type]
except KeyError:
            # Parenthesize the format arguments; without the tuple the '%'
            # would bind to db_field_type alone and raise a TypeError.
            raise AttributeError('Unknown field type: "%s", valid types are: %s' % (
                db_field_type, ', '.join(self.adapter.get_field_types().keys())))
def field_sql(self, field):
return '%s %s' % (self.quote_name(field.db_column), field.render_field_template())
def create_table_query(self, model_class, safe):
if model_class._meta.pk_sequence and self.adapter.sequence_support:
if not self.sequence_exists(model_class._meta.pk_sequence):
self.create_sequence(model_class._meta.pk_sequence)
exists_statement = "IF NOT EXISTS" if safe else ""
charset_statement = "DEFAULT CHARSET=utf8 COLLATE utf8_general_ci"
framing = "CREATE TABLE %s %%s (%%s) %s;" % (exists_statement, charset_statement)
columns = []
for field in model_class._meta.get_fields():
columns.append(self.field_sql(field))
table = self.quote_name(model_class._meta.db_table)
return framing % (table, ', '.join(columns))
def create_table(self, model_class, safe=False):
self.execute(self.create_table_query(model_class, safe))
def create_index_query(self, model_class, field_name, unique):
framing = 'CREATE %(unique)s INDEX %(index)s ON %(table)s(%(field)s);'
if field_name not in model_class._meta.fields:
raise AttributeError(
'Field %s not on model %s' % (field_name, model_class)
)
field_obj = model_class._meta.fields[field_name]
db_table = model_class._meta.db_table
index_name = self.quote_name('%s_%s' % (db_table, field_obj.db_column))
unique_expr = ternary(unique, 'UNIQUE', '')
return framing % {
'unique': unique_expr,
'index': index_name,
'table': self.quote_name(db_table),
'field': self.quote_name(field_obj.db_column),
}
def create_index(self, model_class, field_name, unique=False):
self.execute(self.create_index_query(model_class, field_name, unique))
def create_foreign_key(self, model_class, field):
return self.create_index(model_class, field.name, field.unique)
def drop_table(self, model_class, fail_silently=False):
framing = fail_silently and 'DROP TABLE IF EXISTS %s;' or 'DROP TABLE %s;'
self.execute(framing % self.quote_name(model_class._meta.db_table))
def add_column_sql(self, model_class, field_name):
field = model_class._meta.fields[field_name]
return 'ALTER TABLE %s ADD COLUMN %s' % (
self.quote_name(model_class._meta.db_table),
self.field_sql(field),
)
def rename_column_sql(self, model_class, field_name, new_name):
# this assumes that the field on the model points to the *old* fieldname
field = model_class._meta.fields[field_name]
return 'ALTER TABLE %s RENAME COLUMN %s TO %s' % (
self.quote_name(model_class._meta.db_table),
self.quote_name(field.db_column),
self.quote_name(new_name),
)
def drop_column_sql(self, model_class, field_name):
field = model_class._meta.fields[field_name]
return 'ALTER TABLE %s DROP COLUMN %s' % (
self.quote_name(model_class._meta.db_table),
self.quote_name(field.db_column),
)
@require_sequence_support
def create_sequence(self, sequence_name):
return self.execute('CREATE SEQUENCE %s;' % self.quote_name(sequence_name))
@require_sequence_support
def drop_sequence(self, sequence_name):
return self.execute('DROP SEQUENCE %s;' % self.quote_name(sequence_name))
def get_indexes_for_table(self, table):
raise NotImplementedError
def get_tables(self):
raise NotImplementedError
def sequence_exists(self, sequence):
raise NotImplementedError
class SqliteDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(SqliteDatabase, self).__init__(SqliteAdapter(), database, **connect_kwargs)
def get_indexes_for_table(self, table):
res = self.execute('PRAGMA index_list(%s);' % self.quote_name(table))
rows = sorted([(r[1], r[2] == 1) for r in res.fetchall()])
return rows
def get_tables(self):
        res = self.execute("select name from sqlite_master where type='table' order by name")
return [r[0] for r in res.fetchall()]
def drop_column_sql(self, model_class, field_name):
raise NotImplementedError('Sqlite3 does not have direct support for dropping columns')
def rename_column_sql(self, model_class, field_name, new_name):
raise NotImplementedError('Sqlite3 does not have direct support for renaming columns')
class PostgresqlDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(PostgresqlDatabase, self).__init__(PostgresqlAdapter(), database, **connect_kwargs)
def get_indexes_for_table(self, table):
res = self.execute("""
SELECT c2.relname, i.indisprimary, i.indisunique
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2, pg_catalog.pg_index i
WHERE c.relname = %s AND c.oid = i.indrelid AND i.indexrelid = c2.oid
ORDER BY i.indisprimary DESC, i.indisunique DESC, c2.relname""", (table,))
return sorted([(r[0], r[1]) for r in res.fetchall()])
def get_tables(self):
res = self.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)
ORDER BY c.relname""")
return [row[0] for row in res.fetchall()]
def sequence_exists(self, sequence):
res = self.execute("""
SELECT COUNT(*)
FROM pg_class, pg_namespace
WHERE relkind='S'
AND pg_class.relnamespace = pg_namespace.oid
AND relname=%s""", (sequence,))
return bool(res.fetchone()[0])
class MySQLDatabase(Database):
def __init__(self, database, **connect_kwargs):
super(MySQLDatabase, self).__init__(MySQLAdapter(), database, **connect_kwargs)
def create_foreign_key(self, model_class, field):
framing = """
ALTER TABLE %(table)s ADD CONSTRAINT %(constraint)s
FOREIGN KEY (%(field)s) REFERENCES %(to)s(%(to_field)s)%(cascade)s;
"""
db_table = model_class._meta.db_table
constraint = 'fk_%s_%s_%s' % (
db_table,
field.to._meta.db_table,
field.db_column,
)
query = framing % {
'table': self.quote_name(db_table),
'constraint': self.quote_name(constraint),
'field': self.quote_name(field.db_column),
'to': self.quote_name(field.to._meta.db_table),
'to_field': self.quote_name(field.to._meta.pk_name),
'cascade': ' ON DELETE CASCADE' if field.cascade else '',
}
self.execute(query)
return super(MySQLDatabase, self).create_foreign_key(model_class, field)
def rename_column_sql(self, model_class, field_name, new_name):
field = model_class._meta.fields[field_name]
return 'ALTER TABLE %s CHANGE COLUMN %s %s %s' % (
self.quote_name(model_class._meta.db_table),
self.quote_name(field.db_column),
self.quote_name(new_name),
field.render_field_template(),
)
def get_indexes_for_table(self, table):
res = self.execute('SHOW INDEXES IN %s;' % self.quote_name(table))
rows = sorted([(r[2], r[1] == 0) for r in res.fetchall()])
return rows
def get_tables(self):
res = self.execute('SHOW TABLES;')
return [r[0] for r in res.fetchall()]
class QueryResultWrapper(object):
"""
Provides an iterator over the results of a raw Query, additionally doing
two things:
- converts rows from the database into model instances
- ensures that multiple iterations do not result in multiple queries
"""
def __init__(self, model, cursor, meta=None):
self.model = model
self.cursor = cursor
self.query_meta = meta or {}
self.column_meta = self.query_meta.get('columns')
self.join_meta = self.query_meta.get('graph')
self.__ct = 0
self.__idx = 0
self._result_cache = []
self._populated = False
def model_from_rowset(self, model_class, attr_dict):
instance = model_class()
for attr, value in attr_dict.iteritems():
if attr in instance._meta.columns:
field = instance._meta.columns[attr]
setattr(instance, attr, field.python_value(value))
else:
setattr(instance, attr, value)
return instance
def _row_to_dict(self, row):
return dict((self.cursor.description[i][0], value)
for i, value in enumerate(row))
def construct_instance(self, row):
if not self.column_meta:
# use attribute names pulled from the result cursor description,
# and do not attempt to follow joined models
row_dict = self._row_to_dict(row)
return self.model_from_rowset(self.model, row_dict)
else:
# we have columns, models, and a graph of joins to reconstruct
collected_models = {}
for i, (model, col) in enumerate(self.column_meta):
value = row[i]
if isinstance(col, tuple):
if len(col) == 3:
model = self.model # special-case aggregates
col_name = attr = col[2]
else:
col_name, attr = col
else:
col_name = attr = col
if model not in collected_models:
collected_models[model] = model()
instance = collected_models[model]
if col_name in instance._meta.columns:
field = instance._meta.columns[col_name]
setattr(instance, field.name, field.python_value(value))
else:
setattr(instance, attr, value)
return self.follow_joins(self.join_meta, collected_models, self.model)
def follow_joins(self, joins, collected_models, current):
inst = collected_models[current]
if current not in joins:
return inst
for joined_model, _, _ in joins[current]:
if joined_model in collected_models:
joined_inst = self.follow_joins(joins, collected_models, joined_model)
fk_field = current._meta.get_related_field_for_model(joined_model)
if not fk_field:
continue
if not joined_inst.get_pk():
joined_inst.set_pk(getattr(inst, fk_field.id_storage))
setattr(inst, fk_field.name, joined_inst)
setattr(inst, fk_field.id_storage, joined_inst.get_pk())
return inst
def __iter__(self):
self.__idx = 0
if not self._populated:
return self
else:
return iter(self._result_cache)
def first(self):
try:
self.__idx = 0 # move to beginning of the list
inst = self.next()
except StopIteration:
inst = None
self.__idx = 0
return inst
def fill_cache(self):
if not self._populated:
idx = self.__idx
self.__idx = self.__ct
for x in self:
pass
self.__idx = idx
def iterate(self):
row = self.cursor.fetchone()
if row:
return self.construct_instance(row)
else:
self._populated = True
raise StopIteration
def iterator(self):
while 1:
yield self.iterate()
def next(self):
if self.__idx < self.__ct:
inst = self._result_cache[self.__idx]
self.__idx += 1
return inst
instance = self.iterate()
self._result_cache.append(instance)
self.__ct += 1
self.__idx += 1
return instance
# create
class DoesNotExist(Exception):
pass
# semantic wrappers for ordering the results of a `SelectQuery`
def asc(f):
return (f, 'ASC')
def desc(f):
return (f, 'DESC')
# wrappers for performing aggregation in a `SelectQuery`
def Count(f, alias='count'):
return ('COUNT', f, alias)
def Max(f, alias='max'):
return ('MAX', f, alias)
def Min(f, alias='min'):
return ('MIN', f, alias)
def Sum(f, alias='sum'):
return ('SUM', f, alias)
# decorator for query methods that return a modified clone of the query
# rather than mutating the original in place
def returns_clone(func):
def inner(self, *args, **kwargs):
clone = self.clone()
res = func(clone, *args, **kwargs)
return clone
return inner
# helpers
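# ternary(cond, t, f) emulates "t if cond else f" using the pre-Python-2.5
# and/or idiom, e.g. ternary(True, 'UNIQUE', '') == 'UNIQUE'.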
ternary = lambda cond, t, f: (cond and [t] or [f])[0]
class Node(object):
def __init__(self, connector='AND', children=None):
self.connector = connector
self.children = children or []
self.negated = False
def connect(self, rhs, connector):
if isinstance(rhs, Leaf):
if connector == self.connector:
self.children.append(rhs)
return self
else:
p = Node(connector)
p.children = [self, rhs]
return p
elif isinstance(rhs, Node):
p = Node(connector)
p.children = [self, rhs]
return p
def __or__(self, rhs):
return self.connect(rhs, 'OR')
def __and__(self, rhs):
return self.connect(rhs, 'AND')
def __invert__(self):
self.negated = not self.negated
return self
def __nonzero__(self):
return bool(self.children)
def __unicode__(self):
query = []
nodes = []
for child in self.children:
if isinstance(child, Q):
query.append(unicode(child))
elif isinstance(child, Node):
nodes.append('(%s)' % unicode(child))
query.extend(nodes)
connector = ' %s ' % self.connector
query = connector.join(query)
if self.negated:
query = 'NOT %s' % query
return query
class Leaf(object):
def __init__(self):
self.parent = None
def connect(self, connector):
if self.parent is None:
self.parent = Node(connector)
self.parent.children.append(self)
def __or__(self, rhs):
self.connect('OR')
return self.parent | rhs
def __and__(self, rhs):
self.connect('AND')
return self.parent & rhs
def __invert__(self):
self.negated = not self.negated
return self
class Q(Leaf):
def __init__(self, _model=None, **kwargs):
self.model = _model
self.query = kwargs
self.negated = False
super(Q, self).__init__()
def __unicode__(self):
bits = ['%s = %s' % (k, v) for k, v in self.query.items()]
if len(self.query.items()) > 1:
connector = ' AND '
expr = '(%s)' % connector.join(bits)
else:
expr = bits[0]
if self.negated:
expr = 'NOT %s' % expr
return expr
class F(object):
def __init__(self, field, model=None):
self.field = field
self.model = model
self.op = None
self.value = None
def __add__(self, rhs):
self.op = '+'
self.value = rhs
return self
def __sub__(self, rhs):
self.op = '-'
self.value = rhs
return self
class R(Leaf):
def __init__(self, *params):
self.params = params
super(R, self).__init__()
def sql_select(self):
if len(self.params) == 2:
return self.params
else:
            raise ValueError('Incorrect number of arguments provided for R() expression')
def sql_where(self):
return self.params[0], self.params[1:]
def apply_model(model, item):
"""
Q() objects take a model, which provides context for the keyword arguments.
In this way Q() objects can be mixed across models. The purpose of this
function is to recurse into a query datastructure and apply the given model
to all Q() objects that do not have a model explicitly set.
"""
if isinstance(item, Node):
for child in item.children:
apply_model(model, child)
elif isinstance(item, Q):
if item.model is None:
item.model = model
def parseq(model, *args, **kwargs):
"""
Convert any query into a single Node() object -- used to build up the list
of where clauses when querying.
"""
node = Node()
for piece in args:
apply_model(model, piece)
if isinstance(piece, (Q, R, Node)):
node.children.append(piece)
else:
            raise TypeError('Unknown object: %s' % piece)
if kwargs:
node.children.append(Q(model, **kwargs))
return node
def find_models(item):
"""
Utility function to find models referenced in a query and return a set()
containing them. This function is used to generate the list of models that
are part of a where clause.
"""
seen = set()
if isinstance(item, Node):
for child in item.children:
seen.update(find_models(child))
elif isinstance(item, Q):
seen.add(item.model)
return seen
class EmptyResultException(Exception):
pass
class BaseQuery(object):
query_separator = '__'
force_alias = False
def __init__(self, model):
self.model = model
self.query_context = model
self.database = self.model._meta.database
self.operations = self.database.adapter.operations
self.interpolation = self.database.adapter.interpolation
self._dirty = True
self._where = []
self._where_models = set()
self._joins = {}
self._joined_models = set()
def _clone_dict_graph(self, dg):
cloned = {}
for node, edges in dg.items():
cloned[node] = list(edges)
return cloned
def clone_where(self):
return list(self._where)
def clone_joins(self):
return self._clone_dict_graph(self._joins)
def clone(self):
raise NotImplementedError
def qn(self, name):
return self.database.quote_name(name)
def lookup_cast(self, lookup, value):
return self.database.adapter.lookup_cast(lookup, value)
def parse_query_args(self, model, **query):
"""
Parse out and normalize clauses in a query. The query is composed of
various column+lookup-type/value pairs. Validates that the lookups
are valid and returns a list of lookup tuples that have the form:
(field name, (operation, value))
"""
parsed = []
for lhs, rhs in query.iteritems():
if self.query_separator in lhs:
lhs, op = lhs.rsplit(self.query_separator, 1)
else:
op = 'eq'
if lhs in model._meta.columns:
lhs = model._meta.columns[lhs].name
try:
field = model._meta.get_field_by_name(lhs)
except AttributeError:
field = model._meta.get_related_field_by_name(lhs)
if field is None:
raise
if isinstance(rhs, R):
expr, params = rhs.sql_where()
lookup_value = [field.db_value(o) for o in params]
combined_expr = self.operations[op] % expr
operation = combined_expr % tuple(self.interpolation for p in params)
elif isinstance(rhs, F):
lookup_value = rhs
operation = self.operations[op] # leave as "%s"
else:
if op == 'in':
if isinstance(rhs, SelectQuery):
lookup_value = rhs
operation = 'IN (%s)'
else:
if not rhs:
raise EmptyResultException
lookup_value = [field.db_value(o) for o in rhs]
operation = self.operations[op] % \
(','.join([self.interpolation for v in lookup_value]))
elif op == 'is':
if rhs is not None:
raise ValueError('__is lookups only accept None')
operation = 'IS NULL'
lookup_value = []
elif op == 'isnull':
operation = 'IS NULL' if rhs else 'IS NOT NULL'
lookup_value = []
elif isinstance(rhs, (list, tuple)):
# currently this only happens on 'between' lookups, but leave
# it general to lists and tuples
lookup_value = [field.db_value(o) for o in rhs]
operation = self.operations[op] % \
tuple(self.interpolation for v in lookup_value)
else:
lookup_value = field.db_value(rhs)
operation = self.operations[op] % self.interpolation
parsed.append(
(field.db_column, (operation, self.lookup_cast(op, lookup_value)))
)
return parsed
@returns_clone
def where(self, *args, **kwargs):
parsed = parseq(self.query_context, *args, **kwargs)
if parsed:
self._where.append(parsed)
self._where_models.update(find_models(parsed))
@returns_clone
def join(self, model, join_type=None, on=None):
if self.query_context._meta.rel_exists(model):
self._joined_models.add(model)
self._joins.setdefault(self.query_context, [])
self._joins[self.query_context].append((model, join_type, on))
self.query_context = model
else:
raise AttributeError('No foreign key found between %s and %s' % \
(self.query_context.__name__, model.__name__))
@returns_clone
def switch(self, model):
if model == self.model:
self.query_context = model
return
if model in self._joined_models:
self.query_context = model
return
raise AttributeError('You must JOIN on %s' % model.__name__)
def use_aliases(self):
return len(self._joined_models) > 0 or self.force_alias
def combine_field(self, alias, field_col):
quoted = self.qn(field_col)
if alias:
return '%s.%s' % (alias, quoted)
return quoted
def safe_combine(self, model, alias, col):
if col in model._meta.columns:
return self.combine_field(alias, col)
elif col in model._meta.fields:
return self.combine_field(alias, model._meta.fields[col].db_column)
return col
def follow_joins(self, current, alias_map, alias_required, alias_count, seen=None):
computed = []
seen = seen or set()
if current not in self._joins:
return computed
for i, (model, join_type, on) in enumerate(self._joins[current]):
seen.add(model)
if alias_required:
alias_count += 1
alias_map[model] = 't%d' % alias_count
else:
alias_map[model] = ''
from_model = current
field = from_model._meta.get_related_field_for_model(model, on)
if field:
left_field = field.db_column
right_field = model._meta.pk_name
else:
field = from_model._meta.get_reverse_related_field_for_model(model, on)
left_field = from_model._meta.pk_name
right_field = field.db_column
if join_type is None:
if field.null and model not in self._where_models:
join_type = 'LEFT OUTER'
else:
join_type = 'INNER'
computed.append(
'%s JOIN %s AS %s ON %s = %s' % (
join_type,
self.qn(model._meta.db_table),
alias_map[model],
self.combine_field(alias_map[from_model], left_field),
self.combine_field(alias_map[model], right_field),
)
)
computed.extend(self.follow_joins(model, alias_map, alias_required, alias_count, seen))
return computed
def compile_where(self):
alias_count = 0
alias_map = {}
alias_required = self.use_aliases()
if alias_required:
alias_count += 1
alias_map[self.model] = 't%d' % alias_count
else:
alias_map[self.model] = ''
computed_joins = self.follow_joins(self.model, alias_map, alias_required, alias_count)
clauses = [self.parse_node(node, alias_map) for node in self._where]
return computed_joins, clauses, alias_map
def flatten_clauses(self, clauses):
where_with_alias = []
where_data = []
for query, data in clauses:
where_with_alias.append(query)
where_data.extend(data)
return where_with_alias, where_data
def convert_where_to_params(self, where_data):
flattened = []
for clause in where_data:
if isinstance(clause, (tuple, list)):
flattened.extend(clause)
else:
flattened.append(clause)
return flattened
def parse_node(self, node, alias_map):
query = []
query_data = []
for child in node.children:
if isinstance(child, Q):
parsed, data = self.parse_q(child, alias_map)
query.append(parsed)
query_data.extend(data)
elif isinstance(child, R):
parsed, data = self.parse_r(child, alias_map)
query.append(parsed % tuple(self.interpolation for o in data))
query_data.extend(data)
elif isinstance(child, Node):
parsed, data = self.parse_node(child, alias_map)
query.append('(%s)' % parsed)
query_data.extend(data)
connector = ' %s ' % node.connector
query = connector.join(query)
if node.negated:
query = 'NOT (%s)' % query
return query, query_data
def parse_q(self, q, alias_map):
model = q.model or self.model
query = []
query_data = []
parsed = self.parse_query_args(model, **q.query)
for (name, lookup) in parsed:
operation, value = lookup
if isinstance(value, SelectQuery):
sql, value = self.convert_subquery(value)
operation = operation % sql
if isinstance(value, F):
f_model = value.model or model
operation = operation % self.parse_f(value, f_model, alias_map)
else:
query_data.append(value)
combined = self.combine_field(alias_map[model], name)
query.append('%s %s' % (combined, operation))
if len(query) > 1:
query = '(%s)' % (' AND '.join(query))
else:
query = query[0]
if q.negated:
query = 'NOT %s' % query
return query, query_data
def parse_f(self, f_object, model, alias_map):
combined = self.combine_field(alias_map[model], f_object.field)
if f_object.op is not None:
combined = '(%s %s %s)' % (combined, f_object.op, f_object.value)
return combined
def parse_r(self, r_object, alias_map):
return r_object.sql_where()
def convert_subquery(self, subquery):
orig_query = subquery.query
if subquery.query == '*':
subquery.query = subquery.model._meta.pk_name
subquery.force_alias, orig_alias = True, subquery.force_alias
sql, data = subquery.sql()
subquery.query = orig_query
subquery.force_alias = orig_alias
return sql, data
def sorted_models(self, alias_map):
return [
(model, alias) \
for (model, alias) in sorted(alias_map.items(), key=lambda i: i[1])
]
def sql(self):
raise NotImplementedError
def execute(self):
raise NotImplementedError
def raw_execute(self, query, params):
return self.database.execute(query, params)
class RawQuery(BaseQuery):
def __init__(self, model, query, *params):
self._sql = query
self._params = list(params)
super(RawQuery, self).__init__(model)
def clone(self):
return RawQuery(self.model, self._sql, *self._params)
def sql(self):
return self._sql, self._params
def execute(self):
return QueryResultWrapper(self.model, self.raw_execute(*self.sql()))
def join(self):
raise AttributeError('Raw queries do not support joining programmatically')
def where(self):
raise AttributeError('Raw queries do not support querying programmatically')
def switch(self):
raise AttributeError('Raw queries do not support switching contexts')
def __iter__(self):
return iter(self.execute())
class SelectQuery(BaseQuery):
def __init__(self, model, query=None):
self.query = query or '*'
self._group_by = []
self._having = []
self._order_by = []
self._limit = None
self._offset = None
self._distinct = False
self._qr = None
self._for_update = False
super(SelectQuery, self).__init__(model)
def clone(self):
query = SelectQuery(self.model, self.query)
query.query_context = self.query_context
query._group_by = list(self._group_by)
query._having = list(self._having)
query._order_by = list(self._order_by)
query._limit = self._limit
query._offset = self._offset
query._distinct = self._distinct
query._qr = self._qr
query._for_update = self._for_update
query._where = self.clone_where()
query._where_models = set(self._where_models)
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
@returns_clone
def paginate(self, page, paginate_by=20):
if page > 0:
page -= 1
self._limit = paginate_by
self._offset = page * paginate_by
@returns_clone
def limit(self, num_rows):
self._limit = num_rows
@returns_clone
def offset(self, num_rows):
self._offset = num_rows
@returns_clone
def for_update(self, for_update=True):
self._for_update = for_update
def count(self):
if self._distinct or self._group_by:
return self.wrapped_count()
clone = self.order_by()
clone._limit = clone._offset = None
if clone.use_aliases():
clone.query = 'COUNT(t1.%s)' % (clone.model._meta.pk_name)
else:
clone.query = 'COUNT(%s)' % (clone.model._meta.pk_name)
res = clone.database.execute(*clone.sql())
return (res.fetchone() or [0])[0]
def wrapped_count(self):
clone = self.order_by()
clone._limit = clone._offset = None
sql, params = clone.sql()
query = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql
res = clone.database.execute(query, params)
return res.fetchone()[0]
@returns_clone
def group_by(self, *clauses):
model = self.query_context
for clause in clauses:
if isinstance(clause, basestring):
fields = (clause,)
elif isinstance(clause, (list, tuple)):
fields = clause
elif issubclass(clause, Model):
model = clause
fields = clause._meta.get_field_names()
self._group_by.append((model, fields))
@returns_clone
def having(self, *clauses):
self._having = clauses
@returns_clone
def distinct(self):
self._distinct = True
@returns_clone
def order_by(self, *clauses):
order_by = []
for clause in clauses:
if isinstance(clause, tuple):
if len(clause) == 3:
model, field, ordering = clause
elif len(clause) == 2:
if isinstance(clause[0], basestring):
model = self.query_context
field, ordering = clause
else:
model, field = clause
ordering = 'ASC'
else:
raise ValueError('Incorrect arguments passed in order_by clause')
else:
model = self.query_context
field = clause
ordering = 'ASC'
order_by.append(
(model, field, ordering)
)
self._order_by = order_by
def exists(self):
clone = self.paginate(1, 1)
clone.query = '(1) AS a'
curs = self.database.execute(*clone.sql())
return bool(curs.fetchone())
def get(self, *args, **kwargs):
orig_ctx = self.query_context
self.query_context = self.model
query = self.where(*args, **kwargs).paginate(1, 1)
try:
obj = query.execute().next()
return obj
except StopIteration:
raise self.model.DoesNotExist('instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % (
query.sql()
))
finally:
self.query_context = orig_ctx
def filter(self, *args, **kwargs):
return filter_query(self, *args, **kwargs)
def annotate(self, related_model, aggregation=None):
return annotate_query(self, related_model, aggregation)
def parse_select_query(self, alias_map):
q = self.query
if isinstance(q, (list, tuple)):
q = {self.model: self.query}
elif isinstance(q, basestring):
# convert '*' and primary key lookups
if q == '*':
q = {self.model: self.model._meta.get_field_names()}
elif q == self.model._meta.pk_name:
q = {self.model: [self.model._meta.pk_name]}
else:
return q, []
# by now we should have a dictionary if a valid type was passed in
if not isinstance(q, dict):
raise TypeError('Unknown type encountered parsing select query')
# gather aliases and models
sorted_models = self.sorted_models(alias_map)
# normalize if we are working with a dictionary
columns = []
model_cols = []
for model, alias in sorted_models:
if model not in q:
continue
if '*' in q[model]:
idx = q[model].index('*')
q[model] = q[model][:idx] + model._meta.get_field_names() + q[model][idx+1:]
for clause in q[model]:
if isinstance(clause, R):
clause = clause.sql_select()
if isinstance(clause, tuple):
if len(clause) == 3:
func, col_name, col_alias = clause
column = model._meta.get_column(col_name)
columns.append('%s(%s) AS %s' % \
(func, self.safe_combine(model, alias, column), col_alias)
)
model_cols.append((model, (func, column, col_alias)))
elif len(clause) == 2:
col_name, col_alias = clause
column = model._meta.get_column(col_name)
columns.append('%s AS %s' % \
(self.safe_combine(model, alias, column), col_alias)
)
model_cols.append((model, (column, col_alias)))
else:
raise ValueError('Clause must be either a 2- or 3-tuple')
else:
column = model._meta.get_column(clause)
columns.append(self.safe_combine(model, alias, column))
model_cols.append((model, column))
return ', '.join(columns), model_cols
def sql_meta(self):
joins, clauses, alias_map = self.compile_where()
where, where_data = self.flatten_clauses(clauses)
table = self.qn(self.model._meta.db_table)
params = []
group_by = []
use_aliases = self.use_aliases()
if use_aliases:
table = '%s AS %s' % (table, alias_map[self.model])
for model, clause in self._group_by:
if use_aliases:
alias = alias_map[model]
else:
alias = ''
for field in clause:
group_by.append(self.safe_combine(model, alias, field))
parsed_query, model_cols = self.parse_select_query(alias_map)
query_meta = {
'columns': model_cols,
'graph': self._joins,
}
if self._distinct:
sel = 'SELECT DISTINCT'
else:
sel = 'SELECT'
select = '%s %s FROM %s' % (sel, parsed_query, table)
joins = '\n'.join(joins)
where = ' AND '.join(where)
group_by = ', '.join(group_by)
having = ' AND '.join(self._having)
order_by = []
for piece in self._order_by:
model, field, ordering = piece
if use_aliases:
alias = alias_map[model]
else:
alias = ''
order_by.append('%s %s' % (self.safe_combine(model, alias, field), ordering))
pieces = [select]
if joins:
pieces.append(joins)
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
if group_by:
pieces.append('GROUP BY %s' % group_by)
if having:
pieces.append('HAVING %s' % having)
if order_by:
pieces.append('ORDER BY %s' % ', '.join(order_by))
if self._limit:
pieces.append('LIMIT %d' % self._limit)
if self._offset:
pieces.append('OFFSET %d' % self._offset)
if self._for_update and self.database.adapter.for_update_support:
pieces.append('FOR UPDATE')
return ' '.join(pieces), params, query_meta
def sql(self):
query, params, meta = self.sql_meta()
return query, params
def execute(self):
if self._dirty or not self._qr:
try:
sql, params, meta = self.sql_meta()
except EmptyResultException:
return []
else:
self._qr = QueryResultWrapper(self.model, self.raw_execute(sql, params), meta)
self._dirty = False
return self._qr
else:
# call the __iter__ method directly
return self._qr
def __iter__(self):
return iter(self.execute())
class UpdateQuery(BaseQuery):
def __init__(self, model, **kwargs):
self.update_query = kwargs
super(UpdateQuery, self).__init__(model)
def clone(self):
query = UpdateQuery(self.model, **self.update_query)
query._where = self.clone_where()
query._where_models = set(self._where_models)
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
def parse_update(self):
sets = {}
for k, v in self.update_query.iteritems():
if k in self.model._meta.columns:
k = self.model._meta.columns[k].name
try:
field = self.model._meta.get_field_by_name(k)
except AttributeError:
field = self.model._meta.get_related_field_by_name(k)
if field is None:
raise
if not isinstance(v, F):
v = field.db_value(v)
sets[field.db_column] = v
return sets
def sql(self):
joins, clauses, alias_map = self.compile_where()
where, where_data = self.flatten_clauses(clauses)
set_statement = self.parse_update()
params = []
update_params = []
alias = alias_map.get(self.model)
for k, v in set_statement.iteritems():
if isinstance(v, F):
value = self.parse_f(v, v.model or self.model, alias_map)
else:
params.append(v)
value = self.interpolation
update_params.append('%s=%s' % (self.combine_field(alias, k), value))
update = 'UPDATE %s SET %s' % (
self.qn(self.model._meta.db_table), ', '.join(update_params))
where = ' AND '.join(where)
pieces = [update]
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
return ' '.join(pieces), params
def join(self, *args, **kwargs):
raise AttributeError('Update queries do not support JOINs in sqlite')
def execute(self):
result = self.raw_execute(*self.sql())
return self.database.rows_affected(result)
class DeleteQuery(BaseQuery):
def clone(self):
query = DeleteQuery(self.model)
query._where = self.clone_where()
query._where_models = set(self._where_models)
query._joined_models = self._joined_models.copy()
query._joins = self.clone_joins()
return query
def sql(self):
joins, clauses, alias_map = self.compile_where()
where, where_data = self.flatten_clauses(clauses)
params = []
delete = 'DELETE FROM %s' % (self.qn(self.model._meta.db_table))
where = ' AND '.join(where)
pieces = [delete]
if where:
pieces.append('WHERE %s' % where)
params.extend(self.convert_where_to_params(where_data))
return ' '.join(pieces), params
def join(self, *args, **kwargs):
        raise AttributeError('Delete queries do not support JOINs in sqlite')
def execute(self):
result = self.raw_execute(*self.sql())
return self.database.rows_affected(result)
class InsertQuery(BaseQuery):
def __init__(self, model, **kwargs):
self.insert_query = kwargs
super(InsertQuery, self).__init__(model)
def parse_insert(self):
cols = []
vals = []
for k, v in self.insert_query.iteritems():
if k in self.model._meta.columns:
k = self.model._meta.columns[k].name
try:
field = self.model._meta.get_field_by_name(k)
except AttributeError:
field = self.model._meta.get_related_field_by_name(k)
if field is None:
raise
cols.append(self.qn(field.db_column))
vals.append(field.db_value(v))
return cols, vals
def sql(self):
cols, vals = self.parse_insert()
insert = 'INSERT INTO %s (%s) VALUES (%s)' % (
self.qn(self.model._meta.db_table),
','.join(cols),
','.join(self.interpolation for v in vals)
)
return insert, vals
def where(self, *args, **kwargs):
raise AttributeError('Insert queries do not support WHERE clauses')
def join(self, *args, **kwargs):
raise AttributeError('Insert queries do not support JOINs')
def execute(self):
result = self.raw_execute(*self.sql())
return self.database.last_insert_id(result, self.model)
def model_or_select(m_or_q):
"""
Return both a model and a select query for the provided model *OR* select
query.
"""
if isinstance(m_or_q, BaseQuery):
return (m_or_q.model, m_or_q)
else:
return (m_or_q, m_or_q.select())
def convert_lookup(model, joins, lookup):
"""
Given a model, a graph of joins, and a lookup, return a tuple containing
a normalized lookup:
(model actually being queried, updated graph of joins, normalized lookup)
"""
operations = model._meta.database.adapter.operations
pieces = lookup.split('__')
operation = None
query_model = model
if len(pieces) > 1:
if pieces[-1] in operations:
operation = pieces.pop()
lookup = pieces.pop()
# we have some joins
if len(pieces):
for piece in pieces:
# piece is something like 'blog' or 'entry_set'
joined_model = None
for field in query_model._meta.get_fields():
if not isinstance(field, ForeignKeyField):
continue
if piece in (field.name, field.db_column, field.related_name):
joined_model = field.to
if not joined_model:
try:
joined_model = query_model._meta.reverse_relations[piece]
except KeyError:
raise ValueError('Unknown relation: "%s" of "%s"' % (
piece,
query_model,
))
joins.setdefault(query_model, set())
joins[query_model].add(joined_model)
query_model = joined_model
if operation:
lookup = '%s__%s' % (lookup, operation)
return query_model, joins, lookup
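# A minimal sketch of lookup normalization (assumes hypothetical Blog and
# Entry models where Entry.blog is a ForeignKeyField(Blog), which gives Blog
# a reverse relation named 'entry_set' by default):
#
#     joins = {}
#     qm, joins, lookup = convert_lookup(Blog, joins, 'entry_set__title__icontains')
#     # qm     -> Entry (the model actually being queried)
#     # joins  -> {Blog: set([Entry])}
#     # lookup -> 'title__icontains'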
def filter_query(model_or_query, *args, **kwargs):
"""
Provide a django-like interface for executing queries
"""
model, select_query = model_or_select(model_or_query)
query = {} # mapping of models to queries
joins = {} # a graph of joins needed, passed into the convert_lookup function
# traverse Q() objects, find any joins that may be lurking -- clean up the
# lookups and assign the correct model
def fix_q(node_or_q, joins):
if isinstance(node_or_q, Node):
for child in node_or_q.children:
fix_q(child, joins)
elif isinstance(node_or_q, Q):
new_query = {}
curr_model = node_or_q.model or model
for raw_lookup, value in node_or_q.query.items():
query_model, joins, lookup = convert_lookup(curr_model, joins, raw_lookup)
new_query[lookup] = value
node_or_q.model = query_model
node_or_q.query = new_query
for node_or_q in args:
fix_q(node_or_q, joins)
# iterate over keyword lookups and determine lookups and necessary joins
for raw_lookup, value in kwargs.items():
queried_model, joins, lookup = convert_lookup(model, joins, raw_lookup)
query.setdefault(queried_model, [])
query[queried_model].append((lookup, value))
def follow_joins(current, query):
if current in joins:
for joined_model in joins[current]:
query = query.switch(current)
if joined_model not in query._joined_models:
query = query.join(joined_model)
query = follow_joins(joined_model, query)
return query
select_query = follow_joins(model, select_query)
for node in args:
select_query = select_query.where(node)
for model, lookups in query.items():
qargs, qkwargs = [], {}
for lookup in lookups:
if isinstance(lookup, tuple):
qkwargs[lookup[0]] = lookup[1]
else:
qargs.append(lookup)
select_query = select_query.switch(model).where(*qargs, **qkwargs)
return select_query
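# A minimal usage sketch (same hypothetical Blog/Entry models as above):
# keyword lookups that traverse a foreign key are turned into joins plus
# per-model where clauses.
#
#     sq = filter_query(Entry, title__icontains='python', blog__title='tech')
#     # roughly: Entry.select().where(title__icontains='python')
#     #               .join(Blog).where(title='tech')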
def annotate_query(select_query, related_model, aggregation):
"""
Perform an aggregation against a related model
"""
aggregation = aggregation or Count(related_model._meta.pk_name)
model = select_query.model
select_query = select_query.switch(model)
cols = select_query.query
# ensure the join is there
if related_model not in select_query._joined_models:
select_query = select_query.join(related_model).switch(model)
# query for it
if isinstance(cols, dict):
selection = cols
group_by = cols[model]
elif isinstance(cols, basestring):
selection = {model: [cols]}
if cols == '*':
group_by = model
else:
group_by = [col.strip() for col in cols.split(',')]
elif isinstance(cols, (list, tuple)):
selection = {model: cols}
group_by = cols
else:
raise ValueError('Unknown type passed in to select query: "%s"' % type(cols))
# query for the related object
selection[related_model] = [aggregation]
select_query.query = selection
return select_query.group_by(group_by)
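# A minimal usage sketch (hypothetical Blog/Entry models again): annotate
# each Blog with the number of related Entry rows. Passing None as the
# aggregation falls back to Count over the related model's primary key.
#
#     sq = annotate_query(Blog.select(), Entry, None)
#     # selects the blog columns plus COUNT(entry pk), joins to entry, and
#     # groups by the blog columns.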
class Column(object):
db_field = ''
template = '%(column_type)s'
def __init__(self, **attributes):
self.attributes = self.get_attributes()
self.attributes.update(**attributes)
def get_attributes(self):
return {}
def python_value(self, value):
return value
def db_value(self, value):
return value
def render(self, db):
params = {'column_type': db.column_for_field_type(self.db_field)}
params.update(self.attributes)
return self.template % params
class VarCharColumn(Column):
db_field = 'string'
template = '%(column_type)s(%(max_length)d)'
def get_attributes(self):
return {'max_length': 255}
def db_value(self, value):
value = value or ''
return value[:self.attributes['max_length']]
class TextColumn(Column):
db_field = 'text'
def db_value(self, value):
return value or ''
class DateTimeColumn(Column):
db_field = 'datetime'
def python_value(self, value):
if isinstance(value, basestring):
value = value.rsplit('.', 1)[0]
return datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6])
return value
class IntegerColumn(Column):
db_field = 'integer'
def db_value(self, value):
return value or 0
def python_value(self, value):
if value is not None:
return int(value)
class BigIntegerColumn(IntegerColumn):
db_field = 'bigint'
class BooleanColumn(Column):
db_field = 'boolean'
def db_value(self, value):
return bool(value)
def python_value(self, value):
return bool(value)
class FloatColumn(Column):
db_field = 'float'
def db_value(self, value):
return value or 0.0
def python_value(self, value):
if value is not None:
return float(value)
class DoubleColumn(FloatColumn):
db_field = 'double'
class DecimalColumn(Column):
db_field = 'decimal'
    template = '%(column_type)s(%(max_digits)d, %(decimal_places)d)'
def get_attributes(self):
return {
'max_digits': 10,
'decimal_places': 5,
}
def db_value(self, value):
return value or decimal.Decimal(0)
def python_value(self, value):
if value is not None:
if isinstance(value, decimal.Decimal):
return value
return decimal.Decimal(str(value))
class PrimaryKeyColumn(Column):
db_field = 'primary_key'
class PrimaryKeySequenceColumn(PrimaryKeyColumn):
db_field = 'primary_key_with_sequence'
class FieldDescriptor(object):
def __init__(self, field):
self.field = field
self._cache_name = '__%s' % self.field.name
def __get__(self, instance, instance_type=None):
if instance:
return getattr(instance, self._cache_name, None)
return self.field
def __set__(self, instance, value):
setattr(instance, self._cache_name, value)
class Field(object):
column_class = None
default = None
field_template = "%(column)s%(nullable)s"
_field_counter = 0
_order = 0
def __init__(self, null=False, db_index=False, unique=False, verbose_name=None,
help_text=None, db_column=None, default=None, *args, **kwargs):
self.null = null
self.db_index = db_index
self.unique = unique
self.verbose_name = verbose_name
self.help_text = help_text
self.db_column = db_column
self.default = default
self.attributes = kwargs
Field._field_counter += 1
self._order = Field._field_counter
def add_to_class(self, klass, name):
self.name = name
self.model = klass
self.verbose_name = self.verbose_name or re.sub('_+', ' ', name).title()
self.db_column = self.db_column or self.name
self.column = self.get_column()
setattr(klass, name, FieldDescriptor(self))
def get_column(self):
return self.column_class(**self.attributes)
def render_field_template(self):
params = {
'column': self.column.render(self.model._meta.database),
'nullable': ternary(self.null, '', ' NOT NULL'),
}
params.update(self.column.attributes)
return self.field_template % params
def db_value(self, value):
if (self.null and value is None):
return None
return self.column.db_value(value)
def python_value(self, value):
return self.column.python_value(value)
def lookup_value(self, lookup_type, value):
return self.db_value(value)
def class_prepared(self):
pass
class CharField(Field):
column_class = VarCharColumn
class TextField(Field):
column_class = TextColumn
class DateTimeField(Field):
column_class = DateTimeColumn
class IntegerField(Field):
column_class = IntegerColumn
class BigIntegerField(IntegerField):
column_class = BigIntegerColumn
class BooleanField(IntegerField):
column_class = BooleanColumn
class FloatField(Field):
column_class = FloatColumn
class DoubleField(Field):
column_class = DoubleColumn
class DecimalField(Field):
column_class = DecimalColumn
class PrimaryKeyField(IntegerField):
column_class = PrimaryKeyColumn
field_template = "%(column)s NOT NULL PRIMARY KEY%(nextval)s"
def __init__(self, column_class=None, *args, **kwargs):
if kwargs.get('null'):
raise ValueError('Primary keys cannot be nullable')
if column_class:
self.column_class = column_class
if 'nextval' not in kwargs:
kwargs['nextval'] = ''
super(PrimaryKeyField, self).__init__(*args, **kwargs)
def get_column_class(self):
# check to see if we're using the default pk column
if self.column_class == PrimaryKeyColumn:
# if we have a sequence and can support them, then use the special
# column class that supports sequences
            if self.model._meta.pk_sequence is not None and self.model._meta.database.adapter.sequence_support:
self.column_class = PrimaryKeySequenceColumn
return self.column_class
def get_column(self):
return self.get_column_class()(**self.attributes)
class ForeignRelatedObject(object):
def __init__(self, to, field):
self.to = to
self.field = field
self.field_name = self.field.name
self.field_column = self.field.id_storage
self.cache_name = '_cache_%s' % self.field_name
def __get__(self, instance, instance_type=None):
if not instance:
return self.field
if not getattr(instance, self.cache_name, None):
id = getattr(instance, self.field_column, 0)
qr = self.to.select().where(**{self.to._meta.pk_name: id})
try:
setattr(instance, self.cache_name, qr.get())
except self.to.DoesNotExist:
if not self.field.null:
raise
return getattr(instance, self.cache_name, None)
def __set__(self, instance, obj):
if self.field.null and obj is None:
setattr(instance, self.field_column, None)
setattr(instance, self.cache_name, None)
else:
if not isinstance(obj, Model):
setattr(instance, self.field_column, obj)
else:
assert isinstance(obj, self.to), "Cannot assign %s to %s, invalid type" % (obj, self.field.name)
setattr(instance, self.field_column, obj.get_pk())
setattr(instance, self.cache_name, obj)
class ReverseForeignRelatedObject(object):
def __init__(self, related_model, name):
self.field_name = name
self.related_model = related_model
def __get__(self, instance, instance_type=None):
query = {self.field_name: instance.get_pk()}
qr = self.related_model.select().where(**query)
return qr
class ForeignKeyField(IntegerField):
field_template = '%(column)s%(nullable)s REFERENCES %(to_table)s (%(to_pk)s)%(cascade)s%(extra)s'
def __init__(self, to, null=False, related_name=None, cascade=False, extra=None, *args, **kwargs):
self.to = to
self._related_name = related_name
self.cascade = cascade
self.extra = extra
kwargs.update({
'cascade': ' ON DELETE CASCADE' if self.cascade else '',
'extra': self.extra or '',
})
super(ForeignKeyField, self).__init__(null=null, *args, **kwargs)
def add_to_class(self, klass, name):
self.name = name
self.model = klass
self.db_column = self.db_column or self.name + '_id'
if self.name == self.db_column:
self.id_storage = self.db_column + '_id'
else:
self.id_storage = self.db_column
if self.to == 'self':
self.to = self.model
self.verbose_name = self.verbose_name or re.sub('_', ' ', name).title()
if self._related_name is not None:
self.related_name = self._related_name
else:
self.related_name = klass._meta.db_table + '_set'
klass._meta.rel_fields[name] = self.name
setattr(klass, self.name, ForeignRelatedObject(self.to, self))
setattr(klass, self.id_storage, None)
reverse_rel = ReverseForeignRelatedObject(klass, self.name)
setattr(self.to, self.related_name, reverse_rel)
self.to._meta.reverse_relations[self.related_name] = klass
def lookup_value(self, lookup_type, value):
if isinstance(value, Model):
return value.get_pk()
return value or None
def db_value(self, value):
if isinstance(value, Model):
return value.get_pk()
if self.null and value is None:
return None
return self.column.db_value(value)
def get_column(self):
to_pk = self.to._meta.get_field_by_name(self.to._meta.pk_name)
to_col_class = to_pk.get_column_class()
if to_col_class not in (PrimaryKeyColumn, PrimaryKeySequenceColumn):
self.column_class = to_pk.get_column_class()
return self.column_class(**self.attributes)
def class_prepared(self):
# unfortunately because we may not know the primary key field
# at the time this field's add_to_class() method is called, we
# need to update the attributes after the class has been built
self.attributes.update({
'to_table': self.to._meta.db_table,
'to_pk': self.to._meta.pk_name,
})
self.column = self.get_column()
# define a default database object in the module scope
database = SqliteDatabase(DATABASE_NAME)
class BaseModelOptions(object):
ordering = None
pk_sequence = None
def __init__(self, model_class, options=None):
# configurable options
options = options or {'database': database}
for k, v in options.items():
setattr(self, k, v)
self.rel_fields = {}
self.reverse_relations = {}
self.fields = {}
self.columns = {}
self.model_class = model_class
def get_sorted_fields(self):
return sorted(self.fields.items(), key=lambda (k,v): (k == self.pk_name and 1 or 2, v._order))
def get_field_names(self):
return [f[0] for f in self.get_sorted_fields()]
def get_fields(self):
return [f[1] for f in self.get_sorted_fields()]
def get_field_by_name(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError('Field named %s not found' % name)
def get_column_names(self):
return self.columns.keys()
def get_column(self, field_or_col):
if field_or_col in self.fields:
return self.fields[field_or_col].db_column
return field_or_col
def get_related_field_by_name(self, name):
if name in self.rel_fields:
return self.fields[self.rel_fields[name]]
def get_related_field_for_model(self, model, name=None):
for field in self.fields.values():
if isinstance(field, ForeignKeyField) and field.to == model:
if name is None or name == field.name or name == field.db_column:
return field
def get_reverse_related_field_for_model(self, model, name=None):
for field in model._meta.fields.values():
if isinstance(field, ForeignKeyField) and field.to == self.model_class:
if name is None or name == field.name or name == field.db_column:
return field
def get_field_for_related_name(self, model, related_name):
for field in model._meta.fields.values():
if isinstance(field, ForeignKeyField) and field.to == self.model_class:
if field.related_name == related_name:
return field
def rel_exists(self, model):
return self.get_related_field_for_model(model) or \
self.get_reverse_related_field_for_model(model)
class BaseModel(type):
inheritable_options = ['database', 'ordering', 'pk_sequence']
def __new__(cls, name, bases, attrs):
cls = super(BaseModel, cls).__new__(cls, name, bases, attrs)
if not bases:
return cls
attr_dict = {}
meta = attrs.pop('Meta', None)
if meta:
attr_dict = meta.__dict__
for b in bases:
base_meta = getattr(b, '_meta', None)
if not base_meta:
continue
for (k, v) in base_meta.__dict__.items():
if k in cls.inheritable_options and k not in attr_dict:
attr_dict[k] = v
elif k == 'fields':
for field_name, field_obj in v.items():
if isinstance(field_obj, PrimaryKeyField):
continue
if field_name in cls.__dict__:
continue
field_copy = copy.deepcopy(field_obj)
setattr(cls, field_name, field_copy)
_meta = BaseModelOptions(cls, attr_dict)
if not hasattr(_meta, 'db_table'):
_meta.db_table = re.sub('[^\w]+', '_', cls.__name__.lower())
if _meta.db_table in _meta.database.adapter.reserved_tables:
warnings.warn('Table for %s ("%s") is reserved, please override using Meta.db_table' % (
cls, _meta.db_table,
))
setattr(cls, '_meta', _meta)
_meta.pk_name = None
for name, attr in cls.__dict__.items():
if isinstance(attr, Field):
attr.add_to_class(cls, name)
_meta.fields[attr.name] = attr
_meta.columns[attr.db_column] = attr
if isinstance(attr, PrimaryKeyField):
_meta.pk_name = attr.name
if _meta.pk_name is None:
_meta.pk_name = 'id'
pk = PrimaryKeyField()
pk.add_to_class(cls, _meta.pk_name)
_meta.fields[_meta.pk_name] = pk
_meta.model_name = cls.__name__
pk_field = _meta.fields[_meta.pk_name]
pk_col = pk_field.column
if _meta.pk_sequence and _meta.database.adapter.sequence_support:
pk_col.attributes['nextval'] = " default nextval('%s')" % _meta.pk_sequence
_meta.auto_increment = isinstance(pk_col, PrimaryKeyColumn)
for field in _meta.fields.values():
field.class_prepared()
if hasattr(cls, '__unicode__'):
setattr(cls, '__repr__', lambda self: '<%s: %s>' % (
_meta.model_name, self.__unicode__()))
exception_class = type('%sDoesNotExist' % _meta.model_name, (DoesNotExist,), {})
cls.DoesNotExist = exception_class
return cls
class Model(object):
__metaclass__ = BaseModel
def __init__(self, *args, **kwargs):
self.initialize_defaults()
for k, v in kwargs.items():
setattr(self, k, v)
def initialize_defaults(self):
for field in self._meta.fields.values():
if field.default is not None:
if callable(field.default):
field_value = field.default()
else:
field_value = field.default
setattr(self, field.name, field_value)
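    # Illustration of the default handling above (the fields shown are
    # hypothetical; datetime is the class used elsewhere in this module):
    # DateTimeField(default=datetime.now) is invoked once per new instance,
    # while CharField(default='untitled') is assigned as a plain value.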
def __eq__(self, other):
return other.__class__ == self.__class__ and \
self.get_pk() and \
other.get_pk() == self.get_pk()
def get_field_dict(self):
field_dict = {}
for field in self._meta.fields.values():
if isinstance(field, ForeignKeyField):
field_dict[field.name] = getattr(self, field.id_storage)
else:
field_dict[field.name] = getattr(self, field.name)
return field_dict
@classmethod
def table_exists(cls):
return cls._meta.db_table in cls._meta.database.get_tables()
@classmethod
def create_table(cls, fail_silently=False):
if fail_silently and cls.table_exists():
return
cls._meta.database.create_table(cls)
for field_name, field_obj in cls._meta.fields.items():
if isinstance(field_obj, ForeignKeyField):
cls._meta.database.create_foreign_key(cls, field_obj)
elif field_obj.db_index or field_obj.unique:
cls._meta.database.create_index(cls, field_obj.name, field_obj.unique)
@classmethod
def drop_table(cls, fail_silently=False):
cls._meta.database.drop_table(cls, fail_silently)
@classmethod
def filter(cls, *args, **kwargs):
return filter_query(cls, *args, **kwargs)
@classmethod
def select(cls, query=None):
select_query = SelectQuery(cls, query)
if cls._meta.ordering:
select_query = select_query.order_by(*cls._meta.ordering)
return select_query
@classmethod
def update(cls, **query):
return UpdateQuery(cls, **query)
@classmethod
def insert(cls, **query):
return InsertQuery(cls, **query)
@classmethod
def delete(cls, **query):
return DeleteQuery(cls, **query)
@classmethod
def raw(cls, sql, *params):
return RawQuery(cls, sql, *params)
@classmethod
def create(cls, **query):
inst = cls(**query)
inst.save(force_insert=True)
return inst
@classmethod
def get_or_create(cls, **query):
try:
inst = cls.get(**query)
except cls.DoesNotExist:
inst = cls.create(**query)
return inst
@classmethod
def get(cls, *args, **kwargs):
return cls.select().get(*args, **kwargs)
def get_pk(self):
return getattr(self, self._meta.pk_name, None)
def set_pk(self, pk):
pk_field = self._meta.fields[self._meta.pk_name]
setattr(self, self._meta.pk_name, pk_field.python_value(pk))
def save(self, force_insert=False):
field_dict = self.get_field_dict()
if self.get_pk() and not force_insert:
field_dict.pop(self._meta.pk_name)
update = self.update(
**field_dict
).where(**{self._meta.pk_name: self.get_pk()})
update.execute()
else:
if self._meta.auto_increment:
field_dict.pop(self._meta.pk_name)
insert = self.insert(**field_dict)
new_pk = insert.execute()
if self._meta.auto_increment:
setattr(self, self._meta.pk_name, new_pk)
@classmethod
def collect_models(cls, accum=None):
# dfs to grab any affected models, then from the bottom up issue
# proper deletes using subqueries to obtain objects to remove
accum = accum or []
models = []
for related_name, rel_model in cls._meta.reverse_relations.items():
rel_field = cls._meta.get_field_for_related_name(rel_model, related_name)
coll = [(rel_model, rel_field.name, rel_field.null)] + accum
if not rel_field.null:
models.extend(rel_model.collect_models(coll))
models.append(coll)
return models
def collect_queries(self):
select_queries = []
nullable_queries = []
collected_models = self.collect_models()
if collected_models:
for model_joins in collected_models:
depth = len(model_joins)
base, last, nullable = model_joins[0]
query = base.select([base._meta.pk_name])
for model, join, _ in model_joins[1:]:
query = query.join(model, on=last)
last = join
query = query.where(**{last: self.get_pk()})
if nullable:
nullable_queries.append((query, last, depth))
else:
select_queries.append((query, last, depth))
return select_queries, nullable_queries
def delete_instance(self, recursive=False):
# XXX: it is strongly recommended you run this in a transaction if using
# the recursive delete
if recursive:
            # delete reverse relations, i.e. anything that would be orphaned.
select_queries, nullable_queries = self.collect_queries()
# currently doesn't work with mysql:
# http://dev.mysql.com/doc/refman/5.1/en/subquery-restrictions.html
for query, fk_field, depth in select_queries:
model = query.model
if not self._meta.database.adapter.subquery_delete_same_table:
query = [obj.get_pk() for obj in query]
if not query:
continue
model.delete().where(**{
'%s__in' % model._meta.pk_name: query,
}).execute()
for query, fk_field, depth in nullable_queries:
model = query.model
if not self._meta.database.adapter.subquery_delete_same_table:
query = [obj.get_pk() for obj in query]
if not query:
continue
model.update(**{fk_field: None}).where(**{
'%s__in' % model._meta.pk_name: query,
}).execute()
return self.delete().where(**{
self._meta.pk_name: self.get_pk()
}).execute()
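    # A minimal usage sketch (hypothetical Blog/Entry, where Entry.blog is a
    # non-nullable ForeignKeyField(Blog)): a recursive delete also removes
    # the dependent Entry rows, while nullable foreign keys referencing the
    # instance are set to NULL instead. Per the note above, wrap the call in
    # a transaction when using recursive=True.
    #
    #     blog = Blog.get(title='stale')
    #     blog.delete_instance(recursive=True)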
def refresh(self, *fields):
fields = fields or self._meta.get_field_names()
obj = self.select(fields).get(**{self._meta.pk_name: self.get_pk()})
for field_name in fields:
setattr(self, field_name, getattr(obj, field_name))
|
{
"content_hash": "20746062f313ab31182d77313cd8b082",
"timestamp": "",
"source": "github",
"line_count": 2587,
"max_line_length": 112,
"avg_line_length": 32.345187475840746,
"alnum_prop": 0.560739510259689,
"repo_name": "giserh/peewee",
"id": "f0690bb3ec1196e6707314dc1efada696b9c1838",
"size": "83823",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peewee.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "797"
},
{
"name": "HTML",
"bytes": "4486"
},
{
"name": "Python",
"bytes": "244410"
}
],
"symlink_target": ""
}
|
from pecan import hooks
from neutron.api import api_common
from neutron import manager
from neutron.pecan_wsgi.hooks import policy_enforcement
from neutron.pecan_wsgi.hooks import utils
# TODO(blogan): ideally we would get the pagination and sorting helpers
# from the controller, but the controllers are instantiated at startup
# rather than per request, so sharing helpers across requests would cause
# race conditions. Each request/response flow needs its own pagination and
# sorting helper, so we pass them through the request context instead.
def _get_pagination_helper(request, controller):
if 'pagination_helper' in request.context:
return request.context['pagination_helper']
if not controller.allow_pagination:
helper = api_common.NoPaginationHelper(request, controller.primary_key)
elif controller.native_pagination:
helper = api_common.PaginationNativeHelper(request,
controller.primary_key)
else:
helper = api_common.PaginationEmulatedHelper(request,
controller.primary_key)
request.context['pagination_helper'] = helper
return helper
def _get_sorting_helper(request, controller):
if 'sorting_helper' in request.context:
return request.context['sorting_helper']
if not controller.allow_sorting:
helper = api_common.NoSortingHelper(request, controller.resource_info)
elif controller.native_sorting:
helper = api_common.SortingNativeHelper(request,
controller.resource_info)
else:
helper = api_common.SortingEmulatedHelper(request,
controller.resource_info)
request.context['sorting_helper'] = helper
return helper
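# A small sketch of the caching behaviour above: both helper getters memoize
# their result on the request context, so repeated calls within a single
# request/response flow return the same helper instance.
#
#     helper_a = _get_sorting_helper(state.request, controller)
#     helper_b = _get_sorting_helper(state.request, controller)
#     assert helper_a is helper_b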
def _listify(thing):
return thing if isinstance(thing, list) else [thing]
def _set_fields(state, controller):
params = state.request.params.mixed()
fields = params.get('fields', [])
# if only one fields query parameter is passed, pecan will not put
# that parameter in a list, so we need to convert it into a list
fields = _listify(fields)
combined_fields, added_fields = controller.build_field_list(fields)
state.request.context['query_params']['fields'] = combined_fields
state.request.context['added_fields'] = added_fields
return combined_fields, added_fields
def _set_filters(state, controller):
params = state.request.params.mixed()
filters = api_common.get_filters_from_dict(
{k: _listify(v) for k, v in params.items()},
controller.resource_info,
skips=['fields', 'sort_key', 'sort_dir',
'limit', 'marker', 'page_reverse'],
is_filter_validation_supported=controller.filter_validation)
return filters
class QueryParametersHook(hooks.PecanHook):
    # NOTE(blogan): this hook needs to run after the policy enforcement
    # hook; 'after' methods are run in reverse priority order.
priority = policy_enforcement.PolicyHook.priority - 1
def before(self, state):
self._process_if_match_headers(state)
state.request.context['query_params'] = {}
if state.request.method != 'GET':
return
collection = state.request.context.get('collection')
if not collection:
return
controller = utils.get_controller(state)
combined_fields, added_fields = _set_fields(state, controller)
filters = _set_filters(state, controller)
query_params = {'fields': combined_fields, 'filters': filters}
pagination_helper = _get_pagination_helper(state.request, controller)
sorting_helper = _get_sorting_helper(state.request, controller)
sorting_helper.update_args(query_params)
sorting_helper.update_fields(query_params.get('fields', []),
added_fields)
pagination_helper.update_args(query_params)
pagination_helper.update_fields(query_params.get('fields', []),
added_fields)
state.request.context['query_params'] = query_params
def _process_if_match_headers(self, state):
collection = state.request.context.get('collection')
if not collection:
return
# add in if-match criterion to the context if present
revision_number = api_common.check_request_for_revision_constraint(
state.request)
if revision_number is None:
return
state.request.context['neutron_context'].set_transaction_constraint(
collection, state.request.context['resource_id'], revision_number)
def after(self, state):
resource = state.request.context.get('resource')
collection = state.request.context.get('collection')
# NOTE(blogan): don't paginate extension list or non-GET requests
if (not resource or resource == 'extension' or
state.request.method != 'GET'):
return
try:
data = state.response.json
except ValueError:
return
# Do not attempt to paginate if the body is not a list of entities
if not data or resource in data or collection not in data:
return
controller = manager.NeutronManager.get_controller_for_resource(
collection)
sorting_helper = _get_sorting_helper(state.request, controller)
pagination_helper = _get_pagination_helper(state.request, controller)
obj_list = sorting_helper.sort(data[collection])
obj_list = pagination_helper.paginate(obj_list)
resp_body = {collection: obj_list}
pagination_links = pagination_helper.get_links(obj_list)
if pagination_links:
resp_body['_'.join([collection, 'links'])] = pagination_links
state.response.json = resp_body
|
{
"content_hash": "0a9b2d392eb04540e10c5af36edeed75",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 79,
"avg_line_length": 43.33576642335766,
"alnum_prop": 0.6523496715512885,
"repo_name": "huntxu/neutron",
"id": "800aa6ccde051acb52df4ff26e4db2f4cf37ecf3",
"size": "6510",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/pecan_wsgi/hooks/query_parameters.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "11111676"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
import mock
from cinder.compute import nova
from cinder import context
from cinder import test
class NovaClientTestCase(test.TestCase):
def setUp(self):
super(NovaClientTestCase, self).setUp()
self.ctx = context.RequestContext('regularuser', 'e3f0833dc08b4cea',
auth_token='token', is_admin=False)
self.ctx.service_catalog = \
[{'type': 'compute', 'name': 'nova', 'endpoints':
[{'publicURL': 'http://novahost:8774/v2/e3f0833dc08b4cea'}]},
{'type': 'identity', 'name': 'keystone', 'endpoints':
[{'publicURL': 'http://keystonehost:5000/v2.0'}]}]
self.override_config('nova_endpoint_template',
'http://novahost:8774/v2/%(project_id)s')
self.override_config('nova_endpoint_admin_template',
'http://novaadmhost:4778/v2/%(project_id)s')
self.override_config('os_privileged_user_name', 'adminuser')
self.override_config('os_privileged_user_password', 'strongpassword')
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_regular(self, p_client):
nova.novaclient(self.ctx)
p_client.assert_called_once_with(
'regularuser', 'token', None, region_name=None,
auth_url='http://novahost:8774/v2/e3f0833dc08b4cea',
insecure=False, endpoint_type='publicURL', cacert=None,
timeout=None, extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_admin_endpoint(self, p_client):
nova.novaclient(self.ctx, admin_endpoint=True)
p_client.assert_called_once_with(
'regularuser', 'token', None, region_name=None,
auth_url='http://novaadmhost:4778/v2/e3f0833dc08b4cea',
insecure=False, endpoint_type='adminURL', cacert=None,
timeout=None, extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_privileged_user(self, p_client):
nova.novaclient(self.ctx, privileged_user=True)
p_client.assert_called_once_with(
'adminuser', 'strongpassword', None, region_name=None,
auth_url='http://keystonehost:5000/v2.0',
insecure=False, endpoint_type='publicURL', cacert=None,
timeout=None, extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_privileged_user_custom_auth_url(self, p_client):
self.override_config('os_privileged_user_auth_url',
'http://privatekeystonehost:5000/v2.0')
nova.novaclient(self.ctx, privileged_user=True)
p_client.assert_called_once_with(
'adminuser', 'strongpassword', None, region_name=None,
auth_url='http://privatekeystonehost:5000/v2.0',
insecure=False, endpoint_type='publicURL', cacert=None,
timeout=None, extensions=nova.nova_extensions)
@mock.patch('novaclient.v1_1.client.Client')
def test_nova_client_custom_region(self, p_client):
self.override_config('os_region_name', 'farfaraway')
nova.novaclient(self.ctx)
p_client.assert_called_once_with(
'regularuser', 'token', None, region_name='farfaraway',
auth_url='http://novahost:8774/v2/e3f0833dc08b4cea',
insecure=False, endpoint_type='publicURL', cacert=None,
timeout=None, extensions=nova.nova_extensions)
class FakeNovaClient(object):
class Volumes(object):
def __getattr__(self, item):
return None
def __init__(self):
self.volumes = self.Volumes()
def create_volume_snapshot(self, *args, **kwargs):
pass
def delete_volume_snapshot(self, *args, **kwargs):
pass
class NovaApiTestCase(test.TestCase):
def setUp(self):
super(NovaApiTestCase, self).setUp()
self.api = nova.API()
self.novaclient = FakeNovaClient()
self.ctx = context.get_admin_context()
def test_update_server_volume(self):
with mock.patch.object(nova, 'novaclient') as mock_novaclient, \
mock.patch.object(self.novaclient.volumes,
'update_server_volume') as \
mock_update_server_volume:
mock_novaclient.return_value = self.novaclient
self.api.update_server_volume(self.ctx, 'server_id',
'attach_id', 'new_volume_id')
mock_novaclient.assert_called_once_with(self.ctx)
mock_update_server_volume.assert_called_once_with(
'server_id',
'attach_id',
'new_volume_id'
)
|
{
"content_hash": "ef35afbe03ff03acad4d988a5ab03e89",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 77,
"avg_line_length": 41.75438596491228,
"alnum_prop": 0.6130252100840337,
"repo_name": "petrutlucian94/cinder",
"id": "4521fdd011f1e52132959f68f0963fab7ed743cb",
"size": "5365",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/compute/test_nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12246766"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
"""
WSGI config for theo_site project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "theo_site.settings")
application = get_wsgi_application()
|
{
"content_hash": "d0d749424675170ccfda84b823503bee",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.6875,
"alnum_prop": 0.7670886075949367,
"repo_name": "theonaun/theo_site",
"id": "d04ee53c01a054831e94d1b28c3cf5e463309f54",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "theo_site/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1527"
},
{
"name": "HTML",
"bytes": "52243"
},
{
"name": "JavaScript",
"bytes": "3718"
},
{
"name": "Python",
"bytes": "175143"
},
{
"name": "Shell",
"bytes": "195"
}
],
"symlink_target": ""
}
|
"""
Baseline cases
==============
This script generates a list of all subjects that have a valid baseline.
Usage:
python baseline_cases.py
"""
from __future__ import print_function
import os
import sys
import pandas
import redcap
def get_project(args):
# First REDCap connection for the Summary project (this is where we put data)
summary_key_file = open(os.path.join( os.path.expanduser("~"), '.server_config/redcap-dataentry-token' ), 'r')
summary_api_key = summary_key_file.read().strip()
rc_summary = redcap.Project('https://ncanda.sri.com/redcap/api/', summary_api_key, verify_ssl=False)
    # Get all np reports for baseline and 1yr
visit = rc_summary.export_records(fields=['study_id', 'exclude',
'visit_ignore___yes'],
forms=['mr_session_report','visit_date'],
events=['baseline_visit_arm_1'],
format='df')
return visit
def np_filter_dataframe(dataframe):
# Create filters for cases that are included
case_included = dataframe.exclude != 1 # subject excluded from NCANDA Study
visit_included = dataframe.visit_ignore___yes != 1 # subject did not have a valid visit for this event
# Apply filters for results
included = dataframe[case_included]
results = included[visit_included]
return results
def main(args):
if args.verbose:
print("Connecting to REDCap...")
project = get_project(args)
if args.verbose:
print("Filtering dataframe...")
if args.subjectlist:
with open(args.subjectlist, 'r') as f:
subject_list = [line.strip() for line in f]
project = project[project['mri_xnat_sid'].isin(subject_list)]
results = np_filter_dataframe(project)
if args.verbose:
print("Writing results to {}...".format(args.outfile))
# Write out results
results.to_csv(os.path.join(args.csvdir, args.outfile), columns = ['exclude',
'visit_ignore___yes', 'mri_xnat_sid','mri_xnat_eids'])
if __name__ == '__main__':
import argparse
formatter = argparse.RawDescriptionHelpFormatter
default = 'default: %(default)s'
parser = argparse.ArgumentParser(prog="baseline_cases.py",
description=__doc__,
formatter_class=formatter)
    parser.add_argument('-c', '--csvdir', action="store", default='',
                        help="Directory where CSV will be stored.")
    parser.add_argument('-s', '--subjectlist', dest="subjectlist", default=None,
                        help="Optional file with one subject ID (mri_xnat_sid) "
                             "per line; restricts the report to those subjects.")
parser.add_argument('-o', '--outfile', dest="outfile",
help="File to write out. {}".format(default),
default='baseline_1yr_cases.csv')
parser.add_argument('-v', '--verbose', dest="verbose",
help="Turn on verbose", action='store_true')
argv = parser.parse_args()
sys.exit(main(args=argv))
|
{
"content_hash": "68d35e486871ecb6bb57a2f63fe7c602",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 114,
"avg_line_length": 40.69444444444444,
"alnum_prop": 0.5941979522184301,
"repo_name": "sibis-platform/ncanda-data-integration",
"id": "3e50f792598b080b7ebd2782475dcf704535b761",
"size": "3078",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/reporting/baseline_cases.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "907897"
},
{
"name": "Python",
"bytes": "1067601"
},
{
"name": "Roff",
"bytes": "276"
},
{
"name": "SAS",
"bytes": "186381"
},
{
"name": "Shell",
"bytes": "43610"
}
],
"symlink_target": ""
}
|
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
Test.Summary = 'Test that Trafficserver starts with different thread configurations.'
Test.ContinueOnFail = True
ts = Test.MakeATSProcess('ts')
server = Test.MakeOriginServer('server')
Test.testName = ''
request_header = {
'headers': 'GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
response_header = {
'headers': 'HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n',
'timestamp': '1469733493.993',
'body': ''
}
server.addResponse("sessionfile.log", request_header, response_header)
ts.Disk.records_config.update({
'proxy.config.exec_thread.autoconfig': 0,
'proxy.config.exec_thread.autoconfig.scale': 1.5,
'proxy.config.exec_thread.limit': 100,
'proxy.config.accept_threads': 0,
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'iocore_thread_start|iocore_net_accept_start'})
ts.Disk.remap_config.AddLine(
'map http://www.example.com http://127.0.0.1:{0}'.format(server.Variables.Port)
)
ts.Setup.CopyAs('check_threads.py', Test.RunDirectory)
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'curl --proxy http://127.0.0.1:{0} http://www.example.com -H "Proxy-Connection: Keep-Alive" --verbose'.format(ts.Variables.port)
tr.Processes.Default.ReturnCode = 0
tr.Processes.Default.StartBefore(ts)
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.Streams.stderr = 'gold/http_200.gold'
tr.StillRunningAfter = server
tr.StillRunningAfter = ts
tr = Test.AddTestRun()
tr.Processes.Default.Command = 'python3 check_threads.py -t {0} -e {1} -a {2}'.format(ts.Env['TS_ROOT'], 100, 0)
tr.Processes.Default.ReturnCode = 0
|
{
"content_hash": "398b23ffd36476e0a0818bde0553e926",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 159,
"avg_line_length": 37.40909090909091,
"alnum_prop": 0.7245848521668692,
"repo_name": "davidbz/trafficserver",
"id": "b93f614200d10321c0ca109325a0cd70096b2dc9",
"size": "2469",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/gold_tests/thread_config/thread_100_0.test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1332466"
},
{
"name": "C++",
"bytes": "14185272"
},
{
"name": "CMake",
"bytes": "18489"
},
{
"name": "Dockerfile",
"bytes": "3324"
},
{
"name": "Java",
"bytes": "9881"
},
{
"name": "Lua",
"bytes": "46851"
},
{
"name": "M4",
"bytes": "192615"
},
{
"name": "Makefile",
"bytes": "210210"
},
{
"name": "Objective-C",
"bytes": "15182"
},
{
"name": "Perl",
"bytes": "119201"
},
{
"name": "Python",
"bytes": "731661"
},
{
"name": "Shell",
"bytes": "126015"
},
{
"name": "TSQL",
"bytes": "5188"
},
{
"name": "Vim script",
"bytes": "192"
}
],
"symlink_target": ""
}
|
import re
text = open('mybooks.xml').read()
found = re.findall('<title>(.*)</title>', text)
for title in found: print(title)
|
{
"content_hash": "419ca2c855471ad651e614ae1e62c9e6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 47,
"avg_line_length": 31.5,
"alnum_prop": 0.6587301587301587,
"repo_name": "dreadrel/UWF_2014_spring_COP3990C-2507",
"id": "2b8ee9f37c7137d56480c685839d226cae090ee0",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notebooks/scripts/book_code/code/patternparse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1796"
},
{
"name": "Python",
"bytes": "493591"
}
],
"symlink_target": ""
}
|
"""
Tests For CellsManager
"""
import copy
import datetime
from oslo.config import cfg
from nova.cells import messaging
from nova.cells import utils as cells_utils
from nova import context
from nova.openstack.common import timeutils
from nova import test
from nova.tests.cells import fakes
from nova.tests import fake_instance_actions
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
FAKE_COMPUTE_NODES = [dict(id=1), dict(id=2)]
FAKE_SERVICES = [dict(id=1, host='host1',
compute_node=[FAKE_COMPUTE_NODES[0]]),
dict(id=2, host='host2',
compute_node=[FAKE_COMPUTE_NODES[1]]),
dict(id=3, host='host3', compute_node=[])]
FAKE_TASK_LOGS = [dict(id=1, host='host1'),
dict(id=2, host='host2')]
class CellsManagerClassTestCase(test.NoDBTestCase):
"""Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
fakes.init(self)
# pick a child cell to use for tests.
self.our_cell = 'grandchild-cell1'
self.cells_manager = fakes.get_cells_manager(self.our_cell)
self.msg_runner = self.cells_manager.msg_runner
self.state_manager = fakes.get_state_manager(self.our_cell)
self.driver = self.cells_manager.driver
self.ctxt = 'fake_context'
def _get_fake_response(self, raw_response=None, exc=False):
if exc:
return messaging.Response('fake', test.TestingException(),
True)
if raw_response is None:
raw_response = 'fake-response'
return messaging.Response('fake', raw_response, False)
def test_get_cell_info_for_neighbors(self):
self.mox.StubOutWithMock(self.cells_manager.state_manager,
'get_cell_info_for_neighbors')
self.cells_manager.state_manager.get_cell_info_for_neighbors()
self.mox.ReplayAll()
self.cells_manager.get_cell_info_for_neighbors(self.ctxt)
def test_post_start_hook_child_cell(self):
self.mox.StubOutWithMock(self.driver, 'start_servers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.cells_manager, '_update_our_parents')
self.driver.start_servers(self.msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
self.cells_manager._update_our_parents(self.ctxt)
self.mox.ReplayAll()
self.cells_manager.post_start_hook()
def test_post_start_hook_middle_cell(self):
cells_manager = fakes.get_cells_manager('child-cell2')
msg_runner = cells_manager.msg_runner
driver = cells_manager.driver
self.mox.StubOutWithMock(driver, 'start_servers')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capabilities')
self.mox.StubOutWithMock(msg_runner,
'ask_children_for_capacities')
driver.start_servers(msg_runner)
context.get_admin_context().AndReturn(self.ctxt)
msg_runner.ask_children_for_capabilities(self.ctxt)
msg_runner.ask_children_for_capacities(self.ctxt)
self.mox.ReplayAll()
cells_manager.post_start_hook()
def test_update_our_parents(self):
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capabilities')
self.mox.StubOutWithMock(self.msg_runner,
'tell_parents_our_capacities')
self.msg_runner.tell_parents_our_capabilities(self.ctxt)
self.msg_runner.tell_parents_our_capacities(self.ctxt)
self.mox.ReplayAll()
self.cells_manager._update_our_parents(self.ctxt)
def test_schedule_run_instance(self):
host_sched_kwargs = 'fake_host_sched_kwargs_silently_passed'
self.mox.StubOutWithMock(self.msg_runner, 'schedule_run_instance')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.schedule_run_instance(self.ctxt, our_cell,
host_sched_kwargs)
self.mox.ReplayAll()
self.cells_manager.schedule_run_instance(self.ctxt,
host_sched_kwargs=host_sched_kwargs)
def test_build_instances(self):
build_inst_kwargs = {'instances': [1, 2]}
self.mox.StubOutWithMock(self.msg_runner, 'build_instances')
our_cell = self.msg_runner.state_manager.get_my_state()
self.msg_runner.build_instances(self.ctxt, our_cell, build_inst_kwargs)
self.mox.ReplayAll()
self.cells_manager.build_instances(self.ctxt,
build_inst_kwargs=build_inst_kwargs)
def test_run_compute_api_method(self):
# Args should just be silently passed through
cell_name = 'fake-cell-name'
method_info = 'fake-method-info'
self.mox.StubOutWithMock(self.msg_runner,
'run_compute_api_method')
fake_response = self._get_fake_response()
self.msg_runner.run_compute_api_method(self.ctxt,
cell_name,
method_info,
True).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.run_compute_api_method(
self.ctxt, cell_name=cell_name, method_info=method_info,
call=True)
self.assertEqual('fake-response', response)
def test_instance_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_update_at_top')
self.msg_runner.instance_update_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_update_at_top(self.ctxt,
instance='fake-instance')
def test_instance_destroy_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'instance_destroy_at_top')
self.msg_runner.instance_destroy_at_top(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.instance_destroy_at_top(self.ctxt,
instance='fake-instance')
def test_instance_delete_everywhere(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_delete_everywhere')
self.msg_runner.instance_delete_everywhere(self.ctxt,
'fake-instance',
'fake-type')
self.mox.ReplayAll()
self.cells_manager.instance_delete_everywhere(
self.ctxt, instance='fake-instance',
delete_type='fake-type')
def test_instance_fault_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_fault_create_at_top')
self.msg_runner.instance_fault_create_at_top(self.ctxt,
'fake-fault')
self.mox.ReplayAll()
self.cells_manager.instance_fault_create_at_top(
self.ctxt, instance_fault='fake-fault')
def test_bw_usage_update_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'bw_usage_update_at_top')
self.msg_runner.bw_usage_update_at_top(self.ctxt,
'fake-bw-info')
self.mox.ReplayAll()
self.cells_manager.bw_usage_update_at_top(
self.ctxt, bw_update_info='fake-bw-info')
def test_heal_instances(self):
self.flags(instance_updated_at_threshold=1000,
instance_update_num_instances=2,
group='cells')
fake_context = context.RequestContext('fake', 'fake')
stalled_time = timeutils.utcnow()
updated_since = stalled_time - datetime.timedelta(seconds=1000)
def utcnow():
return stalled_time
call_info = {'get_instances': 0, 'sync_instances': []}
instances = ['instance1', 'instance2', 'instance3']
def get_instances_to_sync(context, **kwargs):
self.assertEqual(context, fake_context)
call_info['shuffle'] = kwargs.get('shuffle')
call_info['project_id'] = kwargs.get('project_id')
call_info['updated_since'] = kwargs.get('updated_since')
call_info['get_instances'] += 1
return iter(instances)
def instance_get_by_uuid(context, uuid):
return instances[int(uuid[-1]) - 1]
def sync_instance(context, instance):
self.assertEqual(context, fake_context)
call_info['sync_instances'].append(instance)
self.stubs.Set(cells_utils, 'get_instances_to_sync',
get_instances_to_sync)
self.stubs.Set(self.cells_manager.db, 'instance_get_by_uuid',
instance_get_by_uuid)
self.stubs.Set(self.cells_manager, '_sync_instance',
sync_instance)
self.stubs.Set(timeutils, 'utcnow', utcnow)
self.cells_manager._heal_instances(fake_context)
self.assertEqual(call_info['shuffle'], True)
self.assertIsNone(call_info['project_id'])
self.assertEqual(call_info['updated_since'], updated_since)
self.assertEqual(call_info['get_instances'], 1)
# Only first 2
self.assertEqual(call_info['sync_instances'],
instances[:2])
call_info['sync_instances'] = []
self.cells_manager._heal_instances(fake_context)
self.assertEqual(call_info['shuffle'], True)
self.assertIsNone(call_info['project_id'])
self.assertEqual(call_info['updated_since'], updated_since)
self.assertEqual(call_info['get_instances'], 2)
# Now the last 1 and the first 1
self.assertEqual(call_info['sync_instances'],
[instances[-1], instances[0]])
def test_sync_instances(self):
self.mox.StubOutWithMock(self.msg_runner,
'sync_instances')
self.msg_runner.sync_instances(self.ctxt, 'fake-project',
'fake-time', 'fake-deleted')
self.mox.ReplayAll()
self.cells_manager.sync_instances(self.ctxt,
project_id='fake-project',
updated_since='fake-time',
deleted='fake-deleted')
def test_service_get_all(self):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of services.
# Manager should turn these into a single list of responses.
for i in xrange(3):
cell_name = 'path!to!cell%i' % i
services = []
for service in FAKE_SERVICES:
services.append(copy.deepcopy(service))
expected_service = copy.deepcopy(service)
cells_utils.add_cell_to_service(expected_service, cell_name)
expected_response.append(expected_service)
response = messaging.Response(cell_name, services, False)
responses.append(response)
self.mox.StubOutWithMock(self.msg_runner,
'service_get_all')
self.msg_runner.service_get_all(self.ctxt,
'fake-filters').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.service_get_all(self.ctxt,
filters='fake-filters')
self.assertEqual(expected_response, response)
def test_service_get_by_compute_host(self):
self.mox.StubOutWithMock(self.msg_runner,
'service_get_by_compute_host')
fake_cell = 'fake-cell'
fake_response = messaging.Response(fake_cell, FAKE_SERVICES[0],
False)
expected_response = copy.deepcopy(FAKE_SERVICES[0])
cells_utils.add_cell_to_service(expected_response, fake_cell)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
self.msg_runner.service_get_by_compute_host(self.ctxt,
fake_cell, 'fake-host').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.service_get_by_compute_host(self.ctxt,
host_name=cell_and_host)
self.assertEqual(expected_response, response)
def test_get_host_uptime(self):
fake_cell = 'parent!fake-cell'
fake_host = 'fake-host'
fake_cell_and_host = cells_utils.cell_with_item(fake_cell, fake_host)
host_uptime = (" 08:32:11 up 93 days, 18:25, 12 users, load average:"
" 0.20, 0.12, 0.14")
fake_response = messaging.Response(fake_cell, host_uptime, False)
self.mox.StubOutWithMock(self.msg_runner,
'get_host_uptime')
self.msg_runner.get_host_uptime(self.ctxt, fake_cell, fake_host).\
AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.get_host_uptime(self.ctxt,
fake_cell_and_host)
self.assertEqual(host_uptime, response)
def test_service_update(self):
fake_cell = 'fake-cell'
fake_response = messaging.Response(
fake_cell, FAKE_SERVICES[0], False)
expected_response = copy.deepcopy(FAKE_SERVICES[0])
cells_utils.add_cell_to_service(expected_response, fake_cell)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
params_to_update = {'disabled': True}
self.mox.StubOutWithMock(self.msg_runner, 'service_update')
self.msg_runner.service_update(self.ctxt,
fake_cell, 'fake-host', 'nova-api',
params_to_update).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.service_update(
self.ctxt, host_name=cell_and_host, binary='nova-api',
params_to_update=params_to_update)
self.assertEqual(expected_response, response)
def test_proxy_rpc_to_manager(self):
self.mox.StubOutWithMock(self.msg_runner,
'proxy_rpc_to_manager')
fake_response = self._get_fake_response()
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
topic = "%s.%s" % (CONF.compute_topic, cell_and_host)
self.msg_runner.proxy_rpc_to_manager(self.ctxt, 'fake-cell',
'fake-host', topic, 'fake-rpc-msg',
True, -1).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.proxy_rpc_to_manager(self.ctxt,
topic=topic, rpc_message='fake-rpc-msg', call=True,
timeout=-1)
self.assertEqual('fake-response', response)
def _build_task_log_responses(self, num):
responses = []
expected_response = []
        # One response per cell; each response is a list of task log
        # entries. The manager should flatten these into a single list of
        # task log entries.
for i in xrange(num):
cell_name = 'path!to!cell%i' % i
task_logs = []
for task_log in FAKE_TASK_LOGS:
task_logs.append(copy.deepcopy(task_log))
expected_task_log = copy.deepcopy(task_log)
cells_utils.add_cell_to_task_log(expected_task_log,
cell_name)
expected_response.append(expected_task_log)
response = messaging.Response(cell_name, task_logs, False)
responses.append(response)
return expected_response, responses
def test_task_log_get_all(self):
expected_response, responses = self._build_task_log_responses(3)
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, None,
'fake-name', 'fake-begin',
'fake-end', host=None, state=None).AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end')
self.assertEqual(expected_response, response)
def test_task_log_get_all_with_filters(self):
expected_response, responses = self._build_task_log_responses(1)
cell_and_host = cells_utils.cell_with_item('fake-cell', 'fake-host')
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
'fake-name', 'fake-begin', 'fake-end', host='fake-host',
state='fake-state').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end',
host=cell_and_host, state='fake-state')
self.assertEqual(expected_response, response)
def test_task_log_get_all_with_cell_but_no_host_filters(self):
expected_response, responses = self._build_task_log_responses(1)
# Host filter only has cell name.
cell_and_host = 'fake-cell'
self.mox.StubOutWithMock(self.msg_runner,
'task_log_get_all')
self.msg_runner.task_log_get_all(self.ctxt, 'fake-cell',
'fake-name', 'fake-begin', 'fake-end', host=None,
state='fake-state').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.task_log_get_all(self.ctxt,
task_name='fake-name',
period_beginning='fake-begin', period_ending='fake-end',
host=cell_and_host, state='fake-state')
self.assertEqual(expected_response, response)
def test_compute_node_get_all(self):
responses = []
expected_response = []
# 3 cells... so 3 responses. Each response is a list of computes.
# Manager should turn these into a single list of responses.
for i in xrange(3):
cell_name = 'path!to!cell%i' % i
compute_nodes = []
for compute_node in FAKE_COMPUTE_NODES:
compute_nodes.append(copy.deepcopy(compute_node))
expected_compute_node = copy.deepcopy(compute_node)
cells_utils.add_cell_to_compute_node(expected_compute_node,
cell_name)
expected_response.append(expected_compute_node)
response = messaging.Response(cell_name, compute_nodes, False)
responses.append(response)
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_get_all')
self.msg_runner.compute_node_get_all(self.ctxt,
hypervisor_match='fake-match').AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_get_all(self.ctxt,
hypervisor_match='fake-match')
self.assertEqual(expected_response, response)
def test_compute_node_stats(self):
raw_resp1 = {'key1': 1, 'key2': 2}
raw_resp2 = {'key2': 1, 'key3': 2}
raw_resp3 = {'key3': 1, 'key4': 2}
responses = [messaging.Response('cell1', raw_resp1, False),
messaging.Response('cell2', raw_resp2, False),
messaging.Response('cell2', raw_resp3, False)]
expected_resp = {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_stats')
self.msg_runner.compute_node_stats(self.ctxt).AndReturn(responses)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_stats(self.ctxt)
self.assertEqual(expected_resp, response)
def test_compute_node_get(self):
fake_cell = 'fake-cell'
fake_response = messaging.Response(fake_cell,
FAKE_COMPUTE_NODES[0],
False)
expected_response = copy.deepcopy(FAKE_COMPUTE_NODES[0])
cells_utils.add_cell_to_compute_node(expected_response, fake_cell)
cell_and_id = cells_utils.cell_with_item(fake_cell, 'fake-id')
self.mox.StubOutWithMock(self.msg_runner,
'compute_node_get')
self.msg_runner.compute_node_get(self.ctxt,
'fake-cell', 'fake-id').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.compute_node_get(self.ctxt,
compute_id=cell_and_id)
self.assertEqual(expected_response, response)
def test_actions_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
fake_response = messaging.Response('fake-cell', [fake_act], False)
expected_response = [fake_act]
self.mox.StubOutWithMock(self.msg_runner, 'actions_get')
self.msg_runner.actions_get(self.ctxt, 'fake-cell',
'fake-uuid').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.actions_get(self.ctxt, 'fake-cell',
'fake-uuid')
self.assertEqual(expected_response, response)
def test_action_get_by_request_id(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_req_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_act = fake_instance_actions.FAKE_ACTIONS[fake_uuid][fake_req_id]
fake_response = messaging.Response('fake-cell', fake_act, False)
expected_response = fake_act
self.mox.StubOutWithMock(self.msg_runner, 'action_get_by_request_id')
self.msg_runner.action_get_by_request_id(self.ctxt, 'fake-cell',
'fake-uuid', 'req-fake').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.action_get_by_request_id(self.ctxt,
'fake-cell',
'fake-uuid',
'req-fake')
self.assertEqual(expected_response, response)
def test_action_events_get(self):
fake_action_id = fake_instance_actions.FAKE_ACTION_ID1
fake_events = fake_instance_actions.FAKE_EVENTS[fake_action_id]
fake_response = messaging.Response('fake-cell', fake_events, False)
expected_response = fake_events
self.mox.StubOutWithMock(self.msg_runner, 'action_events_get')
self.msg_runner.action_events_get(self.ctxt, 'fake-cell',
'fake-action').AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.action_events_get(self.ctxt, 'fake-cell',
'fake-action')
self.assertEqual(expected_response, response)
def test_consoleauth_delete_tokens(self):
instance_uuid = 'fake-instance-uuid'
self.mox.StubOutWithMock(self.msg_runner,
'consoleauth_delete_tokens')
self.msg_runner.consoleauth_delete_tokens(self.ctxt, instance_uuid)
self.mox.ReplayAll()
self.cells_manager.consoleauth_delete_tokens(self.ctxt,
instance_uuid=instance_uuid)
def test_get_capacities(self):
cell_name = 'cell_name'
response = {"ram_free":
{"units_by_mb": {"64": 20, "128": 10}, "total_mb": 1491}}
self.mox.StubOutWithMock(self.state_manager,
'get_capacities')
self.state_manager.get_capacities(cell_name).AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.get_capacities(self.ctxt, cell_name))
def test_validate_console_port(self):
instance_uuid = 'fake-instance-uuid'
cell_name = 'fake-cell-name'
instance = {'cell_name': cell_name}
console_port = 'fake-console-port'
console_type = 'fake-console-type'
self.mox.StubOutWithMock(self.msg_runner,
'validate_console_port')
self.mox.StubOutWithMock(self.cells_manager.db,
'instance_get_by_uuid')
fake_response = self._get_fake_response()
self.cells_manager.db.instance_get_by_uuid(self.ctxt,
instance_uuid).AndReturn(instance)
self.msg_runner.validate_console_port(self.ctxt, cell_name,
instance_uuid, console_port,
console_type).AndReturn(fake_response)
self.mox.ReplayAll()
response = self.cells_manager.validate_console_port(self.ctxt,
instance_uuid=instance_uuid, console_port=console_port,
console_type=console_type)
self.assertEqual('fake-response', response)
def test_bdm_update_or_create_at_top(self):
self.mox.StubOutWithMock(self.msg_runner,
'bdm_update_or_create_at_top')
self.msg_runner.bdm_update_or_create_at_top(self.ctxt,
'fake-bdm',
create='foo')
self.mox.ReplayAll()
self.cells_manager.bdm_update_or_create_at_top(self.ctxt,
'fake-bdm',
create='foo')
def test_bdm_destroy_at_top(self):
self.mox.StubOutWithMock(self.msg_runner, 'bdm_destroy_at_top')
self.msg_runner.bdm_destroy_at_top(self.ctxt,
'fake_instance_uuid',
device_name='fake_device_name',
volume_id='fake_volume_id')
self.mox.ReplayAll()
self.cells_manager.bdm_destroy_at_top(self.ctxt,
'fake_instance_uuid',
device_name='fake_device_name',
volume_id='fake_volume_id')
def test_get_migrations(self):
filters = {'status': 'confirmed'}
cell1_migrations = [{'id': 123}]
cell2_migrations = [{'id': 456}]
fake_responses = [self._get_fake_response(cell1_migrations),
self._get_fake_response(cell2_migrations)]
self.mox.StubOutWithMock(self.msg_runner,
'get_migrations')
self.msg_runner.get_migrations(self.ctxt, None, False, filters).\
AndReturn(fake_responses)
self.mox.ReplayAll()
response = self.cells_manager.get_migrations(self.ctxt, filters)
self.assertEqual([cell1_migrations[0], cell2_migrations[0]], response)
def test_get_migrations_for_a_given_cell(self):
filters = {'status': 'confirmed', 'cell_name': 'ChildCell1'}
target_cell = '%s%s%s' % (CONF.cells.name, '!', filters['cell_name'])
migrations = [{'id': 123}]
fake_responses = [self._get_fake_response(migrations)]
self.mox.StubOutWithMock(self.msg_runner,
'get_migrations')
self.msg_runner.get_migrations(self.ctxt, target_cell, False,
filters).AndReturn(fake_responses)
self.mox.ReplayAll()
response = self.cells_manager.get_migrations(self.ctxt, filters)
self.assertEqual(migrations, response)
def test_instance_update_from_api(self):
self.mox.StubOutWithMock(self.msg_runner,
'instance_update_from_api')
self.msg_runner.instance_update_from_api(self.ctxt,
'fake-instance',
'exp_vm', 'exp_task',
'admin_reset')
self.mox.ReplayAll()
self.cells_manager.instance_update_from_api(
self.ctxt, instance='fake-instance',
expected_vm_state='exp_vm',
expected_task_state='exp_task',
admin_state_reset='admin_reset')
def test_start_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'start_instance')
self.msg_runner.start_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.start_instance(self.ctxt, instance='fake-instance')
def test_stop_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'stop_instance')
self.msg_runner.stop_instance(self.ctxt, 'fake-instance',
do_cast='meow')
self.mox.ReplayAll()
self.cells_manager.stop_instance(self.ctxt,
instance='fake-instance',
do_cast='meow')
def test_cell_create(self):
values = 'values'
response = 'created_cell'
self.mox.StubOutWithMock(self.state_manager,
'cell_create')
self.state_manager.cell_create(self.ctxt, values).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_create(self.ctxt, values))
def test_cell_update(self):
cell_name = 'cell_name'
values = 'values'
response = 'updated_cell'
self.mox.StubOutWithMock(self.state_manager,
'cell_update')
self.state_manager.cell_update(self.ctxt, cell_name, values).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_update(self.ctxt, cell_name,
values))
def test_cell_delete(self):
cell_name = 'cell_name'
response = 1
self.mox.StubOutWithMock(self.state_manager,
'cell_delete')
self.state_manager.cell_delete(self.ctxt, cell_name).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_delete(self.ctxt, cell_name))
def test_cell_get(self):
cell_name = 'cell_name'
response = 'cell_info'
self.mox.StubOutWithMock(self.state_manager,
'cell_get')
self.state_manager.cell_get(self.ctxt, cell_name).\
AndReturn(response)
self.mox.ReplayAll()
self.assertEqual(response,
self.cells_manager.cell_get(self.ctxt, cell_name))
def test_reboot_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'reboot_instance')
self.msg_runner.reboot_instance(self.ctxt, 'fake-instance',
'HARD')
self.mox.ReplayAll()
self.cells_manager.reboot_instance(self.ctxt,
instance='fake-instance',
reboot_type='HARD')
def test_suspend_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'suspend_instance')
self.msg_runner.suspend_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.suspend_instance(self.ctxt,
instance='fake-instance')
def test_resume_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'resume_instance')
self.msg_runner.resume_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.resume_instance(self.ctxt,
instance='fake-instance')
def test_terminate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'terminate_instance')
self.msg_runner.terminate_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.terminate_instance(self.ctxt,
instance='fake-instance')
def test_soft_delete_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'soft_delete_instance')
self.msg_runner.soft_delete_instance(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.soft_delete_instance(self.ctxt,
instance='fake-instance')
def test_resize_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'resize_instance')
self.msg_runner.resize_instance(self.ctxt, 'fake-instance',
'fake-flavor', 'fake-updates')
self.mox.ReplayAll()
self.cells_manager.resize_instance(
self.ctxt, instance='fake-instance', flavor='fake-flavor',
extra_instance_updates='fake-updates')
def test_live_migrate_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'live_migrate_instance')
self.msg_runner.live_migrate_instance(self.ctxt, 'fake-instance',
'fake-block', 'fake-commit',
'fake-host')
self.mox.ReplayAll()
self.cells_manager.live_migrate_instance(
self.ctxt, instance='fake-instance',
block_migration='fake-block', disk_over_commit='fake-commit',
host_name='fake-host')
def test_revert_resize(self):
self.mox.StubOutWithMock(self.msg_runner, 'revert_resize')
self.msg_runner.revert_resize(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.revert_resize(self.ctxt, instance='fake-instance')
def test_confirm_resize(self):
self.mox.StubOutWithMock(self.msg_runner, 'confirm_resize')
self.msg_runner.confirm_resize(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.confirm_resize(self.ctxt, instance='fake-instance')
def test_reset_network(self):
self.mox.StubOutWithMock(self.msg_runner, 'reset_network')
self.msg_runner.reset_network(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.reset_network(self.ctxt, instance='fake-instance')
def test_inject_network_info(self):
self.mox.StubOutWithMock(self.msg_runner, 'inject_network_info')
self.msg_runner.inject_network_info(self.ctxt, 'fake-instance')
self.mox.ReplayAll()
self.cells_manager.inject_network_info(self.ctxt,
instance='fake-instance')
def test_snapshot_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'snapshot_instance')
self.msg_runner.snapshot_instance(self.ctxt, 'fake-instance',
'fake-id')
self.mox.ReplayAll()
self.cells_manager.snapshot_instance(self.ctxt,
instance='fake-instance',
image_id='fake-id')
def test_backup_instance(self):
self.mox.StubOutWithMock(self.msg_runner, 'backup_instance')
self.msg_runner.backup_instance(self.ctxt, 'fake-instance',
'fake-id', 'backup-type',
'rotation')
self.mox.ReplayAll()
self.cells_manager.backup_instance(self.ctxt,
instance='fake-instance',
image_id='fake-id',
backup_type='backup-type',
rotation='rotation')
|
{
"content_hash": "9302bd4b6fbf54c7fac6be4191b59833",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 79,
"avg_line_length": 46.75892857142857,
"alnum_prop": 0.5682915518699364,
"repo_name": "devoid/nova",
"id": "adf804da1e0089d0a9afedf9579bd34809a3a779",
"size": "37295",
"binary": false,
"copies": "3",
"ref": "refs/heads/sheepdog-nova-support",
"path": "nova/tests/cells/test_cells_manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13261365"
},
{
"name": "Shell",
"bytes": "16180"
}
],
"symlink_target": ""
}
|
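The aggregation that test_compute_node_stats above exercises is a simple key-wise sum across the per-cell response dicts. A minimal sketch of that logic (not the Nova implementation; aggregate_stats is a hypothetical name, and the real manager walks messaging.Response objects rather than bare dicts):

import collections

def aggregate_stats(per_cell_stats):
    # Sum values key-by-key across all cells (hypothetical helper).
    totals = collections.defaultdict(int)
    for stats in per_cell_stats:
        for key, value in stats.items():
            totals[key] += value
    return dict(totals)

# Mirrors the expected_resp computed in test_compute_node_stats:
assert aggregate_stats([
    {'key1': 1, 'key2': 2},
    {'key2': 1, 'key3': 2},
    {'key3': 1, 'key4': 2},
]) == {'key1': 1, 'key2': 3, 'key3': 3, 'key4': 2}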
import webob
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova.api.openstack import extensions
from nova import db
from nova import exception
from nova import quota
authorize = extensions.extension_authorizer('compute', 'quotas')
class QuotaTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('quota_set', selector='quota_set')
root.set('id')
for resource in quota.quota_resources:
elem = xmlutil.SubTemplateElement(root, resource)
elem.text = resource
return xmlutil.MasterTemplate(root, 1)
class QuotaSetsController(object):
def _format_quota_set(self, project_id, quota_set):
"""Convert the quota object to a result dict"""
result = dict(id=str(project_id))
for resource in quota.quota_resources:
result[resource] = quota_set[resource]
return dict(quota_set=result)
@wsgi.serializers(xml=QuotaTemplate)
def show(self, req, id):
context = req.environ['nova.context']
authorize(context)
try:
db.sqlalchemy.api.authorize_project_context(context, id)
return self._format_quota_set(id,
quota.get_project_quotas(context, id))
except exception.NotAuthorized:
raise webob.exc.HTTPForbidden()
@wsgi.serializers(xml=QuotaTemplate)
def update(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
project_id = id
for key in body['quota_set'].keys():
if key in quota.quota_resources:
value = int(body['quota_set'][key])
try:
db.quota_update(context, project_id, key, value)
except exception.ProjectQuotaNotFound:
db.quota_create(context, project_id, key, value)
except exception.AdminRequired:
raise webob.exc.HTTPForbidden()
return {'quota_set': quota.get_project_quotas(context, project_id)}
@wsgi.serializers(xml=QuotaTemplate)
def defaults(self, req, id):
authorize(req.environ['nova.context'])
return self._format_quota_set(id, quota._get_default_quotas())
class Quotas(extensions.ExtensionDescriptor):
"""Quotas management support"""
name = "Quotas"
alias = "os-quota-sets"
namespace = "http://docs.openstack.org/compute/ext/quotas-sets/api/v1.1"
updated = "2011-08-08T00:00:00+00:00"
def get_resources(self):
resources = []
res = extensions.ResourceExtension('os-quota-sets',
QuotaSetsController(),
member_actions={'defaults': 'GET'})
resources.append(res)
return resources
|
{
"content_hash": "b84cfd26f65e9d04eab62f6938691c25",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 32.93103448275862,
"alnum_prop": 0.6139616055846422,
"repo_name": "gyang/nova",
"id": "0738fb81be22279fa49c5ac1554d5c1c29baf53d",
"size": "3540",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/contrib/quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
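QuotaSetsController.update() above accepts a quota_set mapping and writes each known resource with db.quota_update(), falling back to db.quota_create() for projects that have no quota row yet. A minimal sketch of the request body shape it consumes (the resource names here are illustrative, not taken from quota.quota_resources):

body = {
    "quota_set": {
        "instances": 10,
        "cores": 20,
    }
}
# The controller's loop reduces to this iteration plus int() coercion:
for key, value in body["quota_set"].items():
    print(key, int(value))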
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import City, Country, EUCity, EUCountry, Person, PersonProfile
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.country1 = Country.objects.create(name='Belgium')
self.country2 = Country.objects.create(name='France')
self.city1 = City.objects.create(name='Liberchies', country=self.country1)
self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
self.person_profile = PersonProfile.objects.create(person=self.person)
# We need another database connection in a transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.cursor.close()
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, queries, **kwargs):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = connection.ops.for_update_sql(**kwargs)
return any(for_update_sql in query['sql'] for query in queries)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_generated_of(self):
"""
The backend's FOR UPDATE OF variant appears in the generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_related(
'born__country',
).select_for_update(
of=('born__country',),
).select_for_update(
of=('self', 'born__country')
))
features = connections['default'].features
if features.select_for_update_of_column:
expected = ['select_for_update_person"."id', 'select_for_update_country"."id']
else:
expected = ['select_for_update_person', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self',)))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_eucountry"."country_ptr_id']
else:
expected = ['select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self', 'country_ptr',)))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucountry"."country_ptr_id',
'select_for_update_country"."id',
]
else:
expected = ['select_for_update_eucountry', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_nested_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country__country_ptr',),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_country"."id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
self.assertEqual(values, [{'pk': self.person.pk}])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values_list(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
self.assertEqual(values, [(self.person.pk,)])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_self_when_self_is_not_selected(self):
"""
select_for_update(of=['self']) when the only columns selected are from
related tables.
"""
with transaction.atomic():
values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
self.assertEqual(values, [{'born__name': self.city1.name}])
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_skip_locked_skips_locked_rows(self):
"""
If skip_locked is specified, the locked row is skipped resulting in
Person.DoesNotExist.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'skip_locked': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
a database backend that supports FOR UPDATE but not NOWAIT.
"""
with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature('has_select_for_update_skip_locked')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_skip_locked_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
on a database backend that supports FOR UPDATE but not SKIP LOCKED.
"""
with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature('has_select_for_update_of')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_of_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
a database backend that supports FOR UPDATE but not OF.
"""
msg = 'FOR UPDATE OF is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(of=('self',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_unrelated_of_argument_raises_error(self):
"""
FieldError is raised if a non-relation field is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, born__country.'
)
invalid_of = [
('nonexistent',),
('name',),
('born__nonexistent',),
('born__name',),
('born__nonexistent', 'born__name'),
]
for of in invalid_of:
with self.subTest(of=of):
with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
with transaction.atomic():
Person.objects.select_related('born__country').select_for_update(of=of).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_related_but_unselected_of_argument_raises_error(self):
"""
FieldError is raised if a relation field that is not followed in the
query is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, profile.'
)
for name in ['born__country', 'died', 'died__country']:
with self.subTest(name=name):
with self.assertRaisesMessage(FieldError, msg % name):
with transaction.atomic():
Person.objects.select_related(
'born', 'profile',
).exclude(profile=None).select_for_update(of=(name,)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_inheritance_of_argument_raises_error_ptr_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, %s.'
)
with self.assertRaisesMessage(
FieldError,
msg % 'country, country__country_ptr',
):
with transaction.atomic():
EUCity.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
with self.assertRaisesMessage(FieldError, msg % 'country_ptr'):
with transaction.atomic():
EUCountry.objects.select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_reverse_one_to_one_of_arguments(self):
"""
Reverse OneToOneFields may be included in of=(...) as long as NULLs
are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
"""
with transaction.atomic():
person = Person.objects.select_related(
'profile',
).exclude(profile=None).select_for_update(of=('profile',)).get()
self.assertEqual(person.profile, self.person_profile)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_after_from(self):
features_class = connections['default'].features.__class__
attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
with mock.patch(attribute_to_patch, return_value=True):
with transaction.atomic():
self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
A TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
No TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(people)
@skipUnlessDBFeature('supports_select_for_update_with_limit')
def test_select_for_update_with_limit(self):
other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
with transaction.atomic():
qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
self.assertEqual(qs[0], other)
@skipIfDBFeature('supports_select_for_update_with_limit')
def test_unsupported_select_for_update_with_limit(self):
msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
list(Person.objects.all().order_by('pk').select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on a
# per-thread basis
with transaction.atomic():
person = Person.objects.select_for_update(**kwargs).get()
person.name = 'Fred'
person.save()
except (DatabaseError, Person.DoesNotExist) as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
A thread running a select_for_update that accesses rows being touched
by a similar operation on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Running a raw query which can't obtain a FOR UPDATE lock raises
the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
# Connection cannot be closed on Oracle because cursor is still
# open.
if connection.vendor != 'oracle':
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
def test_nowait_and_skip_locked(self):
with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
"""
Subqueries should respect ordering as an ORDER BY clause may be useful
to specify a row locking order to prevent deadlocks (#27193).
"""
with transaction.atomic():
qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
self.assertIn('ORDER BY', str(qs.query))
|
{
"content_hash": "7b2dc35464e95ab125ac4ffdb71f48e3",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 117,
"avg_line_length": 43.46442687747036,
"alnum_prop": 0.6224253171463647,
"repo_name": "simonw/django",
"id": "0bb21972d10b51e53bb59fc2fc794c167a5f540b",
"size": "21993",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tests/select_for_update/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85351"
},
{
"name": "HTML",
"bytes": "227641"
},
{
"name": "JavaScript",
"bytes": "258434"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "13501540"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
}
|
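The tests above all follow the same usage pattern: select_for_update() only takes its row lock while the enclosing transaction is open, and the nowait/skip_locked/of options are passed straight through to the backend's FOR UPDATE clause. A minimal sketch of that pattern (assuming a configured Django project and the Person model from these tests):

from django.db import transaction

with transaction.atomic():
    # Lock the row for the duration of this block; with nowait=True the
    # backend raises immediately instead of blocking if another
    # transaction already holds the lock.
    person = Person.objects.select_for_update(nowait=True).get(name='Reinhardt')
    person.name = 'Fred'
    person.save()
# The lock is released when the atomic block commits.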
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Question'
db.create_table(u'questionnaire_question', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('instructions', self.gf('django.db.models.fields.TextField')(null=True)),
('UID', self.gf('django.db.models.fields.CharField')(max_length=6)),
('answer_type', self.gf('django.db.models.fields.CharField')(max_length=10)),
))
db.send_create_signal('questionnaire', ['Question'])
def backwards(self, orm):
# Deleting model 'Question'
db.delete_table(u'questionnaire_question')
models = {
'questionnaire.country': {
'Meta': {'object_name': 'Country'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'countries'", 'null': 'True', 'to': "orm['questionnaire.Region']"})
},
'questionnaire.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'})
},
'questionnaire.question': {
'Meta': {'object_name': 'Question'},
'UID': ('django.db.models.fields.CharField', [], {'max_length': '6'}),
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instructions': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'questionnaire.questionnaire': {
'Meta': {'object_name': 'Questionnaire'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.region': {
'Meta': {'object_name': 'Region'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'regions'", 'null': 'True', 'to': "orm['questionnaire.Organization']"})
},
'questionnaire.section': {
'Meta': {'object_name': 'Section'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'questionnaire': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sections'", 'to': "orm['questionnaire.Questionnaire']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'questionnaire.subsection': {
'Meta': {'object_name': 'SubSection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {}),
'section': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sub_sections'", 'to': "orm['questionnaire.Section']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
}
}
complete_apps = ['questionnaire']
|
{
"content_hash": "67b9f0fa0d6f14be721b76690b39ef76",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 188,
"avg_line_length": 67.8913043478261,
"alnum_prop": 0.5734870317002881,
"repo_name": "testvidya11/ejrf",
"id": "fa487df072a9e6ca186ad88a742e9ed46b7d8537",
"size": "6270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "questionnaire/migrations/0008_auto__add_question.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "6961"
},
{
"name": "JavaScript",
"bytes": "55198"
},
{
"name": "Python",
"bytes": "1196960"
},
{
"name": "Shell",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
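The frozen ORM block above pins the model state the migration was generated against. For reference, the questionnaire.Question entry corresponds to a model roughly like the following (a sketch reconstructed from the frozen fields, not the project's actual models.py):

import datetime
from django.db import models

class Question(models.Model):
    created = models.DateTimeField(default=datetime.datetime.now, blank=True)
    modified = models.DateTimeField(default=datetime.datetime.now, blank=True)
    text = models.TextField()
    instructions = models.TextField(null=True)
    UID = models.CharField(max_length=6)
    answer_type = models.CharField(max_length=10)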
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import os
import sys
import subprocess
from multiprocessing import Pool
def upload_file(input_file):
print input_file
s3_location = ""
subprocess.call(["aws", "s3", "cp", input_file, s3_location])
if __name__ == '__main__':
work_dir = sys.argv[1]
file_extension = sys.argv[2]
file_list = []
for dirpath, directories, filenames in os.walk(work_dir):
for filename in filenames:
if filename.endswith(file_extension):
input_file = os.path.join(dirpath, filename)
file_list.append(input_file)
pool = Pool(4)
pool.map(upload_file, file_list)
pool.close()
pool.join()
|
{
"content_hash": "d2f8993366c83548a97d4c2d91ee4aa0",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 65,
"avg_line_length": 23.5,
"alnum_prop": 0.6156028368794326,
"repo_name": "ucsd-ccbb/jupyter-genomics",
"id": "ef8d0e693720d7a63138df2ed5b68559e6358b2c",
"size": "705",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/awsCluster/util/MultipleThreadedUploader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "254329"
},
{
"name": "Java",
"bytes": "278021"
},
{
"name": "Jupyter Notebook",
"bytes": "19771596"
},
{
"name": "Perl",
"bytes": "14052"
},
{
"name": "Python",
"bytes": "428899"
},
{
"name": "R",
"bytes": "6817"
},
{
"name": "Shell",
"bytes": "37476"
}
],
"symlink_target": ""
}
|
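The script above is Python 2 and fans uploads out over a four-worker multiprocessing Pool, shelling out to the AWS CLI for each file. A minimal Python 3 sketch of the same fan-out with concurrent.futures (the bucket URL is a placeholder, just as s3_location is left blank in the original):

import subprocess
from concurrent.futures import ThreadPoolExecutor

def upload(path, s3_location):
    # Shell out to the AWS CLI, exactly as the original does.
    subprocess.call(["aws", "s3", "cp", path, s3_location])

with ThreadPoolExecutor(max_workers=4) as pool:
    for path in ["sample1.gz", "sample2.gz"]:  # placeholder file list
        pool.submit(upload, path, "s3://example-bucket/prefix/")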
import time
import datetime
import oauth2 as oauth
from provider import scope as oauth2_provider_scope
from rest_framework.test import APIClient
from rest_framework_oauth.authentication import (
oauth2_provider,
OAuthAuthentication,
OAuth2Authentication
)
from rest_framework import status, permissions
from rest_framework.views import APIView
from django.conf.urls import patterns, include, url
from django.http import HttpResponse
from django.utils.http import urlencode
from django.test import TestCase
from django.contrib.auth.models import User
class OAuth2AuthenticationDebug(OAuth2Authentication):
allow_query_params_token = True
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def post(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
def put(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
urlpatterns = patterns(
'',
(r'^oauth/$', MockView.as_view(authentication_classes=[OAuthAuthentication])),
(
r'^oauth-with-scope/$',
MockView.as_view(
authentication_classes=[OAuthAuthentication],
permission_classes=[permissions.TokenHasReadWriteScope]
)
),
url(r'^auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^oauth2/', include('provider.oauth2.urls', namespace='oauth2')),
url(r'^oauth2-test/$', MockView.as_view(authentication_classes=[OAuth2Authentication])),
url(r'^oauth2-test-debug/$', MockView.as_view(authentication_classes=[OAuth2AuthenticationDebug])),
url(
r'^oauth2-with-scope-test/$',
MockView.as_view(
authentication_classes=[OAuth2Authentication],
permission_classes=[permissions.TokenHasReadWriteScope]
)
)
)
class OAuthTests(TestCase):
"""OAuth 1.0a authentication"""
urls = 'tests.test_oauth'
def setUp(self):
# these imports are here because oauth is optional and hiding them in a
# try..except block or compat could obscure problems if something breaks
from oauth_provider.models import Consumer, Scope
from oauth_provider.models import Token as OAuthToken
from oauth_provider import consts
self.consts = consts
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.CONSUMER_KEY = 'consumer_key'
self.CONSUMER_SECRET = 'consumer_secret'
self.TOKEN_KEY = "token_key"
self.TOKEN_SECRET = "token_secret"
self.consumer = Consumer.objects.create(
key=self.CONSUMER_KEY, secret=self.CONSUMER_SECRET,
name='example', user=self.user, status=self.consts.ACCEPTED
)
self.scope = Scope.objects.create(name="resource name", url="api/")
self.token = OAuthToken.objects.create(
user=self.user, consumer=self.consumer, scope=self.scope,
token_type=OAuthToken.ACCESS, key=self.TOKEN_KEY, secret=self.TOKEN_SECRET,
is_approved=True
)
def _create_authorization_header(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return req.to_header()["Authorization"]
def _create_authorization_url_parameters(self):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="GET", url="http://example.com", parameters=params)
signature_method = oauth.SignatureMethod_PLAINTEXT()
req.sign_request(signature_method, self.consumer, self.token)
return dict(req)
def test_post_form_passing_oauth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_post_form_repeated_nonce_failing_oauth(self):
"""Ensure POSTing form over OAuth with repeated auth (same nonces and timestamp) credentials fails"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
# simulate replay attack: auth header contains an already used (nonce, timestamp) pair
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_token_removed_failing_oauth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.token.delete()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_consumer_status_not_accepted_failing_oauth(self):
"""Ensure POSTing when consumer status is anything other than ACCEPTED fails"""
for consumer_status in (self.consts.CANCELED, self.consts.PENDING, self.consts.REJECTED):
self.consumer.status = consumer_status
self.consumer.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_request_token_failing_oauth(self):
"""Ensure POSTing with unauthorized request token instead of access token fails"""
self.token.token_type = self.token.REQUEST
self.token.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', {'example': 'example'}, HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_urlencoded_parameters(self):
"""Ensure POSTing with x-www-form-urlencoded auth parameters passes"""
params = self._create_authorization_url_parameters()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth/', params, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_get_form_with_url_parameters(self):
"""Ensure GETing with auth in url parameters passes"""
params = self._create_authorization_url_parameters()
response = self.csrf_client.get('/oauth/', params)
self.assertEqual(response.status_code, 200)
def test_post_hmac_sha1_signature_passes(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_get_form_with_readonly_resource_passing_auth(self):
"""Ensure POSTing with a readonly scope instead of a write scope fails"""
read_only_access_token = self.token
read_only_access_token.scope.is_readonly = True
read_only_access_token.scope.save()
params = self._create_authorization_url_parameters()
response = self.csrf_client.get('/oauth-with-scope/', params)
self.assertEqual(response.status_code, 200)
def test_post_form_with_readonly_resource_failing_auth(self):
"""Ensure POSTing with a readonly resource instead of a write scope fails"""
read_only_access_token = self.token
read_only_access_token.scope.is_readonly = True
read_only_access_token.scope.save()
params = self._create_authorization_url_parameters()
response = self.csrf_client.post('/oauth-with-scope/', params)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_write_resource_passing_auth(self):
"""Ensure POSTing with a write resource succeed"""
read_write_access_token = self.token
read_write_access_token.scope.is_readonly = False
read_write_access_token.scope.save()
params = self._create_authorization_url_parameters()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth-with-scope/', params, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_bad_consumer_key(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': self.token.key,
'oauth_consumer_key': 'badconsumerkey'
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_bad_token_key(self):
"""Ensure POSTing using HMAC_SHA1 signature method passes"""
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'oauth_token': 'badtokenkey',
'oauth_consumer_key': self.consumer.key
}
req = oauth.Request(method="POST", url="http://testserver/oauth/", parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, self.consumer, self.token)
auth = req.to_header()["Authorization"]
response = self.csrf_client.post('/oauth/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
class OAuth2Tests(TestCase):
"""OAuth 2.0 authentication"""
urls = 'tests.test_oauth'
def setUp(self):
self.csrf_client = APIClient(enforce_csrf_checks=True)
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
self.CLIENT_ID = 'client_key'
self.CLIENT_SECRET = 'client_secret'
self.ACCESS_TOKEN = "access_token"
self.REFRESH_TOKEN = "refresh_token"
self.oauth2_client = oauth2_provider.oauth2.models.Client.objects.create(
client_id=self.CLIENT_ID,
client_secret=self.CLIENT_SECRET,
redirect_uri='',
client_type=0,
name='example',
user=None,
)
self.access_token = oauth2_provider.oauth2.models.AccessToken.objects.create(
token=self.ACCESS_TOKEN,
client=self.oauth2_client,
user=self.user,
)
self.refresh_token = oauth2_provider.oauth2.models.RefreshToken.objects.create(
user=self.user,
access_token=self.access_token,
client=self.oauth2_client
)
def _create_authorization_header(self, token=None):
return "Bearer {0}".format(token or self.access_token.token)
def test_get_form_with_wrong_authorization_header_token_type_failing(self):
"""Ensure that a wrong token type lead to the correct HTTP error status code"""
auth = "Wrong token-type-obsviously"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_get_form_with_wrong_authorization_header_token_format_failing(self):
"""Ensure that a wrong token format lead to the correct HTTP error status code"""
auth = "Bearer wrong token format"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_get_form_with_wrong_authorization_header_token_failing(self):
"""Ensure that a wrong token lead to the correct HTTP error status code"""
auth = "Bearer wrong-token"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_get_form_with_wrong_authorization_header_token_missing(self):
"""Ensure that a missing token lead to the correct HTTP error status code"""
auth = "Bearer"
response = self.csrf_client.get('/oauth2-test/', {}, HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_get_form_passing_auth(self):
"""Ensure GETing form over OAuth with correct client credentials succeed"""
auth = self._create_authorization_header()
response = self.csrf_client.get('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_post_form_passing_auth_url_transport(self):
"""Ensure GETing form over OAuth with correct client credentials in form data succeed"""
response = self.csrf_client.post(
'/oauth2-test/',
data={'access_token': self.access_token.token}
)
self.assertEqual(response.status_code, 200)
def test_get_form_passing_auth_url_transport(self):
"""Ensure GETing form over OAuth with correct client credentials in query succeed when DEBUG is True"""
query = urlencode({'access_token': self.access_token.token})
response = self.csrf_client.get('/oauth2-test-debug/?%s' % query)
self.assertEqual(response.status_code, 200)
def test_get_form_failing_auth_url_transport(self):
"""Ensure GETing form over OAuth with correct client credentials in query fails when DEBUG is False"""
query = urlencode({'access_token': self.access_token.token})
response = self.csrf_client.get('/oauth2-test/?%s' % query)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_passing_auth(self):
"""Ensure POSTing form over OAuth with correct credentials passes and does not require CSRF"""
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_post_form_token_removed_failing_auth(self):
"""Ensure POSTing when there is no OAuth access token in db fails"""
self.access_token.delete()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_refresh_token_failing_auth(self):
"""Ensure POSTing with refresh token instead of access token fails"""
auth = self._create_authorization_header(token=self.refresh_token.token)
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
def test_post_form_with_expired_access_token_failing_auth(self):
"""Ensure POSTing with expired access token fails with an 'Invalid token' error"""
self.access_token.expires = datetime.datetime.now() - datetime.timedelta(seconds=10) # 10 seconds late
self.access_token.save()
auth = self._create_authorization_header()
response = self.csrf_client.post('/oauth2-test/', HTTP_AUTHORIZATION=auth)
self.assertIn(response.status_code, (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN))
self.assertIn('Invalid token', response.content)
def test_post_form_with_invalid_scope_failing_auth(self):
"""Ensure POSTing with a readonly scope instead of a write scope fails"""
read_only_access_token = self.access_token
read_only_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['read']
read_only_access_token.save()
auth = self._create_authorization_header(token=read_only_access_token.token)
response = self.csrf_client.get('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_post_form_with_valid_scope_passing_auth(self):
"""Ensure POSTing with a write scope succeed"""
read_write_access_token = self.access_token
read_write_access_token.scope = oauth2_provider_scope.SCOPE_NAME_DICT['write']
read_write_access_token.save()
auth = self._create_authorization_header(token=read_write_access_token.token)
response = self.csrf_client.post('/oauth2-with-scope-test/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
|
{
"content_hash": "6ee57519d85eb19de4b35049a075c994",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 111,
"avg_line_length": 46.60679611650485,
"alnum_prop": 0.6643578793875637,
"repo_name": "jlafon/django-rest-framework-oauth",
"id": "ad88c6a03a1a33de13e57a9242b850ca40e2cc33",
"size": "19202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_oauth.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "36251"
}
],
"symlink_target": ""
}
|
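The OAuth2 tests above exercise two token transports: the Authorization header, which is always honored, and the query string, which OAuth2Authentication only accepts when allow_query_params_token is set (the debug subclass at the top of the file). A minimal sketch, with the client setup and token value assumed:

from rest_framework.test import APIClient

client = APIClient()
token = "access_token"  # assumed token value for illustration
# 1) Header transport, as built by _create_authorization_header():
client.get('/oauth2-test/', HTTP_AUTHORIZATION="Bearer {0}".format(token))
# 2) Query-string transport, only honored by the debug endpoint:
client.get('/oauth2-test-debug/?access_token={0}'.format(token))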
__version__ = "1.2.1"
__all__ = "Proxy",
try:
import copy_reg as copyreg
except ImportError:
import copyreg
from .utils import identity
copyreg.constructor(identity)
try:
from .cext import Proxy
from .cext import identity
except ImportError:
from .slots import Proxy
else:
copyreg.constructor(identity)
|
{
"content_hash": "0c6231ee57a755a7b9b19b04b4e1baf0",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 33,
"avg_line_length": 16.6,
"alnum_prop": 0.7018072289156626,
"repo_name": "mith1979/ansible_automation",
"id": "2ff10af529f0a5b660a4d21da475414d766f803b",
"size": "332",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "applied_python/applied_python/lib64/python2.7/site-packages/lazy_object_proxy/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1005"
},
{
"name": "C",
"bytes": "84868"
},
{
"name": "CSS",
"bytes": "50289"
},
{
"name": "HTML",
"bytes": "70428"
},
{
"name": "JavaScript",
"bytes": "105262"
},
{
"name": "PowerShell",
"bytes": "51840"
},
{
"name": "Python",
"bytes": "19073705"
},
{
"name": "Shell",
"bytes": "3747"
},
{
"name": "XSLT",
"bytes": "152770"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from pprint import pformat
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
class BeginningPrinter(Protocol):
def __init__(self, finished):
self.finished = finished
self.remaining = 1024 * 10
def dataReceived(self, bytes):
if self.remaining:
display = bytes[:self.remaining]
print('Some data received:')
print(display)
self.remaining -= len(display)
def connectionLost(self, reason):
print('Finished receiving body:', reason.getErrorMessage())
self.finished.callback(None)
agent = Agent(reactor)
d = agent.request(
'GET',
'http://example.com/',
Headers({'User-Agent': ['Twisted Web Client Example']}),
None)
def cbRequest(response):
print('Response version:', response.version)
print('Response code:', response.code)
print('Response phrase:', response.phrase)
print('Response headers:')
print(pformat(list(response.headers.getAllRawHeaders())))
finished = Deferred()
response.deliverBody(BeginningPrinter(finished))
return finished
d.addCallback(cbRequest)
def cbShutdown(ignored):
reactor.stop()
d.addBoth(cbShutdown)
reactor.run()
|
{
"content_hash": "ab56431aa6475d5d49ed67422b2c7dea",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 67,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.6920315865039484,
"repo_name": "EricMuller/mywebmarks-backend",
"id": "aee0962a10f7f8492a4017473285572371db8390",
"size": "1393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/docs/web/howto/listings/client/response.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "23736"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "66211"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "1087560"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "23014526"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "15482"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
import os
import mock
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_middleware import request_id
from oslo_policy import opts as policy_opts
from oslo_utils import importutils
import webob
from heat.common import context
from heat.common import exception
from heat.tests import common
policy_path = os.path.dirname(os.path.realpath(__file__)) + "/policy/"
class TestRequestContext(common.HeatTestCase):
def setUp(self):
self.ctx = {'username': 'mick',
'trustor_user_id': None,
'auth_token': '123',
'auth_token_info': {'123info': 'woop'},
'is_admin': False,
'user': 'mick',
'password': 'foo',
'trust_id': None,
'show_deleted': False,
'roles': ['arole', 'notadmin'],
'tenant_id': '456tenant',
'user_id': 'fooUser',
'tenant': u'\u5218\u80dc',
'auth_url': 'http://xyz',
'aws_creds': 'blah',
'region_name': 'RegionOne',
'user_identity': 'fooUser 456tenant',
'user_domain_id': None,
'project_domain_id': None}
super(TestRequestContext, self).setUp()
def test_request_context_init(self):
ctx = context.RequestContext(
auth_token=self.ctx.get('auth_token'),
username=self.ctx.get('username'),
password=self.ctx.get('password'),
aws_creds=self.ctx.get('aws_creds'),
project_name=self.ctx.get('tenant'),
tenant=self.ctx.get('tenant_id'),
user=self.ctx.get('user_id'),
auth_url=self.ctx.get('auth_url'),
roles=self.ctx.get('roles'),
show_deleted=self.ctx.get('show_deleted'),
is_admin=self.ctx.get('is_admin'),
auth_token_info=self.ctx.get('auth_token_info'),
trustor_user_id=self.ctx.get('trustor_user_id'),
trust_id=self.ctx.get('trust_id'),
region_name=self.ctx.get('region_name'),
user_domain_id=self.ctx.get('user_domain'),
project_domain_id=self.ctx.get('project_domain'))
ctx_dict = ctx.to_dict()
        del ctx_dict['request_id']
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_from_dict(self):
ctx = context.RequestContext.from_dict(self.ctx)
ctx_dict = ctx.to_dict()
        del ctx_dict['request_id']
self.assertEqual(self.ctx, ctx_dict)
def test_request_context_update(self):
ctx = context.RequestContext.from_dict(self.ctx)
for k in self.ctx:
if (k == 'user_identity' or
k == 'user_domain_id' or
k == 'project_domain_id'):
continue
# these values are different between attribute and context
if k == 'tenant' or k == 'user':
continue
self.assertEqual(self.ctx.get(k), ctx.to_dict().get(k))
override = '%s_override' % k
setattr(ctx, k, override)
self.assertEqual(override, ctx.to_dict().get(k))
def test_get_admin_context(self):
ctx = context.get_admin_context()
self.assertTrue(ctx.is_admin)
self.assertFalse(ctx.show_deleted)
def test_get_admin_context_show_deleted(self):
ctx = context.get_admin_context(show_deleted=True)
self.assertTrue(ctx.is_admin)
self.assertTrue(ctx.show_deleted)
def test_admin_context_policy_true(self):
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = True
ctx = context.RequestContext(roles=['admin'])
self.assertTrue(ctx.is_admin)
def test_admin_context_policy_false(self):
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(roles=['notadmin'])
self.assertFalse(ctx.is_admin)
def test_keystone_v3_endpoint_in_context(self):
"""Ensure that the context is the preferred source for the auth_uri."""
cfg.CONF.set_override('auth_uri', 'http://xyz',
group='clients_keystone', enforce_type=True)
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(
auth_url='http://example.com:5000/v2.0')
self.assertEqual(ctx.keystone_v3_endpoint,
'http://example.com:5000/v3')
def test_keystone_v3_endpoint_in_clients_keystone_config(self):
"""Ensure that the [clients_keystone] section is the preferred source.
Ensure that the [clients_keystone] section of the configuration is
the preferred source when the context does not have the auth_uri.
"""
cfg.CONF.set_override('auth_uri', 'http://xyz',
group='clients_keystone', enforce_type=True)
importutils.import_module('keystonemiddleware.auth_token')
cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
group='keystone_authtoken', enforce_type=True)
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
with mock.patch('keystoneclient.discover.Discover') as discover:
class MockDiscover(object):
def url_for(self, endpoint):
return 'http://xyz/v3'
discover.return_value = MockDiscover()
ctx = context.RequestContext(auth_url=None)
self.assertEqual(ctx.keystone_v3_endpoint, 'http://xyz/v3')
def test_keystone_v3_endpoint_in_keystone_authtoken_config(self):
"""Ensure that the [keystone_authtoken] section is used.
Ensure that the [keystone_authtoken] section of the configuration
is used when the auth_uri is not defined in the context or the
[clients_keystone] section.
"""
importutils.import_module('keystonemiddleware.auth_token')
cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
group='keystone_authtoken', enforce_type=True)
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(auth_url=None)
self.assertEqual(ctx.keystone_v3_endpoint, 'http://abc/v3')
def test_keystone_v3_endpoint_not_set_in_config(self):
"""Ensure an exception is raised when the auth_uri cannot be obtained.
Ensure an exception is raised when the auth_uri cannot be obtained
from any source.
"""
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(auth_url=None)
self.assertRaises(exception.AuthorizationFailure, getattr, ctx,
'keystone_v3_endpoint')
def test_create_trusts_auth_plugin_with_correct_user_domain_id(self):
importutils.import_module('keystonemiddleware.auth_token')
cfg.CONF.set_override('auth_uri', 'http://abc/v2.0',
group='keystone_authtoken', enforce_type=True)
cfg.CONF.set_override('admin_user', 'heat',
group='keystone_authtoken', enforce_type=True)
cfg.CONF.set_override('admin_password', 'password',
group='keystone_authtoken', enforce_type=True)
policy_check = 'heat.common.policy.Enforcer.check_is_admin'
with mock.patch(policy_check) as pc:
pc.return_value = False
ctx = context.RequestContext(auth_url=None,
user_domain_id='non-default',
username='test')
with mock.patch('keystoneauth1.identity.generic.Password') as ps:
ctx.trusts_auth_plugin
ps.assert_called_once_with(username='heat',
password='password',
user_domain_id='default',
auth_url='http://abc/v3',
trust_id=None)
def test_cache(self):
ctx = context.RequestContext.from_dict(self.ctx)
class Class1(object):
pass
class Class2(object):
pass
self.assertEqual(0, len(ctx._object_cache))
cache1 = ctx.cache(Class1)
self.assertIsInstance(cache1, Class1)
self.assertEqual(1, len(ctx._object_cache))
cache1a = ctx.cache(Class1)
self.assertEqual(cache1, cache1a)
self.assertEqual(1, len(ctx._object_cache))
cache2 = ctx.cache(Class2)
self.assertIsInstance(cache2, Class2)
self.assertEqual(2, len(ctx._object_cache))
class RequestContextMiddlewareTest(common.HeatTestCase):
scenarios = [(
'empty_headers',
dict(
environ=None,
headers={},
context_dict={
'auth_token': None,
'auth_token_info': None,
'auth_url': None,
'aws_creds': None,
'is_admin': False,
'password': None,
'roles': [],
'show_deleted': False,
'tenant': None,
'tenant_id': None,
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': None,
'username': None
})
), (
'username_password',
dict(
environ=None,
headers={
'X-Auth-User': 'my_username',
'X-Auth-Key': 'my_password',
'X-Auth-EC2-Creds': '{"ec2Credentials": {}}',
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken',
'X-Project-Name': 'my_tenant',
'X-Project-Id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3'
},
context_dict={
'auth_token': 'atoken',
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': None,
'is_admin': False,
'password': 'my_password',
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant',
'tenant_id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': 'my_username',
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': 'my_username'
})
), (
'aws_creds',
dict(
environ=None,
headers={
'X-Auth-EC2-Creds': '{"ec2Credentials": {}}',
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken',
'X-Project-Name': 'my_tenant',
'X-Project-Id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3',
},
context_dict={
'auth_token': 'atoken',
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': '{"ec2Credentials": {}}',
'is_admin': False,
'password': None,
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant',
'tenant_id': 'db6808c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': None
})
), (
'token_creds',
dict(
environ={'keystone.token_info': {'info': 123}},
headers={
'X-User-Id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'X-Auth-Token': 'atoken2',
'X-Project-Name': 'my_tenant2',
'X-Project-Id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'X-Auth-Url': 'http://192.0.2.1:5000/v1',
'X-Roles': 'role1,role2,role3',
},
context_dict={
'auth_token': 'atoken2',
'auth_token_info': {'info': 123},
'auth_url': 'http://192.0.2.1:5000/v1',
'aws_creds': None,
'is_admin': False,
'password': None,
'roles': ['role1', 'role2', 'role3'],
'show_deleted': False,
'tenant': 'my_tenant2',
'tenant_id': 'bb9108c8-62d0-4d92-898c-d644a6af20e9',
'trust_id': None,
'trustor_user_id': None,
'user': None,
'user_id': '7a87ff18-31c6-45ce-a186-ec7987f488c3',
'username': None
})
)]
def setUp(self):
super(RequestContextMiddlewareTest, self).setUp()
self.fixture = self.useFixture(config_fixture.Config())
self.fixture.conf(args=['--config-dir', policy_path])
policy_opts.set_defaults(cfg.CONF, 'check_admin.json')
def test_context_middleware(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/stacks', headers=self.headers,
environ=self.environ)
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertIsNotNone(ctx.get('request_id'))
def test_context_middleware_with_requestid(self):
middleware = context.ContextMiddleware(None, None)
request = webob.Request.blank('/stacks', headers=self.headers,
environ=self.environ)
req_id = 'req-5a63f0d7-1b69-447b-b621-4ea87cc7186d'
request.environ[request_id.ENV_REQUEST_ID] = req_id
self.assertIsNone(middleware.process_request(request))
ctx = request.context.to_dict()
for k, v in self.context_dict.items():
self.assertEqual(v, ctx[k], 'Key %s values do not match' % k)
self.assertEqual(
ctx.get('request_id'), req_id,
'Key request_id values do not match')
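# Hedged usage sketch (illustrative, assumes a configured heat install):
# RequestContext round-trips through to_dict()/from_dict(), which is what
# ContextMiddleware relies on when rebuilding a context per request.
#
#     ctx = context.RequestContext(username='mick', is_admin=False)
#     restored = context.RequestContext.from_dict(ctx.to_dict())
#     assert restored.username == 'mick'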
|
{
"content_hash": "e3b324e39f195f60ce1f5fd6be337479",
"timestamp": "",
"source": "github",
"line_count": 371,
"max_line_length": 79,
"avg_line_length": 41.15094339622642,
"alnum_prop": 0.5300975961223554,
"repo_name": "cwolferh/heat-scratch",
"id": "661b965dcc419d76062279048b517a17eba6c7b8",
"size": "15842",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/test_common_context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8338769"
},
{
"name": "Shell",
"bytes": "56516"
}
],
"symlink_target": ""
}
|
import warnings
from functools import partial
from typing import Any, Callable, List, Optional, Sequence
import torch
from torch import nn, Tensor
# `_mobilenet_v3` below calls load_state_dict_from_url; import the public
# torch.hub helper in place of torchvision's internal one so pretrained=True works.
from torch.hub import load_state_dict_from_url
from .misc_torch import ConvNormActivation, SqueezeExcitation as SElayer
from ._utils import _make_divisible
__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"]
model_urls = {
"mobilenet_v3_large":
"https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth",
"mobilenet_v3_small":
"https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth",
}
class SqueezeExcitation(SElayer):
"""DEPRECATED"""
def __init__(self, input_channels: int, squeeze_factor: int=4):
squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8)
super().__init__(
input_channels, squeeze_channels, scale_activation=nn.Hardsigmoid)
self.relu = self.activation
delattr(self, "activation")
warnings.warn(
"This SqueezeExcitation class is deprecated and will be removed in future versions. "
"Use torchvision.ops.misc.SqueezeExcitation instead.",
FutureWarning, )
class InvertedResidualConfig:
# Stores information listed at Tables 1 and 2 of the MobileNetV3 paper
def __init__(
self,
input_channels: int,
kernel: int,
expanded_channels: int,
out_channels: int,
use_se: bool,
activation: str,
stride: int,
dilation: int,
width_mult: float, ):
self.input_channels = self.adjust_channels(input_channels, width_mult)
self.kernel = kernel
self.expanded_channels = self.adjust_channels(expanded_channels,
width_mult)
self.out_channels = self.adjust_channels(out_channels, width_mult)
self.use_se = use_se
self.use_hs = activation == "HS"
self.stride = stride
self.dilation = dilation
@staticmethod
def adjust_channels(channels: int, width_mult: float):
return _make_divisible(channels * width_mult, 8)
class InvertedResidual(nn.Module):
# Implemented as described at section 5 of MobileNetV3 paper
def __init__(
self,
cnf: InvertedResidualConfig,
norm_layer: Callable[..., nn.Module],
se_layer: Callable[..., nn.Module]=partial(
SElayer, scale_activation=nn.Hardsigmoid), ):
super().__init__()
if not (1 <= cnf.stride <= 2):
raise ValueError("illegal stride value")
self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels
layers: List[nn.Module] = []
activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU
# expand
if cnf.expanded_channels != cnf.input_channels:
layers.append(
ConvNormActivation(
cnf.input_channels,
cnf.expanded_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=activation_layer, ))
# depthwise
stride = 1 if cnf.dilation > 1 else cnf.stride
layers.append(
ConvNormActivation(
cnf.expanded_channels,
cnf.expanded_channels,
kernel_size=cnf.kernel,
stride=stride,
dilation=cnf.dilation,
groups=cnf.expanded_channels,
norm_layer=norm_layer,
activation_layer=activation_layer, ))
if cnf.use_se:
squeeze_channels = _make_divisible(cnf.expanded_channels // 4, 8)
layers.append(se_layer(cnf.expanded_channels, squeeze_channels))
# project
layers.append(
ConvNormActivation(
cnf.expanded_channels,
cnf.out_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=None))
self.block = nn.Sequential(*layers)
self.out_channels = cnf.out_channels
self._is_cn = cnf.stride > 1
def forward(self, input: Tensor) -> Tensor:
result = self.block(input)
if self.use_res_connect:
result += input
return result
class MobileNetV3(nn.Module):
def __init__(
self,
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
num_classes: int=1000,
block: Optional[Callable[..., nn.Module]]=None,
norm_layer: Optional[Callable[..., nn.Module]]=None,
dropout: float=0.2,
**kwargs: Any, ) -> None:
"""
MobileNet V3 main class
Args:
inverted_residual_setting (List[InvertedResidualConfig]): Network structure
last_channel (int): The number of channels on the penultimate layer
num_classes (int): Number of classes
block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet
norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use
            dropout (float): The dropout probability
"""
super().__init__()
if not inverted_residual_setting:
raise ValueError(
"The inverted_residual_setting should not be empty")
elif not (isinstance(inverted_residual_setting, Sequence) and all([
isinstance(s, InvertedResidualConfig)
for s in inverted_residual_setting
])):
raise TypeError(
"The inverted_residual_setting should be List[InvertedResidualConfig]"
)
if block is None:
block = InvertedResidual
if norm_layer is None:
norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)
layers: List[nn.Module] = []
# building first layer
firstconv_output_channels = inverted_residual_setting[0].input_channels
layers.append(
ConvNormActivation(
3,
firstconv_output_channels,
kernel_size=3,
stride=2,
norm_layer=norm_layer,
activation_layer=nn.Hardswish, ))
# building inverted residual blocks
for cnf in inverted_residual_setting:
layers.append(block(cnf, norm_layer))
# building last several layers
lastconv_input_channels = inverted_residual_setting[-1].out_channels
lastconv_output_channels = 6 * lastconv_input_channels
layers.append(
ConvNormActivation(
lastconv_input_channels,
lastconv_output_channels,
kernel_size=1,
norm_layer=norm_layer,
activation_layer=nn.Hardswish, ))
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.classifier = nn.Sequential(
nn.Linear(lastconv_output_channels, last_channel),
nn.Hardswish(inplace=True),
nn.Dropout(
p=dropout, inplace=True),
nn.Linear(last_channel, num_classes), )
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out")
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
def _mobilenet_v3_conf(arch: str,
width_mult: float=1.0,
reduced_tail: bool=False,
dilated: bool=False,
**kwargs: Any):
reduce_divider = 2 if reduced_tail else 1
dilation = 2 if dilated else 1
bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult)
adjust_channels = partial(
InvertedResidualConfig.adjust_channels, width_mult=width_mult)
if arch == "mobilenet_v3_large":
inverted_residual_setting = [
bneck_conf(16, 3, 16, 16, False, "RE", 1, 1),
bneck_conf(16, 3, 64, 24, False, "RE", 2, 1), # C1
bneck_conf(24, 3, 72, 24, False, "RE", 1, 1),
bneck_conf(24, 5, 72, 40, True, "RE", 2, 1), # C2
bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
bneck_conf(40, 5, 120, 40, True, "RE", 1, 1),
bneck_conf(40, 3, 240, 80, False, "HS", 2, 1), # C3
bneck_conf(80, 3, 200, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 184, 80, False, "HS", 1, 1),
bneck_conf(80, 3, 480, 112, True, "HS", 1, 1),
bneck_conf(112, 3, 672, 112, True, "HS", 1, 1),
bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2,
dilation), # C4
bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider,
160 // reduce_divider, True, "HS", 1, dilation),
bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider,
160 // reduce_divider, True, "HS", 1, dilation),
]
last_channel = adjust_channels(1280 // reduce_divider) # C5
elif arch == "mobilenet_v3_small":
inverted_residual_setting = [
bneck_conf(16, 3, 16, 16, True, "RE", 2, 1), # C1
bneck_conf(16, 3, 72, 24, False, "RE", 2, 1), # C2
bneck_conf(24, 3, 88, 24, False, "RE", 1, 1),
bneck_conf(24, 5, 96, 40, True, "HS", 2, 1), # C3
bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
bneck_conf(40, 5, 240, 40, True, "HS", 1, 1),
bneck_conf(40, 5, 120, 48, True, "HS", 1, 1),
bneck_conf(48, 5, 144, 48, True, "HS", 1, 1),
bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2,
dilation), # C4
bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider,
96 // reduce_divider, True, "HS", 1, dilation),
bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider,
96 // reduce_divider, True, "HS", 1, dilation),
]
last_channel = adjust_channels(1024 // reduce_divider) # C5
else:
raise ValueError(f"Unsupported model type {arch}")
return inverted_residual_setting, last_channel
def _mobilenet_v3(
arch: str,
inverted_residual_setting: List[InvertedResidualConfig],
last_channel: int,
pretrained: bool,
progress: bool,
**kwargs: Any, ):
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if pretrained:
if model_urls.get(arch, None) is None:
raise ValueError(
f"No checkpoint is available for model type {arch}")
state_dict = load_state_dict_from_url(
model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def mobilenet_v3_large(pretrained: bool=False,
progress: bool=True,
**kwargs: Any) -> MobileNetV3:
"""
Constructs a large MobileNetV3 architecture from
`"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
arch = "mobilenet_v3_large"
inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch,
**kwargs)
return _mobilenet_v3(arch, inverted_residual_setting, last_channel,
pretrained, progress, **kwargs)
def mobilenet_v3_small(pretrained: bool=False,
progress: bool=True,
**kwargs: Any) -> MobileNetV3:
"""
Constructs a small MobileNetV3 architecture from
`"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
arch = "mobilenet_v3_small"
inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch,
**kwargs)
return _mobilenet_v3(arch, inverted_residual_setting, last_channel,
pretrained, progress, **kwargs)
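# Hedged usage sketch (illustrative): build the small variant without
# pretrained weights and run one forward pass on a dummy batch.
if __name__ == "__main__":
    model = mobilenet_v3_small(pretrained=False)
    model.eval()
    with torch.no_grad():
        logits = model(torch.randn(1, 3, 224, 224))
    print(logits.shape)  # expected: torch.Size([1, 1000])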
|
{
"content_hash": "61fd874e69f2288fc130488395f66261",
"timestamp": "",
"source": "github",
"line_count": 340,
"max_line_length": 120,
"avg_line_length": 38.85,
"alnum_prop": 0.5588613823907942,
"repo_name": "PaddlePaddle/models",
"id": "ae2863c73cbedcbbec1a62d65e1a63b6ab9733a4",
"size": "13209",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/2.3",
"path": "tutorials/mobilenetv3_prod/Step1-5/mobilenetv3_ref/torchvision/models/mobilenet_v3_torch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
}
|
"""Tests for the Spotify config flow."""
from unittest.mock import patch
from spotipy import SpotifyException
from homeassistant import data_entry_flow, setup
from homeassistant.components.spotify.const import DOMAIN
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.common import MockConfigEntry
async def test_abort_if_no_configuration(hass):
"""Check flow aborts when no configuration is present."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_zeroconf_abort_if_existing_entry(hass):
"""Check zeroconf flow aborts when an entry already exist."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_full_flow(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Check a full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["url"] == (
"https://accounts.spotify.com/authorize"
"?response_type=code&client_id=client"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
"&scope=user-modify-playback-state,user-read-playback-state,user-read-private,"
"playlist-read-private,playlist-read-collaborative,user-library-read,"
"user-top-read,user-read-playback-position,user-read-recently-played,user-follow-read"
)
client = await hass_client_no_auth()
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.spotify.async_setup_entry", return_value=True
), patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {
"id": "fake_id",
"display_name": "frenck",
}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["name"] == "frenck"
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
async def test_abort_if_spotify_error(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Check Spotify errors causes flow to abort."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await hass_client_no_auth()
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.spotify.config_flow.Spotify.current_user",
side_effect=SpotifyException(400, -1, "message"),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "connection_error"
async def test_reauthentication(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Test Spotify reauthentication."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=123,
version=1,
data={"id": "frenck", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_REAUTH}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await hass_client_no_auth()
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.spotify.async_setup_entry", return_value=True
), patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {"id": "frenck"}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
async def test_reauth_account_mismatch(
hass, hass_client_no_auth, aioclient_mock, current_request_with_host
):
"""Test Spotify reauthentication with different account."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=123,
version=1,
data={"id": "frenck", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_REAUTH}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(
hass,
{
"flow_id": result["flow_id"],
"redirect_uri": "https://example.com/auth/external/callback",
},
)
client = await hass_client_no_auth()
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {"id": "fake_id"}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_account_mismatch"
|
{
"content_hash": "85bab5d16caee18d6e495158bce87203",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 94,
"avg_line_length": 33.40350877192982,
"alnum_prop": 0.617436974789916,
"repo_name": "Danielhiversen/home-assistant",
"id": "0d0d4a50a3dadf115d18ace17d8ae585c4613242",
"size": "9520",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/spotify/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import pytest
import requests
from pyrestcli.exceptions import NotFoundException
from carto.maps import NamedMap, NamedMapManager, AnonymousMap
from secret import NAMED_MAP_DEFINITION, NAMED_MAP_AUTH_TOKEN, \
NAMED_MAP_INSTANTIATION, ANONYMOUS_MAP_DEFINITION
@pytest.fixture(scope="module")
def named_map_manager(api_key_auth_client_usr):
"""
Returns a named map manager instance that can be reused in tests
:param api_key_auth_client: Fixture that provides a valid APIKeyAuthClient
object
:return: NamedMap instance
"""
return NamedMapManager(api_key_auth_client_usr)
@pytest.fixture(scope="module")
def no_auth_client_fixture(no_auth_client):
return no_auth_client
def test_get_named_map_error(named_map_manager):
with pytest.raises(NotFoundException):
named_map_manager.get('non-existent')
def test_named_map_methods(named_map_manager):
n = NamedMap(named_map_manager.client)
n.template_id = 'python_sdk_test_map'
try:
n.delete()
except NotFoundException:
pass
# Create named map
named = named_map_manager.create(template=NAMED_MAP_DEFINITION)
assert named.template_id is not None
# Get the named map created
new_named = named_map_manager.get(named.template_id)
assert new_named.template_id == named.template_id
# Instantiate named map
named.instantiate(NAMED_MAP_INSTANTIATION, NAMED_MAP_AUTH_TOKEN)
assert named.layergroupid is not None
# Update named map
# del named.view
named.view = None
named.save()
assert named.view is None
url = named.get_tile_url(0, 0, 0, layer_id="all")
assert url is not None
# Delete named map
    assert named.delete().status_code == requests.codes.no_content
def test_named_map_manager(named_map_manager):
# Get all named maps
initial_maps = named_map_manager.all()
# Create named map
named = named_map_manager.create(template=NAMED_MAP_DEFINITION)
assert named.template_id is not None
# Get all named maps again
final_maps = named_map_manager.all()
# Check number of maps is correct
assert len(initial_maps) + 1 == len(final_maps)
# Delete named map simply to avoid polluting the user's account
    assert named.delete().status_code == requests.codes.no_content
def test_create_anonymous_map(no_auth_client_fixture):
anonymous = AnonymousMap(no_auth_client_fixture)
anonymous.instantiate(ANONYMOUS_MAP_DEFINITION)
url = anonymous.get_tile_url(0, 0, 0, layer_id="all")
assert url is not None
assert anonymous.layergroupid is not None
|
{
"content_hash": "31c83de0ee49e5c3d57779f99d6d40cb",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 78,
"avg_line_length": 29.233333333333334,
"alnum_prop": 0.7054351957430635,
"repo_name": "CartoDB/carto-python",
"id": "d81f54ff1b61914f4c840af5fb96c2bb4df3f3a8",
"size": "2631",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_maps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148350"
}
],
"symlink_target": ""
}
|
"""This module contains the general information for MailRecipient ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class MailRecipientConsts:
ADMIN_ACTION_CLEAR = "clear"
ADMIN_ACTION_SEND_TEST_MAIL = "send-test-mail"
class MailRecipient(ManagedObject):
"""This is MailRecipient class."""
consts = MailRecipientConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("MailRecipient", "mailRecipient", "mail-recipient-[id]", VersionMeta.Version303a, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'commMailAlert'], [], ["Get", "Remove", "Set"]),
"modular": MoMeta("MailRecipient", "mailRecipient", "mail-recipient-[id]", VersionMeta.Version303a, "InputOutput", 0x7f, [], ["admin", "read-only", "user"], [u'commMailAlert'], [], ["Get", "Remove", "Set"])
}
prop_meta = {
"classic": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["clear", "send-test-mail"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"email": MoPropertyMeta("email", "email", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 64, r"""(([^<>\(\)\[\]\\\.,;:\s@""]+(\.[^<>\(\)\[\]\\\.,;:\s@""]+)*)|(""\.+""))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,})""", [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version303a, MoPropertyMeta.NAMING, 0x10, 1, 4, None, [], ["1-4"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"test_mail_status": MoPropertyMeta("test_mail_status", "testMailStatus", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
"modular": {
"admin_action": MoPropertyMeta("admin_action", "adminAction", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, 0, 510, None, ["clear", "send-test-mail"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"email": MoPropertyMeta("email", "email", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 64, r"""(([^<>\(\)\[\]\\\.,;:\s@""]+(\.[^<>\(\)\[\]\\\.,;:\s@""]+)*)|(""\.+""))@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\])|([a-zA-Z\-0-9]+\.)+[a-zA-Z]{2,})""", [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version303a, MoPropertyMeta.NAMING, 0x10, 1, 4, None, [], ["1-4"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"test_mail_status": MoPropertyMeta("test_mail_status", "testMailStatus", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
},
}
prop_map = {
"classic": {
"adminAction": "admin_action",
"dn": "dn",
"email": "email",
"id": "id",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"testMailStatus": "test_mail_status",
},
"modular": {
"adminAction": "admin_action",
"dn": "dn",
"email": "email",
"id": "id",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"testMailStatus": "test_mail_status",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.admin_action = None
self.email = None
self.status = None
self.child_action = None
self.test_mail_status = None
ManagedObject.__init__(self, "MailRecipient", parent_mo_or_dn, **kwargs)
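# Hedged usage sketch (illustrative; the handle calls and the parent DN
# "sys/svc-ext/mail-alert" are assumptions, not taken from this file):
#
#     from imcsdk.imchandle import ImcHandle
#
#     handle = ImcHandle("192.0.2.10", "admin", "password")
#     handle.login()
#     mo = MailRecipient(parent_mo_or_dn="sys/svc-ext/mail-alert", id="1")
#     mo.email = "ops@example.com"
#     handle.set_mo(mo)   # mo_meta above allows Get/Remove/Set
#     handle.logout()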
|
{
"content_hash": "e4c594277fc5dc69d8b15f8982204274",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 304,
"avg_line_length": 56.298850574712645,
"alnum_prop": 0.5624744793793385,
"repo_name": "ragupta-git/ImcSdk",
"id": "10bf11bc0043337bac476ac4cde8d5e8bee8b872",
"size": "4898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imcsdk/mometa/mail/MailRecipient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1042023"
}
],
"symlink_target": ""
}
|
from inspect import Parameter
import threading
from .exceptions import InvalidAnnotation, MissingDependencyDeclaration, BindingAlreadyExists, AnnotationAlreadyExists
ANNOTATION_FORMAT = '{name}:{annotation}'
"""Standard binding key format for name/annotation pairs """
def get_key(name, annotation=None):
"""
Returns the binding key for a name/annotation pair, formatted with :attr:`~di.annotators.ANNOTATION_FORMAT`,
e.g. ``'widget:text'``.
:param name: A binding name
:type name: str
:param annotation: A binding annotation
:type annotation: str
:rtype: str
"""
if annotation:
return ANNOTATION_FORMAT.format(**dict(name=name, annotation=annotation))
return name
def from_key(key):
"""
Returns a tuple ``(name, annotation)`` for a key formatted as defined in
:attr:`~di.annotators.ANNOTATION_FORMAT`, e.g. ``('widget', 'text')`` for key ``'widget:text'``.
:param key: A key
:type key: str
:rtype: tuple
"""
parts = key.split(':', 1)
    return (parts[0], None) if len(parts) < 2 else tuple(parts)
def from_param(param, use_argument_names=True):
"""
Returns a tuple ``(name, annotation)`` for a :class:`inspect.Parameter` object.
:param param: A parameter object
:type param: :class:`inspect.Parameter`
:param use_argument_names: Whether to use the parameter object name or rely on annotation only.
:type use_argument_names: bool
:raises: :exc:`~di.exceptions.InvalidAnnotation`
:raises: :exc:`~di.exceptions.MissingDependencyDeclaration`
:rtype: tuple
"""
param_annotation = None
if param.annotation is not Parameter.empty:
if not isinstance(param.annotation, str):
raise InvalidAnnotation('"%r" is not a valid annotation to argument "%s".' % (param.annotation, param.name))
param_annotation = param.annotation
if use_argument_names:
return param.name, param_annotation
else:
if param_annotation is None:
            raise MissingDependencyDeclaration('Parameter "%s" has no dependency declaration. '
                                               'Turn "use_argument_names" on or annotate a dependency' % param.name)
return from_key(param_annotation)
class Binding(object):
"""
    Handles binding annotations: an annotation request extends the binding key stored in the binding map,
    while plain attribute access is delegated to the wrapped ``provider``. In both cases the actual
    ``key -> value`` assignment happens within the ``Binding``.
:param binding_map: A binding map.
:type binding_map: :class:`~di.types.StrictDict`
:param name: The name of the binding that eventually will get annotated.
:type name: str
:param provider: A provider object that you want to bind to a name/annotation pair.
:type provider: :class:`~di.providers.Provider`
:raises: :exc:`~di.exceptions.BindingAlreadyExists`
"""
def __init__(self, binding_map, name, provider):
self._binding_map = binding_map
self._name = name
self._provider = provider
self._rlock = threading.RLock()
def annotated_with(self, annotation):
"""
        Extends the binding key with ``annotation``, stores the extended key together with the wrapped
        provider in the binding map, and delegates further provider instructions to (returns) that provider.
:param annotation: An annotation name.
:type annotation: str
:rtype: :class:`~di.providers.Provider`
"""
if not isinstance(annotation, str):
raise InvalidAnnotation('"%r" is not a string' % annotation)
key = get_key(self._name, annotation)
self._bind(key, self._provider)
return self._provider
def __getattr__(self, item):
"""
Tries to delegate attribute calls to the wrapped provider.
:rtype: :class:`~di.providers.Provider`
"""
attr = getattr(self._provider, item, None)
if attr is None:
raise AttributeError('Neither "Binding" nor "%r" has attribute "%r"' % (self._provider, item))
else:
self._bind(get_key(self._name), self._provider)
return attr
def _bind(self, key, value):
with self._rlock:
name, annotation = from_key(key)
if annotation and key in self._binding_map:
raise AnnotationAlreadyExists('Annotation "%s" for binding "%s" already exists' % (annotation, name))
elif key in self._binding_map:
raise BindingAlreadyExists('Binding "%r" already exists' % key)
self._binding_map[key] = value
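# Hedged usage sketch (illustrative): the module-level key helpers
# round-trip a name/annotation pair through ANNOTATION_FORMAT.
#
#     get_key('widget', 'text')   # -> 'widget:text'
#     from_key('widget:text')     # -> ('widget', 'text')
#     from_key('widget')          # -> ('widget', None)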
|
{
"content_hash": "97d23db0579b85c57b48f5019de673bd",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 120,
"avg_line_length": 34.507462686567166,
"alnum_prop": 0.6425173010380623,
"repo_name": "lukasbuenger/python-di",
"id": "b60c645a008b823dddf46449d79fc9714f36aa1d",
"size": "4648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "di/bindings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "43177"
}
],
"symlink_target": ""
}
|
node = S(input, "application/json")
childNode = node.prop("orderDetails")
property1 = node.prop("order")
property2 = childNode.prop("price")
property3 = node.prop("active")
stringValue = property1.stringValue()
numberValue = property2.numberValue()
boolValue = property3.boolValue()
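# Hedged sketch (illustrative): an input document shaped like the one this
# script expects; the concrete values here are assumptions.
#
#     {"order": "order1", "active": true, "orderDetails": {"price": 32000.45}}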
|
{
"content_hash": "41a973e8e061163266720556d5606e54",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 37,
"avg_line_length": 28.5,
"alnum_prop": 0.7543859649122807,
"repo_name": "escardin/camunda-spin",
"id": "99aa103361be99c04aef780db986862f8c2895cb",
"size": "285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeReadPropertyPythonTest.shouldBeSameAsJavaValue.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "24857"
},
{
"name": "Java",
"bytes": "556983"
},
{
"name": "JavaScript",
"bytes": "21221"
},
{
"name": "Python",
"bytes": "20838"
},
{
"name": "Ruby",
"bytes": "20903"
}
],
"symlink_target": ""
}
|
from common.des_tool import get_decode_num, get_encode_num
from django import template
register = template.Library()
@register.filter(name='get_encode_id')
def get_encode_id(id):
return get_encode_num(id)
@register.filter(name='get_decode_id')
def get_decode_id(encode_str):
return get_decode_num(encode_str)
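# Hedged usage sketch (illustrative; `article` is a hypothetical context
# variable): after loading this tag library, the two filters obfuscate and
# restore ids symmetrically.
#
#     {% load des_tags %}
#     {{ article.id|get_encode_id }}     {# numeric id -> encoded string #}
#     {{ encoded_str|get_decode_id }}    {# encoded string -> numeric id #}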
|
{
"content_hash": "de6c724cd793583c18dccd5a811601fd",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 26.583333333333332,
"alnum_prop": 0.7460815047021944,
"repo_name": "garrickvan/xfuture_blog",
"id": "04f0316c6a90f45740890968527938dc80049780",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/templatetags/des_tags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "41943"
},
{
"name": "CSS",
"bytes": "83620"
},
{
"name": "JavaScript",
"bytes": "730554"
},
{
"name": "PHP",
"bytes": "43361"
},
{
"name": "Python",
"bytes": "45969"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("django_dbq", "0003_auto_20180713_1000"),
]
operations = [
migrations.AlterField(
model_name="job",
name="state",
field=models.CharField(
choices=[
("NEW", "New"),
("READY", "Ready"),
("PROCESSING", "Processing"),
("FAILED", "Failed"),
("COMPLETE", "Complete"),
],
db_index=True,
default="NEW",
max_length=20,
),
),
migrations.AlterField(
model_name="job",
name="workspace",
field=models.JSONField(null=True),
),
]
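# Hedged usage sketch (illustrative, per django-db-queue's documented API):
# jobs start in the default "NEW" state declared above and move through the
# listed states as workers pick them up.
#
#     from django_dbq.models import Job
#     Job.objects.create(name="my_task", workspace={"key": "value"})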
|
{
"content_hash": "283ec66c3c69150a3ffde06a27598568",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 50,
"avg_line_length": 26.15625,
"alnum_prop": 0.4241338112305854,
"repo_name": "dabapps/django-db-queue",
"id": "b62ab02c5348ec90d5b48fafa2541b31c8806044",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_dbq/migrations/0004_auto_20210818_0247.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "33296"
}
],
"symlink_target": ""
}
|