from ansible.module_utils.ansible_release import __version__ as ANSIBLE_VERSION
try:
from msrestazure.azure_exceptions import CloudError
from msrestazure.azure_configuration import AzureConfiguration
from msrest.service_client import ServiceClient
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller
from msrestazure.polling.arm_polling import ARMPolling
import uuid
import json
except ImportError:
# This is handled in azure_rm_common
AzureConfiguration = object
ANSIBLE_USER_AGENT = 'Ansible/{0}'.format(ANSIBLE_VERSION)
class GenericRestClientConfiguration(AzureConfiguration):
def __init__(self, credentials, subscription_id, base_url=None):
if credentials is None:
raise ValueError("Parameter 'credentials' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
if not base_url:
base_url = 'https://management.azure.com'
super(GenericRestClientConfiguration, self).__init__(base_url)
self.add_user_agent(ANSIBLE_USER_AGENT)
self.credentials = credentials
self.subscription_id = subscription_id
class GenericRestClient(object):
def __init__(self, credentials, subscription_id, base_url=None):
self.config = GenericRestClientConfiguration(credentials, subscription_id, base_url)
self._client = ServiceClient(self.config.credentials, self.config)
self.models = None
def query(self, url, method, query_parameters, header_parameters, body, expected_status_codes, polling_timeout, polling_interval):
# Construct and send request
operation_config = {}
request = None
if header_parameters is None:
header_parameters = {}
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if method == 'GET':
request = self._client.get(url, query_parameters)
elif method == 'PUT':
request = self._client.put(url, query_parameters)
elif method == 'POST':
request = self._client.post(url, query_parameters)
elif method == 'HEAD':
request = self._client.head(url, query_parameters)
elif method == 'PATCH':
request = self._client.patch(url, query_parameters)
        elif method == 'DELETE':
            request = self._client.delete(url, query_parameters)
        elif method == 'MERGE':
            request = self._client.merge(url, query_parameters)
        else:
            raise ValueError("Unsupported HTTP method: {0}".format(method))
response = self._client.send(request, header_parameters, body, **operation_config)
if response.status_code not in expected_status_codes:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
elif response.status_code == 202 and polling_timeout > 0:
def get_long_running_output(response):
return response
poller = LROPoller(self._client,
ClientRawResponse(None, response),
get_long_running_output,
ARMPolling(polling_interval, **operation_config))
response = self.get_poller_result(poller, polling_timeout)
return response
    def get_poller_result(self, poller, timeout):
        try:
            poller.wait(timeout=timeout)
            return poller.result()
        except Exception:
            # Re-raise long-running-operation failures for the caller to handle.
            raise
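A hedged usage sketch (not part of the original module) showing how query() is typically driven. The credential values, subscription id, and api-version are placeholders, and ServicePrincipalCredentials comes from the msrestazure package this module already depends on.

if __name__ == '__main__':
    from msrestazure.azure_active_directory import ServicePrincipalCredentials

    credentials = ServicePrincipalCredentials(
        client_id='<client-id>', secret='<secret>', tenant='<tenant-id>')
    client = GenericRestClient(credentials, subscription_id='<subscription-id>')
    # Plain GET against the ARM REST surface; anything but 200 raises CloudError,
    # and no long-running-operation polling is expected for a simple list call.
    response = client.query(
        url='/subscriptions/<subscription-id>/resourceGroups',
        method='GET',
        query_parameters={'api-version': '2019-05-01'},
        header_parameters={'Content-Type': 'application/json; charset=utf-8'},
        body=None,
        expected_status_codes=[200],
        polling_timeout=600,
        polling_interval=30)
    print(response.text)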
[source: thaim/ansible | path: lib/ansible/module_utils/azure_rm_common_rest.py | license: mit]
"""
MINDBODY Public API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.get_session_types_response import GetSessionTypesResponse # noqa: E501
from swagger_client.rest import ApiException
class TestGetSessionTypesResponse(unittest.TestCase):
"""GetSessionTypesResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetSessionTypesResponse(self):
"""Test GetSessionTypesResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.get_session_types_response.GetSessionTypesResponse() # noqa: E501
pass
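The FIXME above can usually be closed without hand-written fixtures, because swagger-codegen emits models whose attributes are all optional constructor keyword arguments. A minimal sketch follows; the session_types attribute name is an assumption based on the model name, not confirmed against the spec.

class TestGetSessionTypesResponseConstruction(unittest.TestCase):
    """Illustrative construction tests (hand-written, not generated)."""
    def test_construct_empty(self):
        # Generated models default every attribute to None.
        model = GetSessionTypesResponse()
        self.assertIsInstance(model, GetSessionTypesResponse)
    def test_construct_with_kwargs(self):
        # 'session_types' is assumed from the model name; adjust to match
        # the generated attribute list.
        model = GetSessionTypesResponse(session_types=[])
        self.assertEqual([], model.session_types)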
if __name__ == '__main__':
unittest.main()
[source: mindbody/API-Examples | path: SDKs/Python/test/test_get_session_types_response.py | license: bsd-2-clause]
import sys
from pysnmp.entity.rfc3413 import config
from pysnmp.proto import rfc1905, errind
from pysnmp.proto.api import v2c
from pysnmp.proto.proxy import rfc2576
from pysnmp import error, nextid, debug
from pysnmp.proto.error import StatusInformation
from pyasn1.type import univ
getNextHandle = nextid.Integer(0x7fffffff)
__null = univ.Null('')
def getNextVarBinds(varBinds, origVarBinds=None):
    # Strip SNMP exception values from a GETNEXT/GETBULK response row and
    # flag OIDs that failed to advance past the requested ones.
    errorIndication = None
    idx = nonNulls = len(varBinds)
    rspVarBinds = []
    while idx:
        idx = idx - 1
        # exception values (endOfMibView & friends) do not advance the walk
        if varBinds[idx][1].tagSet in (rfc1905.NoSuchObject.tagSet,
                                       rfc1905.NoSuchInstance.tagSet,
                                       rfc1905.EndOfMibView.tagSet):
            nonNulls = nonNulls - 1
        elif origVarBinds is not None:
            # response OIDs must be strictly increasing relative to the request
            if v2c.ObjectIdentifier(origVarBinds[idx][0]).asTuple() >= varBinds[idx][0].asTuple():
                errorIndication = errind.oidNotIncreasing
        rspVarBinds.insert(0, (varBinds[idx][0], __null))
    if not nonNulls:
        # every column hit an exception value: the walk is over
        rspVarBinds = []
    return errorIndication, rspVarBinds
class CommandGenerator:
_null = univ.Null('')
def __init__(self):
self.__pendingReqs = {}
def processResponsePdu(self,
snmpEngine,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
statusInformation,
sendPduHandle,
cbCtx):
origSendRequestHandle, cbFun, cbCtx = cbCtx
# 3.1.1
if sendPduHandle not in self.__pendingReqs:
raise error.PySnmpError('Missing sendPduHandle %s' % sendPduHandle)
( origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPduVersion,
origPdu,
origTimeout,
origRetryCount,
origRetries ) = self.__pendingReqs.pop(sendPduHandle)
snmpEngine.transportDispatcher.jobFinished(id(self))
# 3.1.3
if statusInformation:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendPduHandle %s, statusInformation %s' % (sendPduHandle, statusInformation))
errorIndication = statusInformation['errorIndication']
# SNMP engine discovery will take extra retries, allow that
if errorIndication in (errind.notInTimeWindow,
errind.unknownEngineID) and \
origRetries == origRetryCount + 2 or \
errorIndication not in (errind.notInTimeWindow,
errind.unknownEngineID) and \
origRetries == origRetryCount:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendPduHandle %s, retry count %d exceeded' % (sendPduHandle, origRetries))
cbFun(snmpEngine,
origSendRequestHandle,
statusInformation['errorIndication'],
None,
cbCtx)
return
try:
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPduVersion,
origPdu,
True, # expectResponse
origTimeout, # already in ticks
self.processResponsePdu,
(origSendRequestHandle, cbFun, cbCtx)
)
snmpEngine.transportDispatcher.jobStarted(id(self))
self.__pendingReqs[sendPduHandle] = (
origTransportDomain,
origTransportAddress,
origMessageProcessingModel,
origSecurityModel,
origSecurityName,
origSecurityLevel,
origContextEngineId,
origContextName,
origPduVersion,
origPdu,
origTimeout,
origRetryCount,
origRetries + 1
)
return
except StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('processResponsePdu: origSendRequestHandle %s, _sendPdu() failed with %r' % (sendPduHandle, statusInformation))
cbFun(snmpEngine,
origSendRequestHandle,
statusInformation['errorIndication'],
None,
cbCtx)
return
if origMessageProcessingModel != messageProcessingModel or \
origSecurityModel != securityModel or \
           origSecurityName != securityName or \
origContextEngineId and origContextEngineId != contextEngineId or \
origContextName and origContextName != contextName or \
origPduVersion != pduVersion:
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendPduHandle %s, request/response data mismatch' % sendPduHandle)
cbFun(snmpEngine,
origSendRequestHandle,
'badResponse', # errorIndication
None,
cbCtx)
return
# User-side API assumes SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v1ToV2(PDU, origPdu)
# 3.1.2
if v2c.apiPDU.getRequestID(PDU) != v2c.apiPDU.getRequestID(origPdu):
debug.logger & debug.flagApp and debug.logger('processResponsePdu: sendPduHandle %s, request-id/response-id mismatch' % sendPduHandle)
cbFun(snmpEngine,
origSendRequestHandle,
'badResponse', # errorIndication
None,
cbCtx)
return
cbFun(snmpEngine,
origSendRequestHandle,
None, # errorIndication
PDU,
cbCtx)
def sendPdu(self,
snmpEngine,
targetName,
contextEngineId,
contextName,
PDU,
cbFun,
cbCtx):
( transportDomain,
transportAddress,
timeout,
retryCount,
messageProcessingModel,
securityModel,
securityName,
securityLevel ) = config.getTargetInfo(snmpEngine, targetName)
# Convert timeout in seconds into timeout in timer ticks
timeoutInTicks = float(timeout)/100/snmpEngine.transportDispatcher.getTimerResolution()
SnmpEngineID, SnmpAdminString = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('SNMP-FRAMEWORK-MIB', 'SnmpEngineID', 'SnmpAdminString')
# Cast possible strings into bytes
if contextEngineId:
contextEngineId = SnmpEngineID(contextEngineId)
contextName = SnmpAdminString(contextName)
origPDU = PDU
# User-side API assumes SMIv2
if messageProcessingModel == 0:
PDU = rfc2576.v2ToV1(PDU)
pduVersion = 0
else:
pduVersion = 1
sendRequestHandle = getNextHandle()
# 3.1
sendPduHandle = snmpEngine.msgAndPduDsp.sendPdu(
snmpEngine,
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
PDU,
1, # expectResponse
timeoutInTicks,
self.processResponsePdu,
(sendRequestHandle, cbFun, cbCtx)
)
snmpEngine.transportDispatcher.jobStarted(id(self))
self.__pendingReqs[sendPduHandle] = (
transportDomain,
transportAddress,
messageProcessingModel,
securityModel,
securityName,
securityLevel,
contextEngineId,
contextName,
pduVersion,
origPDU,
timeoutInTicks,
retryCount,
0
)
debug.logger & debug.flagApp and debug.logger('sendPdu: sendPduHandle %s, timeout %d*10 ms/%d ticks, retry 0 of %d' % (sendPduHandle, timeout, timeoutInTicks, retryCount))
return sendRequestHandle
# backward compatibility stub
CommandGeneratorBase = CommandGenerator
class GetCommandGenerator(CommandGenerator):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
cbFun, cbCtx = cbCtx
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
PDU and v2c.apiPDU.getErrorStatus(PDU) or 0,
PDU and v2c.apiPDU.getErrorIndex(PDU, muteErrors=True) or 0,
PDU and v2c.apiPDU.getVarBinds(PDU) or (),
cbCtx)
def sendVarBinds(self,
snmpEngine,
targetName,
contextEngineId,
contextName,
varBinds,
cbFun,
cbCtx=None):
reqPDU = v2c.GetRequestPDU()
v2c.apiPDU.setDefaults(reqPDU)
v2c.apiPDU.setVarBinds(reqPDU, varBinds)
return self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(cbFun, cbCtx))
class SetCommandGenerator(CommandGenerator):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
cbFun, cbCtx = cbCtx
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
PDU and v2c.apiPDU.getErrorStatus(PDU) or 0,
PDU and v2c.apiPDU.getErrorIndex(PDU, muteErrors=True) or 0,
PDU and v2c.apiPDU.getVarBinds(PDU) or (),
cbCtx)
def sendVarBinds(self,
snmpEngine,
targetName,
contextEngineId,
contextName,
varBinds,
cbFun,
cbCtx=None):
reqPDU = v2c.SetRequestPDU()
v2c.apiPDU.setDefaults(reqPDU)
v2c.apiPDU.setVarBinds(reqPDU, varBinds)
return self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(cbFun, cbCtx))
class NextCommandGeneratorSingleRun(CommandGenerator):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
targetName, contextEngineId, contextName, reqPDU, cbFun, cbCtx = cbCtx
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
PDU and v2c.apiPDU.getErrorStatus(PDU) or 0,
PDU and v2c.apiPDU.getErrorIndex(PDU, muteErrors=True) or 0,
PDU and v2c.apiPDU.getVarBinds(PDU) or (),
cbCtx)
def sendVarBinds(self,
snmpEngine,
targetName,
contextEngineId,
contextName,
varBinds,
cbFun,
cbCtx=None):
reqPDU = v2c.GetNextRequestPDU()
v2c.apiPDU.setDefaults(reqPDU)
v2c.apiPDU.setVarBinds(reqPDU, varBinds)
return self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(targetName, contextEngineId, contextName,
reqPDU, cbFun, cbCtx))
class NextCommandGenerator(NextCommandGeneratorSingleRun):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
targetName, contextEngineId, contextName, reqPDU, cbFun, cbCtx = cbCtx
if errorIndication:
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
0, 0, (),
cbCtx)
return
varBindTable = v2c.apiPDU.getVarBindTable(reqPDU, PDU)
if v2c.apiPDU.getErrorStatus(PDU):
errorIndication, varBinds = None, ()
elif not varBindTable:
errorIndication, varBinds = errind.emptyResponse, ()
else:
errorIndication, varBinds = getNextVarBinds(
varBindTable[-1], v2c.apiPDU.getVarBinds(reqPDU)
)
if not cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
v2c.apiPDU.getErrorStatus(PDU),
v2c.apiPDU.getErrorIndex(PDU, muteErrors=True),
varBindTable,
cbCtx):
debug.logger & debug.flagApp and debug.logger('processResponseVarBinds: sendRequestHandle %s, app says to stop walking' % sendRequestHandle)
return # app says enough
if not varBinds:
return # no more objects available
v2c.apiPDU.setRequestID(reqPDU, v2c.getNextRequestID())
v2c.apiPDU.setVarBinds(reqPDU, varBinds)
try:
self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(targetName, contextEngineId, contextName,
reqPDU, cbFun, cbCtx))
except StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('sendVarBinds: sendPduHandle %s: sendPdu() failed with %r' % (sendRequestHandle, statusInformation))
cbFun(snmpEngine,
sendRequestHandle,
statusInformation['errorIndication'],
0, 0, (), cbCtx)
class BulkCommandGeneratorSingleRun(CommandGenerator):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
targetName, nonRepeaters, maxRepetitions, \
contextEngineId, contextName, reqPDU, cbFun, cbCtx = cbCtx
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
PDU and v2c.apiPDU.getErrorStatus(PDU) or 0,
PDU and v2c.apiPDU.getErrorIndex(PDU, muteErrors=True) or 0,
PDU and v2c.apiPDU.getVarBinds(PDU) or (),
cbCtx)
def sendVarBinds(self,
snmpEngine,
targetName,
contextEngineId,
contextName,
nonRepeaters,
maxRepetitions,
varBinds,
cbFun,
cbCtx=None):
reqPDU = v2c.GetBulkRequestPDU()
v2c.apiBulkPDU.setDefaults(reqPDU)
v2c.apiBulkPDU.setNonRepeaters(reqPDU, nonRepeaters)
v2c.apiBulkPDU.setMaxRepetitions(reqPDU, maxRepetitions)
v2c.apiBulkPDU.setVarBinds(reqPDU, varBinds)
return self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(targetName, nonRepeaters, maxRepetitions,
contextEngineId, contextName, reqPDU,
cbFun, cbCtx))
class BulkCommandGenerator(BulkCommandGeneratorSingleRun):
def processResponseVarBinds(self,
snmpEngine,
sendRequestHandle,
errorIndication,
PDU,
cbCtx):
targetName, nonRepeaters, maxRepetitions, \
contextEngineId, contextName, reqPDU, cbFun, cbCtx = cbCtx
if errorIndication:
cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
0, 0, (),
cbCtx)
return
varBindTable = v2c.apiBulkPDU.getVarBindTable(reqPDU, PDU)
if v2c.apiBulkPDU.getErrorStatus(PDU):
errorIndication, varBinds = None, ()
elif not varBindTable:
errorIndication, varBinds = errind.emptyResponse, ()
else:
errorIndication, varBinds = getNextVarBinds(
varBindTable[-1], v2c.apiPDU.getVarBinds(reqPDU)
)
nonRepeaters = v2c.apiBulkPDU.getNonRepeaters(reqPDU)
if nonRepeaters:
varBinds = v2c.apiBulkPDU.getVarBinds(reqPDU)[:int(nonRepeaters)] + varBinds[int(nonRepeaters):]
if not cbFun(snmpEngine,
sendRequestHandle,
errorIndication,
v2c.apiBulkPDU.getErrorStatus(PDU),
v2c.apiBulkPDU.getErrorIndex(PDU, muteErrors=True),
varBindTable, cbCtx):
debug.logger & debug.flagApp and debug.logger('processResponseVarBinds: sendRequestHandle %s, app says to stop walking' % sendRequestHandle)
return # app says enough
if not varBinds:
return # no more objects available
v2c.apiBulkPDU.setRequestID(reqPDU, v2c.getNextRequestID())
v2c.apiBulkPDU.setVarBinds(reqPDU, varBinds)
try:
self.sendPdu(snmpEngine,
targetName,
contextEngineId,
contextName,
reqPDU,
self.processResponseVarBinds,
(targetName, nonRepeaters, maxRepetitions,
contextEngineId, contextName, reqPDU, cbFun, cbCtx))
except StatusInformation:
statusInformation = sys.exc_info()[1]
debug.logger & debug.flagApp and debug.logger('processResponseVarBinds: sendPduHandle %s: _sendPdu() failed with %r' % (sendRequestHandle, statusInformation))
cbFun(snmpEngine,
sendRequestHandle,
statusInformation['errorIndication'],
0, 0, (), cbCtx)
#
# Obsolete, compatibility interfaces.
#
def __sendReqCbFun(snmpEngine,
sendRequestHandle,
errorIndication,
errorStatus,
errorIndex,
varBinds,
cbCtx):
cbFun, cbCtx = cbCtx
return cbFun(sendRequestHandle,
errorIndication,
errorStatus,
errorIndex,
varBinds,
cbCtx)
def _sendReq(self,
snmpEngine,
targetName,
varBinds,
cbFun,
cbCtx=None,
contextEngineId=None,
contextName=''):
return self.sendVarBinds(snmpEngine,
targetName,
contextEngineId,
contextName,
varBinds,
__sendReqCbFun,
(cbFun, cbCtx))
def _sendBulkReq(self,
snmpEngine,
targetName,
nonRepeaters,
maxRepetitions,
varBinds,
cbFun,
cbCtx=None,
contextEngineId=None,
contextName=''):
return self.sendVarBinds(snmpEngine,
targetName,
contextEngineId,
contextName,
nonRepeaters,
maxRepetitions,
varBinds,
__sendReqCbFun,
(cbFun, cbCtx))
# install compatibility wrappers
GetCommandGenerator.sendReq = _sendReq
SetCommandGenerator.sendReq = _sendReq
NextCommandGenerator.sendReq = _sendReq
NextCommandGeneratorSingleRun.sendReq = _sendReq
BulkCommandGenerator.sendReq = _sendBulkReq
BulkCommandGeneratorSingleRun.sendReq = _sendBulkReq
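For context, a minimal SNMPv2c GET built on this low-level API, following the layout of the stock pysnmp 4.x examples. The community string and target address are placeholders, and the asynsock transport path matches this bundled pysnmp vintage; treat it as a sketch rather than a verbatim recipe.

from pysnmp.entity import engine, config
from pysnmp.carrier.asynsock.dgram import udp
from pysnmp.entity.rfc3413 import cmdgen

snmpEngine = engine.SnmpEngine()
# v2c credentials (mpModel=1) and a UDP client transport
config.addV1System(snmpEngine, 'my-area', 'public')
config.addTargetParams(snmpEngine, 'my-creds', 'my-area', 'noAuthNoPriv', 1)
config.addSocketTransport(snmpEngine, udp.domainName,
                          udp.UdpSocketTransport().openClientMode())
config.addTargetAddr(snmpEngine, 'my-router', udp.domainName,
                     ('192.0.2.1', 161), 'my-creds')

def cbFun(snmpEngine, sendRequestHandle, errorIndication,
          errorStatus, errorIndex, varBinds, cbCtx):
    # Matches the seven-argument callback invoked by processResponseVarBinds().
    print(errorIndication, errorStatus, varBinds)

cmdgen.GetCommandGenerator().sendVarBinds(
    snmpEngine, 'my-router',
    None, '',                               # contextEngineId, contextName
    [((1, 3, 6, 1, 2, 1, 1, 1, 0), None)],  # sysDescr.0
    cbFun)
snmpEngine.transportDispatcher.runDispatcher()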
[source: imron/scalyr-agent-2 | path: scalyr_agent/third_party/pysnmp/entity/rfc3413/cmdgen.py | license: apache-2.0]
from django.conf import settings
def is_alpaca_enabled():
return getattr(settings, 'ALPACA_ENABLED', True)
def get_alpaca_project_path_fragment():
return getattr(settings, 'ALPACA_PROJECT_PATH_FRAGMENT', '')
def get_alpaca_environment():
    try:
        return settings.ALPACA_ENVIRONMENT
    except AttributeError:
        # A missing Django setting raises AttributeError, not ValueError.
        raise ValueError("ALPACA_ENVIRONMENT setting is required.")
def get_alpaca_monitor_host():
    try:
        return settings.ALPACA_MONITOR_HOST
    except AttributeError:
        raise ValueError("ALPACA_MONITOR_HOST setting is required.")
def get_alpaca_monitor_port():
return getattr(settings, 'ALPACA_MONITOR_PORT', 8195)
def get_alpaca_connection_pool_size():
return getattr(settings, 'ALPACA_CONNECTION_POOL_SIZE', 3)
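For reference, a Django settings block these helpers would read, with the optional defaults spelled out; the values are illustrative only.

# settings.py (illustrative)
ALPACA_ENABLED = True                        # optional, defaults to True
ALPACA_ENVIRONMENT = 'production'            # required, no default
ALPACA_MONITOR_HOST = 'alpaca.example.com'   # required, no default
ALPACA_MONITOR_PORT = 8195                   # optional, default shown
ALPACA_CONNECTION_POOL_SIZE = 3              # optional, default shown
ALPACA_PROJECT_PATH_FRAGMENT = ''            # optional, default shown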
[source: msiedlarek/alpaca-django | path: alpaca_django/settings.py | license: apache-2.0]
from __future__ import division
from django.conf import settings
from django.db import models
class Ingredient(models.Model):
name = models.CharField(max_length=250)
unit = models.CharField(max_length=20)
class Meta:
unique_together = ("name", "unit")
def __str__(self):
return f"{self.name} ({self.unit})"
class ProductManager(models.Manager):
def search_by_ingredient(self, pattern):
ingredients = Ingredient.objects.filter(name__icontains=pattern)
products = set()
for i in ingredients:
products |= set(i.product_set.all())
with_prices = [product for product in products if product.sep is not None]
products = sorted(with_prices, key=lambda x: x.sep)
return products
def search_by_nappi(self, nappi):
products = Product.objects.filter(nappi_code=nappi).order_by("sep")
return products
def search_by_product_name(self, pattern):
products = Product.objects.filter(name__icontains=pattern).order_by("sep")
return products
class Product(models.Model):
nappi_code = models.CharField(max_length=20, null=False, unique=True)
regno = models.CharField(max_length=50, null=False)
name = models.CharField(max_length=100)
schedule = models.CharField(max_length=22, null=True)
dosage_form = models.CharField(max_length=20, null=True)
pack_size = models.FloatField(null=False)
num_packs = models.IntegerField(null=False)
sep = models.FloatField(null=False)
is_generic = models.CharField(max_length=20, null=True)
ingredients = models.ManyToManyField(Ingredient, through='ProductIngredient')
objects = ProductManager()
    # wish this could be dependency injection but it isn't clear how to do this with Django models
@staticmethod
def parameters():
return settings.PRICE_PARAMETERS
def __str__(self):
return self.name
@property
def related_products(self):
num_ingredients = len(self.product_ingredients.all())
qs = Product.objects.annotate(models.Count("ingredients")).filter(ingredients__count=num_ingredients)
for pi in self.product_ingredients.all():
qs = qs.filter(product_ingredients__ingredient=pi.ingredient, product_ingredients__strength=pi.strength)
return qs.order_by("sep")
@property
def max_fee(self):
if self.dispensing_fee is None or self.sep is None:
return 0
return self.dispensing_fee + self.sep
@property
def dispensing_fee(self):
params = Product.parameters()
VAT = params["VAT"]
        try:
            for threshold, perc, flat_rate in params["prices"]:
                if self.sep < threshold:
                    return (self.sep * perc + flat_rate) * VAT
            # implicitly returns None when sep exceeds every threshold
        except (ValueError, TypeError):
            return self.sep
@property
def cost_per_unit(self):
if self.pack_size is None:
return 0
if self.pack_size > 0:
qty = self.pack_size * self.num_packs
else:
qty = self.num_packs
return self.max_fee / qty
@property
def min_cost_per_unit(self):
if self.pack_size > 0:
qty = self.pack_size * self.num_packs
else:
qty = self.num_packs
return self.sep / qty
@property
def max_cost_per_unit(self):
return self.cost_per_unit
@property
def copayments(self):
return [
{"formulary": fp.formulary.name, "copayment": fp.copayment}
for fp in self.formularyproducts.all()
]
class ProductIngredient(models.Model):
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="product_ingredients")
ingredient = models.ForeignKey(Ingredient, on_delete=models.CASCADE)
strength = models.CharField(max_length=20)
class Meta:
unique_together = ("product", "ingredient", "strength")
def __str__(self):
return "%s %s" % (self.ingredient, self.strength)
class LastUpdatedManager(models.Manager):
def last_updated(self):
return LastUpdated.objects.all().order_by('-update_date')[0]
class LastUpdated(models.Model):
update_date = models.DateField(auto_now_add=True)
objects = LastUpdatedManager()
def __str__(self):
return str(self.update_date)
class Formulary(models.Model):
name = models.CharField(max_length=255)
last_updated = models.DateField()
def __str__(self):
return self.name
class FormularyProduct(models.Model):
formulary = models.ForeignKey(Formulary, on_delete=models.CASCADE, related_name="products")
product = models.ForeignKey(Product, on_delete=models.CASCADE, related_name="formularyproducts")
price = models.FloatField(null=False)
@property
def copayment(self):
return max(self.product.max_fee - self.price, 0)
class Meta:
unique_together = ("formulary", "product")
[source: Code4SA/medicine-price-registry | path: mpr/models.py | license: apache-2.0]
from __future__ import print_function

import sys
# Initialize a bunch of variables.
x, y, x_lo, x_hi, y_lo, y_hi = None, None, None, None, None, None
current_count = 0 # Counts the number of observations in current bin.
# Input comes from STDIN.
for line in sys.stdin:
# Remove leading and trailing whitespace
line = line.strip()
    # Parse the input we got from mapper.py, which has the format "x,y,count".
    x, y, count = line.split(',')
# Convert count, x, y to appropriate type.
try:
count = int(count)
x = float(x)
y = float(y)
    except ValueError:
        # Conversion failed; skip this malformed record.
        continue
    # Since Hadoop sorts by key, we can take the bins to be in order.
    if x_lo is not None and x_lo < x <= x_hi and y_lo < y <= y_hi:  # (x,y) is in current box
current_count += count
else: # update to a new box
        if current_count > 0:  # skip the initial state, before the first box exists
            print('%.1f,%.1f,%.1f,%.1f,%.0f' % (x_lo, x_hi, y_lo, y_hi, current_count))
# A new box!
x_lo, x_hi = x-.1, x
y_lo, y_hi = y-.1, y
# Number of observations in box increases.
current_count = count
# Print the last group.
print('%.1f,%.1f,%.1f,%.1f,%.0f' % (x_lo, x_hi, y_lo, y_hi, current_count))
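A quick local smoke test, assuming the script is saved as reducer.py and fed pre-sorted input the way Hadoop's shuffle would deliver it:

$ printf '0.1,0.2,1\n0.1,0.2,2\n0.2,0.2,1\n' | python reducer.py
0.0,0.1,0.1,0.2,3
0.1,0.2,0.1,0.2,1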
[source: longphin/Bayesian---STA250 | path: HW2/Streaming/reducer.py | license: mit]
import smtplib
class CL_Email:
def __init__(self, email_obj):
self.data = email_obj
self.sender = self.data['sender']
self.recipient = self.data['recipients']
self.server_pwd = self.data['server_pwd']
self.message = ''
def write(self, craigslist_listings):
self.message += 'Subject: Craigslist Bot (' + str(len(craigslist_listings)) + ' new posts)\n\n'
for post in craigslist_listings:
self.message += '$' + str(post.price) + ' ' + post.title + \
'\nKeyword matches: ' + str([k for k in post.keyword_matches]) + \
'\n' + post.id + \
'\n' + post.summary + \
'\n\n------------------\n\n'
def send(self):
server = smtplib.SMTP('smtp.gmail.com',587) #port 465 or 587
server.ehlo()
server.starttls()
server.ehlo()
server.login(self.sender, self.server_pwd)
server.sendmail(self.sender, self.recipient, self.message.encode('utf-8'))
server.close()
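A hedged usage sketch, assuming a scraped-listing object exposing the attributes write() reads; the addresses and password are placeholders, and send() would really contact smtp.gmail.com.

if __name__ == '__main__':
    class FakePost(object):
        # Stand-in for whatever the scraper produces; attribute names follow write().
        price = 450
        title = 'Road bike'
        keyword_matches = ['bike']
        id = 'https://sfbay.craigslist.org/d/post/1234.html'
        summary = 'Lightly used road bike.'
    email = CL_Email({
        'sender': 'bot@example.com',
        'recipients': 'me@example.com',
        'server_pwd': 'app-specific-password',
    })
    email.write([FakePost()])
    email.send()  # actually logs in and sends; comment out for a dry run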
[source: evanhenri/Craiglist-Post-Notification-Bot | path: cl_email.py | license: mit]
from collections import deque
import itertools
import operator
import net_utils
import tobii_api
import cv2
import cv2.aruco as aruco
import config
import logging
import numpy
def nothing(x):
pass
class VideoProcessing():
''' Detect Fiducial and check if gaze position falls within ROI '''
def __init__(self, peer):
self.output_filters = OutputFilters()
self.lastid = None
self.lastpts = 0
# start video Keep-Alive
self.sock = net_utils.mksock(peer)
self.keepalive = tobii_api.KeepAlive(self.sock, peer, 'video')
# init aruco detector
self.parameters = aruco.DetectorParameters_create()
self.aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_100)
# init GUI
if not config.HEADLESS:
            self.param_window = 'Gaze Params'
            self.image_window = 'Gaze Image'
cv2.namedWindow(self.param_window, cv2.WINDOW_NORMAL)
cv2.namedWindow(self.image_window, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.param_window, 600, 200)
cv2.resizeWindow(self.image_window, 1280, 720)
cv2.createTrackbar('X Offset', self.param_window, config.GAZE_OFFSET_X+100, 200, nothing)
cv2.createTrackbar('Y Offset', self.param_window, config.GAZE_OFFSET_Y+100, 200, nothing)
cv2.createTrackbar('Threshold', self.param_window, config.GAZE_THRESHOLD, 30, nothing)
def detect(self, frame, data):
# detect aruco fiducials
corners, ids, rejectedImgPoints = aruco.detectMarkers(frame, self.aruco_dict, parameters=self.parameters)
annotated = aruco.drawDetectedMarkers(frame, corners)
serialout = None
if data is not None:
rows = frame.shape[0]
cols = frame.shape[1]
# convert to pixel coords and annotate image
offsetx = config.GAZE_OFFSET_X
offsety = config.GAZE_OFFSET_Y
if not config.HEADLESS:
offsetx = cv2.getTrackbarPos('X Offset', self.param_window) - 100
offsety = cv2.getTrackbarPos('Y Offset', self.param_window) - 100
gazex = int(round(cols*data['gp'][0])) - offsetx
gazey = int(round(rows*data['gp'][1])) - offsety
if not config.HEADLESS:
cv2.circle(annotated, (gazex, gazey), 10, (0, 0, 255), 4)
detectedid = None
# check if gaze position falls within roi
if len(corners) > 0 and ids is not None:
for roi, id in zip(corners, ids):
if config.DISTANCES:
mroi = cv2.moments(roi)
cXroi = int(mroi["m10"] / mroi["m00"])
cYroi = int(mroi["m01"] / mroi["m00"])
distance = numpy.linalg.norm(numpy.array((cXroi,cYroi))-numpy.array((gazex,gazey)))
angle = numpy.arctan2((gazey-cYroi),(gazex-cXroi))*180/numpy.pi
if angle<0:
angle = angle + 360
serialout = (str(int(id)).zfill(2)+str(int(distance)).zfill(4)+str(int(angle)).zfill(4))+'A'
logging.debug('Marker ' +str(id) + ' centre ' + str(cXroi) + ',' + str(cYroi) + "distance " + str(distance) + "Angle: " + str(angle) + "Serialout: " + serialout)
if cv2.pointPolygonTest(roi, (gazex, gazey), False) >= 0:
serialout = (str(int(id)).zfill(2)+str(int(distance)).zfill(4)+'9999A')
threshold = config.GAZE_THRESHOLD
if not config.HEADLESS:
threshold = cv2.getTrackbarPos('Threshold', self.param_window)
self.output_filters.set_threshold(threshold)
detectedid = self.output_filters.process(id[0])
if detectedid is not None:
logging.info('DETECTED MARKER ' + str(detectedid))
self.lastid = detectedid
break
# annotate fiducial id on frame
if self.lastid is not None and not config.HEADLESS:
cv2.putText(annotated, str(self.lastid), (100, 200), cv2.FONT_HERSHEY_SIMPLEX, 4, (0, 255, 0), 2, cv2.LINE_AA)
# display image
if not config.HEADLESS:
cv2.imshow(self.image_window, annotated)
return detectedid, serialout
else:
if not config.HEADLESS:
cv2.imshow(self.image_window, annotated)
return None, serialout
def stop(self):
self.keepalive.stop()
if not config.HEADLESS:
cv2.destroyAllWindows()
class OutputFilters():
''' filter detections '''
def __init__(self):
        # maxlen makes append() evict the oldest frame, so the dwell window
        # stays bounded instead of growing forever
        self.queue = deque([None]*config.DWELL_TIME_FRAMES,
                           maxlen=config.DWELL_TIME_FRAMES)
self.threshold = 0
def set_threshold(self, threshold):
self.threshold = threshold
# return most common element in a list
# https://stackoverflow.com/questions/1518522/python-most-common-element-in-a-list
def __most_common(self, L):
# get an iterable of (item, iterable) pairs
SL = sorted((x, i) for i, x in enumerate(L))
groups = itertools.groupby(SL, key=operator.itemgetter(0))
# auxiliary function to get 'quality' for an item
def _auxfun(g):
item, iterable = g
count = 0
min_index = len(L)
for _, where in iterable:
count += 1
min_index = min(min_index, where)
return count, -min_index
# pick the highest-count/earliest item
return max(groups, key=_auxfun)[0]
    def process(self, id):
        self.queue.append(id)
        # drop the None padding so mixed None/int values never reach the sort
        # inside __most_common (which would raise a TypeError on Python 3)
        detections = [x for x in self.queue if x is not None]
        if not detections:
            return None
        # get the most detected fiducial id within the dwell window
        most_id = self.__most_common(detections)
        # if we have more than threshold detections per time frame, it's a hit!!
        # (report the dominant id, not necessarily the newest frame's id)
        if detections.count(most_id) >= self.threshold:
            return most_id
        return None
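A hedged standalone sketch of the dwell filter, assuming the repo's config.DWELL_TIME_FRAMES is importable; the marker ids are invented.

if __name__ == '__main__':
    filt = OutputFilters()
    filt.set_threshold(3)
    # Simulate five consecutive frames dwelling on marker 7 with one glitch.
    for marker_id in (7, 7, 2, 7, 7):
        hit = filt.process(marker_id)
        print('%s -> %s' % (marker_id, hit))  # reports 7 once it dominates the window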
[source: robot-army/gazecontrol | path: video_processing.py | license: apache-2.0]
import mock
from os_brick import encryptors
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from nova import block_device
from nova import context
from nova import exception
from nova import objects
from nova.objects import fields
from nova import test
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import matchers
from nova.virt import block_device as driver_block_device
from nova.virt import driver
from nova.virt import fake as fake_virt
from nova.volume import cinder
class TestDriverBlockDevice(test.NoDBTestCase):
# This is used to signal if we're dealing with a new style volume
# attachment (Cinder v3.44 flow).
attachment_id = None
driver_classes = {
'swap': driver_block_device.DriverSwapBlockDevice,
'ephemeral': driver_block_device.DriverEphemeralBlockDevice,
'volume': driver_block_device.DriverVolumeBlockDevice,
'volsnapshot': driver_block_device.DriverVolSnapshotBlockDevice,
'volimage': driver_block_device.DriverVolImageBlockDevice,
'volblank': driver_block_device.DriverVolBlankBlockDevice
}
swap_bdm_dict = block_device.BlockDeviceDict(
{'id': 1, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'disk_bus': 'scsi',
'volume_size': 2,
'boot_index': -1})
swap_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2,
'disk_bus': 'scsi'}
swap_legacy_driver_bdm = {
'device_name': '/dev/sdb1',
'swap_size': 2}
ephemeral_bdm_dict = block_device.BlockDeviceDict(
{'id': 2, 'instance_uuid': uuids.instance,
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 4,
'guest_format': 'ext4',
'delete_on_termination': True,
'boot_index': -1})
ephemeral_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'device_type': 'disk',
'guest_format': 'ext4',
'disk_bus': 'scsi'}
ephemeral_legacy_driver_bdm = {
'device_name': '/dev/sdc1',
'size': 4,
'virtual_name': 'ephemeral0',
'num': 0}
volume_bdm_dict = block_device.BlockDeviceDict(
{'id': 3, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda1',
'source_type': 'volume',
'disk_bus': 'scsi',
'device_type': 'disk',
'volume_size': 8,
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'guest_format': 'ext4',
'connection_info': '{"fake": "connection_info"}',
'delete_on_termination': False,
'boot_index': 0})
volume_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': 'ext4',
'boot_index': 0,
'volume_type': None}
volume_legacy_driver_bdm = {
'mount_device': '/dev/sda1',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': False}
volsnapshot_bdm_dict = block_device.BlockDeviceDict(
{'id': 4, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1,
'volume_type': None})
volsnapshot_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volsnapshot_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volimage_bdm_dict = block_device.BlockDeviceDict(
{'id': 5, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 1,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'image_id': 'fake-image-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1,
'volume_type': None})
volimage_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volimage_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
volblank_bdm_dict = block_device.BlockDeviceDict(
{'id': 6, 'instance_uuid': uuids.instance,
'device_name': '/dev/sda2',
'delete_on_termination': True,
'volume_size': 3,
'disk_bus': 'scsi',
'device_type': 'disk',
'source_type': 'blank',
'destination_type': 'volume',
'connection_info': '{"fake": "connection_info"}',
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1,
'volume_type': None})
volblank_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True,
'disk_bus': 'scsi',
'device_type': 'disk',
'guest_format': None,
'boot_index': -1,
'volume_type': None}
volblank_legacy_driver_bdm = {
'mount_device': '/dev/sda2',
'connection_info': {"fake": "connection_info"},
'delete_on_termination': True}
def setUp(self):
super(TestDriverBlockDevice, self).setUp()
self.volume_api = self.mox.CreateMock(cinder.API)
self.virt_driver = self.mox.CreateMock(driver.ComputeDriver)
self.context = context.RequestContext('fake_user',
'fake_project')
# create bdm objects for testing
self.swap_bdm = fake_block_device.fake_bdm_object(
self.context, self.swap_bdm_dict)
self.ephemeral_bdm = fake_block_device.fake_bdm_object(
self.context, self.ephemeral_bdm_dict)
self.volume_bdm = fake_block_device.fake_bdm_object(
self.context, self.volume_bdm_dict)
self.volsnapshot_bdm = fake_block_device.fake_bdm_object(
self.context, self.volsnapshot_bdm_dict)
self.volimage_bdm = fake_block_device.fake_bdm_object(
self.context, self.volimage_bdm_dict)
self.volblank_bdm = fake_block_device.fake_bdm_object(
self.context, self.volblank_bdm_dict)
# Set the attachment_id on our fake class variables which we have
# to do in setUp so that any attachment_id set by a subclass will
# be used properly.
for name in ('volume', 'volsnapshot', 'volimage', 'volblank'):
for attr in ('%s_bdm', '%s_driver_bdm'):
bdm = getattr(self, attr % name)
bdm['attachment_id'] = self.attachment_id
@mock.patch('nova.virt.block_device.LOG')
@mock.patch('os_brick.encryptors')
def test_driver_detach_passes_failed(self, enc, log):
virt = mock.MagicMock()
virt.detach_volume.side_effect = exception.DeviceDetachFailed(
device='sda', reason='because testing')
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
        inst = mock.MagicMock()
vol_api = mock.MagicMock()
# Make sure we pass through DeviceDetachFailed,
# but don't log it as an exception, just a warning
self.assertRaises(exception.DeviceDetachFailed,
driver_bdm.driver_detach,
self.context, inst, vol_api, virt)
self.assertFalse(log.exception.called)
self.assertTrue(log.warning.called)
vol_api.roll_detaching.assert_called_once_with(self.context,
driver_bdm.volume_id)
def test_no_device_raises(self):
for name, cls in self.driver_classes.items():
bdm = fake_block_device.fake_bdm_object(
self.context, {'no_device': True})
self.assertRaises(driver_block_device._NotTransformable,
cls, bdm)
def _test_driver_device(self, name):
db_bdm = getattr(self, "%s_bdm" % name)
test_bdm = self.driver_classes[name](db_bdm)
expected = getattr(self, "%s_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm))
for k, v in db_bdm.items():
field_val = getattr(test_bdm._bdm_obj, k)
if isinstance(field_val, bool):
v = bool(v)
self.assertEqual(field_val, v)
for field, value in expected.items():
# Test that all driver bdm fields are available as both attrs and
# dict values
self.assertEqual(test_bdm[field], value)
self.assertEqual(getattr(test_bdm, field), value)
test_value = mock.sentinel.value
if field in test_bdm._proxy_as_attr:
# We can't set a versioned object field to a sentinel because
# it's an invalid type. It's not worth creating valid example
# values for all possible field types just for this, so we just
# test setting it to its current value. This at least
# exercises the code path without being a maintenance burden.
test_value = value
# Test that we can set values via either attribute or dict
test_bdm[field] = test_value
self.assertEqual(getattr(test_bdm, field), test_value)
setattr(test_bdm, field, value)
self.assertEqual(test_bdm[field], value)
# Reset the value
test_bdm[field] = value
expected = getattr(self, "%s_legacy_driver_bdm" % name)
self.assertThat(expected, matchers.DictMatches(test_bdm.legacy()))
# Test passthru attributes
for passthru in test_bdm._proxy_as_attr:
self.assertEqual(getattr(test_bdm, passthru),
getattr(test_bdm._bdm_obj, passthru))
# Make sure that all others raise _invalidType
for other_name, cls in self.driver_classes.items():
if other_name == name:
continue
self.assertRaises(driver_block_device._InvalidType,
cls,
getattr(self, '%s_bdm' % name))
# Test the save method
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
for fld, alias in test_bdm._update_on_save.items():
# We can't set fake values on enums, like device_type,
# so skip those.
if not isinstance(test_bdm._bdm_obj.fields[fld],
fields.BaseEnumField):
test_bdm[alias or fld] = 'fake_changed_value'
test_bdm.save()
for fld, alias in test_bdm._update_on_save.items():
self.assertEqual(test_bdm[alias or fld],
getattr(test_bdm._bdm_obj, fld))
save_mock.assert_called_once_with()
def check_save():
self.assertEqual(set([]), test_bdm._bdm_obj.obj_what_changed())
# Test that nothing is set on the object if there are no actual changes
test_bdm._bdm_obj.obj_reset_changes()
with mock.patch.object(test_bdm._bdm_obj, 'save') as save_mock:
save_mock.side_effect = check_save
test_bdm.save()
def _test_driver_default_size(self, name):
size = 'swap_size' if name == 'swap' else 'size'
no_size_bdm = getattr(self, "%s_bdm_dict" % name).copy()
no_size_bdm['volume_size'] = None
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
del no_size_bdm['volume_size']
driver_bdm = self.driver_classes[name](
fake_block_device.fake_bdm_object(self.context, no_size_bdm))
self.assertEqual(driver_bdm[size], 0)
def test_driver_swap_block_device(self):
self._test_driver_device("swap")
def test_driver_swap_default_size(self):
self._test_driver_default_size('swap')
def test_driver_ephemeral_block_device(self):
self._test_driver_device("ephemeral")
def test_driver_ephemeral_default_size(self):
self._test_driver_default_size('ephemeral')
def test_driver_volume_block_device(self):
self._test_driver_device("volume")
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
self.assertEqual(test_bdm['connection_info'],
jsonutils.loads(test_bdm._bdm_obj.connection_info))
self.assertEqual(test_bdm._bdm_obj.id, 3)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-1')
self.assertEqual(test_bdm.volume_size, 8)
def test_driver_snapshot_block_device(self):
self._test_driver_device("volsnapshot")
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 4)
self.assertEqual(test_bdm.snapshot_id, 'fake-snapshot-id-1')
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
self.assertEqual(test_bdm.volume_size, 3)
def test_driver_image_block_device(self):
self._test_driver_device('volimage')
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
self.assertEqual(test_bdm._bdm_obj.id, 5)
self.assertEqual(test_bdm.image_id, 'fake-image-id-1')
self.assertEqual(test_bdm.volume_size, 1)
def test_driver_image_block_device_destination_local(self):
self._test_driver_device('volimage')
bdm = self.volimage_bdm_dict.copy()
bdm['destination_type'] = 'local'
self.assertRaises(driver_block_device._InvalidType,
self.driver_classes['volimage'],
fake_block_device.fake_bdm_object(self.context, bdm))
def test_driver_blank_block_device(self):
self._test_driver_device('volblank')
test_bdm = self.driver_classes['volblank'](
self.volblank_bdm)
self.assertEqual(6, test_bdm._bdm_obj.id)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
self.assertEqual(3, test_bdm.volume_size)
def _test_call_wait_func(self, delete_on_termination, delete_fail=False):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['delete_on_termination'] = delete_on_termination
with mock.patch.object(self.volume_api, 'delete') as vol_delete:
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id='fake-id',
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
if delete_on_termination and delete_fail:
vol_delete.side_effect = Exception()
self.assertRaises(exception.VolumeNotCreated,
test_bdm._call_wait_func,
context=self.context,
wait_func=wait_func,
volume_api=self.volume_api,
volume_id='fake-id')
self.assertEqual(delete_on_termination, vol_delete.called)
def test_call_wait_delete_volume(self):
self._test_call_wait_func(True)
def test_call_wait_delete_volume_fail(self):
self._test_call_wait_func(True, True)
def test_call_wait_no_delete_volume(self):
self._test_call_wait_func(False)
def test_volume_delete_attachment(self, include_shared_targets=False):
attachment_id = uuids.attachment
driver_bdm = self.driver_classes['volume'](self.volume_bdm)
driver_bdm['attachment_id'] = attachment_id
elevated_context = self.context.elevated()
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': None}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
volume = {'id': driver_bdm.volume_id,
'attach_status': 'attached',
'status': 'in-use'}
if include_shared_targets:
volume['shared_targets'] = True
volume['service_uuid'] = uuids.service_uuid
with test.nested(
mock.patch.object(driver_bdm, '_get_volume', return_value=volume),
mock.patch.object(self.virt_driver, 'get_volume_connector',
return_value=connector),
mock.patch('nova.utils.synchronized',
side_effect=lambda a: lambda f: lambda *args: f(*args)),
mock.patch.object(self.volume_api, 'attachment_delete'),
) as (mock_get_volume, mock_get_connector, mock_sync, vapi_attach_del):
driver_bdm.detach(elevated_context, instance,
self.volume_api, self.virt_driver,
attachment_id=attachment_id)
if include_shared_targets:
mock_sync.assert_called_once_with((uuids.service_uuid))
vapi_attach_del.assert_called_once_with(elevated_context,
attachment_id)
def test_volume_delete_attachment_with_shared_targets(self):
self.test_volume_delete_attachment(include_shared_targets=True)
def _test_volume_attach(self, driver_bdm, bdm_dict,
fake_volume, fail_check_av_zone=False,
driver_attach=False, fail_driver_attach=False,
volume_attach=True, fail_volume_attach=False,
access_mode='rw', availability_zone=None,
multiattach=False, driver_multi_attach=False,
fail_with_virt_driver=False,
include_shared_targets=False):
if driver_multi_attach:
self.virt_driver.capabilities['supports_multiattach'] = True
else:
self.virt_driver.capabilities['supports_multiattach'] = False
elevated_context = self.context.elevated()
self.stubs.Set(self.context, 'elevated',
lambda: elevated_context)
self.mox.StubOutWithMock(driver_bdm._bdm_obj, 'save')
self.mox.StubOutWithMock(encryptors, 'get_encryption_metadata')
instance_detail = {'id': '123', 'uuid': uuids.uuid,
'availability_zone': availability_zone}
instance = fake_instance.fake_instance_obj(self.context,
**instance_detail)
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'access_mode': access_mode}}
expected_conn_info = {'data': {'access_mode': access_mode},
'serial': fake_volume['id']}
if multiattach and driver_multi_attach:
expected_conn_info['multiattach'] = True
enc_data = {'fake': 'enc_data'}
if include_shared_targets:
fake_volume['shared_targets'] = True
fake_volume['service_uuid'] = uuids.service_uuid
self.volume_api.get(
self.context, fake_volume['id'],
microversion='3.48').AndReturn(fake_volume)
else:
# First call to get() fails because the API isn't new enough.
self.volume_api.get(
self.context, fake_volume['id'], microversion='3.48').AndRaise(
exception.CinderAPIVersionNotAvailable(version='3.48'))
# So we fallback to the old call.
self.volume_api.get(self.context,
fake_volume['id']).AndReturn(fake_volume)
if not fail_check_av_zone:
self.volume_api.check_availability_zone(self.context,
fake_volume,
instance=instance).AndReturn(None)
else:
self.volume_api.check_availability_zone(self.context,
fake_volume,
instance=instance).AndRaise(
test.TestingException)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
self.virt_driver.get_volume_connector(instance).AndReturn(connector)
if fail_with_virt_driver:
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if self.attachment_id is None:
self.volume_api.initialize_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(connection_info)
else:
self.volume_api.attachment_update(
elevated_context, self.attachment_id, connector,
bdm_dict['device_name']).AndReturn(
{'connection_info': connection_info})
if driver_attach:
encryptors.get_encryption_metadata(
elevated_context, self.volume_api, fake_volume['id'],
connection_info).AndReturn(enc_data)
if not fail_driver_attach:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndReturn(None)
else:
self.virt_driver.attach_volume(
elevated_context, expected_conn_info, instance,
bdm_dict['device_name'],
disk_bus=bdm_dict['disk_bus'],
device_type=bdm_dict['device_type'],
encryption=enc_data).AndRaise(test.TestingException)
if self.attachment_id is None:
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
else:
self.volume_api.attachment_delete(
elevated_context, self.attachment_id).AndReturn(None)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
if volume_attach:
# save updates before marking the volume as in-use
driver_bdm._bdm_obj.save().AndReturn(None)
if not fail_volume_attach:
if self.attachment_id is None:
self.volume_api.attach(elevated_context, fake_volume['id'],
uuids.uuid, bdm_dict['device_name'],
mode=access_mode).AndReturn(None)
else:
self.volume_api.attachment_complete(
elevated_context, self.attachment_id).AndReturn(None)
else:
if self.attachment_id is None:
self.volume_api.attach(elevated_context, fake_volume['id'],
uuids.uuid, bdm_dict['device_name'],
mode=access_mode).AndRaise(
test.TestingException)
if driver_attach:
self.virt_driver.detach_volume(
self.context, expected_conn_info, instance,
bdm_dict['device_name'],
encryption=enc_data).AndReturn(None)
self.volume_api.terminate_connection(
elevated_context, fake_volume['id'],
connector).AndReturn(None)
self.volume_api.detach(elevated_context,
fake_volume['id']).AndReturn(None)
else:
self.volume_api.attachment_complete(
elevated_context, self.attachment_id).AndRaise(
test.TestingException)
self.volume_api.attachment_delete(
elevated_context, self.attachment_id).AndReturn(None)
# The @update_db decorator will save any changes.
driver_bdm._bdm_obj.save().AndReturn(None)
return instance, expected_conn_info
def test_volume_attach(self, include_shared_targets=False):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume,
include_shared_targets=include_shared_targets)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_with_shared_targets(self):
self.test_volume_attach(include_shared_targets=True)
def test_volume_attach_ro(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, access_mode='ro')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_update_size(self):
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm.volume_size = None
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached',
'size': 42}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertEqual(expected_conn_info, test_bdm['connection_info'])
self.assertEqual(42, test_bdm.volume_size)
def test_volume_attach_check_av_zone_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_check_av_zone=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver)
def test_volume_no_volume_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=False)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_driver_attach=False)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_no_check_driver_attach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver,
do_driver_attach=True)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_driver_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, driver_attach=True,
fail_driver_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=True)
@mock.patch('nova.objects.BlockDeviceMapping.save')
@mock.patch('nova.volume.cinder.API')
@mock.patch('os_brick.encryptors.get_encryption_metadata',
return_value={})
def test_volume_attach_volume_attach_fails(self, mock_get_encryption,
mock_volume_api, mock_bdm_save):
"""Tests that attaching the volume fails and driver rollback occurs."""
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
mock_volume_api.get.return_value = volume
instance = fake_instance.fake_instance_obj(self.context)
virt_driver = fake_virt.SmallFakeDriver(virtapi=mock.MagicMock())
fake_conn_info = {
'serial': volume['id'],
'data': {
'foo': 'bar'
}
}
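        # A populated attachment_id selects the new-style Cinder volume
        # attachment flow; otherwise the legacy attach flow is stubbed below.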
if self.attachment_id:
mock_volume_api.attachment_update.return_value = {
'connection_info': fake_conn_info
}
mock_volume_api.attachment_complete.side_effect = (
test.TestingException)
else:
# legacy flow, stub out the volume_api accordingly
mock_volume_api.attach.side_effect = test.TestingException
mock_volume_api.initialize_connection.return_value = fake_conn_info
with mock.patch.object(virt_driver, 'detach_volume') as drvr_detach:
with mock.patch.object(self.context, 'elevated',
return_value=self.context):
self.assertRaises(test.TestingException, test_bdm.attach,
self.context, instance, mock_volume_api,
virt_driver, do_driver_attach=True)
drvr_detach.assert_called_once_with(
self.context, fake_conn_info, instance,
self.volume_bdm.device_name,
encryption=mock_get_encryption.return_value)
if self.attachment_id:
mock_volume_api.attachment_delete.assert_called_once_with(
self.context, self.attachment_id)
else:
mock_volume_api.terminate_connection.assert_called_once_with(
self.context, volume['id'],
virt_driver.get_volume_connector(instance))
mock_volume_api.detach.assert_called_once_with(
self.context, volume['id'])
self.assertEqual(2, mock_bdm_save.call_count)
def test_volume_attach_no_driver_attach_volume_attach_fails(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
instance, _ = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, fail_volume_attach=True)
self.mox.ReplayAll()
self.assertRaises(test.TestingException, test_bdm.attach, self.context,
instance, self.volume_api, self.virt_driver,
do_driver_attach=False)
def test_refresh_connection(self):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
connector = {'ip': 'fake_ip', 'host': 'fake_host'}
connection_info = {'data': {'multipath_id': 'fake_multipath_id'}}
expected_conn_info = {'data': {'multipath_id': 'fake_multipath_id'},
'serial': 'fake-volume-id-2'}
self.mox.StubOutWithMock(test_bdm._bdm_obj, 'save')
if self.attachment_id is None:
self.virt_driver.get_volume_connector(instance).AndReturn(
connector)
self.volume_api.initialize_connection(
self.context, test_bdm.volume_id,
connector).AndReturn(connection_info)
else:
self.volume_api.attachment_get(
self.context, self.attachment_id).AndReturn(
{'connection_info': connection_info})
test_bdm._bdm_obj.save().AndReturn(None)
self.mox.ReplayAll()
test_bdm.refresh_connection_info(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_snapshot_attach_no_volume(self):
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone=None,
volume_type=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_snapshot_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the snapshot has the same AZ as
# the instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.get_snapshot(self.context,
'fake-snapshot-id-1').AndReturn(snapshot)
self.volume_api.create(self.context, 3, '', '', snapshot,
availability_zone='test-az',
volume_type=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_snapshot, volume,
availability_zone='test-az')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_snapshot_attach_fail_volume(self):
fail_volume_snapshot = self.volsnapshot_bdm_dict.copy()
fail_volume_snapshot['volume_id'] = None
test_bdm = self.driver_classes['volsnapshot'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_snapshot))
snapshot = {'id': 'fake-volume-id-1',
'attach_status': 'detached'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_get_snap, vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_get_snap.assert_called_once_with(
self.context, 'fake-snapshot-id-1')
vol_create.assert_called_once_with(
self.context, 3, '', '', snapshot, availability_zone=None,
volume_type=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_snapshot_attach_volume(self):
test_bdm = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume(self):
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone=None,
volume_type=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_image_attach_no_volume_cinder_cross_az_attach_false(self):
# Tests that the volume created from the image has the same AZ as the
# instance.
self.flags(cross_az_attach=False, group='cinder')
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, no_volume_image))
# When we create a volume, we attach it using the old flow.
self.attachment_id = None
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
wait_func = self.mox.CreateMockAnything()
self.volume_api.create(self.context, 1, '', '', image_id=image['id'],
availability_zone='test-az',
volume_type=None).AndReturn(volume)
wait_func(self.context, 'fake-volume-id-2').AndReturn(None)
instance, expected_conn_info = self._test_volume_attach(
test_bdm, no_volume_image, volume,
availability_zone='test-az')
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver, wait_func)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_image_attach_fail_volume(self):
fail_volume_image = self.volimage_bdm_dict.copy()
fail_volume_image['volume_id'] = None
test_bdm = self.driver_classes['volimage'](
fake_block_device.fake_bdm_object(
self.context, fail_volume_image))
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'attach_status': 'detached'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, 1, '', '', image_id=image['id'],
availability_zone=None, volume_type=None)
vol_delete.assert_called_once_with(self.context, volume['id'])
def test_image_attach_volume(self):
test_bdm = self.driver_classes['volimage'](
self.volimage_bdm)
instance = {'id': 'fake_id', 'uuid': uuids.uuid}
volume_class = self.driver_classes['volume']
self.mox.StubOutWithMock(volume_class, 'attach')
        # Make sure these are not called
self.mox.StubOutWithMock(self.volume_api, 'get_snapshot')
self.mox.StubOutWithMock(self.volume_api, 'create')
volume_class.attach(self.context, instance, self.volume_api,
self.virt_driver).AndReturn(None)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
self.assertEqual(test_bdm.volume_id, 'fake-volume-id-2')
def test_blank_attach_fail_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'delete'),
) as (vol_create, vol_delete):
wait_func = mock.MagicMock()
mock_exception = exception.VolumeNotCreated(volume_id=volume['id'],
seconds=1,
attempts=1,
volume_status='error')
wait_func.side_effect = mock_exception
self.assertRaises(exception.VolumeNotCreated,
test_bdm.attach, context=self.context,
instance=instance,
volume_api=self.volume_api,
virt_driver=self.virt_driver,
wait_func=wait_func)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone=None)
vol_delete.assert_called_once_with(
self.context, volume['id'])
def test_blank_attach_volume(self):
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**{'uuid': uuids.uuid})
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(volume_class, 'attach')
) as (vol_create, vol_attach):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone=None)
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_blank_attach_volume_cinder_cross_az_attach_false(self):
# Tests that the blank volume created is in the same availability zone
# as the instance.
self.flags(cross_az_attach=False, group='cinder')
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
test_bdm = self.driver_classes['volblank'](
fake_block_device.fake_bdm_object(
self.context, no_blank_volume))
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with mock.patch.object(self.volume_api, 'create',
return_value=volume) as vol_create:
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid,
'', volume_type=None, availability_zone='test-az')
vol_attach.assert_called_once_with(self.context, instance,
self.volume_api,
self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_convert_block_devices(self):
bdms = objects.BlockDeviceMappingList(
objects=[self.volume_bdm, self.ephemeral_bdm])
converted = driver_block_device._convert_block_devices(
self.driver_classes['volume'], bdms)
self.assertEqual(converted, [self.volume_driver_bdm])
def test_convert_all_volumes(self):
converted = driver_block_device.convert_all_volumes()
self.assertEqual([], converted)
converted = driver_block_device.convert_all_volumes(
self.volume_bdm, self.ephemeral_bdm, self.volimage_bdm,
self.volblank_bdm, self.volsnapshot_bdm)
self.assertEqual(converted, [self.volume_driver_bdm,
self.volimage_driver_bdm,
self.volblank_driver_bdm,
self.volsnapshot_driver_bdm])
def test_convert_volume(self):
self.assertIsNone(driver_block_device.convert_volume(self.swap_bdm))
self.assertEqual(self.volume_driver_bdm,
driver_block_device.convert_volume(self.volume_bdm))
self.assertEqual(self.volsnapshot_driver_bdm,
driver_block_device.convert_volume(
self.volsnapshot_bdm))
def test_legacy_block_devices(self):
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
block_device_mapping = [test_snapshot, test_snapshot]
legacy_bdm = driver_block_device.legacy_block_devices(
block_device_mapping)
self.assertEqual(legacy_bdm, [self.volsnapshot_legacy_driver_bdm,
self.volsnapshot_legacy_driver_bdm])
# Test that the ephemerals work as expected
test_ephemerals = [self.driver_classes['ephemeral'](
self.ephemeral_bdm) for _ in range(2)]
expected = [self.ephemeral_legacy_driver_bdm.copy()
for _ in range(2)]
expected[0]['virtual_name'] = 'ephemeral0'
expected[0]['num'] = 0
expected[1]['virtual_name'] = 'ephemeral1'
expected[1]['num'] = 1
legacy_ephemerals = driver_block_device.legacy_block_devices(
test_ephemerals)
self.assertEqual(expected, legacy_ephemerals)
def test_get_swap(self):
swap = [self.swap_driver_bdm]
legacy_swap = [self.swap_legacy_driver_bdm]
no_swap = [self.volume_driver_bdm]
self.assertEqual(swap[0], driver_block_device.get_swap(swap))
self.assertEqual(legacy_swap[0],
driver_block_device.get_swap(legacy_swap))
self.assertIsNone(driver_block_device.get_swap(no_swap))
self.assertIsNone(driver_block_device.get_swap([]))
def test_is_implemented(self):
for bdm in (self.volimage_bdm, self.volume_bdm, self.swap_bdm,
self.ephemeral_bdm, self.volsnapshot_bdm):
self.assertTrue(driver_block_device.is_implemented(bdm))
local_image = self.volimage_bdm_dict.copy()
local_image['destination_type'] = 'local'
self.assertFalse(driver_block_device.is_implemented(
fake_block_device.fake_bdm_object(self.context, local_image)))
def test_is_block_device_mapping(self):
test_swap = self.driver_classes['swap'](self.swap_bdm)
test_ephemeral = self.driver_classes['ephemeral'](self.ephemeral_bdm)
test_image = self.driver_classes['volimage'](self.volimage_bdm)
test_snapshot = self.driver_classes['volsnapshot'](
self.volsnapshot_bdm)
test_volume = self.driver_classes['volume'](self.volume_bdm)
test_blank = self.driver_classes['volblank'](self.volblank_bdm)
for bdm in (test_image, test_snapshot, test_volume, test_blank):
self.assertTrue(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
for bdm in (test_swap, test_ephemeral):
self.assertFalse(driver_block_device.is_block_device_mapping(
bdm._bdm_obj))
def test_get_volume_create_az_cinder_cross_az_attach_true(self):
# Tests that we get None back if cinder.cross_az_attach=True even if
# the instance has an AZ assigned. Note that since cross_az_attach
# defaults to True we don't need to set a flag explicitly for the test.
updates = {'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(self.context, **updates)
self.assertIsNone(
driver_block_device._get_volume_create_az_value(instance))
def test_refresh_conn_infos(self):
# Only DriverVolumeBlockDevice derived devices should refresh their
# connection_info during a refresh_conn_infos call.
test_volume = mock.MagicMock(
spec=driver_block_device.DriverVolumeBlockDevice)
test_image = mock.MagicMock(
spec=driver_block_device.DriverVolImageBlockDevice)
test_snapshot = mock.MagicMock(
spec=driver_block_device.DriverVolSnapshotBlockDevice)
test_blank = mock.MagicMock(
spec=driver_block_device.DriverVolBlankBlockDevice)
test_eph = mock.MagicMock(
spec=driver_block_device.DriverEphemeralBlockDevice)
test_swap = mock.MagicMock(
spec=driver_block_device.DriverSwapBlockDevice)
block_device_mapping = [test_volume, test_image, test_eph,
test_snapshot, test_swap, test_blank]
driver_block_device.refresh_conn_infos(block_device_mapping,
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
for test_mock in [test_volume, test_image, test_snapshot, test_blank]:
test_mock.refresh_connection_info.assert_called_once_with(
mock.sentinel.refresh_context,
mock.sentinel.refresh_instance,
mock.sentinel.refresh_vol_api,
mock.sentinel.refresh_virt_drv)
# NOTE(lyarwood): Can't think of a better way of testing this as we
# can't assert_not_called if the method isn't in the spec.
self.assertFalse(hasattr(test_eph, 'refresh_connection_info'))
self.assertFalse(hasattr(test_swap, 'refresh_connection_info'))
def test_proxy_as_attr(self):
class A(driver_block_device.DriverBlockDevice):
pass
def _transform(self):
pass
class B(A):
_proxy_as_attr_inherited = set('B')
class C(A):
_proxy_as_attr_inherited = set('C')
class D(B):
_proxy_as_attr_inherited = set('D')
class E(B, C):
_proxy_as_attr_inherited = set('E')
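        # _proxy_as_attr should accumulate _proxy_as_attr_inherited along
        # the MRO, so E(B, C) exposes 'B', 'C' and 'E' in addition to the
        # base 'uuid' and 'is_volume' attributes asserted below.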
bdm = objects.BlockDeviceMapping(self.context, no_device=False)
self.assertEqual(set(['uuid', 'is_volume']), A(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B']),
B(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'C']),
C(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'D']),
D(bdm)._proxy_as_attr)
self.assertEqual(set(['uuid', 'is_volume', 'B', 'C', 'E']),
E(bdm)._proxy_as_attr)
def _test_boot_from_volume_source_blank_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volblank'](bdm)
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': '%s-blank-vol' % uuids.uuid}
with mock.patch.object(self.volume_api, 'create',
return_value=volume) as vol_create:
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'%s-blank-vol' % uuids.uuid, '',
volume_type=expected_volume_type,
availability_zone='test-az')
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_blank_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(self.context, no_blank_volume)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_blank_volume_type(bdm, None)
def test_boot_from_volume_source_blank_with_volume_type(self):
# Tests that the blank volume created specifies the volume type.
no_blank_volume = self.volblank_bdm_dict.copy()
no_blank_volume['volume_id'] = None
no_blank_volume['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(self.context, no_blank_volume)
self._test_boot_from_volume_source_blank_volume_type(bdm, 'fake-lvm-1')
def _test_boot_from_volume_source_image_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volimage'](bdm)
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
image = {'id': 'fake-image-id-1'}
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-image-vol'}
with mock.patch.object(self.volume_api, 'create',
return_value=volume) as vol_create:
with mock.patch.object(volume_class, 'attach') as vol_attach:
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size,
'', '', image_id=image['id'],
volume_type=expected_volume_type,
availability_zone='test-az')
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_image_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(self.context, no_volume_image)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_image_volume_type(bdm, None)
def test_boot_from_volume_source_image_with_volume_type(self):
# Tests that the volume created from the image specifies the volume
# type.
no_volume_image = self.volimage_bdm_dict.copy()
no_volume_image['volume_id'] = None
no_volume_image['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(self.context, no_volume_image)
self._test_boot_from_volume_source_image_volume_type(bdm, 'fake-lvm-1')
def _test_boot_from_volume_source_snapshot_volume_type(
self, bdm, expected_volume_type):
self.flags(cross_az_attach=False, group='cinder')
test_bdm = self.driver_classes['volsnapshot'](bdm)
snapshot = {'id': 'fake-snapshot-id-1',
'attach_status': 'detached'}
updates = {'uuid': uuids.uuid, 'availability_zone': 'test-az'}
instance = fake_instance.fake_instance_obj(mock.sentinel.ctx,
**updates)
volume_class = self.driver_classes['volume']
volume = {'id': 'fake-volume-id-2',
'display_name': 'fake-snapshot-vol'}
with test.nested(
mock.patch.object(self.volume_api, 'create', return_value=volume),
mock.patch.object(self.volume_api, 'get_snapshot',
return_value=snapshot),
mock.patch.object(volume_class, 'attach')
) as (
vol_create, vol_get_snap, vol_attach
):
test_bdm.attach(self.context, instance, self.volume_api,
self.virt_driver)
vol_create.assert_called_once_with(
self.context, test_bdm.volume_size, '', '', snapshot,
volume_type=expected_volume_type, availability_zone='test-az')
vol_attach.assert_called_once_with(
self.context, instance, self.volume_api, self.virt_driver)
self.assertEqual('fake-volume-id-2', test_bdm.volume_id)
def test_boot_from_volume_source_snapshot_with_unset_volume_type(self):
"""Tests the scenario that the BlockDeviceMapping.volume_type field
is unset for RPC compatibility to an older compute.
"""
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
bdm = fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot)
delattr(bdm, 'volume_type')
self.assertNotIn('volume_type', bdm)
self._test_boot_from_volume_source_snapshot_volume_type(bdm, None)
def test_boot_from_volume_source_snapshot_with_volume_type(self):
# Tests that the volume created from the snapshot specifies the volume
# type.
no_volume_snapshot = self.volsnapshot_bdm_dict.copy()
no_volume_snapshot['volume_id'] = None
no_volume_snapshot['volume_type'] = 'fake-lvm-1'
bdm = fake_block_device.fake_bdm_object(
self.context, no_volume_snapshot)
self._test_boot_from_volume_source_snapshot_volume_type(
bdm, 'fake-lvm-1')
class TestDriverBlockDeviceNewFlow(TestDriverBlockDevice):
"""Virt block_device tests for the Cinder 3.44 volume attach flow
where a volume BDM has an attachment_id.
"""
attachment_id = uuids.attachment_id
def test_volume_attach_multiattach(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
instance, expected_conn_info = self._test_volume_attach(
test_bdm, self.volume_bdm, volume, multiattach=True,
driver_multi_attach=True)
self.mox.ReplayAll()
test_bdm.attach(self.context, instance,
self.volume_api, self.virt_driver)
self.assertThat(test_bdm['connection_info'],
matchers.DictMatches(expected_conn_info))
def test_volume_attach_multiattach_no_virt_driver_support(self):
test_bdm = self.driver_classes['volume'](
self.volume_bdm)
volume = {'id': 'fake-volume-id-1',
'multiattach': True,
'attach_status': 'attached',
'status': 'in-use',
'attachments': {'fake_instance_2':
{'mountpoint': '/dev/vdc'}}}
instance, _ = self._test_volume_attach(test_bdm, self.volume_bdm,
volume, multiattach=True,
fail_with_virt_driver=True)
self.mox.ReplayAll()
self.assertRaises(exception.MultiattachNotSupportedByVirtDriver,
test_bdm.attach, self.context, instance,
self.volume_api, self.virt_driver)
@mock.patch('nova.objects.BlockDeviceMapping.save')
def test_refresh_connection_preserve_multiattach(self, mock_bdm_save):
"""Tests that we've already attached a multiattach-capable volume
and when refreshing the connection_info from the attachment record,
the multiattach flag in the bdm.connection_info is preserved.
"""
test_bdm = self.driver_classes['volume'](self.volume_bdm)
test_bdm['connection_info']['multiattach'] = True
volume_api = mock.Mock()
volume_api.attachment_get.return_value = {
'connection_info': {
'data': {
'some': 'goodies'
}
}
}
test_bdm.refresh_connection_info(
self.context, mock.sentinel.instance,
volume_api, mock.sentinel.virt_driver)
volume_api.attachment_get.assert_called_once_with(
self.context, self.attachment_id)
mock_bdm_save.assert_called_once_with()
expected_connection_info = {
'data': {
'some': 'goodies'
},
'serial': self.volume_bdm.volume_id,
'multiattach': True
}
self.assertDictEqual(expected_connection_info,
test_bdm['connection_info'])
class TestGetVolumeId(test.NoDBTestCase):
def test_get_volume_id_none_found(self):
self.assertIsNone(driver_block_device.get_volume_id(None))
self.assertIsNone(driver_block_device.get_volume_id({}))
self.assertIsNone(driver_block_device.get_volume_id({'data': {}}))
def test_get_volume_id_found_volume_id_no_serial(self):
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'data': {'volume_id': uuids.volume_id}}))
def test_get_volume_id_found_no_volume_id_serial(self):
self.assertEqual(uuids.serial,
driver_block_device.get_volume_id(
{'serial': uuids.serial}))
def test_get_volume_id_found_both(self):
        # volume_id takes precedence over serial
self.assertEqual(uuids.volume_id,
driver_block_device.get_volume_id(
{'serial': uuids.serial,
'data': {'volume_id': uuids.volume_id}}))
|
{
"content_hash": "10a809cb9cb49a2015908d1c3b9d363e",
"timestamp": "",
"source": "github",
"line_count": 1573,
"max_line_length": 79,
"avg_line_length": 44.75842339478703,
"alnum_prop": 0.560585185711242,
"repo_name": "mikalstill/nova",
"id": "d4085bc71d84eb314d4688a8c9eac2bda736ce37",
"size": "71003",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/tests/unit/virt/test_block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "3325"
},
{
"name": "Python",
"bytes": "22797282"
},
{
"name": "Shell",
"bytes": "32969"
},
{
"name": "Smarty",
"bytes": "418399"
}
],
"symlink_target": ""
}
|
class Country():
def __init__(self, name, gold, silver, bronze):
self.name = name
self.gold, self.silver, self.bronze = gold, silver, bronze
def __str__(self):
return "%s %d %d %d" % (self.name, self.gold, self.silver, self.bronze)
def __lt__(self, other):
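        # Order by gold, then silver, then bronze; when every medal count
        # ties, compare names in reverse so that sorted(..., reverse=True)
        # lists tied countries in ascending alphabetical order.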
if self.gold == other.gold:
if self.silver == other.silver:
if self.bronze == other.bronze:
return self.name > other.name
return self.bronze < other.bronze
return self.silver < other.silver
return self.gold < other.gold
class MedalTable:
def generate(self, results):
trophy = {}
for cList in results:
for i, cname in enumerate(cList.split()):
if cname not in trophy:
trophy[cname] = Country(cname, 0, 0, 0)
                if i == 0:
                    trophy[cname].gold += 1
                if i == 1:
                    trophy[cname].silver += 1
                if i == 2:
                    trophy[cname].bronze += 1
trophyList = tuple(trophy.values())
return sorted(trophyList, reverse=True)
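# Illustrative check (assumed input): MedalTable().generate(
#     ["ITA JPN AUS", "KOR TPE UKR"]) yields the countries ordered
# ITA, KOR, JPN, TPE, AUS, UKR -- gold winners first, ties broken
# alphabetically.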
|
{
"content_hash": "d7678d287d035491c41b0e08c5e1aa44",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 33.54054054054054,
"alnum_prop": 0.5165189363416599,
"repo_name": "chandps/Topcoder",
"id": "79e62bc7e74bce9c8928862dd070f6e15eb54810",
"size": "1241",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "topcoder/MedalTable.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "1643"
},
{
"name": "HTML",
"bytes": "595"
},
{
"name": "Java",
"bytes": "4106"
},
{
"name": "Python",
"bytes": "27993"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
class DisplayDriverServerTest( unittest.TestCase ) :
def testPortNumber( self ) :
s1 = IECore.DisplayDriverServer( 1559 )
self.assertEqual( s1.portNumber(), 1559 )
self.assertRaises( RuntimeError, IECore.DisplayDriverServer, 1559 )
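		# Port 0 asks the server to pick a free port itself, so each server
		# must report a non-zero port distinct from the earlier ones.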
s2 = IECore.DisplayDriverServer( 0 )
self.assertNotEqual( s2.portNumber(), 0 )
self.assertNotEqual( s2.portNumber(), s1.portNumber() )
s3 = IECore.DisplayDriverServer( 0 )
self.assertNotEqual( s3.portNumber(), 0 )
self.assertNotEqual( s3.portNumber(), s2.portNumber() )
s4 = IECore.DisplayDriverServer()
self.assertNotEqual( s4.portNumber(), 0 )
self.assertNotEqual( s4.portNumber(), s3.portNumber() )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "66033abca1df4187e72360b409ea2afb",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 69,
"avg_line_length": 26.535714285714285,
"alnum_prop": 0.7187079407806191,
"repo_name": "hradec/cortex",
"id": "eb4f0ec15cac7aaa3ecb45d9938e6d21d13d98f5",
"size": "2527",
"binary": false,
"copies": "1",
"ref": "refs/heads/testing",
"path": "test/IECore/DisplayDriverServerTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70350"
},
{
"name": "C++",
"bytes": "11602345"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31098"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Objective-C",
"bytes": "21989"
},
{
"name": "Python",
"bytes": "5076729"
},
{
"name": "Slash",
"bytes": "8583"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
from ckanext.geoview import plugin
def test_plugin():
"""This is here just as a sanity test
"""
p = plugin.OLGeoView()
assert p
|
{
"content_hash": "112adc44c85da96f8d53e7eb1f2a01e1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 20.571428571428573,
"alnum_prop": 0.6458333333333334,
"repo_name": "kalxas/ckanext-geoview",
"id": "f04f001446c7e76dfff81b6bdb9a2a313ba218d3",
"size": "144",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ckanext/geoview/tests/test_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5941"
},
{
"name": "HTML",
"bytes": "5938"
},
{
"name": "JavaScript",
"bytes": "31776"
},
{
"name": "Python",
"bytes": "22709"
}
],
"symlink_target": ""
}
|
"""Generic entry point script."""
import sys
from tensorflow.python.platform import flags
def run():
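    # Parse the registered flags from sys.argv, then dispatch to the
    # main() defined in the calling script, exiting with its return value.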
f = flags.FLAGS
f._parse_flags()
main = sys.modules['__main__'].main
sys.exit(main(sys.argv))
|
{
"content_hash": "e8679c139b73ee786ac4c32f32da17ad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 44,
"avg_line_length": 19.454545454545453,
"alnum_prop": 0.6495327102803738,
"repo_name": "liyu1990/tensorflow",
"id": "5917d00ce3f70c6d955203d18dd578fa1ae50887",
"size": "214",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/platform/default/_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "127080"
},
{
"name": "C++",
"bytes": "4875335"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "631255"
},
{
"name": "Java",
"bytes": "44192"
},
{
"name": "JavaScript",
"bytes": "5067"
},
{
"name": "Objective-C",
"bytes": "630"
},
{
"name": "Protocol Buffer",
"bytes": "44898"
},
{
"name": "Python",
"bytes": "2425565"
},
{
"name": "Shell",
"bytes": "1036"
},
{
"name": "TypeScript",
"bytes": "236089"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = '''
sphinx.ext.autodoc
sphinx.ext.coverage
sphinx.ext.doctest
sphinx.ext.napoleon
sphinx.ext.todo
sphinx.ext.viewcode
'''.split()
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'inform'
copyright = u'2017-2022, Ken Kundert'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = '1.27.1'
# The short X.Y version.
version = '.'.join(release.split('.')[0:2])
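# e.g. release '1.27.1' yields the short version '1.27'.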
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'nature'
# Use default rather than my normal nature so we get the read-the-docs style on
# that website.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'informdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'inform.tex', u'Inform Documentation',
u'Ken Kundert', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'inform', u'Inform Documentation',
[u'Ken Kundert'], 3)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Inform', u'Inform Documentation',
u'Ken Kundert', 'Inform', 'Print & Logging Utilities',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
#KSK: add custom css code if present
def setup(app):
import os
if os.path.exists('.static/css/custom.css'):
app.add_stylesheet('css/custom.css')
|
{
"content_hash": "9f21e3330c4fc7d38a3601836d98fe39",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 31.520325203252032,
"alnum_prop": 0.6987361361877741,
"repo_name": "KenKundert/inform",
"id": "51be33570329a74af41791a2ff0861c4738947d8",
"size": "8171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226893"
},
{
"name": "Shell",
"bytes": "773"
}
],
"symlink_target": ""
}
|
"""
Description: dialog for configuring a stepped-voltage power-supply sequence.
Requirements: pySerial, wxPython Phoenix
Glossary of abbreviations:
DMM - digital multimeter
PSU - power supply
SBC - single board computer
INS - general instrument commands
GEN - general sequence instructions
"""
import wx
import theme
import base
# from wx.lib.agw import spinctrl
class StepVoltage(wx.Dialog):
def __init__(self, parent, instruments, variables):
wx.Dialog.__init__(self,
parent,
title="Step Voltage")
self._variables = variables
panel = wx.Panel(self)
sizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
sbox = wx.StaticBox(panel, label="")
sbox_sizer = wx.StaticBoxSizer(sbox, wx.HORIZONTAL)
grid = wx.GridBagSizer(5,5)
row = 0
# row += 1 #let's start at 1, to give some space
lbl_psu = wx.StaticText(panel, label="Power Supply:")
choices = instruments
self.cbox_psu = wx.ComboBox(panel, choices=choices)
self.cbox_psu.Bind(wx.EVT_COMBOBOX, self.OnPsuSelected)
grid.Add(lbl_psu, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.cbox_psu, pos=(row,1), span=(0,3), flag=wx.ALL|wx.EXPAND, border=5)
grid.AddGrowableCol(1)
row += 1
lbl_initial = wx.StaticText(panel, label="Initial Voltage:")
self.spin_initial = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_initial2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
self.spin_initial.Bind(wx.EVT_SPINCTRL, self.OnSpinInitial)
self.spin_initial2.Bind(wx.EVT_SPINCTRL, self.OnSpinInitial)
self.lbl_voltage = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_initial, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_initial, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_initial2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_final = wx.StaticText(panel, label="Final Voltage (Limit):")
self.spin_final = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_final2 = wx.SpinCtrl(panel, max=99, min=0, size=(50, -1))
self.spin_final.Bind(wx.EVT_SPINCTRL, self.OnSpinFinal)
self.spin_final2.Bind(wx.EVT_SPINCTRL, self.OnSpinFinal)
self.lbl_voltage2 = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_final, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_final, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_final2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_voltage2, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_step = wx.StaticText(panel, label="Voltage Increment/Decrement:")
self.spin_step = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_step2 = wx.SpinCtrl(panel, max=30, min=0, size=(50, -1))
self.spin_step.Bind(wx.EVT_SPINCTRL, self.OnSpinStep)
self.spin_step2.Bind(wx.EVT_SPINCTRL, self.OnSpinStep)
self.lbl_step2 = wx.StaticText(panel, label="0.0v")
grid.Add(lbl_step, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_step, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_step2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_step2, pos=(row,3), flag=wx.ALL, border=5)
row += 1
        lbl_step_delay = wx.StaticText(panel, label="Delay before Increment/decrement (s):")
self.spin_step_delay = wx.SpinCtrl(panel, max=59, min=0, size=(50, -1))
self.spin_step_delay2 = wx.SpinCtrl(panel, max=59, min=0, size=(50, -1))
self.lbl_step_delay = wx.StaticText(panel, label="0.0s")
self.spin_step_delay.Bind(wx.EVT_SPINCTRL, self.OnSpinStepDelay)
self.spin_step_delay2.Bind(wx.EVT_SPINCTRL, self.OnSpinStepDelay)
grid.Add(lbl_step_delay, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.spin_step_delay, pos=(row,1), flag=wx.ALL, border=5)
grid.Add(self.spin_step_delay2, pos=(row,2), flag=wx.ALL, border=5)
grid.Add(self.lbl_step_delay, pos=(row,3), flag=wx.ALL, border=5)
row += 1
lbl_repeat = wx.StaticText(panel, label="Repeat:")
spin_repeat = wx.SpinCtrl(panel, max=999, min=0, size=(50, -1))
grid.Add(lbl_repeat, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(spin_repeat, pos=(row,1), flag=wx.ALL|wx.EXPAND, border=5)
row += 1
lbl_local = wx.StaticText(panel, label="Local Name:")
default = defaultname = "stepvolt"
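        # Pick the first unused local name: "stepvolt", "stepvolt1", ...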
index = 1
while defaultname in self._variables["locals"]:
defaultname = default + str(index)
index += 1
self.text_local = wx.TextCtrl(panel, value=defaultname)
grid.Add(lbl_local, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.text_local, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)
row += 1
lbl_global = wx.StaticText(panel, label="Global Name:")
self.text_global = wx.TextCtrl(panel, value="")
grid.Add(lbl_global, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
grid.Add(self.text_global, pos=(row,1), span=(0,2), flag=wx.ALL|wx.EXPAND, border=5)
# row += 1
# self.lbl_error = wx.StaticText(panel, label="")
# grid.Add(self.lbl_error, pos=(row,0), flag=wx.ALL|wx.EXPAND, border=5)
# if self.cbox_psu.GetSelection() == -1:
# self.lbl_error.SetLabel("*Cannot add this step unless a power supply is selected")
sbox_sizer.Add(grid, 1, wx.ALL|wx.EXPAND, 0)
sbox_sizer.AddSpacer(10)
#-----
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.AddStretchSpacer()
btn_cancel = wx.Button(panel, label="Cancel", id=wx.ID_CANCEL)
btn_cancel.Bind(wx.EVT_BUTTON, self.OnButton)
self.btn_add = wx.Button(panel, label="Add", id=wx.ID_OK)
self.btn_add.Bind(wx.EVT_BUTTON, self.OnButton)
# self.btn_add.Disable()
hsizer.Add(btn_cancel, 0, wx.ALL|wx.EXPAND, 5)
hsizer.Add(self.btn_add, 0, wx.ALL|wx.EXPAND, 5)
#add to main sizer
sizer.Add(sbox_sizer, 0, wx.ALL|wx.EXPAND, 2)
sizer.Add(hsizer, 0, wx.ALL|wx.EXPAND, 5)
panel.SetSizer(sizer)
w, h = sizer.Fit(self)
# self.SetSize((w, h*1.5))
# self.SetMinSize((w, h*1.5))
# self.SetMaxSize(sizer.Fit(self))
try:
self.SetIcon(theme.GetIcon("psu_png"))
except:
pass
self.Bind(wx.EVT_KEY_UP, self.OnKeyUp)
def OnPsuSelected(self, event):
pass
# self.btn_add.Enable()
# self.lbl_error.SetLabel("")
def OnKeyUp(self, event):
key = event.GetKeyCode()
        if key == wx.WXK_ESCAPE:
self.EndModal(wx.ID_CANCEL)
def OnSpinInitial(self, event=None):
v0 = self.spin_initial.GetValue()
v1 = self.spin_initial2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage.SetLabel(label)
def OnSpinFinal(self, event=None):
v0 = self.spin_final.GetValue()
v1 = self.spin_final2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_voltage2.SetLabel(label)
def OnSpinStep(self, event=None):
v0 = self.spin_step.GetValue()
v1 = self.spin_step2.GetValue()
label = str(v0) + "." + str(v1) + "v"
self.lbl_step2.SetLabel(label)
def OnSpinStepDelay(self, event=None):
s0 = self.spin_step_delay.GetValue()
s1 = self.spin_step_delay2.GetValue()
label = str(s0) + "." + str(s1) + "s"
self.lbl_step_delay.SetLabel(label)
def OnButton(self, event):
e = event.GetEventObject()
label = e.GetLabel()
id = e.GetId()
if label == "Cancel":
self.EndModal(id)
elif label == "Add":
self.EndModal(id)
def SetValue(self, data):
params = data["parameters"]
params = "), " + params[1:-1] + ", (" #so we can split it easier
param_dict = {}
params = params.split("), (")
for param in params:
param = param[1: -1]
if param == "":
continue
key, value = param.split("', '")
param_dict[key] = value
self.cbox_psu.SetValue(param_dict["psu"])
self.lbl_step_delay.SetLabel(param_dict["delay"])
self.lbl_step2.SetLabel(param_dict["step"])
        self.lbl_voltage.SetLabel(param_dict["v0"])
        self.lbl_voltage2.SetLabel(param_dict["v1"])
#increment delay
spin1, spin2 = param_dict["delay"][:-1].split(".")
self.spin_step_delay.SetValue(spin1)
        self.spin_step_delay2.SetValue(spin2)
#initial voltage
spin1, spin2 = param_dict["v0"][:-1].split(".")
self.spin_initial.SetValue(spin1)
self.spin_initial2.SetValue(spin2)
#final voltage
spin1, spin2 = param_dict["v1"][:-1].split(".")
self.spin_final.SetValue(spin1)
self.spin_final2.SetValue(spin2)
#increment set
spin1, spin2 = param_dict["step"][:-1].split(".")
self.spin_step.SetValue(spin1)
self.spin_step2.SetValue(spin2)
#
self.text_local.SetValue(data["local"])
self.text_global.SetValue(data["global"])
def GetValue(self):
data = [("psu", self.cbox_psu.GetValue()),
("v0", self.lbl_voltage.GetLabel()),
("v1", self.lbl_voltage2.GetLabel()),
("step", self.lbl_step2.GetLabel()),
("delay", self.lbl_step_delay.GetLabel())]
data = {"action":"Step Voltage",
"parameters":str(data)}
local = self.text_local.GetValue()
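        # Sanitise the names below: any character that is neither a digit
        # nor a letter is replaced with an underscore.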
if local != "":
for char in local:
if char.isdigit() or char.isalpha():
continue
local = local.replace(char, "_")
data["local"] = local
glob = self.text_global.GetValue()
if glob != "":
for char in glob:
if char.isdigit() or char.isalpha():
continue
glob = glob.replace(char, "_")
data["global"] = glob
return data
|
{
"content_hash": "a85e7bdde72bcb46308868b28cc47441",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 96,
"avg_line_length": 38.449122807017545,
"alnum_prop": 0.5519255338565432,
"repo_name": "swprojects/Serial-Sequence-Creator",
"id": "6908cffde56a21b72280902a83a4be36dee43b78",
"size": "10958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dialogs/stepvoltage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "190811"
}
],
"symlink_target": ""
}
|
"""Famas URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework import routers
from books import views
router = routers.DefaultRouter()
router.register(r'book_pages', viewset=views.BookPageViewSet)
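# DefaultRouter generates the standard list/detail routes for the
# registered viewset (e.g. /book_pages/ and /book_pages/<pk>/).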
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^register-device-token/(?P<token>[0-9A-Za-z]+)/$', views.register_device_token)
]
|
{
"content_hash": "33bf1beb4fdd0b7df72a331bbf099f6e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 89,
"avg_line_length": 37.93333333333333,
"alnum_prop": 0.7065026362038664,
"repo_name": "Pepedou/Famas",
"id": "203b2f1938fdc8e903575283e0912057b85b6b2f",
"size": "1138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Famas/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15818"
}
],
"symlink_target": ""
}
|
import os
import re
import sys
import traceback
from robot.errors import RobotError
from .platform import JYTHON, RERAISED_EXCEPTIONS
from .unic import unic
EXCLUDE_ROBOT_TRACES = not os.getenv('ROBOT_INTERNAL_TRACES')
if JYTHON:
from java.io import StringWriter, PrintWriter
from java.lang import Throwable, OutOfMemoryError
else:
Throwable = ()
def get_error_message():
"""Returns error message of the last occurred exception.
    This method also handles exceptions containing unicode messages. Thus it
MUST be used to get messages from all exceptions originating outside the
framework.
"""
return ErrorDetails().message
def get_error_details(exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""Returns error message and details of the last occurred exception."""
details = ErrorDetails(exclude_robot_traces=exclude_robot_traces)
return details.message, details.traceback
def ErrorDetails(exc_info=None, exclude_robot_traces=EXCLUDE_ROBOT_TRACES):
"""This factory returns an object that wraps the last occurred exception
It has attributes `message`, `traceback` and `error`, where `message`
contains type and message of the original error, `traceback` contains the
traceback/stack trace and `error` contains the original error instance.
"""
exc_type, exc_value, exc_traceback = exc_info or sys.exc_info()
if exc_type in RERAISED_EXCEPTIONS:
raise exc_value
details = PythonErrorDetails \
if not isinstance(exc_value, Throwable) else JavaErrorDetails
return details(exc_type, exc_value, exc_traceback, exclude_robot_traces)
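# Usage sketch (illustrative): inside an ``except`` block,
# ``details = ErrorDetails()`` wraps the active exception; ``details.message``
# then combines the exception type and message, and ``details.traceback``
# holds the formatted stack trace.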
class _ErrorDetails(object):
_generic_exception_names = ('AssertionError', 'AssertionFailedError',
'Exception', 'Error', 'RuntimeError',
'RuntimeException')
def __init__(self, exc_type, exc_value, exc_traceback,
exclude_robot_traces=True):
self.error = exc_value
self._exc_type = exc_type
self._exc_traceback = exc_traceback
self._exclude_robot_traces = exclude_robot_traces
self._message = None
self._traceback = None
@property
def message(self):
if self._message is None:
self._message = self._get_message()
return self._message
def _get_message(self):
raise NotImplementedError
@property
def traceback(self):
if self._traceback is None:
self._traceback = self._get_details()
return self._traceback
def _get_details(self):
raise NotImplementedError
def _get_name(self, exc_type):
try:
return exc_type.__name__
except AttributeError:
return unic(exc_type)
def _format_message(self, name, message):
message = unic(message or '')
message = self._clean_up_message(message, name)
name = name.split('.')[-1] # Use only last part of the name
if not message:
return name
if self._is_generic_exception(name):
return message
return '%s: %s' % (name, message)
def _is_generic_exception(self, name):
return (name in self._generic_exception_names or
isinstance(self.error, RobotError) or
getattr(self.error, 'ROBOT_SUPPRESS_NAME', False))
def _clean_up_message(self, message, name):
return message
class PythonErrorDetails(_ErrorDetails):
def _get_message(self):
name = self._get_name(self._exc_type)
return self._format_message(name, unic(self.error))
def _get_details(self):
if isinstance(self.error, RobotError):
return self.error.details
return 'Traceback (most recent call last):\n' + self._get_traceback()
def _get_traceback(self):
tb = self._exc_traceback
while tb and self._is_excluded_traceback(tb):
tb = tb.tb_next
return ''.join(traceback.format_tb(tb)).rstrip() or ' None'
def _is_excluded_traceback(self, traceback):
if not self._exclude_robot_traces:
return False
module = traceback.tb_frame.f_globals.get('__name__')
return module and module.startswith('robot.')
class JavaErrorDetails(_ErrorDetails):
_java_trace_re = re.compile(r'^\s+at (\w.+)')
_ignored_java_trace = ('org.python.', 'robot.running.', 'robot$py.',
'sun.reflect.', 'java.lang.reflect.')
def _get_message(self):
exc_name = self._get_name(self._exc_type)
# OOME.getMessage and even toString seem to throw NullPointerException
if not self._is_out_of_memory_error(self._exc_type):
exc_msg = self.error.getMessage()
else:
exc_msg = str(self.error)
return self._format_message(exc_name, exc_msg)
def _is_out_of_memory_error(self, exc_type):
return exc_type is OutOfMemoryError
def _get_details(self):
# OOME.printStackTrace seems to throw NullPointerException
if self._is_out_of_memory_error(self._exc_type):
return ''
output = StringWriter()
self.error.printStackTrace(PrintWriter(output))
details = '\n'.join(line for line in output.toString().splitlines()
if not self._is_ignored_stack_trace_line(line))
msg = unic(self.error.getMessage() or '')
if msg:
details = details.replace(msg, '', 1)
return details
def _is_ignored_stack_trace_line(self, line):
if not line:
return True
res = self._java_trace_re.match(line)
if res is None:
return False
location = res.group(1)
for entry in self._ignored_java_trace:
if location.startswith(entry):
return True
return False
def _clean_up_message(self, msg, name):
msg = self._remove_stack_trace_lines(msg)
return self._remove_exception_name(msg, name).strip()
def _remove_stack_trace_lines(self, msg):
lines = msg.splitlines()
while lines:
if self._java_trace_re.match(lines[-1]):
lines.pop()
else:
break
return '\n'.join(lines)
def _remove_exception_name(self, msg, name):
tokens = msg.split(':', 1)
if len(tokens) == 2 and tokens[0] == name:
msg = tokens[1]
return msg
|
{
"content_hash": "c41eb00a679553e6e4bd6b34a7bc7259",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 78,
"avg_line_length": 33.84293193717277,
"alnum_prop": 0.6202042079207921,
"repo_name": "joongh/robotframework",
"id": "f4af61154ceaf9b26b7f9536872824d81a3061c6",
"size": "7108",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/robot/utils/error.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "57497"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2209566"
},
{
"name": "RobotFramework",
"bytes": "2048926"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
from .census import get_census_profile
from .geography import get_geography, get_locations, get_locations_from_coords, LocationNotFound
__all__ = ['get_census_profile', 'get_geography',
           'get_locations', 'get_locations_from_coords']
|
{
"content_hash": "6ccda575071d70c99b954ff155217bd0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 96,
"avg_line_length": 53.8,
"alnum_prop": 0.7323420074349443,
"repo_name": "callmealien/wazimap_zambia",
"id": "a6008e680000a7cfec6dedd2a30fbeb0980771b9",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "censusreporter/api/controller/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63765"
},
{
"name": "HTML",
"bytes": "260910"
},
{
"name": "JavaScript",
"bytes": "191077"
},
{
"name": "Python",
"bytes": "1273842"
},
{
"name": "Shell",
"bytes": "3819"
}
],
"symlink_target": ""
}
|
"""
Test for LRUCache without a TTL value.
"""
import pytest
import barrelmagazine
_key = 'test_key'
@pytest.fixture
def cache():
return barrelmagazine.LRUCache()
def test_invalid_get_value_no_value(cache):
with pytest.raises(KeyError):
cache[_key]
def test_valid_get_value(cache):
with pytest.raises(KeyError):
cache[_key]
cache[_key] = 1
assert cache[_key] == 1
def test_valid_set_value(cache):
with pytest.raises(KeyError):
cache[_key]
cache[_key] = 1
assert cache[_key] == 1
def test_invalid_del_value(cache):
with pytest.raises(KeyError):
del cache[_key]
def test_valid_del_value(cache):
cache[_key] = 1
assert cache[_key] == 1
del cache[_key]
with pytest.raises(KeyError):
cache[_key]
def test_invalid_pop_no_value(cache):
with pytest.raises(KeyError):
cache.pop(_key)
def test_valid_pop(cache):
cache[_key] = 1
assert cache[_key] == 1
assert cache.pop(_key) == 1
with pytest.raises(KeyError):
cache[_key]
def test_valid_out_of_space():
cache = barrelmagazine.LRUCache(maxsize=1)
cache['1'] = 1
assert cache['1'] == 1
cache['2'] = 2
assert cache['2'] == 2
with pytest.raises(KeyError):
cache['1']
|
{
"content_hash": "397b63b458ed1ee976a4c1a68611c548",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 46,
"avg_line_length": 19.46969696969697,
"alnum_prop": 0.6140077821011674,
"repo_name": "cngo-github/barrel-magazine",
"id": "1b48a65f96f066acd20e97cbb92207b43e885c8c",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "barrelmagazine/tests/test_lru_cache.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5316"
}
],
"symlink_target": ""
}
|
from Bio.PopGen.GenePop import FileParser
import Bio.PopGen.FDist
# Quite a few utility functions could be done (like remove pop,
# add locus, etc...). The recommended strategy is convert back
# and forth from/to GenePop and use GenePop Utils
def convert_genepop_to_fdist(gp_rec, report_pops=None):
"""Converts a GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (either standard or big)
Returns:
FDist record.
"""
if hasattr(gp_rec, "populations"):
return _convert_genepop_to_fdist(gp_rec)
else:
return _convert_genepop_to_fdist_big(gp_rec, report_pops)
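# A minimal usage sketch, assuming a GenePop input file "sample.gen" and the
# read() parser from Bio.PopGen.GenePop:
def _convert_example():
    from Bio.PopGen.GenePop import read
    handle = open("sample.gen")
    try:
        gp_rec = read(handle)
    finally:
        handle.close()
    return convert_genepop_to_fdist(gp_rec)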
def _convert_genepop_to_fdist(gp_rec):
"""Converts a standard GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Standard)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
fd_rec.num_pops = len(gp_rec.populations)
for lc_i in range(len(gp_rec.loci_list)):
alleles = []
pop_data = []
for pop_i in range(len(gp_rec.populations)):
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
alleles.sort() # Dominance requires this
# here we go again (necessary...)
for pop_i in range(len(gp_rec.populations)):
allele_counts = {}
for indiv in gp_rec.populations[pop_i]:
for al in indiv[1][lc_i]:
if al is not None:
count = allele_counts.get(al, 0)
allele_counts[al] = count + 1
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def _convert_genepop_to_fdist_big(gp_rec, report_pops=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
fd_rec.data_org = 1
fd_rec.num_loci = len(gp_rec.loci_list)
num_loci = len(gp_rec.loci_list)
loci = []
for i in range(num_loci):
loci.append(set())
pops = []
work_rec = FileParser.read(gp_rec.fname)
lParser = work_rec.get_individual()
def init_pop():
my_pop = []
for i in range(num_loci):
my_pop.append({})
return my_pop
curr_pop = init_pop()
num_pops = 1
if report_pops:
report_pops(num_pops)
while lParser:
if lParser is not True:
for loci_pos in range(num_loci):
for al in lParser[1][loci_pos]:
if al is not None:
loci[loci_pos].add(al)
curr_pop[loci_pos][al] = curr_pop[loci_pos].get(al, 0) + 1
else:
pops.append(curr_pop)
num_pops += 1
if report_pops:
report_pops(num_pops)
curr_pop = init_pop()
lParser = work_rec.get_individual()
work_rec._handle.close() # TODO - Needs a proper fix
pops.append(curr_pop)
fd_rec.num_pops = num_pops
for loci_pos in range(num_loci):
alleles = sorted(loci[loci_pos])
loci_rec = [len(alleles), []]
for pop in pops:
pop_rec = []
for allele in alleles:
pop_rec.append(pop[loci_pos].get(allele, 0))
loci_rec[1].append(pop_rec)
fd_rec.loci_data.append(tuple(loci_rec))
return fd_rec
def _convert_genepop_to_fdist_big_old(gp_rec, report_loci=None):
"""Converts a big GenePop record to a FDist one.
Parameters:
gp_rec - Genepop Record (Big)
Returns:
FDist record.
"""
fd_rec = Bio.PopGen.FDist.Record()
def countPops(rec):
f2 = FileParser.read(rec.fname)
popCnt = 1
while f2.skip_population():
popCnt += 1
return popCnt
fd_rec.data_org = 0
fd_rec.num_loci = len(gp_rec.loci_list)
work_rec0 = FileParser.read(gp_rec.fname)
fd_rec.num_pops = countPops(work_rec0)
num_loci = len(gp_rec.loci_list)
for lc_i in range(num_loci):
if report_loci:
report_loci(lc_i, num_loci)
work_rec = FileParser.read(gp_rec.fname)
work_rec2 = FileParser.read(gp_rec.fname)
alleles = []
pop_data = []
lParser = work_rec.get_individual()
while lParser:
if lParser is not True:
for al in lParser[1][lc_i]:
if al is not None and al not in alleles:
alleles.append(al)
lParser = work_rec.get_individual()
# here we go again (necessary...)
alleles.sort()
def process_pop(pop_data, alleles, allele_counts):
allele_array = [] # We need the same order as in alleles
for allele in alleles:
allele_array.append(allele_counts.get(allele, 0))
pop_data.append(allele_array)
lParser = work_rec2.get_individual()
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
while lParser:
if lParser is True:
process_pop(pop_data, alleles, allele_counts)
allele_counts = {}
for allele in alleles:
allele_counts[allele] = 0
allele_counts[None] = 0
else:
for al in lParser[1][lc_i]:
allele_counts[al] += 1
lParser = work_rec2.get_individual()
process_pop(pop_data, alleles, allele_counts)
fd_rec.loci_data.append((len(alleles), pop_data))
return fd_rec
def approximate_fst(desired_fst, simulated_fst, parameter_fst,
max_run_fst=1, min_run_fst=0, limit=0.005):
"""Calculates the next Fst attempt in order to approximate a
desired Fst.
"""
if abs(simulated_fst - desired_fst) < limit:
return parameter_fst, max_run_fst, min_run_fst
if simulated_fst > desired_fst:
max_run_fst = parameter_fst
next_parameter_fst = (min_run_fst + parameter_fst) / 2
else:
min_run_fst = parameter_fst
next_parameter_fst = (max_run_fst + parameter_fst) / 2
return next_parameter_fst, max_run_fst, min_run_fst
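# A minimal sketch of the intended bisection loop, assuming a hypothetical
# run_simulation(fst) callable that returns the Fst observed in a simulation:
def _approximate_fst_example(run_simulation, desired_fst=0.1):
    parameter_fst, max_run_fst, min_run_fst = desired_fst, 1, 0
    while True:
        simulated_fst = run_simulation(parameter_fst)
        next_fst, max_run_fst, min_run_fst = approximate_fst(
            desired_fst, simulated_fst, parameter_fst,
            max_run_fst, min_run_fst)
        if next_fst == parameter_fst:  # within the default limit: converged
            return parameter_fst
        parameter_fst = next_fst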
|
{
"content_hash": "9e3b6e16d1bc94e04c05141be8b1b14c",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 82,
"avg_line_length": 32.383495145631066,
"alnum_prop": 0.5588367561085295,
"repo_name": "poojavade/Genomics_Docker",
"id": "da19ee8aae4f650e0214dabb03f35cec7e7ef1b1",
"size": "6919",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/Bio/PopGen/FDist/Utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "1265138"
},
{
"name": "C++",
"bytes": "4734960"
},
{
"name": "CSS",
"bytes": "17332"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "GLSL",
"bytes": "493"
},
{
"name": "Groff",
"bytes": "77173"
},
{
"name": "HTML",
"bytes": "395483"
},
{
"name": "Java",
"bytes": "9223"
},
{
"name": "JavaScript",
"bytes": "783663"
},
{
"name": "Jupyter Notebook",
"bytes": "189877"
},
{
"name": "Lua",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "77825"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Perl",
"bytes": "244796"
},
{
"name": "Python",
"bytes": "54562861"
},
{
"name": "R",
"bytes": "2568"
},
{
"name": "Shell",
"bytes": "40620"
},
{
"name": "Smarty",
"bytes": "21035"
},
{
"name": "TeX",
"bytes": "55310"
}
],
"symlink_target": ""
}
|
import pypcd
import numpy as np
def write_pcd_with_normals(points, normals, output_pcd_filepath):
with open(output_pcd_filepath, 'w') as new_pcd_file:
new_pcd_file.write(
"# .PCD v0.7 - Point Cloud Data file format\n"
"VERSION 0.7\n"
"FIELDS x y z normal_x normal_y normal_z\n"
"SIZE 4 4 4 4 4 4\n"
"TYPE F F F F F F\n"
"COUNT 1 1 1 1 1 1\n"
"WIDTH {0}\n"
"HEIGHT 1\n"
"POINTS {0}\n"
"DATA ascii\n"
.format(points.shape[0])
)
for point, normal in zip(points, normals):
new_pcd_file.write("{} {} {} {} {} {}\n".format(point[0], point[1], point[2], normal[0], normal[1], normal[2]))
def compute_tactile_normals(cloud):
points = cloud.to_array()
num_points = points.shape[0]
# n x 1 magnitude of each point
magnitudes = np.linalg.norm(points, axis=1).reshape((num_points, 1))
# divide points by per-point magnitude, and flip sign to point back at origin
normals = -points / magnitudes
return normals
def compute_depth_normals(pcd, ksearch, search_radius):
points = pcd.to_array()
# Convert PCD to PCD with normals
normals = pcd.calc_normals(ksearch=ksearch, search_radius=search_radius)
return normals
def calculate_normals_from_depth_and_tactile(depth_cloud, tactile_cloud, downsampled_pointcloud_size):
v_points = depth_cloud.to_array()
v_normals = compute_depth_normals(depth_cloud, ksearch=10, search_radius=0)
depth_downsample_factor = v_points.shape[0] // downsampled_pointcloud_size
v_points = v_points[::depth_downsample_factor]
v_normals = v_normals[::depth_downsample_factor]
t_points = tactile_cloud.to_array()
t_normals = compute_tactile_normals(tactile_cloud)
vt_points = np.concatenate([v_points, t_points])
vt_normals = np.concatenate([v_normals, t_normals])
return vt_points, vt_normals
def pcd_to_np(pcd_filename):
"""
Read in PCD then return nx3 numpy array
:type pcd_filename: str
:rtype numpy.ndarray
"""
pc = pypcd.PointCloud.from_path(pcd_filename)
return pcl_to_np(pc)
def pcl_to_np(pointcloud):
"""
Convert pypcd pointcloud to nx3 numpy array
:type pointcloud: pypcd.PointCloud
:rtype numpy.ndarray
"""
xyz = np.empty((pointcloud.points, 3), dtype=np.float64)
xyz[:, 0] = pointcloud.pc_data['x']
xyz[:, 1] = pointcloud.pc_data['y']
xyz[:, 2] = pointcloud.pc_data['z']
return xyz
def np_to_pcl(pc_np):
"""
Convert nx3 numpy array to pypcd pointcloud
:type pc_np: numpy.ndarray
:rtype pypcd.PointCloud
"""
new_pcd = pypcd.PointCloud.from_array(np.array(pc_np, dtype=np.float32))
return new_pcd
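# A minimal round-trip sketch using the helpers above ("cloud.pcd" is a
# hypothetical input file): file -> nx3 array -> pypcd cloud.
def _roundtrip_example():
    xyz = pcd_to_np("cloud.pcd")
    return np_to_pcl(xyz)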
|
{
"content_hash": "250832bbeeb1a384f6ce58152357f7ed",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 123,
"avg_line_length": 27.663366336633665,
"alnum_prop": 0.6252684323550465,
"repo_name": "CRLab/curvox",
"id": "9667631acd471ab94962abb30bd21eb691e4f610",
"size": "2794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/curvox/cloud_conversions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76823"
}
],
"symlink_target": ""
}
|
"""Graph mode cluster tests for the experimental `replicate` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class ReplicateClusterTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
super(ReplicateClusterTest, self).setUp()
# Start the local server.
worker_config = config_pb2.ConfigProto()
worker_config.device_count["CPU"] = 2
worker, _ = test_util.create_local_cluster(
3, 0, worker_config=worker_config)
self._device0 = "/job:worker/replica:0/task:0/device:CPU:0"
self._device1 = "/job:worker/replica:0/task:1/device:CPU:0"
self._device2 = "/job:worker/replica:0/task:2/device:CPU:0"
self._target = worker[0].target
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testBasic(self):
with ops.device(self._device0):
dataset0 = dataset_ops.Dataset.range(100)
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
get_next = self.getNext(dataset0)
with ops.device(self._device1):
get_next1 = self.getNext(dataset1)
with ops.device(self._device2):
get_next2 = self.getNext(dataset2)
with session.Session(self._target) as sess:
for i in range(100):
self.assertEqual(i, sess.run(get_next()))
self.assertEqual(i, sess.run(get_next1()))
self.assertEqual(i, sess.run(get_next2()))
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testMap(self):
with ops.device(self._device0):
dataset0 = dataset_ops.Dataset.range(100).map(lambda x: x * 2)
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
get_next = self.getNext(dataset0)
with ops.device(self._device1):
get_next1 = self.getNext(dataset1)
with ops.device(self._device2):
get_next2 = self.getNext(dataset2)
with session.Session(self._target) as sess:
for i in range(100):
self.assertEqual(i * 2, sess.run(get_next()))
self.assertEqual(i * 2, sess.run(get_next1()))
self.assertEqual(i * 2, sess.run(get_next2()))
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testVariableInput(self):
with ops.device(self._device0):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
dataset0 = dataset_ops.Dataset.range(100).map(
lambda _: counter_var.assign_add(1))
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
with ops.device(self._device1):
it1 = dataset_ops.make_initializable_iterator(dataset1)
# We don't support stateful ops in functions as of now.
with session.Session(self._target) as sess:
with self.assertRaises(errors.FailedPreconditionError):
sess.run(it1.initializer)
@combinations.generate(
combinations.combine(tf_api_version=[1], mode=["graph"]))
def testAllowStatefulOp(self):
with compat.forward_compatibility_horizon(2019, 9, 12):
with ops.device(self._device0):
dataset0 = dataset_ops.Dataset.range(100).map(
lambda _: random_ops.random_uniform( # pylint:disable=g-long-lambda
[],
minval=1,
maxval=10,
dtype=dtypes.float32))
opt = dataset_ops.Options()
opt.experimental_allow_stateful = True
dataset0 = dataset0.with_options(opt)
replicated_ds = distribute.replicate(dataset0,
[self._device1, self._device2])
dataset1 = replicated_ds[self._device1]
dataset2 = replicated_ds[self._device2]
with ops.device(self._device0):
get_next0 = self.getNext(dataset0)
with ops.device(self._device1):
get_next1 = self.getNext(dataset1)
with ops.device(self._device2):
get_next2 = self.getNext(dataset2)
with session.Session(self._target) as sess:
for _ in range(100):
sess.run(get_next0())
sess.run(get_next1())
sess.run(get_next2())
if __name__ == "__main__":
test.main()
|
{
"content_hash": "be908c717845083f739820e23f63cf72",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 80,
"avg_line_length": 40.1865671641791,
"alnum_prop": 0.664066852367688,
"repo_name": "DavidNorman/tensorflow",
"id": "41acbc804e78b3d759717f05a9d5f60fd1cf5a35",
"size": "6074",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/replicate_cluster_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4913"
},
{
"name": "Batchfile",
"bytes": "15272"
},
{
"name": "C",
"bytes": "774469"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "74659044"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "79827"
},
{
"name": "Go",
"bytes": "1670422"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "827737"
},
{
"name": "Jupyter Notebook",
"bytes": "540800"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1004638"
},
{
"name": "Makefile",
"bytes": "66660"
},
{
"name": "Objective-C",
"bytes": "105247"
},
{
"name": "Objective-C++",
"bytes": "297569"
},
{
"name": "PHP",
"bytes": "23553"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "14529"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37406546"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4706"
},
{
"name": "Shell",
"bytes": "452517"
},
{
"name": "Smarty",
"bytes": "31460"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
import errno
import logging
import os
from airflow import configuration as conf
from airflow.utils.helpers import parse_template_string
from datetime import datetime
class FileProcessorHandler(logging.Handler):
"""
FileProcessorHandler is a python log handler that handles
dag processor logs. It creates and delegates log handling
to `logging.FileHandler` after receiving dag processor context.
"""
def __init__(self, base_log_folder, filename_template):
"""
:param base_log_folder: Base log folder to place logs.
:param filename_template: template filename string
"""
super(FileProcessorHandler, self).__init__()
self.handler = None
self.base_log_folder = base_log_folder
self.dag_dir = os.path.expanduser(conf.get('core', 'DAGS_FOLDER'))
self.filename_template, self.filename_jinja_template = \
parse_template_string(filename_template)
self._cur_date = datetime.today()
if not os.path.exists(self._get_log_directory()):
try:
os.makedirs(self._get_log_directory())
except OSError as e:
# only ignore case where the directory already exist
if e.errno != errno.EEXIST:
raise
logging.warning("%s already exists", self._get_log_directory())
self._symlink_latest_log_directory()
def set_context(self, filename):
"""
Provide filename context to airflow task handler.
:param filename: filename in which the dag is located
"""
local_loc = self._init_file(filename)
self.handler = logging.FileHandler(local_loc)
self.handler.setFormatter(self.formatter)
self.handler.setLevel(self.level)
if self._cur_date < datetime.today():
self._symlink_latest_log_directory()
self._cur_date = datetime.today()
def emit(self, record):
if self.handler is not None:
self.handler.emit(record)
def flush(self):
if self.handler is not None:
self.handler.flush()
def close(self):
if self.handler is not None:
self.handler.close()
def _render_filename(self, filename):
filename = os.path.relpath(filename, self.dag_dir)
ctx = dict()
ctx['filename'] = filename
if self.filename_jinja_template:
return self.filename_jinja_template.render(**ctx)
return self.filename_template.format(filename=ctx['filename'])
def _get_log_directory(self):
now = datetime.utcnow()
return os.path.join(self.base_log_folder, now.strftime("%Y-%m-%d"))
def _symlink_latest_log_directory(self):
"""
Create symbolic link to the current day's log directory to
allow easy access to the latest scheduler log files.
:return: None
"""
log_directory = self._get_log_directory()
latest_log_directory_path = os.path.join(self.base_log_folder, "latest")
if os.path.isdir(log_directory):
try:
# if symlink exists but is stale, update it
if os.path.islink(latest_log_directory_path):
if os.readlink(latest_log_directory_path) != log_directory:
os.unlink(latest_log_directory_path)
os.symlink(log_directory, latest_log_directory_path)
elif (os.path.isdir(latest_log_directory_path) or
os.path.isfile(latest_log_directory_path)):
logging.warning(
"%s already exists as a dir/file. Skip creating symlink.",
latest_log_directory_path
)
else:
os.symlink(log_directory, latest_log_directory_path)
except OSError:
logging.warning("OSError while attempting to symlink "
"the latest log directory")
def _init_file(self, filename):
"""
Create log file and directory if required.
:param filename: task instance object
:return: relative log path of the given task instance
"""
relative_path = self._render_filename(filename)
full_path = os.path.join(self._get_log_directory(), relative_path)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.exists(full_path):
open(full_path, "a").close()
return full_path
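# A minimal usage sketch: wire the handler to a logger and log for one DAG
# file (the folder, template, and DAG path here are hypothetical; an Airflow
# configuration must be available for DAGS_FOLDER to resolve).
def _file_processor_handler_example():
    handler = FileProcessorHandler(
        base_log_folder="/tmp/dag_processor_logs",
        filename_template="{filename}.log")
    handler.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
    handler.set_context("/path/to/dags/example_dag.py")
    log = logging.getLogger("dag_processor_example")
    log.addHandler(handler)
    log.warning("processed")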
|
{
"content_hash": "25e7ee7d9586c2ab08c07844f149c799",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 82,
"avg_line_length": 36.36220472440945,
"alnum_prop": 0.5961455175400606,
"repo_name": "fenglu-g/incubator-airflow",
"id": "8b0bc978e1aa3297bbe013dc32009fca44098814",
"size": "5430",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/utils/log/file_processor_handler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "3634"
},
{
"name": "HTML",
"bytes": "129454"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5852162"
},
{
"name": "Shell",
"bytes": "41793"
}
],
"symlink_target": ""
}
|
"""Core control stuff for Coverage."""
import atexit, os, random, socket, sys
from coverage.annotate import AnnotateReporter
from coverage.backward import string_class
from coverage.codeunit import code_unit_factory, CodeUnit
from coverage.collector import Collector
from coverage.config import CoverageConfig
from coverage.data import CoverageData
from coverage.files import FileLocator, TreeMatcher, FnmatchMatcher
from coverage.files import find_python_files
from coverage.html import HtmlReporter
from coverage.misc import CoverageException, bool_or_none, join_regex
from coverage.results import Analysis, Numbers
from coverage.summary import SummaryReporter
from coverage.xmlreport import XmlReporter
class coverage(object):
"""Programmatic access to Coverage.
To use::
from coverage import coverage
cov = coverage()
cov.start()
#.. blah blah (run your code) blah blah ..
cov.stop()
cov.html_report(directory='covhtml')
"""
def __init__(self, data_file=None, data_suffix=None, cover_pylib=None,
auto_data=False, timid=None, branch=None, config_file=True,
source=None, omit=None, include=None):
"""
`data_file` is the base name of the data file to use, defaulting to
".coverage". `data_suffix` is appended (with a dot) to `data_file` to
create the final file name. If `data_suffix` is simply True, then a
suffix is created with the machine and process identity included.
`cover_pylib` is a boolean determining whether Python code installed
with the Python interpreter is measured. This includes the Python
standard library and any packages installed with the interpreter.
If `auto_data` is true, then any existing data file will be read when
coverage measurement starts, and data will be saved automatically when
measurement stops.
If `timid` is true, then a slower and simpler trace function will be
used. This is important for some environments where manipulation of
tracing functions breaks the faster trace function.
If `branch` is true, then branch coverage will be measured in addition
to the usual statement coverage.
`config_file` determines what config file to read. If it is a string,
it is the name of the config file to read. If it is True, then a
standard file is read (".coveragerc"). If it is False, then no file is
read.
`source` is a list of file paths or package names. Only code located
in the trees indicated by the file paths or package names will be
measured.
`include` and `omit` are lists of filename patterns. Files that match
`include` will be measured, files that match `omit` will not. Each
will also accept a single string argument.
"""
from coverage import __version__
# A record of all the warnings that have been issued.
self._warnings = []
# Build our configuration from a number of sources:
# 1: defaults:
self.config = CoverageConfig()
# 2: from the coveragerc file:
if config_file:
if config_file is True:
config_file = ".coveragerc"
try:
self.config.from_file(config_file)
except ValueError:
_, err, _ = sys.exc_info()
raise CoverageException(
"Couldn't read config file %s: %s" % (config_file, err)
)
# 3: from environment variables:
self.config.from_environment('COVERAGE_OPTIONS')
env_data_file = os.environ.get('COVERAGE_FILE')
if env_data_file:
self.config.data_file = env_data_file
# 4: from constructor arguments:
if isinstance(omit, string_class):
omit = [omit]
if isinstance(include, string_class):
include = [include]
self.config.from_args(
data_file=data_file, cover_pylib=cover_pylib, timid=timid,
branch=branch, parallel=bool_or_none(data_suffix),
source=source, omit=omit, include=include
)
self.auto_data = auto_data
self.atexit_registered = False
# _exclude_re is a dict mapping exclusion list names to compiled
# regexes.
self._exclude_re = {}
self._exclude_regex_stale()
self.file_locator = FileLocator()
# The source argument can be directories or package names.
self.source = []
self.source_pkgs = []
for src in self.config.source or []:
if os.path.exists(src):
self.source.append(self.file_locator.canonical_filename(src))
else:
self.source_pkgs.append(src)
self.omit = self._prep_patterns(self.config.omit)
self.include = self._prep_patterns(self.config.include)
self.collector = Collector(
self._should_trace, timid=self.config.timid,
branch=self.config.branch, warn=self._warn
)
# Suffixes are a bit tricky. We want to use the data suffix only when
# collecting data, not when combining data. So we save it as
# `self.run_suffix` now, and promote it to `self.data_suffix` if we
# find that we are collecting data later.
if data_suffix or self.config.parallel:
if not isinstance(data_suffix, string_class):
# if data_suffix=True, use .machinename.pid.random
data_suffix = True
else:
data_suffix = None
self.data_suffix = None
self.run_suffix = data_suffix
# Create the data file. We do this at construction time so that the
# data file will be written into the directory where the process
# started rather than wherever the process eventually chdir'd to.
self.data = CoverageData(
basename=self.config.data_file,
collector="coverage v%s" % __version__
)
# The dirs for files considered "installed with the interpreter".
self.pylib_dirs = []
if not self.config.cover_pylib:
# Look at where some standard modules are located. That's the
# indication for "installed with the interpreter". In some
# environments (virtualenv, for example), these modules may be
# spread across a few locations. Look at all the candidate modules
# we've imported, and take all the different ones.
for m in (atexit, os, random, socket):
if hasattr(m, "__file__"):
m_dir = self._canonical_dir(m.__file__)
if m_dir not in self.pylib_dirs:
self.pylib_dirs.append(m_dir)
# To avoid tracing the coverage code itself, we skip anything located
# where we are.
self.cover_dir = self._canonical_dir(__file__)
# The matchers for _should_trace, created when tracing starts.
self.source_match = None
self.pylib_match = self.cover_match = None
self.include_match = self.omit_match = None
# Only _harvest_data once per measurement cycle.
self._harvested = False
# Set the reporting precision.
Numbers.set_precision(self.config.precision)
# When tearing down the coverage object, modules can become None.
# Saving the modules as object attributes avoids problems, but it is
# quite ad-hoc which modules need to be saved and which references
# need to use the object attributes.
self.socket = socket
self.os = os
self.random = random
def _canonical_dir(self, f):
"""Return the canonical directory of the file `f`."""
return os.path.split(self.file_locator.canonical_filename(f))[0]
def _source_for_file(self, filename):
"""Return the source file for `filename`."""
if not filename.endswith(".py"):
if filename[-4:-1] == ".py":
filename = filename[:-1]
return filename
def _should_trace(self, filename, frame):
"""Decide whether to trace execution in `filename`
This function is called from the trace function. As each new file name
is encountered, this function determines whether it is traced or not.
Returns a canonicalized filename if it should be traced, False if it
should not.
"""
if os is None:
return False
if filename.startswith('<'):
# Lots of non-file execution is represented with artificial
# filenames like "<string>", "<doctest readme.txt[0]>", or
# "<exec_function>". Don't ever trace these executions, since we
# can't do anything with the data later anyway.
return False
if filename.endswith(".html"):
# Jinja and maybe other templating systems compile templates into
# Python code, but use the template filename as the filename in
# the compiled code. Of course, those filenames are useless later
# so don't bother collecting. TODO: How should we really separate
# out good file extensions from bad?
return False
self._check_for_packages()
# Compiled Python files have two filenames: frame.f_code.co_filename is
# the filename at the time the .pyc was compiled. The second name is
# __file__, which is where the .pyc was actually loaded from. Since
# .pyc files can be moved after compilation (for example, by being
# installed), we look for __file__ in the frame and prefer it to the
# co_filename value.
dunder_file = frame.f_globals.get('__file__')
if dunder_file:
filename = self._source_for_file(dunder_file)
# Jython reports the .class file to the tracer, use the source file.
if filename.endswith("$py.class"):
filename = filename[:-9] + ".py"
canonical = self.file_locator.canonical_filename(filename)
# If the user specified source, then that's authoritative about what to
# measure. If they didn't, then we have to exclude the stdlib and
# coverage.py directories.
if self.source_match:
if not self.source_match.match(canonical):
return False
else:
# If we aren't supposed to trace installed code, then check if this
# is near the Python standard library and skip it if so.
if self.pylib_match and self.pylib_match.match(canonical):
return False
# We exclude the coverage code itself, since a little of it will be
# measured otherwise.
if self.cover_match and self.cover_match.match(canonical):
return False
# Check the file against the include and omit patterns.
if self.include_match and not self.include_match.match(canonical):
return False
if self.omit_match and self.omit_match.match(canonical):
return False
return canonical
# To log what should_trace returns, change this to "if 1:"
if 0:
_real_should_trace = _should_trace
def _should_trace(self, filename, frame): # pylint: disable=E0102
"""A logging decorator around the real _should_trace function."""
ret = self._real_should_trace(filename, frame)
print("should_trace: %r -> %r" % (filename, ret))
return ret
def _warn(self, msg):
"""Use `msg` as a warning."""
self._warnings.append(msg)
sys.stderr.write("Coverage.py warning: %s\n" % msg)
def _prep_patterns(self, patterns):
"""Prepare the file patterns for use in a `FnmatchMatcher`.
If a pattern starts with a wildcard, it is used as a pattern
as-is. If it does not start with a wildcard, then it is made
absolute with the current directory.
If `patterns` is None, an empty list is returned.
"""
patterns = patterns or []
prepped = []
for p in patterns or []:
if p.startswith("*") or p.startswith("?"):
prepped.append(p)
else:
prepped.append(self.file_locator.abs_file(p))
return prepped
def _check_for_packages(self):
"""Update the source_match matcher with latest imported packages."""
# Our self.source_pkgs attribute is a list of package names we want to
# measure. Each time through here, we see if we've imported any of
# them yet. If so, we add its file to source_match, and we don't have
# to look for that package any more.
if self.source_pkgs:
found = []
for pkg in self.source_pkgs:
try:
mod = sys.modules[pkg]
except KeyError:
continue
found.append(pkg)
try:
pkg_file = mod.__file__
except AttributeError:
self._warn("Module %s has no Python source." % pkg)
else:
d, f = os.path.split(pkg_file)
if f.startswith('__init__.'):
# This is actually a package, return the directory.
pkg_file = d
else:
pkg_file = self._source_for_file(pkg_file)
pkg_file = self.file_locator.canonical_filename(pkg_file)
self.source.append(pkg_file)
self.source_match.add(pkg_file)
for pkg in found:
self.source_pkgs.remove(pkg)
def use_cache(self, usecache):
"""Control the use of a data file (incorrectly called a cache).
`usecache` is true or false, whether to read and write data on disk.
"""
self.data.usefile(usecache)
def load(self):
"""Load previously-collected coverage data from the data file."""
self.collector.reset()
self.data.read()
def start(self):
"""Start measuring code coverage."""
if self.run_suffix:
# Calling start() means we're running code, so use the run_suffix
# as the data_suffix when we eventually save the data.
self.data_suffix = self.run_suffix
if self.auto_data:
self.load()
# Save coverage data when Python exits.
if not self.atexit_registered:
atexit.register(self.save)
self.atexit_registered = True
# Create the matchers we need for _should_trace
if self.source or self.source_pkgs:
self.source_match = TreeMatcher(self.source)
else:
if self.cover_dir:
self.cover_match = TreeMatcher([self.cover_dir])
if self.pylib_dirs:
self.pylib_match = TreeMatcher(self.pylib_dirs)
if self.include:
self.include_match = FnmatchMatcher(self.include)
if self.omit:
self.omit_match = FnmatchMatcher(self.omit)
self._harvested = False
self.collector.start()
def stop(self):
"""Stop measuring code coverage."""
self.collector.stop()
self._harvest_data()
def erase(self):
"""Erase previously-collected coverage data.
This removes the in-memory data collected in this session as well as
discarding the data file.
"""
self.collector.reset()
self.data.erase()
def clear_exclude(self, which='exclude'):
"""Clear the exclude list."""
setattr(self.config, which + "_list", [])
self._exclude_regex_stale()
def exclude(self, regex, which='exclude'):
"""Exclude source lines from execution consideration.
A number of lists of regular expressions are maintained. Each list
selects lines that are treated differently during reporting.
`which` determines which list is modified. The "exclude" list selects
lines that are not considered executable at all. The "partial" list
indicates lines with branches that are not taken.
`regex` is a regular expression. The regex is added to the specified
list. If any of the regexes in the list is found in a line, the line
is marked for special treatment during reporting.
"""
excl_list = getattr(self.config, which + "_list")
excl_list.append(regex)
self._exclude_regex_stale()
def _exclude_regex_stale(self):
"""Drop all the compiled exclusion regexes, a list was modified."""
self._exclude_re.clear()
def _exclude_regex(self, which):
"""Return a compiled regex for the given exclusion list."""
if which not in self._exclude_re:
excl_list = getattr(self.config, which + "_list")
self._exclude_re[which] = join_regex(excl_list)
return self._exclude_re[which]
def get_exclude_list(self, which='exclude'):
"""Return a list of excluded regex patterns.
`which` indicates which list is desired. See `exclude` for the lists
that are available, and their meaning.
"""
return getattr(self.config, which + "_list")
def save(self):
"""Save the collected coverage data to the data file."""
data_suffix = self.data_suffix
if data_suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
data_suffix = "%s.%s.%06d" % (
self.socket.gethostname(), self.os.getpid(),
self.random.randint(0, 99999)
)
self._harvest_data()
self.data.write(suffix=data_suffix)
def combine(self):
"""Combine together a number of similarly-named coverage data files.
All coverage data files whose name starts with `data_file` (from the
coverage() constructor) will be read, and combined together into the
current measurements.
"""
self.data.combine_parallel_data()
def _harvest_data(self):
"""Get the collected data and reset the collector.
Also warn about various problems collecting data.
"""
if not self._harvested:
self.data.add_line_data(self.collector.get_line_data())
self.data.add_arc_data(self.collector.get_arc_data())
self.collector.reset()
# If there are still entries in the source_pkgs list, then we never
# encountered those packages.
for pkg in self.source_pkgs:
self._warn("Module %s was never imported." % pkg)
# Find out if we got any data.
summary = self.data.summary()
if not summary:
self._warn("No data was collected.")
# Find files that were never executed at all.
for src in self.source:
for py_file in find_python_files(src):
self.data.touch_file(py_file)
self._harvested = True
# Backward compatibility with version 1.
def analysis(self, morf):
"""Like `analysis2` but doesn't return excluded line numbers."""
f, s, _, m, mf = self.analysis2(morf)
return f, s, m, mf
def analysis2(self, morf):
"""Analyze a module.
`morf` is a module or a filename. It will be analyzed to determine
its coverage statistics. The return value is a 5-tuple:
* The filename for the module.
* A list of line numbers of executable statements.
* A list of line numbers of excluded statements.
* A list of line numbers of statements not run (missing from
execution).
* A readable formatted string of the missing line numbers.
The analysis uses the source file itself and the current measured
coverage data.
"""
analysis = self._analyze(morf)
return (
analysis.filename, analysis.statements, analysis.excluded,
analysis.missing, analysis.missing_formatted()
)
def _analyze(self, it):
"""Analyze a single morf or code unit.
Returns an `Analysis` object.
"""
if not isinstance(it, CodeUnit):
it = code_unit_factory(it, self.file_locator)[0]
return Analysis(self, it)
def report(self, morfs=None, show_missing=True, ignore_errors=None,
file=None, # pylint: disable=W0622
omit=None, include=None
):
"""Write a summary report to `file`.
Each module in `morfs` is listed, with counts of statements, executed
statements, missing statements, and a list of lines missed.
`include` is a list of filename patterns. Modules whose filenames
match those patterns will be included in the report. Modules matching
`omit` will not be included in the report.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = SummaryReporter(
self, show_missing, self.config.ignore_errors
)
reporter.report(morfs, outfile=file, config=self.config)
def annotate(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Annotate a list of modules.
Each module in `morfs` is annotated. The source is written to a new
file, named with a ",cover" suffix, with each line prefixed with a
marker to indicate the coverage of the line. Covered lines have ">",
excluded lines have "-", and missing lines have "!".
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include
)
reporter = AnnotateReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config, directory=directory)
def html_report(self, morfs=None, directory=None, ignore_errors=None,
omit=None, include=None):
"""Generate an HTML report.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
html_dir=directory,
)
reporter = HtmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, config=self.config)
def xml_report(self, morfs=None, outfile=None, ignore_errors=None,
omit=None, include=None):
"""Generate an XML report of coverage results.
The report is compatible with Cobertura reports.
Each module in `morfs` is included in the report. `outfile` is the
path to write the file to, "-" will write to stdout.
See `coverage.report()` for other arguments.
"""
self.config.from_args(
ignore_errors=ignore_errors, omit=omit, include=include,
xml_output=outfile,
)
file_to_close = None
if self.config.xml_output:
if self.config.xml_output == '-':
outfile = sys.stdout
else:
outfile = open(self.config.xml_output, "w")
file_to_close = outfile
try:
reporter = XmlReporter(self, self.config.ignore_errors)
reporter.report(morfs, outfile=outfile, config=self.config)
finally:
if file_to_close:
file_to_close.close()
def sysinfo(self):
"""Return a list of (key, value) pairs showing internal information."""
import coverage as covmod
import platform, re
info = [
('version', covmod.__version__),
('coverage', covmod.__file__),
('cover_dir', self.cover_dir),
('pylib_dirs', self.pylib_dirs),
('tracer', self.collector.tracer_name()),
('data_path', self.data.filename),
('python', sys.version.replace('\n', '')),
('platform', platform.platform()),
('cwd', os.getcwd()),
('path', sys.path),
('environment', [
("%s = %s" % (k, v)) for k, v in os.environ.items()
if re.search("^COV|^PY", k)
]),
]
return info
def process_startup():
"""Call this at Python startup to perhaps measure coverage.
If the environment variable COVERAGE_PROCESS_START is defined, coverage
measurement is started. The value of the variable is the config file
to use.
There are two ways to configure your Python installation to invoke this
function when Python starts:
#. Create or append to sitecustomize.py to add these lines::
import coverage
coverage.process_startup()
#. Create a .pth file in your Python installation containing::
import coverage; coverage.process_startup()
"""
cps = os.environ.get("COVERAGE_PROCESS_START")
if cps:
cov = coverage(config_file=cps, auto_data=True)
if os.environ.get("COVERAGE_COVERAGE"):
# Measuring coverage within coverage.py takes yet more trickery.
cov.cover_dir = "Please measure coverage.py!"
cov.start()
|
{
"content_hash": "5e19faf38d79008ed43dc0e5fc5ff67a",
"timestamp": "",
"source": "github",
"line_count": 673,
"max_line_length": 84,
"avg_line_length": 38.454680534918275,
"alnum_prop": 0.5993044822256569,
"repo_name": "akiokio/centralfitestoque",
"id": "1d403b56a094f710116eeaa4a751a950d7a1cd78",
"size": "25880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/.pycharm_helpers/coverage/control.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "253279"
},
{
"name": "JavaScript",
"bytes": "253299"
},
{
"name": "Python",
"bytes": "6144500"
},
{
"name": "Ruby",
"bytes": "168219"
},
{
"name": "Shell",
"bytes": "21"
}
],
"symlink_target": ""
}
|
from CIM14.IEC61970.LoadModel.EnergyArea import EnergyArea
class SubLoadArea(EnergyArea):
"""The class is the second level in a hierarchical structure for grouping of loads for the purpose of load flow load scaling.
"""
def __init__(self, LoadGroups=None, LoadArea=None, *args, **kw_args):
"""Initialises a new 'SubLoadArea' instance.
@param LoadGroups: The Loadgroups in the SubLoadArea.
@param LoadArea: The LoadArea where the SubLoadArea belongs.
"""
self._LoadGroups = []
self.LoadGroups = [] if LoadGroups is None else LoadGroups
self._LoadArea = None
self.LoadArea = LoadArea
super(SubLoadArea, self).__init__(*args, **kw_args)
_attrs = []
_attr_types = {}
_defaults = {}
_enums = {}
_refs = ["LoadGroups", "LoadArea"]
_many_refs = ["LoadGroups"]
def getLoadGroups(self):
"""The Loadgroups in the SubLoadArea.
"""
return self._LoadGroups
def setLoadGroups(self, value):
for x in self._LoadGroups:
x.SubLoadArea = None
for y in value:
y._SubLoadArea = self
self._LoadGroups = value
LoadGroups = property(getLoadGroups, setLoadGroups)
def addLoadGroups(self, *LoadGroups):
for obj in LoadGroups:
obj.SubLoadArea = self
def removeLoadGroups(self, *LoadGroups):
for obj in LoadGroups:
obj.SubLoadArea = None
def getLoadArea(self):
"""The LoadArea where the SubLoadArea belongs.
"""
return self._LoadArea
def setLoadArea(self, value):
if self._LoadArea is not None:
filtered = [x for x in self.LoadArea.SubLoadAreas if x != self]
self._LoadArea._SubLoadAreas = filtered
self._LoadArea = value
if self._LoadArea is not None:
if self not in self._LoadArea._SubLoadAreas:
self._LoadArea._SubLoadAreas.append(self)
LoadArea = property(getLoadArea, setLoadArea)
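# A minimal usage sketch of the two-way association maintained by the setters
# above; the LoadArea/LoadGroup instances and their public SubLoadArea(s)
# attributes are assumed to come from the matching CIM14 classes.
def _sub_load_area_example(load_area, load_group):
    sub = SubLoadArea(LoadGroups=[load_group], LoadArea=load_area)
    assert load_group.SubLoadArea is sub
    assert sub in load_area.SubLoadAreas
    return sub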
|
{
"content_hash": "765745eaa833ea69b95566022d20a981",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 129,
"avg_line_length": 30.696969696969695,
"alnum_prop": 0.6164856860809477,
"repo_name": "rwl/PyCIM",
"id": "83e4a4a4301fffe95f96490d1b35fc1ab6909f20",
"size": "3126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM14/IEC61970/LoadModel/SubLoadArea.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
}
|
'''
Credential creation and verification utilities.
'''
import os
import logging
import xmlrpclib
import sys
import datetime
import dateutil.tz
import handler.geni.v3.extensions.sfa.trust.credential as cred
import handler.geni.v3.extensions.sfa.trust.gid as gid
import handler.geni.v3.extensions.sfa.trust.rights as rights
from handler.geni.v3.extensions.sfa.util.xrn import hrn_authfor_hrn
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
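# A minimal sketch: an aware datetime is converted to a naive UTC one.
def _naive_utc_example():
    aware = datetime.datetime(2014, 7, 1, 12, 0, tzinfo=dateutil.tz.tzutc())
    return naiveUTC(aware)  # datetime.datetime(2014, 7, 1, 12, 0)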
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_file is a trusted root file file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir):
self.logger = logging.getLogger('cred-verifier')
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files? The combo cert file?
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
self.logger.info('Will accept credentials signed by any of %d root certs found in %s: %r' % (len(self.root_cert_files), root_cert_fileordir, self.root_cert_files))
elif os.path.isfile(root_cert_fileordir):
self.logger.info('Will accept credentials signed by the single root cert %s' % root_cert_fileordir)
self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
@classmethod
def getCAsFileFromDir(cls, caCerts):
'''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
logger = logging.getLogger('cred-verifier')
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
logger.debug('Skipping non file %s', filepath)
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
logger.info("Adding trusted cert file %s", filename)
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
else:
logger.info('Combined dir of %d trusted certs %s into file %s for Python SSL support', okFileCount, caCerts, comboFullPath)
return comboFullPath
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
if gid_string is None:
return
def make_cred(cred_string):
return cred.Credential(string=cred_string)
return self.verify(gid.GID(string=gid_string),
map(make_cred, cred_strings),
target_urn,
privileges)
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
else:
self.logger.debug('Source URNs do not match. Source URN %r != credential source URN %r', source_urn, cred_source_urn)
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
# target_urn, cred_target_urn)
result = target_urn == cred_target_urn
if result:
# self.logger.debug('Target URNs match.')
pass
else:
self.logger.debug('Target URNs do NOT match. Target URN %r != Credential URN %r', target_urn, cred_target_urn)
return result
def verify_privileges(self, privileges, credential):
''' Return True iff the given credential gives the privilege
to perform ALL of the privileges (actions) in the given list.
In particular, the given list of 'privileges' is really a list
of names of operations. The privileges in credentials are
each turned in to Rights objects (see sfa/trust/rights.py).
And the SFA rights table is used to map from names of privileges
as specified in credentials, to names of operations.'''
result = True
privs = credential.get_privileges()
for priv in privileges:
if not privs.can_perform(priv):
self.logger.debug('Privilege %s not found on credential %s of %s', priv, credential.get_gid_object().get_urn(), credential.get_gid_caller().get_urn())
result = False
return result
def verify(self, gid, credentials, target_urn, privileges):
'''Verify that the given Source GID supplied at least one credential
in the given list of credentials that has all the privileges required
in the privileges list on the given target.
IE if any of the supplied credentials has a caller that matches gid
and a target that matches target_urn, and has all the privileges in
the given list, then return the list of credentials that were ok.
Throw an Exception if we fail to verify any credential.'''
# Note that here we treat a list of credentials as being options
# Alternatively could accumulate privileges for example
# The semantics of the list of credentials is under specified.
self.logger.debug('Verifying privileges')
result = list()
failure = ""
tried_creds = ""
for cred in credentials:
if tried_creds != "":
tried_creds = "%s, %s" % (tried_creds, cred.get_gid_caller().get_urn())
else:
tried_creds = cred.get_gid_caller().get_urn()
            if not self.verify_source(gid, cred):
                failure = "Cred %s fails: Source URNs don't match" % cred.get_gid_caller().get_urn()
                continue
            if not self.verify_target(target_urn, cred):
                failure = "Cred %s on %s fails: Target URNs don't match" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn())
                continue
            if not self.verify_privileges(privileges, cred):
                failure = "Cred %s doesn't have sufficient privileges" % cred.get_gid_caller().get_urn()
                continue
try:
if not cred.verify(self.root_cert_files):
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files))
continue
except Exception, exc:
failure = "Couldn't validate credential for caller %s with target %s with any of %d known root certs: %s: %s" % (cred.get_gid_caller().get_urn(), cred.get_gid_object().get_urn(), len(self.root_cert_files), exc.__class__.__name__, exc)
self.logger.info(failure)
continue
# If got here it verified
result.append(cred)
if result and result != list():
# At least one credential verified ok and was added to the list
# return that list
return result
else:
# We did not find any credential with sufficient privileges
# Raise an exception.
fault_code = 'Insufficient privileges'
fault_string = 'No credential was found with appropriate privileges. Tried %s. Last failure: %s' % (tried_creds, failure)
self.logger.error(fault_string)
raise xmlrpclib.Fault(fault_code, fault_string)
def create_credential(caller_gid, object_gid, expiration, typename, issuer_keyfile, issuer_certfile, trusted_roots, delegatable=False):
    '''Create and return a Credential object issued by the given key/cert for the given
    caller and object GID objects, with the given expiration time, and of the given type.
    Privileges are determined by type per sfa/trust/rights.py
    Privileges are delegatable if requested.'''
# FIXME: Validate args: my gids, >0 life,
# type of cred one I can issue
# and readable key and cert files
if caller_gid is None:
raise ValueError("Missing Caller GID")
if object_gid is None:
raise ValueError("Missing Object GID")
if expiration is None:
raise ValueError("Missing expiration")
naive_expiration = naiveUTC(expiration)
duration = naive_expiration - datetime.datetime.utcnow()
life_secs = duration.seconds + duration.days * 24 * 3600
if life_secs < 1:
raise ValueError("Credential expiration is in the past")
if trusted_roots is None:
raise ValueError("Missing list of trusted roots")
if typename is None or typename.strip() == '':
raise ValueError("Missing credential type")
typename = typename.strip().lower()
if typename not in ("user", "sa", "ma", "authority", "slice", "component"):
raise ValueError("Unknown credential type %s" % typename)
    if not os.path.isfile(issuer_keyfile):
        raise ValueError("Can't read issuer key file %s" % issuer_keyfile)
    if not os.path.isfile(issuer_certfile):
        raise ValueError("Can't read issuer cert file %s" % issuer_certfile)
issuer_gid = gid.GID(filename=issuer_certfile)
if not (object_gid.get_urn() == issuer_gid.get_urn() or
(issuer_gid.get_type().find('authority') == 0 and
hrn_authfor_hrn(issuer_gid.get_hrn(), object_gid.get_hrn()))):
raise ValueError("Issuer not authorized to issue credential: Issuer=%s Target=%s" % (issuer_gid.get_urn(), object_gid.get_urn()))
ucred = cred.Credential()
# FIXME: Validate the caller_gid and object_gid
# are my user and slice
# Do get_issuer and compare to the issuer cert?
# Or do gid.is_signed_by_cert(issuer_certfile)?
ucred.set_gid_caller(caller_gid)
ucred.set_gid_object(object_gid)
ucred.set_expiration(expiration)
# Use sfa/trust/rights.py to figure out what privileges
# the credential should have.
# user means refresh, resolve, info
# per the privilege_table that lets users do
# remove, update, resolve, list, getcredential,
# listslices, listnodes, getpolicy
# Note that it does not allow manipulating slivers
# And every right is delegatable if any are delegatable (default False)
privileges = rights.determine_rights(typename, None)
privileges.delegate_all_privileges(delegatable)
ucred.set_privileges(privileges)
ucred.encode()
ucred.set_issuer_keys(issuer_keyfile, issuer_certfile)
ucred.sign()
try:
ucred.verify(trusted_roots)
except Exception, exc:
raise Exception("Create Credential failed to verify new credential from trusted roots: %s" % exc)
return ucred
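# A minimal usage sketch (hedged): every path below is a hypothetical
# placeholder, not something this module ships with, and the issuer cert must
# be authorized for the target per the checks in create_credential above.
if __name__ == '__main__':
    caller = gid.GID(filename='/tmp/user-cert.pem')    # hypothetical path
    target = gid.GID(filename='/tmp/slice-cert.pem')   # hypothetical path
    expires = datetime.datetime.utcnow() + datetime.timedelta(hours=6)
    slice_cred = create_credential(caller, target, expires, 'slice',
                                   '/tmp/issuer-key.pem', '/tmp/issuer-cert.pem',
                                   trusted_roots=['/tmp/trusted-root.pem'])
    print "Issued credential for", slice_cred.get_gid_caller().get_urn()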
|
{
"content_hash": "e1b85b91ffbc169de0e909eb61c6a963",
"timestamp": "",
"source": "github",
"line_count": 308,
"max_line_length": 250,
"avg_line_length": 45.564935064935064,
"alnum_prop": 0.6308963944705714,
"repo_name": "ict-felix/stack",
"id": "f1b6411c3d6c5d173eb856da1a46373d94f08316",
"size": "15246",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/resource/manager/stitching-entity/src/handler/geni/v3/extensions/geni/util/cred_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "337811"
},
{
"name": "Elixir",
"bytes": "17243"
},
{
"name": "Emacs Lisp",
"bytes": "1098"
},
{
"name": "Groff",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "660363"
},
{
"name": "Java",
"bytes": "18362"
},
{
"name": "JavaScript",
"bytes": "838960"
},
{
"name": "Makefile",
"bytes": "11581"
},
{
"name": "Perl",
"bytes": "5416"
},
{
"name": "Python",
"bytes": "8073455"
},
{
"name": "Shell",
"bytes": "259720"
}
],
"symlink_target": ""
}
|
from warnings import catch_warnings
with catch_warnings(record=True):
import json
import sys
version_json = '''
{
"dirty": false,
"error": null,
"full-revisionid": "62a87bf4a2af02a8d3bc271ad26e5994292b8e6a",
"version": "0.25.3"
}
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
|
{
"content_hash": "a88925fb2590eb0b95567bd5b7377109",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 18.88235294117647,
"alnum_prop": 0.7040498442367601,
"repo_name": "kushalbhola/MyStuff",
"id": "842357466ad043abc2961b2d0e4560c78b2a4a6f",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Practice/PythonApplication/env/Lib/site-packages/pandas/_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1330"
},
{
"name": "C#",
"bytes": "332967"
},
{
"name": "CSS",
"bytes": "1451"
},
{
"name": "HTML",
"bytes": "7539"
},
{
"name": "Java",
"bytes": "14860"
},
{
"name": "JavaScript",
"bytes": "9843"
},
{
"name": "Jupyter Notebook",
"bytes": "374013"
},
{
"name": "PowerShell",
"bytes": "1448"
},
{
"name": "Python",
"bytes": "6511820"
},
{
"name": "Tcl",
"bytes": "24289"
},
{
"name": "TypeScript",
"bytes": "15697"
}
],
"symlink_target": ""
}
|
'''
Roles in this namespace are meant to provide `MongoDB <http://www.mongodb.org/>`_ database management utilities for Debian distributions.
'''
from cStringIO import StringIO
from configobj import ConfigObj
from provy.core import Role
from provy.more.debian.package.aptitude import AptitudeRole
class MongoDBRole(Role):
'''
This role provides `MongoDB <http://www.mongodb.org/>`_ database management utilities for Debian distributions.
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
with self.using(MongoDBRole) as role:
role.restart()
'''
def provision(self):
'''
Installs `MongoDB <http://www.mongodb.org/>`_ and its dependencies.
        This method should be called upon if overridden in base classes, or MongoDB won't work properly in the remote server.
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
self.provision_role(MongoDBRole) # no need to call this if using with block.
'''
distro_info = self.get_distro_info()
self.log('Installing MongoDB via packages')
if distro_info.distributor_id == 'Ubuntu':
self.provision_to_ubuntu()
else:
self.provision_to_debian()
self.log('MongoDB installed')
def provision_to_debian(self):
'''
Installs MongoDB and its dependencies via Debian-specific repository.
        It's not recommended that you use this method directly; instead, provision this role itself and it will find out the best way to provision.
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
with self.using(MongoDBRole) as mongo:
mongo.provision_to_debian()
'''
initialization_type = 'debian-sysvinit'
self.__provision_with_init_type(initialization_type)
def provision_to_ubuntu(self):
'''
Installs MongoDB and its dependencies via Ubuntu-specific repository.
        It's not recommended that you use this method directly; instead, provision this role itself and it will find out the best way to provision.
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
with self.using(MongoDBRole) as mongo:
mongo.provision_to_ubuntu()
'''
initialization_type = 'ubuntu-upstart'
self.__provision_with_init_type(initialization_type)
def __provision_with_init_type(self, initialization_type):
with self.using(AptitudeRole) as aptitude:
aptitude.ensure_gpg_key('http://docs.mongodb.org/10gen-gpg-key.asc')
aptitude.ensure_aptitude_source('deb http://downloads-distro.mongodb.org/repo/%s dist 10gen' % initialization_type)
aptitude.force_update()
aptitude.ensure_package_installed('mongodb-10gen')
def restart(self):
'''
Restarts the MongoDB database.
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
with self.using(MongoDBRole) as mongo:
mongo.restart()
'''
self.execute('service mongodb restart', sudo=True)
def configure(self, configuration):
'''
Configures the MongoDB database according to a dictionary.
.. note::
Some important details about this method:
* It will leave configuration items untouched if they're not changed;
* It will create a new configuration item if it doesn't exist yet;
* It will overwrite the configuration items defined in the original configuration by the ones defined in the `configuration` argument, if they have the same name;
* It will convert boolean items to lowercase (like :data:`True` to "true"), when writing, to follow the `mongodb.conf` conventions;
* It will leave file comments untouched, to avoid losing potentially important information;
:param configuration: The intended configuration items.
:type configuration: :class:`dict`
Example:
::
from provy.core import Role
from provy.more.debian import MongoDBRole
class MySampleRole(Role):
def provision(self):
with self.using(MongoDBRole) as mongo:
mongo.configure({
'port': 9876,
'replSet': 'my_replica_set',
})
'''
mongodb_config_path = '/etc/mongodb.conf'
config = self.__config_from_remote(mongodb_config_path)
self.__set_config_items(configuration, config)
tmp_file = self.__tmp_file_with_config(config)
self.put_file(from_file=tmp_file, to_file=mongodb_config_path, sudo=True)
def __tmp_file_with_config(self, config):
output_buffer = StringIO()
config.write(output_buffer)
tmp_file = self.write_to_temp_file(output_buffer.getvalue())
return tmp_file
def __config_from_remote(self, mongodb_config_path):
config_content = self.read_remote_file(mongodb_config_path, sudo=True)
config_buffer = StringIO(config_content)
config = ConfigObj(infile=config_buffer)
return config
def __set_config_items(self, configuration, config):
for key, value in configuration.items():
if isinstance(value, bool):
value = str(value).lower()
config[key] = value
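# A minimal combined sketch following the docstring examples above; the port
# and replica set name are illustrative values, not defaults of this role.
class MongoDBExampleRole(Role):
    def provision(self):
        with self.using(MongoDBRole) as mongo:
            mongo.configure({
                'port': 27017,
                'replSet': 'example_set',
            })
            mongo.restart()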
|
{
"content_hash": "aef548a53d68d14f29b2e9b4b0a975b4",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 174,
"avg_line_length": 35.61271676300578,
"alnum_prop": 0.6099659146242493,
"repo_name": "python-provy/provy",
"id": "127a3babaef372b7ff39fe1a82be2960d2d73dfe",
"size": "6204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "provy/more/debian/database/mongodb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "608167"
},
{
"name": "Ruby",
"bytes": "912"
}
],
"symlink_target": ""
}
|
import pygame
import Resources, InputState, Vector2, Factory
BG_COLOR = Resources.BLACK
# Class that stores the game's main Surface where everything is drawn
class ScreenManager:
    def __init__(self):
        #pygame.font.init()
        #pygame.mixer.init()
        # maximum number of frames per second
        self.framerate = 30
        # Surface where everything will be drawn
        self.screen = pygame.display.get_surface()
        # stores the player's keyboard and mouse interactions
        self.inputState = InputState.InputState()
        # the BaseObject that will be updated (can be a level, a menu screen, etc.)
        self.baseObject = None
        self.clock = pygame.time.Clock()
        # a two-element Vector2 with the camera position, where index 0 stores the position on the 'x' axis and index 1 on the 'y' axis
        self.camera = Vector2.Vector2(0, 0)
        # a two-element tuple that stores the screen size
        self.resolution = self.getLastUsedResolution()
        # whenever the game starts it fetches the last resolution used and sets the screen to it
        self.setResolution(self.resolution)
        self.factory = Factory.Factory(self)
        # indicates whether the game is running
        self.running = False
    # assigns the BaseObject that will be updated
    # param: obj - an object of type BaseObject
    def setBaseObjectToUpdate(self, obj):
        self.baseObject = obj
    # main game loop
    def run(self):
        self.running = True
        while self.running:
            # clear the screen
            self.screen.fill(BG_COLOR)
            # update the player's inputs
            self.inputState.update()
            self.running = not self.inputState.QUIT
            # call the BaseObject's update() method
            self.baseObject.update()
            # call the GameObject's draw() method
            self.baseObject.draw()
            pygame.display.flip() # refresh the screen
            self.clock.tick(self.framerate)
        pygame.quit()
    # Fills the screen with the colour 'color'
    # param: color - a three-element tuple with the colour in RGB
    def fill(self, color):
        self.screen.fill(color)
    # Draws a surface, relative to the map
    # param: surface - the Surface to be drawn
    # param: position - a Vector2 with the position on the map where the surface will be drawn
    def blit(self, surface, position):
        pos = self.getSimplePosition(position)
        self.screen.blit(surface, pos)
    # Draws a surface that is not relative to the camera, i.e. draws at a fixed screen position regardless of the camera's position on the map
    # param: surface - the Surface to be drawn
    # param: position - a tuple with the position on the screen
    def blitNonCameraRelative(self, surface, position):
        self.screen.blit(surface, position)
    # Returns a tuple with the object's position on the map relative to the camera
    # param: position - a Vector2 with the position on the map
    # return: tuple with the position
    def getSimplePosition(self, position):
        return (position.x - self.camera.x, position.y - self.camera.y)
    # Checks whether a position on the map is visible to the camera
    # param: position - a Vector2 with the position on the map
    # param: offset - a tuple with the amount to disregard on each axis, i.e.
    #        how far off-screen the object may be while still counting as on-screen
    # return: a boolean indicating whether it is visible
    def isInsideScreen(self, position, offset):
        pos = self.getSimplePosition(position)
        # check whether it is off-screen on the x axis
        if pos[0] + offset[0] < 0 or pos[0] - offset[0] > self.resolution[0]:
            # off-screen on the x axis
            return False
        # on-screen on the x axis
        else:
            # now check whether it is on-screen on the y axis
            if pos[1] + offset[1] < 0 or pos[1] - offset[1] > self.resolution[1]:
                # off-screen on the y axis
                return False
            else:
                # on-screen on both axes
                return True
    # Updates the camera position
    # param: mapSize: a two-element tuple with the map's width (index 0) and height (index 1) in pixels
    # param: referencePoint: a Vector2 with a reference point for the camera; this point must always stay visible
    def updateCamera(self, mapSize, referencePoint):
        self.camera.x = referencePoint.x - self.resolution[0]/2 # as written, the reference point always stays at the centre of the screen (x axis)
        if self.camera.x < 0:
            self.camera.x = 0
        elif self.camera.x + self.resolution[0] > mapSize[0]:
            self.camera.x = mapSize[0] - self.resolution[0]
        self.camera.y = referencePoint.y - self.resolution[1]/2 # as written, the reference point always stays at the centre of the screen (y axis)
        if self.camera.y < 0:
            self.camera.y = 0
        elif self.camera.y + self.resolution[1] > mapSize[1]:
            self.camera.y = mapSize[1] - self.resolution[1]
    # Sets the screen resolution (fullscreen) and persists it for the next run
    # param: resolution - a tuple of two integers with the screen resolution to be set
    def setResolution(self, resolution):
        self.resolution = resolution
        try:
            pygame.display.set_mode(resolution, pygame.FULLSCREEN)
        except Exception:
            self.resolution = Resources.DEFAULT_SCREEN_SIZE
            pygame.display.set_mode(Resources.DEFAULT_SCREEN_SIZE, pygame.FULLSCREEN)
        # persist the resolution; a context manager guarantees the file is flushed and closed
        with open('res/files/resolution.txt', 'w') as f:
            f.write(str(self.resolution))
    # Returns a two-element tuple with the last resolution used on the screen
    # return: tuple with the last resolution
    def getLastUsedResolution(self):
        try:
            with open('res/files/resolution.txt', 'r') as f:
                res = eval(f.read())
        except Exception:
            # if anything goes wrong, fall back to the default 800x600 resolution
            res = Resources.DEFAULT_SCREEN_SIZE
        return res
    # Stops the ScreenManager, ending the game loop
    def stop(self):
        self.running = False
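# A minimal usage sketch (assumptions: pygame's display must be initialised
# before ScreenManager is constructed, since __init__ grabs the current display
# surface; the 'res/files' directory used to persist the resolution must exist;
# and the base object only needs the update()/draw() methods that run() calls).
# The scene class below is a hypothetical placeholder.
if __name__ == '__main__':
    class EmptyScene:
        def __init__(self, manager):
            self.manager = manager
        def update(self):
            pass
        def draw(self):
            pass
    pygame.init()
    pygame.display.set_mode(Resources.DEFAULT_SCREEN_SIZE)
    manager = ScreenManager()
    manager.setBaseObjectToUpdate(EmptyScene(manager))
    manager.run()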
|
{
"content_hash": "440804fd29945f73f7eea48c4eeb1f78",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 146,
"avg_line_length": 43.96621621621622,
"alnum_prop": 0.6300906715844475,
"repo_name": "iPatso/PyGameProjs",
"id": "c803791ef4e0bfd9993ac6c326fe8f8cafe4771a",
"size": "6507",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "PYex/Rambit/Rambit/source/ScreenManager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "433468"
}
],
"symlink_target": ""
}
|
from ._hcache import Cached
from ._hcache import cached
|
{
"content_hash": "410d606e939363586651588295d29859",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 27,
"avg_line_length": 28,
"alnum_prop": 0.7857142857142857,
"repo_name": "PMBio/limix",
"id": "98cdc3661c4fea8396ea3f878dfbae6e4ba648fa",
"size": "56",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limix/hcache/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "1550482"
},
{
"name": "C++",
"bytes": "8073525"
},
{
"name": "CMake",
"bytes": "21097"
},
{
"name": "Fortran",
"bytes": "363470"
},
{
"name": "M4",
"bytes": "16520"
},
{
"name": "Makefile",
"bytes": "11605"
},
{
"name": "Matlab",
"bytes": "25435"
},
{
"name": "PowerShell",
"bytes": "3104"
},
{
"name": "Python",
"bytes": "1704175"
},
{
"name": "Roff",
"bytes": "66747"
},
{
"name": "Shell",
"bytes": "15645"
},
{
"name": "TeX",
"bytes": "26251"
}
],
"symlink_target": ""
}
|
from city.bus_stop import BusStop
from city.double_stop import DoubleStop
from city.point import Point
from city.tram_stop import TramStop
from transport.bus import Bus
from transport.taxi import Taxi
from transport.tram import Tram
import networkx as nx
from city.edge import Edge
from random import randint
class Environment():
def __init__(self):
self.points = []
self.paths = []
self.cycles = []
self.speed = []
self.edge_speed = []
self.transporters = []
self.tmpTaxis = []
self.load_points()
self.load_paths()
self.load_roads()
self.city = nx.DiGraph()
self.init_city()
self.init_transporters()
def load_points(self):
for line in open('config/points'):
if line.startswith("#"):
continue
p = line.strip().split(" ")
x = float(p[0])
y = float(p[1])
point_types = p[2].split(",")
load = int(p[3])
if x < 0 or x > 1 or y < 0 or y > 1:
exit("bad configuration: points coordinates must be between 0 and 1")
if len(point_types) == 1:
point_type = point_types[0]
if point_type == 'bus':
self.points.append(BusStop(x, y, point_types, load))
elif point_type == 'tram':
self.points.append(TramStop(x, y, point_types, load))
elif point_type == 'none':
self.points.append(Point(x, y, point_types, load))
else:
exit("bad configuration: point types must be one of: 'bus', 'tram', 'none'")
else:
if len(point_types) == 2 and 'bus' in point_types and 'tram' in point_types:
self.points.append(DoubleStop(x, y, point_types, load))
else:
exit("bad configuration: unknown point types combination")
def load_paths(self):
for line in open('config/paths'):
full_path = line.strip().split(",")
self.edge_speed.append(full_path[1])
p = full_path[0].split(" ")
path = []
for point in p:
path.append(int(point))
                if int(point) < 0 or int(point) >= len(self.points):
                    exit("bad configuration: point number must be between 0 and " + str(len(self.points) - 1))
self.paths.append(path)
def load_roads(self):
for line in open('config/roads'):
trans = line.strip().split(",")
self.speed.append(int(trans[1]))
transporter_type = trans[2]
if transporter_type == "bus":
self.transporters.append(Bus())
elif transporter_type == "tram":
self.transporters.append(Tram())
elif transporter_type == "taxi":
self.tmpTaxis.append(int(trans[1]))
else:
exit("bad configuration: transporter type must be one of: 'bus', 'tram', 'taxi'")
r = trans[0].split(" ")
road = []
if transporter_type != "taxi":
for point in r:
road.append(int(point))
self.cycles.append(road)
def init_transporters(self):
for j in range(len(self.cycles)):
cycle = self.cycles[j]
road = nx.DiGraph()
cycle_array = []
for point in cycle:
cycle_array.append(self.points[point])
for i in range(len(cycle_array)):
is_edge = False
for edge in self.city.edges():
edge_val = self.city[edge[0]][edge[1]]['val']
if edge_val.begin == cycle_array[i] and edge_val.end == cycle_array[(i+1) % len(cycle_array)]:
is_edge = True
road.add_edge(edge[0], edge[1], val=edge_val)
if not is_edge:
exit("bad configuration: no connection between " + str(cycle_array[i].x) + "," + str(cycle_array[i].y) + " and " + str(cycle_array[(i+1) % len(cycle_array)].x) + "," + str(cycle_array[(i+1) % len(cycle_array)].y))
# road.add_cycle(cycle_array)
self.transporters[j].position = self.points[cycle[0]]
self.transporters[j].route = road
self.transporters[j].speed = self.speed[j]
# self.transporters.append(Bus(self.points[cycle[0]], road, self.speed[j]))
def init_city(self):
for i in range(len(self.paths)):
path = self.paths[i]
self.city.add_edge(self.points[path[0]], self.points[path[1]], val=Edge(self.points[path[0]], self.points[path[1]], self.edge_speed[i]))
for taxi in self.tmpTaxis:
self.transporters.append(Taxi(self.points[randint(0, len(self.points)-1)], self.city, taxi))
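# Illustrative config sketch (hypothetical values) matching the parsers above.
#
# config/points -- one point per line, "x y types load", coordinates in [0, 1],
# types comma-separated from {'bus', 'tram', 'none'}:
#   0.10 0.20 bus 5
#   0.40 0.60 bus,tram 10
#   0.90 0.90 none 0
#
# config/paths -- one directed edge per line, "from to,speed":
#   0 1,50
#   1 2,30
#   2 0,30
#
# config/roads -- one transporter per line, "cycle,speed,type"; a taxi line
# carries no cycle. Taxi lines are kept last here because the parser appends a
# speed for every line but a cycle only for buses/trams, so earlier taxi lines
# would shift the cycle/speed pairing:
#   0 1 2,40,bus
#   ,60,taxi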
|
{
"content_hash": "03ce2c5ddfa0f38538e8d76e4b41dc9d",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 233,
"avg_line_length": 41.083333333333336,
"alnum_prop": 0.5235294117647059,
"repo_name": "PrzemekBurczyk/TransportSimulation",
"id": "b42bde7e2dd5fb9354cc43f2ad61c5858275b686",
"size": "4930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18847"
}
],
"symlink_target": ""
}
|
from robot.output.xmllogger import XmlLogger
from robot.result.visitor import ResultVisitor
# TODO: Unify XmlLogger and ResultVisitor APIs.
# Perhaps XmlLogger could be ResultVisitor.
class OutputWriter(XmlLogger, ResultVisitor):
def __init__(self, output):
XmlLogger.__init__(self, output, generator='Rebot')
def start_message(self, msg):
self._write_message(msg)
def close(self):
self._writer.end('robot')
self._writer.close()
def start_errors(self, errors):
XmlLogger.start_errors(self)
def end_errors(self, errors):
XmlLogger.end_errors(self)
def end_result(self, result):
self.close()
start_total_statistics = XmlLogger.start_total_stats
start_tag_statistics = XmlLogger.start_tag_stats
start_suite_statistics = XmlLogger.start_suite_stats
end_total_statistics = XmlLogger.end_total_stats
end_tag_statistics = XmlLogger.end_tag_stats
end_suite_statistics = XmlLogger.end_suite_stats
def visit_stat(self, stat):
self._writer.element('stat', stat.name,
stat.get_attributes(values_as_strings=True))
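# A minimal usage sketch (hedged): assumes a Robot Framework version that
# exposes ExecutionResult via robot.api; the file names are placeholders.
if __name__ == '__main__':
    from robot.api import ExecutionResult
    result = ExecutionResult('output.xml')   # parse an existing output file
    writer = OutputWriter('rewritten.xml')   # serialize it back via the visitor API
    result.visit(writer)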
|
{
"content_hash": "0927d2041e1afcadd5bdccb148841e62",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 29.692307692307693,
"alnum_prop": 0.6778929188255614,
"repo_name": "qitaos/robotframework-mabot",
"id": "21ad4b70c5b30ae1697244fd31f727b1ceef3d4f",
"size": "1764",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/robot/reporting/outputwriter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11560"
},
{
"name": "HTML",
"bytes": "84841"
},
{
"name": "JavaScript",
"bytes": "38214"
},
{
"name": "Python",
"bytes": "1288243"
},
{
"name": "Shell",
"bytes": "32"
}
],
"symlink_target": ""
}
|
"""RPC proxy, allows both client/server to connect and match connection.
In normal RPC, the client connects directly to the server's IP address.
Sometimes this cannot be done when the server does not have a static address.
RPCProxy allows both the client and the server to connect to the proxy server;
the proxy server will forward the messages between the client and the server.
"""
# pylint: disable=unused-variable, unused-argument
import os
import asyncio
import logging
import socket
import threading
import errno
import struct
import time
try:
import tornado
from tornado import gen
from tornado import websocket
from tornado import ioloop
from . import tornado_util
except ImportError as error_msg:
raise ImportError(
"RPCProxy module requires tornado package %s. Try 'pip install tornado'." % error_msg
)
from tvm.contrib.popen_pool import PopenWorker
from . import _ffi_api
from . import base
from .base import TrackerCode
from .server import _server_env
from .._ffi.base import py_str
class ForwardHandler(object):
"""Forward handler to forward the message."""
def _init_handler(self):
"""Initialize handler."""
self._init_message = bytes()
self._init_req_nbytes = 4
self._magic = None
self.timeout = None
self._rpc_key_length = None
self._done = False
self._proxy = ProxyServerHandler.current
assert self._proxy
self.rpc_key = None
self.match_key = None
self.forward_proxy = None
self.alloc_time = None
def __del__(self):
logging.info("Delete %s...", self.name())
def name(self):
"""Name of this connection."""
return "RPCConnection"
def _init_step(self, message):
if self._magic is None:
assert len(message) == 4
self._magic = struct.unpack("<i", message)[0]
if self._magic != base.RPC_MAGIC:
logging.info("Invalid RPC magic from %s", self.name())
self.close()
self._init_req_nbytes = 4
elif self._rpc_key_length is None:
assert len(message) == 4
self._rpc_key_length = struct.unpack("<i", message)[0]
self._init_req_nbytes = self._rpc_key_length
elif self.rpc_key is None:
assert len(message) == self._rpc_key_length
self.rpc_key = py_str(message)
# match key is used to do the matching
self.match_key = self.rpc_key[7:].split()[0]
self.on_start()
else:
assert False
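    # Handshake consumed by _init_step, in order (integers are little-endian):
    #   4 bytes: RPC magic (base.RPC_MAGIC)
    #   4 bytes: length N of the rpc key
    #   N bytes: the rpc key, typically prefixed "server:" or "client:",
    #            e.g. b"server:worker0"
    # The token after the 7-byte prefix becomes match_key, which the proxy
    # uses to pair a client with the corresponding server.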
def on_start(self):
"""Event when the initialization is completed"""
self._proxy.handler_ready(self)
def on_data(self, message):
"""on data"""
assert isinstance(message, bytes)
if self.forward_proxy:
self.forward_proxy.send_data(message)
else:
while message and self._init_req_nbytes > len(self._init_message):
nbytes = self._init_req_nbytes - len(self._init_message)
self._init_message += message[:nbytes]
message = message[nbytes:]
if self._init_req_nbytes == len(self._init_message):
temp = self._init_message
self._init_req_nbytes = 0
self._init_message = bytes()
self._init_step(temp)
if message:
logging.info("Invalid RPC protocol, too many bytes %s", self.name())
self.close()
def on_error(self, err):
logging.info("%s: Error in RPC %s", self.name(), err)
self.close_pair()
def close_pair(self):
if self.forward_proxy:
self.forward_proxy.signal_close()
self.forward_proxy = None
self.close()
def on_close_event(self):
"""on close event"""
assert not self._done
logging.info("RPCProxy:on_close_event %s ...", self.name())
if self.match_key:
key = self.match_key
if self._proxy._client_pool.get(key, None) == self:
self._proxy._client_pool.pop(key)
if self._proxy._server_pool.get(key, None) == self:
self._proxy._server_pool.pop(key)
self._done = True
self.forward_proxy = None
class TCPHandler(tornado_util.TCPHandler, ForwardHandler):
"""Event driven TCP handler."""
def __init__(self, sock, addr):
super(TCPHandler, self).__init__(sock)
self._init_handler()
self.addr = addr
def name(self):
return "TCPSocketProxy:%s:%s" % (str(self.addr[0]), self.rpc_key)
def send_data(self, message, binary=True):
self.write_message(message, True)
def on_message(self, message):
self.on_data(message)
def on_close(self):
logging.info("RPCProxy: on_close %s ...", self.name())
self._close_process = True
if self.forward_proxy:
self.forward_proxy.signal_close()
self.forward_proxy = None
self.on_close_event()
class WebSocketHandler(websocket.WebSocketHandler, ForwardHandler):
"""Handler for websockets."""
def __init__(self, *args, **kwargs):
super(WebSocketHandler, self).__init__(*args, **kwargs)
self._init_handler()
def name(self):
return "WebSocketProxy:%s" % (self.rpc_key)
def on_message(self, message):
self.on_data(message)
def data_received(self, _):
raise NotImplementedError()
def send_data(self, message):
try:
self.write_message(message, True)
except websocket.WebSocketClosedError as err:
self.on_error(err)
def on_close(self):
logging.info("RPCProxy: on_close %s ...", self.name())
if self.forward_proxy:
self.forward_proxy.signal_close()
self.forward_proxy = None
self.on_close_event()
def signal_close(self):
self.close()
class RequestHandler(tornado.web.RequestHandler):
"""Handles html request."""
def __init__(self, *args, **kwargs):
file_path = kwargs.pop("file_path")
if file_path.endswith("html"):
self.page = open(file_path).read()
web_port = kwargs.pop("rpc_web_port", None)
if web_port:
self.page = self.page.replace(
"ws://localhost:9190/ws", "ws://localhost:%d/ws" % web_port
)
else:
self.page = open(file_path, "rb").read()
super(RequestHandler, self).__init__(*args, **kwargs)
def data_received(self, _):
pass
def get(self, *args, **kwargs):
self.write(self.page)
class ProxyServerHandler(object):
"""Internal proxy server handler class."""
current = None
def __init__(
self,
sock,
listen_port,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page=None,
resource_files=None,
):
assert ProxyServerHandler.current is None
ProxyServerHandler.current = self
if web_port:
handlers = [
(r"/ws", WebSocketHandler),
]
if index_page:
handlers.append(
(r"/", RequestHandler, {"file_path": index_page, "rpc_web_port": web_port})
)
logging.info("Serving RPC index html page at http://localhost:%d", web_port)
resource_files = resource_files if resource_files else []
for fname in resource_files:
basename = os.path.basename(fname)
pair = (r"/%s" % basename, RequestHandler, {"file_path": fname})
handlers.append(pair)
logging.info(pair)
self.app = tornado.web.Application(handlers)
self.app.listen(web_port)
self.sock = sock
self.sock.setblocking(0)
self.loop = ioloop.IOLoop.current()
def event_handler(_, events):
self._on_event(events)
self.loop.add_handler(self.sock.fileno(), event_handler, self.loop.READ)
self._client_pool = {}
self._server_pool = {}
self.timeout_alloc = 5
self.timeout_client = timeout_client
self.timeout_server = timeout_server
# tracker information
self._listen_port = listen_port
self._tracker_addr = tracker_addr
self._tracker_conn = None
self._tracker_pending_puts = []
self._key_set = set()
self.update_tracker_period = 2
if tracker_addr:
logging.info("Tracker address:%s", str(tracker_addr))
def _callback():
self._update_tracker(True)
self.loop.call_later(self.update_tracker_period, _callback)
logging.info("RPCProxy: Websock port bind to %d", web_port)
def _on_event(self, _):
while True:
try:
conn, addr = self.sock.accept()
TCPHandler(conn, addr)
except socket.error as err:
if err.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
break
def _pair_up(self, lhs, rhs):
lhs.forward_proxy = rhs
rhs.forward_proxy = lhs
lhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS))
lhs.send_data(struct.pack("<i", len(rhs.rpc_key)))
lhs.send_data(rhs.rpc_key.encode("utf-8"))
rhs.send_data(struct.pack("<i", base.RPC_CODE_SUCCESS))
rhs.send_data(struct.pack("<i", len(lhs.rpc_key)))
rhs.send_data(lhs.rpc_key.encode("utf-8"))
logging.info("Pairup connect %s and %s", lhs.name(), rhs.name())
def _regenerate_server_keys(self, keys):
"""Regenerate keys for server pool"""
keyset = set(self._server_pool.keys())
new_keys = []
# re-generate the server match key, so old information is invalidated.
for key in keys:
rpc_key, _ = key.split(":")
handle = self._server_pool[key]
del self._server_pool[key]
new_key = base.random_key(rpc_key + ":", keyset)
self._server_pool[new_key] = handle
keyset.add(new_key)
new_keys.append(new_key)
return new_keys
def _update_tracker(self, period_update=False):
"""Update information on tracker."""
try:
if self._tracker_conn is None:
self._tracker_conn = socket.socket(
base.get_addr_family(self._tracker_addr), socket.SOCK_STREAM
)
self._tracker_conn.connect(self._tracker_addr)
self._tracker_conn.sendall(struct.pack("<i", base.RPC_TRACKER_MAGIC))
magic = struct.unpack("<i", base.recvall(self._tracker_conn, 4))[0]
if magic != base.RPC_TRACKER_MAGIC:
self.loop.stop()
raise RuntimeError("%s is not RPC Tracker" % str(self._tracker_addr))
# just connect to tracker, need to update all keys
self._tracker_pending_puts = self._server_pool.keys()
if self._tracker_conn and period_update:
# periodically update tracker information
# regenerate key if the key is not in tracker anymore
# and there is no in-coming connection after timeout_alloc
base.sendjson(self._tracker_conn, [TrackerCode.GET_PENDING_MATCHKEYS])
pending_keys = set(base.recvjson(self._tracker_conn))
update_keys = []
for k, v in self._server_pool.items():
if k not in pending_keys:
if v.alloc_time is None:
v.alloc_time = time.time()
elif time.time() - v.alloc_time > self.timeout_alloc:
update_keys.append(k)
v.alloc_time = None
if update_keys:
logging.info(
"RPCProxy: No incoming conn on %s, regenerate keys...", str(update_keys)
)
new_keys = self._regenerate_server_keys(update_keys)
self._tracker_pending_puts += new_keys
need_update_info = False
# report new connections
for key in self._tracker_pending_puts:
rpc_key = key.split(":")[0]
base.sendjson(
self._tracker_conn, [TrackerCode.PUT, rpc_key, (self._listen_port, key), None]
)
assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS
if rpc_key not in self._key_set:
self._key_set.add(rpc_key)
need_update_info = True
if need_update_info:
keylist = "[" + ",".join(self._key_set) + "]"
cinfo = {"key": "server:proxy" + keylist, "addr": [None, self._listen_port]}
base.sendjson(self._tracker_conn, [TrackerCode.UPDATE_INFO, cinfo])
assert base.recvjson(self._tracker_conn) == TrackerCode.SUCCESS
self._tracker_pending_puts = []
except (socket.error, IOError) as err:
logging.info(
"Lost tracker connection: %s, try reconnect in %g sec",
str(err),
self.update_tracker_period,
)
self._tracker_conn.close()
self._tracker_conn = None
self._regenerate_server_keys(self._server_pool.keys())
if period_update:
def _callback():
self._update_tracker(True)
self.loop.call_later(self.update_tracker_period, _callback)
def _handler_ready_tracker_mode(self, handler):
"""tracker mode to handle handler ready."""
if handler.rpc_key.startswith("server:"):
key = base.random_key(handler.match_key + ":", self._server_pool)
handler.match_key = key
self._server_pool[key] = handler
self._tracker_pending_puts.append(key)
self._update_tracker()
else:
if handler.match_key in self._server_pool:
self._pair_up(self._server_pool.pop(handler.match_key), handler)
else:
handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH))
handler.signal_close()
def _handler_ready_proxy_mode(self, handler):
"""Normal proxy mode when handler is ready."""
if handler.rpc_key.startswith("server:"):
pool_src, pool_dst = self._client_pool, self._server_pool
timeout = self.timeout_server
else:
pool_src, pool_dst = self._server_pool, self._client_pool
timeout = self.timeout_client
key = handler.match_key
if key in pool_src:
self._pair_up(pool_src.pop(key), handler)
return
if key not in pool_dst:
pool_dst[key] = handler
def cleanup():
"""Cleanup client connection if timeout"""
if pool_dst.get(key, None) == handler:
logging.info(
"Timeout client connection %s, cannot find match key=%s",
handler.name(),
key,
)
pool_dst.pop(key)
handler.send_data(struct.pack("<i", base.RPC_CODE_MISMATCH))
handler.signal_close()
self.loop.call_later(timeout, cleanup)
else:
logging.info("Duplicate connection with same key=%s", key)
handler.send_data(struct.pack("<i", base.RPC_CODE_DUPLICATE))
handler.signal_close()
def handler_ready(self, handler):
"""Report handler to be ready."""
logging.info("Handler ready %s", handler.name())
if self._tracker_addr:
self._handler_ready_tracker_mode(handler)
else:
self._handler_ready_proxy_mode(handler)
def run(self):
"""Run the proxy server"""
ioloop.IOLoop.current().start()
def _proxy_server(
listen_sock,
listen_port,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page,
resource_files,
):
asyncio.set_event_loop(asyncio.new_event_loop())
handler = ProxyServerHandler(
listen_sock,
listen_port,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page,
resource_files,
)
handler.run()
class PopenProxyServerState(object):
"""Internal PopenProxy State for Popen"""
current = None
def __init__(
self,
host,
port=9091,
port_end=9199,
web_port=0,
timeout_client=600,
timeout_server=600,
tracker_addr=None,
index_page=None,
resource_files=None,
):
sock = socket.socket(base.get_addr_family((host, port)), socket.SOCK_STREAM)
self.port = None
for my_port in range(port, port_end):
try:
sock.bind((host, my_port))
self.port = my_port
break
except socket.error as sock_err:
if sock_err.errno in [errno.EADDRINUSE]:
continue
raise sock_err
if not self.port:
raise ValueError("cannot bind to any port in [%d, %d)" % (port, port_end))
logging.info("RPCProxy: client port bind to %s:%d", host, self.port)
sock.listen(1)
self.thread = threading.Thread(
target=_proxy_server,
args=(
sock,
self.port,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page,
resource_files,
),
)
# start the server in a different thread
# so we can return the port directly
self.thread.start()
def _popen_start_proxy_server(
host,
port=9091,
port_end=9199,
web_port=0,
timeout_client=600,
timeout_server=600,
tracker_addr=None,
index_page=None,
resource_files=None,
):
# This is a function that will be sent to the
# Popen worker to run on a separate process.
# Create and start the server in a different thread
state = PopenProxyServerState(
host,
port,
port_end,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page,
resource_files,
)
PopenProxyServerState.current = state
# returns the port so that the main can get the port number.
return state.port
class Proxy(object):
"""Start RPC proxy server on a seperate process.
Python implementation based on PopenWorker.
Parameters
----------
host : str
The host url of the server.
port : int
The TCP port to be bind to
port_end : int, optional
The end TCP port to search
web_port : int, optional
The http/websocket port of the server.
timeout_client : float, optional
Timeout of client until it sees a matching connection.
timeout_server : float, optional
Timeout of server until it sees a matching connection.
    tracker_addr : Tuple (str, int), optional
        The address of the RPC Tracker in (host, port) format.
        If not None, the server will register itself to the tracker.
index_page : str, optional
Path to an index page that can be used to display at proxy index.
resource_files : str, optional
Path to local resources that can be included in the http request
"""
def __init__(
self,
host,
port=9091,
port_end=9199,
web_port=0,
timeout_client=600,
timeout_server=600,
tracker_addr=None,
index_page=None,
resource_files=None,
):
self.proc = PopenWorker()
# send the function
self.proc.send(
_popen_start_proxy_server,
[
host,
port,
port_end,
web_port,
timeout_client,
timeout_server,
tracker_addr,
index_page,
resource_files,
],
)
# receive the port
self.port = self.proc.recv()
self.host = host
def terminate(self):
"""Terminate the server process"""
if self.proc:
logging.info("Terminating Proxy Server...")
self.proc.kill()
self.proc = None
def __del__(self):
self.terminate()
def websocket_proxy_server(url, key=""):
"""Create a RPC server that uses an websocket that connects to a proxy.
Parameters
----------
url : str
        The URL to connect to.
key : str
The key to identify the server.
"""
def create_on_message(conn):
def _fsend(data):
data = bytes(data)
conn.write_message(data, binary=True)
return len(data)
on_message = _ffi_api.CreateEventDrivenServer(_fsend, "WebSocketProxyServer", "%toinit")
return on_message
@gen.coroutine
def _connect(key):
conn = yield websocket.websocket_connect(url)
on_message = create_on_message(conn)
temp = _server_env(None)
        # Start the connection
conn.write_message(struct.pack("<i", base.RPC_MAGIC), binary=True)
key = "server:" + key
conn.write_message(struct.pack("<i", len(key)), binary=True)
conn.write_message(key.encode("utf-8"), binary=True)
msg = yield conn.read_message()
assert len(msg) >= 4
magic = struct.unpack("<i", msg[:4])[0]
if magic == base.RPC_CODE_DUPLICATE:
raise RuntimeError("key: %s has already been used in proxy" % key)
if magic == base.RPC_CODE_MISMATCH:
logging.info("RPCProxy do not have matching client key %s", key)
elif magic != base.RPC_CODE_SUCCESS:
raise RuntimeError("%s is not RPC Proxy" % url)
msg = msg[4:]
logging.info("Connection established with remote")
if msg:
on_message(bytearray(msg), 3)
while True:
try:
msg = yield conn.read_message()
if msg is None:
break
on_message(bytearray(msg), 3)
except websocket.WebSocketClosedError as err:
break
logging.info("WebSocketProxyServer closed...")
temp.remove()
ioloop.IOLoop.current().stop()
ioloop.IOLoop.current().spawn_callback(_connect, key)
ioloop.IOLoop.current().start()
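# A minimal launch sketch (hedged): the host and ports below are placeholders,
# and in practice the proxy is typically started through TVM's bundled launcher
# (e.g. "python -m tvm.exec.rpc_proxy") rather than by importing this module.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Both RPC clients and RPC servers connect to this port; connections that
    # present matching keys are paired up and forwarded to each other.
    example_proxy = Proxy("0.0.0.0", port=9091, web_port=8888)
    print("RPCProxy listening on port", example_proxy.port)
    while True:
        # keep the parent alive so the proxy worker process is not terminated
        time.sleep(60)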
|
{
"content_hash": "ed8d49d574850e83333fe9e075be1d3b",
"timestamp": "",
"source": "github",
"line_count": 693,
"max_line_length": 98,
"avg_line_length": 33.086580086580085,
"alnum_prop": 0.5528370186227048,
"repo_name": "dmlc/tvm",
"id": "c3b0056eb591c83b53016d90b34a899a20bd0359",
"size": "23714",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/tvm/rpc/proxy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
import numpy as np
import time, math
import matplotlib.pyplot as plt
import pylab
from scipy.interpolate import splprep, splev
import Op, Interface
from GCore import Label
import ISCV
plt.switch_backend('Qt4Agg')
class Track2D(Op.Op):
def __init__(self, name='/Track 2D', locations='', x2dThreshold=0.012, frameRange=''):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('locations', 'Locations', 'locations', 'string', locations, {}),
('x2d_threshold', 'X2D Threshold', 'X2D Threshold', 'float', x2dThreshold, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, fields)
self.tracker = None
def flush(self):
self.tracker = None
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
x2ds = interface.attr('x2ds')
x2ds_splits = interface.attr('x2ds_splits')
if x2ds is None or x2ds_splits is None: return
# TODO: Cache this
if self.tracker is None:
self.tracker = Label.Track2D(len(x2ds_splits) - 1, x2d_threshold=attrs['x2d_threshold'])
self.tracker.push(x2ds, x2ds_splits)
interface.setAttr('labels', self.tracker.labels)
class Track3D(Op.Op):
def __init__(self, name='/Track 3D', locations='', detections='', calibration='',
x2d_threshold=6./2000, pred_2d_threshold=100./2000, x3d_threshold=30,
tilt_threshold=0.0002, min_rays=3, numPolishIts=3, forceRayAgreement=True, boot=True, bootInterval=10,
skeleton='', pointSize=12.0, colour=(0.8, 0.0, 0.8, 0.7), intersect_threshold=100., generateNormals=False,
showContributions=False, frameRange='', enable=False):
fields = [
('name', 'Name', 'Name', 'string', name, {}),
('locations', 'Locations', 'Locations', 'string', locations, {}),
('enable', 'enable', 'enable', 'bool', enable, {}),
('detections', 'Detections location', 'Detections location', 'string', detections, {}),
('calibration', 'Calibration', 'Calibration location', 'string', calibration, {}),
('x2d_threshold', '2D threshold', '2D threshold', 'float', x2d_threshold, {}),
('pred_2d_threshold', '2D threshold prediction', '2D threshold prediction', 'float', pred_2d_threshold, {}),
('x3d_threshold', '3D threshold', '3D threshold', 'float', x3d_threshold, {}),
            ('tilt_threshold', 'Tilt threshold', 'Tilt threshold', 'float', tilt_threshold, {}),
('min_rays', 'Min. rays', 'Minimum number of intersecting rays', 'int', min_rays, {}),
('numPolishIts', '# Polish its.', 'Number of polish iterations', 'int', numPolishIts, {'min': 1}),
('forceRayAgreement', 'Ray agreement', 'Force ray agreement', 'bool', forceRayAgreement, {}),
('boot', 'Enable booting', 'Enable booting', 'bool', boot, {}),
('boot_interval', 'Boot interval', 'Boot interval', 'int', bootInterval, {}),
('skeleton', 'Skeleton', 'Skeleton with visibility LODs', 'string', skeleton, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {}),
('colour', '3D Point colour', '3D Point colour', 'string', str(colour), {}),
('intersect_threshold', 'Intersect threshold', 'Intersect threshold', 'float', intersect_threshold, {}),
('generateNormals', 'Generate normals', 'Generate normals for visibility checks', 'bool', generateNormals, {}),
('show_contributions', 'Show contributions', 'Show camera contributions', 'bool', showContributions, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, fields)
self.flush()
def flush(self):
self.tracker = None
self.boot = False
self.visibility = None
self.cameraPositions = None
self.frames = []
self.x3ds = None
self.x3ds_labels = None
self.lastFrame = -1
def cook(self, location, interface, attrs):
if not attrs['enable']: return
if not self.useFrame(interface.frame(), attrs['frameRange']):
self.lastFrame = interface.frame()
return
if not attrs['calibration'] or not attrs['detections']: return
if interface.frame() == self.lastFrame and not interface.isDirty(): return
# Get 2D data and push to the tracker
detections = attrs['detections']
x2ds_data = interface.attr('x2ds', atLocation=detections)
x2ds_splits = interface.attr('x2ds_splits', atLocation=detections)
if x2ds_data is None or x2ds_splits is None:
self.logger.error('No 2D data found at: %s' % detections)
return
settings = Label.PushSettings()
calibrationLocation = attrs['calibration']
if not calibrationLocation: calibrationLocation = interface.root()
self.mats = interface.attr('mats', atLocation=calibrationLocation)
if not self.mats: return
        # Make sure we've got as many calibration matrices as the number of cameras with detections
numCamsWithDets = len(x2ds_splits) - 1
if numCamsWithDets != len(self.mats):
# TODO: Don't allow going further, return
# NOTE: Temp for Reframe
self.mats = self.mats[:numCamsWithDets]
if 'skeleton' in attrs and attrs['skeleton']:
skeletonLoc = attrs['skeleton']
skelDict = interface.attr('skelDict', atLocation=skeletonLoc)
visibilityLod = interface.getChild('visibilityLod', parent=skeletonLoc)
if visibilityLod is None:
self.logger.warning('No visibility LODs found at skeleton: %s' % attrs['skeleton'])
return
lodTris = visibilityLod['tris']
lodVerts = visibilityLod['verts']
lodNormals = visibilityLod['faceNormals']
settings.useVisibility = True
settings.generateNormals = True
settings.triangles = lodVerts[lodTris]
settings.triangleNormals = np.concatenate((lodNormals))
settings.cameraPositions = np.array([m[4] for m in self.mats], dtype=np.float32)
settings.intersect_threshold = 100.
tris = lodVerts[lodTris]
cameraPositions = np.array([m[4] for m in self.mats], dtype=np.float32)
if self.visibility is None: self.visibility = ISCV.ProjectVisibility.create()
self.visibility.setLods(tris, cameraPositions, np.concatenate((lodNormals)),
attrs['intersect_threshold'], attrs['generateNormals'])
settings.visibility = self.visibility
settings.numPolishIts = attrs['numPolishIts']
settings.forceRayAgreement = attrs['forceRayAgreement']
if self.tracker is None:
self.tracker = Label.Track3D(self.mats, attrs['x2d_threshold'], attrs['pred_2d_threshold'], attrs['x3d_threshold'],
attrs['tilt_threshold'], attrs['min_rays'], boot_interval=attrs['boot_interval'])
# booting = interface.attr('booting', atLocation='/root')
if not self.boot and attrs['boot']: #booting == 0 or not self.boot:# and attrs['boot']: #self.tracker.next_id == 0:
self.x3ds, x2ds_labels = self.tracker.boot(x2ds_data, x2ds_splits, settings=settings)
self.boot = True
else:
self.x3ds, x2ds_labels = self.tracker.push(x2ds_data, x2ds_splits, settings=settings)
trackAttrs = {
'x3ds': self.x3ds,
'x3ds_labels': self.tracker.x3ds_labels,
'x3ds_colour': eval(attrs['colour']),
'x3ds_pointSize': attrs['pointSize']
}
if attrs['show_contributions']:
# Find which cameras contribute to the 3D reconstructions (optional?)
trackAttrs['camerasLocation'] = calibrationLocation
trackAttrs['showCameraContributions'] = attrs['show_contributions']
# trackAttrs['cameraPositions'] = self.cameraPositions
trackAttrs['labels'] = x2ds_labels
trackAttrs['x2ds_splits'] = x2ds_splits
# interface.setAttr('labels', self.tracker.x2ds_labels, atLocation=attrs['detections'])
interface.createChild(interface.name(), 'points3d', atLocation=interface.parentPath(), attrs=trackAttrs)
self.frames.append(interface.frame())
interface.setAttr('labels', x2ds_labels, atLocation=detections)
# Show labelled detections as green for clarity
labelColour = interface.attr('x2ds_colour', atLocation=detections)
labelColours = interface.getLabelColours(x2ds_labels, labelColour)
if labelColours.any():
numLabelled = len(np.unique(x2ds_labels)) - 1
# self.logger.info('# Labelled: %d' % numLabelled)
interface.setAttr('x2ds_colours', labelColours, atLocation=detections)
self.lastFrame = interface.frame()
        # Publish the tracker at the root so downstream ops (e.g. the tracking
        # Model with use3dTracks enabled) can pick it up
        interface.setAttr('model', self.tracker, atLocation='/root')
class Model(Op.Op):
def __init__(self, name='/Tracking Model', locations='', detections='', calibration='', tracking='', its=1, normals=False,
x2d_threshold=20./2000, pred_2d_threshold=100./2000, x3d_threshold=30, boot=False, unlabelledPenalty=100.0,
maxHypotheses=500, bootIts=5, mesh='', useWeights=False, useVisibility=False, visibilityLod='',
intersection_threshold=100., generateNormals=False, showContributions=True, pointSize=8.,
colour=(0.8, 0.8, 0., 0.7), showLabelAssignment=True, visualiseLabels=False, frameRange='',
showLabellingGraph=False, bootResetTo=10, bootReset=False, forceBoot=False, enable=False,
use3dTracks=False):
fields = [
('name', 'Name', 'name', 'string', name, {}),
('locations', 'Skeleton locations', 'Skeleton locations', 'string', locations, {}),
('enable', 'enable', 'enable', 'bool', enable, {}),
('detections', 'Detections location', 'Detections location', 'string', detections, {}),
('calibration', 'Calibration location', 'Calibration location', 'string', calibration, {}),
('tracking', 'Tracking location', '3D Tracking location', 'string', tracking, {}),
('its', '# Iterations', 'Number of iterations', 'int', its, {}),
('normals', 'Use normals', 'Use normals if available', 'bool', normals, {}),
('x2d_threshold', '2D Threshold', '2D Threshold', 'float', x2d_threshold, {}),
('pred_2d_threshold', '2D Threshold Prediction', '2D Threshold Prediction', 'float', pred_2d_threshold, {}),
('x3d_threshold', '3D Threshold', '3D Threshold', 'float', x3d_threshold, {}),
('boot', 'Boot Labels', 'Boot Labels', 'bool', boot, {}),
('maxHypotheses', '# Max. Hypotheses', 'Number of hypotheses to maintain', 'int', maxHypotheses, {}),
('unlabelledPenalty', 'Unlabelled Penalty', 'Penalty for unlabelled points', 'float', unlabelledPenalty, {}),
('bootIts', 'Boot iterations', 'Boot iterations', 'int', bootIts, {}),
('use3dTracks', 'Use 3D tracks', 'Use 3D tracks', 'bool', use3dTracks, {}),
('mesh', 'Mesh', 'Mesh location', 'string', mesh, {}),
('useWeights', 'Use weights', 'Use weights', 'bool', useWeights, {}),
('useVisibility', 'Visibility check', 'Do a visibility check if possible', 'bool', useVisibility, {}),
('visibilityLod', 'Visibility LOD location', 'Visibility LOD location', 'string', visibilityLod, {}),
('intersection_threshold', 'Intersection threshold', 'Intersection threshold', 'float', intersection_threshold, {}),
('generateNormals', 'Generate normals', 'Generate normals for visibility checks', 'bool', generateNormals, {}),
('show_contributions', 'Show contributions', 'Show camera contributions', 'bool', showContributions, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {}),
('colour', '3D Point colour', '3D Point colour', 'string', str(colour), {}),
('showLabelAssignment', 'Show label assignment', 'Show label assignment | unlabelled (R), labelled (G), 1-ray (G)', 'bool', showLabelAssignment, {}),
('visualiseLabels', 'Visualise labels', 'Visualise labels', 'bool', visualiseLabels, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {}),
('showLabellingGraph', 'Show labelling graph', 'Show labelling graph', 'bool', showLabellingGraph, {}),
('bootResetTo', 'Boot reset to', 'Boot reset to (skipping or manual)', 'int', bootResetTo, {}),
('bootReset', 'Boot reset', 'Boot reset', 'bool', bootReset, {}),
('forceBoot', 'Force boot', 'Force boot', 'bool', forceBoot, {}) # Temp
]
        super(self.__class__, self).__init__(name, fields)
self.flush()
self.trackerDirty = False
self.lastFrame = -1
def flush(self):
self.cameraPositions = None
self.model = None
self.visibility = None
self.booting = None
self.Ps = None
def update(self):
self.trackerDirty = True
def getEffectorLabels(self, skelDict):
if isinstance(skelDict['markerNames'][0], str):
try:
effectorLabels = np.array([int(mn) for mn in skelDict['markerNames']], dtype=np.int32)
except:
skelDict['labelNames'] = list(np.unique(skelDict['markerNames']))
effectorLabels = np.array(
[skelDict['labelNames'].index(ln) if ln in skelDict['labelNames'] else -1 for ln in skelDict['markerNames']],
dtype=np.int32)
else:
effectorLabels = np.array(skelDict['markerNames'], dtype=np.int32)
return effectorLabels
def cook(self, location, interface, attrs):
if not attrs['enable']: return
if not self.useFrame(interface.frame(), attrs['frameRange']):
self.lastFrame = interface.frame()
return
if interface.frame() == self.lastFrame and not interface.isDirty(): return
if self.booting is None: self.booting = attrs['bootResetTo']
its = attrs['its']
normals = attrs['normals']
x2d_threshold = attrs['x2d_threshold']
pred_2d_threshold = attrs['pred_2d_threshold']
x3d_threshold = attrs['x3d_threshold']
detections = attrs['detections']
if not location or not detections: return
# Define push settings for track model
settings = Label.PushSettings()
settings.useWeights = attrs['useWeights']
# Get skeleton
skelDict = interface.attr('skelDict')
if skelDict is None:
self.logger.error('No skeleton dictionary found!')
return
# Get calibration
calibrationLocation = attrs['calibration']
if not calibrationLocation: calibrationLocation = interface.root()
mats = interface.attr('mats', atLocation=calibrationLocation)
if not mats:
self.logger.error('No mats found at: %s' % calibrationLocation)
return
if self.cameraPositions is None: self.cameraPositions = np.array([m[4] for m in mats], dtype=np.float32)
# Note: This should be split into x2ds and x2ds_splits (temporarily support both cases)
# data = interface.attr('data', atLocation=detections)
data = None
if data is not None:
x2ds_data, x2ds_splits = data
else:
x2ds_data = interface.attr('x2ds', atLocation=detections)
x2ds_splits = interface.attr('x2ds_splits', atLocation=detections)
if x2ds_data is None or x2ds_splits is None:
# self.logger.info('Could not find detection data at: %s' % detections)
self.logger.error('Could not find 2D data (x2ds, x2ds_splits) at: %s' % detections)
return
Ps = interface.attr('Ps', atLocation=calibrationLocation)
if Ps is None:
if self.Ps is None: self.Ps = np.array([m[2] / (np.sum(m[2][0, :3] ** 2) ** 0.5) for m in mats], dtype=np.float32)
Ps = self.Ps
# Make sure we've got as many calibration matrices as there are cameras with detections
numCamsWithDets = len(x2ds_splits) - 1
if numCamsWithDets != len(mats):
# TODO: Don't allow going further, return
# NOTE: Temp for Reframe
mats = mats[:numCamsWithDets]
Ps = Ps[:numCamsWithDets]
if self.model is None or interface.isDirty():
if 'markerNames' not in skelDict:
self.logger.error('No markerNames found in skeleton!')
return
if len(skelDict['markerNames']) == 0:
self.logger.error('No markers in skeleton markerNames!')
return
effectorLabels = self.getEffectorLabels(skelDict)
self.model = Label.TrackModel(skelDict, effectorLabels, mats, x2d_threshold, pred_2d_threshold, x3d_threshold)
# Check if we want to use a 3D tracker to provide 3D data for the tracking model
if attrs['use3dTracks']:
self.logger.info('Using 3D Tracks')
self.model.track3d = interface.attr('model', atLocation='/root')
# self.model.track3d = Label.Track3D(mats, 6./2000., 100./2000., 30., boot_interval=1)
settings.numPolishIts = 3
settings.forceRayAgreement = True
# Attempt to pick up skeleton root mat if present
try:
rootMat = interface.attr('rootMat')
if rootMat.any():
self.model.rootMat = rootMat
except:
pass # Probably no skeleton in the scene
if normals:
if attrs['mesh'] and interface.hasAttr('normals', atLocation=attrs['mesh']):
settings.x3ds_normals = interface.attr('normals', atLocation=attrs['mesh'])
if 'markerNormals' in skelDict:
settings.x3ds_normals = skelDict['markerNormals']
if self.visibility is None: self.visibility = ISCV.ProjectVisibility.create()
self.visibility.setNormals(settings.x3ds_normals)
if interface.frame() == 0:
self.model.bootLabels(x2ds_data, x2ds_splits)
# Check if we should boot (and have all the attributes we need)
# Check boot countdown
graph = interface.attr('label_graph')
if attrs['boot']: self.booting -= 1
if (attrs['boot'] and self.booting == 0) or (attrs['forceBoot']):
if not graph:
self.logger.error('Could not boot because the label graph was not found!')
return
trackingLocation = attrs['tracking']
if not trackingLocation: trackingLocation = location
_x3ds = interface.attr('x3ds', atLocation=trackingLocation)
if _x3ds is None:
self.logger.error('Could not boot because the x3ds were not found at: %s' % trackingLocation)
return
self.logger.info('Boot Pose...')
maxHyps = attrs['maxHypotheses']
penalty = attrs['unlabelledPenalty']
# if attrs['forceBoot']: self.booting = 1 #attrs['bootResetTo']
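# label_from_graph assigns graph labels to the booted 3D points: it fills
# x3dIndicesForLabels with the chosen x3d index per graph label (-1 if unassigned)
# and returns a hypothesis score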
numGraphLabels = len(graph[0])
x3dIndicesForLabels = -np.ones(numGraphLabels, dtype=np.int32)
label_score = ISCV.label_from_graph(_x3ds, graph[0], graph[1], graph[2], graph[3], maxHyps, penalty, x3dIndicesForLabels)
clouds = ISCV.HashCloud2DList(x2ds_data, x2ds_splits, x2d_threshold)
whichLabels = np.array(np.where(x3dIndicesForLabels != -1)[0], dtype=np.int32)
x3ds = _x3ds[x3dIndicesForLabels[whichLabels]]
self.logger.info('Labelled %d out of %d markers' % (len(whichLabels), numGraphLabels))
interface.setAttr('numLabelled', len(whichLabels))
# if 'labelNames' in skelDict: labelNames = np.int32(skelDict['labelNames'])
# else: labelNames = np.arange(len(skelDict['markerNames']))
if 'labelNames' in skelDict: labelNames = np.int32(skelDict['markerNames']) # This will break Spader, DotsTool should change
else: labelNames = np.arange(len(skelDict['markerNames']))
x3ds_labels = np.array(skelDict['markerNames'], dtype=np.str)[whichLabels]
pras_score, x2d_labels, vels = Label.project_assign(clouds, x3ds, whichLabels, Ps, x2d_threshold=x2d_threshold)
self.logger.info('Frame: %d | Label score: %.2f | Pras score: %.2f' % (interface.frame(), label_score, pras_score))
# Initialise the pose using the assigned labels
bootScore = self.model.bootPose(x2ds_data, x2ds_splits, x2d_labels, its=attrs['bootIts'])
self.logger.info('Boot score: %.2f' % bootScore)
if False:
# Check distance after booting
from GCore import SolveIK
m_x3ds, m_x3ds_labels = SolveIK.skeleton_marker_positions(skelDict, skelDict['rootMat'], skelDict['chanValues'],
self.model.effectorLabels, self.model.effectorData,
skelDict['markerWeights'])
diffs = m_x3ds[whichLabels] - x3ds
meanDiff = np.mean(diffs, axis=0)
diffSum = np.linalg.norm(diffs)
self.logger.info('Mean 3D distance = {}'.format(meanDiff))
self.logger.info('Total 3D distance = %.2f' % diffSum)
# Character.pose_skeleton(skelDict['Gs'], skelDict)
else:
if False and not self.booting >= 0:
self.model.track3d = interface.attr('model', atLocation='/root')
# self.model.track3d = Label.Track3D(mats, 6./2000., 100./2000., 30., boot_interval=1)
settings.numPolishIts = 3
settings.forceRayAgreement = True
# Check if we've got visibility lods
if 'useVisibility' in attrs and attrs['useVisibility']:
settings.useVisibility = attrs['useVisibility']
settings.generateNormals = attrs['generateNormals']
if 'visibilityLod' in attrs and attrs['visibilityLod']:
visibilityLod = interface.location(attrs['visibilityLod'])
else:
visibilityLod = interface.getChild('visibilityLod')
if visibilityLod is None:
self.logger.error('No visibility LODs found at skeleton: %s' % location)
return
lodTris = visibilityLod['tris']
lodVerts = visibilityLod['verts']
lodNormals = visibilityLod['faceNormals']
settings.triangleNormals = np.concatenate((lodNormals))
if 'generateCb' in visibilityLod: settings.generateVisibilityLodsCb = visibilityLod['generateCb']
tris = lodVerts[lodTris]
if self.visibility is None: self.visibility = ISCV.ProjectVisibility.create()
self.visibility.setLods(tris, self.cameraPositions, np.concatenate((lodNormals)),
attrs['intersection_threshold'], attrs['generateNormals'])
if self.trackerDirty:
self.model.rebuildEffectorData(skelDict, self.getEffectorLabels(skelDict))
self.trackerDirty = False
# Allow overriding the 2D threshold using an attribute
settings.x2d_thresholdOverride = interface.attr('x2d_thresholdOverride')
settings.visibility = self.visibility
self.model.push(x2ds_data, x2ds_splits, its=its, settings=settings)
x3ds = self.model.x3ds
# x3ds = self.model.trackX3ds
x3ds_labels = self.model.x3d_labels
#if attrs['bootReset']: self.booting = attrs['bootResetTo']
if self.lastFrame != -1 and np.abs(interface.frame() - self.lastFrame) >= attrs['bootResetTo']:
self.booting = attrs['bootResetTo']
self.lastFrame = interface.frame()
# -- Grab all the information and update --
skelDict = self.model.skelDict
# Colour marker points based on labels if we have been given any (from a detection location)
# Not labelled: Red
# Labelled (more than one ray): Green
# Labelled (one ray): Blue
# start = time.time()
# TODO: Make efficient
x3ds_colours = np.array([], dtype=np.float32)
if attrs['visualiseLabels']:
x3ds_colours = np.tile((1, 0, 0, 0.7), (x3ds_labels.shape[0], 1))
labelHits = np.array([len(np.where(self.model.labels == x3d_label)[0]) for x3d_label in x3ds_labels], dtype=np.int32)
x3ds_colours[np.where(labelHits == 1)[0]] = (0, 0, 1, 0.7)
x3ds_colours[np.where(labelHits > 1)[0]] = (0, 1, 0, 0.7)
# print '> label hits:', (time.time() - start)
# Create reconstructed 3D points from the model
modelAttrs = {
'x3ds': x3ds,
'x3ds_labels': x3ds_labels,
'normals': settings.x3ds_normals,
'x3ds_colour': eval(attrs['colour']),
'x3ds_pointSize': attrs['pointSize'],
'x3ds_colours': x3ds_colours
}
modelAttrs['boot'] = attrs['boot'] and self.booting == 0
if attrs['showLabellingGraph'] and graph is not None:
edges = Label.find_graph_edges_for_labels(graph, self.model.x3d_labels)
modelAttrs['edges'] = edges
# Find which cameras contribute to the 3D reconstructions
# start = time.time()
cameraContributions = {}
if attrs['show_contributions']:
modelAttrs['showCameraContributions'] = attrs['show_contributions']
modelAttrs['camerasLocation'] = calibrationLocation
modelAttrs['x2ds_splits'] = x2ds_splits
modelAttrs['labels'] = self.model.labels
interface.createChild('reconstruction', 'points3d', attrs=modelAttrs)
if interface.attr('originalNormals') is not None:
n = []
normals = interface.attr('originalNormals').copy()
for ni, (parent, normal) in enumerate(zip(skelDict['markerParents'], normals)):
Gs = skelDict['Gs'][parent].copy()
n.append(np.dot(Gs[:3, :3], normal))
skelDict['markerNormals'] = np.float32(n)
# Update Skeleton data
interface.setAttr('skelDict', self.model.skelDict)
interface.setAttr('Gs', skelDict['Gs'].copy())
# NOTE: Shouldn't this be done in the update mesh op?
# (maybe good to keep it as an option if we make it efficient)
# Update mesh data if any
# if attrs['mesh']:
# vs, vs_labels = getWorldSpaceMarkerPos(skelDict)
# interface.setAttr('vs', vs, atLocation=attrs['mesh'])
# Add detection labels
interface.setAttr('labels', self.model.labels, atLocation=detections)
interface.setAttr('labels', self.model.labels)
# Show labelled detections as green for clarity
labelColour = interface.attr('x2ds_colour', atLocation=detections)
labelColours = interface.getLabelColours(self.model.labels, labelColour)
if labelColours.any():
# numLabelled = len(np.unique(self.model.labels)) - 1
# self.logger.info('# Labelled: %d' % len(numLabelled))
interface.setAttr('x2ds_colours', labelColours, atLocation=detections)
# Temporary hack to help improve labelled data
interface.setAttr('model', self.model)
class Error(Op.Op):
def __init__(self, name='/Track Error', locations='', source='', x3ds='', printRule=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'Skeleton locations', 'string', locations, {}),
('source', 'source', 'source skeleton location', 'string', source, {}),
('x3ds', '3D points', '3D points (optional)', 'string', x3ds, {}),
('printRule', 'Print on frames', 'Print on frames', 'string', printRule, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.numFrames = 0
self.effectorsDist = 0
self.minEffectorsDist = float('inf')
self.maxEffectorsDist = 0
self.jointsDiffs = 0
self.minJointDiff = float('inf')
self.maxJointDiff = 0
self.labelHits = 0
self.minLabelHits = 1
self.maxLabelHits = 0
self.stats = {
'frames': [],
'labels': [],
'effectors': [],
'joints': []
}
def cook(self, location, interface, attrs):
# Make sure we have some source to compare with
if not attrs['source']: return
# Get cooked skeleton
skelDict = interface.attr('skelDict')
if not skelDict: return
labels = interface.attr('labels')
if labels is None: return
# Get the reconstructions if we have any
# x3ds = None
# if 'x3ds' in attrs: x3ds = interface.attr('x3ds', atLocation=attrs['x3ds'])
# Get the source we want to compare against (assume for now that the source is a skeleton)
sourceSkelDict = interface.attr('skelDict', atLocation=attrs['source'])
if not sourceSkelDict: return
# Get effectors for ground truth skeleton
from GCore import SolveIK
effectorLabels_gt = np.array([int(mn) for mn in sourceSkelDict['markerNames']], dtype=np.int32)
effectorData_gt = SolveIK.make_effectorData(sourceSkelDict)
x3ds_gt, x3ds_labels_gt = SolveIK.skeleton_marker_positions(sourceSkelDict, sourceSkelDict['rootMat'],\
sourceSkelDict['chanValues'], effectorLabels_gt, \
effectorData_gt, sourceSkelDict['markerWeights'])
# Get effectors for target skeleton
effectorLabels = np.array([int(mn) for mn in skelDict['markerNames']], dtype=np.int32)
effectorData = SolveIK.make_effectorData(skelDict)
x3ds, x3ds_labels = SolveIK.skeleton_marker_positions(skelDict, skelDict['rootMat'], skelDict['chanValues'],
effectorLabels, effectorData, skelDict['markerWeights'])
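# Per-marker Euclidean distances between the solved and ground-truth effector positions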
d = (x3ds - x3ds_gt) ** 2
ed = np.sqrt(np.sum(d, axis=1))
totalEd = np.sum(ed)
minEd, maxEd = np.min(ed), np.max(ed)
self.minEffectorsDist = min(minEd, self.minEffectorsDist)
self.maxEffectorsDist = max(maxEd, self.maxEffectorsDist)
self.effectorsDist += totalEd
self.stats['effectors'].append(totalEd)
frame = interface.frame()
self.stats['frames'].append(frame)
self.numFrames += 1
# for vi, (v, d) in enumerate(zip(x3ds, ed)):
# pAttrs = {'x3ds': np.array([v], dtype=np.float32), 'x3ds_pointSize': np.sqrt(d) + 0.1, 'x3ds_colour': (0, 0, 0, 0.5)}
# interface.createChild('p_%d' % vi, 'points3d', attrs=pAttrs)
# Now that we have two skeletons, calculate distances between joints
dists = []
jointDiffs = 0
for jointName in skelDict['jointNames']:
d = []
for ci, (cv, cn) in enumerate(zip(sourceSkelDict['chanValues'], sourceSkelDict['chanNames'])):
if jointName in cn and cn[-2:] in ['rx', 'ry', 'rz']:
idx = skelDict['chanNames'].index(cn)
jointDiff = abs(skelDict['chanValues'][idx] - cv)
jointDiffs += jointDiff
self.jointsDiffs += jointDiff
d.append(jointDiff)
if d:
dists.append(np.array(d, dtype=np.float32))
allDists = np.concatenate((dists))
minJointDiff = abs(np.min(allDists))
maxJointDiff = abs(np.max(allDists))
# self.stats['joints'].append(np.sum(allDists))
self.stats['joints'].append(maxJointDiff)
self.minJointDiff = min(minJointDiff, self.minJointDiff)
self.maxJointDiff = max(maxJointDiff, self.maxJointDiff)
# Check how many labels we've found
numMarkers = skelDict['numMarkers']
hits = np.where(labels != -1)[0]
numHits = float(len(hits))
perc = numHits / numMarkers
self.stats['labels'].append(perc)
self.labelHits += perc
self.minLabelHits = min(perc, self.minLabelHits)
self.maxLabelHits = max(perc, self.maxLabelHits)
# TODO: Measure label accuracy by checking which ones are correct (not just assigned)
# Print stats for frame
# print "> Frame:", frame
# print " - Effectors dists (min | max | total):", minEd, "|", maxEd, "|", totalEd
# print " - Joint diffs (min | max | total):", minJointDiff, "|", maxJointDiff, "|", jointDiffs
# print " - Label hits:", perc, "% |", int(numHits)
# Print average stats
if self.useFrame(interface.frame(), attrs['printRule']):
avgEffDist = self.effectorsDist / self.numFrames
avgJointDiff = self.jointsDiffs / self.numFrames
avgLabelHits = self.labelHits / self.numFrames
print "> AVERAGE:"
print " - Effs (min | max | avg | total):", self.minEffectorsDist, "|", self.maxEffectorsDist, "|", avgEffDist, "|", self.effectorsDist
print " - Joints (min | max | avg | total):", self.minJointDiff, "|", self.maxJointDiff, "|", avgJointDiff, "|", self.jointsDiffs
print " - Labels (min | max | avg):", self.minLabelHits, "|", self.maxLabelHits, "|", avgLabelHits
if True:
import datetime, os
from os.path import expanduser
home_directory = expanduser('~')
dumpDir = os.path.join(home_directory, 'Documents', 'IMS')
if not os.path.exists(dumpDir): os.makedirs(dumpDir)
import matplotlib.pyplot as plt
fig, (ax1, ax2, ax3) = plt.subplots(3)
ax1.set_title('Effectors')
ax1.plot(self.stats['frames'], self.stats['effectors'])
ax2.set_title('Joints')
ax2.plot(self.stats['frames'], self.stats['joints'])
ax3.set_title('Labels')
ax3.plot(self.stats['frames'], self.stats['labels'])
dumpName = 'Stats ' + str(datetime.datetime.now().strftime('%d-%m-%Y %H-%M-%S'))
fname = os.path.join(dumpDir, dumpName)
plt.savefig(fname + '.png')
plt.show()
# ed = np.array([np.sqrt(np.sum(np.power(d, 2))) for d in dists], dtype=np.float32)
# minDist, maxDist = np.min(ed), np.max(ed)
# self.logger.info('Min Dist = %f | Max Dist = %f' % (minDist, maxDist))
# for vi, v in enumerate(vs):
# idx = skelDict['markerParents'][vi]
# d = ed[idx]
# pAttrs = {'x3ds': np.array([v], dtype=np.float32), 'x3ds_pointSize': d*10 + 0.1, 'x3ds_colour': (0, 0, 0, 0.5)}
# interface.createChild('p_%d' % vi, 'points3d', attrs=pAttrs)
class Count3Ds(Op.Op):
def __init__(self, name='/Count_3D_Tracks', locations='', collectRule='', printRule='', exportRule='', exportPath='',
numMaxElms=3, minNumPoints=100, reverse=False, allowOverrides=False, displayTracks=False):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'X3ds locations', 'string', locations, {}),
('collectRule', 'Collect on frames', 'Collect on frames', 'string', collectRule, {}),
('printRule', 'Print on frames', 'Print on frames', 'string', printRule, {}),
('exportRule', 'Export on frames', 'Export on frames', 'string', exportRule, {}),
('exportPath', 'Export path', 'Export path', 'string', exportPath, {}),
('numMaxElms', 'numMaxElms', 'numMaxElms', 'int', numMaxElms, {}),
('minNumPoints', 'minNumPoints', 'minNumPoints', 'int', minNumPoints, {}),
('reverse', 'Reverse', 'Reverse', 'bool', reverse, {}),
('allowOverrides', 'Overrides', 'Allow overrides', 'bool', allowOverrides, {}),
('displayTracks', 'Display tracks', 'Display tracks', 'bool', displayTracks, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.stats = {
'frames': [],
'num_x3ds': [],
'num_tracks': [],
'track_lifetime': {},
'lastFrame': -1
}
self.frames = 0
self.x3ds_frames = {}
self.cacheManualOverride = False
self.trackColours = {}
def setup(self, interface, attrs):
self.cacheManualOverride = attrs['allowOverrides']
def cook(self, location, interface, attrs):
# if interface.frame() == self.stats['lastFrame']: return
if not self.useFrame(interface.frame(), attrs['collectRule']): return
if attrs['reverse']:
if self.stats['lastFrame'] == -1: self.stats['lastFrame'] = interface.frame()
else:
self.stats['lastFrame'] = interface.frame()
# Temp boot hack
isBoot = interface.attr('boot')
if isBoot is None or not isBoot: return
# Get cooked skeleton
x3ds = interface.attr('x3ds')
if x3ds is None: return
x3ds_labels = interface.attr('x3ds_labels')
if x3ds_labels is None or len(x3ds_labels) == 0: return
if len(x3ds) < attrs['minNumPoints']:
self.logger.warning('Not enough markers (%d)' % len(x3ds))
return
# Note: Assumption here for now is that we're using the labels (ints)
x3ds_labels = np.int32(x3ds_labels)
maxLabel = np.max(x3ds_labels)
frame = interface.frame()
if frame not in self.stats['frames']:
self.stats['frames'].append(frame)
self.stats['num_x3ds'].append(int(len(x3ds)))
self.stats['num_tracks'].append(int(maxLabel))
self.frames += 1
# Collect the x3ds if we're exporting them later
# if attrs['collectRule']:
# for x3d, x3d_label in zip(x3ds, x3ds_labels):
# if x3d_label not in self.x3ds_frames: self.x3ds_frames[x3d_label] = []
# self.x3ds_frames[x3d_label].append(x3d)
frameLabels = []
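# labelSwitch flags which label ids are active on this frame (used by the debug
# timeline plot further down)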
labelSwitch = np.zeros((maxLabel + 1, 1), dtype=np.int32)
for x3d, label in zip(x3ds, x3ds_labels):
label = int(label)
if label not in self.x3ds_frames:
self.x3ds_frames[label] = []
colour = np.float32(np.random.rand(4))
colour[3] = 1.0
self.trackColours[label] = colour
if label not in self.stats['track_lifetime']: self.stats['track_lifetime'][label] = []
# self.stats['track_lifetime'][label] = [frame]
# self.x3ds_frames[label].append(x3d)
# else:
if frame not in self.stats['track_lifetime'][label]:
self.stats['track_lifetime'][label].append(frame)
self.x3ds_frames[label].append(x3d)
frameLabels.append(label)
labelSwitch[label, 0] = 1
elif attrs['allowOverrides']:
frameIdx = self.stats['track_lifetime'][label].index(frame)
self.x3ds_frames[label][frameIdx] = x3d
refFrame = float(self.frames)
if refFrame > 0:
colours = np.zeros_like(x3ds)
for li, l in enumerate(x3ds_labels):
if l in self.stats['track_lifetime']:
frames = self.stats['track_lifetime'][l]
c = float(len(frames)) / refFrame
colours[li][0] = 1. - c
colours[li][2] = c
else:
colours[li][0] = 1.
interface.setAttr('x3ds_colours', np.array(colours, dtype=np.float32))
if attrs['exportRule'] and self.useFrame(interface.frame(), attrs['exportRule']):
import collections
trackLifetimes = self.stats['track_lifetime']
if trackLifetimes:
# Find the most common track length and use it as the base track length; we then
# look for other tracks of the same length
trackIds = collections.Counter([len(frames) for tid, frames in trackLifetimes.iteritems()]).most_common(attrs['numMaxElms'])
# print 'Most common (#frames, #points):', trackIds
minNumPoints = attrs['minNumPoints']
numFrames, numPoints = -1, -1
if trackIds:
numFrames, numPoints = trackIds[0]
if numPoints < minNumPoints:
self.logger.warning('Not enough points found in tracks: #points [%d] < [%d]' % (numPoints, minNumPoints))
else:
# We should be verifying that the selected tracks line up with the base track
# trackFirstFrame, trackLastFrame = track[0], track[-1]
# Go through each track and pick out the tracks that have survived as long as the base track
c3ds, c3ds_labels = [], []
for label, trackFrames in trackLifetimes.iteritems():
# For now exclude tracks with a longer lifetime. We should pick out the block of data
# by identifying which frames are solid within the timeline.
if len(trackFrames) != numFrames: continue
c3ds.append(self.x3ds_frames[label])
c3ds_labels.append(label)
c3ds = np.array(c3ds, dtype=np.float32)
c3ds_labels = np.array(c3ds_labels, dtype=np.int32)
# Either dump the c3ds to file (if a path is given) or alternatively write the c3ds to the interface
if attrs['exportPath']:
from IO import IO
exportPath = self.resolvePath(attrs['exportPath'] + '_' + str(interface.frame()) + '.c3dio')
import os
if not os.path.isfile(exportPath):
IO.save(exportPath, {'/root/tracks': {'x3ds': c3ds, 'x3ds_labels': c3ds_labels}})
self.logger.info('Exported C3Ds to: %s' % exportPath)
else:
c3dsAttrs = {
'x3ds': c3ds,
'x3ds_labels': c3ds_labels
}
interface.createChild('c3ds', 'group', attrs=c3dsAttrs)
# Print stats
# if self.useFrame(interface.frame(), attrs['printRule']):
if False:
import datetime, os
from os.path import expanduser
home_directory = expanduser('~')
dumpDir = os.path.join(home_directory, 'Documents', 'IMS')
if not os.path.exists(dumpDir): os.makedirs(dumpDir)
self.logger.info('# tracks = %d' % self.stats['num_tracks'][-1])
self.logger.info('# x3ds = %d' % self.stats['num_x3ds'][-1])
# print 'labels:', frameLabels
import matplotlib.pyplot as plt
fig, (ax1, ax2) = plt.subplots(2)
# ax1.set_title('# X3Ds')
# ax1.plot(self.stats['frames'], self.stats['num_x3ds'])
#
# ax2.set_title('# Tracks')
# ax2.plot(self.stats['frames'], self.stats['num_tracks'])
trackLifetimes = np.array([(l, len(f), np.min(f), np.max(f)) for (l, f) in self.stats['track_lifetime'].iteritems()], dtype=np.int32)
trackLifetimes.view('i4,i4,i4,i4').sort(order=['f1'], axis=0)
ax1.set_title('Track lifetimes')
ax1.barh(range(len(trackLifetimes)), trackLifetimes[:, 1][::-1], color='blue')
ax2.set_title('Active labels (frame %s)' % str(interface.frame()))
ax2.bar(range(maxLabel + 1), labelSwitch[:, 0])
# from IO import IO
# IO.save(os.path.join(os.environ['GRIP_DATA'],'TracksStats.io'), {'/root/data': {'tracks': trackLifetimes}})
dumpName = 'Stats ' + str(datetime.datetime.now().strftime('%d-%m-%Y %H-%M-%S'))
fname = os.path.join(dumpDir, dumpName)
plt.savefig(fname + '.png')
# plt.show()
logAttrs = {
'stats': self.stats,
'x3ds_frames': self.x3ds_frames,
'track_colours': self.trackColours
}
interface.createChild('log', 'group', attrs=logAttrs)
if attrs['displayTracks']:
for trackId, trackX3ds in self.x3ds_frames.iteritems():
colour = self.trackColours[trackId]
tAttrs = {
'x3ds': trackX3ds,
'x3ds_colour': colour
}
interface.createChild('track_%d' % trackId, 'points', attrs=tAttrs)
class Visualise(Op.Op):
def __init__(self, name='/Visualise_Tracks', locations='', maxFrames=0, singleLocation=False, update=True):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('maxFrames', 'Max frames', 'Max frames', 'int', maxFrames, {}),
('singleLocation', 'Single location', 'Single location', 'bool', singleLocation, {}),
('update', 'Update', 'Update', 'bool', update, {})
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
if not attrs['update']: return
x3ds_frames = interface.attr('x3ds_frames')
maxFrames = attrs['maxFrames']
if x3ds_frames is not None:
interface.deleteLocationsByName(location + '/track_')
trackColours = interface.attr('track_colours')
if attrs['singleLocation']:
x3ds, colours = [], []
for trackId, trackX3ds in x3ds_frames.iteritems():
if len(trackX3ds) == 0: continue
if maxFrames and len(trackX3ds) > maxFrames: continue
colour = trackColours[trackId] if trackColours is not None else (0., 0., 0.7, 0.7)
x3ds.extend(trackX3ds)
colours.extend(np.repeat([colour], len(trackX3ds), axis=0))
tAttrs = {
'x3ds': np.float32(x3ds),
'x3ds_colours': np.float32(colours),
'x3ds_pointSize': 8.
}
interface.createChild('tracks', 'points', attrs=tAttrs)
else:
for trackId, trackX3ds in x3ds_frames.iteritems():
if len(trackX3ds) == 0: continue
if maxFrames and len(trackX3ds) > maxFrames: continue
colour = trackColours[trackId] if trackColours is not None else (0., 0., 0.7, 0.7)
tAttrs = {
'x3ds': trackX3ds,
'x3ds_colour': colour,
'x3ds_pointSize': 8.
}
interface.createChild('track_%d' % trackId, 'points', attrs=tAttrs)
class ExportX3ds(Op.Op):
def __init__(self, name='/Export_Track_Log_To_X3Ds', locations='', saveTo='', numMaxElms=3, minNumPoints=30, frameRange=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('saveTo', 'Save to', 'Save to (.x3d)', 'filename', saveTo, {}),
('numMaxElms', 'numMaxElms', 'numMaxElms', 'int', numMaxElms, {'min': 1}),
('minNumPoints', 'minNumPoints', 'minNumPoints', 'int', minNumPoints, {'min': 1}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
if not location or not attrs['saveTo']: return
stats = interface.attr('stats')
x3ds_frames = interface.attr('x3ds_frames')
import collections
trackLifetimes = stats['track_lifetime']
if True:
# Go through and save out frames. Missing frames are marked as -1
maxTrackId, minTrackId, numTracks = max(trackLifetimes.keys()), min(trackLifetimes.keys()), len(trackLifetimes)
trackInds = np.ones(maxTrackId + 1, dtype=np.int32) * -1
trackInds[trackLifetimes.keys()] = np.arange(numTracks)
tracksNumFrames = [len(frames) for frames in trackLifetimes.values()]
tracksMinFrames = [min(frames) for frames in trackLifetimes.values()]
frameMin, frameMax = min(tracksNumFrames), max(tracksNumFrames)
frameStart, frameEnd = 0, frameMax
numFrames = frameEnd - frameStart
# c3ds = np.zeros((numFrames, numTracks, 4), dtype=np.float32)
c3ds = np.ones((numTracks, numFrames, 4), dtype=np.float32) * -1
self.logger.info("Shape: {}".format(c3ds.shape))
labels = []
allFrames = np.int32(stats['frames'])
for tid, trackFrames in trackLifetimes.iteritems():
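# Convert the track's absolute frame numbers into indices into the collected frame list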
trackFrames = np.where(trackFrames == allFrames.reshape(-1, 1))[0]
if tid not in x3ds_frames: continue
tid_index = trackInds[tid]
c3ds[tid_index, trackFrames, :3] = x3ds_frames[tid]
c3ds[tid_index, trackFrames, 3] = 0.0
labels.append(tid)
c3ds_labels = np.int32(trackLifetimes.keys())
else:
trackIds = collections.Counter([len(frames) for tid, frames in trackLifetimes.iteritems()]).most_common(attrs['numMaxElms'])
minNumPoints = attrs['minNumPoints']
numFrames, numPoints = -1, -1
if trackIds:
numFrames, numPoints = trackIds[0]
c3ds, c3ds_labels = [], []
for label, trackFrames in trackLifetimes.iteritems():
if len(trackFrames) != numFrames: continue
if len(x3ds_frames[label]) != numFrames:
print 'Unexpected frame length for label %s: %d instead of %d' % (label, len(x3ds_frames[label]), numFrames)
c3ds.append(x3ds_frames[label])
c3ds_labels.append(label)
c3ds = np.float32(c3ds)
c3ds_labels = np.int32(c3ds_labels)
if attrs['saveTo']:
from IO import IO
exportPath = self.resolvePath(attrs['saveTo'])
IO.save(exportPath, {'/root/tracks': {'x3ds': c3ds, 'x3ds_labels': c3ds_labels}})
self.logger.info('Exported C3Ds to: %s' % exportPath)
else:
c3dsAttrs = {
'x3ds': c3ds,
'x3ds_labels': c3ds_labels
}
interface.createChild('c3ds', 'group', attrs=c3dsAttrs)
def calculateMissingFrames(trackLifetimes, x3ds_frames, trackId, mergeId):
if trackId not in trackLifetimes or mergeId not in trackLifetimes: return None, None
mergeStart, mergeEnd = trackLifetimes[trackId][-1], trackLifetimes[mergeId][0]
numMissingFrames = mergeEnd - mergeStart
if numMissingFrames <= 0 or len(x3ds_frames[trackId]) <= 1 or len(x3ds_frames[mergeId]) <= 1:
return None, None
v0_idx = -2 if len(x3ds_frames[trackId]) > 1 else -1
v3_idx = 1 if len(x3ds_frames[mergeId]) > 1 else 0
cpts = np.float32([
x3ds_frames[trackId][v0_idx],
x3ds_frames[trackId][-1],
x3ds_frames[mergeId][0],
x3ds_frames[mergeId][v3_idx]
])
tck, u = splprep(cpts.T, u=None, s=0.0, per=0)
u_new = np.linspace(0, 1, numMissingFrames + 3)
x_new, y_new, z_new = splev(u_new, tck, der=0)
fillPts = np.float32([[x, y, z] for (x, y, z) in zip(x_new, y_new, z_new)])
fillFrameNumbers = range(mergeStart + 1, mergeEnd)
fillPts = fillPts[2:-2]
assert len(fillPts) == len(fillFrameNumbers)
return fillPts, fillFrameNumbers
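# Worked example (hypothetical numbers): if the base track ends at frame 10 and the
# merge track starts at frame 14 then numMissingFrames is 4, the spline is sampled
# at 4 + 3 parameter values, and trimming two samples at each end leaves 3 interior
# points for the gap frames 11, 12 and 13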
class MergeTracks(Op.Op):
def __init__(self, name='/Merge_Tracks', locations='', trackId=-1, mergeIds='', x3d_threshold=100., frame_threshold=30,
suggest=True, executeMerge=False, fillMissingFrames=True, visualiseCandidates=False, visualisePrecedingCandidates=False,
pointSize=12.0, colour1=(0, 0, 0, 1), colour2=(0.5, 0.5, 0.5, 1), clearCache=True):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('trackId', 'Track ID', 'Track ID', 'int', trackId, {'min': -1}),
('mergeIds', 'Merge IDs', 'Merge IDs', 'string', mergeIds, {}),
('x3d_threshold', '3D threshold', '3D threshold', 'float', x3d_threshold, {}),
('frame_threshold', 'Frame threshold', 'Frame threshold', 'float', frame_threshold, {}),
('suggest', 'Suggest', 'Suggest merge', 'bool', suggest, {}),
('executeMerge', 'Execute merge', 'Execute merge', 'bool', executeMerge, {}),
('fillMissingFrames', 'Fill missing frames', 'Fill missing frames', 'bool', fillMissingFrames, {}),
('visualiseCandidates', 'Visualise candidates', 'Visualise candidates', 'bool', visualiseCandidates, {}),
('visualisePrecedingCandidates', 'Visualise prec. candidates', 'Visualise preceding candidates', 'bool', visualisePrecedingCandidates, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {}),
('colour1', 'Colour (filler)', 'Filler colour to track', 'string', str(colour1), {}),
('colour2', 'Colour (filler prec.)', 'Filler colour to preceding track', 'string', str(colour2), {}),
('clearCache', 'Clear cache', 'Clear cache', 'bool', clearCache, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.cands, self.candsPreceding = [], []
def cook(self, location, interface, attrs):
if attrs['trackId'] == -1: return
x3ds_frames = interface.attr('x3ds_frames')
interface.deleteLocationsByName(location + '/filler_')
mergeIds = np.int32(attrs['mergeIds'].split()) if attrs['mergeIds'] else None
if x3ds_frames is not None:
trackId = attrs['trackId']
# Find candidate tracks (tracks that don't overlap with the track in question)
stats = interface.attr('stats')
if stats is not None:
trackLifetimes = stats['track_lifetime']
if trackId not in trackLifetimes:
self.logger.warning('Could not find track %d in log' % trackId)
return
trackFrames = trackLifetimes[trackId]
s, e = min(trackFrames), max(trackFrames)
self.logger.info('Track %d duration: %d -> %d' % (trackId, s, e))
if attrs['suggest']:
if not self.cands or attrs['clearCache']:
self.cands, self.candsPreceding = [], []
for tid, frames in trackLifetimes.iteritems():
if trackId == tid or tid not in x3ds_frames: continue
ts, te = min(frames), max(frames)
# print('Compare with track %d: %d -> %d' % (tid, ts, te))
# Check overlap (accept frame gaps within threshold)
if 0 < ts - e < attrs['frame_threshold']:
# Distance test
x3d = x3ds_frames[trackId][trackFrames.index(e)]
x3d_cand = x3ds_frames[tid][frames.index(ts)]
dist = np.linalg.norm(x3d - x3d_cand)
print('Track %d is within threshold (after) with dist %f' % (tid, dist))
if dist < attrs['x3d_threshold']:
self.cands.append((tid, ts, te, dist))
elif 0 < s - te < attrs['frame_threshold']:
x3d = x3ds_frames[trackId][trackFrames.index(s)]
x3d_cand = x3ds_frames[tid][frames.index(te)]
dist = np.linalg.norm(x3d - x3d_cand)
if dist < attrs['x3d_threshold']:
self.candsPreceding.append((tid, ts, te, dist))
if self.cands:
self.logger.info("Candidate tracks: {}".format(self.cands))
if attrs['visualiseCandidates']:
interface.setAttr('visible', True, atLocation='%s/track_%d' % (location, trackId))
for tid, ts, te, dist in self.cands:
if mergeIds is not None and tid not in mergeIds: continue
interface.setAttr('visible', True, atLocation='%s/track_%d' % (location, tid))
fillPts, fillFrames = calculateMissingFrames(trackLifetimes, x3ds_frames, trackId, tid)
if fillPts is not None:
self.logger.info('Estimated %d points to connect tracks %d and %d (%d frames)' % (len(fillPts), trackId, tid, (te - ts)))
# print 'Track join (front):', trackLifetimes[trackId][-2:], '>', fillFrames[:2]
# print 'Track join (back):', fillFrames[-2:], '>', trackLifetimes[tid][:2]
pAttrs = {
'x3ds': np.float32(fillPts),
'x3ds_colour': eval(attrs['colour1']),
'x3ds_pointSize': attrs['pointSize'],
}
interface.createChild('filler_%d_%d' % (trackId, tid), 'points3d', atLocation='%s' % location, attrs=pAttrs)
if self.candsPreceding:
self.logger.info("Candidate tracks (preceding): {}".format(self.candsPreceding))
if attrs['visualisePrecedingCandidates']:
interface.setAttr('visible', True, atLocation='%s/track_%d' % (location, trackId))
for tid, ts, te, dist in self.candsPreceding:
if mergeIds is not None and tid not in mergeIds: continue
interface.setAttr('visible', True, atLocation='%s/track_%d' % (location, tid))
fillPts, fillFrames = calculateMissingFrames(trackLifetimes, x3ds_frames, tid, trackId) # the candidate precedes the base track, so it goes first
if fillPts is not None:
self.logger.info('Estimated %d points to connect tracks %d and %d' % (len(fillPts), trackId, tid))
pAttrs = {
'x3ds': np.float32(fillPts),
'x3ds_colour': eval(attrs['colour2']),
'x3ds_pointSize': attrs['pointSize'],
}
interface.createChild('filler_prec_%d_%d' % (trackId, tid), 'points3d', atLocation='%s' % location, attrs=pAttrs)
# Merge tracks and make sure we remove any overlap
if mergeIds is None or not attrs['executeMerge']: return
trackLifetimes = stats['track_lifetime']
self.cands, self.candsPreceding = [], []
# Go through each merge id requested by the user and merge
for mergeId in mergeIds:
if mergeId == -1: continue
if mergeId not in trackLifetimes or mergeId not in x3ds_frames:
self.logger.warning('Could not find track id %d to merge into %d' % (mergeId, trackId))
continue
if mergeId < trackId:
self.logger.warning('At the moment we can only merge to an earlier track: %d > %d' % (trackId, mergeId))
# Fill missing frames between the tracks if requested
if attrs['fillMissingFrames']:
fillPts, fillFrameNumbers = calculateMissingFrames(trackLifetimes, x3ds_frames, trackId, mergeId)
# Extend the track data if there are any frames to fill with
if fillPts is not None and fillFrameNumbers is not None:
x3ds_frames[trackId].extend(fillPts)
trackLifetimes[trackId].extend(fillFrameNumbers)
trackFrames = trackLifetimes[trackId]
mergeFrames = trackLifetimes[mergeId]
# Check if there's overlap and if so resolve it by excluding the overlapping points from the merge track
trackId_lastFrame, mergeId_firstFrame = trackFrames[-1], mergeFrames[0]
mergeFrom = 0
if trackId_lastFrame >= mergeId_firstFrame:
mergeFrom = mergeFrames.index(trackId_lastFrame + 1)
self.logger.info('Merge track starts before the target track ends (%d >= %d): Merge from %d' % (trackId_lastFrame, mergeId_firstFrame, mergeFrom))
# Update the x3ds for track frames
x3ds_frames[trackId].extend(x3ds_frames[mergeId][mergeFrom:])
del x3ds_frames[mergeId]
# Update the track stats to reflect the merged frames
if mergeFrames:
trackFrames.extend(mergeFrames[mergeFrom:])
del trackLifetimes[mergeId]
stats['track_lifetime'] = trackLifetimes
interface.setAttr('x3ds_frames', x3ds_frames)
interface.setAttr('stats', stats)
class AutoMergeTracks(Op.Op):
def __init__(self, name='/Auto_Merge_Tracks', locations='', x3d_threshold=100., frame_threshold=30,
suggest=False, executeMerge=False, strictMerge=False, fillMissingFrames=True, minNumFrames=4):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('x3d_threshold', '3D threshold', '3D threshold', 'float', x3d_threshold, {}),
('frame_threshold', 'Missing frames threshold', 'Missing frames threshold', 'int', frame_threshold, {}),
('suggest', 'Suggest', 'Suggest merge', 'bool', suggest, {}),
('executeMerge', 'Execute merge', 'Execute merge', 'bool', executeMerge, {}),
('strictMerge', 'Strict merge', 'Only merge if one track option is available', 'bool', strictMerge, {}),
('fillMissingFrames', 'Fill missing frames', 'Fill missing frames', 'bool', fillMissingFrames, {}),
('minNumFrames', 'Min. # frames', 'Min. # frames', 'int', minNumFrames, {'min': 1})
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
if not attrs['suggest']: return
mergeCount = 0
x3ds_frames = interface.attr('x3ds_frames')
if x3ds_frames is not None:
# Find candidates (tracks that don't overlap)
stats = interface.attr('stats')
if stats is not None:
trackLifetimes = stats['track_lifetime']
keysToRemove = []
for trackId, trackFrames in trackLifetimes.iteritems():
if trackId not in x3ds_frames: continue
s, e = min(trackFrames), max(trackFrames)
si, ei = trackFrames.index(s), trackFrames.index(e)
cands, candsBackward = [], []
x3ds_trackFrames = x3ds_frames[trackId]
numTrackFrames = len(x3ds_trackFrames)
dists = []
for tid, frames in trackLifetimes.iteritems():
if trackId == tid or tid not in x3ds_frames: continue
ts, te = min(frames), max(frames)
# Check overlap (accept frame gaps within threshold)
if 0 < ts - e < attrs['frame_threshold']:
# Distance test
if ei >= numTrackFrames:
self.logger.warning('Track %d (%d -> %d) exceeds frame length: %d' % (trackId, s, e, numTrackFrames))
continue
x3d = x3ds_trackFrames[ei]
x3d_cand = x3ds_frames[tid][frames.index(ts)]
dist = np.linalg.norm(x3d - x3d_cand)
if dist < attrs['x3d_threshold']:
cands.append((tid, ts, te, dist))
dists.append(dist)
if cands:
self.logger.info('Track %d duration: %d -> %d' % (trackId, s, e))
self.logger.info(" -> Candidate tracks: {}".format(cands))
# Merge tracks
if attrs['executeMerge']:
# Find the track we want to merge (merge Id)
if attrs['strictMerge'] and len(cands) != 1: continue
if len(cands) == 1:
mergeId = cands[0][0]
else:
# Find lowest distance (seems the most sensible given our simple heuristics)
mergeId = cands[np.argmin(dists)][0]
if trackId not in trackLifetimes: continue
if mergeId not in trackLifetimes: continue
# Fill missing frames between tracks if necessary
if attrs['fillMissingFrames']:
fillPts, fillFrameNumbers = calculateMissingFrames(trackLifetimes, x3ds_frames, trackId, mergeId)
if fillPts is not None and fillFrameNumbers is not None:
self.logger.info('Using %d estimated points to connect tracks %d and %d' % (len(fillPts), trackId, mergeId))
x3ds_frames[trackId].extend(fillPts)
trackLifetimes[trackId].extend(fillFrameNumbers)
# Update x3ds to reflect the merged frames
x3ds_frames[trackId].extend(x3ds_frames[mergeId])
#x3ds_frames[mergeId] = []
del x3ds_frames[mergeId]
mergeCount += 1
self.logger.info(' -> Merged track %d into %d' % (mergeId, trackId))
# Update track stats to reflect the merged frames
mergeFrames = trackLifetimes[mergeId]
if mergeFrames:
trackLifetimes[trackId].extend(mergeFrames)
# del trackLifetimes[mergeId]
keysToRemove.append(mergeId)
for key in keysToRemove: del trackLifetimes[key]
stats['track_lifetime'] = trackLifetimes
# Log the number of tracks after merging
if mergeCount:
self.logger.info('Number of tracks after %d merge operations: %d' % (mergeCount, len(x3ds_frames)))
elif not mergeCount and attrs['executeMerge']:
self.logger.info('No merging required')
# Eliminate tracks shorter than a certain length (in frames)?
for tid, frames in x3ds_frames.iteritems():
numFrames = len(frames)
frameThreshold = attrs['minNumFrames']
if numFrames < frameThreshold:
self.logger.info('Track %d has fewer than %d frames (%d)' % (tid, frameThreshold, numFrames))
interface.setAttr('x3ds_frames', x3ds_frames)
interface.setAttr('stats', stats)
class Interpolate(Op.Op):
def __init__(self, name='/Interpolate_Tracks', locations='', track1=-1, track2=-1, type=1):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('track1', 'Track 1 ID', 'Track 1 ID', 'int', track1, {'min': 0}),
('track2', 'Track 2 ID', 'Track 2 ID', 'int', track2, {'min': 0}),
('type', 'Type', 'Type', 'int', type, {}) # TODO: Make drop-down
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
x3ds_frames = interface.attr('x3ds_frames')
if x3ds_frames is None: return
stats = interface.attr('stats')
if stats is None: return
trackLifetime = stats['track_lifetime']
track1_id = attrs['track1']
track2_id = attrs['track2']
if track1_id == -1 or track2_id == -1:
return
track1 = x3ds_frames[track1_id]
track2 = x3ds_frames[track2_id]
frameGap = trackLifetime[track2_id][0] - trackLifetime[track1_id][-1] - 1
self.logger.info('Gap frames: %d' % frameGap)
pts = np.float32([])
if attrs['type'] == 1:
cpts = np.float32([
track1[-2], track1[-1], track2[0], track2[1]
])
targetGap = np.linalg.norm(track1[-1] - track1[-2])
gapDist = np.linalg.norm(track1[-1] - track2[0])
ratio = math.ceil(gapDist / targetGap)
self.logger.info('Gap distance: %.2f' % gapDist)
tck, u = splprep(cpts.T, u=None, s=0.0, per=0)
u_new = np.linspace(0, 1, frameGap + 3)
x_new, y_new, z_new = splev(u_new, tck, der=0)
pts = np.float32([[x, y, z] for (x, y, z) in zip(x_new, y_new, z_new)])
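# The spline is sampled at frameGap + 3 parameter values spanning the four control
# points; the slice below drops the samples nearest the control points, leaving one
# point per gap frame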
pAttrs = {
'x3ds': np.float32(pts[1:-2]),
'x3ds_colour': (0, 0, 0, 1),
'x3ds_pointSize': 12.
}
interface.createChild('interpolatedPts', 'points3d', attrs=pAttrs)
class Info(Op.Op):
def __init__(self, name='/Tracks_Info', locations='', basicInfo=True, detailedInfo=False, printInfo=False,
plotTimeline=False, useFilters=True, filterMaxFrames=0):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('basicInfo', 'Basic info', 'Basic info', 'bool', basicInfo, {}),
('detailedInfo', 'Detailed info', 'Detailed info', 'bool', detailedInfo, {}),
('printInfo', 'Print info', 'Print info', 'bool', printInfo, {}),
('plotTimeline', 'Plot timeline', 'Plot timeline', 'bool', plotTimeline, {}),
('useFilters', 'Use filters', 'Use filters', 'bool', useFilters, {}),
('filterMaxFrames', 'Filter max frames', 'Only show if frames less than', 'int', filterMaxFrames, {'min': 0})
]
super(self.__class__, self).__init__(name, self.fields)
def cook(self, location, interface, attrs):
x3ds_frames = interface.attr('x3ds_frames')
stats = interface.attr('stats')
if x3ds_frames is None or stats is None: return
tracksLifetime = stats['track_lifetime']
if attrs['basicInfo']:
numTracks = len(stats['track_lifetime'])
numX3dsTracks = len(x3ds_frames)
interface.setAttr('numTracks', numTracks)
interface.setAttr('numX3dsTracks', numX3dsTracks)
if attrs['printInfo']: self.logger.info('Number of tracks (x3ds): %d (%d)' % (numTracks, numX3dsTracks))
if attrs['detailedInfo']:
for tid, frames in tracksLifetime.iteritems():
ts, te = min(frames), max(frames)
if attrs['useFilters'] and te - ts >= attrs['filterMaxFrames']: continue
if attrs['printInfo']: print('Track %d: %d -> %d' % (tid, ts, te))
if attrs['plotTimeline']:
import matplotlib.pyplot as plt
import pylab
labels, trackFrames = [], []
for label, frames in tracksLifetime.iteritems():
ts, te = min(frames), max(frames)
if attrs['useFilters'] and te - ts >= attrs['filterMaxFrames']: continue
labels.append(label)
trackFrames.append(frames)
trackColours = interface.attr('track_colours')
if trackColours is None: trackColours = dict.fromkeys(labels, 'blue')
fig = plt.figure()
ax = fig.add_subplot(111)
for i, (label, frames) in enumerate(zip(labels, trackFrames)):
ax.barh((i * 0.5) + 0.5, len(frames), left=frames[0], height=0.3, align='center', color=trackColours[label], alpha=0.75)
y_max = float(len(labels)) * 0.5 + 0.25
pos = np.arange(0.5, y_max, 0.5)
locs_y, labels_y = pylab.yticks(pos, labels)
plt.setp(labels_y, fontsize=6)
ax.axis('tight')
ax.set_ylim(ymin=0.25, ymax=y_max)
ax.grid(color='g', linestyle=':')
ax.invert_yaxis()
plt.show()
class VisualiseTrackHealth(Op.Op):
def __init__(self, name='/Visualise_Track_Health', locations='', frame=0, enable=True):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('frame', 'Frame', 'Frame', 'int', frame, {'min': 0}),
('enable', 'Enable', 'Enable', 'bool', enable, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.x3ds_frames = None
self.trackLifetime = None
def cook(self, location, interface, attrs):
if not attrs['enable']: return
if not location or location == self.getName(): return
if self.x3ds_frames is None:
self.x3ds_frames = interface.attr('x3ds_frames')
if self.trackLifetime is None:
stats = interface.attr('stats')
if stats is not None:
self.trackLifetime = stats['track_lifetime']
if self.x3ds_frames is None:
self.logger.error('3D frames not found at location: %s' % location)
return
if self.trackLifetime is None:
self.logger.error('Stats not found at location: %s' % location)
return
if not attrs['frame']: return
refFrame = attrs['frame']
frame = interface.frame()
pts, labels, colours = [], [], []
for tid, frames in self.trackLifetime.iteritems():
if frame in frames:
if tid not in labels: labels.append(tid)
trackFrames = self.x3ds_frames[tid]
pts.append(trackFrames[frames.index(frame)])
c = min(1., float(len(frames)) / float(refFrame))
colours.append([1. - c, 0., c, 1.])
pAttrs = {
'x3ds': np.float32(pts),
'x3ds_labels': np.int32(labels),
'x3ds_colours': np.float32(colours)
}
interface.createChild('snapshot', 'points3d', attrs=pAttrs)
class VisualiseAnimatedX3ds(Op.Op):
def __init__(self, name='/Visualise_Animated_X3Ds', locations='', pointSize=12., useColours=False):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'locations', 'string', locations, {}),
('pointSize', '3D Point size', '3D Point size', 'float', pointSize, {'min': 1.}),
('useColours', 'Use colours', 'Use colours', 'bool', useColours, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.flush()
def flush(self):
self.x3ds_frames, self.x3ds_labels, self.x3ds_colours = None, None, None
def cook(self, location, interface, attrs):
if self.x3ds_frames is None:
self.x3ds_frames = interface.attr('x3ds_frames')
self.x3ds_labels = interface.attr('x3ds_labels')
self.x3ds_colours = interface.attr('x3ds_colours')
if self.x3ds_frames is not None and interface.frame() in self.x3ds_frames:
frameAttrs = {
'x3ds': self.x3ds_frames[interface.frame()],
'x3ds_pointSize': attrs['pointSize']
}
if self.x3ds_labels is not None:
frameAttrs['x3ds_labels'] = self.x3ds_labels[interface.frame()]
if attrs['useColours'] and self.x3ds_colours is not None:
frameAttrs['x3ds_colours'] = self.x3ds_colours[interface.frame()]
interface.createChild('points', 'points3d', attrs=frameAttrs)
class AddMarkersToSkeleton(Op.Op):
def __init__(self, name='/Add_Markers', locations='', x3ds='', collectRule='', frameRange='', useMeanMarkers=True ):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'Skeleton locations', 'string', locations, {}),
('x3dsLocation', 'x3ds', 'X3ds locations', 'string', x3ds, {}),
('collectRule', 'Collect on frames', 'Collect on frames', 'string', collectRule, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {}),
('useMeanMarkers', 'Use mean markers', 'Use mean markers', 'bool', useMeanMarkers, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.RTs = []
def cook(self, location, interface, attrs):
skelDict = interface.attr('skelDict')
if skelDict is None:
self.logger.error('No skeleton found at: %s' % location)
return
from IO import ASFReader
if self.useFrame(interface.frame(), attrs['collectRule']):
self.RTs.append(ASFReader.invert_matrix_array(skelDict['Gs']))
if not self.useFrame(interface.frame(), attrs['frameRange']): return
x3dsLocation = attrs['x3dsLocation']
x3ds = interface.attr('x3ds', atLocation=x3dsLocation)
x3ds_labels = interface.attr('x3ds_labels', atLocation=x3dsLocation)
if x3ds is None or x3ds_labels is None:
self.logger.error('No x3ds data found at: %s' % x3dsLocation)
return
# Now we've got a skeleton and x3ds which indicate candidate marker data
# We have to find out which bones and joints the markers map to
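# Reorder the collected samples to (frame, point, xyz) so that data[fi][pi] below
# indexes a single marker position on a single frame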
frames = np.transpose(x3ds, axes=(1, 0, 2))#[:50, :, :3]
data = frames.copy()
_RTs = np.transpose(self.RTs, axes=(1, 0, 2, 3))
pointToGroup, pointResiduals, stabilisedFrames = ASFReader.assignAndStabilize(data, _RTs, thresholdDistance=200.)
print pointToGroup
# jointIndices = [int(jn) for jn in skelDict['jointNames']]
Gs = skelDict['Gs']
markerParents = [gi for gi in pointToGroup if gi != -1]
markerNames = [('%d' % pi) for pi, gi in enumerate(pointToGroup) if gi != -1]
if attrs['useMeanMarkers']:
markerOffsets = np.mean(
[[np.dot(Gs[gi][:3, :3].T, data[fi][pi] - Gs[gi][:3, 3]) for pi, gi in enumerate(pointToGroup) if gi != -1] for fi in
range(data.shape[0])], axis=0)
else:
markerOffsets = [np.dot(Gs[gi][:3, :3].T, data[-1][pi] - Gs[gi][:3, 3]) for pi, gi in enumerate(pointToGroup) if gi != -1]
skelDict['markerParents'] = np.int32(markerParents)
skelDict['markerNames'] = markerNames
skelDict['markerOffsets'] = np.float32(markerOffsets)
skelDict['markerWeights'] = np.ones(len(markerNames), dtype=np.float32)
interface.setAttr('skelDict', skelDict)
interface.setAttr('override', True)
class Graph(Op.Op):
def __init__(self, name='/Track_Graph', locations='', frameRange='', x3d_threshold=300, nearestN=4, updateRange='',
trackedX3ds=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'X3ds locations', 'string', locations, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {}),
('updateRange', 'Update range', 'Update range', 'string', updateRange, {}),
('x3d_threshold', '3D threshold', '3D threshold', 'float', x3d_threshold, {}),
('nearestN', 'Nearest N', 'Nearest N', 'int', nearestN, {}),
('trackedX3ds', 'Tracked X3Ds', 'Tracked X3Ds', 'string', trackedX3ds, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.graph = None
self.edges = None
def setup(self, interface, attrs):
if self.graph is None:
self.graph = Label.TrackGraph(attrs['x3d_threshold'], attrs['nearestN'])
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
updateGraph = bool(self.useFrame(interface.frame(), attrs['updateRange']))
x3ds = interface.attr('x3ds')
x3ds_labels = interface.attr('x3ds_labels')
if x3ds is None or x3ds_labels is None: return
x3ds_joints = None
if attrs['trackedX3ds']:
x3ds_joints = np.ones((len(x3ds_labels)), dtype=np.int32) * -1
trackedX3ds = interface.attr('x3ds', atLocation=attrs['trackedX3ds'])
trackedLabels = interface.attr('x3ds_labels', atLocation=attrs['trackedX3ds'])
trackedJoints = interface.attr('joints', atLocation=attrs['trackedX3ds'])
if trackedJoints is None:
self.logger.warning('No tracked data found at: %s' % attrs['trackedX3ds'])
else:
_, _labels, _vels = Label.label_3d_from_3d(trackedX3ds, trackedLabels, None, x3ds, attrs['x3d_threshold'])
matchingLabels = np.where(_labels != -1)[0]
whichJoints = np.where(_labels[matchingLabels] == trackedLabels.reshape(-1, 1))[1]
if len(matchingLabels) != 0:
x3ds_joints[matchingLabels] = trackedJoints[whichJoints]
# print x3ds_joints
self.graph.push(x3ds, x3ds_labels, updateGraph, x3ds_joints)
interface.setAttr('trackGraph', self.graph.graph)
# if self.edges is None:
self.edges = self.graph.drawing_graph()
# interface.setAttr('edges', self.graph.drawing_graph())
pAttrs = {
'x3ds': self.graph.x3ds,
'x3ds_labels': self.graph.x3ds_labels,
'x3ds_pointSize': 14.,
'x3ds_colour': (1., 0.5, 0., 0.7),
'edges': self.edges
}
interface.createChild('points', 'points3d', attrs=pAttrs)
class FrameDiff(Op.Op):
def __init__(self, name='/Track_Frame_Diff', locations='', frameRange=''):
self.fields = [
('name', 'name', 'name', 'string', name, {}),
('locations', 'locations', 'X3ds locations', 'string', locations, {}),
('frameRange', 'Frame range', 'Frame range', 'string', frameRange, {})
]
super(self.__class__, self).__init__(name, self.fields)
self.flush()
def flush(self):
self.x3ds_labels = None
def cook(self, location, interface, attrs):
if not self.useFrame(interface.frame(), attrs['frameRange']): return
x3ds_labels = interface.attr('x3ds_labels')
if self.x3ds_labels is not None:
x3ds_colours = np.zeros((len(x3ds_labels), 4), dtype=np.float32)
x3ds_colours[:, :] = [1, 0, 0, 0.7]
shared = np.where(x3ds_labels == self.x3ds_labels.reshape(-1, 1))[1]
x3ds_colours[shared] = [0, 0, 1, 0.7]
interface.setAttr('x3ds_colours', x3ds_colours)
self.x3ds_labels = x3ds_labels
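# Transform each marker offset by its parent joint's global matrix to obtain
# world-space marker positions and their labels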
def getWorldSpaceMarkerPos(skelDict):
vs, lbls = [], []
for mi in range(skelDict['numMarkers']):
parentJointGs = np.append(skelDict['Gs'][skelDict['markerParents'][mi]], [[0, 0, 0, 1]], axis=0)
mOffset = skelDict['markerOffsets'][mi]
mOffset = np.array([[mOffset[0], mOffset[1], mOffset[2], 1]], dtype=np.float32)
v = np.dot(parentJointGs, mOffset.T)
vs.append(np.concatenate(v[:3]))
lbls.append(skelDict['markerNames'][mi])
vs = np.array(vs, dtype=np.float32)
return vs, lbls
def det2imgXY(detection, (h, w)):
"""
Convert detection space (-1..1) to image space. Compensate for non-square images
w: 1920 h:1080
--
det [0.48002064, 0.29927447]
measured [1420, 253]
compute [1420.8198165893555, 701.60821616649628]
--
det [ 0.78030837 0.49955559]
measured [1709, 60]
computed [1709.0960311889648, 809.76001739501953]
"""
width, height = np.float32(w), np.float32(h)
x = (width / 2.) + (width * detection[0] / 2.)
y = (height / 2.) - (width * detection[1] / 2.)
return [x, y]
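# For illustration only: a minimal inverse of det2imgXY, derived by solving the two
# formulas above for the detection coordinates. The name img2detXY is hypothetical
# and not part of the original API.
def img2detXY(pixel, (h, w)):
width, height = np.float32(w), np.float32(h)
dx = (2. * pixel[0] - width) / width  # invert x = w/2 + w*dx/2
dy = (height - 2. * pixel[1]) / width  # invert y = h/2 - w*dy/2
return [dx, dy]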
# Register Ops
import Registry
Registry.registerOp('Track 2D', Track2D)
Registry.registerOp('Track 3D', Track3D)
Registry.registerOp('Track Model', Model)
Registry.registerOp('Track Error', Error)
Registry.registerOp('Track Graph', Graph)
Registry.registerOp('Count 3D Tracks', Count3Ds)
Registry.registerOp('Visualise Tracks', Visualise)
Registry.registerOp('Interpolate Tracks', Interpolate)
Registry.registerOp('Tracks Info', Info)
Registry.registerOp('Visualise X3Ds Animation', VisualiseAnimatedX3ds)
Registry.registerOp('Visualise Track Health', VisualiseTrackHealth)
Registry.registerOp('Export Track Log to X3Ds', ExportX3ds)
|
{
"content_hash": "8c682bee4a7bedff28af98964af2e9ec",
"timestamp": "",
"source": "github",
"line_count": 1780,
"max_line_length": 152,
"avg_line_length": 41.02191011235955,
"alnum_prop": 0.6704145496377655,
"repo_name": "davidsoncolin/IMS",
"id": "479864eca198b3a370fecb41ff9d820bb4ff33eb",
"size": "73019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ops/Track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "159777"
},
{
"name": "Makefile",
"bytes": "404"
},
{
"name": "Python",
"bytes": "1869456"
},
{
"name": "Shell",
"bytes": "99"
}
],
"symlink_target": ""
}
|
"""
Support for Modbus Register sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.modbus/
"""
import logging
import struct
import voluptuous as vol
import homeassistant.components.modbus as modbus
from homeassistant.const import (
CONF_NAME, CONF_OFFSET, CONF_UNIT_OF_MEASUREMENT, CONF_SLAVE,
CONF_STRUCTURE)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers import config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['modbus']
CONF_COUNT = 'count'
CONF_REVERSE_ORDER = 'reverse_order'
CONF_PRECISION = 'precision'
CONF_REGISTER = 'register'
CONF_REGISTERS = 'registers'
CONF_SCALE = 'scale'
CONF_DATA_TYPE = 'data_type'
CONF_REGISTER_TYPE = 'register_type'
REGISTER_TYPE_HOLDING = 'holding'
REGISTER_TYPE_INPUT = 'input'
DATA_TYPE_INT = 'int'
DATA_TYPE_UINT = 'uint'
DATA_TYPE_FLOAT = 'float'
DATA_TYPE_CUSTOM = 'custom'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_REGISTERS): [{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING):
vol.In([REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]),
vol.Optional(CONF_COUNT, default=1): cv.positive_int,
vol.Optional(CONF_REVERSE_ORDER, default=False): cv.boolean,
vol.Optional(CONF_OFFSET, default=0): vol.Coerce(float),
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(CONF_SCALE, default=1): vol.Coerce(float),
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_INT):
vol.In([DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT,
DATA_TYPE_CUSTOM]),
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string
}]
})
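# Illustrative YAML configuration accepted by the schema above (entity names
# and register numbers are made up):
#
# sensor:
#   - platform: modbus
#     registers:
#       - name: Outside temperature
#         slave: 1
#         register: 100
#         register_type: input
#         count: 2
#         data_type: float
#         scale: 0.1
#         precision: 1
#         unit_of_measurement: °C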
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Modbus sensors."""
sensors = []
    data_types = {
        DATA_TYPE_INT: {1: 'h', 2: 'i', 4: 'q'},
        DATA_TYPE_UINT: {1: 'H', 2: 'I', 4: 'Q'},
        DATA_TYPE_FLOAT: {1: 'e', 2: 'f', 4: 'd'},
    }
for register in config.get(CONF_REGISTERS):
structure = '>i'
if register.get(CONF_DATA_TYPE) != DATA_TYPE_CUSTOM:
try:
structure = '>{}'.format(data_types[
register.get(CONF_DATA_TYPE)][register.get(CONF_COUNT)])
except KeyError:
_LOGGER.error("Unable to detect data type for %s sensor, "
"try a custom type.", register.get(CONF_NAME))
continue
else:
structure = register.get(CONF_STRUCTURE)
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error(
"Error in sensor %s structure: %s",
register.get(CONF_NAME), err)
continue
if register.get(CONF_COUNT) * 2 != size:
            _LOGGER.error(
                "Structure size (%d bytes) does not match the register "
                "count (%d words)", size, register.get(CONF_COUNT))
continue
sensors.append(ModbusRegisterSensor(
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_UNIT_OF_MEASUREMENT),
register.get(CONF_COUNT),
register.get(CONF_REVERSE_ORDER),
register.get(CONF_SCALE),
register.get(CONF_OFFSET),
structure,
register.get(CONF_PRECISION)))
if not sensors:
return False
add_devices(sensors)
class ModbusRegisterSensor(Entity):
"""Modbus register sensor."""
def __init__(self, name, slave, register, register_type,
unit_of_measurement, count, reverse_order, scale, offset,
structure, precision):
"""Initialize the modbus register sensor."""
self._name = name
self._slave = int(slave) if slave else None
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._reverse_order = reverse_order
self._scale = scale
self._offset = offset
self._precision = precision
self._structure = structure
self._value = None
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Update the state of the sensor."""
if self._register_type == REGISTER_TYPE_INPUT:
result = modbus.HUB.read_input_registers(
self._slave,
self._register,
self._count)
else:
result = modbus.HUB.read_holding_registers(
self._slave,
self._register,
self._count)
val = 0
try:
registers = result.registers
if self._reverse_order:
registers.reverse()
except AttributeError:
_LOGGER.error("No response from modbus slave %s, register %s",
self._slave, self._register)
return
byte_string = b''.join(
[x.to_bytes(2, byteorder='big') for x in registers]
)
val = struct.unpack(self._structure, byte_string)[0]
self._value = format(
self._scale * val + self._offset, '.{}f'.format(self._precision))
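# Worked example of the decode path in update() above (hypothetical values):
# two holding registers [0x4248, 0x0000] packed big-endian give the bytes
# b'\x42\x48\x00\x00'; struct.unpack('>f', ...)[0] yields 50.0, and with
# scale=1, offset=0 and precision=1 the reported state becomes '50.0'.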
|
{
"content_hash": "64bb837ed1ae3bdebd31bbc87d34424b",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 77,
"avg_line_length": 34.20454545454545,
"alnum_prop": 0.595514950166113,
"repo_name": "ewandor/home-assistant",
"id": "c4014fbd1dd038f05e4783055364c3de27ed66ef",
"size": "6020",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/modbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
}
|
from django.urls import reverse
from django.contrib.auth.models import User
from rest_framework.test import APITestCase
from mock import patch, call
from .. import models
from sample_app.models import TestRealm, TestAgent
class RealmListViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
url = reverse('realm_list',
kwargs={'realm_alias': 'starsweb'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_success(self):
url = reverse('realm_list',
kwargs={'realm_alias': 'testrealm'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
class RealmRetrieveViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
url = reverse('realm_detail',
kwargs={'realm_alias': 'starsweb',
'pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
url = reverse('realm_detail',
kwargs={'realm_alias': 'testrealm',
'pk': self.realm.pk + 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_success(self):
url = reverse('realm_detail',
kwargs={'realm_alias': 'testrealm',
'pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'),
"sample_app.testrealm")
class GeneratorViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
url = reverse('generator',
kwargs={'realm_alias': 'starsweb',
'realm_pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
url = reverse('generator',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 404)
def test_generator_does_not_exist(self):
url = reverse('generator',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_user_does_not_have_permission(self):
generator = models.Generator(realm=self.realm)
generator.save()
url = reverse('generator',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'),
"sample_app.testrealm")
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.Generator.objects.count(), 1)
def test_success(self):
self.user.is_staff = True
self.user.save()
url = reverse('generator',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {'allow_pauses': True},
follow=True)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.data.get('allow_pauses'), True)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('allow_pauses'), True)
response = self.client.put(url, {'allow_pauses': False}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('allow_pauses'), False)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 204)
self.assertEqual(models.Generator.objects.count(), 0)
class GenerationRuleListViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
url = reverse('generation_rules_list',
kwargs={'realm_alias': 'starsweb',
'realm_pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
url = reverse('generation_rules_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
def test_generator_does_not_exist(self):
url = reverse('generation_rules_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
def test_user_does_not_have_permission(self):
generator = models.Generator(realm=self.realm)
generator.save()
url = reverse('generation_rules_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.GenerationRule.objects.count(), 0)
def test_success(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.user.is_staff = True
self.user.save()
url = reverse('generation_rules_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 0)
response = self.client.post(url, {}, follow=True)
self.assertEqual(response.status_code, 201)
self.assertEqual(models.GenerationRule.objects.count(), 1)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
class GenerationRuleViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
self.generator = models.Generator(realm=self.realm)
self.generator.save()
self.rule = models.GenerationRule(generator=self.generator)
self.rule.save()
def test_realm_type_does_not_exist(self):
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'starsgame',
'realm_pk': 1,
'pk': self.rule.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1,
'pk': self.rule.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 404)
def test_generator_does_not_exist(self):
self.generator.delete()
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'pk': 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# TODO: do we want a 403 when the generator doesn't exist?
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 403)
def test_rule_does_not_exist(self):
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'pk': self.rule.pk + 1})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
# TODO: do we want a 403 when the rule doesn't exist?
response = self.client.put(url, {}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.delete(url, follow=True)
self.assertEqual(response.status_code, 403)
def test_rule_exists(self):
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'pk': self.rule.pk})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('id'), self.rule.id)
self.assertEqual(response.data.get('generator_id'),
self.generator.id)
def test_user_does_not_have_permission(self):
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'pk': self.rule.pk})
response = self.client.put(url, {'freq': 2}, follow=True)
self.assertEqual(response.status_code, 403)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('freq'), 3)
def test_user_has_permission(self):
self.user.is_staff = True
self.user.save()
url = reverse('generation_rule_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'pk': self.rule.pk})
response = self.client.put(url, {'freq': 2}, follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('freq'), 2)
class AgentListViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent = TestAgent(realm=self.realm, slug='bob')
self.agent.save()
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
realm_url = reverse('agent_list',
kwargs={'realm_alias': 'starsweb',
'realm_pk': 1,
'agent_alias': 'testagent'})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
realm_url = reverse('agent_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1,
'agent_alias': 'testagent'})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_generator_does_not_exist(self):
realm_url = reverse('agent_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent'})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_agent_type_does_not_exist(self):
realm_url = reverse('agent_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'starsrace'})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_success(self):
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('agent_list',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent'})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.data), 1)
class AgentRetrieveViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent1 = self.realm.agents.create(slug='agent1')
self.agent2 = self.realm.agents.create(slug='agent2')
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'starsgame',
'realm_pk': 1,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_realm_does_not_exist(self):
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_generator_does_not_exist(self):
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_agent_type_does_not_exist(self):
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'starsrace',
'pk': 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_agent_does_not_exist(self):
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent2.pk + 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
def test_unpaused_unready(self):
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'), 'sample_app.testagent')
self.assertIsNone(response.data.get('pause'))
self.assertIsNone(response.data.get('ready'))
def test_unpaused_ready(self):
generator = models.Generator(realm=self.realm)
generator.save()
ready = models.Ready(agent=self.agent1, generator=generator,
user=self.user)
ready.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'), 'sample_app.testagent')
self.assertIsNone(response.data.get('pause'))
self.assertIsNotNone(response.data.get('ready'))
def test_paused_unready(self):
generator = models.Generator(realm=self.realm)
generator.save()
pause = models.Pause(agent=self.agent1, generator=generator,
user=self.user)
pause.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'), 'sample_app.testagent')
self.assertIsNotNone(response.data.get('pause'))
self.assertIsNone(response.data.get('ready'))
def test_paused_ready(self):
generator = models.Generator(realm=self.realm)
generator.save()
pause = models.Pause(agent=self.agent1, generator=generator,
user=self.user)
pause.save()
ready = models.Ready(agent=self.agent1, generator=generator,
user=self.user)
ready.save()
realm_url = reverse('agent_detail',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data.get('content_type'), 'sample_app.testagent')
self.assertIsNotNone(response.data.get('pause'))
self.assertIsNotNone(response.data.get('ready'))
class PauseViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent1 = self.realm.agents.create(slug='agent1')
self.agent2 = self.realm.agents.create(slug='agent2')
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
self.assertEqual(models.Pause.objects.count(), 0)
realm_url = reverse('pause',
kwargs={'realm_alias': 'starsgame',
'realm_pk': 1,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_realm_does_not_exist(self):
self.assertEqual(models.Pause.objects.count(), 0)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_generator_does_not_exist(self):
self.assertEqual(models.Pause.objects.count(), 0)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_agent_type_does_not_exist(self):
self.assertEqual(models.Pause.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'starsrace',
'agent_pk': 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_agent_does_not_exist(self):
self.assertEqual(models.Pause.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent2.pk + 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_user_does_not_have_permission(self):
self.assertEqual(models.Pause.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 403)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.Pause.objects.count(), 0)
def test_pauses_not_allowed(self):
self.assertEqual(models.Pause.objects.count(), 0)
generator = models.Generator(realm=self.realm,
allow_pauses=False)
generator.save()
self.agent1.user = self.user
self.agent1.save()
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.Pause.objects.count(), 0)
def test_success(self):
self.assertEqual(models.Pause.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 201)
self.assertEqual(models.Pause.objects.count(), 1)
def test_already_paused(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
pause = models.Pause(agent=self.agent1, generator=generator)
pause.save()
self.assertEqual(models.Pause.objects.count(), 1)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertContains(response,
"The fields content_type, object_id, generator must make a"
" unique set.",
status_code=400
)
self.assertEqual(models.Pause.objects.count(), 1)
def test_pauses_not_allowed_can_still_unpause(self):
generator = models.Generator(realm=self.realm,
allow_pauses=False)
generator.save()
self.agent1.user = self.user
self.agent1.save()
pause = models.Pause(agent=self.agent1, generator=generator)
pause.save()
self.assertEqual(models.Pause.objects.count(), 1)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 204)
self.assertEqual(models.Pause.objects.count(), 0)
def test_already_unpaused(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
self.assertEqual(models.Pause.objects.count(), 0)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Pause.objects.count(), 0)
def test_can_pause_while_ready(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
ready = models.Ready(agent=self.agent1, generator=generator)
ready.save()
self.assertEqual(models.Ready.objects.count(), 1)
realm_url = reverse('pause',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.post(realm_url,
{'reason': 'laziness'},
follow=True
)
self.assertEqual(response.status_code, 201)
self.assertEqual(models.Ready.objects.count(), 1)
self.assertEqual(models.Pause.objects.count(), 1)
class ReadyViewTestCase(APITestCase):
def setUp(self):
self.user = User.objects.create_user(username='test',
password='password')
self.realm = TestRealm(slug='500years')
self.realm.save()
self.agent1 = self.realm.agents.create(slug='agent1')
self.agent2 = self.realm.agents.create(slug='agent2')
self.assertTrue(self.client.login(username='test', password='password'))
def test_realm_type_does_not_exist(self):
self.assertEqual(models.Ready.objects.count(), 0)
realm_url = reverse('ready',
kwargs={'realm_alias': 'starsgame',
'realm_pk': 1,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
def test_realm_does_not_exist(self):
self.assertEqual(models.Ready.objects.count(), 0)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk + 1,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
def test_generator_does_not_exist(self):
self.assertEqual(models.Ready.objects.count(), 0)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
def test_agent_type_does_not_exist(self):
self.assertEqual(models.Ready.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'starsrace',
'agent_pk': 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
def test_agent_does_not_exist(self):
self.assertEqual(models.Ready.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent2.pk + 1})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
def test_user_does_not_have_permission(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.assertEqual(models.Ready.objects.count(), 0)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.Ready.objects.count(), 0)
ready = models.Ready(agent=self.agent1, generator=generator)
ready.save()
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.Ready.objects.count(), 1)
def test_success(self):
self.assertEqual(models.Ready.objects.count(), 0)
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 201)
self.assertEqual(models.Ready.objects.count(), 1)
def test_already_ready(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
ready = models.Ready(agent=self.agent1, generator=generator)
ready.save()
self.assertEqual(models.Ready.objects.count(), 1)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 200)
response = self.client.post(realm_url, follow=True)
self.assertContains(response,
"The fields content_type, object_id, generator must make a"
" unique set.",
status_code=400
)
self.assertEqual(models.Ready.objects.count(), 1)
def test_can_mark_ready_while_paused(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
pause = models.Pause(agent=self.agent1, generator=generator,
reason='laziness')
pause.save()
self.assertEqual(models.Ready.objects.count(), 0)
self.assertEqual(models.Pause.objects.count(), 1)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.post(realm_url, follow=True)
self.assertEqual(response.status_code, 201)
self.assertEqual(models.Ready.objects.count(), 1)
self.assertEqual(models.Pause.objects.count(), 1)
def test_already_unready(self):
generator = models.Generator(realm=self.realm)
generator.save()
self.agent1.user = self.user
self.agent1.save()
self.assertEqual(models.Ready.objects.count(), 0)
realm_url = reverse('ready',
kwargs={'realm_alias': 'testrealm',
'realm_pk': self.realm.pk,
'agent_alias': 'testagent',
'agent_pk': self.agent1.pk})
response = self.client.get(realm_url)
self.assertEqual(response.status_code, 404)
response = self.client.delete(realm_url, follow=True)
self.assertEqual(response.status_code, 404)
self.assertEqual(models.Ready.objects.count(), 0)
|
{
"content_hash": "6a891bd59ef3e0f5bba55515c330b0d1",
"timestamp": "",
"source": "github",
"line_count": 1133,
"max_line_length": 83,
"avg_line_length": 38.11297440423654,
"alnum_prop": 0.5466166458246492,
"repo_name": "jbradberry/django-turn-generation",
"id": "994969c3dc9985034bc82d43f2c752718028597a",
"size": "43182",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turngeneration/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2187"
},
{
"name": "Python",
"bytes": "107302"
}
],
"symlink_target": ""
}
|
import datetime
import json
import humanize
import os
import urllib
from operator import itemgetter
from django.http import HttpResponse, HttpResponseNotFound
from django.conf.urls import url
class FileChooser(object):
    def __init__(self, id, basedir, callback=None):
        self.pattern = r'^filechooser/'+id+'/(?P<type>((ajax)|(http)))/(?P<method>((list)|(process)))/(?P<file>.*)$'
        self.id = id
        # TODO: validate that id only contains [a-z0-9]
        self.callback = callback
        self.basedir = os.path.abspath(basedir)
def url_pattern(self):
return url(self.pattern, self.process, name='filechooser_'+self.id)
def process(self, request, file=None, type=None, method=None):
# make sure we can't get higher than the given basedir
path = os.path.join(self.basedir, file)
absolute = os.path.abspath(path)
# path should still contain at least the BASE directory
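        # (note: a plain startswith prefix check also matches sibling
        # directories such as "/base-evil"; os.path.commonpath offers a
        # stricter containment test)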
if not absolute.startswith(self.basedir):
raise EnvironmentError("Do not try to escape from the designated folder")
if method == 'list' and type == 'ajax':
return self.__ajax_list(file)
elif method == 'process' and type == 'http':
if self.callback: return self.callback(file)
else:
return HttpResponseNotFound("<h1>Requested operation not supported in FileChooser</h1>")
def __ajax_list(self, folder):
path = os.path.join(self.basedir, folder)
records = []
for filename in os.listdir(path):
records.append(self.__process_file(folder, filename))
result = {
"data": sorted(records, key=itemgetter('order'))
}
return HttpResponse(json.dumps(result))
def __process_file(self, folder, filename):
fullpath = os.path.join(self.basedir, folder, filename)
mtime = os.path.getmtime(fullpath)
mtime_date = datetime.datetime.fromtimestamp(mtime)
size = os.path.getsize(fullpath)
        natural_size = humanize.naturalsize(size, gnu=True)
sortkey = 'z'
icon = filetype = 'file'
if os.path.isdir(fullpath):
sortkey = 'a' # force the folder in front of the files
filetype = 'folder'
icon = 'folder-open'
natural_size = '-'
size = -1
return {
            'order': sortkey + os.path.join(folder, filename).lower(),
'filename': {
'icon': icon,
'value': os.path.join(folder, filename),
'display': filename,
'type': filetype,
},
'size': {
'value': size,
'display': natural_size
},
'mtime': {
'value': mtime,
'display': humanize.naturaltime(datetime.datetime.now() - mtime_date),
'title': mtime_date.strftime("%x %X")
}
}
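# Hypothetical wiring sketch (identifiers below are illustrative, not part of
# this module):
#
#   from filechooser.filechooser import FileChooser
#
#   chooser = FileChooser('logs', '/var/log', callback=serve_file_view)
#   urlpatterns = [chooser.url_pattern()]
#
# The client can then GET .../filechooser/logs/ajax/list/<subdir> for a JSON
# directory listing, and .../filechooser/logs/http/process/<file> to hand the
# selected path to the callback.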
|
{
"content_hash": "205cbd03b63ff2072716844abe06e2ef",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 116,
"avg_line_length": 32.98888888888889,
"alnum_prop": 0.5678679690131357,
"repo_name": "martinvw/django-filechooser",
"id": "fbcafca661982bb3cad075abbd9c9778affab144",
"size": "2969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "filechooser/filechooser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "526"
},
{
"name": "JavaScript",
"bytes": "3633"
},
{
"name": "Python",
"bytes": "15477"
}
],
"symlink_target": ""
}
|
import swapper
from pytz import utc
from datetime import datetime
from django.db.models import Q
from django.urls import reverse
from accelerator.models import (
CoreProfile,
UserRole)
from accelerator.utils import UserAlert
from accelerator_abstract.models import (
ACTIVE_PANEL_STATUS,
CAPTURE_AVAILABILITY_DISABLED)
CONFIRMED_FOR_MESSAGE = "<h4>%s, you are confirmed for %s:</h4>"
EXPECTING_TO_SEE_YOU_MESSAGE = ("<p> </p><h4>We are expecting "
"to see you at %s.</h4>")
SINGLE_PANEL_MESSAGE = "this upcoming judging panel"
MULTIPLE_PANELS_MESSAGE = "these upcoming judging panels"
PLEASE_INFORM_US_MESSAGE = ("<p> </p><p><b>PLEASE INFORM US IMMEDIATELY "
                            'AT <a href="mailto:%s">%s</a> '
                            "IF YOU ARE NO LONGER ABLE TO MAKE %s. "
"THANKS!</b></p>")
ACTIVE_IN_PERSON_JUDGING_MESSAGE = (
'You are assigned to an active judging panel, please'
' <b><a class="btn btn-primary btn-large" href="%s">'
'go to your judging portal to review applications</a></b>'
'<p> </p>'
)
ONGOING_ONLINE_JUDGING_NOTIFICATION = (
'You are assigned to an active online judging round, please'
'<p> </p><b><a class="btn btn-primary btn-large" href="%s">'
'go to your judging portal to review applications</a></b>'
'<p> </p>'
)
UPDATE_JUDGE_COMMITMENTS_NOTIFICATION = (
    'There are upcoming judging opportunities for you to review. '
'<b><a class="btn btn-primary btn-large" href="%s"> '
'Update your judging commitments</a></b>'
'<p> </p>'
)
SHORT_BIO_MAX_LENGTH = 140
class ExpertProfile(CoreProfile):
user_type = 'expert'
default_page = "expert_homepage"
class Meta:
db_table = 'accelerator_expertprofile'
permissions = (
('change_password', 'Can change users passwords directly'),
)
swappable = swapper.swappable_setting(
CoreProfile.Meta.app_label, "ExpertProfile")
def judge_round_commitments(self):
return self.user.judgeroundcommitment_set.all()
def user_id(self):
return self.user.id
def has_invited_judging_rounds(self):
        # True iff the judge has the desired label for upcoming
        # judging rounds for which they have no commitment object.
        # Note that a commitment object can also record "decline to commit".
JudgingRound = swapper.load_model('accelerator', 'JudgingRound')
return JudgingRound.objects.filter(
start_date_time__gt=utc.localize(datetime.now()),
desired_judge_label__in=self.user.userlabel_set.all()).exclude(
judgeroundcommitment__judge=self.user).exists()
def get_active_assignments(self):
jpas = self.get_all_panel_assignments(round_active=True)
active_jpas = jpas.filter(panel__status=ACTIVE_PANEL_STATUS)
return active_jpas
@classmethod
def mentors(cls, program):
role_name = UserRole.MENTOR
return ExpertProfile.objects.filter(
user__programrolegrant__program_role__program=program,
user__programrolegrant__program_role__user_role__name=role_name)
def get_active_alerts(self, page=None):
"""Return any active alerts for the user, that are relevant for
the current 'page' of the application
"""
alerts = []
panels_url = reverse('panel_listing', urlconf="mc_judge.urls")
if self.has_invited_judging_rounds():
alerts.append(self._invited_judge_alert())
active_rounds = self.active_judging_rounds()
if self.is_judge(state="confirmed") and active_rounds.exists():
alert = UserAlert()
alert.alert_type = 'judge-portal-access'
alert.alert_style = 'success'
alert.message = ACTIVE_IN_PERSON_JUDGING_MESSAGE % panels_url
alerts.append(alert)
        if self.is_judge() and self.get_active_assignments().exists():
            alert = UserAlert()
            alert.alert_type = 'judge-panel-active'
            alert.alert_style = 'warning'
            alert.message = (ONGOING_ONLINE_JUDGING_NOTIFICATION %
                             panels_url)
            alerts.append(alert)
        return alerts
def _invited_judge_alert(self):
manage_url = reverse('manage_commitments')
alert = UserAlert()
alert.alert_type = 'judge-invite'
alert.alert_style = 'success'
alert.message = UPDATE_JUDGE_COMMITMENTS_NOTIFICATION % manage_url
return alert
    @staticmethod
    def _recruiting_judges():
capacity = Q(capture_capacity=True)
availability = ~Q(capture_availability=CAPTURE_AVAILABILITY_DISABLED)
return swapper.load_model('accelerator', 'JudgingRound').objects.filter(
capacity | availability).exists()
|
{
"content_hash": "47aec025fbfb1029a3eab5b2e1c66acc",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 37.36434108527132,
"alnum_prop": 0.6367219917012448,
"repo_name": "masschallenge/django-accelerator",
"id": "b9a7333ceb228410b01ec5a2db37a368e5f78d17",
"size": "4820",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator/models/expert_profile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
}
|
"""TensorFlow-related utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.eager import context
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond as smart_module
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util import tf_contextlib
def smart_cond(pred, true_fn=None, false_fn=None, name=None):
"""Return either `true_fn()` if predicate `pred` is true else `false_fn()`.
If `pred` is a bool or has a constant value, we return either `true_fn()`
or `false_fn()`, otherwise we use `tf.cond` to dynamically route to both.
Arguments:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
name: Optional name prefix when using `tf.cond`.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`.
Raises:
TypeError: If `true_fn` or `false_fn` is not callable.
"""
if isinstance(pred, variables.Variable):
return control_flow_ops.cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
return smart_module.smart_cond(
pred, true_fn=true_fn, false_fn=false_fn, name=name)
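# Example: a static Python bool resolves the branch immediately, while a
# symbolic predicate defers to `tf.cond` (illustrative, graph mode assumed):
#   smart_cond(True, lambda: 1, lambda: 2)   # -> 1, no cond op created
#   smart_cond(some_bool_tensor, lambda: a, lambda: b)  # -> tf.cond output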
def constant_value(pred):
"""Return the bool value for `pred`, or None if `pred` had a dynamic value.
Arguments:
pred: A scalar, either a Python bool or a TensorFlow boolean variable
or tensor, or the Python integer 1 or 0.
Returns:
True or False if `pred` has a constant boolean value, None otherwise.
Raises:
TypeError: If `pred` is not a Variable, Tensor or bool, or Python
integer 1 or 0.
"""
# Allow integer booleans.
if isinstance(pred, int):
if pred == 1:
pred = True
elif pred == 0:
pred = False
if isinstance(pred, variables.Variable):
return None
return smart_module.smart_constant_value(pred)
def is_tensor_or_tensor_list(v):
v = nest.flatten(v)
if v and isinstance(v[0], ops.Tensor):
return True
else:
return False
def get_reachable_from_inputs(inputs, targets=None):
"""Returns the set of tensors/ops reachable from `inputs`.
  Stops if all targets have been found (targets is optional).
Only valid in Symbolic mode, not Eager mode.
Args:
inputs: List of tensors.
targets: List of tensors.
Returns:
A set of tensors reachable from the inputs (includes the inputs themselves).
"""
inputs = nest.flatten(inputs, expand_composites=True)
reachable = object_identity.ObjectIdentitySet(inputs)
if targets:
remaining_targets = object_identity.ObjectIdentitySet(nest.flatten(targets))
queue = inputs[:]
while queue:
x = queue.pop()
if isinstance(x, tuple(_user_convertible_tensor_types)):
# Can't find consumers of user-specific types.
continue
if isinstance(x, ops.Operation):
outputs = x.outputs[:] or []
outputs += x._control_outputs # pylint: disable=protected-access
elif isinstance(x, variables.Variable):
try:
outputs = [x.op]
except AttributeError:
# Variables can be created in an Eager context.
outputs = []
elif tensor_util.is_tensor(x):
outputs = x.consumers()
else:
raise TypeError('Expected Operation, Variable, or Tensor, got ' + str(x))
for y in outputs:
if y not in reachable:
reachable.add(y)
if targets:
remaining_targets.discard(y)
queue.insert(0, y)
if targets and not remaining_targets:
return reachable
return reachable
# This function needs access to private functions of `nest`.
# pylint: disable=protected-access
def map_structure_with_atomic(is_atomic_fn, map_fn, nested):
"""Maps the atomic elements of a nested structure.
Arguments:
is_atomic_fn: A function that determines if an element of `nested` is
atomic.
map_fn: The function to apply to atomic elements of `nested`.
nested: A nested structure.
Returns:
The nested structure, with atomic elements mapped according to `map_fn`.
Raises:
ValueError: If an element that is neither atomic nor a sequence is
encountered.
"""
if is_atomic_fn(nested):
return map_fn(nested)
# Recursively convert.
if not nest.is_sequence(nested):
raise ValueError(
'Received non-atomic and non-sequence element: {}'.format(nested))
if nest._is_mapping(nested):
values = [nested[k] for k in nest._sorted(nested)]
else:
values = nested
mapped_values = [
map_structure_with_atomic(is_atomic_fn, map_fn, ele) for ele in values
]
return nest._sequence_like(nested, mapped_values)
# pylint: enable=protected-access
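# Example use of `map_structure_with_atomic` (illustrative): double every int
# leaf while preserving the container structure:
#   map_structure_with_atomic(lambda e: isinstance(e, int),
#                             lambda e: 2 * e,
#                             {'a': [1, 2], 'b': 3})
#   # -> {'a': [2, 4], 'b': 6}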
def convert_shapes(input_shape, to_tuples=True):
"""Converts nested shape representations to desired format.
Performs:
TensorShapes -> tuples if `to_tuples=True`.
tuples of int or None -> TensorShapes if `to_tuples=False`.
Valid objects to be converted are:
- TensorShapes
- tuples with elements of type int or None.
- ints
- None
Arguments:
input_shape: A nested structure of objects to be converted to TensorShapes.
to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts
all tuples representing shapes to TensorShapes.
Returns:
Nested structure of shapes in desired format.
"""
def _is_shape_component(value):
return value is None or isinstance(value, (int, tensor_shape.Dimension))
def _is_atomic_shape(input_shape):
# Ex: TensorShape or (None, 10, 32) or 5 or `None`
if _is_shape_component(input_shape):
return True
if isinstance(input_shape, tensor_shape.TensorShape):
return True
if (isinstance(input_shape, (tuple, list)) and
all(_is_shape_component(ele) for ele in input_shape)):
return True
return False
def _convert_shape(input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if to_tuples:
input_shape = tuple(input_shape.as_list())
return input_shape
return map_structure_with_atomic(_is_atomic_shape, _convert_shape,
input_shape)
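# Example round trips for `convert_shapes` (illustrative):
#   convert_shapes(tensor_shape.TensorShape([None, 10]))  # -> (None, 10)
#   convert_shapes([(None, 10), (3,)], to_tuples=False)
#   # -> [TensorShape([None, 10]), TensorShape([3])]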
class ListWrapper(object):
"""A wrapper for lists to be treated as elements for `nest`."""
def __init__(self, list_to_wrap):
self._list = list_to_wrap
def as_list(self):
return self._list
def convert_inner_node_data(nested, wrap=False):
"""Either wraps or unwraps innermost node data lists in `ListWrapper` objects.
Arguments:
nested: A nested data structure.
wrap: If `True`, wrap innermost lists in `ListWrapper` objects. If `False`,
unwraps `ListWrapper` objects into lists.
Returns:
Structure of same type as nested, with lists wrapped/unwrapped.
"""
def _is_serialized_node_data(nested):
# Node data can be of form `[layer_name, node_id, tensor_id]` or
# `[layer_name, node_id, tensor_id, kwargs]`.
if (isinstance(nested, list) and (len(nested) in [3, 4]) and
isinstance(nested[0], six.string_types)):
return True
return False
def _is_atomic_nested(nested):
"""Returns `True` if `nested` is a list representing node data."""
if isinstance(nested, ListWrapper):
return True
if _is_serialized_node_data(nested):
return True
return not nest.is_sequence(nested)
def _convert_object_or_list(nested):
"""Convert b/t `ListWrapper` object and list representations."""
if wrap:
if isinstance(nested, ListWrapper):
return nested
if _is_serialized_node_data(nested):
return ListWrapper(nested)
return nested
else:
if isinstance(nested, ListWrapper):
return nested.as_list()
return nested
return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list,
nested)
def shape_type_conversion(fn):
"""Decorator that handles tuple/TensorShape conversion.
Used in `compute_output_shape` and `build`.
Arguments:
fn: function to wrap.
Returns:
Wrapped function.
"""
def wrapper(instance, input_shape):
# Pass shapes as tuples to `fn`
# This preserves compatibility with external Keras.
if input_shape is not None:
input_shape = convert_shapes(input_shape, to_tuples=True)
output_shape = fn(instance, input_shape)
# Return shapes from `fn` as TensorShapes.
if output_shape is not None:
output_shape = convert_shapes(output_shape, to_tuples=False)
return output_shape
return wrapper
def are_all_symbolic_tensors(tensors):
return all(is_symbolic_tensor(tensor) for tensor in tensors)
_user_convertible_tensor_types = set()
def is_symbolic_tensor(tensor):
"""Returns whether a tensor is symbolic (from a TF graph) or an eager tensor.
  A Variable can be seen as either symbolic or eager: it is considered
  symbolic when we are in a graph scope, and eager when we are in an eager
  scope.
Arguments:
tensor: A tensor instance to test.
Returns:
True for symbolic tensors, False for eager tensors.
"""
if isinstance(tensor, tuple(_user_convertible_tensor_types)):
tensor = ops.convert_to_tensor_or_composite(tensor)
if isinstance(tensor, variables.Variable):
# Variables that are output of a Keras Layer in Functional API mode
# should be considered symbolic.
# TODO(omalleyt): We need a better way to check this in order to
# enable `run_eagerly=True` for Models containing Layers that
# return Variables as outputs.
return (getattr(tensor, '_keras_history', False) or
not context.executing_eagerly())
if isinstance(tensor, composite_tensor.CompositeTensor):
component_tensors = nest.flatten(tensor, expand_composites=True)
return any(hasattr(t, 'graph') for t in component_tensors)
if isinstance(tensor, ops.Tensor):
return hasattr(tensor, 'graph')
return False
def register_symbolic_tensor_type(cls):
"""Allows users to specify types regarded as symbolic `Tensor`s.
Used in conjunction with `tf.register_tensor_conversion_function`, calling
`tf.keras.utils.register_symbolic_tensor_type(cls)` allows non-`Tensor`
objects to be plumbed through Keras layers.
Example:
```python
# One-time setup.
class Foo(object):
def __init__(self, input_):
self._input = input_
def value(self):
return tf.constant(42.)
tf.register_tensor_conversion_function(
Foo, lambda x, *args, **kwargs: x.value())
tf.keras.utils.register_symbolic_tensor_type(Foo)
# User-land.
layer = tf.keras.layers.Lambda(lambda input_: Foo(input_))
```
Arguments:
cls: A `class` type which shall be regarded as a symbolic `Tensor`.
"""
global _user_convertible_tensor_types
_user_convertible_tensor_types.add(cls)
def is_tensor_or_variable(x):
return tensor_util.is_tensor(x) or isinstance(x, variables.Variable)
def assert_no_legacy_layers(layers):
"""Prevent tf.layers.Layers from being used with Keras.
Certain legacy layers inherit from their keras analogs; however they are
not supported with keras and can lead to subtle and hard to diagnose bugs.
Args:
layers: A list of layers to check
Raises:
TypeError: If any elements of layers are tf.layers.Layers
"""
# isinstance check for tf.layers.Layer introduces a circular dependency.
legacy_layers = [l for l in layers if getattr(l, '_is_legacy_layer', None)]
if legacy_layers:
layer_str = '\n'.join([' ' + str(l) for l in legacy_layers])
raise TypeError(
'The following are legacy tf.layers.Layers:\n{}\nTo use keras as a '
'framework (for instance using the Network, Model, or Sequential '
'classes), please use the tf.keras.layers implementation instead. '
'(Or, if writing custom layers, subclass from tf.keras.layers rather '
'than tf.layers)'.format(layer_str))
@tf_contextlib.contextmanager
def maybe_init_scope(layer):
"""Open an `init_scope` if in V2 mode and using the keras graph.
Arguments:
layer: The Layer/Model that is currently active.
Yields:
None
"""
# Don't open an init_scope in V1 mode or when using legacy tf.layers.
if (ops.executing_eagerly_outside_functions() and
getattr(layer, '_keras_style', True)):
with ops.init_scope():
yield
else:
yield
@tf_contextlib.contextmanager
def graph_context_for_symbolic_tensors(*args, **kwargs):
"""Returns graph context manager if any of the inputs is a symbolic tensor."""
if any(is_symbolic_tensor(v) for v in list(args) + list(kwargs.values())):
with K.get_graph().as_default():
yield
else:
yield
|
{
"content_hash": "c89f710c883008020ffdc74fe8cf2112",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 80,
"avg_line_length": 30.68075117370892,
"alnum_prop": 0.6912777352716144,
"repo_name": "ppwwyyxx/tensorflow",
"id": "cec7497851f98cfa929de4411558677bc42c7043",
"size": "13759",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/utils/tf_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45318"
},
{
"name": "C",
"bytes": "796611"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76521274"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952883"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1254789"
},
{
"name": "Makefile",
"bytes": "61284"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297774"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38709528"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7469"
},
{
"name": "Shell",
"bytes": "643731"
},
{
"name": "Smarty",
"bytes": "34743"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
"""
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from airy.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
self._lock.acquire()
try:
stream_created = self.open()
for message in email_messages:
self.stream.write('%s\n' % message.message().as_string())
self.stream.write('-'*79)
self.stream.write('\n')
self.stream.flush() # flush after each message
if stream_created:
self.close()
        except Exception:
if not self.fail_silently:
raise
finally:
self._lock.release()
return len(email_messages)
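# Example (a sketch, not part of the original module): routing output to an
# in-memory stream instead of stdout. `message` stands in for any object
# exposing message().as_string(), which is all send_messages() needs.
#
#   import io
#   backend = EmailBackend(stream=io.BytesIO())
#   backend.send_messages([message])
#   print(backend.stream.getvalue())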
|
{
"content_hash": "a07f4595bca966b8454e6d962a4b0d5e",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 73,
"avg_line_length": 33.3235294117647,
"alnum_prop": 0.5728155339805825,
"repo_name": "letolab/airy",
"id": "90edebbbfb23b577e3a4ab7f2e4d4f6c65e05e22",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airy/core/mail/backends/console.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "115012"
},
{
"name": "Python",
"bytes": "678842"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['RelativeDifference'] , ['MovingAverage'] , ['NoCycle'] , ['LSTM'] );
|
{
"content_hash": "0ee78474fa4b006860fc87839c887d7f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 41,
"alnum_prop": 0.725609756097561,
"repo_name": "antoinecarme/pyaf",
"id": "556bb359d003f420021e1cadd4def7181e8d6b68",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_RelativeDifference/model_control_one_enabled_RelativeDifference_MovingAverage_NoCycle_LSTM.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import kasparGUI.Model as Model
models = [
{'class': Model.CustomAction, },
{'class': Model.CustomTrigger, },
{'class': Model.User, },
]
blueprints = []
|
{
"content_hash": "6c2d0608801ec368c697af17fb439857",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.5024630541871922,
"repo_name": "scheunemann/KASPAR",
"id": "ee85ea9cbcb9d5f13df782441c103e730d579eb5",
"size": "203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kasparGUI/Web/api/modules/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11101"
},
{
"name": "JavaScript",
"bytes": "249928"
},
{
"name": "Python",
"bytes": "121201"
},
{
"name": "Shell",
"bytes": "7614"
}
],
"symlink_target": ""
}
|
from ggrc import db
from ggrc.access_control.roleable import Roleable
from ggrc.fulltext.mixin import Indexed
from .mixins import BusinessObject, Timeboxed, CustomAttributable
from .object_owner import Ownable
from .object_person import Personable
from .relationship import Relatable
from .track_object_state import HasObjectState
class OrgGroup(Roleable, HasObjectState, CustomAttributable,
Personable, Relatable, Timeboxed,
Ownable, BusinessObject, Indexed, db.Model):
__tablename__ = 'org_groups'
_aliases = {"url": "Org Group URL"}
def __str__(self):
return self.title
|
{
"content_hash": "cc7f7e71b03e23f095da3414ec442023",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 65,
"avg_line_length": 34.22222222222222,
"alnum_prop": 0.7483766233766234,
"repo_name": "AleksNeStu/ggrc-core",
"id": "e634f36912d7df905ddd488f495edb72edaea37f",
"size": "729",
"binary": false,
"copies": "1",
"ref": "refs/heads/release/0.10-Raspberry",
"path": "src/ggrc/models/org_group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "221201"
},
{
"name": "HTML",
"bytes": "1055542"
},
{
"name": "JavaScript",
"bytes": "1872353"
},
{
"name": "Makefile",
"bytes": "7044"
},
{
"name": "Mako",
"bytes": "4320"
},
{
"name": "Python",
"bytes": "2700938"
},
{
"name": "Shell",
"bytes": "31273"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import bootstrap
import unittest
from nark import *
class TimeTests(unittest.TestCase):
def test_convert_to_timestamp_and_back(self):
a = Assert()
now = DateTime.now()
timestamp = DateTime.as_timestamp(now)
datetime = Timestamp.as_datetime(timestamp)
a.true(datetime == now, "Failed to convert timestamp back")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "25046a315128599d2ce80ecb1011019f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 24.764705882352942,
"alnum_prop": 0.6959619952494062,
"repo_name": "shadowmint/python-nark",
"id": "db9be6135241df047c1309bd213823783f8dce93",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/nark/time_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "86811"
},
{
"name": "Shell",
"bytes": "278"
}
],
"symlink_target": ""
}
|
from suplemon.suplemon_module import Module
class Upper(Module):
def run(self, app, editor, args):
line_nums = []
for cursor in editor.cursors:
if cursor.y not in line_nums:
line_nums.append(cursor.y)
data = editor.lines[cursor.y].get_data().upper()
editor.lines[cursor.y].set_data(data)
module = {
"class": Upper,
"name": "upper",
}
|
{
"content_hash": "a9198d6b3426f2d2ac28d9c04abd32da",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 64,
"avg_line_length": 26.5625,
"alnum_prop": 0.5647058823529412,
"repo_name": "severin31/suplemon",
"id": "d6679ab5fa42adeb12b0d1688aacc3fc7cc4c838",
"size": "448",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "suplemon/modules/upper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "155931"
}
],
"symlink_target": ""
}
|
import argparse
import cgi
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
from devil.android.sdk import intent
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
import devil_chromium
from pylib import constants
import activity_lens
import content_classification_lens
import controller
import device_setup
import frame_load_lens
import loading_model
import loading_trace
import model_graph
import options
# TODO(mattcary): logging.info isn't that useful, as the whole (tools) world
# uses logging info; we need to introduce logging modules to get finer-grained
# output. For now we just do logging.warning.
OPTIONS = options.OPTIONS
def _LoadPage(device, url):
"""Load a page on chrome on our device.
Args:
device: an AdbWrapper for the device on which to load the page.
url: url as a string to load.
"""
load_intent = intent.Intent(
package=OPTIONS.ChromePackage().package,
activity=OPTIONS.ChromePackage().activity,
data=url)
logging.warning('Loading ' + url)
device.StartActivity(load_intent, blocking=True)
def _WriteJson(output, json_data):
"""Write JSON data in a nice way.
Args:
output: a file object
json_data: JSON data as a dict.
"""
json.dump(json_data, output, sort_keys=True, indent=2)
def _GetPrefetchHtml(graph, name=None):
"""Generate prefetch page for the resources in resource graph.
Args:
graph: a ResourceGraph.
name: optional string used in the generated page.
Returns:
HTML as a string containing all the link rel=prefetch directives necessary
for prefetching the given ResourceGraph.
"""
if name:
title = 'Prefetch for ' + cgi.escape(name)
else:
title = 'Generated prefetch page'
output = []
output.append("""<!DOCTYPE html>
<html>
<head>
<title>%s</title>
""" % title)
for info in graph.ResourceInfo():
output.append('<link rel="prefetch" href="%s">\n' % info.Url())
output.append("""</head>
<body>%s</body>
</html>
""" % title)
return '\n'.join(output)
def _LogRequests(url, clear_cache_override=None):
"""Logs requests for a web page.
Args:
url: url to log as string.
clear_cache_override: if not None, set clear_cache different from OPTIONS.
Returns:
JSON dict of logged information (ie, a dict that describes JSON).
"""
if OPTIONS.local:
chrome_ctl = controller.LocalChromeController()
chrome_ctl.SetHeadless(OPTIONS.headless)
else:
chrome_ctl = controller.RemoteChromeController(
device_setup.GetFirstDevice())
clear_cache = (clear_cache_override if clear_cache_override is not None
else OPTIONS.clear_cache)
if OPTIONS.emulate_device:
chrome_ctl.SetDeviceEmulation(OPTIONS.emulate_device)
if OPTIONS.emulate_network:
chrome_ctl.SetNetworkEmulation(OPTIONS.emulate_network)
with chrome_ctl.Open() as connection:
if clear_cache:
connection.ClearCache()
trace = loading_trace.LoadingTrace.RecordUrlNavigation(
url, connection, chrome_ctl.ChromeMetadata())
return trace.ToJsonDict()
def _FullFetch(url, json_output, prefetch):
"""Do a full fetch with optional prefetching."""
if not url.startswith('http') and not url.startswith('file'):
url = 'http://' + url
logging.warning('Cold fetch')
cold_data = _LogRequests(url)
assert cold_data, 'Cold fetch failed to produce data. Check your phone.'
if prefetch:
assert not OPTIONS.local
logging.warning('Generating prefetch')
prefetch_html = _GetPrefetchHtml(
loading_model.ResourceGraph(cold_data), name=url)
tmp = tempfile.NamedTemporaryFile()
tmp.write(prefetch_html)
tmp.flush()
# We hope that the tmpfile name is unique enough for the device.
target = os.path.join('/sdcard/Download', os.path.basename(tmp.name))
device = device_setup.GetFirstDevice()
device.adb.Push(tmp.name, target)
logging.warning('Pushed prefetch %s to device at %s' % (tmp.name, target))
_LoadPage(device, 'file://' + target)
time.sleep(OPTIONS.prefetch_delay_seconds)
logging.warning('Warm fetch')
warm_data = _LogRequests(url, clear_cache_override=False)
with open(json_output, 'w') as f:
_WriteJson(f, warm_data)
logging.warning('Wrote ' + json_output)
with open(json_output + '.cold', 'w') as f:
_WriteJson(f, cold_data)
logging.warning('Wrote ' + json_output + '.cold')
else:
with open(json_output, 'w') as f:
_WriteJson(f, cold_data)
logging.warning('Wrote ' + json_output)
def _ProcessRequests(filename):
with open(filename) as f:
trace = loading_trace.LoadingTrace.FromJsonDict(json.load(f))
content_lens = (
content_classification_lens.ContentClassificationLens.WithRulesFiles(
trace, OPTIONS.ad_rules, OPTIONS.tracking_rules))
frame_lens = frame_load_lens.FrameLoadLens(trace)
activity = activity_lens.ActivityLens(trace)
graph = loading_model.ResourceGraph(
trace, content_lens, frame_lens, activity)
if OPTIONS.noads:
graph.Set(node_filter=graph.FilterAds)
return graph
def InvalidCommand(cmd):
sys.exit('Invalid command "%s"\nChoices are: %s' %
(cmd, ' '.join(COMMAND_MAP.keys())))
def DoPng(arg_str):
OPTIONS.ParseArgs(arg_str, description='Generates a PNG from a trace',
extra=['request_json', ('--png_output', ''),
('--eog', False)])
graph = _ProcessRequests(OPTIONS.request_json)
visualization = model_graph.GraphVisualization(graph)
tmp = tempfile.NamedTemporaryFile()
visualization.OutputDot(tmp)
tmp.flush()
png_output = OPTIONS.png_output
if not png_output:
if OPTIONS.request_json.endswith('.json'):
png_output = OPTIONS.request_json[
:OPTIONS.request_json.rfind('.json')] + '.png'
else:
png_output = OPTIONS.request_json + '.png'
subprocess.check_call(['dot', '-Tpng', tmp.name, '-o', png_output])
logging.warning('Wrote ' + png_output)
if OPTIONS.eog:
subprocess.Popen(['eog', png_output])
tmp.close()
def DoCompare(arg_str):
OPTIONS.ParseArgs(arg_str, description='Compares two traces',
extra=['g1_json', 'g2_json'])
g1 = _ProcessRequests(OPTIONS.g1_json)
g2 = _ProcessRequests(OPTIONS.g2_json)
discrepancies = loading_model.ResourceGraph.CheckImageLoadConsistency(g1, g2)
if discrepancies:
print '%d discrepancies' % len(discrepancies)
print '\n'.join([str(r) for r in discrepancies])
else:
print 'Consistent!'
def DoPrefetchSetup(arg_str):
OPTIONS.ParseArgs(arg_str, description='Sets up prefetch',
extra=['request_json', 'target_html', ('--upload', False)])
graph = _ProcessRequests(OPTIONS.request_json)
with open(OPTIONS.target_html, 'w') as html:
html.write(_GetPrefetchHtml(
graph, name=os.path.basename(OPTIONS.request_json)))
if OPTIONS.upload:
device = device_setup.GetFirstDevice()
destination = os.path.join('/sdcard/Download',
os.path.basename(OPTIONS.target_html))
device.adb.Push(OPTIONS.target_html, destination)
logging.warning(
'Pushed %s to device at %s' % (OPTIONS.target_html, destination))
def DoLogRequests(arg_str):
OPTIONS.ParseArgs(arg_str, description='Logs requests of a load',
extra=['--url', '--output', ('--prefetch', False)])
_FullFetch(url=OPTIONS.url,
json_output=OPTIONS.output,
prefetch=OPTIONS.prefetch)
def DoFetch(arg_str):
OPTIONS.ParseArgs(arg_str,
description=('Fetches SITE into DIR with '
'standard naming that can be processed by '
'./cost_to_csv.py. Both warm and cold '
'fetches are done. SITE can be a full url '
'but the filename may be strange so better '
'to just use a site (ie, domain).'),
extra=['--site', '--dir'])
if not os.path.exists(OPTIONS.dir):
os.makedirs(OPTIONS.dir)
_FullFetch(url=OPTIONS.site,
json_output=os.path.join(OPTIONS.dir, OPTIONS.site + '.json'),
prefetch=True)
def DoLongPole(arg_str):
OPTIONS.ParseArgs(arg_str, description='Calculates long pole',
extra='request_json')
graph = _ProcessRequests(OPTIONS.request_json)
path_list = []
cost = graph.Cost(path_list=path_list)
print '%s (%s)' % (path_list[-1], cost)
def DoNodeCost(arg_str):
OPTIONS.ParseArgs(arg_str,
description='Calculates node cost',
extra='request_json')
graph = _ProcessRequests(OPTIONS.request_json)
print sum((n.NodeCost() for n in graph.Nodes()))
def DoCost(arg_str):
OPTIONS.ParseArgs(arg_str,
description='Calculates total cost',
extra=['request_json', ('--path', False)])
graph = _ProcessRequests(OPTIONS.request_json)
path_list = []
print 'Graph cost: %s' % graph.Cost(path_list)
if OPTIONS.path:
for p in path_list:
print ' ' + p.ShortName()
COMMAND_MAP = {
'png': DoPng,
'compare': DoCompare,
'prefetch_setup': DoPrefetchSetup,
'log_requests': DoLogRequests,
'longpole': DoLongPole,
'nodecost': DoNodeCost,
'cost': DoCost,
'fetch': DoFetch,
}
def main():
logging.basicConfig(level=logging.WARNING)
OPTIONS.AddGlobalArgument(
'local', False,
'run against local desktop chrome rather than device '
'(see also --local_binary and local_profile_dir)')
OPTIONS.AddGlobalArgument(
'noads', False, 'ignore ad resources in modeling')
OPTIONS.AddGlobalArgument(
'ad_rules', '', 'AdBlocker+ ad rules file.')
OPTIONS.AddGlobalArgument(
'tracking_rules', '', 'AdBlocker+ tracking rules file.')
OPTIONS.AddGlobalArgument(
'prefetch_delay_seconds', 5,
'delay after requesting load of prefetch page '
'(only when running full fetch)')
OPTIONS.AddGlobalArgument(
'headless', False, 'Do not display Chrome UI (only works in local mode).')
parser = argparse.ArgumentParser(description='Analyzes loading')
parser.add_argument('command', help=' '.join(COMMAND_MAP.keys()))
parser.add_argument('rest', nargs=argparse.REMAINDER)
args = parser.parse_args()
devil_chromium.Initialize()
COMMAND_MAP.get(args.command,
lambda _: InvalidCommand(args.command))(args.rest)
if __name__ == '__main__':
main()
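# Example invocations (a sketch; the URL and paths are illustrative):
#   python analyze.py log_requests --url http://example.com --output /tmp/ex.json
#   python analyze.py png /tmp/ex.json --eog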
|
{
"content_hash": "aa9a304a38833c17fffcea4539c50f36",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 80,
"avg_line_length": 32.64741641337386,
"alnum_prop": 0.6600875151289451,
"repo_name": "junhuac/MQUIC",
"id": "fd1ba4ef9eb89194044918ff27e843a9121039ae",
"size": "10923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/android/loading/analyze.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "5386"
},
{
"name": "Batchfile",
"bytes": "42909"
},
{
"name": "C",
"bytes": "1168925"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "43919800"
},
{
"name": "CMake",
"bytes": "46379"
},
{
"name": "CSS",
"bytes": "19668"
},
{
"name": "Emacs Lisp",
"bytes": "32613"
},
{
"name": "Go",
"bytes": "7247"
},
{
"name": "Groff",
"bytes": "127224"
},
{
"name": "HTML",
"bytes": "2548385"
},
{
"name": "Java",
"bytes": "1332462"
},
{
"name": "JavaScript",
"bytes": "851006"
},
{
"name": "M4",
"bytes": "29823"
},
{
"name": "Makefile",
"bytes": "459525"
},
{
"name": "Objective-C",
"bytes": "120158"
},
{
"name": "Objective-C++",
"bytes": "330017"
},
{
"name": "PHP",
"bytes": "11283"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "16872234"
},
{
"name": "R",
"bytes": "1842"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Shell",
"bytes": "764509"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "12288"
},
{
"name": "nesC",
"bytes": "14779"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
class TranspilerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.trainer_id = 0
        cls.trainers = 2
        cls.pservers = 2
        cls.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175"
def net_conf(self):
x = fluid.layers.data(name='x', shape=[1000], dtype='float32')
y_predict = fluid.layers.fc(input=x,
size=1000,
act=None,
param_attr=fluid.ParamAttr(name='fc_w'))
y = fluid.layers.data(name='y', shape=[1], dtype='float32')
cost = fluid.layers.square_error_cost(input=y_predict, label=y)
avg_cost = fluid.layers.mean(cost)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1)
optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost)
return optimize_ops, params_grads
def get_main_program(self):
main = fluid.Program()
with fluid.program_guard(main):
self.net_conf()
return main
def get_trainer(self):
return self._transpiler_instance().get_trainer_program()
def get_pserver(self, ep):
t = self._transpiler_instance()
pserver = t.get_pserver_program(ep)
startup = t.get_startup_program(ep, pserver)
return pserver, startup
def _transpiler_instance(self):
main = self.get_main_program()
t = fluid.DistributeTranspiler()
t.transpile(
self.trainer_id,
program=main,
pservers=self.pserver_eps,
trainers=self.trainers)
return t
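# Example (a sketch): a concrete test case built on this base class; the
# assertion is illustrative only.
#
#   class TestBasicModel(TranspilerTest):
#       def test_transpile(self):
#           pserver, startup = self.get_pserver(self.pserver_eps.split(",")[0])
#           trainer = self.get_trainer()
#           self.assertGreater(len(pserver.blocks), 1)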
|
{
"content_hash": "a6954b34fd34a16db2ee73a7d05fa822",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 76,
"avg_line_length": 29.93220338983051,
"alnum_prop": 0.5855039637599094,
"repo_name": "Canpio/Paddle",
"id": "d84c5d9c41c705cf6d14cc0b5a8c692b0d646337",
"size": "2379",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "python/paddle/fluid/tests/unittests/transpiler_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "274629"
},
{
"name": "C++",
"bytes": "4761657"
},
{
"name": "CMake",
"bytes": "209462"
},
{
"name": "CSS",
"bytes": "21730"
},
{
"name": "Cuda",
"bytes": "738162"
},
{
"name": "Go",
"bytes": "99765"
},
{
"name": "HTML",
"bytes": "8941"
},
{
"name": "JavaScript",
"bytes": "1025"
},
{
"name": "Perl",
"bytes": "11452"
},
{
"name": "Protocol Buffer",
"bytes": "54402"
},
{
"name": "Python",
"bytes": "1526791"
},
{
"name": "Shell",
"bytes": "136472"
}
],
"symlink_target": ""
}
|
"""Parallel workflow execution via PBS/Torque
"""
import os
import sys
from .base import (GraphPluginBase, logger)
from ...interfaces.base import CommandLine
class PBSGraphPlugin(GraphPluginBase):
"""Execute using PBS/Torque
    The plugin_args input to run can be used to control the PBS execution.
Currently supported options are:
- template : template to use for batch job submission
- qsub_args : arguments to be prepended to the job execution script in the
qsub call
"""
def __init__(self, **kwargs):
self._template = """
#PBS -V
"""
self._qsub_args = None
if 'plugin_args' in kwargs:
plugin_args = kwargs['plugin_args']
if 'template' in plugin_args:
self._template = plugin_args['template']
if os.path.isfile(self._template):
self._template = open(self._template).read()
if 'qsub_args' in plugin_args:
self._qsub_args = plugin_args['qsub_args']
super(PBSGraphPlugin, self).__init__(**kwargs)
def _submit_graph(self, pyfiles, dependencies):
batch_dir, _ = os.path.split(pyfiles[0])
submitjobsfile = os.path.join(batch_dir, 'submit_jobs.sh')
with open(submitjobsfile, 'wt') as fp:
fp.writelines('#!/usr/bin/env sh\n')
for idx, pyscript in enumerate(pyfiles):
batch_dir, name = os.path.split(pyscript)
name = '.'.join(name.split('.')[:-1])
batchscript = '\n'.join((self._template,
'%s %s' % (sys.executable, pyscript)))
batchscriptfile = os.path.join(batch_dir,
'batchscript_%s.sh' % name)
with open(batchscriptfile, 'wt') as batchfp:
batchfp.writelines(batchscript)
batchfp.close()
deps = ''
if idx in dependencies:
values = ['$job%05d' % jobid for jobid in dependencies[idx]]
if len(values):
deps = '-W depend=afterok:%s' % ':'.join(values)
fp.writelines('job%05d=`qsub %s %s %s`\n' % (idx, deps,
self._qsub_args,
batchscriptfile))
cmd = CommandLine('sh', environ=os.environ.data)
cmd.inputs.args = '%s' % submitjobsfile
cmd.run()
logger.info('submitted all jobs to queue')
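# Example (a sketch): selecting this plugin when running a nipype workflow;
# `wf` stands in for a configured nipype Workflow.
#
#   wf.run(plugin='PBSGraph',
#          plugin_args={'template': '#PBS -V\n#PBS -l walltime=01:00:00',
#                       'qsub_args': '-q batch'})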
|
{
"content_hash": "b4a80d5e528eaa39c144a4beff34c4c8",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 80,
"avg_line_length": 39.42424242424242,
"alnum_prop": 0.5172943889315911,
"repo_name": "christianbrodbeck/nipype",
"id": "c177973d9170196c7bf7c0c06a41d76743c8bbe8",
"size": "2602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipype/pipeline/plugins/pbsgraph.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Matlab",
"bytes": "282"
},
{
"name": "Objective-C",
"bytes": "4736"
},
{
"name": "Python",
"bytes": "2537426"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from ..DynamicDiffusionModel import DynamicDiffusionModel
import numpy as np
import networkx as nx
import future.utils
__author__ = "Giulio Rossetti"
__license__ = "BSD-2-Clause"
__email__ = "giulio.rossetti@gmail.com"
class DynSIRModel(DynamicDiffusionModel):
"""
Model Parameters to be specified via ModelConfig
:param beta: The infection rate (float value in [0,1])
:param gamma: The recovery rate (float value in [0,1])
"""
def __init__(self, graph, seed=None):
"""
Model Constructor
:param graph: A networkx graph object
"""
super(self.__class__, self).__init__(graph, seed)
self.available_statuses = {
"Susceptible": 0,
"Infected": 1,
"Removed": 2
}
self.parameters = {
"model": {
"beta": {
"descr": "Infection rate",
"range": [0, 1],
"optional": False},
"gamma": {
"descr": "Recovery rate",
"range": [0, 1],
"optional": False
}
},
"nodes": {},
"edges": {},
}
self.name = "SIR"
def iteration(self, node_status=True):
"""
Execute a single model iteration
:return: Iteration_id, Incremental node status (dictionary node->status)
"""
self.clean_initial_status(self.available_statuses.values())
actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
# streaming
if self.stream_execution:
u, v = list(self.graph.edges())[0]
u_status = self.status[u]
v_status = self.status[v]
# infection test
if u_status == 1 and v_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[v] = 1
if v_status == 1 and u_status == 0:
p = np.random.random_sample()
if p < self.params['model']['beta']:
actual_status[u] = 1
# removal test
if v_status == 1:
g = np.random.random_sample()
if g < self.params['model']['gamma']:
actual_status[v] = 2
if u_status == 1:
g = np.random.random_sample()
if g < self.params['model']['gamma']:
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
# snapshot
else:
if self.actual_iteration == 0:
self.actual_iteration += 1
delta, node_count, status_delta = self.status_delta(actual_status)
if node_status:
return {"iteration": 0, "status": actual_status.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": 0, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
for u in self.graph.nodes():
u_status = self.status[u]
eventp = np.random.random_sample()
neighbors = self.graph.neighbors(u)
if isinstance(self.graph, nx.DiGraph):
neighbors = self.graph.predecessors(u)
if u_status == 0:
infected_neighbors = len([v for v in neighbors if self.status[v] == 1])
if eventp < self.params['model']['beta'] * infected_neighbors:
actual_status[u] = 1
elif u_status == 1:
if eventp < self.params['model']['gamma']:
actual_status[u] = 2
delta, node_count, status_delta = self.status_delta(actual_status)
self.status = actual_status
self.actual_iteration += 1
if node_status:
return {"iteration": self.actual_iteration - 1, "status": delta.copy(),
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
else:
return {"iteration": self.actual_iteration - 1, "status": {},
"node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
{
"content_hash": "185b325e2d71f3a8f530dddd37740f42",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 97,
"avg_line_length": 36.419117647058826,
"alnum_prop": 0.4910155461336564,
"repo_name": "GiulioRossetti/ndlib",
"id": "1861af556f91f41a9ad85bcdb378b303d1dd00e0",
"size": "4953",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ndlib/models/dynamic/DynSIRModel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "415986"
}
],
"symlink_target": ""
}
|
"""
sentry.management.commands.cleanup
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
help = 'Deletes a portion of trailing data based on creation date'
option_list = BaseCommand.option_list + (
make_option('--days', default='30', type=int, help='Numbers of days to truncate on.'),
make_option('--project', type=int, help='Limit truncation to only entries from project.'),
make_option('--concurrency', type=int, default=1, help='The number of concurrent workers to run.'),
)
def handle(self, **options):
import logging
from sentry.tasks.cleanup import cleanup, logger
if options['verbosity'] > 1:
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
cleanup(
days=options['days'],
project=options['project'],
concurrency=options['concurrency'],
)
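# Example invocation (a sketch, via Django's manage.py entry point):
#   python manage.py cleanup --days=30 --project=1 --concurrency=4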
|
{
"content_hash": "7ab4620d9fff1116fcedb54f1e733ff4",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 107,
"avg_line_length": 34.05714285714286,
"alnum_prop": 0.6426174496644296,
"repo_name": "llonchj/sentry",
"id": "c98a615536f9b0e1a43bcbe496af5ec7e5e006d8",
"size": "1192",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/management/commands/cleanup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "583460"
},
{
"name": "HTML",
"bytes": "311084"
},
{
"name": "JavaScript",
"bytes": "620064"
},
{
"name": "Makefile",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "5910324"
}
],
"symlink_target": ""
}
|
import argparse
import json
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import Evaluation
from pipeline.component import HomoLR
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.interface import Model
from pipeline.utils.tools import load_job_config
def prettify(response, verbose=True):
if verbose:
print(json.dumps(response, indent=4, ensure_ascii=False))
print()
return response
def main(config="../../config.yaml", namespace=""):
if isinstance(config, str):
config = load_job_config(config)
parties = config.parties
guest = parties.guest[0]
hosts = parties.host[0]
arbiter = parties.arbiter[0]
guest_train_data = {"name": "vehicle_scale_homo_guest", "namespace": f"experiment{namespace}"}
host_train_data = {"name": "vehicle_scale_homo_host", "namespace": f"experiment{namespace}"}
# initialize pipeline
pipeline = PipeLine()
# set job initiator
pipeline.set_initiator(role='guest', party_id=guest)
# set participants information
pipeline.set_roles(guest=guest, host=hosts, arbiter=arbiter)
# define Reader components to read in data
reader_0 = Reader(name="reader_0")
# configure Reader for guest
reader_0.get_party_instance(role='guest', party_id=guest).component_param(table=guest_train_data)
# configure Reader for host
reader_0.get_party_instance(role='host', party_id=hosts).component_param(table=host_train_data)
data_transform_0 = DataTransform(name="data_transform_0", output_format='dense', with_label=True)
pipeline.add_component(reader_0)
pipeline.add_component(data_transform_0, data=Data(data=reader_0.output.data))
lr_param = {
"penalty": "L2",
"optimizer": "sgd",
"tol": 1e-05,
"alpha": 0.01,
"early_stop": "diff",
"batch_size": -1,
"learning_rate": 0.15,
"decay": 1,
"decay_sqrt": True,
"init_param": {
"init_method": "zeros"
},
"encrypt_param": {
"method": None
},
"cv_param": {
"n_splits": 4,
"shuffle": True,
"random_seed": 33,
"need_cv": False
},
"callback_param": {
"callbacks": ["ModelCheckpoint", "EarlyStopping"]
}
}
homo_lr_0 = HomoLR(name="homo_lr_0", max_iter=1, **lr_param)
homo_lr_1 = HomoLR(name="homo_lr_1")
pipeline.add_component(homo_lr_0, data=Data(train_data=data_transform_0.output.data))
pipeline.add_component(homo_lr_1, data=Data(test_data=data_transform_0.output.data),
model=Model(model=homo_lr_0.output.model))
evaluation_0 = Evaluation(name="evaluation_0", eval_type="multi")
pipeline.add_component(evaluation_0, data=Data(data=[homo_lr_0.output.data,
homo_lr_1.output.data]))
pipeline.compile()
# fit model
pipeline.fit()
# query component summary
prettify(pipeline.get_component("evaluation_0").get_summary())
return pipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser("PIPELINE DEMO")
parser.add_argument("-config", type=str,
help="config file")
args = parser.parse_args()
if args.config is not None:
main(args.config)
else:
main()
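# Example invocation (the config path is illustrative):
#   python pipeline-homo-lr-one-vs-all.py -config ../../config.yaml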
|
{
"content_hash": "c237befb6379c1681d80b8bc0ba8ea2d",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 101,
"avg_line_length": 32.716981132075475,
"alnum_prop": 0.6242791234140715,
"repo_name": "FederatedAI/FATE",
"id": "dbb419f41a381d2ad383237e3575f9ceff94afc8",
"size": "4085",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pipeline/homo_logistic_regression/pipeline-homo-lr-one-vs-all.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
from flask_philo.serializers import BaseSerializer, uuid_schema
from tests.test_app.models import User
class GetUserSerializer(BaseSerializer):
"""
    Used to serialize GET responses
"""
_schema = {
'type': 'object',
'properties': {
'id': {'type': 'number'},
'email': {'type': 'string', 'format': 'email'},
'username': {'type': 'string'},
'last_login': {'type': 'string', 'format': 'date-time'},
'birthday': {'type': 'string', 'format': 'date'},
}
}
class PostUserSerializer(BaseSerializer):
"""
    Post requests don't require an id, as they are meant to be
    used for creating new objects
"""
_schema = {
'type': 'object',
'properties': {
'email': {'type': 'string', 'format': 'email'},
'username': {'type': 'string'},
'last_login': {'type': 'string', 'format': 'date-time'},
'birthday': {'type': 'string', 'format': 'date'},
'password': {'type': 'string'},
},
'required': ['email', 'username', 'password']
}
class PutUserSerializer(BaseSerializer):
__model__ = User
_schema = {
'type': 'object',
'properties': {
'email': {'type': 'string', 'format': 'email'},
'username': {'type': 'string'},
'id': {'type': 'number'},
'last_login': {'type': 'string', 'format': 'date-time'},
'password': {'type': 'string'},
},
'required': ['id', 'email', 'username']
}
class LoginSerializer(BaseSerializer):
"""
Post requests with login credentials
"""
_schema = {
'type': 'object',
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'email': {'type': 'string', 'format': 'email'}
},
'required': ['email', 'username', 'password']
}
class UUIDSerializer(BaseSerializer):
_schema = {
'definitions': {
'key': uuid_schema
},
'type': 'object',
'properties': {
'key': {'$ref': '#/definitions/key'},
},
'required': ['key']
}
|
{
"content_hash": "f2ae40ea1ea9a3a74164fad2f1a8f617",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 68,
"avg_line_length": 28.050632911392405,
"alnum_prop": 0.4824007220216607,
"repo_name": "maigfrga/flaskutils",
"id": "7a1d224fc8fb2604437c45784a01676a48109a67",
"size": "2216",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "test/tests/test_app/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "171"
},
{
"name": "HTML",
"bytes": "24"
},
{
"name": "Python",
"bytes": "97810"
},
{
"name": "Ruby",
"bytes": "3672"
},
{
"name": "Shell",
"bytes": "2785"
}
],
"symlink_target": ""
}
|
"""CSV single-pass output plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import os
import zipfile
from future.builtins import str
from grr_response_core.lib import utils
from grr_response_core.lib.rdfvalues import structs as rdf_structs
from grr_response_core.lib.util import collection
from grr_response_core.lib.util.compat import csv
from grr_response_core.lib.util.compat import yaml
from grr_response_server import instant_output_plugin
class CSVInstantOutputPlugin(
instant_output_plugin.InstantOutputPluginWithExportConversion):
"""Instant Output plugin that writes results to an archive of CSV files."""
plugin_name = "csv-zip"
friendly_name = "CSV (zipped)"
description = "Output ZIP archive with CSV files."
output_file_extension = ".zip"
ROW_BATCH = 100
def _GetCSVHeader(self, value_class, prefix=u""):
header = []
for type_info in value_class.type_infos:
if isinstance(type_info, rdf_structs.ProtoEmbedded):
header.extend(
self._GetCSVHeader(
type_info.type, prefix=prefix + type_info.name + u"."))
else:
header.append(prefix + type_info.name)
return header
def _GetCSVRow(self, value):
row = []
for type_info in value.__class__.type_infos:
if isinstance(type_info, rdf_structs.ProtoEmbedded):
row.extend(self._GetCSVRow(value.Get(type_info.name)))
else:
row.append(str(value.Get(type_info.name)))
return row
@property
def path_prefix(self):
prefix, _ = os.path.splitext(self.output_file_name)
return prefix
def Start(self):
self.archive_generator = utils.StreamingZipGenerator(
compression=zipfile.ZIP_DEFLATED)
self.export_counts = {}
return []
def ProcessSingleTypeExportedValues(self, original_value_type,
exported_values):
first_value = next(exported_values, None)
if not first_value:
return
yield self.archive_generator.WriteFileHeader(
"%s/%s/from_%s.csv" % (self.path_prefix, first_value.__class__.__name__,
original_value_type.__name__))
writer = csv.Writer()
# Write the CSV header based on first value class and write
# the first value itself. All other values are guaranteed
# to have the same class (see ProcessSingleTypeExportedValues definition).
writer.WriteRow(self._GetCSVHeader(first_value.__class__))
writer.WriteRow(self._GetCSVRow(first_value))
chunk = writer.Content().encode("utf-8")
yield self.archive_generator.WriteFileChunk(chunk)
# Counter starts from 1, as 1 value has already been written.
counter = 1
for batch in collection.Batch(exported_values, self.ROW_BATCH):
counter += len(batch)
writer = csv.Writer()
for value in batch:
writer.WriteRow(self._GetCSVRow(value))
chunk = writer.Content().encode("utf-8")
yield self.archive_generator.WriteFileChunk(chunk)
yield self.archive_generator.WriteFileFooter()
self.export_counts.setdefault(
original_value_type.__name__,
dict())[first_value.__class__.__name__] = counter
def Finish(self):
manifest = {"export_stats": self.export_counts}
manifest_bytes = yaml.Dump(manifest).encode("utf-8")
yield self.archive_generator.WriteFileHeader(self.path_prefix + "/MANIFEST")
yield self.archive_generator.WriteFileChunk(manifest_bytes)
yield self.archive_generator.WriteFileFooter()
yield self.archive_generator.Close()
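# Illustration (hypothetical value class): for an exported value with a nested
# message field, e.g.
#   pathspec {path, pathtype}, st_size
# _GetCSVHeader() flattens the names with dots and returns
#   ["pathspec.path", "pathspec.pathtype", "st_size"]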
|
{
"content_hash": "2def107461a31397dad82c5627484fcc",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 80,
"avg_line_length": 33.27777777777778,
"alnum_prop": 0.6844741235392321,
"repo_name": "demonchild2112/travis-test",
"id": "785ebc31d7116c141ecf81c3dd24f065b57090a2",
"size": "3616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grr/server/grr_response_server/output_plugins/csv_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
}
|
import unittest
from katas.kyu_7.string_chunks import string_chunk
class StringChunkTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(string_chunk('codewars', 2),
['co', 'de', 'wa', 'rs'])
def test_equal_2(self):
self.assertEqual(string_chunk('thiskataeasy', 4),
['this', 'kata', 'easy'])
def test_equal_3(self):
self.assertEqual(string_chunk('hello world', 3),
['hel', 'lo ', 'wor', 'ld'])
def test_equal_4(self):
self.assertEqual(string_chunk('everlong', 100), ['everlong'])
def test_equal_5(self):
self.assertEqual(string_chunk(123), [])
def test_equal_6(self):
self.assertEqual(string_chunk('hello', 'z'), [])
|
{
"content_hash": "8ed257927404c506eca5eab348496570",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 69,
"avg_line_length": 30.346153846153847,
"alnum_prop": 0.5640050697084917,
"repo_name": "the-zebulan/CodeWars",
"id": "e1a0e4e926dbfae24f2b79670a8f0a504744a2d2",
"size": "789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/kyu_7_tests/test_string_chunks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1203000"
}
],
"symlink_target": ""
}
|
"""
A collection of language information for use in FeatureExtraction.
"""
from .language import get, register, Language
from . import english
|
{
"content_hash": "544aeca543bbb5e6c3da56220b4435e0",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 66,
"avg_line_length": 20.714285714285715,
"alnum_prop": 0.7655172413793103,
"repo_name": "wikimedia/Wiki-Class",
"id": "b249c935ac3e65548f29e1fdc0aa14e2a1185ca8",
"size": "145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikiclass/languages/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19922"
}
],
"symlink_target": ""
}
|
import logging
import cgtk_log
log = cgtk_log.cgtk_log(level=logging.INFO)
def load_sheves():
pass
if __name__ == "__main__":
load_sheves()
|
{
"content_hash": "71dfef9229ce08f586621e8170a5d0a5",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 43,
"avg_line_length": 12.75,
"alnum_prop": 0.6274509803921569,
"repo_name": "cineuse/CNCGToolKit",
"id": "cb1be36f669f7388c81f68b4f4635e6f44970824",
"size": "197",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maya/scripts/cgtk_shelf/load_sheves.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "145"
},
{
"name": "Batchfile",
"bytes": "1187"
},
{
"name": "CSS",
"bytes": "15206"
},
{
"name": "Python",
"bytes": "6053232"
},
{
"name": "Shell",
"bytes": "82"
}
],
"symlink_target": ""
}
|
import ShareYourSystem as SYS
#define
MyTeamDict=SYS.Teamer.TeamDict([('a',1),('b',2)])
#print
print('MyTeamDict is ')
SYS._print(MyTeamDict)
#get
print(MyTeamDict.getValue(0))
print(MyTeamDict.getValue(1))
|
{
"content_hash": "a65056b28299acfddfb49e8cca7b378e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 49,
"avg_line_length": 15.142857142857142,
"alnum_prop": 0.7264150943396226,
"repo_name": "Ledoux/ShareYourSystem",
"id": "ca7bb9c82c711cdd41357342755cc83abc20ab2d",
"size": "228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Pythonlogy/build/lib/ShareYourSystem/Standards/Itemizers/Teamer/09_ExampleDoc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "86"
},
{
"name": "C++",
"bytes": "4244220"
},
{
"name": "CSS",
"bytes": "142769"
},
{
"name": "CoffeeScript",
"bytes": "37331"
},
{
"name": "HTML",
"bytes": "36211676"
},
{
"name": "JavaScript",
"bytes": "2147968"
},
{
"name": "Jupyter Notebook",
"bytes": "7930602"
},
{
"name": "Makefile",
"bytes": "6362"
},
{
"name": "PHP",
"bytes": "11096341"
},
{
"name": "Python",
"bytes": "5700092"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "Scala",
"bytes": "2412"
},
{
"name": "Shell",
"bytes": "2525"
},
{
"name": "Swift",
"bytes": "154"
},
{
"name": "TeX",
"bytes": "2556"
},
{
"name": "XSLT",
"bytes": "20993"
}
],
"symlink_target": ""
}
|
import sys, os
sys.path.insert(0, os.path.abspath('../src'))
from vcstools.__version__ import version
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest', 'sphinx.ext.coverage', 'sphinx.ext.autosummary']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'vcstools'
copyright = u'2010, Willow Garage'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'vcstoolsdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'vcstools.tex', u'vcstools Documentation',
u'Tully Foote, Thibault Kruse, Ken Conley', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'vcstools', u'vcstools Documentation',
[u'Tully Foote, Thibault Kruse, Ken Conley'], 1)
]
|
{
"content_hash": "5d281e68b3e1c798387443542eeb90c2",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 84,
"avg_line_length": 32.88834951456311,
"alnum_prop": 0.7076014760147602,
"repo_name": "k-okada/vcstools",
"id": "3c4f45d8c69ed01fa45f293561ed89bb4cd671c6",
"size": "7194",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import sys, argparse
import urllib2, cookielib, httplib
class cookie_settings(object):
def __init__(self, url, port=None, ssl=None, verbosity=True):
self.url = url
self.server = url.split('/')[0]
self.path = '/'+'/'.join(url.split('/')[1:])
self.port = port or (443 if ssl else 80)
self.ssl = ssl
self.verbosity = verbosity
def getconn(self):
if self.ssl:
c = httplib.HTTPSConnection(self.server, self.port, timeout=10)
else:
c = httplib.HTTPConnection(self.server, self.port, timeout=10)
return c
def request(self):
if self.ssl:
url = 'https://'+self.url
else:
url = 'http://'+self.url
r = urllib2.urlopen(url)
return r
def test(self):
r = self.request()
cookies = r.info().getallmatchingheaders("set-cookie")
if len(cookies) == 0:
print "No cookies set..."
else:
for cookie in cookies:
if self.verbosity:
print cookie.strip()
http_only = False
secure = False
written_to_disk = False
crumbs = cookie.split(";")
print "Analyzing: "+crumbs[0].replace("Set-Cookie: ","")
for crumb in crumbs:
if "httponly" in crumb.lower():
http_only = True
if "secure" in crumb.lower():
secure = True
if "expires" in crumb.lower():
written_to_disk = True
if not secure:
print "\tSecure flag is NOT set!"
#else:
# print "\tSecure flag IS set"
if not http_only:
print "\tHttpOnly flag is NOT set!"
#else:
# print "\tHttpOnly flag IS set"
if written_to_disk:
print "\tThe cookie IS written to disk!"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="This is a script that checks cookie settings for a website.")
parser.add_argument("-p","--port", type=int, help="the webserver port")
parser.add_argument("-s","--ssl", action="store_true", help="whether or not to use ssl")
parser.add_argument("-v","--verbose", action="store_true", default=False, help="turn on verbose output")
parser.add_argument("url", help="the URL to test. Remove http://")
args = parser.parse_args()
server = args.url
port = args.port
ssl = args.ssl
verbosity = args.verbose
t = cookie_settings(server, port, ssl, verbosity)
print "Analyzing cookie settings..."
t.test()
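# Example invocations (hosts are illustrative; note the scheme is omitted):
#   python cookie_settings.py -v example.com
#   python cookie_settings.py -s -p 443 example.com/login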
|
{
"content_hash": "b0473bed71129900068df0141e1e05c0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 111,
"avg_line_length": 36.3421052631579,
"alnum_prop": 0.5191889934829833,
"repo_name": "amckenna/fruit_picker",
"id": "65a1e0767d5e022309dbe3a5b73eedfb0cc0ae67",
"size": "2828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/cookie_settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29740"
}
],
"symlink_target": ""
}
|
from window import Window
from mapwindow import MapWindow
from map import Map
from draw import draw
from plot import plot
from gif import GIF
|
{
"content_hash": "c6a6f7fbedae0db6b19a169fd1608731",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 31,
"avg_line_length": 18,
"alnum_prop": 0.8194444444444444,
"repo_name": "geoscript/geoscript-py",
"id": "5a9a17b55683117356f90b80fd419bc93b1f9883",
"size": "144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geoscript/render/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "656"
},
{
"name": "Java",
"bytes": "8264"
},
{
"name": "Python",
"bytes": "262087"
},
{
"name": "Shell",
"bytes": "2173"
}
],
"symlink_target": ""
}
|
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
import json
# related third party imports
import webapp2
import httpagentparser
from webapp2_extras import security
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from webapp2_extras.appengine.auth.models import Unique
from google.appengine.api import taskqueue
from google.appengine.api import users
from google.appengine.api.datastore_errors import BadValueError
from google.appengine.runtime import apiproxy_errors
from github import github
from linkedin import linkedin
from google.appengine.ext import ndb
from google.appengine.api import mail
# local application/library specific imports
import models
import forms as forms
from lib import utils, captcha, twitter
from lib.basehandler import BaseHandler
from lib.decorators import user_required
from lib.decorators import taskqueue_method
from lib import facebook
class LoginRequiredHandler(BaseHandler):
def get(self):
continue_url, = self.request.get('continue', allow_multiple=True)
self.redirect(users.create_login_url(dest_url=continue_url))
class RegisterBaseHandler(BaseHandler):
"""
Base class for handlers with registration and login forms.
"""
@webapp2.cached_property
def form(self):
return forms.RegisterForm(self)
class SendEmailHandler(BaseHandler):
"""
Core Handler for sending Emails
Use with TaskQueue
"""
@taskqueue_method
def post(self):
from google.appengine.api import mail, app_identity
to = self.request.get("to")
subject = self.request.get("subject")
body = self.request.get("body")
sender = self.request.get("sender")
        if sender == '' or not utils.is_email_valid(sender):
if utils.is_email_valid(self.app.config.get('contact_sender')):
sender = self.app.config.get('contact_sender')
else:
app_id = app_identity.get_application_id()
sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender=sender,
to=to,
subject=subject,
body=body,
when=utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
try:
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
except Exception, e:
logging.error("Error sending email: %s" % e)
class LoginHandler(BaseHandler):
"""
Handler for authentication
"""
def get(self):
""" Returns a simple HTML form for login """
if self.user:
self.redirect_to('home')
params = {}
return self.render_template('login.html', **params)
def post(self):
"""
username: Get the username from POST dict
password: Get the password from POST dict
"""
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
continue_url = self.request.get('continue_url').encode('ascii', 'ignore')
try:
if utils.is_email_valid(username):
user = models.User.get_by_email(username)
if user:
auth_id = user.auth_ids[0]
else:
raise InvalidAuthIdError
else:
auth_id = "own:%s" % username
user = models.User.get_by_auth_id(auth_id)
password = self.form.password.data.strip()
remember_me = True if str(self.request.POST.get('remember_me')) == 'on' else False
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
# Try to login user with password
# Raises InvalidAuthIdError if user is not found
# Raises InvalidPasswordError if provided password
# doesn't match with specified user
self.auth.get_user_by_password(
auth_id, password, remember=remember_me)
# if user account is not activated, logout and redirect to home
if (user.activated == False):
# logout
self.auth.unset_session()
# redirect to home with error message
resend_email_uri = self.uri_for('resend-account-activation', user_id=user.get_id(),
token=models.User.create_resend_token(user.get_id()))
message = _('Your account has not yet been activated. Please check your email to activate it or') + \
' <a href="' + resend_email_uri + '">' + _('click here') + '</a> ' + _('to resend the email.')
self.add_message(message, 'error')
return self.redirect_to('home')
# check twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user.key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='twitter',
uid=str(twitter_association_data['id']),
extra_data=twitter_association_data
)
social_user.put()
# check facebook association
fb_data = None
            try:
                fb_data = json.loads(self.session['facebook'])
            except (KeyError, TypeError, ValueError):
                pass
if fb_data is not None:
if models.SocialUser.check_unique(user.key, 'facebook', str(fb_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='facebook',
uid=str(fb_data['id']),
extra_data=fb_data
)
social_user.put()
# check linkedin association
li_data = None
            try:
                li_data = json.loads(self.session['linkedin'])
            except (KeyError, TypeError, ValueError):
                pass
if li_data is not None:
if models.SocialUser.check_unique(user.key, 'linkedin', str(li_data['id'])):
social_user = models.SocialUser(
user=user.key,
provider='linkedin',
uid=str(li_data['id']),
extra_data=li_data
)
social_user.put()
# end linkedin
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Your username or password is incorrect. "
"Please try again (make sure your caps lock is off)")
self.add_message(message, 'error')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.LoginForm(self)
class SocialLoginHandler(BaseHandler):
"""
Handler for Social authentication
"""
def get(self, provider_name):
provider = self.provider_info[provider_name]
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
if provider_name == "twitter":
twitter_helper = twitter.TwitterAuth(self, redirect_uri=callback_url)
self.redirect(twitter_helper.auth_url())
elif provider_name == "facebook":
self.session['linkedin'] = None
perms = ['email', 'publish_stream']
self.redirect(facebook.auth_url(self.app.config.get('fb_api_key'), callback_url, perms))
elif provider_name == 'linkedin':
self.session['facebook'] = None
authentication = linkedin.LinkedInAuthentication(
self.app.config.get('linkedin_api'),
self.app.config.get('linkedin_secret'),
callback_url,
[linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
self.redirect(authentication.authorization_url)
elif provider_name == "github":
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'),
self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'),
self.app.config.get('github_redirect_uri'), scope)
self.redirect(github_helper.get_authorize_url())
elif provider_name in models.SocialUser.open_id_providers():
continue_url = self.request.get('continue_url')
if continue_url:
dest_url = self.uri_for('social-login-complete', provider_name=provider_name, continue_url=continue_url)
else:
dest_url = self.uri_for('social-login-complete', provider_name=provider_name)
try:
login_url = users.create_login_url(federated_identity=provider['uri'], dest_url=dest_url)
self.redirect(login_url)
except users.NotAllowedError:
                self.add_message('You must enable Federated Login for this application first.<br> '
'<a href="http://appengine.google.com" target="_blank">Google App Engine Control Panel</a> -> '
'Administration -> Application Settings -> Authentication Options', 'error')
self.redirect_to('login')
else:
message = _('%s authentication is not yet implemented.' % provider.get('label'))
self.add_message(message, 'warning')
self.redirect_to('login')
class CallbackSocialLoginHandler(BaseHandler):
"""
Callback (Save Information) for Social Authentication
"""
def get(self, provider_name):
if not self.app.config.get('enable_federated_login'):
message = _('Federated login is disabled.')
self.add_message(message, 'warning')
return self.redirect_to('login')
continue_url = self.request.get('continue_url')
if provider_name == "twitter":
oauth_token = self.request.get('oauth_token')
oauth_verifier = self.request.get('oauth_verifier')
twitter_helper = twitter.TwitterAuth(self)
user_data = twitter_helper.auth_complete(oauth_token,
oauth_verifier)
logging.info('twitter user_data: ' + str(user_data))
if self.user:
# new association with twitter
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'twitter', str(user_data['user_id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='twitter',
uid=str(user_data['user_id']),
extra_data=user_data
)
social_user.put()
message = _('Twitter association added.')
self.add_message(message, 'success')
else:
message = _('This Twitter account is already in use.')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with twitter
social_user = models.SocialUser.get_by_provider_and_uid('twitter',
str(user_data['user_id']))
if social_user:
                    # Social user exists. Need to authenticate the related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['user_id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# github association
elif provider_name == "github":
# get our request code back from the social login handler above
code = self.request.get('code')
# create our github auth object
scope = 'gist'
github_helper = github.GithubAuth(self.app.config.get('github_server'),
self.app.config.get('github_client_id'), \
self.app.config.get('github_client_secret'),
self.app.config.get('github_redirect_uri'), scope)
# retrieve the access token using the code and auth object
access_token = github_helper.get_access_token(code)
user_data = github_helper.get_user_info(access_token)
logging.info('github user_data: ' + str(user_data))
if self.user:
                # user is already logged in, so add a new association with github
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'github', str(user_data['login'])):
social_user = models.SocialUser(
user=user_info.key,
provider='github',
uid=str(user_data['login']),
extra_data=user_data
)
social_user.put()
message = _('Github association added.')
self.add_message(message, 'success')
else:
message = _('This Github account is already in use.')
self.add_message(message, 'error')
self.redirect_to('edit-profile')
else:
# user is not logged in, but is trying to log in via github
social_user = models.SocialUser.get_by_provider_and_uid('github', str(user_data['login']))
if social_user:
                    # Social user exists. Need to authenticate the related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
self.redirect_to('home')
else:
                    uid = str(user_data['login'])  # match the get_by_provider_and_uid lookup above
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end github
# facebook association
elif provider_name == "facebook":
code = self.request.get('code')
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
token = facebook.get_access_token_from_code(code, callback_url, self.app.config.get('fb_api_key'),
self.app.config.get('fb_secret'))
access_token = token['access_token']
fb = facebook.GraphAPI(access_token)
user_data = fb.get_object('me')
logging.info('facebook user_data: ' + str(user_data))
if self.user:
# new association with facebook
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'facebook', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='facebook',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Facebook association added!')
self.add_message(message, 'success')
else:
message = _('This Facebook account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Facebook
social_user = models.SocialUser.get_by_provider_and_uid('facebook',
str(user_data['id']))
if social_user:
                    # Social user exists. Need to authenticate the related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
# end facebook
# association with linkedin
elif provider_name == "linkedin":
callback_url = "%s/social_login/%s/complete" % (self.request.host_url, provider_name)
authentication = linkedin.LinkedInAuthentication(
self.app.config.get('linkedin_api'),
self.app.config.get('linkedin_secret'),
callback_url,
[linkedin.PERMISSIONS.BASIC_PROFILE, linkedin.PERMISSIONS.EMAIL_ADDRESS])
authentication.authorization_code = self.request.get('code')
access_token = authentication.get_access_token()
link = linkedin.LinkedInApplication(authentication)
u_data = link.get_profile(selectors=['id', 'first-name', 'last-name', 'email-address'])
user_data = {
'first_name': u_data.get('firstName'),
'last_name': u_data.get('lastName'),
'id': u_data.get('id'),
'email': u_data.get('emailAddress')}
self.session['linkedin'] = json.dumps(user_data)
logging.info('linkedin user_data: ' + str(user_data))
if self.user:
# new association with linkedin
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, 'linkedin', str(user_data['id'])):
social_user = models.SocialUser(
user=user_info.key,
provider='linkedin',
uid=str(user_data['id']),
extra_data=user_data
)
social_user.put()
message = _('Linkedin association added!')
self.add_message(message, 'success')
else:
message = _('This Linkedin account is already in use!')
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with Linkedin
social_user = models.SocialUser.get_by_provider_and_uid('linkedin',
str(user_data['id']))
if social_user:
                    # Social user exists. Need to authenticate the related site account
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
uid = str(user_data['id'])
email = str(user_data.get('email'))
self.create_account_from_social_provider(provider_name, uid, email, continue_url, user_data)
#end linkedin
# google, myopenid, yahoo OpenID Providers
elif provider_name in models.SocialUser.open_id_providers():
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
# get info passed from OpenId Provider
from google.appengine.api import users
current_user = users.get_current_user()
if current_user:
if current_user.federated_identity():
uid = current_user.federated_identity()
else:
uid = current_user.user_id()
email = current_user.email()
else:
message = _('No user authentication information received from %s. '
'Please ensure you are logging in from an authorized OpenID Provider (OP).'
% provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to(
'login')
if self.user:
# add social account to user
user_info = models.User.get_by_id(long(self.user_id))
if models.SocialUser.check_unique(user_info.key, provider_name, uid):
social_user = models.SocialUser(
user=user_info.key,
provider=provider_name,
uid=uid
)
social_user.put()
message = _('%s association successfully added.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
else:
# login with OpenId Provider
social_user = models.SocialUser.get_by_provider_and_uid(provider_name, uid)
if social_user:
# Social user found. Authenticate the user
user = social_user.user.get()
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('home')
else:
self.create_account_from_social_provider(provider_name, uid, email, continue_url)
else:
message = _('This authentication method is not yet implemented.')
self.add_message(message, 'warning')
self.redirect_to('login', continue_url=continue_url) if continue_url else self.redirect_to('login')
def create_account_from_social_provider(self, provider_name, uid, email=None, continue_url=None, user_data=None):
"""Social user does not exist yet so create it with the federated identity provided (uid)
and create prerequisite user and log the user account in
"""
provider_display_name = models.SocialUser.PROVIDERS_INFO[provider_name]['label']
if models.SocialUser.check_unique_uid(provider_name, uid):
# create user
# Returns a tuple, where first value is BOOL.
# If True ok, If False no new user is created
# Assume provider has already verified email address
# if email is provided so set activated to True
auth_id = "%s:%s" % (provider_name, uid)
if email:
unique_properties = ['email']
user_info = self.auth.store.user_model.create_user(
auth_id, unique_properties, email=email,
activated=True
)
else:
user_info = self.auth.store.user_model.create_user(
auth_id, activated=True
)
if not user_info[0]: #user is a tuple
message = _('The account %s is already in use.' % provider_display_name)
self.add_message(message, 'error')
return self.redirect_to('register')
user = user_info[1]
# create social user and associate with user
social_user = models.SocialUser(
user=user.key,
provider=provider_name,
uid=uid,
)
if user_data:
social_user.extra_data = user_data
self.session[provider_name] = json.dumps(user_data) # TODO is this needed?
social_user.put()
# authenticate user
self.auth.set_session(self.auth.store.user_to_dict(user), remember=True)
if self.app.config['log_visit']:
try:
logVisit = models.LogVisit(
user=user.key,
uastring=self.request.user_agent,
ip=self.request.remote_addr,
timestamp=utils.get_date_time()
)
logVisit.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Visit Log in datastore")
message = _(
'Welcome! You have been registered as a new user through %s and logged in.' % provider_display_name)
self.add_message(message, 'success')
else:
message = _('This %s account is already in use.' % provider_display_name)
self.add_message(message, 'error')
if continue_url:
self.redirect(continue_url)
else:
self.redirect_to('edit-profile')
class DeleteSocialProviderHandler(BaseHandler):
"""
Delete Social association with an account
"""
@user_required
def post(self, provider_name):
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
if len(user_info.get_social_providers_info()['used']) > 1 or (user_info.password is not None):
social_user = models.SocialUser.get_by_user_and_provider(user_info.key, provider_name)
if social_user:
social_user.key.delete()
message = _('%s successfully disassociated.' % provider_name)
self.add_message(message, 'success')
else:
message = _('Social account on %s not found for this user.' % provider_name)
self.add_message(message, 'error')
else:
                message = _('Social account on %s cannot be deleted for this user.'
                            ' Please create a username and password before deleting the social account.' % provider_name)
self.add_message(message, 'error')
self.redirect_to('edit-profile')
class LogoutHandler(BaseHandler):
"""
Destroy user session and redirect to login
"""
def get(self):
if self.user:
message = _("You've signed out successfully. Warning: Please clear all cookies and logout "
"of OpenId providers too if you logged in on a public computer.")
self.add_message(message, 'info')
self.auth.unset_session()
# User is logged out, let's try redirecting to login page
try:
self.redirect(self.auth_config['login_url'])
except (AttributeError, KeyError), e:
logging.error("Error logging out: %s" % e)
message = _("User is logged out, but there was an error on the redirection.")
self.add_message(message, 'error')
return self.redirect_to('home')
class RegisterHandler(BaseHandler):
"""
Handler for Sign Up Users
"""
def get(self):
""" Returns a simple HTML form for create a new user """
        if self.user:
            return self.redirect_to('home')
params = {}
return self.render_template('register.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
email = self.form.email.data.lower()
password = self.form.password.data.strip()
country = self.form.country.data
tz = self.form.tz.data
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
# Passing password_raw=password so password will be hashed
# Returns a tuple, where first value is BOOL.
# If True ok, If False no new user is created
unique_properties = ['username', 'email']
auth_id = "own:%s" % username
user = self.auth.store.user_model.create_user(
auth_id, unique_properties, password_raw=password,
username=username, name=name, last_name=last_name, email=email,
ip=self.request.remote_addr, country=country, tz=tz
)
if not user[0]: #user is a tuple
if "username" in str(user[1]):
message = _(
'Sorry, The username <strong>{}</strong> is already registered.').format(username)
elif "email" in str(user[1]):
message = _('Sorry, The email <strong>{}</strong> is already registered.').format(email)
else:
message = _('Sorry, The user is already registered.')
self.add_message(message, 'error')
return self.redirect_to('register')
else:
            # User registered successfully.
            # If the user registered using the form, they must confirm their
            # email address before the account is activated.
try:
if not user[1].activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id=user[1].get_id(),
token=models.User.create_auth_token(user[1].get_id()),
_full=True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': str(email),
'subject': subject,
'body': body,
})
message = _('You were successfully registered. '
'Please check your email to activate your account.')
self.add_message(message, 'success')
return self.redirect_to('home')
                # The user is already activated (e.g. social signup), so log them in directly
db_user = self.auth.get_user_by_password(user[1].auth_ids[0], password)
# Check Twitter association in session
twitter_helper = twitter.TwitterAuth(self)
twitter_association_data = twitter_helper.get_association_data()
if twitter_association_data is not None:
if models.SocialUser.check_unique(user[1].key, 'twitter', str(twitter_association_data['id'])):
social_user = models.SocialUser(
user=user[1].key,
provider='twitter',
uid=str(twitter_association_data['id']),
extra_data=twitter_association_data
)
social_user.put()
                # check Facebook association
                fb_data = None
                try:
                    fb_data = json.loads(self.session['facebook'])
                except (KeyError, TypeError, ValueError):
                    pass
                if fb_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'facebook', str(fb_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='facebook',
                            uid=str(fb_data['id']),
                            extra_data=fb_data
                        )
                        social_user.put()
                # check LinkedIn association
                li_data = None
                try:
                    li_data = json.loads(self.session['linkedin'])
                except (KeyError, TypeError, ValueError):
                    pass
                if li_data is not None:
                    if models.SocialUser.check_unique(user[1].key, 'linkedin', str(li_data['id'])):
                        social_user = models.SocialUser(
                            user=user[1].key,
                            provider='linkedin',
                            uid=str(li_data['id']),
                            extra_data=li_data
                        )
                        social_user.put()
message = _('Welcome <strong>{}</strong>, you are now logged in.').format(username)
self.add_message(message, 'success')
return self.redirect_to('home')
except (AttributeError, KeyError), e:
                logging.error('Unexpected error creating the user %s: %s' % (username, e))
message = _('Unexpected error creating the user %s' % username)
self.add_message(message, 'error')
return self.redirect_to('home')
@webapp2.cached_property
def form(self):
f = forms.RegisterForm(self)
f.country.choices = self.countries_tuple
f.tz.choices = self.tz
return f
class AccountActivationHandler(BaseHandler):
"""
Handler for account activation
"""
def get(self, user_id, token):
try:
if not models.User.validate_auth_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = models.User.get_by_id(long(user_id))
# activate the user's account
user.activated = True
user.put()
# Login User
self.auth.get_user_by_token(int(user_id), token)
# Delete token
models.User.delete_auth_token(user_id, token)
message = _('Congratulations, Your account <strong>{}</strong> has been successfully activated.').format(
user.username)
self.add_message(message, 'success')
self.redirect_to('home')
except (AttributeError, KeyError, InvalidAuthIdError, NameError), e:
logging.error("Error activating an account: %s" % e)
message = _('Sorry, Some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class ResendActivationEmailHandler(BaseHandler):
"""
Handler to resend activation email
"""
def get(self, user_id, token):
try:
if not models.User.validate_resend_token(user_id, token):
message = _('The link is invalid.')
self.add_message(message, 'error')
return self.redirect_to('home')
user = models.User.get_by_id(long(user_id))
email = user.email
            if not user.activated:
# send email
subject = _("%s Account Verification" % self.app.config.get('app_name'))
confirmation_url = self.uri_for("account-activation",
user_id=user.get_id(),
token=models.User.create_auth_token(user.get_id()),
_full=True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"username": user.username,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
body_path = "emails/account_activation.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': str(email),
'subject': subject,
'body': body,
})
models.User.delete_resend_token(user_id, token)
message = _('The verification email has been resent to %s. '
'Please check your email to activate your account.' % email)
self.add_message(message, 'success')
return self.redirect_to('home')
else:
message = _('Your account has been activated. Please <a href="/login/">sign in</a> to your account.')
self.add_message(message, 'warning')
return self.redirect_to('home')
except (KeyError, AttributeError), e:
logging.error("Error resending activation email: %s" % e)
message = _('Sorry, Some error occurred.')
self.add_message(message, 'error')
return self.redirect_to('home')
class ContactHandler(BaseHandler):
"""
Handler for Contact Form
"""
def get(self):
""" Returns a simple HTML for contact form """
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
            if user_info.name or user_info.last_name:
                self.form.name.data = " ".join(
                    filter(None, [user_info.name, user_info.last_name]))
if user_info.email:
self.form.email.data = user_info.email
params = {
"exception": self.request.get('exception')
}
return self.render_template('contact.html', **params)
def post(self):
""" validate contact form """
if not self.form.validate():
return self.get()
remoteip = self.request.remote_addr
user_agent = self.request.user_agent
exception = self.request.POST.get('exception')
name = self.form.name.data.strip()
email = self.form.email.data.lower()
message = self.form.message.data.strip()
try:
# parsing user_agent and getting which os key to use
# windows uses 'os' while other os use 'flavor'
ua = httpagentparser.detect(user_agent)
            _os = 'flavor' if 'flavor' in ua else 'os'
operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-"
if 'version' in ua[_os]:
operating_system += ' ' + str(ua[_os]['version'])
if 'dist' in ua:
operating_system += ' ' + str(ua['dist'])
browser = str(ua['browser']['name']) if 'browser' in ua else "-"
browser_version = str(ua['browser']['version']) if 'browser' in ua else "-"
template_val = {
"name": name,
"email": email,
"browser": browser,
"browser_version": browser_version,
"operating_system": operating_system,
"ip": remoteip,
"message": message
}
except Exception as e:
logging.error("error getting user agent info: %s" % e)
try:
subject = _("Contact")
# exceptions for error pages that redirect to contact
if exception != "":
subject = subject + " (Exception error: %s)" % exception
body_path = "emails/contact.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': self.app.config.get('contact_recipient'),
'subject': subject,
'body': body,
'sender': self.app.config.get('contact_sender'),
})
message = _('Your message was sent successfully.')
self.add_message(message, 'success')
return self.redirect_to('contact')
except (AttributeError, KeyError), e:
logging.error('Error sending contact form: %s' % e)
message = _('Error sending the message. Please try again later.')
self.add_message(message, 'error')
return self.redirect_to('contact')
@webapp2.cached_property
def form(self):
return forms.ContactForm(self)
class CategoriesHandler(BaseHandler):
    # Handler for Category
    def get(self):
        # Sends the visitor to the categories page
return self.render_template('categories.html')
class AboutHandler(BaseHandler):
    # Handler for About
    def get(self):
        # Sends the visitor to the about page
return self.render_template('about.html')
class PoliciesHandler(BaseHandler):
    # Handler for Policies
    def get(self):
        # Sends the visitor to the policies page
return self.render_template('policies.html')
class MyProfileHandler(BaseHandler):
# Handler for a User's profile
def get(self):
# Sends the user to the user profile page
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
allItems = models.Item.query()
userItems = allItems.filter(models.Item.user == user_info.key)
#itemNames = userItems.fetch(projection=["title"])
if userItems.count() > 0:
return self.render_template('profile.html', uploadedItems = userItems, added = True)
return self.render_template('profile.html', uploadedItems = userItems, added = False)
self.redirect_to('register')
class AddItemHandler(BaseHandler):
# Handler for adding an item to a User's database
def post(self):
# Sends the user to the user profile page
if self.user:
item = models.Item()
item.title = self.request.get("item-name")
item.description = self.request.get("item-description")
item.price = self.request.get("item-price")
user_info = models.User.get_by_id(long(self.user_id))
item.user = user_info.key
item.username = user_info.username
item.email = user_info.email
item.put()
allItems = models.Item.query()
userItems = allItems.filter(models.Item.user == user_info.key)
return self.render_template('profile.html', uploadedItems = userItems, added = True)
self.redirect_to('home')
class EditItemIntermediaryHandler(BaseHandler):
    # Intermediary Handler for editing a User's items
    def get(self):
        # Sends the visitor to the edit item page
if self.user:
item_to_change = self.request.get("item-to-edit")
key = ndb.Key(urlsafe=item_to_change)
old_item = key.get()
return self.render_template('edit_item.html', item_to_edit = old_item)
self.redirect_to('home')
class EditItemHandler(BaseHandler):
    # Handler for editing a User's items
def post(self):
# Edits the item's data
if self.user:
item_to_change = self.request.get("old-item-key")
key = ndb.Key(urlsafe=item_to_change)
old_item = key.get()
old_item.title = self.request.get("new-item-name")
old_item.description = self.request.get("new-item-description")
old_item.price = self.request.get("new-item-price")
old_item.put()
user_info = models.User.get_by_id(long(self.user_id))
allItems = models.Item.query()
userItems = allItems.filter(models.Item.user == user_info.key)
return self.render_template('profile.html', uploadedItems = userItems, added = True)
self.redirect_to('home')
class DeleteItemHandler(BaseHandler):
    # Handler for deleting a User's items
    def post(self):
        # Deletes the given item
if self.user:
item_to_delete = self.request.get('item-to-delete')
key = ndb.Key(urlsafe=item_to_delete)
old_item = key.get()
old_item.key.delete()
user_info = models.User.get_by_id(long(self.user_id))
allItems = models.Item.query()
userItems = allItems.filter(models.Item.user == user_info.key)
return self.render_template('profile.html', uploadedItems = userItems, added = True)
self.redirect_to('home')
class ViewProfileHandler(BaseHandler):
    # Handler for public profiles
    def get(self, username):
        # Sends the visitor to the profile page
name = username
allItems = models.Item.query()
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
if user_info.username == name:
userItems = allItems.filter(models.Item.user == user_info.key)
return self.render_template('public_profile.html', user = name, items = userItems, address = user_info.email)
allUsers = models.User.query()
tempUser = allUsers.filter(models.User.username == name)
tempItems = allItems.filter(models.Item.username == name)
return self.render_template('public_profile.html', user = name, items = tempItems, address = tempUser.get().email)
class EmailUserHandler(BaseHandler):
    # Handler for users emailing other users
def post(self, username):
from google.appengine.api import mail, app_identity
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
to = self.request.get("email-to")
subject = self.request.get("email-subject")
body = user_info.email + " just sent you a message through AirShareBeta: " + self.request.get("email-body")
app_id = app_identity.get_application_id()
sender = "AirShareBeta <no-reply@%s.appspotmail.com>" % (app_id)
if self.app.config['log_email']:
try:
logEmail = models.LogEmail(
sender=sender,
to=to,
subject=subject,
body=body,
when=utils.get_date_time("datetimeProperty")
)
logEmail.put()
except (apiproxy_errors.OverQuotaError, BadValueError):
logging.error("Error saving Email Log in datastore")
try:
message = mail.EmailMessage()
message.sender = sender
message.to = to
message.subject = subject
message.html = body
message.send()
except Exception, e:
logging.error("Error sending email: %s" % e)
self.redirect_to('home')
class EditProfileHandler(BaseHandler):
"""
Handler for Edit User Profile
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit profile """
params = {}
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
self.form.username.data = user_info.username
self.form.name.data = user_info.name
self.form.last_name.data = user_info.last_name
self.form.country.data = user_info.country
self.form.tz.data = user_info.tz
providers_info = user_info.get_social_providers_info()
if not user_info.password:
params['local_account'] = False
else:
params['local_account'] = True
params['used_providers'] = providers_info['used']
params['unused_providers'] = providers_info['unused']
params['country'] = user_info.country
params['tz'] = user_info.tz
return self.render_template('edit_profile.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
username = self.form.username.data.lower()
name = self.form.name.data.strip()
last_name = self.form.last_name.data.strip()
country = self.form.country.data
tz = self.form.tz.data
try:
user_info = models.User.get_by_id(long(self.user_id))
try:
message = ''
# update username if it has changed and it isn't already taken
if username != user_info.username:
user_info.unique_properties = ['username', 'email']
uniques = [
'User.username:%s' % username,
'User.auth_id:own:%s' % username,
]
# Create the unique username and auth_id.
success, existing = Unique.create_multi(uniques)
if success:
# free old uniques
Unique.delete_multi(
['User.username:%s' % user_info.username, 'User.auth_id:own:%s' % user_info.username])
# The unique values were created, so we can save the user.
user_info.username = username
user_info.auth_ids[0] = 'own:%s' % username
message += _('Your new username is <strong>{}</strong>').format(username)
else:
message += _(
'The username <strong>{}</strong> is already taken. Please choose another.').format(
username)
# At least one of the values is not unique.
self.add_message(message, 'error')
return self.get()
user_info.name = name
user_info.last_name = last_name
user_info.country = country
user_info.tz = tz
user_info.put()
message += " " + _('Thanks, your settings have been saved.')
self.add_message(message, 'success')
return self.get()
except (AttributeError, KeyError, ValueError), e:
                logging.error('Error updating profile: %s' % e)
message = _('Unable to update profile. Please try again later.')
self.add_message(message, 'error')
return self.get()
except (AttributeError, TypeError), e:
login_error_message = _('Sorry you are not logged in.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
f = forms.EditProfileForm(self)
f.country.choices = self.countries_tuple
f.tz.choices = self.tz
return f
class EditPasswordHandler(BaseHandler):
"""
Handler for Edit User Password
"""
@user_required
def get(self):
""" Returns a simple HTML form for editing password """
params = {}
return self.render_template('edit_password.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
current_password = self.form.current_password.data.strip()
password = self.form.password.data.strip()
try:
user_info = models.User.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
current_password = utils.hashing(current_password, self.app.config.get('salt'))
try:
user = models.User.get_by_auth_password(auth_id, current_password)
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# send email
subject = self.app.config.get('app_name') + " Account Password Changed"
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"email": user.email,
"reset_password_url": self.uri_for("password-reset", _full=True)
}
email_body_path = "emails/password_changed.txt"
email_body = self.jinja2.render_template(email_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': email_body,
'sender': self.app.config.get('contact_sender'),
})
#Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('edit-profile')
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-password')
except (AttributeError, TypeError), e:
login_error_message = _('Sorry you are not logged in.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditPasswordForm(self)
class EditEmailHandler(BaseHandler):
"""
Handler for Edit User's Email
"""
@user_required
def get(self):
""" Returns a simple HTML form for edit email """
params = {}
if self.user:
user_info = models.User.get_by_id(long(self.user_id))
params['current_email'] = user_info.email
return self.render_template('edit_email.html', **params)
def post(self):
""" Get fields from POST dict """
if not self.form.validate():
return self.get()
new_email = self.form.new_email.data.strip()
password = self.form.password.data.strip()
try:
user_info = models.User.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
try:
# authenticate user by its password
user = models.User.get_by_auth_password(auth_id, password)
# if the user change his/her email address
if new_email != user.email:
# check whether the new email has been used by another user
aUser = models.User.get_by_email(new_email)
if aUser is not None:
message = _("The email %s is already registered." % new_email)
self.add_message(message, 'error')
return self.redirect_to("edit-email")
# send email
subject = _("%s Email Changed Notification" % self.app.config.get('app_name'))
user_token = models.User.create_auth_token(self.user_id)
confirmation_url = self.uri_for("email-changed-check",
user_id=user_info.get_id(),
encoded_email=utils.encode(new_email),
token=user_token,
_full=True)
# load email's template
template_val = {
"app_name": self.app.config.get('app_name'),
"first_name": user.name,
"username": user.username,
"new_email": new_email,
"confirmation_url": confirmation_url,
"support_url": self.uri_for("contact", _full=True)
}
old_body_path = "emails/email_changed_notification_old.txt"
old_body = self.jinja2.render_template(old_body_path, **template_val)
new_body_path = "emails/email_changed_notification_new.txt"
new_body = self.jinja2.render_template(new_body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': old_body,
})
taskqueue.add(url=email_url, params={
'to': new_email,
'subject': subject,
'body': new_body,
})
# display successful message
msg = _(
"Please check your new email for confirmation. Your email will be updated after confirmation.")
self.add_message(msg, 'success')
return self.redirect_to('edit-profile')
else:
self.add_message(_("You didn't change your email."), "warning")
return self.redirect_to("edit-email")
except (InvalidAuthIdError, InvalidPasswordError), e:
# Returns error message to self.response.write in
# the BaseHandler.dispatcher
message = _("Incorrect password! Please enter your current password to change your account settings.")
self.add_message(message, 'error')
return self.redirect_to('edit-email')
except (AttributeError, TypeError), e:
login_error_message = _('Sorry you are not logged in.')
self.add_message(login_error_message, 'error')
self.redirect_to('login')
@webapp2.cached_property
def form(self):
return forms.EditEmailForm(self)
class PasswordResetHandler(BaseHandler):
"""
Password Reset Handler with Captcha
"""
def get(self):
chtml = captcha.displayhtml(
public_key=self.app.config.get('captcha_public_key'),
use_ssl=(self.request.scheme == 'https'),
error=None)
if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \
self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE":
chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
'<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
'for API keys</a> in order to use reCAPTCHA.</div>' \
'<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
'<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
params = {
'captchahtml': chtml,
}
return self.render_template('password_reset.html', **params)
def post(self):
# check captcha
challenge = self.request.POST.get('recaptcha_challenge_field')
response = self.request.POST.get('recaptcha_response_field')
remoteip = self.request.remote_addr
cResponse = captcha.submit(
challenge,
response,
self.app.config.get('captcha_private_key'),
remoteip)
if cResponse.is_valid:
# captcha was valid... carry on..nothing to see here
pass
else:
_message = _('Wrong image verification code. Please try again.')
self.add_message(_message, 'error')
return self.redirect_to('password-reset')
#check if we got an email or username
email_or_username = str(self.request.POST.get('email_or_username')).lower().strip()
if utils.is_email_valid(email_or_username):
user = models.User.get_by_email(email_or_username)
_message = _("If the email address you entered") + " (<strong>%s</strong>) " % email_or_username
else:
auth_id = "own:%s" % email_or_username
user = models.User.get_by_auth_id(auth_id)
_message = _("If the username you entered") + " (<strong>%s</strong>) " % email_or_username
_message = _message + _("is associated with an account in our records, you will receive "
"an email from us with instructions for resetting your password. "
"<br>If you don't receive instructions within a minute or two, "
"check your email's spam and junk filters, or ") + \
'<a href="' + self.uri_for('contact') + '">' + _('contact us') + '</a> ' + _(
"for further assistance.")
if user is not None:
user_id = user.get_id()
token = models.User.create_auth_token(user_id)
email_url = self.uri_for('taskqueue-send-email')
reset_url = self.uri_for('password-reset-check', user_id=user_id, token=token, _full=True)
subject = _("%s Password Assistance" % self.app.config.get('app_name'))
# load email's template
template_val = {
"username": user.username,
"email": user.email,
"reset_password_url": reset_url,
"support_url": self.uri_for("contact", _full=True),
"app_name": self.app.config.get('app_name'),
}
body_path = "emails/reset_password.txt"
body = self.jinja2.render_template(body_path, **template_val)
taskqueue.add(url=email_url, params={
'to': user.email,
'subject': subject,
'body': body,
'sender': self.app.config.get('contact_sender'),
})
self.add_message(_message, 'warning')
return self.redirect_to('login')
class PasswordResetCompleteHandler(BaseHandler):
"""
Handler to process the link of reset password that received the user
"""
def get(self, user_id, token):
verify = models.User.get_by_auth_token(int(user_id), token)
params = {}
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid. '
'Enter your details again below to get a new one.')
self.add_message(message, 'warning')
return self.redirect_to('password-reset')
else:
return self.render_template('password_reset_complete.html', **params)
def post(self, user_id, token):
verify = models.User.get_by_auth_token(int(user_id), token)
user = verify[0]
password = self.form.password.data.strip()
if user and self.form.validate():
# Password to SHA512
password = utils.hashing(password, self.app.config.get('salt'))
user.password = security.generate_password_hash(password, length=12)
user.put()
# Delete token
models.User.delete_auth_token(int(user_id), token)
# Login User
self.auth.get_user_by_password(user.auth_ids[0], password)
self.add_message(_('Password changed successfully.'), 'success')
return self.redirect_to('home')
else:
self.add_message(_('The two passwords must match.'), 'error')
return self.redirect_to('password-reset-check', user_id=user_id, token=token)
@webapp2.cached_property
def form(self):
return forms.PasswordResetCompleteForm(self)
class EmailChangedCompleteHandler(BaseHandler):
"""
Handler for completed email change
Will be called when the user click confirmation link from email
"""
def get(self, user_id, encoded_email, token):
verify = models.User.get_by_auth_token(int(user_id), token)
email = utils.decode(encoded_email)
if verify[0] is None:
message = _('The URL you tried to use is either incorrect or no longer valid.')
self.add_message(message, 'warning')
self.redirect_to('home')
else:
# save new email
user = verify[0]
user.email = email
user.put()
# delete token
models.User.delete_auth_token(int(user_id), token)
# add successful message and redirect
message = _('Your email has been successfully updated.')
self.add_message(message, 'success')
self.redirect_to('edit-profile')
class HomeRequestHandler(RegisterBaseHandler):
"""
Handler to show the home page
"""
def get(self):
""" Returns a simple HTML form for home """
allItems = models.Item.query()
hasItems = False
if allItems.count() > 0:
hasItems = True
return self.render_template('home.html', siteItems = allItems, hasUserContent = hasItems)
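# Routing for all of these handlers lives elsewhere in the boilerplate; the
# sketch below only illustrates how a few of them could be wired up with
# webapp2. The URL paths and the `example_app` name are assumptions, but the
# route names ('home', 'login', 'register', 'contact') match the names used
# by redirect_to()/uri_for() throughout this module.
example_app = webapp2.WSGIApplication([
    webapp2.Route('/', HomeRequestHandler, name='home'),
    webapp2.Route('/login/', LoginHandler, name='login'),
    webapp2.Route('/logout/', LogoutHandler, name='logout'),
    webapp2.Route('/register/', RegisterHandler, name='register'),
    webapp2.Route('/contact/', ContactHandler, name='contact'),
])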
|
{
"content_hash": "486fd053cb4f365ee094684f124b4ea6",
"timestamp": "",
"source": "github",
"line_count": 1699,
"max_line_length": 128,
"avg_line_length": 42.45732783990583,
"alnum_prop": 0.5267207319609066,
"repo_name": "eugenewong/AirShare",
"id": "e27f978bbdc7b3fb2ea5d72fa656baa9006fe8e7",
"size": "72160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boilerplate/handlers.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10464"
},
{
"name": "JavaScript",
"bytes": "36265"
},
{
"name": "Perl",
"bytes": "71598"
},
{
"name": "Python",
"bytes": "1587268"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
}
|
import numpy as np
from sft.agent.model.KerasMlpModel import KerasMlpModel
class KerasMlpModelNew(KerasMlpModel):
def __init__(self, logger, layers, loss, optimizer):
super(KerasMlpModelNew, self).__init__(logger, layers, loss, optimizer)
def predict_qs(self, state):
# assume one channel and one sample
X = [np.array([[l]]) for l in state.to_list()]
# [ (1,1,5,5), (1,1,4,4) ]
p = self._model.predict(X, batch_size=1, verbose=0)
return p
def update_qs(self, states, targets):
n_samples = len(states)
assert len(states) > 0
list_length = len(states[0].to_list())
X = []
for i in range(list_length):
X.append([])
for state in states:
for i, v in enumerate(state.to_list()):
v = [v] # assume one channel
X[i].append(v)
for i, v in enumerate(X):
X[i] = np.array(v)
# [ (n, 1, 5, 5), (n, 1, 4, 4) ]
self._model.fit(X, targets, batch_size=n_samples, nb_epoch=1, verbose=0)
# weights = self._model.get_weights()
# means = [np.mean(np.abs(w)) for w in weights]
# maxs = [np.max(np.abs(w)) for w in weights]
# print "weights - maxs: %s, means: %s" % (str(maxs), str(means))
|
{
"content_hash": "a7cba266172d8909531d0d9212ef416b",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.6283185840707964,
"repo_name": "kevinkepp/look-at-this",
"id": "df3d9ab7d1c7949cd0fceff2573af40ec86bafc6",
"size": "1130",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sft/agent/model/KerasMlpModelNew.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import nussl
import matplotlib.pyplot as plt
import time
start_time = time.time()
audio_path = nussl.efz_utils.download_audio_file(
'historyrepeating_7olLrex.wav')
audio_signal = nussl.AudioSignal(audio_path)
separator = nussl.separation.primitive.Repet(
audio_signal, mask_type='binary')
estimates = separator()
plt.figure(figsize=(10, 6))
plt.subplot(211)
nussl.utils.visualize_sources_as_masks({
'Background': estimates[0], 'Foreground': estimates[1]},
y_axis='mel', db_cutoff=-60, alpha_amount=2.0)
plt.subplot(212)
nussl.utils.visualize_sources_as_waveform({
'Background': estimates[0], 'Foreground': estimates[1]},
show_legend=False)
plt.show()
nussl.play_utils.multitrack(estimates, ['Background', 'Foreground'])
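# A soft-mask variant (hypothetical sketch, not run above): Repet also
# accepts mask_type='soft', trading the hard foreground/background split
# for continuous mask values:
#
#     separator = nussl.separation.primitive.Repet(
#         audio_signal, mask_type='soft')
#     estimates = separator()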
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
|
{
"content_hash": "fa147c7baa0550d2b546ca94610adf61",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 68,
"avg_line_length": 28.633333333333333,
"alnum_prop": 0.7217694994179278,
"repo_name": "interactiveaudiolab/nussl",
"id": "0a9438a7defa7ff8117f659a7d521c0a26ac8696",
"size": "1778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/examples/primitives/repet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "MATLAB",
"bytes": "11692"
},
{
"name": "Python",
"bytes": "591205"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
}
|
""" Define the class _KNNModel_, used to represent a k-nearest neighbhors model
"""
from rdkit.DataStructs.TopNContainer import TopNContainer
class KNNModel(object):
""" This is a base class used by KNNClassificationModel
and KNNRegressionModel to represent a k-nearest neighbor predictor. In general
  one of these child classes needs to be instantiated.
_KNNModel_s can save the following pieces of internal state, accessible via
  standard setter/getter functions - the child objects store additional state:
1) _Examples_: a list of examples which have been predicted (either classified
or values predicted)
2) _TrainingExamples_: List of training examples (since this is a KNN model these examples
along with the value _k_ below define the model)
3) _TestExamples_: the list of examples used to test the model
4) _k_: the number of closest neighbors used for prediction
"""
def __init__(self, k, attrs, dfunc, radius=None):
self._setup(k, attrs, dfunc, radius)
def _setup(self, k, attrs, dfunc, radius):
self._examples = []
self._trainingExamples = []
self._testExamples = []
self._k = k
self._attrs = attrs
self._dfunc = dfunc
self._name = ""
self._radius = radius
def GetName(self):
return self._name
def SetName(self, name):
self._name = name
def GetExamples(self):
return self._examples
def SetExamples(self, examples):
self._examples = examples
def GetTrainingExamples(self):
return self._trainingExamples
def SetTrainingExamples(self, examples):
self._trainingExamples = examples
def GetTestExamples(self):
return self._testExamples
def SetTestExamples(self, examples):
self._testExamples = examples
def GetNeighbors(self, example):
""" Returns the k nearest neighbors of the example
"""
nbrs = TopNContainer(self._k)
for trex in self._trainingExamples:
dist = self._dfunc(trex, example, self._attrs)
if self._radius is None or dist < self._radius:
nbrs.Insert(-dist, trex)
nbrs.reverse()
return [x for x in nbrs]
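# Minimal usage sketch (not part of the module). The Euclidean distance
# helper and the example layout -- [id, attr..., label] with attribute
# indices passed as `attrs` -- are assumptions for illustration; any callable
# with the (ex1, ex2, attrs) signature works as `dfunc`.
def _euclideanDist(ex1, ex2, attrs):
  return sum((ex1[i] - ex2[i]) ** 2 for i in attrs) ** 0.5


if __name__ == '__main__':  # pragma: no cover
  model = KNNModel(k=2, attrs=[1, 2], dfunc=_euclideanDist)
  model.SetTrainingExamples([[0, 0.0, 0.0, 'a'], [1, 1.0, 1.0, 'b'],
                             [2, 5.0, 5.0, 'c']])
  # GetNeighbors returns (negated distance, example) pairs for the k nearest
  print(model.GetNeighbors([3, 0.2, 0.1, '?']))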
|
{
"content_hash": "a19778333634545d2fc9a1e69b287b07",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 94,
"avg_line_length": 29.054054054054053,
"alnum_prop": 0.6818604651162791,
"repo_name": "rvianello/rdkit",
"id": "0200abe951fde04454f3c352520af798d0b2cb5a",
"size": "2233",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "rdkit/ML/KNN/KNNModel.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "385"
},
{
"name": "C",
"bytes": "227962"
},
{
"name": "C#",
"bytes": "6745"
},
{
"name": "C++",
"bytes": "8796795"
},
{
"name": "CMake",
"bytes": "632104"
},
{
"name": "Fortran",
"bytes": "7661"
},
{
"name": "HTML",
"bytes": "18138"
},
{
"name": "Java",
"bytes": "301151"
},
{
"name": "JavaScript",
"bytes": "11595"
},
{
"name": "Jupyter Notebook",
"bytes": "43461"
},
{
"name": "LLVM",
"bytes": "30376"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10552"
},
{
"name": "Objective-C",
"bytes": "298"
},
{
"name": "Python",
"bytes": "3363330"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "Shell",
"bytes": "9082"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "51959"
}
],
"symlink_target": ""
}
|
"""Support for the Netatmo binary sensors."""
import logging
import pyatmo
from homeassistant.components.binary_sensor import BinarySensorDevice
from .camera import CameraData
from .const import AUTH, DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
# These are the available sensors mapped to binary_sensor class
WELCOME_SENSOR_TYPES = {
"Someone known": "motion",
"Someone unknown": "motion",
"Motion": "motion",
}
PRESENCE_SENSOR_TYPES = {
"Outdoor motion": "motion",
"Outdoor human": "motion",
"Outdoor animal": "motion",
"Outdoor vehicle": "motion",
}
TAG_SENSOR_TYPES = {"Tag Vibration": "vibration", "Tag Open": "opening"}
SENSOR_TYPES = {
"NACamera": WELCOME_SENSOR_TYPES,
"NOC": PRESENCE_SENSOR_TYPES,
}
CONF_HOME = "home"
CONF_CAMERAS = "cameras"
CONF_WELCOME_SENSORS = "welcome_sensors"
CONF_PRESENCE_SENSORS = "presence_sensors"
CONF_TAG_SENSORS = "tag_sensors"
DEFAULT_TIMEOUT = 90
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the access to Netatmo binary sensor."""
auth = hass.data[DOMAIN][entry.entry_id][AUTH]
def get_entities():
"""Retrieve Netatmo entities."""
entities = []
def get_camera_home_id(data, camera_id):
"""Return the home id for a given camera id."""
for home_id in data.camera_data.cameras:
for camera in data.camera_data.cameras[home_id].values():
if camera["id"] == camera_id:
return home_id
return None
try:
data = CameraData(hass, auth)
for camera in data.get_all_cameras():
home_id = get_camera_home_id(data, camera_id=camera["id"])
sensor_types = {}
sensor_types.update(SENSOR_TYPES[camera["type"]])
# Tags are only supported with Netatmo Welcome indoor cameras
modules = data.get_modules(camera["id"])
if camera["type"] == "NACamera" and modules:
for module in modules:
for sensor_type in TAG_SENSOR_TYPES:
_LOGGER.debug(
"Adding camera tag %s (%s)",
module["name"],
module["id"],
)
entities.append(
NetatmoBinarySensor(
data,
camera["id"],
home_id,
sensor_type,
module["id"],
)
)
for sensor_type in sensor_types:
entities.append(
NetatmoBinarySensor(data, camera["id"], home_id, sensor_type)
)
except pyatmo.NoDevice:
_LOGGER.debug("No camera entities to add")
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class NetatmoBinarySensor(BinarySensorDevice):
"""Represent a single binary sensor in a Netatmo Camera device."""
def __init__(self, data, camera_id, home_id, sensor_type, module_id=None):
"""Set up for access to the Netatmo camera events."""
self._data = data
self._camera_id = camera_id
self._module_id = module_id
self._sensor_type = sensor_type
camera_info = data.camera_data.cameraById(cid=camera_id)
self._camera_name = camera_info["name"]
self._camera_type = camera_info["type"]
self._home_id = home_id
self._home_name = self._data.camera_data.getHomeName(home_id=home_id)
self._timeout = DEFAULT_TIMEOUT
if module_id:
self._module_name = data.camera_data.moduleById(mid=module_id)["name"]
self._name = (
f"{MANUFACTURER} {self._camera_name} {self._module_name} {sensor_type}"
)
self._unique_id = (
f"{self._camera_id}-{self._module_id}-"
f"{self._camera_type}-{sensor_type}"
)
else:
self._name = f"{MANUFACTURER} {self._camera_name} {sensor_type}"
self._unique_id = f"{self._camera_id}-{self._camera_type}-{sensor_type}"
self._state = None
@property
def name(self):
"""Return the name of the Netatmo device and this sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def device_class(self):
"""Return the class of this sensor."""
if self._camera_type == "NACamera":
return WELCOME_SENSOR_TYPES.get(self._sensor_type)
if self._camera_type == "NOC":
return PRESENCE_SENSOR_TYPES.get(self._sensor_type)
return TAG_SENSOR_TYPES.get(self._sensor_type)
@property
def device_info(self):
"""Return the device info for the sensor."""
return {
"identifiers": {(DOMAIN, self._camera_id)},
"name": self._camera_name,
"manufacturer": MANUFACTURER,
"model": self._camera_type,
}
@property
def is_on(self):
"""Return true if binary sensor is on."""
return self._state
def update(self):
"""Request an update from the Netatmo API."""
self._data.update()
self._data.update_event(camera_type=self._camera_type)
if self._camera_type == "NACamera":
if self._sensor_type == "Someone known":
self._state = self._data.camera_data.someone_known_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Someone unknown":
self._state = self._data.camera_data.someone_unknown_seen(
cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Motion":
self._state = self._data.camera_data.motion_detected(
cid=self._camera_id, exclude=self._timeout
)
elif self._camera_type == "NOC":
if self._sensor_type == "Outdoor motion":
self._state = self._data.camera_data.outdoor_motion_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor human":
self._state = self._data.camera_data.human_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor animal":
self._state = self._data.camera_data.animal_detected(
cid=self._camera_id, offset=self._timeout
)
elif self._sensor_type == "Outdoor vehicle":
self._state = self._data.camera_data.car_detected(
cid=self._camera_id, offset=self._timeout
)
if self._sensor_type == "Tag Vibration":
self._state = self._data.camera_data.module_motion_detected(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
elif self._sensor_type == "Tag Open":
self._state = self._data.camera_data.module_opened(
mid=self._module_id, cid=self._camera_id, exclude=self._timeout
)
|
{
"content_hash": "f10a442410522ffba0d543b94e967bba",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 87,
"avg_line_length": 37.40886699507389,
"alnum_prop": 0.5360811166710561,
"repo_name": "postlund/home-assistant",
"id": "5f419bda2c25164e3af0e351981c7257e09687a7",
"size": "7594",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/netatmo/binary_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20215859"
},
{
"name": "Shell",
"bytes": "6663"
}
],
"symlink_target": ""
}
|
"""
This is an integration test for the BigQuery-luigi binding.
This test requires credentials that can access GCS and the bucket referenced below.
Follow the directions in the gcloud tools to set up local credentials.
"""
import json
import os
import luigi
from luigi.contrib import bigquery
from contrib import gcs_test
from nose.plugins.attrib import attr
PROJECT_ID = gcs_test.PROJECT_ID
DATASET_ID = os.environ.get('BQ_TEST_DATASET_ID', 'luigi_tests')
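# These tests carry the nose 'gcloud' attribute; with the nose attrib plugin
# they can be selected explicitly, e.g. `nosetests -a gcloud` (illustrative
# invocation, assuming nose and its attrib plugin are installed).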
@attr('gcloud')
class TestLoadTask(bigquery.BigQueryLoadTask):
source = luigi.Parameter()
table = luigi.Parameter()
@property
def schema(self):
return [
{'mode': 'NULLABLE', 'name': 'field1', 'type': 'STRING'},
{'mode': 'NULLABLE', 'name': 'field2', 'type': 'INTEGER'},
]
def source_uris(self):
return [self.source]
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table)
@attr('gcloud')
class TestRunQueryTask(bigquery.BigQueryRunQueryTask):
query = ''' SELECT 'hello' as field1, 2 as field2 '''
table = luigi.Parameter()
def output(self):
return bigquery.BigQueryTarget(PROJECT_ID, DATASET_ID, self.table)
@attr('gcloud')
class BigQueryGcloudTest(gcs_test._GCSBaseTestCase):
def setUp(self):
super(BigQueryGcloudTest, self).setUp()
self.bq_client = bigquery.BigQueryClient(gcs_test.CREDENTIALS)
self.table = bigquery.BQTable(project_id=PROJECT_ID, dataset_id=DATASET_ID,
table_id=self.id().split('.')[-1])
self.addCleanup(self.bq_client.delete_table, self.table)
    def create_dataset(self, data=None):
        # avoid the mutable-default-argument pitfall
        data = data if data is not None else []
self.bq_client.delete_table(self.table)
text = '\n'.join(map(json.dumps, data))
gcs_file = gcs_test.bucket_url(self.id())
self.client.put_string(text, gcs_file)
task = TestLoadTask(source=gcs_file, table=self.table.table_id)
task.run()
def test_table_uri(self):
intended_uri = "bq://" + PROJECT_ID + "/" + \
DATASET_ID + "/" + self.table.table_id
        self.assertEqual(self.table.uri, intended_uri)
def test_load_and_copy(self):
self.create_dataset([
{'field1': 'hi', 'field2': 1},
{'field1': 'bye', 'field2': 2},
])
# Cram some stuff in here to make the tests run faster - loading data takes a while!
self.assertTrue(self.bq_client.dataset_exists(self.table))
self.assertTrue(self.bq_client.table_exists(self.table))
self.assertIn(self.table.dataset_id,
list(self.bq_client.list_datasets(self.table.project_id)))
self.assertIn(self.table.table_id,
list(self.bq_client.list_tables(self.table.dataset)))
new_table = self.table._replace(table_id=self.table.table_id + '_copy')
self.bq_client.copy(
source_table=self.table,
dest_table=new_table
)
self.assertTrue(self.bq_client.table_exists(new_table))
self.bq_client.delete_table(new_table)
self.assertFalse(self.bq_client.table_exists(new_table))
def test_run_query(self):
task = TestRunQueryTask(table=self.table.table_id)
task._BIGQUERY_CLIENT = self.bq_client
task.run()
self.assertTrue(self.bq_client.table_exists(self.table))
|
{
"content_hash": "5cc1536d2df083bf637303828312354b",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 92,
"avg_line_length": 32.89320388349515,
"alnum_prop": 0.6304604486422668,
"repo_name": "rizzatti/luigi",
"id": "61c2ccbf0869b21458c76754d280c61309f1ec08",
"size": "3988",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/contrib/bigquery_gcloud_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2162"
},
{
"name": "HTML",
"bytes": "36680"
},
{
"name": "JavaScript",
"bytes": "84223"
},
{
"name": "Python",
"bytes": "1579138"
},
{
"name": "Shell",
"bytes": "2627"
}
],
"symlink_target": ""
}
|
"""Guardian Search web application
A small, simple web application which allows a user to search for
articles on the Guardian's website (http://www.guardian.co.uk) using
simple queries.
The aim of this project is primarily to learn about the Guardian's
Open Platform Content API (http://www.guardian.co.uk/open-platform)
and Google's App Engine infrastructure and webapp2 framework. The
focus is on back end development, so the front end interface is bare.
"""
import os
import urllib
import webapp2
import jinja2
import json
from google.appengine.api import urlfetch
from aux import generate_nav_urls
from errors import HTTPError
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class MainPage(webapp2.RequestHandler):
"""Front page of the search interface."""
def get(self):
template_values = {}
template = jinja_environment.get_template('index.html')
self.response.write(template.render(template_values))
class Search(webapp2.RequestHandler):
"""Searches for the phrase as specified by an encoded
URL. Displays results."""
def get(self):
q = self.request.get('q') # query
page = self.request.get('page', default_value='1') # page number
client = Client()
# Search for the results.
results = client.search(q=q, page=page,
show_fields='headline,byline,standfirst')
        # Get the hostname that this instance is running on.
host = self.request.headers.get('host', 'no host')
# Generate navigation links for the results page.
nav_urls = generate_nav_urls(host, urllib.quote_plus(q), results)
# Render and display the results.
template_values = {
'total': results.total(),
'current_page': results.current_page(),
'pages': results.pages(),
'numbered_results': results.numbered_results(),
'url_prev_page': nav_urls['prev'],
'url_next_page': nav_urls['next'],
}
template = jinja_environment.get_template('search.html')
self.response.write(template.render(template_values))
class Client(object):
base_url = 'http://content.guardianapis.com/search'
def __init__(self):
pass
def _fetch(self, url):
"""Fetch results from JSON endpoint and return as dict."""
f = urlfetch.fetch(url)
if f.status_code != 200:
raise HTTPError(f.status_code, f.headers)
return json.loads(f.content)
def search(self, **kwargs):
"""Search for items, returning results as an instance of
Results. Parameters for the URL fields are passed in as
**kwargs, for example, client.search(q='tomatoes').
"""
url = '%s?%s' % (self.base_url,
urllib.urlencode(self.fix_kwargs(kwargs), doseq=True))
data = self._fetch(url)
return Results(data)
def fix_kwargs(self, kwargs):
kwargs2 = dict( [ (k.replace('_', '-'), v)
for k, v in kwargs.items() ] )
kwargs2['format'] = 'json'
# kwargs2['api_key'] = self.api_key
return kwargs2
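    # Illustrative sketch (hypothetical helper, not used by the app): shows
    # how fix_kwargs turns Python-friendly keyword names into the hyphenated
    # URL fields the Content API expects, always tacking on format=json.
    def example_search_url(self):
        params = self.fix_kwargs({'q': 'tomatoes', 'show_fields': 'headline'})
        # params is now {'q': 'tomatoes', 'show-fields': 'headline', 'format': 'json'}
        return '%s?%s' % (self.base_url, urllib.urlencode(params, doseq=True))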
class Results(object):
def __init__(self, data):
self.data = data
def total(self):
"""Total number of items found."""
return int(self.data['response']['total'])
def start_index(self):
"""Number of first item on this page."""
return int(self.data['response']['startIndex'])
def page_size(self):
"""Maximum number of results returned per page."""
return int(self.data['response']['pageSize'])
def current_page(self):
"""Page number of current page."""
return int(self.data['response']['currentPage'])
def pages(self):
"""Total number of pages found."""
return int(self.data['response']['pages'])
def results(self):
"""Items found for this page, returned as a list of results."""
return self.data['response']['results']
def numbered_results(self):
"""Items found for this page, returned as a list of tuples,
where the first element is the number of that item, and the
second item is the corresponding result.
"""
r = zip(range(self.start_index(),
self.start_index() + len(self.results())),
self.results())
return r
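    # Worked example (illustrative): with start_index() == 11 and three items
    # in results(), numbered_results() returns
    # [(11, item1), (12, item2), (13, item3)].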
app = webapp2.WSGIApplication([('/', MainPage),
('/search', Search)],
debug=True)
|
{
"content_hash": "a6f2e95ad0ce70e802eab759b383134f",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 34.35820895522388,
"alnum_prop": 0.6092528236316247,
"repo_name": "alnesbit/guasearch",
"id": "249b37fa7177891b52e5d87b1eb2b6f454721f78",
"size": "4604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "5408"
}
],
"symlink_target": ""
}
|
import sys
import urllib2
import math
def getSongLink(name, song):
searchLink = "http://www.allmusic.com/search/songs/"
    #Separate the words to make a correct link
words = song.rsplit(" ")
allWords = name.rsplit(" ")
link = "http://allmusic.com" #This will hold the link to the actual album
#Currently we're looking at a search page
#Append the album title to where you want to search
#print("Looking up album")
for word in words:
searchLink += word
searchLink += "%2B" #Constant allmusic.com uses
#Get rid of the extra pesky %2B
#searchLink = searchLink.rsplit("%2B")
#print(searchLink)
page = urllib2.urlopen(searchLink)
data = page.read()
#Split up the html code line by line not byte by byte
lines = data.rsplit("\n")
    #Loop through it
artists = []
links = []
lastLine = "" #Holds value of last line
for line in lines:
#print(line)
if(line.find('href="/song/') != -1):
#print(line)
#That means that this line has the link
firstLetter = line.find('href="') + 6 #Six is length of href="
#Here we get the Artist name (we want the right version of the song lol)
firstChar = lastLine.find("by ") + 3
lastChar = lastLine.find('"', firstChar)
artists.append(lastLine[firstChar:lastChar].upper())
            #Now it's added to the array which will be searched!
while( True ):
#Keep appending to the link until we reach the end, aka the "
if(line[firstLetter] != '"'):
#Append the letters to get the link
link += line[firstLetter]
firstLetter += 1
else:
links.append(link)
link = "http://allmusic.com"
break
lastLine = line
#Here lets find the right link to the right song
index = 0
ratios = []
for artist in artists:
name = name.upper()
artistWords = artist.rsplit(" ")
numRight = 0
numWords = 0
for word in artistWords:
numWords += 1
if(name.find(word) != -1):
numRight += 1
if(numWords == numRight):
val = findTimeForSong(links[index])
try:
a = int(val)
return val
except:
ratios.append(0)
index += 1
#return findTimeForSong(links[index])
#We've got the perfect one
#print("ME")
else:
            ratios.append(float(numRight) / numWords) #float division; int / int would always be 0 here in Python 2
index += 1
order = []
while( True ):
index = 0
max = 0
maxIndex = 0
#Here we will fill out order
for i in ratios:
if(i > max):
if(findInOrder(order, index)):
pass
else:
maxIndex = index
max = i
index += 1
if(max == 0):
break
else:
order.append(maxIndex)
time = ""
    for i in range(0, len(order)): #try every candidate; the old len(order) - 1 skipped the last one
time = findTimeForSong(links[order[i]])
#If we got a good value for time :)
        #print(i)
        #print(time)
try:
tmp = int(time)
break
except:
#We have a bad value for time
continue
try:
tmp = int(time)
except:
time = 10000000
return time
def findInOrder(order, num):
for i in order:
if(i == num):
return True
return False
def findTimeForSong(link):
#Finds the actual length of a song
page = urllib2.urlopen(link)
data = page.read()
lines = data.rsplit("\n")
for line in lines:
#print(line)
if(line.find('class="time"') != -1):
firstLetter = line.find("data-sort-value") + 17
lastLetter = firstLetter + 4
time = line[firstLetter] + line[firstLetter + 2:lastLetter]
#print(time)
return time
#This will pick the best youtube video to download
def compareTitles(possibleTitles, titles, string, times, song):
#First lets get the length of the song running the whole findTime sequence
actualTime = int(getSongLink(string, song))
actualTime = (actualTime % 100) + ((actualTime // 100) * 60)
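    #Worked example (illustrative): findTimeForSong flattens "3:45" into "345";
    #345 % 100 recovers the 45 seconds and (345 // 100) * 60 the 180 seconds
    #from the minutes, so the song length here would be 225 seconds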
#print("AC")
#print(actualTime)
#
numWords = 0
#split up the words input by word to be put in the search form
#properly
words = string.rsplit(" ")
for word in words:
numWords = numWords + 1
#Allows us to see how many words are in the title
#which will be used when seeing how good a video option each is
currVid = 0 #Will help us find which video we want by storing the best
for vid in titles:
        numWordsRight = 0 #Will hold the number of words from our search that are in the video title
vid = vid.upper()
string = string.upper()
for word in words:
            #Uppercase comparisons to make the match case-insensitive
#vid = vid.upper()
word = word.upper()
#Look for search word in video title
if(vid.find(word) != -1):
numWordsRight = numWordsRight + 1
#See if the song were looking for is a remix
if(string.find("REMIX") == -1):
#If it isnt, make sure that the video isnt a remix either
if(vid.find("REMIX") != -1):
numWordsRight = numWordsRight - 3
#If it is a remix we'll reduce its value arbitraily
#We don't really want live versions either since they're annoying
if(string.find("LIVE") == -1):
#If it isnt wanted, make sure that the video isnt live either
if(vid.find("LIVE") != -1):
numWordsRight = numWordsRight - 3
if(string.find("CLEAN") == -1):
#If it isnt wanted, make sure that the video isnt clean either
if(vid.find("CLEAN") != -1):
numWordsRight = numWordsRight - 2
#No one wants a clean version...
#Decrease the want of videos that have bad times
#This is to say no to videos with talking, pauses, etc
        #times can be shorter than titles, so guard against an IndexError below
try:
timeDiff = math.fabs(actualTime - times[currVid])
except:
timeDiff = 21
if(timeDiff > 100000):
pass
#This means that the length of the video couldn't be found
#Thus we won't do anything
#Unless theres a youtube video thats over 1000 minutes long in which case, oops
elif(timeDiff > 15):
numWordsRight -= 3
elif(timeDiff > 5):
numWordsRight -= 2
elif(timeDiff > 3):
numWordsRight -= 1
#Now we compare
if(numWordsRight == numWords):
#All of the search words are in the video title
return currVid #This could not be the best if there are extra words
#Not an issue worth dealing with as this works fine
#If you dont break immeadiatly, save your options so you can later choose the best one
possibleTitles.append(numWordsRight) #Its index will be used to refer to the title
currVid = currVid + 1
#If no option is perfect, find the best option
max = 0
maxIndex = 0
index = 0
for i in possibleTitles:
if(i > max):
maxIndex = index
max = i
index = index + 1
#store the best index and then we return
#Give a warning on bad performance
    if( max < numWords / 4.0): #float division so the warning can also trigger for short queries
print("This title was hard to find and might be downloading the wrong song")
print("Check your spelling")
print("Downloading anyways")
return maxIndex
def lookupYoutube(string, song, pick = True):
#Make it a proper search link
words = string.rsplit(" ")
link = "http://www.youtube.com/results?search_query="
for each in words:
link += each + "+"
link = link.rstrip('+') #Get rid of the pesky extra +
# link += "&oe=UTF-8&um=1&ie=UTF-8&sa=N&tab=w1" #Add the ending part
page = urllib2.urlopen(link)
data = page.read()
currLine = "" #will hold the current tag line
inTag = False
inVideo = False
vidLink = ""
num = 1
links = []
titles = []
times = []
#NOTE: Read through in a byte by byte manner
currTime = ""
sameLink = True
inTime = False
for line in data:
if(inTime):
currTime += line
if(line == "<"):
if(inTime == True):
currTime = currTime.rstrip("<")
currTime = currTime.replace(":", "")
#print(currTime)
try:
Time = int(currTime)
#print(Time)
Time = (Time % 100) + ((Time // 100) * 60) #In seconds
except:
Time = 0
#print(Time)
times.append(Time)
currTime = ""
inTime = False
inTag = True
if(inTag):
currLine += line
if(inVideo):
# vidLink += line
pass
if(line == ">"):
if(inVideo):
watch = "youtube.com"
l = vidLink[vidLink.find('/watch'):vidLink.rfind('"')]
watch += l
#watch = watch.rstrip('">')
#print(vidLink)
title = vidLink[vidLink.find("title=") + 7:vidLink.find("data-session") - 10]
if(sameLink == False):
links.append(watch)
#print(title)
#print(watch)
titles.append(title)
num += 1
sameLink = not sameLink
inVideo = False
vidLink = ""
#Once we reach the end of the line we look to see if it was a vid link
if(currLine.find('href="/watch?v=') != -1):
vidLink = currLine
#if it is!
#print(currLine)
inVideo = True
if(currLine.find('class="video-time"') != -1):
inTime = True
currLine = ""
inTag = False
input = 2 #Will be used to choose which vid to return
#Titled input because for a long time this wasn't automatic!
if(pick):
#THIS IS THE -noauto ARGUMENT IN FRUITION, started out as an -auto option interestingly enough
num = 0
for title in titles:
num = num + 1
print(str(num) + " " + title)
#Give a nice output if they are picking
while(True):
input = raw_input("Pick a video: ")
try:
input = int(input)
if(input <= num):
input = input - 1
break
except:
pass
#Empty except statement
else:
possibleVideos = []
#compare titles returns the most matching of videos
bestVid = compareTitles(possibleVideos, titles, string, times, song)
print("Downloading Video: "+titles[bestVid])
input = bestVid
#Will be returned and written to a tmp for use by getmemusic
return links[input]
#Figure out the inputs to go into lookupYoutube
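#Illustrative invocation (inferred from the parsing below):
#  python youtubeFinder.py Daft Punk -auto : One More Time
#Everything before the ":" is the youtube search string (with an optional
#-auto flag right before it); everything after it is the song title used
#for the length lookup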
myfile = "/tmp/youtubelink.txt"
f = open(myfile, 'w')
line = ""
song = ""
index = 0
middleIndex = 0
for a in range(1, len(sys.argv)):
#print(sys.argv[a])
if( sys.argv[a] == ":" ):
middleIndex = index
index += 1
for a in range(middleIndex + 2, len(sys.argv)):
song += sys.argv[a] + " "
song = song.rstrip(" ")
#print(sys.argv[middleIndex])
if(sys.argv[middleIndex] == "-auto"):
#print("auto")
for i in range(1, middleIndex):
line+=sys.argv[i] + " "
link = lookupYoutube(line, song, False)
else:
for i in range(1, middleIndex + 1):
line+=sys.argv[i] + " "
link = lookupYoutube(line, song, True)
f.write(link)
|
{
"content_hash": "2fe0e72a874e58b7005ad6e928e52f9f",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 109,
"avg_line_length": 37.36521739130435,
"alnum_prop": 0.5038398882941587,
"repo_name": "aacoppa/Song-Downloader",
"id": "4971b1c32c24dc9e8a8b193413e1204e0dbce063",
"size": "12891",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Song Downloader/Dependencies/youtubeFinder.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "873"
},
{
"name": "Python",
"bytes": "15514"
},
{
"name": "Shell",
"bytes": "7946"
}
],
"symlink_target": ""
}
|
import sys
from devp2p.service import BaseService
from ethereum.db import BaseDB
from ethereum.slogging import get_logger
from ephemdb_service import EphemDB
log = get_logger('db')
dbs = {}
dbs['EphemDB'] = EphemDB
try:
from leveldb_service import LevelDBService
except ImportError:
pass
else:
dbs['LevelDB'] = LevelDBService
try:
from codernitydb_service import CodernityDB
except ImportError:
pass
else:
dbs['CodernityDB'] = CodernityDB
try:
from lmdb_service import LmDBService
except ImportError:
pass
else:
dbs['LmDB'] = LmDBService
class DBService(BaseDB, BaseService):
name = 'db'
default_config = dict(db=dict(implementation='LevelDB'))
def __init__(self, app):
super(DBService, self).__init__(app)
impl = self.app.config['db']['implementation']
        if impl not in dbs:
            log.warning('db implementation %r is not available', impl)
self.db_service = dbs[impl](app)
def start(self):
return self.db_service.start()
def _run(self):
return self.db_service._run()
def get(self, key):
return self.db_service.get(key)
def put(self, key, value):
return self.db_service.put(key, value)
def commit(self):
return self.db_service.commit()
def delete(self, key):
return self.db_service.delete(key)
def __contains__(self, key):
return key in self.db_service
def __eq__(self, other):
return isinstance(other, self.__class__) and self.db_service == other.db_service
def __repr__(self):
return repr(self.db_service)
def inc_refcount(self, key, value):
self.put(key, value)
def dec_refcount(self, key):
pass
def revert_refcount_changes(self, epoch):
pass
def commit_refcount_changes(self, epoch):
pass
def cleanup(self, epoch):
pass
def put_temporarily(self, key, value):
self.inc_refcount(key, value)
self.dec_refcount(key)
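# Illustrative configuration sketch (assumed config shape, mirroring
# default_config above): pick a backend by setting
#
#     app.config['db'] = {'implementation': 'LevelDB'}
#
# before constructing DBService(app); backends whose imports failed simply
# never appear in `dbs`.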
|
{
"content_hash": "36a967223809b2f94f1ef125ad889106",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 88,
"avg_line_length": 21.791208791208792,
"alnum_prop": 0.6333837619768028,
"repo_name": "gsalgado/pyethapp",
"id": "ed1c672570c4153d233b77caeee24633b927a7e0",
"size": "2006",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "pyethapp/db_service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "349654"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
}
|
from cs import CloudStackApiException
from cosmic.base import *
from cosmic.common import *
from cosmic.cosmicLog import CosmicLog
from cosmic.cosmicTestCase import cosmicTestCase
class TestSSVMs(cosmicTestCase):
def setUp(self):
self.logger = CosmicLog(CosmicLog.LOGGER_TEST).get_logger()
self.testClient = super(TestSSVMs, self).getClsTestClient()
self.apiclient = self.testClient.getApiClient()
self.cleanup = []
self.services = self.testClient.getParsedTestDataConfig()
self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
if not getattr(self.zone, 'dns2', False):
zone = Zone.list(self.apiclient)[0]
zone.update(self.apiclient, dns2="1.1.1.1")
self.services["sleep"] = 5
self.services["timeout"] = 180
return
def tearDown(self):
try:
            # Clean up: terminate the resources created during the test
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=['advanced'])
def test_01_list_sec_storage_vm(self):
self._test_list_svm_vm('secondarystoragevm')
@attr(tags=['advanced'])
def test_02_list_cpvm_vm(self):
self._test_list_svm_vm('consoleproxy')
@attr(tags=['advanced'])
def test_03_destroy_ssvm(self):
"""Test destroy SSVM
"""
# Validate the following
# 1. SSVM should be completely destroyed and a new one will spin up
# 2. listSystemVMs will show a different name for the
# systemVM from what it was before
# 3. new SSVM will have a public/private and link-local-ip
# 4. cloud process within SSVM must be up and running
list_ssvm_response = list_ssvms(
self.apiclient,
systemvmtype='secondarystoragevm',
state='Running',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_ssvm_response, list),
True,
"Check list response returns a valid list"
)
ssvm_response = list_ssvm_response[0]
old_name = ssvm_response.name
self.logger.debug("Destroying SSVM: %s" % ssvm_response.id)
cmd = {'id': ssvm_response.id, 'fetch_result': True}
try:
self.apiclient.destroySystemVm(**cmd)
except CloudStackApiException as e:
if e.error['errorcode'] != 530:
raise e
timeout = self.services["timeout"]
while True:
list_ssvm_response = list_ssvms(
self.apiclient,
zoneid=self.zone.id,
systemvmtype='secondarystoragevm'
)
if isinstance(list_ssvm_response, list) and len(list_ssvm_response) > 0:
if list_ssvm_response[0].state == 'Running':
break
if timeout == 0:
self.logger.debug(
"Warning: List SSVM didn't return systemvms in Running state. This is a known issue, ignoring it for now!")
return
time.sleep(self.services["sleep"])
timeout = timeout - 1
ssvm_response = list_ssvm_response[0]
# Verify Name, Public IP, Private IP and Link local IP
# for newly created SSVM
self.assertNotEqual(
ssvm_response.name,
old_name,
"Check SSVM new name with name of destroyed SSVM"
)
self.assertEqual(
hasattr(ssvm_response, 'privateip'),
True,
"Check whether SSVM has private IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'linklocalip'),
True,
"Check whether SSVM has link local IP field"
)
self.assertEqual(
hasattr(ssvm_response, 'publicip'),
True,
"Check whether SSVM has public IP field"
)
# Wait for the agent to be up
self.wait_for_system_vm_agent(ssvm_response.name)
return
@attr(tags=['advanced'])
def test_04_destroy_cpvm(self):
"""Test destroy CPVM
"""
# Validate the following
# 1. CPVM should be completely destroyed and a new one will spin up
# 2. listSystemVMs will show a different name for the systemVM from
# what it was before
# 3. new CPVM will have a public/private and link-local-ip
# 4. cloud process within CPVM must be up and running
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
zoneid=self.zone.id
)
self.assertEqual(
isinstance(list_cpvm_response, list),
True,
"Check list response returns a valid list"
)
cpvm_response = list_cpvm_response[0]
old_name = cpvm_response.name
self.logger.debug("Destroying CPVM: %s" % cpvm_response.id)
cmd = {'id': cpvm_response.id, 'fetch_result': True}
try:
self.apiclient.destroySystemVm(**cmd)
except CloudStackApiException as e:
if e.error['errorcode'] != 530:
raise e
timeout = self.services["timeout"]
while True:
list_cpvm_response = list_ssvms(
self.apiclient,
systemvmtype='consoleproxy',
zoneid=self.zone.id
)
if isinstance(list_cpvm_response, list) and len(list_cpvm_response) > 0:
if list_cpvm_response[0].state == 'Running':
break
if timeout == 0:
# FIXME: This should be fixed!
self.logger.debug(
"Warning: List CPVM didn't return systemvms in Running state. This is a known issue, ignoring it for now!")
return
time.sleep(self.services["sleep"])
timeout = timeout - 1
cpvm_response = list_cpvm_response[0]
# Verify Name, Public IP, Private IP and Link local IP
# for newly created CPVM
self.assertNotEqual(
cpvm_response.name,
old_name,
"Check SSVM new name with name of destroyed CPVM"
)
self.assertEqual(
hasattr(cpvm_response, 'privateip'),
True,
"Check whether CPVM has private IP field"
)
self.assertEqual(
hasattr(cpvm_response, 'linklocalip'),
True,
"Check whether CPVM has link local IP field"
)
self.assertEqual(
hasattr(cpvm_response, 'publicip'),
True,
"Check whether CPVM has public IP field"
)
# Wait for the agent to be up
self.wait_for_system_vm_agent(cpvm_response.name)
return
def wait_for_system_vm_agent(self, vmname):
list_host_response = []
self.logger.debug("Waiting for system VM %s agent to be UP" % vmname)
timeout = self.services["timeout"]
sleep_interval = self.services["sleep"]
while timeout > 0:
list_host_response = list_hosts(
self.apiclient,
name=vmname
)
if list_host_response and list_host_response[0].state == 'Up':
self.logger.debug("System VM %s agent is UP" % vmname)
break
time.sleep(sleep_interval)
timeout = timeout - sleep_interval
if timeout <= 0 and len(list_host_response) > 0 and list_host_response[0].state != 'Up':
# FIXME: This should be fixed!
            self.logger.debug(
                "Warning: Agent for system VM %s didn't come Up in time. This is a known issue, ignoring it for now!" % vmname)
return
def _test_list_svm_vm(self, svm_type):
# Validate the following:
# 1. listSystemVM
# should return only ONE SVM per zone
# 2. The returned SVM should be in Running state
# 3. listSystemVM for should list publicip, privateip and link-localip
# 4. The gateway programmed on the SVM by listSystemVm should be
# the same as the gateway returned by listVlanIpRanges
# 5. DNS entries must match those given for the zone
list_svm_response = list_ssvms(
self.apiclient,
systemvmtype=svm_type,
state='Running',
)
self.assertEqual(
isinstance(list_svm_response, list),
True,
"Check list response returns a valid list"
)
# Verify SSVM response
self.assertNotEqual(
len(list_svm_response),
0,
"Check list System VMs response"
)
list_zones_response = list_zones(self.apiclient)
self.assertEqual(
isinstance(list_zones_response, list),
True,
"Check list response returns a valid list"
)
self.logger.debug("Number of zones: %s" % len(list_zones_response))
self.logger.debug("Number of System VMs: %s" % len(list_svm_response))
# Number of Sec storage VMs = No of Zones
self.assertEqual(
len(list_svm_response),
len(list_zones_response),
"Check number of System VMs with number of zones"
)
# For each secondary storage VM check private IP,
# public IP, link local IP and DNS
for svm in list_svm_response:
self.logger.debug("SVM state: %s" % svm.state)
self.assertEqual(
svm.state,
'Running',
"Check whether state of System VM is running"
)
self.assertEqual(
hasattr(svm, 'privateip'),
True,
"Check whether System VM has private IP field"
)
self.assertEqual(
hasattr(svm, 'linklocalip'),
True,
"Check whether System VM has link local IP field"
)
self.assertEqual(
hasattr(svm, 'publicip'),
True,
"Check whether System VM has public IP field"
)
# Fetch corresponding ip ranges information from listVlanIpRanges
ipranges_response = list_vlan_ipranges(
self.apiclient,
zoneid=svm.zoneid
)
self.assertEqual(
isinstance(ipranges_response, list),
True,
"Check list response returns a valid list"
)
iprange = ipranges_response[0]
# Execute the following assertion in all zones except basic Zones
if not (self.zone.networktype.lower() == 'basic'):
self.assertEqual(
svm.gateway,
iprange.gateway,
"Check gateway with that of corresponding ip range"
)
# Fetch corresponding zone information from listZones
zone_response = list_zones(
self.apiclient,
id=svm.zoneid
)
self.assertEqual(
isinstance(zone_response, list),
True,
"Check list response returns a valid list"
)
self.assertEqual(
svm.dns1,
zone_response[0].dns1,
"Check DNS1 with that of corresponding zone"
)
self.assertEqual(
svm.dns2,
zone_response[0].dns2,
"Check DNS2 with that of corresponding zone"
)
return
|
{
"content_hash": "c859780818bbaf61f7ddcc81bc918d51",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 127,
"avg_line_length": 33.7008547008547,
"alnum_prop": 0.5492433849015133,
"repo_name": "MissionCriticalCloud/cosmic",
"id": "fd92fd59be45fb24b003fda1bb461081785b46ad",
"size": "11829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cosmic-core/test/integration/tests/test_ssvm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "338798"
},
{
"name": "FreeMarker",
"bytes": "1832"
},
{
"name": "Groovy",
"bytes": "136420"
},
{
"name": "HTML",
"bytes": "127137"
},
{
"name": "Java",
"bytes": "16848786"
},
{
"name": "JavaScript",
"bytes": "4252831"
},
{
"name": "Python",
"bytes": "1721825"
},
{
"name": "Shell",
"bytes": "120959"
},
{
"name": "XSLT",
"bytes": "160281"
}
],
"symlink_target": ""
}
|
"""Utilities for working with multiple processes, including both forking
the server into multiple processes and managing subprocesses.
"""
from __future__ import absolute_import, division, print_function
import errno
import os
import signal
import subprocess
import sys
import time
from binascii import hexlify
from tornado.concurrent import Future, future_set_result_unless_cancelled
from tornado import ioloop
from tornado.iostream import PipeIOStream
from tornado.log import gen_log
from tornado.platform.auto import set_close_exec
from tornado import stack_context
from tornado.util import errno_from_exception, PY3
try:
import multiprocessing
except ImportError:
# Multiprocessing is not available on Google App Engine.
multiprocessing = None
if PY3:
long = int
# Re-export this exception for convenience.
try:
CalledProcessError = subprocess.CalledProcessError
except AttributeError:
# The subprocess module exists in Google App Engine, but is empty.
# This module isn't very useful in that case, but it should
# at least be importable.
if 'APPENGINE_RUNTIME' not in os.environ:
raise
def cpu_count():
"""Returns the number of processors on this machine."""
if multiprocessing is None:
return 1
try:
return multiprocessing.cpu_count()
except NotImplementedError:
pass
try:
return os.sysconf("SC_NPROCESSORS_CONF")
except (AttributeError, ValueError):
pass
gen_log.error("Could not detect number of processors; assuming 1")
return 1
def _reseed_random():
if 'random' not in sys.modules:
return
import random
# If os.urandom is available, this method does the same thing as
# random.seed (at least as of python 2.6). If os.urandom is not
# available, we mix in the pid in addition to a timestamp.
try:
seed = long(hexlify(os.urandom(16)), 16)
except NotImplementedError:
seed = int(time.time() * 1000) ^ os.getpid()
random.seed(seed)
def _pipe_cloexec():
r, w = os.pipe()
set_close_exec(r)
set_close_exec(w)
return r, w
_task_id = None
def fork_processes(num_processes, max_restarts=100):
"""Starts multiple worker processes.
If ``num_processes`` is None or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If ``num_processes`` is given and > 0, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``fork_processes``.
In each child process, ``fork_processes`` returns its *task id*, a
number between 0 and ``num_processes``. Processes that exit
abnormally (due to a signal or non-zero exit status) are restarted
with the same id (up to ``max_restarts`` times). In the parent
process, ``fork_processes`` returns None if all child processes
have exited normally, but will otherwise only exit by throwing an
exception.
"""
global _task_id
assert _task_id is None
if num_processes is None or num_processes <= 0:
num_processes = cpu_count()
gen_log.info("Starting %d processes", num_processes)
children = {}
def start_child(i):
pid = os.fork()
if pid == 0:
# child process
_reseed_random()
global _task_id
_task_id = i
return i
else:
children[pid] = i
return None
for i in range(num_processes):
id = start_child(i)
if id is not None:
return id
num_restarts = 0
while children:
try:
pid, status = os.wait()
except OSError as e:
if errno_from_exception(e) == errno.EINTR:
continue
raise
if pid not in children:
continue
id = children.pop(pid)
if os.WIFSIGNALED(status):
gen_log.warning("child %d (pid %d) killed by signal %d, restarting",
id, pid, os.WTERMSIG(status))
elif os.WEXITSTATUS(status) != 0:
gen_log.warning("child %d (pid %d) exited with status %d, restarting",
id, pid, os.WEXITSTATUS(status))
else:
gen_log.info("child %d (pid %d) exited normally", id, pid)
continue
num_restarts += 1
if num_restarts > max_restarts:
raise RuntimeError("Too many child restarts, giving up")
new_id = start_child(id)
if new_id is not None:
return new_id
# All child processes exited cleanly, so exit the master process
# instead of just returning to right after the call to
# fork_processes (which will probably just start up another IOLoop
# unless the caller checks the return value).
sys.exit(0)
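# Illustrative sketch (the usual multi-process server pattern, shown as a
# comment so this module stays import-safe):
#
#     sockets = tornado.netutil.bind_sockets(8888)
#     tornado.process.fork_processes(0)
#     server = tornado.httpserver.HTTPServer(app)
#     server.add_sockets(sockets)
#     tornado.ioloop.IOLoop.current().start()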
def task_id():
"""Returns the current task id, if any.
Returns None if this process was not created by `fork_processes`.
"""
global _task_id
return _task_id
class Subprocess(object):
"""Wraps ``subprocess.Popen`` with IOStream support.
The constructor is the same as ``subprocess.Popen`` with the following
additions:
* ``stdin``, ``stdout``, and ``stderr`` may have the value
``tornado.process.Subprocess.STREAM``, which will make the corresponding
attribute of the resulting Subprocess a `.PipeIOStream`. If this option
is used, the caller is responsible for closing the streams when done
with them.
The ``Subprocess.STREAM`` option and the ``set_exit_callback`` and
``wait_for_exit`` methods do not work on Windows. There is
therefore no reason to use this class instead of
``subprocess.Popen`` on that platform.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
"""
STREAM = object()
_initialized = False
_waiting = {} # type: ignore
def __init__(self, *args, **kwargs):
self.io_loop = ioloop.IOLoop.current()
# All FDs we create should be closed on error; those in to_close
# should be closed in the parent process on success.
pipe_fds = []
to_close = []
if kwargs.get('stdin') is Subprocess.STREAM:
in_r, in_w = _pipe_cloexec()
kwargs['stdin'] = in_r
pipe_fds.extend((in_r, in_w))
to_close.append(in_r)
self.stdin = PipeIOStream(in_w)
if kwargs.get('stdout') is Subprocess.STREAM:
out_r, out_w = _pipe_cloexec()
kwargs['stdout'] = out_w
pipe_fds.extend((out_r, out_w))
to_close.append(out_w)
self.stdout = PipeIOStream(out_r)
if kwargs.get('stderr') is Subprocess.STREAM:
err_r, err_w = _pipe_cloexec()
kwargs['stderr'] = err_w
pipe_fds.extend((err_r, err_w))
to_close.append(err_w)
self.stderr = PipeIOStream(err_r)
try:
self.proc = subprocess.Popen(*args, **kwargs)
except:
for fd in pipe_fds:
os.close(fd)
raise
for fd in to_close:
os.close(fd)
for attr in ['stdin', 'stdout', 'stderr', 'pid']:
if not hasattr(self, attr): # don't clobber streams set above
setattr(self, attr, getattr(self.proc, attr))
self._exit_callback = None
self.returncode = None
def set_exit_callback(self, callback):
"""Runs ``callback`` when this process exits.
The callback takes one argument, the return code of the process.
This method uses a ``SIGCHLD`` handler, which is a global setting
and may conflict if you have other libraries trying to handle the
same signal. If you are using more than one ``IOLoop`` it may
be necessary to call `Subprocess.initialize` first to designate
one ``IOLoop`` to run the signal handlers.
In many cases a close callback on the stdout or stderr streams
can be used as an alternative to an exit callback if the
signal handler is causing a problem.
"""
self._exit_callback = stack_context.wrap(callback)
Subprocess.initialize()
Subprocess._waiting[self.pid] = self
Subprocess._try_cleanup_process(self.pid)
def wait_for_exit(self, raise_error=True):
"""Returns a `.Future` which resolves when the process exits.
Usage::
ret = yield proc.wait_for_exit()
This is a coroutine-friendly alternative to `set_exit_callback`
(and a replacement for the blocking `subprocess.Popen.wait`).
By default, raises `subprocess.CalledProcessError` if the process
has a non-zero exit status. Use ``wait_for_exit(raise_error=False)``
to suppress this behavior and return the exit status without raising.
.. versionadded:: 4.2
"""
future = Future()
def callback(ret):
if ret != 0 and raise_error:
# Unfortunately we don't have the original args any more.
future.set_exception(CalledProcessError(ret, None))
else:
future_set_result_unless_cancelled(future, ret)
self.set_exit_callback(callback)
return future
@classmethod
def initialize(cls):
"""Initializes the ``SIGCHLD`` handler.
The signal handler is run on an `.IOLoop` to avoid locking issues.
Note that the `.IOLoop` used for signal handling need not be the
same one used by individual Subprocess objects (as long as the
``IOLoops`` are each running in separate threads).
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been
removed.
"""
if cls._initialized:
return
io_loop = ioloop.IOLoop.current()
cls._old_sigchld = signal.signal(
signal.SIGCHLD,
lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup))
cls._initialized = True
@classmethod
def uninitialize(cls):
"""Removes the ``SIGCHLD`` handler."""
if not cls._initialized:
return
signal.signal(signal.SIGCHLD, cls._old_sigchld)
cls._initialized = False
@classmethod
def _cleanup(cls):
for pid in list(cls._waiting.keys()): # make a copy
cls._try_cleanup_process(pid)
@classmethod
def _try_cleanup_process(cls, pid):
try:
ret_pid, status = os.waitpid(pid, os.WNOHANG)
except OSError as e:
if errno_from_exception(e) == errno.ECHILD:
return
if ret_pid == 0:
return
assert ret_pid == pid
subproc = cls._waiting.pop(pid)
subproc.io_loop.add_callback_from_signal(
subproc._set_returncode, status)
def _set_returncode(self, status):
if os.WIFSIGNALED(status):
self.returncode = -os.WTERMSIG(status)
else:
assert os.WIFEXITED(status)
self.returncode = os.WEXITSTATUS(status)
# We've taken over wait() duty from the subprocess.Popen
# object. If we don't inform it of the process's return code,
# it will log a warning at destruction in python 3.6+.
self.proc.returncode = self.returncode
if self._exit_callback:
callback = self._exit_callback
self._exit_callback = None
callback(self.returncode)
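# Illustrative coroutine sketch (assumes a Unix platform and a running
# IOLoop; not part of the module's API):
#
#     from tornado import gen
#
#     @gen.coroutine
#     def run_ls():
#         proc = Subprocess(['ls', '-l'], stdout=Subprocess.STREAM)
#         output = yield proc.stdout.read_until_close()
#         ret = yield proc.wait_for_exit(raise_error=False)
#         raise gen.Return((ret, output))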
|
{
"content_hash": "76cc91f5a9a0eabfd182a0299e56b4be",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 82,
"avg_line_length": 34.41907514450867,
"alnum_prop": 0.6198673272315056,
"repo_name": "Lancher/tornado",
"id": "122fd7e14b47bf478ee8a93570f76c16c6e9f370",
"size": "12484",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tornado/process.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1664"
},
{
"name": "HTML",
"bytes": "25"
},
{
"name": "Python",
"bytes": "1610904"
},
{
"name": "Ruby",
"bytes": "1428"
},
{
"name": "Shell",
"bytes": "4070"
}
],
"symlink_target": ""
}
|
""" Atop log data analyzer.
Usage:
{cmd} metrics [-c <cmd>] [-p <path>] [-e <time>] [-r <hours>]
{cmd} (csv|json|table) [-c <cmd>] [-p <path>] [-e <time>] [-r <hours>] [<metric>...]
{cmd} (diagram|gnuplot) [-c <cmd>] [-p <path>] [-e <time>] [-r <hours>] [-x <lines>] [-y <lines>] [<metric>...]
Options:
diagram Print the results as a braille character diagram (default).
gnuplot Print the results using a gnuplot subprocess.
table Print the results as ascii table.
csv Print the results as csv table.
json Print the results as json datagram.
metrics Print a list of all possible metric_path's.
-e <time>, --end=<time> The latest value to plot in ISO8601 format. Defaults to now. [default: {now}]
  -r <hours>, --range=<hours>   Number of hours, backwards from --end, to plot. [default: 6]
-x <lines>, --width=<lines> Width of plotted graphs in text lines. [default: 59]
-y <lines>, --height=<lines> Height of plotted graphs in text lines. [default: 9]
-p <path>, --path=<path> Path to atop raw logs with date placeholders. [default: /var/log/atop/atop_%Y%m%d]
-c <cmd>, --cmd <cmd> Command to call with the raw files. [default: atop -f -r {{path}}]
  <metric>...                   The metric(s) to display. Defaults to CPL.avg5.
"""
from datetime import datetime, timedelta
from collections import OrderedDict
import iso8601
import re
import sys
import glob
import decimal
import humanfriendly
import pydash as py_
import subprocess
class AtopParser(object):
_entry_regex = re.compile(r'^ATOP - (?P<HOST>\S+)\s*(?P<TIME>\d+/\d+/\d+\s+\d+:\d+:\d+)\s.*')
_metric_regex = re.compile(r'^(?P<metric>(PRC|CPU|CPL|MEM|SWP|PAG|DSK|NET))\s(?P<details>.+)$')
_field_regex = re.compile(r'\|\s+([^|\s]*)\s+([^|]+)\s+')
_size_fields = [('MEM', 'buff'),
('MEM', 'cache'),
('MEM', 'free'),
('MEM', 'slab'),
('MEM', 'tot'),
('SWP', 'free'),
('SWP', 'tot'),
('SWP', 'vmcom'),
('SWP', 'vmlim'),
('NET', 'si'),
('NET', 'so')]
_time_span_fields = [('DSK', 'avio'),
('PRC', 'sys'),
('PRC', 'user')]
_percentage_fields = [('CPU', 'idle'),
('CPU', 'irq'),
('CPU', 'sys'),
('CPU', 'user'),
('CPU', 'wait'),
('DSK', 'busy')]
_float_fields = [('CPL', 'avg1'),
('CPL', 'avg5'),
('CPL', 'avg15')]
_integer_fields = [('CPL', 'csw'),
('CPL', 'intr'),
('DSK', 'read'),
('DSK', 'write'),
('NET', 'pcki'),
('NET', 'pcko'),
('NET', 'si'),
('NET', 'so'),
('NET', 'deliv'),
('NET', 'ipfrw'),
('NET', 'ipi'),
('NET', 'ipo'),
('NET', 'tcpi'),
('NET', 'tcpo'),
('NET', 'udpi'),
('NET', 'udpo'),
('PAG', 'scan'),
('PAG', 'stall'),
('PAG', 'swin'),
('PAG', 'swout'),
('PRC', 'exit'),
('PRC', 'proc'),
('PRC', 'sys'),
('PRC', 'user'),
('PRC', 'zombie')]
def __init__(self, min_date=None, max_date=None):
self.min_date = min_date
self.max_date = max_date
self.current_time = None
self.current_data = None
self.result = OrderedDict()
def _reset_current_entry(self):
self.current_time = None
self.current_data = None
def _append_current_entry(self):
if self.current_data is not None:
self.result[self.current_time] = self.current_data
self._reset_current_entry()
def __enter__(self, ):
self._reset_current_entry()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is None:
            # flush the final pending entry on a clean exit; the original
            # check (exc_val/exc_tb also not None) could never be true, so
            # the last entry was silently dropped
            self._append_current_entry()
return False
@property
def available_metrics_tuples(self):
result = set()
for entry in self.result.values():
py_.map_values_deep(entry, lambda __, path: result.add(tuple(path)))
return sorted(result)
@property
def available_metric_paths(self):
return ['.'.join(p) for p in self.available_metrics_tuples]
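    # Illustrative output (depends on the parsed logs): metric paths look
    # like 'CPL.avg5', 'MEM.free' or 'DSK.sda.busy' -- the dotted form that
    # py_.get() consumes in main().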
def _parse_field(self, field_metric, field_key, field_value):
try:
# sizes
if (field_metric, field_key) in self._size_fields:
return humanfriendly.parse_size(field_value)
# time spans
elif (field_metric, field_key) in self._time_span_fields:
return sum(humanfriendly.parse_timespan(v)
if v else 0
for v in re.findall(r'([\d,\.]+\s*\D+)', field_value))
# percentages
elif (field_metric, field_key) in self._percentage_fields:
return int(field_value.replace('%', '').strip())
# floats
elif (field_metric, field_key) in self._float_fields:
return float(decimal.Decimal(field_value))
# integers
elif (field_metric, field_key) in self._integer_fields:
return int(decimal.Decimal(field_value))
except ValueError:
pass
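    # Worked examples (illustrative): ('MEM', 'free') with '2.0G' goes through
    # humanfriendly.parse_size -> 2000000000; ('CPU', 'user') with '42%' -> 42;
    # ('CPL', 'avg5') with '0.15' -> 0.15.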
def add_line(self, atop_line):
match = self._entry_regex.match(atop_line)
if match:
# this is a new entry
if self.current_data is not None:
self._append_current_entry()
entry_time = datetime.strptime(match.group('TIME'), '%Y/%m/%d %H:%M:%S')
if self.min_date and entry_time < self.min_date:
return False
if self.max_date and entry_time > self.max_date:
return False
self.current_time = entry_time
self.current_data = {}
return True
elif self.current_data is not None:
# this is a new metric line
match = self._metric_regex.match(atop_line)
if match:
line_metric = match.group('metric')
line_details = match.group('details')
metric_name = None
row = {}
for metric_key, metric_value in (m.groups() for m in self._field_regex.finditer(line_details)):
metric_key = metric_key.strip().replace('#', '') if metric_key.strip() else None
metric_value = metric_value.strip() if metric_value.strip() else None
if metric_key is None and metric_value is None:
continue
self.current_data[line_metric] = self.current_data.get(line_metric, {})
if line_metric in ('NET', 'DSK'):
if not metric_name:
if line_metric == 'NET':
metric_name = metric_key
elif line_metric == 'DSK' and metric_key is None:
metric_name = metric_value
self.current_data[line_metric][metric_name] = row
else:
parsed_value = self._parse_field(line_metric, metric_key, metric_value)
if parsed_value is not None:
row[metric_key] = parsed_value
else:
parsed_value = self._parse_field(line_metric, metric_key, metric_value)
if parsed_value is not None:
self.current_data[line_metric][metric_key] = parsed_value
return True
return False
class AtopReader(object):
def __init__(self, path_schema, atop_binary="atop -f -r {path}"):
self._atop_binary = atop_binary
self._path_schema = path_schema
def required_file_paths(self, begin, end):
file_name_list = glob.glob(re.sub(r'%[aAwdbBmyYHIpMSfzZjUWcxX]', '*', self._path_schema).replace('**', '*'))
available_times = sorted(datetime.strptime(file_name, self._path_schema) for file_name in file_name_list)
for file_index, file_time in enumerate(available_times):
if file_time < end:
if len(available_times) > file_index + 1 and available_times[file_index + 1] > begin:
# the next item is within the time frame,
yield file_time.strftime(self._path_schema)
elif len(available_times) == file_index + 1:
# there is no next item but this is the last item within the time frame
yield file_time.strftime(self._path_schema)
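    # Illustrative expansion: the default schema '/var/log/atop/atop_%Y%m%d'
    # globs as '/var/log/atop/atop_*'; only files whose time range can still
    # overlap the requested [begin, end) window are yielded.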
def atop_log_files(self, begin, end):
for file_path in self.required_file_paths(begin, end):
yield subprocess.Popen(self._atop_binary.format(path=file_path), stdout=subprocess.PIPE, shell=True).stdout
def main():
from docopt import docopt
arguments = docopt(__doc__.format(cmd=__file__,
now=datetime.now().replace(second=0, microsecond=0).isoformat()))
time_range = int(arguments['--range'])
metrics = arguments['<metric>'] or ['CPL.avg5']
end = iso8601.parse_date(arguments['--end'], default_timezone=None)
begin = end - timedelta(hours=time_range)
reader = AtopReader(arguments['--path'], arguments['--cmd'])
with AtopParser(begin, end) as parser:
for log_file in reader.atop_log_files(begin, end):
for line in log_file:
parser.add_line(line.decode())
if not len(parser.result):
sys.stderr.write('empty result\n')
sys.exit(1)
elif arguments['metrics']:
for metric in parser.available_metric_paths:
print(metric)
elif arguments['table']:
from tabulate import tabulate
print(tabulate([[time] + [py_.get(value, metric) for metric in metrics]
for time, value in parser.result.items()],
['time'] + metrics, tablefmt="plain"))
elif arguments['json']:
from json import dumps
print(dumps({time.isoformat(): {metric: py_.get(value, metric) for metric in metrics}
for time, value in parser.result.items()}))
elif arguments['csv']:
import csv
writer = csv.writer(sys.stdout)
writer.writerow(['time'] + metrics)
for time, value in parser.result.items():
writer.writerow([time.isoformat()] + [py_.get(value, metric) for metric in metrics])
elif arguments['gnuplot']:
for metric in metrics:
width = int(arguments['--width'])
height = int(arguments['--height'])
process = subprocess.Popen(["gnuplot"], stdin=subprocess.PIPE)
process.stdin.write(b"set term dumb %d %d \n" % (width, height))
process.stdin.write(b"unset border \n")
process.stdin.write(b"unset ytics \n")
process.stdin.write(b"unset xtics \n")
process.stdin.write(b"set xtics nomirror \n")
process.stdin.write(b"unset key \n")
process.stdin.write(b"set xdata time \n")
process.stdin.write(b"set format x '%H' \n")
process.stdin.write(b"set timefmt '%Y-%m-%dT%H:%M:%S' \n")
process.stdin.write(b"set datafile sep '\t' \n")
process.stdin.write(b"plot '-' using 1:2 notitle with linespoints \n")
for time, value in parser.result.items():
process.stdin.write(b"%s\t%s\n" % (str(time.isoformat()).encode('utf-8'),
str(py_.get(value, metric)).encode('utf-8')))
process.stdin.write(b"e\n")
process.stdin.flush()
process.stdin.close()
process.wait()
elif arguments['diagram']:
import diagram
width = int(arguments['--width'])
height = int(arguments['--height'])
class DiagramOptions(object):
axis = True
batch = False
color = False
encoding = 'utf-8'
function = None # None or any of diagram.FUNCTION.keys()
legend = True
palette = None # None or any of diagram.PALETTE.keys()
reverse = False
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
for metric in metrics:
engine = diagram.AxisGraph(diagram.Point((width, height)), DiagramOptions())
engine.update([py_.get(value, metric) for value in parser.result.values()])
if hasattr(sys.stdout, 'buffer'):
engine.render(sys.stdout.buffer)
else:
engine.render(sys.stdout)
if __name__ == '__main__':
main()
|
{
"content_hash": "e85d67886efc18ca78eb3d65100abb12",
"timestamp": "",
"source": "github",
"line_count": 374,
"max_line_length": 119,
"avg_line_length": 36.25668449197861,
"alnum_prop": 0.5011061946902655,
"repo_name": "efenka/aplot",
"id": "58f5e01aed0ee6c190684c9d7b6a49f9753b8846",
"size": "13600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aplot.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13600"
}
],
"symlink_target": ""
}
|
import os
from os.path import abspath, join, dirname
from sys import path
from envs.keys_and_passwords import *
PROJECT_ROOT = abspath(join(dirname(__file__), "../"))
APPS_DIR = abspath(join(dirname(__file__), "../", "apps"))
path.insert(0, PROJECT_ROOT)
path.insert(0, APPS_DIR)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Steven Skoczen', 'skoczen@gmail.com'),
)
MANAGERS = ADMINS
EMAIL_SUBJECT_PREFIX = "[footprintsapp.com] "
SERVER_EMAIL = 'footprints <no-reply@footprintsapp.com>'
DEFAULT_FROM_EMAIL = SERVER_EMAIL
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'footprints',
'USER': 'skoczen',
'PASSWORD': DB_PASSWORD,
'HOST': '',
'PORT': '',
}
}
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache',
'LOCATION': '127.0.0.1:11211',
}
}
ALLOWED_HOSTS = [
"skoczen-footprints-staging.herokuapp.com",
"skoczen-footprints.herokuapp.com",
"footprintsapp.com",
"*.footprintsapp.com",
"www.footprintsapp.com",
"*",
]
# TIME_ZONE = 'America/Vancouver'
TIME_ZONE = 'Asia/Bangkok'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
LOGIN_URL = '/accounts/login/'
USE_I18N = False
USE_L10N = True
MEDIA_ROOT = join(PROJECT_ROOT, "media_root")
STATIC_ROOT = join(PROJECT_ROOT, "collected_static")
STATIC_URL = '/static/'
BASE_URL = "http://localhost:8001"
MEDIA_URL = '%s/media/' % BASE_URL
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '^7!$isr6jd!o+mgl1qy@+8197dm53uhp2i*vp8k4p#*g#8mg1n'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
# 'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATE_DIRS = (
join(abspath(PROJECT_ROOT), "templates"),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
"analytical",
"annoying",
'allauth',
'allauth.account',
'allauth.socialaccount',
# ... include the providers you want to enable:
# 'allauth.socialaccount.providers.amazon',
# 'allauth.socialaccount.providers.angellist',
# 'allauth.socialaccount.providers.bitbucket',
# 'allauth.socialaccount.providers.bitly',
# 'allauth.socialaccount.providers.dropbox',
# 'allauth.socialaccount.providers.facebook',
# 'allauth.socialaccount.providers.flickr',
# 'allauth.socialaccount.providers.feedly',
# 'allauth.socialaccount.providers.github',
# 'allauth.socialaccount.providers.google',
# 'allauth.socialaccount.providers.instagram',
# 'allauth.socialaccount.providers.linkedin',
# 'allauth.socialaccount.providers.linkedin_oauth2',
# 'allauth.socialaccount.providers.openid',
# 'allauth.socialaccount.providers.persona',
# 'allauth.socialaccount.providers.soundcloud',
# 'allauth.socialaccount.providers.stackexchange',
# 'allauth.socialaccount.providers.tumblr',
# 'allauth.socialaccount.providers.twitch',
# 'allauth.socialaccount.providers.twitter',
# 'allauth.socialaccount.providers.vimeo',
# 'allauth.socialaccount.providers.vk',
# 'allauth.socialaccount.providers.weibo',
"compressor",
"django_extensions",
"djcelery",
"gunicorn",
"sorl.thumbnail",
"south",
"main_site",
"posts",
"utils",
# Must come after south
"django_nose",
)
INSTALLED_APPS = ("longerusernameandemail",) + INSTALLED_APPS
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
# allauth specific context processors
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
# Intercom
"main_site.context_processors.intercom_custom_data",
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_SUBJECT_PREFIX = "Footprints: "
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 7
ACCOUNT_LOGOUT_ON_GET = True
ACCOUNT_LOGOUT_REDIRECT_URL = "/logged-out/"
ACCOUNT_SIGNUP_FORM_CLASS = "posts.forms.SignupForm"
AUTH_PROFILE_MODULE = "posts.Author"
# ACCOUNT_USER_DISPLAY = lambda user: user.get_profile().name
SITE_ID = 1
import djcelery
djcelery.setup_loader()
BROKER_URL = 'redis://localhost:6379/7'
LOGIN_REDIRECT_URL = "/my-writing/"
STATICFILES_EXCLUDED_APPS = []
COMPRESS_ROOT = STATIC_ROOT
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
GOOGLE_ANALYTICS_PROPERTY_ID = ""
GAUGES_SITE_ID = ""
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
}
import logging
# selenium_logger = logging.getLogger('selenium.webdriver.remote.remote_connection')
# selenium_logger.setLevel(logging.WARNING)
# logging.getLogger().setLevel(logging.WARNING)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
BROWSER = "chrome"
SOUTH_TESTS_MIGRATE = False
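# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): environment-specific
# settings modules in this repo's "envs" layout conventionally star-import
# this common module and override a handful of values. The overrides below
# are illustrative only.
#
#   # envs/live.py
#   from envs.common import *
#
#   DEBUG = False
#   TEMPLATE_DEBUG = DEBUG
#   BASE_URL = "https://footprintsapp.com"
#   MEDIA_URL = "%s/media/" % BASE_URL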
|
{
"content_hash": "20a1e83e7972d3e9bdd9d058daf36d45",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 84,
"avg_line_length": 28.4875,
"alnum_prop": 0.6845107503290917,
"repo_name": "hobson/totalgood",
"id": "521091f8d21576a154b79ff8bd95b348fb137455",
"size": "6837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "totalgood/envs/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183127"
}
],
"symlink_target": ""
}
|
import dataclasses
import json # type: ignore
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, path_template, rest_helpers, rest_streaming
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.transport.requests import AuthorizedSession # type: ignore
from google.protobuf import json_format
import grpc # type: ignore
from requests import __version__ as requests_version
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
from .base import RegionSecurityPoliciesTransport
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class RegionSecurityPoliciesRestInterceptor:
"""Interceptor for RegionSecurityPolicies.
Interceptors are used to manipulate requests, request metadata, and responses
in arbitrary ways.
Example use cases include:
* Logging
* Verifying requests according to service or custom semantics
* Stripping extraneous information from responses
These use cases and more can be enabled by injecting an
instance of a custom subclass when constructing the RegionSecurityPoliciesRestTransport.
.. code-block:: python
class MyCustomRegionSecurityPoliciesInterceptor(RegionSecurityPoliciesRestInterceptor):
def pre_delete(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_delete(response):
logging.log(f"Received response: {response}")
def pre_get(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_get(response):
logging.log(f"Received response: {response}")
def pre_insert(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_insert(response):
logging.log(f"Received response: {response}")
def pre_list(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_list(response):
logging.log(f"Received response: {response}")
def pre_patch(request, metadata):
logging.log(f"Received request: {request}")
return request, metadata
def post_patch(response):
logging.log(f"Received response: {response}")
transport = RegionSecurityPoliciesRestTransport(interceptor=MyCustomRegionSecurityPoliciesInterceptor())
client = RegionSecurityPoliciesClient(transport=transport)
"""
def pre_delete(
self,
request: compute.DeleteRegionSecurityPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.DeleteRegionSecurityPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for delete
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionSecurityPolicies server.
"""
return request, metadata
def post_delete(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for delete
Override in a subclass to manipulate the response
after it is returned by the RegionSecurityPolicies server but before
it is returned to user code.
"""
return response
def pre_get(
self,
request: compute.GetRegionSecurityPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.GetRegionSecurityPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for get
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionSecurityPolicies server.
"""
return request, metadata
def post_get(self, response: compute.SecurityPolicy) -> compute.SecurityPolicy:
"""Post-rpc interceptor for get
Override in a subclass to manipulate the response
after it is returned by the RegionSecurityPolicies server but before
it is returned to user code.
"""
return response
def pre_insert(
self,
request: compute.InsertRegionSecurityPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.InsertRegionSecurityPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for insert
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionSecurityPolicies server.
"""
return request, metadata
def post_insert(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for insert
Override in a subclass to manipulate the response
after it is returned by the RegionSecurityPolicies server but before
it is returned to user code.
"""
return response
def pre_list(
self,
request: compute.ListRegionSecurityPoliciesRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.ListRegionSecurityPoliciesRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for list
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionSecurityPolicies server.
"""
return request, metadata
def post_list(
self, response: compute.SecurityPolicyList
) -> compute.SecurityPolicyList:
"""Post-rpc interceptor for list
Override in a subclass to manipulate the response
after it is returned by the RegionSecurityPolicies server but before
it is returned to user code.
"""
return response
def pre_patch(
self,
request: compute.PatchRegionSecurityPolicyRequest,
metadata: Sequence[Tuple[str, str]],
) -> Tuple[compute.PatchRegionSecurityPolicyRequest, Sequence[Tuple[str, str]]]:
"""Pre-rpc interceptor for patch
Override in a subclass to manipulate the request or metadata
before they are sent to the RegionSecurityPolicies server.
"""
return request, metadata
def post_patch(self, response: compute.Operation) -> compute.Operation:
"""Post-rpc interceptor for patch
Override in a subclass to manipulate the response
after it is returned by the RegionSecurityPolicies server but before
it is returned to user code.
"""
return response
@dataclasses.dataclass
class RegionSecurityPoliciesRestStub:
_session: AuthorizedSession
_host: str
_interceptor: RegionSecurityPoliciesRestInterceptor
class RegionSecurityPoliciesRestTransport(RegionSecurityPoliciesTransport):
"""REST backend transport for RegionSecurityPolicies.
The RegionSecurityPolicies API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via an issue in this
library's source repository. Thank you!
"""
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[RegionSecurityPoliciesRestInterceptor] = None,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
NOTE: This REST transport functionality is currently in a beta
state (preview). We welcome your feedback via a GitHub issue in
this library's repository. Thank you!
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or RegionSecurityPoliciesRestInterceptor()
self._prep_wrapped_messages(client_info)
class _Delete(RegionSecurityPoliciesRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteRegionSecurityPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteRegionSecurityPolicyRequest):
The request object. A request message for
RegionSecurityPolicies.Delete. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/regions/{region}/securityPolicies/{security_policy}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
pb_request = compute.DeleteRegionSecurityPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(RegionSecurityPoliciesRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetRegionSecurityPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.SecurityPolicy:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetRegionSecurityPolicyRequest):
The request object. A request message for
RegionSecurityPolicies.Get. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.SecurityPolicy:
Represents a Google Cloud Armor
security policy resource. Only external
backend services that use load balancers
can reference a security policy. For
more information, see Google Cloud Armor
security policy overview.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/securityPolicies/{security_policy}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
pb_request = compute.GetRegionSecurityPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.SecurityPolicy()
pb_resp = compute.SecurityPolicy.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(RegionSecurityPoliciesRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertRegionSecurityPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertRegionSecurityPolicyRequest):
The request object. A request message for
RegionSecurityPolicies.Insert. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/regions/{region}/securityPolicies",
"body": "security_policy_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
pb_request = compute.InsertRegionSecurityPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_insert(resp)
return resp
class _List(RegionSecurityPoliciesRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListRegionSecurityPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.SecurityPolicyList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListRegionSecurityPoliciesRequest):
The request object. A request message for
RegionSecurityPolicies.List. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.SecurityPolicyList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/regions/{region}/securityPolicies",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
pb_request = compute.ListRegionSecurityPoliciesRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.SecurityPolicyList()
pb_resp = compute.SecurityPolicyList.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_list(resp)
return resp
class _Patch(RegionSecurityPoliciesRestStub):
def __hash__(self):
return hash("Patch")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.PatchRegionSecurityPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the patch method over HTTP.
Args:
request (~.compute.PatchRegionSecurityPolicyRequest):
The request object. A request message for
RegionSecurityPolicies.Patch. See the
method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/regions/{region}/securityPolicies/{security_policy}",
"body": "security_policy_resource",
},
]
request, metadata = self._interceptor.pre_patch(request, metadata)
pb_request = compute.PatchRegionSecurityPolicyRequest.pb(request)
transcoded_request = path_template.transcode(http_options, pb_request)
# Jsonify the request body
body = json_format.MessageToJson(
transcoded_request["body"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
json_format.MessageToJson(
transcoded_request["query_params"],
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params, strict=True),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation()
pb_resp = compute.Operation.pb(resp)
json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
resp = self._interceptor.post_patch(resp)
return resp
@property
def delete(
self,
) -> Callable[[compute.DeleteRegionSecurityPolicyRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Delete(self._session, self._host, self._interceptor) # type: ignore
@property
def get(
self,
) -> Callable[[compute.GetRegionSecurityPolicyRequest], compute.SecurityPolicy]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Get(self._session, self._host, self._interceptor) # type: ignore
@property
def insert(
self,
) -> Callable[[compute.InsertRegionSecurityPolicyRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Insert(self._session, self._host, self._interceptor) # type: ignore
@property
def list(
self,
) -> Callable[
[compute.ListRegionSecurityPoliciesRequest], compute.SecurityPolicyList
]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._List(self._session, self._host, self._interceptor) # type: ignore
@property
def patch(
self,
) -> Callable[[compute.PatchRegionSecurityPolicyRequest], compute.Operation]:
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return self._Patch(self._session, self._host, self._interceptor) # type: ignore
@property
def kind(self) -> str:
return "rest"
def close(self):
self._session.close()
__all__ = ("RegionSecurityPoliciesRestTransport",)
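# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated file), mirroring the example
# in the interceptor docstring above. The client import path is assumed from
# the standard GAPIC package layout.
#
#   from google.cloud.compute_v1.services.region_security_policies import (
#       RegionSecurityPoliciesClient,
#   )
#
#   class LoggingInterceptor(RegionSecurityPoliciesRestInterceptor):
#       def pre_get(self, request, metadata):
#           print("GET request:", request)
#           return request, metadata
#
#   transport = RegionSecurityPoliciesRestTransport(
#       interceptor=LoggingInterceptor(),
#   )
#   client = RegionSecurityPoliciesClient(transport=transport)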
|
{
"content_hash": "ca1d953471b217f400893d8457d6f03f",
"timestamp": "",
"source": "github",
"line_count": 856,
"max_line_length": 112,
"avg_line_length": 40.41238317757009,
"alnum_prop": 0.5918538432630879,
"repo_name": "googleapis/python-compute",
"id": "6c6305e5eecbe3717fb65a409249fc8a1a590388",
"size": "35194",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/compute_v1/services/region_security_policies/transports/rest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "32681847"
},
{
"name": "Shell",
"bytes": "30663"
}
],
"symlink_target": ""
}
|
"""Day 6: Universal Orbit Map"""
import math
from collections import defaultdict, deque
from copy import deepcopy
from typing import DefaultDict, Iterator, List, NamedTuple, Tuple
import aoc
DAY = 6
OrbitGraph = DefaultDict[str, List[str]]
def parse_input(input_text: str) -> OrbitGraph:
orbits: OrbitGraph = defaultdict(list)
pairs = [line.split(")") for line in input_text.splitlines()]
for orbited, orbited_by in pairs:
orbits[orbited].append(orbited_by)
return orbits
def orbit_depths(orbit_graph: OrbitGraph) -> Iterator[int]:
"""Yields node depths in a breadth-first traversal of the orbit graph."""
queue = deque([(0, "COM")])
while queue:
depth, body = queue.popleft()
yield depth
queue.extend(
[(depth + 1, orbiting_body) for orbiting_body in orbit_graph[body]]
)
def find_shortest_path(
directed_orbit_graph: OrbitGraph, source: str = "YOU", dest: str = "SAN"
) -> Tuple[int, List[str]]:
orbits = directed_to_undirected_graph(directed_orbit_graph)
class BFSEntry(NamedTuple):
depth: int
current_node: str
path_from_source: List[str]
start = orbits[source][0] # Body orbited by source
queue = deque([BFSEntry(0, start, [])])
min_distance = math.inf
shortest_path: List[str] = []
while queue:
depth, current_node, path = queue.popleft()
if depth >= min_distance:
# Cut off search branch if distance is already too long.
continue
# Filter already-visited nodes from next steps to avoid cycles.
unvisited_neighbours = [n for n in orbits[current_node] if n not in path]
current_path = path + [current_node]
if dest in unvisited_neighbours:
# The earlier check ensures the current distance is known to be
# shorter than the previous-shortest, so we can just assign the
# current distance and path without testing again.
min_distance = depth
shortest_path = current_path
else:
queue.extend(
[
BFSEntry(depth + 1, neighbour, current_path)
for neighbour in unvisited_neighbours
]
)
if min_distance is math.inf:
raise ValueError(f"Node {dest} not present in orbit graph.")
# Turn off mypy checking for the return value because the use of math.inf
# (which is a float) earlier causes it to complain about the return type
# really being a Union[float, int], where for any valid input graph the
# dest node will be found and min_distance will be an int.
return min_distance, shortest_path # type: ignore
def directed_to_undirected_graph(directed: OrbitGraph) -> OrbitGraph:
"""Create undirected graph from the given directed graph."""
# Make an undirected graph so that we can traverse orbits in
# either direction. The original orbit graph is strictly
# orbited_body -> orbiting body, ie a directed acyclic graph.
undirected = deepcopy(directed)
for orbited_body, orbiting_bodies in directed.items():
for orbiting in orbiting_bodies:
undirected[orbiting].append(orbited_body)
return undirected
def main(orbit_graph: OrbitGraph) -> Tuple[int, int]:
part_one_solution = sum(orbit_depths(orbit_graph))
part_two_solution, shortest_path = find_shortest_path(orbit_graph)
return (part_one_solution, part_two_solution)
def test_orbit_depths() -> None:
orbits = """\
COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L"""
orbit_graph = parse_input(orbits)
assert sum(orbit_depths(orbit_graph)) == 42
def test_find_shortest_path() -> None:
orbits = """\
COM)B
B)C
C)D
D)E
E)F
B)G
G)H
D)I
E)J
J)K
K)L
K)YOU
I)SAN"""
orbit_graph = parse_input(orbits)
distance, path = find_shortest_path(orbit_graph)
assert distance == 4
assert path == ["K", "J", "E", "D", "I"]
if __name__ == "__main__":
parsed = parse_input(aoc.load_puzzle_input(2019, DAY))
part_one_solution, part_two_solution = main(parsed)
print(
aoc.format_solution(
title=__doc__,
part_one=part_one_solution,
part_two=part_two_solution,
)
)
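# Hedged worked example (not in the original solution): parse_input builds a
# directed adjacency list keyed by the orbited body, e.g.
#
#   >>> parse_input("COM)B\nB)C")
#   defaultdict(<class 'list'>, {'COM': ['B'], 'B': ['C']})
#
# and sum(orbit_depths(...)) over that graph counts every direct and indirect
# orbit: COM at depth 0, B at 1, C at 2 -> total 3.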
|
{
"content_hash": "fd26c523ab8cc2cc3475ed88d342076d",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 81,
"avg_line_length": 29.06122448979592,
"alnum_prop": 0.6357677902621723,
"repo_name": "robjwells/adventofcode-solutions",
"id": "575c7323607ca37bb1ad3bcf43f942e74774eaa8",
"size": "4272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2019/python/aoc_2019_06.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "50446"
},
{
"name": "Java",
"bytes": "51983"
},
{
"name": "JavaScript",
"bytes": "6893"
},
{
"name": "Python",
"bytes": "229338"
},
{
"name": "Rust",
"bytes": "6700"
},
{
"name": "Swift",
"bytes": "6997"
}
],
"symlink_target": ""
}
|
from direct.gui.DirectGui import *
from direct.interval.IntervalGlobal import Sequence, Func, Wait
from menu import Menu
import os
import json
from pathlib import Path
class Load(Menu):
def __init__(self):
super().__init__()
self.parentNode = aspect2d.attachNewNode('Load')
self.backButton = DirectButton(text=("back"), scale = 0.25,
command=self.switchToMainMenu, parent=base.a2dTopLeft,
pos=(0.275,0,-0.225))
        # Each of these holds the saveDict data (from the Tricker class)
        # for the corresponding save slot.
self.slot1ButtonData = self.loadButtonData('1')
self.slot2ButtonData = self.loadButtonData('2')
self.slot3ButtonData = self.loadButtonData('3')
self.slot4ButtonData = self.loadButtonData('4')
self.slot1Button = DirectButton(text=(self.slot1ButtonData if self.slot1ButtonData else 'Empty Slot 1'),
scale=0.1, pos=(0, 0, .2),
command=self.openLoadDialog, extraArgs=['1'],
parent=self.parentNode)
self.slot2Button = DirectButton(text=(self.slot2ButtonData if self.slot2ButtonData else 'Empty Slot 2'),
scale=0.1, pos=(0, 0, 0),
command=self.openLoadDialog, extraArgs=['2'],
parent=self.parentNode)
self.slot3Button = DirectButton(text=(self.slot3ButtonData if self.slot3ButtonData else 'Empty Slot 3'),
scale=0.1, pos=(0, 0, -.2),
command=self.openLoadDialog, extraArgs=['3'],
parent=self.parentNode)
self.slot4Button = DirectButton(text=(self.slot4ButtonData if self.slot4ButtonData else 'Empty Slot 4'),
scale=0.1, pos=(0, 0, -.4),
command=self.openLoadDialog, extraArgs=['4'],
parent=self.parentNode)
self.guiElements = [self.slot1Button, self.slot2Button, self.slot3Button, self.slot4Button, self.backButton]
self.loadDialog = None
def openLoadDialog(self, slot):
saveFilePath = self.getSaveFilePath(slot)
        projectPath = os.path.dirname(os.path.dirname(__file__))
        fullFilePath = os.path.join(projectPath, saveFilePath)
        # Wrap the same path in a Path object so we can test for existence.
        fullFilePathAsPath = Path(fullFilePath)
        if fullFilePathAsPath.is_file():
self.disableGUI()
self.loadDialog = DirectDialog(dialogName="LoadDialog", scale=1,
text="Which player do you want to load to?",
buttonTextList=['Player 1', 'Player 2'],
buttonValueList=[base.player1,base.player2],
command=self.loadGame, extraArgs=[fullFilePath])
else:
s = "Save not found!"
self.drawPopupText(s)
def loadGame(self, player, fullFilePath):
print("old:", player.saveDict)
with open(fullFilePath, 'r') as infile:
player.loadToSaveDict(json.load(infile))
s = "Loaded file... " + player.name
self.drawPopupText(s)
print("player1: ", base.player1.saveDict)
print("player2: ", base.player2.saveDict)
self.enableGUI()
self.loadDialog.detachNode()
    # This overrides the parent method because we don't want to move the buttons.
def createPopupText(self,s):
self.popupText = OnscreenText(text=s, scale = 0.1, parent=base.a2dTopCenter,
pos = (0,-.5) )
|
{
"content_hash": "ecb0df751ac9b9d680086409697121cd",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 116,
"avg_line_length": 50.921052631578945,
"alnum_prop": 0.5488372093023256,
"repo_name": "Aklaran/trickingGame",
"id": "81c1d1c8bf83352bd5acad59d45635be5fff0b70",
"size": "3870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/load.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62098"
}
],
"symlink_target": ""
}
|
from haystack import indexes
from oscar.core.loading import get_class, get_model
# Load default strategy (without a user/request)
is_solr_supported = get_class('search.features', 'is_solr_supported')
Selector = get_class('partner.strategy', 'Selector')
strategy = Selector().strategy()
class ProductIndex(indexes.ModelSearchIndex, indexes.Indexable):
# Search text
text = indexes.EdgeNgramField(
document=True, use_template=True,
template_name='oscar/search/indexes/product/item_text.txt')
upc = indexes.CharField(model_attr="upc", null=True)
title = indexes.EdgeNgramField(model_attr='title', null=True)
# Custom Fields
coordinates = indexes.LocationField(model_attr="coordinates")
category_id = indexes.MultiValueField()
date_start = indexes.DateTimeField(model_attr="date_start", null=True)
date_end = indexes.DateTimeField(model_attr="date_end", null=True)
# Fields for faceting
product_class = indexes.CharField(null=True, faceted=True)
category = indexes.MultiValueField(null=True, faceted=True)
price = indexes.DecimalField(null=True, faceted=True)
num_in_stock = indexes.IntegerField(null=True, faceted=True)
rating = indexes.IntegerField(null=True, faceted=True)
# Spelling suggestions
suggestions = indexes.FacetCharField()
date_created = indexes.DateTimeField(model_attr='date_created')
date_updated = indexes.DateTimeField(model_attr='date_updated')
class Meta:
model = get_model('catalogue', 'Product')
def get_model(self):
return get_model('catalogue', 'Product')
def index_queryset(self, using=None):
# Only index browsable products (not each individual child product)
return self.get_model().browsable.order_by('-date_updated')
def read_queryset(self, using=None):
return self.get_model().browsable.base_queryset()
def prepare_product_class(self, obj):
return obj.get_product_class().name
def prepare_category(self, obj):
categories = obj.categories.all()
if len(categories) > 0:
return [category.full_name for category in categories]
def prepare_rating(self, obj):
if obj.rating is not None:
return int(obj.rating)
# Pricing and stock is tricky as it can vary per customer. However, the
# most common case is for customers to see the same prices and stock levels
# and so we implement that case here.
def prepare_price(self, obj):
result = None
if obj.is_parent:
result = strategy.fetch_for_parent(obj)
elif obj.has_stockrecords:
result = strategy.fetch_for_product(obj)
if result:
if result.price.is_tax_known:
return result.price.incl_tax
return result.price.excl_tax
def prepare_num_in_stock(self, obj):
if obj.is_parent:
# Don't return a stock level for parent products
return None
elif obj.has_stockrecords:
result = strategy.fetch_for_product(obj)
return result.stockrecord.net_stock_level
def prepare_category_id(self, obj):
categories = []
ProductCategory = get_model('catalogue', 'ProductCategory')
for p in ProductCategory.objects.filter(product=obj.id):
categories.append(p.category.id)
return categories
def prepare(self, obj):
prepared_data = super(ProductIndex, self).prepare(obj)
# We use Haystack's dynamic fields to ensure that the title field used
        # for sorting is of type "string".
if is_solr_supported():
prepared_data['title_s'] = prepared_data['title']
        # Use the indexed document text for spelling suggestions
prepared_data['suggestions'] = prepared_data['text']
return prepared_data
def get_updated_field(self):
"""
Used to specify the field used to determine if an object has been
        updated.
        Can be used to filter the query set when updating the index.
"""
return 'date_updated'
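# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): once this index is
# registered, Haystack queries run through SearchQuerySet. The field names
# come from the index definition above; the query values are illustrative.
#
#   from haystack.query import SearchQuerySet
#
#   results = (SearchQuerySet()
#              .filter(content='boots')       # searches the document `text` field
#              .filter(category='Footwear'))  # faceted category field
#   for result in results[:10]:
#       print(result.title, result.price)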
|
{
"content_hash": "fbdce984f7e8e06c43244de1651117f6",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 79,
"avg_line_length": 35.608695652173914,
"alnum_prop": 0.663003663003663,
"repo_name": "angelsantosa/ad-manage",
"id": "1a01d728b158139d58ed6f19e5f1c9450af57e05",
"size": "4095",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ad_manage/djangoapps/search/search_indexes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1733526"
},
{
"name": "HTML",
"bytes": "605536"
},
{
"name": "JavaScript",
"bytes": "798746"
},
{
"name": "Python",
"bytes": "82619"
},
{
"name": "Shell",
"bytes": "3932"
}
],
"symlink_target": ""
}
|
from asposebarcode import Settings
from com.aspose.barcode import BarCodeBuilder
from com.aspose.barcode import Symbology
from com.aspose.barcode import AustraliaPostFormatControlCode
class ManageDimension:
def __init__(self):
dataDir = Settings.dataDir + 'WorkingWithBarcode/AdvanceBarcodeFeatures/ManageDimension/'
# Instantiate barcode object
bb = BarCodeBuilder()
# Set up code text (data to be encoded)
bb.setCodeText("1234567")
# Set the symbology type to Code128
symbology= Symbology
bb.setSymbologyType(symbology.Code128)
# Save the image to file
bb.save(dataDir + "barcode.jpg")
# Set the x-dimension for the bars of the barcode
bb.setxDimension(0.5)
# Save the image to file
bb.save(dataDir + "barcodeXDimensionChanged.jpg")
# Instantiate barcode object
bb1 = BarCodeBuilder()
# Set the code text of the barcode
bb1.setCodeText("12345678")
        # Set the symbology type to Pdf417
bb1.setSymbologyType(symbology.Pdf417)
# Set the x-dimension for the bars of the barcode
bb1.setxDimension(0.5)
# Save the image to file
bb1.save(dataDir + "barcodeYDimensionChanged.jpg")
# Display Status.
print "BarCodes with different dimensions have been created successfully."
if __name__ == '__main__':
ManageDimension()
|
{
"content_hash": "5e9985847891c4de2fcc94c856e18ce4",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 97,
"avg_line_length": 29.18,
"alnum_prop": 0.6572995202193284,
"repo_name": "asposebarcode/Aspose_BarCode_Java",
"id": "283fef6697c90b941fd85e6299bf8acd233c0896",
"size": "1459",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Plugins/Aspose.BarCode Java for Jython/asposebarcode/WorkingWithBarcode/AdvanceFeatures/ManageDimension.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "152045"
},
{
"name": "PHP",
"bytes": "53227"
},
{
"name": "Ruby",
"bytes": "47116"
}
],
"symlink_target": ""
}
|
import os
db = os.getenv('DB', "mongodb")
mongodb_uri = os.getenv('MONGODB_URI', "mongodb://username:password@host:port/db")
salt = os.getenv('salt', "")
rethink_db_host = os.getenv('RETHINK_DB_HOST', "localhost")
rethink_db_port = os.getenv('RETHINK_DB_PORT', "28015")
metadata_key = "st::"
reserved_words = ['sort', 'order', 'sortby', 'limit', 'skip']
token_secret = '0BR5zqTw7rlDyPOLtcHpRsmwwSQuDkZbij5yTMZgzZ9gi5kKRl'
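# Hedged usage sketch (not part of the original file): the defaults above are
# overridden via environment variables set before this module is imported,
# e.g. from a shell (app.py is a hypothetical entry point):
#
#   DB=rethinkdb RETHINK_DB_HOST=db.internal python app.py
#
# and then, inside the application:
#
#   import config
#   print(config.db, config.rethink_db_host)  # -> rethinkdb db.internal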
|
{
"content_hash": "c4c2c0cfa57e2eeb4cf01d5ffddc0458",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 26.8125,
"alnum_prop": 0.6923076923076923,
"repo_name": "snclucas/stashy",
"id": "8a5a0fc318d3ac3a61fd7dc9a62363296fecd733",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "205"
},
{
"name": "Python",
"bytes": "33508"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
}
|
import sys
import os
import azure.cli.main
from azure.cli.core.telemetry import (init_telemetry, user_agrees_to_telemetry,
telemetry_flush, log_telemetry)
try:
try:
if user_agrees_to_telemetry():
init_telemetry()
except Exception: #pylint: disable=broad-except
pass
args = sys.argv[1:]
# Check if we are in argcomplete mode - if so, we
# need to pick up our args from environment variables
if os.environ.get('_ARGCOMPLETE'):
comp_line = os.environ.get('COMP_LINE')
if comp_line:
args = comp_line.split()[1:]
sys.exit(azure.cli.main.main(args))
except KeyboardInterrupt:
log_telemetry('keyboard interrupt')
sys.exit(1)
finally:
try:
if user_agrees_to_telemetry():
telemetry_flush()
except Exception: #pylint: disable=broad-except
pass
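# Hedged illustration (not part of the original file) of the argcomplete
# branch above: when _ARGCOMPLETE is set, COMP_LINE holds the full command
# line being completed, and split()[1:] drops the program name, e.g.
#
#   COMP_LINE="az vm list --re"  ->  args == ["vm", "list", "--re"]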
|
{
"content_hash": "b65c90e3861c93f934da0732e876e810",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 26.647058823529413,
"alnum_prop": 0.6169977924944813,
"repo_name": "BurtBiel/azure-cli",
"id": "96ab52b1481c6d6c3345af50b7c817988d3752ff",
"size": "1252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/azure-cli/azure/cli/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "Python",
"bytes": "2108820"
},
{
"name": "Shell",
"bytes": "3300"
}
],
"symlink_target": ""
}
|
from collections import namedtuple, OrderedDict
from ast import literal_eval
import re
from datetime import datetime
import sys
ScriptFunction = \
namedtuple('Method', ['definition', 'signature', 'body', 'sha1'])
class Scripts(object):
def __init__(self):
self.namespaces = { 'default': {} }
def add(self, functions, namespace=None):
if namespace:
dest = self.namespaces.get(namespace)
if dest:
dest.update(functions)
else:
self.namespaces[namespace] = functions
else:
self.namespaces['default'].update(functions)
def get_scripts(self, namespace=None):
if not namespace:
namespace = 'default'
return self.namespaces.get(namespace)
def get_script(self, name, namespace=None):
if not namespace:
namespace = 'default'
functions = self.namespaces.get(namespace)
if functions:
return functions.get(name)
else:
return None
def script_body(self, name, args = None, namespace = None):
function = self.get_script(name, namespace)
if not function:
return None
param_string = re.search(r'\(([\w=\'", ]+)\)', function.signature)
params = [ParamDefault(param.split('=')) for param in
param_string.group(1).split(',')] if param_string else None
if isinstance(args, dict):
param_defaults = \
OrderedDict([val + (None,)
if len(val) == 1 else val for val in params])
for param in params:
if len(param) == 2 and param[0] not in args:
# Use default value
args[param[0]] = literal_eval(param[1])
args = {k:v for k,v in args.items() if k in param_defaults.keys()}
elif isinstance(args, tuple) or isinstance(args, list):
args = { params[i][0]: args[i] if i < len(args)
else literal_eval(params[i][1])
for i in range(0, max(len(params), len(args)))
if i < len(args) or len(params[i]) == 2 }
else:
if params:
if args:
args = { params[0][0]: args }
else:
if len(params[0]) == 2:
args = { params[0][0]: literal_eval(params[0][1]) }
else:
args = {}
else:
args = {}
split_body = re.split(r'([\"\'])', function.body)
replacements = {}
for k, v in args.items():
if isinstance(v, str) or isinstance(v, datetime):
replacements[k] = "'{}'".format(v)
elif sys.version_info[0] < 3 and isinstance(v, unicode):
replacements[k] = repr(v.encode('utf-8'))
else:
replacements[k] = '{}'.format(v)
for i, s in enumerate(split_body):
if i % 4 == 0:
for k, v in replacements.items():
split_body[i] = re.sub(r'\b{}\b'.format(k),
v, split_body[i])
return ''.join(split_body)
class ParamDefault(tuple):
    def __new__(cls, pair):
        l = len(pair)
        if l > 2:
            raise ValueError('Only a name, and (optionally) a '
                             'default value is valid for a parameter.')
        return tuple.__new__(cls, (pair[0].strip(),) +
                             ((pair[1],) if l > 1 else tuple()))
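# ---------------------------------------------------------------------------
# Hedged usage demo (not part of the original file), traced through the logic
# above: a stored script with a defaulted parameter has its body rendered
# with the argument substituted outside string literals.
if __name__ == '__main__':
    fn = ScriptFunction(definition=None,
                        signature="greet(name='world')",
                        body="print('hello ' + name)",
                        sha1=None)
    scripts = Scripts()
    scripts.add({'greet': fn})
    print(scripts.script_body('greet'))  # print('hello ' + 'world')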
|
{
"content_hash": "1c9889187df2fbc14edf1da4cc66c2b5",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 35.28155339805825,
"alnum_prop": 0.49036873968079253,
"repo_name": "Ostico/pyorient",
"id": "bcd6f69231ae6ae786350e3525e8cdbca55e999b",
"size": "3634",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyorient/scripts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "172"
},
{
"name": "Python",
"bytes": "411619"
},
{
"name": "Shell",
"bytes": "11391"
}
],
"symlink_target": ""
}
|
import numpy as np
from copy import copy
import pandas as pd
class constant:
def __init__(self, val):
self.val = val
def rvs(self, size=1):
if size == 1:
return copy(self.val)
return np.asarray([copy(self.val) for _ in range(size)])
def invert_value_mapping(value_mapping):
if value_mapping is None:
return None
result = {}
for name, mapping in value_mapping.items():
if isinstance(mapping, dict):
result[name] = {val: key for key, val in mapping.items()}
return result
def column_value_mapping(column):
unique = pd.unique(column)
return dict(zip(unique, range(len(unique))))
def dataframe_value_mapping(df):
result = {}
for name, column in zip(df.columns, df.values.T):
result[name] = column_value_mapping(column)
return result
def extract_kwarg(name, kwargs, default=None):
result = default
if name in kwargs:
result = kwargs[name]
del kwargs[name]
return result
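# Hedged usage demo (not part of the original file); run this module directly
# to see the helpers in action. All values below are illustrative.
if __name__ == '__main__':
    col = pd.Series(['a', 'b', 'a'])
    print(column_value_mapping(col))                        # {'a': 0, 'b': 1}
    print(invert_value_mapping({'col': {'a': 0, 'b': 1}}))  # {'col': {0: 'a', 1: 'b'}}
    print(constant(7).rvs(size=3))                          # [7 7 7]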
|
{
"content_hash": "3930c0e86473690ff1b378095cade0ad",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 69,
"avg_line_length": 23.204545454545453,
"alnum_prop": 0.6258570029382958,
"repo_name": "DLunin/pygraphmodels",
"id": "1b5a1ae9d98fd9fbc1770ed230a182efb4708226",
"size": "1021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphmodels/misc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "9868"
},
{
"name": "Cuda",
"bytes": "12568"
},
{
"name": "Jupyter Notebook",
"bytes": "701970"
},
{
"name": "Makefile",
"bytes": "2047"
},
{
"name": "Python",
"bytes": "49367"
}
],
"symlink_target": ""
}
|
from Geometry import ListGeoms
import Tools.web as web
import logging
log = logging.getLogger(__name__)
class IRC(ListGeoms):
def __init__(self,other=None):
#IRC specific lines
self.direction = 1
self.both = False
ListGeoms.__init__(self,other)
def __nonzero__(self):
if self.x and self.e:
return True
else:
return False
def webData(self,SortGeom = False):
b_gradFromE = True
# set energies to a baseline and normalize gradients
self.ces = self.toBaseLine()
#IRC Specific
if b_gradFromE:
self.grad = [0,]
for i in range(1,len(self.x)-1):
de = self.ces[i+1]-self.ces[i-1]
dx = float(self.x[i+1])-float(self.x[i-1])
self.grad.append(abs(de/dx))
self.grad.append(0)
is_ircgrad = self.settings.ircgrad and hasattr(self,'grad') and self.grad
else:
is_ircgrad = self.settings.ircgrad
if is_ircgrad:
if self.ces:
self.cgrad = self.normalizeGradients(mx=max(self.ces))
else:
self.cgrad = []
# Prepare comments
"""
self.comments = self.applyComments()
if is_ircgrad:
for i in range(len(self.grad)):
self.comments[i] += ' Grad= %s'%(self.grad[i])
"""
# Sort x, ces, geoms, and cgrads along x
mapd = ['x','ces','geoms']
if is_ircgrad:
mapd.append('cgrad')
self.sortAlongX(mapd)
"""
For ElectronicStructure objects, geometries will be shown
in the order as they appear in the input file.
For XYZ, if scan/irc, geometries are sorted by scanned
parameter or reaction coordinate
"""
# Make a plot
y = [self.ces,]
if is_ircgrad:
xlabel = 'IRC coord (red, sqrt(amu)*Bohr) and gradient (green)'
y.append(self.cgrad)
else:
xlabel = 'IRC coord (sqrt(amu)*Bohr)'
s = self.plot(xlabel,x=self.x,y=y)
if is_ircgrad:
s += self.extrema(title='IRC remarkable points (min_grads):',yg=self.cgrad,show_max=False,frame_names=self.x,frame_prefix='IRC=')
if self.settings.textirc:
            s += self.textirc(self.x, self.ces)
return s
def textirc(self,xs,ys):
s = ''
for x,y in zip(xs,ys):
s += "%.3f %.2f\n" % (x, y) + web.br
return s
def textDirection(self):
irc_dir = {-1:'Reverse',1:'Forward'}
if self.both:
return 'Reverse + Forward'
else:
return irc_dir[self.direction]
def normalizeGradients(self,mx):
if max(self.grad)>0:
ratio = mx / max(self.grad)
else:
ratio = 1.
yg = list(self.grad)
for i in range(len(yg)):
yg[i] *= ratio
return yg
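# Hedged worked example (not in the original source) for normalizeGradients:
# gradients are rescaled so their maximum matches the value passed in, e.g.
# with self.grad == [0, 2, 4] and mx == 8 the ratio is 2 and the method
# returns [0, 4, 8].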
|
{
"content_hash": "6a2328820c0f21dced59018249bbeb0d",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 141,
"avg_line_length": 29.15686274509804,
"alnum_prop": 0.5225285810356423,
"repo_name": "mtthwflst/terse",
"id": "87b87428b79ca4cdd057beb91565a15a698438e4",
"size": "2974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Geometry/IRC.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "282"
},
{
"name": "DIGITAL Command Language",
"bytes": "1364"
},
{
"name": "HTML",
"bytes": "4318"
},
{
"name": "Python",
"bytes": "194740"
}
],
"symlink_target": ""
}
|
import math
VERBOSE = False # Enables various prints during execution, useful for debugging or for simulating the algorithm by hand and checking each step against this program's output.
LITERALS_COST_FUNCTION = 0 # Cost of an implicant = number of literals it contains (see the "Reti Logiche" book for the background).
IMPLICANTS_COST_FUNCTION = 1 # Cost of an implicant = 1, regardless of its literals.
DEFAULT_COST_FUNCTION = LITERALS_COST_FUNCTION
class Implicant:
def __init__(self, element, num_of_vars, cost_function, coverset=False, dcmask=0, funmask=1):
self.onmask = element
self.dcmask = dcmask
self.num_of_vars = num_of_vars
self.funmask = funmask
self.reduced = False
self.cost_function = cost_function
self.already_selected = False
self.name = None
if not coverset:
self.coverset = {element}
else:
self.coverset = coverset
def onesNumber(self):
""" Number of 'ones' in the implicant (seen as a binary string) """
onemask = self.onmask & ~self.dcmask
ones_number = 0
while onemask > 0:
ones_number += onemask & 1
onemask >>= 1
return ones_number
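    # Hedged worked example (not in the original source): with
    # onmask == 0b1011 and dcmask == 0b0010, onemask == 0b1001, so
    # onesNumber() returns 2 -- don't-care positions are not counted.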
def defaultCost(self):
""" The basic (without considering that the implicant could be shared)
cost of the implicant, computed with the cost function set above """
if self.cost_function == LITERALS_COST_FUNCTION:
cost = self.num_of_vars
for i in range(cost):
if (self.dcmask >> i) & 1 == 1:
cost -= 1
return cost
else:
return 1 # each implicant costs 1
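    # Hedged worked example (not in the original source): for a 4-variable
    # implicant with dcmask == 0b0010, the literals cost function gives
    # 4 - 1 == 3 (one variable is eliminated), while the implicants cost
    # function always gives 1.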
def setUpdatedCost(self):
""" Update the cost, bringing it to that of shared implicants """
self.already_selected = True
def nextCost(self):
""" cost of selecting another time the implicant """
if self.already_selected:
return self.sharedCost()
else:
return self.defaultCost()
def sharedCost(self):
if self.cost_function == LITERALS_COST_FUNCTION:
return 1
else:
return 0
def joinedWith(self, implicant2):
""" Create a new implicant by joining two implicants
WARNING: this method doesn't check if they're joinable!!! """
return Implicant(element = self.onmask & implicant2.onmask,
num_of_vars = max(self.num_of_vars, implicant2.num_of_vars),
cost_function = self.cost_function,
dcmask = self.dcmask | (self.onmask ^ implicant2.onmask),
coverset = self.coverset | implicant2.coverset,
funmask = self.funmask & implicant2.funmask )
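    # Hedged worked example (not in the original source): joining 0b0101 and
    # 0b0111 (Hamming distance 1) yields onmask == 0b0101 and dcmask ==
    # 0b0010, i.e. the cube 01-1, whose coverset is the union of the two
    # input coversets.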
def isInListOfImplicants(self, list_of_implicants):
""" Check whether the implicant is in a list. Very bad & ugly :-) """
for element in list_of_implicants:
if element.onmask == self.onmask and element.dcmask == self.dcmask:
return True
return False
def hammingDistanceFrom(self, imp2):
""" Computes the Hamming distance between two implicants,
taking into account the don't cares """
        diffmask = (self.onmask ^ imp2.onmask) & ~(self.dcmask | imp2.dcmask)
        # values in either the 1st or the 2nd dcmask are taken into account later
distance = 0
while diffmask > 0:
distance += diffmask & 1
diffmask >>= 1
if self.dcmask != imp2.dcmask: # difference in the dcmask is relevant
diffmask = self.dcmask | imp2.dcmask
while diffmask > 0:
distance += diffmask & 1
diffmask >>= 1
return distance
def maskToString(self, num_of_bits = None):
""" A string, with the function mask in binary form (DRAFT...)
WARNING: * zeroes on the right (after the last one) are MISSING! """
t_str, k = '', 0
num = self.funmask
while num > 0:
if num & 1 == 1:
t_str = t_str + '1'
else:
t_str = t_str + '0'
num >>= 1
k += 1
# pad with zeroes
        if num_of_bits is not None and num_of_bits > k:
t_str += '0' * (num_of_bits - k)
return t_str
def __str__(self):
""" A string with the implicant in boolean form """
t_str = ''
for i in range(self.num_of_vars):
if (self.dcmask >> self.num_of_vars - i - 1) & 1 == 0:
ltr = chr(ord('a') + i)
if (self.onmask >> self.num_of_vars - i - 1) & 1 == 1:
t_str += ltr
else:
t_str += ltr + '\''
return t_str
# the following are just getters and\or setters, implemented
# only for the sake of making the rest of the code clearer
def setReduced(self, isTrue = True):
self.reduced = isTrue
def isReduced(self):
return self.reduced
class QmcFunction:
def __init__(self, ONset, OFFset, cnt):
        # every minterm that is neither in the ONset nor in the OFFset
        # is a don't care
        DCset = range(2 ** cnt)
for i in ONset:
DCset.remove(i)
for i in OFFset:
DCset.remove(i)
self.var_num = self.variableSetCardinality(ONset, DCset)
self.ONset = ONset
self.DCset = DCset
self.to_be_covered = ONset
def assignNumber(self, number):
        # most methods assume 'number' has been assigned; calling them
        # before this setter raises AttributeError
self.number = number
def reset(self):
""" revert the minterms to cover to the complete ONset """
self.to_be_covered = self.ONset
def variableSetCardinality(self, ONset, DCset):
""" number of variables of the function """
return int(math.ceil(math.log(max(ONset + DCset)+1, 2)))
def updateMintermsToCover(self, implicant):
""" Select a new implicant for coverage """
self.to_be_covered = filter(lambda x : x not in implicant.coverset, self.to_be_covered)
class QuineMcCluskey:
def __init__(self, function_list, cost_function):
# some assertions...
assert cost_function == IMPLICANTS_COST_FUNCTION \
or cost_function == LITERALS_COST_FUNCTION
# initializations & structures...
self.cost_function = cost_function
for k, fun in enumerate(function_list):
fun.assignNumber(k)
self.functions_to_simplify = function_list
self.implicant_list = self.buildMintermList()
self.sol = Solution(len(self.functions_to_simplify))
# sort the list wrt the number of ones
# (the list is assumed sorted by some loops later!!!)
self.implicant_list.sort(key = lambda e : e.onesNumber())
def numberOfVariables(self):
n = 0
for fun in self.functions_to_simplify:
if fun.var_num > n:
n = fun.var_num
return n
def buildFunMask(self, functions_covering):
mask = 0
for element in functions_covering:
tmp = element
mask |= 1 << tmp
return mask
def buildMintermList(self):
""" Build the list of minterms that will be expanded given the ONset
and DCset as lists of number representing in decimal notation the
input values for which the output should be 1 (or don't cares) """
nov = self.numberOfVariables()
masks = {} # {element of ON\DCset:[list of functions it covers]}
for fun in self.functions_to_simplify:
for el in fun.ONset + fun.DCset:
try:
masks[el].append(fun.number)
except KeyError:
masks[el] = [fun.number]
return [Implicant(key, nov,
cost_function = self.cost_function,
funmask=self.buildFunMask(masks[key]))
for key in masks]
def expansionStep(self, implicant_list):
""" single step of the expansion procedure """
newimp = []
for numi1, i1 in enumerate(implicant_list):
onesnumber = i1.onesNumber()
for i2 in implicant_list[numi1 + 1:]:
                if i2.onesNumber() > onesnumber + 1:
                    # i2 has at least two more ones than i1, so the Hamming
                    # distance is >= 2 here and for the rest of the list,
                    # which IS assumed sorted by number of ones
                    break
joinfunmask = i1.funmask & i2.funmask
if i1.hammingDistanceFrom(i2) == 1 and joinfunmask != 0:
if i1.funmask == joinfunmask:
i1.setReduced()
if i2.funmask == joinfunmask:
i2.setReduced()
tmp = i1.joinedWith(i2)
if not tmp.isInListOfImplicants(newimp):
newimp.append(tmp)
return newimp
def findPrimeImplicants(self):
""" performs the expansion procedure and finds the prime implicants
Writes the result in self.implicant_list (not to waste bytes...) """
newimp = self.implicant_list
while newimp != []:
newimp = self.expansionStep(newimp)
self.implicant_list += newimp
self.implicant_list[:] = [i for i in self.implicant_list if not i.isReduced()]
self.purgeTable() # remove implicants covering only don't cares
self.giveNameToImplicants()
return self.implicant_list # those are the prime implicants
def giveNameToImplicants(self):
n = 0
for imp in self.implicant_list:
imp.name = chr(ord('A') + n)
n += 1
def essentialityStep(self):
""" A single essentiality step. Returns the set of covered
minterms and the set of essential implicants that have been found. """
done_something = False
for fun in self.functions_to_simplify:
selected = self.doEssentialityForFunction(fun)
for implicant in selected:
done_something = True
# add the implicant to the solution & related things
implicant.setUpdatedCost()
fun.updateMintermsToCover(implicant)
self.sol.addImplicant(implicant, fun)
# if the implicants we've selected can't cover anything more
# they should be removed from the table
self.purgeTable()
return done_something
def doEssentialityForFunction(self, fun):
""" The essentiality step for a single function """
selected = set()
for column in fun.to_be_covered:
selected_implicant = self.implicantEssentialForElementInColumn(column, fun)
if selected_implicant:
selected.add(selected_implicant)
return selected
def implicantEssentialForElementInColumn(self, column, fun):
""" Is there a prime implicant essential for the minterm 'column'
in the function 'fun_num'? If so, returns it """
for number, implicant in enumerate(self.implicant_list):
if column in implicant.coverset \
and (implicant.funmask >> fun.number) & 1 == 1:
for implicant2 in self.implicant_list[number + 1:]:
if column in implicant2.coverset \
and (implicant2.funmask >> fun.number) & 1 == 1:
return False
return implicant
def purgeTable(self):
""" Purge the table (implicant_list) from empty rows """
todelete = set()
for implicant in self.implicant_list:
todelete.add(implicant)
for f in self.functions_to_simplify:
if (implicant.funmask >> f.number) & 1 == 1:
s = {i for i in f.to_be_covered if i in implicant.coverset}
if s != set():
todelete.remove(implicant)
break
self.implicant_list = [i for i in self.implicant_list if i not in todelete]
def rowDominanceStep(self):
""" Purge the implicant list from dominated rows """
todelete = set()
done_something = False
for i, implicant in enumerate(self.implicant_list):
for implicant2 in self.implicant_list[i + 1:]:
                # a_cbd: implicant2's useful coverage is contained in
                # implicant's, so implicant2 can be deleted; b_cbd is
                # the symmetric flag
                a_cbd = True
                b_cbd = True
for f in self.functions_to_simplify:
a = set()
b = set()
if (implicant.funmask >> f.number) & 1 == 1:
a = {x for x in implicant.coverset
if x in f.to_be_covered}
if (implicant2.funmask >> f.number) & 1 == 1:
b = {x for x in implicant2.coverset
if x in f.to_be_covered}
if not b.issubset(a): a_cbd = False
if not a.issubset(b): b_cbd = False
if a_cbd and implicant.nextCost() <= implicant2.nextCost():
todelete.add(implicant2)
done_something = True
elif b_cbd and implicant2.nextCost() <= implicant.nextCost():
todelete.add(implicant)
done_something = True
        self.implicant_list = [i for i in self.implicant_list
                               if i not in todelete]
return done_something
def columnDominanceStep(self):
""" The list of minterms to cover purged from dominated columns """
done_something = False
for f in self.functions_to_simplify:
to_be_deleted = set()
for numcol, col1 in enumerate(f.to_be_covered):
for col2 in f.to_be_covered[numcol + 1:]:
lc1 = {imp for imp in self.implicant_list
if col1 in imp.coverset}
lc2 = {imp for imp in self.implicant_list
if col2 in imp.coverset}
if lc1.issubset(lc2): # col1 dominates col2
to_be_deleted.add(col2)
done_something = True
elif lc2.issubset(lc1): # col2 dominates col1
to_be_deleted.add(col1)
done_something = True
f.to_be_covered = [i for i in f.to_be_covered
if i not in to_be_deleted]
return done_something
def branchAndBound(self):
""" Branch And Bound algorithm to find the optimal coverage """
set_s = [Solution(len(self.functions_to_simplify))]
# set (list for simplicity) of partially explored solutions
solution = Solution(len(self.functions_to_simplify)) # empty solution
bound = 'inf' # means infinity
while set_s: # while we have solutions to explore
k = set_s[0] # select a partial solution from set_s (the first)
set_s = set_s[1:] # delete set_s[0] from the solution set
if bound == 'inf' or k.getCost() < bound:
branched_list = self.branch(k)
for k_i in branched_list:
b = k_i.getCost()
if bound == 'inf' or b < bound:
if k_i.isComplete(self.functions_to_simplify):
solution = k_i
bound = b
if VERBOSE:
print ' |=> Complete solution. New bound = ', b
else:
set_s.append(k_i)
return solution
def branch(self, solution):
""" Branches a _partial_ solution by selecting another implicant """
k_list = []
for f in self.functions_to_simplify:
for i in f.to_be_covered:
if not solution.covers(i, f):
for j in self.implicant_list:
if i in j.coverset and (j.funmask >> f.number) & 1 == 1:
k_list.append(solution.branchWith(j, f.number))
return k_list
def simplify(self):
""" Start all the optimization process! """
if not self.reduceTable():
if VERBOSE:
print 'Branch and bound'
solution = self.branchAndBound()
self.sol.mergeWith(solution)
return self.sol
def reduceTable(self):
""" Try to simplify the problem by applying essentiality & dominance
criteria before using the branch and bound algorithm """
        done_something = True
        found_complete_cover = False
while done_something:
if VERBOSE: print 'Detecting essential implicants:'
done_something = self.essentialityStep()
if done_something:
found_complete_cover = self.sol.isComplete(self.functions_to_simplify)
if VERBOSE and not found_complete_cover:
self.printTable()
elif VERBOSE: print '(essential implicants not found)'
if VERBOSE: print 'Simplifying table by deleting dominated rows & columns'
tmp1 = self.rowDominanceStep()
tmp2 = self.columnDominanceStep()
done_something = tmp1 or tmp2 or done_something
if VERBOSE:
if tmp1 or tmp2:
print 'Resulting table:'
self.printTable()
if not tmp1: print '(dominated rows not found)'
elif not tmp2: print '(dominated columns not found)'
return found_complete_cover
def printImplicantList(self):
""" prints the specified list of implicants """
for implicant in self.implicant_list:
if implicant.isReduced():
selected_string = 'X'
else:
selected_string = ' '
print '%3c = %s %c\t' % (implicant.name,
implicant, selected_string),
print implicant.maskToString(len(self.functions_to_simplify)), '\t',
for i in implicant.coverset:
print i,
print '' # newline
def printTable(self):
""" prints the coverage table in a decent format. C column = cost"""
print ' ' * 8,
for f in self.functions_to_simplify:
for column in f.to_be_covered:
print "%3d " % column,
print '|',
print ' C'
for implicant in self.implicant_list:
print '%3c ->' % implicant.name,
for f in self.functions_to_simplify:
print '|',
for column in f.to_be_covered:
if column in implicant.coverset \
and (implicant.funmask >> f.number) & 1 == 1:
print ' X ',
else:
print ' ',
print '| ',implicant.nextCost()
class Solution():
def __init__(self, fnum = 0):
self.selected = []
self.cost = 0
for k in range(fnum):
self.selected.append(set())
def getCost(self):
return self.cost
def recomputeCost(self):
""" computes the cost of the partial solution """
cost = 0
sel = set()
for set_f in self.selected:
for implicant in set_f:
if implicant in sel: # shared implicant
if implicant.cost_function == LITERALS_COST_FUNCTION:
cost += 1
else:
sel.add(implicant)
cost += implicant.defaultCost()
return cost
def covers(self, target_imp, f):
""" Is this solution covering target_imp wrt the fnum function? """
for implicant in self.selected[f.number]:
if target_imp in implicant.coverset:
return True
return False
def isComplete(self, functions_to_simplify):
""" Is this solution a complete one? """
for f in functions_to_simplify:
fcv = set()
for im in self.selected[f.number]:
fcv |= im.coverset
for n in f.to_be_covered:
if n not in fcv:
return False
return True
def branchWith(self, implicant, fnum):
""" Create a new Solution object by adding implicant for fnum """
t = Solution()
for x in self.selected:
t.selected.append(set() | x)
t.cost = self.cost
t.selected[fnum].add(implicant)
# update the cost...
updated_cost = False
if implicant.nextCost() == implicant.defaultCost():
# check if the implicant is already selected for some other functions...
for fun in range(len(self.selected)):
if fun != fnum and implicant in self.selected[fun]:
t.cost += implicant.sharedCost()
updated_cost = True
break
if not updated_cost:
t.cost += implicant.nextCost()
return t
def mergeWith(self, sol2):
for i in range(len(self.selected)):
self.selected[i] |= sol2.selected[i]
        # this code runs only once, so it is simpler (and fast enough)
        # to recompute the cost from scratch
self.cost = self.recomputeCost()
def addImplicant(self, implicant, fun):
""" add an implicant to this Solution object """
self.selected[fun.number].add(implicant)
self.cost = self.recomputeCost()
def __str__(self):
""" returns a string containing the SOP form of the logical function
of all the implicants selected until now or specified in cov """
a, b, t_str = [], [], ''
for num, selected_for_cur_f in enumerate(self.selected):
t_str += 'F%d = ' % (num + 1)
is_first_step = True
for element in selected_for_cur_f:
if not is_first_step:
t_str += ' + '
else: is_first_step = False
if element in a:
t_str += element.name
b.append(element)
else:
a.append(element)
found = False
for selected_for_next_f in self.selected[num+1:]:
if element in selected_for_next_f:
found = True
t_str += element.name
if not found:
t_str += element.__str__()
t_str += '\n'
b.sort(key = lambda x: x.name)
for element in b:
t_str += element.name + ' = ' + element.__str__() + '\n'
return t_str
# TODO parse command line args as input instead of modifying the source code!
if __name__ == '__main__':
# an example of use of the library
VERBOSE = True # enable debug prints when running with the example...
q = QuineMcCluskey([
QmcFunction([3,4,6,7], [0,2]),
QmcFunction([3,4,6], [2,5]),
QmcFunction([0,2], [4,6])
], LITERALS_COST_FUNCTION)
print ' --- [Expansion] Prime implicants: ---'
q.findPrimeImplicants()
q.printImplicantList()
q.printTable()
print 'Solution found:'
print q.simplify()
print 'Cost:', q.sol.getCost()
|
{
"content_hash": "4e9c75e9166031fd90d34e01479eaa24",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 221,
"avg_line_length": 41.126315789473686,
"alnum_prop": 0.5379233853766744,
"repo_name": "massivezh/qmc",
"id": "b3b6d8f23f8423b9d48ad9728ba0138dee37a29e",
"size": "24413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qmc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "28618"
}
],
"symlink_target": ""
}
|
import unittest
try:
import unittest.mock as mock
except ImportError:
import mock
from oslo_config import cfg
from cloudbaseinit.tests import testutils
from cloudbaseinit.utils import network
CONF = cfg.CONF
class NetworkUtilsTest(unittest.TestCase):
@mock.patch('sys.platform', new='win32')
@mock.patch('cloudbaseinit.osutils.factory.get_os_utils')
@mock.patch('six.moves.urllib.parse.urlparse')
def _test_check_metadata_ip_route(self, mock_urlparse, mock_get_os_utils,
side_effect):
mock_utils = mock.MagicMock()
mock_split = mock.MagicMock()
mock_get_os_utils.return_value = mock_utils
mock_utils.check_os_version.return_value = True
mock_urlparse().netloc.split.return_value = mock_split
mock_split[0].startswith.return_value = True
mock_utils.check_static_route_exists.return_value = False
mock_utils.get_default_gateway.return_value = (1, '0.0.0.0')
mock_utils.add_static_route.side_effect = [side_effect]
network.check_metadata_ip_route('196.254.196.254')
mock_utils.check_os_version.assert_called_once_with(6, 0)
mock_urlparse.assert_called_with('196.254.196.254')
mock_split[0].startswith.assert_called_once_with("169.254.")
mock_utils.check_static_route_exists.assert_called_once_with(
mock_split[0])
mock_utils.get_default_gateway.assert_called_once_with()
mock_utils.add_static_route.assert_called_once_with(
mock_split[0], "255.255.255.255", '0.0.0.0', 1, 10)
def test_test_check_metadata_ip_route(self):
self._test_check_metadata_ip_route(side_effect=None)
def test_test_check_metadata_ip_route_fail(self):
with testutils.LogSnatcher('cloudbaseinit.utils.network') as snatcher:
self._test_check_metadata_ip_route(side_effect=ValueError)
self.assertIn('ValueError', snatcher.output[-1])
def test_address6_to_4_truncate(self):
address_map = {
"0:0:0:0:0:ffff:c0a8:f": "192.168.0.15",
"::ffff:c0a8:e": "192.168.0.14",
"::1": "0.0.0.1",
"1:2:3:4:5::8": "0.0.0.8",
"::": "0.0.0.0",
"::7f00:1": "127.0.0.1"
}
for v6, v4 in address_map.items():
self.assertEqual(v4, network.address6_to_4_truncate(v6))
def test_netmask6_to_4_truncate(self):
netmask_map = {
"128": "255.255.255.255",
"96": "255.255.255.0",
"0": "0.0.0.0",
"100": "255.255.255.128"
}
for v6, v4 in netmask_map.items():
self.assertEqual(v4, network.netmask6_to_4_truncate(v6))
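# To run this module directly (a sketch; the suite is normally driven by the
# project's test runner):
#   python -m unittest cloudbaseinit.tests.utils.test_network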
|
{
"content_hash": "d1834d7382b090e1d602f0a05a53c665",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 78,
"avg_line_length": 38.28169014084507,
"alnum_prop": 0.6103752759381899,
"repo_name": "cmin764/cloudbase-init",
"id": "f10aaabe0922bc2f237432d3f24201fedb17746b",
"size": "3334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudbaseinit/tests/utils/test_network.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "789914"
}
],
"symlink_target": ""
}
|
import re
import sys
import requests
def searchDatabase(startDate, endDate, searchText, stateArray):
# Database imposed year range
STARTYEAR = 1789
ENDYEAR = 1922
# check if all arguments are present
if len(sys.argv) >= 5:
if startDate >= STARTYEAR and endDate <= ENDYEAR:
print("---------------------------------------------")
title = '|{:^43}| '.format("Newspaper Usage of Term: " + searchText)
print(title)
print("---------------------------------------------")
header = '| {:>14}| {:>8}| {:>16}|'.format("STATE","YEAR","MATCHES")
print(header)
print("---------------------------------------------")
for state in stateArray:
for year in range(startDate, endDate + 1):
# create url and return html from results page
url = "http://chroniclingamerica.loc.gov/search/pages/results/?state=" + state + "&date1=" + \
str(year) + "&date2=" + str(year) + "&proxtext=" + searchText \
+ "&x=17&y=16&dateFilterType=yearRange&rows=20&searchType=basic"
page = requests.get(url)
# search for results pattern
matchObj = re.search('<p class="term">(.*) results', page.text)
# if matches
if matchObj:
matches = matchObj.group(1) + " matches"
result = '| {:>14}| {:>8}| {:>16}|'.format(state, year, matches)
print(result)
else:
matches = "0 matches"
result = '| {:>14}| {:>8}| {:>16}|'.format(state, year, matches)
print(result)
print("---------------------------------------------")
else:
sys.stderr.write("error: start date must be >= " + str(STARTYEAR) + " AND end date must be <= " + str(ENDYEAR))
else:
sys.stderr.write("usage: newspaperSearch.py startDate endDate searchWord state...")
# state arguments start at sys.argv[4]; guard before indexing argv so a short
# command line prints usage instead of raising IndexError
if len(sys.argv) >= 5:
    stateArray = sys.argv[4:]
    searchDatabase(int(sys.argv[1]), int(sys.argv[2]), sys.argv[3], stateArray)
else:
    sys.stderr.write("usage: newspaperSearch.py startDate endDate searchWord state...")
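# Example invocation (hypothetical values within the database's 1789-1922 range):
#   python newspaperSearch.py 1850 1860 railroad Vermont Ohio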
|
{
"content_hash": "a474cc6201144b36c7014a4bc6ea4579",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 123,
"avg_line_length": 41.36363636363637,
"alnum_prop": 0.46373626373626375,
"repo_name": "azharhussain96/newspaperDatabaseSearch",
"id": "d326ca696868b9c14db746a6d377367c3d0a59c6",
"size": "2696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "newspaperSearch.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2696"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'OrganizationMember.counter'
db.add_column(
'sentry_organizationmember',
'counter',
self.gf('sentry.db.models.fields.bounded.BoundedPositiveIntegerField')(
null=True, blank=True
),
keep_default=False
)
# Adding unique constraint on 'OrganizationMember', fields ['organization', 'counter']
db.create_unique('sentry_organizationmember', ['organization_id', 'counter'])
def backwards(self, orm):
# Removing unique constraint on 'OrganizationMember', fields ['organization', 'counter']
db.delete_unique('sentry_organizationmember', ['organization_id', 'counter'])
# Deleting field 'OrganizationMember.counter'
db.delete_column('sentry_organizationmember', 'counter')
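    # Applying this migration (a sketch, assuming a standard South setup):
    #   python manage.py migrate sentry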
models = {
'sentry.accessgroup': {
'Meta': {
'unique_together': "(('team', 'name'),)",
'object_name': 'AccessGroup'
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.User']",
'symmetrical': 'False'
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'projects': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Project']",
'symmetrical': 'False'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '50'
})
},
'sentry.activity': {
'Meta': {
'object_name': 'Activity'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Event']",
'null': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.alert': {
'Meta': {
'object_name': 'Alert'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'related_groups': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'related_alerts'",
'symmetrical': 'False',
'through': "orm['sentry.AlertRelatedGroup']",
'to': "orm['sentry.Group']"
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.alertrelatedgroup': {
'Meta': {
'unique_together': "(('group', 'alert'),)",
'object_name': 'AlertRelatedGroup'
},
'alert': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Alert']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
})
},
'sentry.apikey': {
'Meta': {
'object_name': 'ApiKey'
},
'allowed_origins':
('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '32'
}),
'label': (
'django.db.models.fields.CharField', [], {
'default': "'Default'",
'max_length': '64',
'blank': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Organization']"
}
),
'scopes': ('django.db.models.fields.BigIntegerField', [], {
'default': 'None'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.auditlogentry': {
'Meta': {
'object_name': 'AuditLogEntry'
},
'actor': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'audit_actors'",
'to': "orm['sentry.User']"
}
),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ip_address': (
'django.db.models.fields.GenericIPAddressField', [], {
'max_length': '39',
'null': 'True'
}
),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'target_object':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'target_user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'audit_targets'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.authidentity': {
'Meta': {
'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))",
'object_name': 'AuthIdentity'
},
'auth_provider': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.AuthProvider']"
}
),
'data': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'last_synced':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_verified':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
)
},
'sentry.authprovider': {
'Meta': {
'object_name': 'AuthProvider'
},
'config': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'default_global_access':
('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'default_role':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'default_teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']",
'unique': 'True'
}
),
'provider': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'sync_time':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
})
},
'sentry.broadcast': {
'Meta': {
'object_name': 'Broadcast'
},
'badge': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active':
('django.db.models.fields.BooleanField', [], {
'default': 'True',
'db_index': 'True'
}),
'link': (
'django.db.models.fields.URLField', [], {
'max_length': '200',
'null': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.CharField', [], {
'max_length': '256'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'",
'index_together': "(('group', 'datetime'),)"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'data':
('sentry.db.models.fields.node.NodeField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'null': 'True'
})
},
'sentry.eventmapping': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'EventMapping'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'event_id': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.file': {
'Meta': {
'object_name': 'File'
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '40',
'null': 'True'
}),
'headers': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'path': ('django.db.models.fields.TextField', [], {
'null': 'True'
}),
'size':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True'
}),
'storage':
('django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True'
}),
'storage_options': ('jsonfield.fields.JSONField', [], {
'default': '{}'
}),
'timestamp': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'type': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_public': (
'django.db.models.fields.NullBooleanField', [], {
'default': 'False',
'null': 'True',
'blank': 'True'
}
),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "''",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'null': 'True'
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'time_spent_total':
('sentry.db.models.fields.bounded.BoundedIntegerField', [], {
'default': '0'
}),
'times_seen': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
)
},
'sentry.groupassignee': {
'Meta': {
'object_name': 'GroupAssignee',
'db_table': "'sentry_groupasignee'"
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'unique': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'assignee_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_assignee_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.grouphash': {
'Meta': {
'unique_together': "(('project', 'hash'),)",
'object_name': 'GroupHash'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']",
'null': 'True'
}
),
'hash':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {
'unique_together': "(('rule', 'group'),)",
'object_name': 'GroupRuleStatus'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_active': ('django.db.models.fields.DateTimeField', [], {
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'rule': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Rule']"
}
),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {
'default': '0'
})
},
'sentry.groupseen': {
'Meta': {
'unique_together': "(('user', 'group'),)",
'object_name': 'GroupSeen'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'db_index': 'False'
}
)
},
'sentry.grouptagkey': {
'Meta': {
'unique_together': "(('project', 'group', 'key'),)",
'object_name': 'GroupTagKey'
},
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.grouptagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'GroupTagValue',
'db_table': "'sentry_messagefiltervalue'"
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'grouptag'",
'null': 'True',
'to': "orm['sentry.Project']"
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.helppage': {
'Meta': {
'object_name': 'HelpPage'
},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_visible': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'key': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'unique': 'True',
'null': 'True'
}
),
'priority':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'title': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.lostpasswordhash': {
'Meta': {
'object_name': 'LostPasswordHash'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'hash': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'unique': 'True'
}
)
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'last_updated':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {
'object_name': 'Organization'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'members': (
'django.db.models.fields.related.ManyToManyField', [], {
'related_name': "'org_memberships'",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMember']",
'to': "orm['sentry.User']"
}
),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.organizationaccessrequest': {
'Meta': {
'unique_together': "(('team', 'member'),)",
'object_name': 'OrganizationAccessRequest'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'member': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.organizationmember': {
'Meta': {
'unique_together':
"(('organization', 'user'), ('organization', 'email'), ('organization', 'counter'))",
'object_name':
'OrganizationMember'
},
'counter': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'null': 'True',
'blank': 'True'
}
),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': (
'django.db.models.fields.EmailField', [], {
'max_length': '75',
'null': 'True',
'blank': 'True'
}
),
'flags': ('django.db.models.fields.BigIntegerField', [], {
'default': '0'
}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Organization']"
}
),
'teams': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.Team']",
'symmetrical': 'False',
'through': "orm['sentry.OrganizationMemberTeam']",
'blank': 'True'
}
),
'type':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '50'
}),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'sentry_orgmember_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
)
},
'sentry.organizationmemberteam': {
'Meta': {
'unique_together': "(('team', 'organizationmember'),)",
'object_name': 'OrganizationMemberTeam',
'db_table': "'sentry_organizationmember_teams'"
},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'organizationmember': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.OrganizationMember']"
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.project': {
'Meta': {
'unique_together': "(('team', 'slug'), ('organization', 'slug'))",
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'platform':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True'
}),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50',
'null': 'True'
}),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']"
}
)
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': (
'django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True',
'blank': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'roles': ('django.db.models.fields.BigIntegerField', [], {
'default': '1'
}),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'status': (
'sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {
'unique_together': "(('project', 'version'),)",
'object_name': 'Release'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'version': ('django.db.models.fields.CharField', [], {
'max_length': '64'
})
},
'sentry.releasefile': {
'Meta': {
'unique_together': "(('release', 'ident'),)",
'object_name': 'ReleaseFile'
},
'file': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.File']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'ident': ('django.db.models.fields.CharField', [], {
'max_length': '40'
}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'release': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Release']"
}
)
},
'sentry.rule': {
'Meta': {
'object_name': 'Rule'
},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'label': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
)
},
'sentry.tagkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'TagKey',
'db_table': "'sentry_filterkey'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'label':
('django.db.models.fields.CharField', [], {
'max_length': '64',
'null': 'True'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}
),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'values_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.tagvalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'TagValue',
'db_table': "'sentry_filtervalue'"
},
'data': (
'sentry.db.models.fields.gzippeddict.GzippedDictField', [], {
'null': 'True',
'blank': 'True'
}
),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.team': {
'Meta': {
'unique_together': "(('organization', 'slug'),)",
'object_name': 'Team'
},
'date_added': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'organization': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Organization']"
}
),
'slug': ('django.db.models.fields.SlugField', [], {
'max_length': '50'
}),
'status':
('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {
'default': '0'
})
},
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_managed': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '128'
})
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}
),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
{
"content_hash": "8d99317833e053c65cdb0bdd87772364",
"timestamp": "",
"source": "github",
"line_count": 1492,
"max_line_length": 101,
"avg_line_length": 36.34517426273459,
"alnum_prop": 0.3923137920224243,
"repo_name": "jean/sentry",
"id": "3ffc1b7b91a8e8e967b466deea2846552e55c15a",
"size": "54251",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/sentry/south_migrations/0176_auto__add_field_organizationmember_counter__add_unique_organizationmem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "296112"
},
{
"name": "HTML",
"bytes": "314273"
},
{
"name": "JavaScript",
"bytes": "1293918"
},
{
"name": "Lua",
"bytes": "57158"
},
{
"name": "Makefile",
"bytes": "6632"
},
{
"name": "Python",
"bytes": "24515298"
},
{
"name": "Ruby",
"bytes": "4410"
},
{
"name": "Shell",
"bytes": "2942"
}
],
"symlink_target": ""
}
|
import os
import os.path
import sys
import tempfile
import subprocess
import logging
# This and check_output are shims to support features of Python 2.7
# on Python 2.6.
#
# This code was borrowed from PyPy 2.7.
# bitbucket.org/pypy/pypy/src/9d88b4875d6e/lib-python/2.7/subprocess.py
#
# This can be removed when the CloudFoundry environment is upgraded
# to Python 2.7 or higher.
#
class CalledProcessError(Exception):
"""This exception is raised when a process run by check_call() or
check_output() returns a non-zero exit status.
The exit status will be stored in the returncode attribute;
check_output() will also store the output in the output attribute.
"""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (
self.cmd, self.returncode)
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd, output=output)
return output
def stream_output(*popenargs, **kwargs):
r"""Run command with arguments and stream its output.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute.
The first argument should be the file like object where the output
should be written. The remainder of the arguments are the same as
for the Popen constructor.
Example:
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> fp = open('cmd-output.txt', 'wb')
>>> stream_output(fp, ["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=subprocess.STDOUT)
    (the error message is likewise written to fp)
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
if hasattr(popenargs[0], 'fileno'):
process = subprocess.Popen(stdout=popenargs[0],
*popenargs[1:], **kwargs)
retcode = process.wait()
else:
process = subprocess.Popen(stdout=subprocess.PIPE,
*popenargs[1:], **kwargs)
for c in iter(lambda: process.stdout.read(1024), ''):
popenargs[0].write(c)
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise CalledProcessError(retcode, cmd)
class BuildPack(object):
def __init__(self, ctx, url, branch=None, stream=sys.stdout):
self._ctx = ctx
self._url = url
self._branch = branch
self._stream = stream
self.bp_dir = tempfile.mkdtemp(prefix='buildpack')
self._log = logging.getLogger('runner')
def run(self):
if self._url:
self._clone()
self.framework = self._detect()
self._compile()
self.start_yml = self._release()
def _clone(self):
self._log.debug("Clongin [%s] to [%s]", self._url, self.bp_dir)
stream_output(self._stream,
" ".join(['git', 'clone', self._url, self.bp_dir]),
stderr=subprocess.STDOUT,
shell=True)
if self._branch:
self._log.debug("Branching to [%s]", self._branch)
stream_output(self._stream,
" ".join(['git', 'checkout', self._branch]),
stderr=subprocess.STDOUT,
shell=True)
def _detect(self):
self._log.debug("Running detect script")
cmd = [os.path.join(self.bp_dir, 'bin', 'detect'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
def _compile(self):
self._log.debug("Running compile script with build dir [%s] "
"and cache dir [%s]",
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR'])
cmd = [os.path.join(self.bp_dir, 'bin', 'compile'),
self._ctx['BUILD_DIR'],
self._ctx['CACHE_DIR']]
stream_output(self._stream,
" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True)
def _release(self):
self._log.debug("Running release script")
cmd = [os.path.join(self.bp_dir, 'bin', 'release'),
self._ctx['BUILD_DIR']]
return check_output(" ".join(cmd),
stderr=subprocess.STDOUT,
shell=True).strip()
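# A minimal usage sketch (not part of the original file; the URL and
# directories are hypothetical, but 'BUILD_DIR' and 'CACHE_DIR' are the only
# context keys the class reads above):
#
#   ctx = {'BUILD_DIR': '/tmp/staged/app', 'CACHE_DIR': '/tmp/cache'}
#   bp = BuildPack(ctx, 'https://example.com/some-buildpack.git')
#   bp.run()  # clone -> detect -> compile -> release
#   print bp.framework, bp.start_yml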
|
{
"content_hash": "d38b7adf31bca122a28b1f0922c1175f",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 36.773809523809526,
"alnum_prop": 0.5768857235351247,
"repo_name": "cloudn/cf-php-build-pack",
"id": "2a992486e9bdd6a621868831912905b6a15ccb78",
"size": "6178",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "lib/build_pack_utils/runner.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "4095"
},
{
"name": "Batchfile",
"bytes": "5850"
},
{
"name": "CSS",
"bytes": "86729"
},
{
"name": "HTML",
"bytes": "40374"
},
{
"name": "JavaScript",
"bytes": "23991"
},
{
"name": "Nginx",
"bytes": "765"
},
{
"name": "PHP",
"bytes": "16338051"
},
{
"name": "Python",
"bytes": "252832"
},
{
"name": "Ruby",
"bytes": "10391"
},
{
"name": "Shell",
"bytes": "12750"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import time
import math
import re
import platform
import uuid
import pwd
import signal
from functools import partial
from multiprocessing import Process, Event
from ast import literal_eval
import psutil
from thrift.protocol import TBinaryProtocol
from plow.rndaemon import conf
conf.NETWORK_DISABLED = True
from plow.rndaemon.rpc import ttypes, RndServiceApi
from plow.rndaemon import core, server, client, utils
import logging
logging.basicConfig(level=logging.DEBUG)
conf.TASK_PROXY_USER = os.getenv('PLOW_PROXY_USER', conf.TASK_PROXY_USER)
ROOT = os.path.abspath(os.path.dirname(__file__))
CMDS_UTIL = os.path.join(ROOT, 'utils/cmds.py')
DATA_DIR = os.path.join(ROOT, 'data')
IS_LINUX = platform.system() in ('FreeBSD', 'Linux')
class TestResourceManager(unittest.TestCase):
def tearDown(self):
print "\n"
print "="*60, "\n"
def testCoreCheckout(self):
manager = core.ResourceMgr
totalCores = core.Profiler.physicalCpus
slots = len(manager.getSlots())
self.assertEqual(totalCores, slots)
slots = len(manager.getOpenSlots())
self.assertEqual(totalCores, slots)
slots = []
for i in xrange(1, totalCores + 1):
slots += manager.checkout(1)
total = totalCores - i
openslots = len(manager.getOpenSlots())
self.assertEqual(total, openslots)
manager.checkin(slots)
openslots = len(manager.getOpenSlots())
self.assertEqual(totalCores, openslots)
class TestProcessManager(unittest.TestCase):
_logdir = tempfile.gettempdir()
_totalCores = core.Profiler.physicalCpus
def setUp(self):
self._logfile = tempfile.mktemp('.log', 'plow-test-')
self._processmgr_processFinished = core.ProcessMgr.processFinished
def tearDown(self):
# give these types of tests a moment to close down
time.sleep(1)
core.ProcessMgr.processFinished = self._processmgr_processFinished
print "\n"
print "="*60, "\n"
#
# Tests
#
def testRunTaskCommand(self):
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(status, 0, "Expected a 0 Exit Status, but got %s" % status)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandHalfCores(self):
if self._totalCores < 3:
return
cores = int(math.ceil(self._totalCores * .5))
process = self.getNewTaskCommand()
process.cores = cores
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandMaxCores(self):
process = self.getNewTaskCommand()
process.cores = self._totalCores
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandOutOfCores(self):
process = self.getNewTaskCommand()
process.cores = self._totalCores + 1
process.command = ["/bin/ls", self._logdir]
self.assertRaises(ttypes.RndException, core.ProcessMgr.runProcess, process)
def testKillRunningTask(self):
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'hard_to_kill']
core.ProcessMgr.runProcess(process)
time.sleep(1)
runningTasks = core.ProcessMgr.getRunningTasks()
total = len(runningTasks)
self.assertEqual(total, 1, msg="Expected there to be one running task")
task = runningTasks[0]
core.ProcessMgr.killRunningTask(task.procId, "Killing for testing reasons")
time.sleep(1)
count = len(core.ProcessMgr.getRunningTasks())
self.assertEqual(count, 0,
msg="Expected 0 running tasks but got %s" % count)
i = 0
while core.ProcessMgr.getRunningTasks():
time.sleep(.5)
self.assertTrue(i < 10,
"Tasks are still running when they should be dead by now")
i += 1
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(status, 1, "Expected a 0 Exit Status, but got %s" % status)
assert abs(sig) in (signal.SIGTERM, signal.SIGKILL), "Expected a 9 or 15 Exit Signal, but got %s" % sig
def testFailedTask(self):
D = {'result': None}
def processFinished(d, *args):
d['result'] = args[0]
self._processmgr_processFinished(*args)
core.ProcessMgr.processFinished = partial(processFinished, D)
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'crashing']
task = core.ProcessMgr.runProcess(process, wait=5)
ppid = task.pid
        # self.assertNotEqual(ppid, -1, "Process never started properly")
try:
psutil.Process(ppid).wait(5)
except psutil.TimeoutExpired:
self.fail("Task should not still be running: %s" % task)
except psutil.NoSuchProcess:
pass
i = 0
while core.ProcessMgr.getRunningTasks():
time.sleep(.5)
self.assertTrue(i < 10,
"Tasks are still running when they should be dead by now")
i += 1
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(sig, 0)
self.assertEqual(status, 1)
self.assertTrue(D['result'] is not None, "Result was %r" % D)
self.assertEqual(D['result'].exitStatus, 1)
self.assertEqual(D['result'].exitSignal, 0)
def testTaskProgress(self):
# disable the callback
D = {'result': None}
def processFinished(d, *args):
d['result'] = args[0]
conf.TASK_PROGRESS_PATTERNS = {
'blender': '^Fra:\\d+ .*? \\| Rendering \\| .*? (\\d+/\\d+)$',
'mray': '^JOB[\\w. ]+:\\s+([\\d.]+%)\\s+'
}
core.ProcessMgr.processFinished = partial(processFinished, D)
process = self.getNewTaskCommand()
process.taskTypes = ['blender', 'mray']
for log in ('blender.log', 'mentalRay.log'):
process.command = [CMDS_UTIL, 'echo_log', os.path.join(DATA_DIR, log)]
t = core._ProcessThread(process, cpus=[0])
running = t.getRunningTask()
self.assertEqual(running.progress, 0,
'Initial progress for "%s" job should be 0' % log)
running.lastLog = None
repr(running)
t.start()
t.join()
self.assertTrue(D['result'] is not None)
self.assertEqual(D['result'].exitStatus, 0)
running = t.getRunningTask()
self.assertEqual(running.progress, 1,
'Final progress for "%s" job should be 1. Got %s' \
% (log, running.progress))
D['result'] = None
def testTaskShutdown(self):
procs = []
for slot in core.ResourceMgr.getOpenSlots():
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'hard_to_kill']
core.ProcessMgr.runProcess(process, wait=1)
procs.append(process)
time.sleep(1)
core.ProcessMgr.shutdown()
for proc in procs:
sig, status = self.getLogSignalStatus(proc.logFile)
self.assertEqual(status, 1, "Expected 1 Exit Status, but got %s" % status)
self.assertEqual(sig, 86, "Expected 86 Exit Signal, but got %s" % sig)
def testPingPong(self):
process = self.getNewTaskCommand()
process.command = ["sleep", ".25"]
core.ProcessMgr.runProcess(process)
handler = server.RndProcessHandler()
ping = handler.pingPong()
        self.assertFalse(ping.tasks, "Expected an empty task list")
ping = handler.pingPong(withTasks=True)
self.assertTrue(ping.tasks, "Expected to find a running task in ping result")
logging.debug("PingPong: %r", ping)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
#
# Utils
#
def getNewTaskCommand(self):
process = ttypes.RunTaskCommand()
process.procId = uuid.uuid4()
process.taskId = uuid.uuid4()
process.cores = 1
process.uid = os.geteuid()
process.username = pwd.getpwuid(process.uid).pw_name
process.env = {}
process.logFile = self._logfile
return process
def cpuAffinityTestUtil(self, process):
captured_affinity = tuple(self.getLogCpuAffinity(process.logFile))
count = len(captured_affinity)
self.assertTrue(count == 1, "Expected only 1 result. Got %d" % count)
if IS_LINUX:
captured = captured_affinity[0]
cpu_set = set()
logical_cpus = core.Profiler.cpuprofile.logical_cpus
for i in xrange(process.cores):
cpu_set.update(logical_cpus[i])
cpu_tuple = tuple(cpu_set)
self.assertEqual(captured, cpu_tuple,
                             'Captured cpu affinity %s does not match expected %s' % (captured, cpu_tuple))
@staticmethod
def getLogSignalStatus(logfile):
status = None
signal = None
status_field = 'Exit Status:'
signal_field = 'Signal:'
with open(logfile) as f:
for line in f:
if line.startswith(status_field):
try:
status = int(line.split(status_field, 1)[-1])
except:
pass
elif line.startswith(signal_field):
try:
signal = int(line.split(signal_field, 1)[-1])
except:
pass
return signal, status
@staticmethod
def getLogCpuAffinity(logfile):
affinity = set()
with open(logfile) as f:
for line in f:
match = re.search(r'cpu_affinity == (\([\d, ]+\))', line)
if match:
try:
cpus = literal_eval(match.group(1))
except:
continue
affinity.add(cpus)
return affinity
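    # e.g. a log line "cpu_affinity == (0, 1)" is captured by the regex above
    # and literal_eval turns it into the tuple (0, 1).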
class TestCommunications(unittest.TestCase):
"""
Creates a mock server to accept communication tests
from the client API
"""
def setUp(self):
self.event = Event()
self.server_port = 21212
handler = _ServiceHandler(self.event)
prot = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
self.server = server.get_server(RndServiceApi,
handler,
self.server_port,
protocol=prot)
self.t_server = Process(target=self.server.serve)
self.t_server.daemon = True
self.t_server.start()
time.sleep(.1)
def tearDown(self):
self.t_server.terminate()
time.sleep(1)
print "\n"
print "="*60, "\n"
def testSendPing(self):
"""
Get a connection to the local "server" and test
that it receives a ping.
"""
ping = ttypes.Ping()
ping.hw = ttypes.Hardware()
service, transport = client.getPlowConnection("localhost", self.server_port)
service.sendPing(ping)
self.event.wait(3)
self.assertTrue(
self.event.is_set(),
msg="Server did not receive ping from client in reasonable time")
transport.close()
class _ServiceHandler(object):
def __init__(self, evt):
self.event = evt
def sendPing(self, ping):
self.event.set()
class TestLogParser(unittest.TestCase):
def testProgressStatic(self):
parser = utils.ProcessLogParser([
'^Fra:\d+ .*? \| Rendering \| .*? (\d+/\d+)$',
'^JOB[\w. ]+:\s+([\d.]+%)\s+'])
logtests = {
'blender.log': {
'total': 42,
'indexes': [(0, 0.0), (5, .4375), (20, .671875), (30, .828125), (-1, 1.0)]
},
'mentalRay.log': {
'total': 300,
'indexes': [(0, .003), (20, .07), (50, .17), (150, .503), (250, .836), (-1, 1.0)]
}
}
for name, attribs in logtests.iteritems():
log = os.path.join(DATA_DIR, name)
progs = []
for line in open(log):
val = parser.parseProgress(line)
if val is not None:
progs.append(val)
total = attribs['total']
found = len(progs)
self.assertEqual(found, total, "Expected %d progress updates. Got %d" % (total, found))
for idx, val in attribs['indexes']:
self.assertEqual(progs[idx], val)
if __name__ == "__main__":
suite = unittest.TestSuite()
for t in (TestCommunications, TestResourceManager, TestProcessManager):
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(t))
unittest.TextTestRunner(verbosity=2).run(suite)
|
{
"content_hash": "3c08bbcef1a7d03ec3c1bb4f0ef99e1b",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 111,
"avg_line_length": 29.789934354485776,
"alnum_prop": 0.575657411488174,
"repo_name": "chadmv/plow",
"id": "77c9413373123ef96bc6f829449c71d5e9487bb5",
"size": "13637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python/plow/rndaemon/test/test_run.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1126"
},
{
"name": "C++",
"bytes": "84342"
},
{
"name": "CSS",
"bytes": "11753"
},
{
"name": "Java",
"bytes": "646872"
},
{
"name": "JavaScript",
"bytes": "20300"
},
{
"name": "Python",
"bytes": "580077"
},
{
"name": "SQL",
"bytes": "39971"
},
{
"name": "Shell",
"bytes": "8542"
}
],
"symlink_target": ""
}
|
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
## See the LICENSE file in the project root for more information.
#
#
#USAGE:
#Add Events: modify <root>/src/vm/ClrEtwAll.man
#Look at the Code in <root>/src/scripts/genLttngProvider.py for using subroutines in this file
#
# Python 2 compatibility
from __future__ import print_function
import os
import xml.dom.minidom as DOM
from utilities import open_for_update
stdprolog="""
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
/******************************************************************
DO NOT MODIFY. AUTOGENERATED FILE.
This file is generated using the logic from <root>/src/scripts/genEventing.py
******************************************************************/
"""
stdprolog_cmake="""
#
#
#******************************************************************
#DO NOT MODIFY. AUTOGENERATED FILE.
#This file is generated using the logic from <root>/src/scripts/genEventing.py
#******************************************************************
"""
lindent = " ";
palDataTypeMapping ={
#constructed types
"win:null" :" ",
"win:Int64" :"const __int64",
"win:ULong" :"const ULONG",
"win:count" :"*",
"win:Struct" :"const void",
#actual spec
"win:GUID" :"const GUID",
"win:AnsiString" :"LPCSTR",
"win:UnicodeString" :"PCWSTR",
"win:Double" :"const double",
"win:Int32" :"const signed int",
"win:Boolean" :"const BOOL",
"win:UInt64" :"const unsigned __int64",
"win:UInt32" :"const unsigned int",
"win:UInt16" :"const unsigned short",
"win:UInt8" :"const unsigned char",
"win:Pointer" :"const void*",
"win:Binary" :"const BYTE"
}
# A Template represents an ETW template and can contain 1 or more AbstractTemplates.
# Each AbstractTemplate contains a FunctionSignature.
# A FunctionSignature consists of FunctionParameters representing each parameter in its signature.
def getParamSequenceSize(paramSequence, estimate):
total = 0
pointers = 0
for param in paramSequence:
if param == "win:Int64":
total += 8
elif param == "win:ULong":
total += 4
elif param == "GUID":
total += 16
elif param == "win:Double":
total += 8
elif param == "win:Int32":
total += 4
elif param == "win:Boolean":
total += 4
elif param == "win:UInt64":
total += 8
elif param == "win:UInt32":
total += 4
elif param == "win:UInt16":
total += 2
elif param == "win:UInt8":
total += 1
elif param == "win:Pointer":
if estimate:
total += 8
else:
pointers += 1
elif param == "win:Binary":
total += 1
elif estimate:
if param == "win:AnsiString":
total += 32
elif param == "win:UnicodeString":
total += 64
elif param == "win:Struct":
total += 32
else:
raise Exception("Don't know size for " + param)
if estimate:
return total
return total, pointers
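# For example, with the byte sizes hard-coded above (illustrative calls):
#   getParamSequenceSize(["win:UInt32", "win:Pointer"], True)   -> 12 (pointer estimated as 8 bytes)
#   getParamSequenceSize(["win:UInt32", "win:Pointer"], False)  -> (4, 1) (4 fixed bytes, 1 pointer)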
class Template:
def __repr__(self):
return "<Template " + self.name + ">"
def __init__(self, templateName, fnPrototypes, dependencies, structSizes, arrays):
self.name = templateName
self.signature = FunctionSignature()
self.structs = structSizes
self.arrays = arrays
for variable in fnPrototypes.paramlist:
for dependency in dependencies[variable]:
if not self.signature.getParam(dependency):
self.signature.append(dependency, fnPrototypes.getParam(dependency))
def getFnParam(self, name):
return self.signature.getParam(name)
@property
def num_params(self):
return len(self.signature.paramlist)
@property
def estimated_size(self):
total = getParamSequenceSize((self.getFnParam(paramName).winType for paramName in self.signature.paramlist), True)
if total < 32:
total = 32
elif total > 1024:
total = 1024
return total
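# e.g. a template whose only parameter is a win:UInt32 sums to 4 bytes, which
# the clamp above raises to the 32-byte floor.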
class FunctionSignature:
def __repr__(self):
return ", ".join(self.paramlist)
def __init__(self):
self.LUT = {} # dictionary of FunctionParameter
self.paramlist = [] # list of parameters to maintain their order in signature
def append(self,variable,fnparam):
self.LUT[variable] = fnparam
self.paramlist.append(variable)
def getParam(self,variable):
return self.LUT.get(variable)
def getLength(self):
return len(self.paramlist)
class FunctionParameter:
def __repr__(self):
return self.name
def __init__(self,winType,name,count,prop):
self.winType = winType #ETW type as given in the manifest
self.name = name #parameter name as given in the manifest
self.prop = prop #any special property as determined by the manifest and developer
        # self.count indicates if the parameter is a pointer
if count == "win:null":
self.count = "win:null"
elif count or winType == "win:GUID" or count == "win:count":
#special case for GUIDS, consider them as structs
self.count = "win:count"
else:
self.count = "win:null"
def getTopLevelElementsByTagName(node,tag):
dataNodes = []
for element in node.getElementsByTagName(tag):
if element.parentNode == node:
dataNodes.append(element)
return dataNodes
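# Unlike node.getElementsByTagName(tag), which searches all descendants, this
# helper keeps only direct children: for a <template> holding a top-level
# <data> plus a <struct> that wraps another <data>, tag='data' returns just
# the top-level element.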
ignoredXmlTemplateAttributes = frozenset(["map","outType"])
usedXmlTemplateAttributes = frozenset(["name","inType","count", "length"])
def parseTemplateNodes(templateNodes):
#return values
allTemplates = {}
for templateNode in templateNodes:
structCounts = {}
arrays = {}
templateName = templateNode.getAttribute('tid')
        var_Dependencies = {}
fnPrototypes = FunctionSignature()
dataNodes = getTopLevelElementsByTagName(templateNode,'data')
        # Validate that no new attributes have been added to the manifest
for dataNode in dataNodes:
nodeMap = dataNode.attributes
for attrib in nodeMap.values():
attrib_name = attrib.name
                if attrib_name not in ignoredXmlTemplateAttributes and attrib_name not in usedXmlTemplateAttributes:
raise ValueError('unknown attribute: '+ attrib_name + ' in template:'+ templateName)
for dataNode in dataNodes:
variable = dataNode.getAttribute('name')
wintype = dataNode.getAttribute('inType')
#count and length are the same
wincount = dataNode.getAttribute('count')
            winlength = dataNode.getAttribute('length')
var_Props = None
var_dependency = [variable]
if winlength:
if wincount:
raise Exception("both count and length property found on: " + variable + "in template: " + templateName)
wincount = winlength
if (wincount.isdigit() and int(wincount) ==1):
wincount = ''
if wincount:
if (wincount.isdigit()):
var_Props = wincount
elif fnPrototypes.getParam(wincount):
var_Props = wincount
var_dependency.insert(0, wincount)
arrays[variable] = wincount
#construct the function signature
if wintype == "win:GUID":
var_Props = "sizeof(GUID)/sizeof(int)"
            var_Dependencies[variable] = var_dependency
fnparam = FunctionParameter(wintype,variable,wincount,var_Props)
fnPrototypes.append(variable,fnparam)
structNodes = getTopLevelElementsByTagName(templateNode,'struct')
for structToBeMarshalled in structNodes:
structName = structToBeMarshalled.getAttribute('name')
countVarName = structToBeMarshalled.getAttribute('count')
assert(countVarName == "Count")
assert(countVarName in fnPrototypes.paramlist)
if not countVarName:
raise ValueError("Struct '%s' in template '%s' does not have an attribute count." % (structName, templateName))
names = [x.attributes['name'].value for x in structToBeMarshalled.getElementsByTagName("data")]
types = [x.attributes['inType'].value for x in structToBeMarshalled.getElementsByTagName("data")]
structCounts[structName] = countVarName
            var_Dependencies[structName] = [countVarName, structName]
fnparam_pointer = FunctionParameter("win:Struct", structName, "win:count", countVarName)
fnPrototypes.append(structName, fnparam_pointer)
        allTemplates[templateName] = Template(templateName, fnPrototypes, var_Dependencies, structCounts, arrays)
return allTemplates
def generateClrallEvents(eventNodes,allTemplates):
clrallEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
#generate EventEnabled
clrallEvents.append("inline BOOL EventEnabled")
clrallEvents.append(eventName)
clrallEvents.append("() {return ")
clrallEvents.append("EventPipeEventEnabled" + eventName + "() || ")
clrallEvents.append("EventXplatEnabled" + eventName + "();}\n\n")
#generate FireEtw functions
fnptype = []
fnbody = []
fnptype.append("inline ULONG FireEtw")
fnptype.append(eventName)
fnptype.append("(\n")
line = []
fnptypeline = []
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if params in template.structs:
fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))
fnptypeline.append(lindent)
fnptypeline.append(typewName)
fnptypeline.append(countw)
fnptypeline.append(" ")
fnptypeline.append(fnparam.name)
fnptypeline.append(",\n")
#fnsignature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
if params in template.structs:
line.append(fnparam.name + "_ElementSize")
line.append(", ")
line.append(fnparam.name)
line.append(",")
#remove trailing commas
if len(line) > 0:
del line[-1]
if len(fnptypeline) > 0:
del fnptypeline[-1]
fnptype.extend(fnptypeline)
fnptype.append("\n)\n{\n")
fnbody.append(lindent)
fnbody.append("ULONG status = EventPipeWriteEvent" + eventName + "(" + ''.join(line) + ");\n")
fnbody.append(lindent)
fnbody.append("status &= FireEtXplat" + eventName + "(" + ''.join(line) + ");\n")
fnbody.append(lindent)
fnbody.append("return status;\n")
fnbody.append("}\n\n")
clrallEvents.extend(fnptype)
clrallEvents.extend(fnbody)
return ''.join(clrallEvents)
def generateClrXplatEvents(eventNodes, allTemplates, extern):
clrallEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
#generate EventEnabled
if extern: clrallEvents.append('extern "C" ')
clrallEvents.append("BOOL EventXplatEnabled")
clrallEvents.append(eventName)
clrallEvents.append("();\n")
#generate FireEtw functions
fnptype = []
fnptypeline = []
if extern: fnptype.append('extern "C" ')
fnptype.append("ULONG FireEtXplat")
fnptype.append(eventName)
fnptype.append("(\n")
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if params in template.structs:
fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))
fnptypeline.append(lindent)
fnptypeline.append(typewName)
fnptypeline.append(countw)
fnptypeline.append(" ")
fnptypeline.append(fnparam.name)
fnptypeline.append(",\n")
#remove trailing commas
if len(fnptypeline) > 0:
del fnptypeline[-1]
fnptype.extend(fnptypeline)
fnptype.append("\n);\n")
clrallEvents.extend(fnptype)
return ''.join(clrallEvents)
def generateClrEventPipeWriteEvents(eventNodes, allTemplates, extern):
clrallEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
#generate EventPipeEventEnabled and EventPipeWriteEvent functions
eventenabled = []
writeevent = []
fnptypeline = []
        if extern: eventenabled.append('extern "C" ')
eventenabled.append("BOOL EventPipeEventEnabled")
eventenabled.append(eventName)
eventenabled.append("();\n")
if extern: writeevent.append('extern "C" ')
writeevent.append("ULONG EventPipeWriteEvent")
writeevent.append(eventName)
writeevent.append("(\n")
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
wintypeName = fnparam.winType
typewName = palDataTypeMapping[wintypeName]
winCount = fnparam.count
countw = palDataTypeMapping[winCount]
if params in template.structs:
fnptypeline.append("%sint %s_ElementSize,\n" % (lindent, params))
fnptypeline.append(lindent)
fnptypeline.append(typewName)
fnptypeline.append(countw)
fnptypeline.append(" ")
fnptypeline.append(fnparam.name)
fnptypeline.append(",\n")
#remove trailing commas
if len(fnptypeline) > 0:
del fnptypeline[-1]
writeevent.extend(fnptypeline)
writeevent.append("\n);\n")
clrallEvents.extend(eventenabled)
clrallEvents.extend(writeevent)
return ''.join(clrallEvents)
#generates the dummy header file which is used by the VM as the entry point to the logging functions
def generateclrEtwDummy(eventNodes,allTemplates):
clretmEvents = []
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
fnptype = []
#generate FireEtw functions
fnptype.append("#define FireEtw")
fnptype.append(eventName)
fnptype.append("(");
line = []
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
fnparam = fnSig.getParam(params)
if params in template.structs:
line.append(fnparam.name + "_ElementSize")
line.append(", ")
line.append(fnparam.name)
line.append(", ")
#remove trailing commas
if len(line) > 0:
del line[-1]
fnptype.extend(line)
fnptype.append(") 0\n")
clretmEvents.extend(fnptype)
return ''.join(clretmEvents)
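# For an event whose symbol is, say, GCStart (hypothetical name) with template
# parameters Count and Depth, the generated line would be:
#   #define FireEtwGCStart(Count, Depth) 0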
def generateClralltestEvents(sClrEtwAllMan):
tree = DOM.parse(sClrEtwAllMan)
clrtestEvents = []
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
for eventNode in eventNodes:
eventName = eventNode.getAttribute('symbol')
templateName = eventNode.getAttribute('template')
clrtestEvents.append(" EventXplatEnabled" + eventName + "();\n")
clrtestEvents.append("Error |= FireEtXplat" + eventName + "(\n")
line =[]
if templateName:
template = allTemplates[templateName]
fnSig = template.signature
for params in fnSig.paramlist:
if params in template.structs:
line.append("sizeof(Struct1),\n")
argline =''
fnparam = fnSig.getParam(params)
if fnparam.name.lower() == 'count':
argline = '2'
else:
if fnparam.winType == "win:Binary":
argline = 'win_Binary'
elif fnparam.winType == "win:Pointer" and fnparam.count == "win:count":
argline = "(const void**)&var11"
elif fnparam.winType == "win:Pointer" :
argline = "(const void*)var11"
elif fnparam.winType =="win:AnsiString":
argline = '" Testing AniString "'
elif fnparam.winType =="win:UnicodeString":
argline = 'W(" Testing UnicodeString ")'
else:
if fnparam.count == "win:count":
line.append("&")
argline = fnparam.winType.replace(":","_")
line.append(argline)
line.append(",\n")
#remove trailing commas
if len(line) > 0:
del line[-1]
line.append("\n")
line.append(");\n")
clrtestEvents.extend(line)
return ''.join(clrtestEvents)
def generateSanityTest(sClrEtwAllMan,testDir):
if not testDir:
return
print('Generating Event Logging Tests')
if not os.path.exists(testDir):
os.makedirs(testDir)
cmake_file = testDir + "/CMakeLists.txt"
test_cpp = "clralltestevents.cpp"
testinfo = testDir + "/testinfo.dat"
#CMake File:
with open_for_update(cmake_file) as Cmake_file:
Cmake_file.write(stdprolog_cmake)
Cmake_file.write("""
cmake_minimum_required(VERSION 2.8.12.2)
set(CMAKE_INCLUDE_CURRENT_DIR ON)
set(SOURCES
""")
Cmake_file.write(test_cpp)
Cmake_file.write("""
)
include_directories(${GENERATED_INCLUDE_DIR})
if(FEATURE_PAL)
include_directories(${COREPAL_SOURCE_DIR}/inc/rt)
endif(FEATURE_PAL)
add_executable(eventprovidertest
${SOURCES}
)
set(EVENT_PROVIDER_DEPENDENCIES "")
set(EVENT_PROVIDER_LINKER_OPTIONS "")
if(FEATURE_EVENT_TRACE)
add_definitions(-DFEATURE_EVENT_TRACE=1)
list(APPEND EVENT_PROVIDER_DEPENDENCIES
eventprovider
)
if(CLR_CMAKE_PLATFORM_LINUX)
list(APPEND EVENT_PROVIDER_DEPENDENCIES
coreclrtraceptprovider
)
endif(CLR_CMAKE_PLATFORM_LINUX)
list(APPEND EVENT_PROVIDER_LINKER_OPTIONS
${EVENT_PROVIDER_DEPENDENCIES}
)
endif(FEATURE_EVENT_TRACE)
add_dependencies(eventprovidertest ${EVENT_PROVIDER_DEPENDENCIES} coreclrpal)
target_link_libraries(eventprovidertest
coreclrpal
    ${EVENT_PROVIDER_LINKER_OPTIONS}
)
""")
with open_for_update(testinfo) as Testinfo:
Testinfo.write("""
Copyright (c) Microsoft Corporation. All rights reserved.
#
Version = 1.0
Section = EventProvider
Function = EventProvider
Name = PAL test for FireEtw* and EventEnabled* functions
TYPE = DEFAULT
EXE1 = eventprovidertest
Description = This is a sanity test to check that there are no crashes in Xplat eventing
""")
#Test.cpp
with open_for_update(testDir + "/" + test_cpp) as Test_cpp:
Test_cpp.write(stdprolog)
Test_cpp.write("""
/*=====================================================================
**
** Source: clralltestevents.cpp
**
** Purpose: Ensure Correctness of Eventing code
**
**
**===================================================================*/
#if FEATURE_PAL
#include <palsuite.h>
#endif //FEATURE_PAL
#include <clrxplatevents.h>
typedef struct _Struct1 {
ULONG Data1;
unsigned short Data2;
unsigned short Data3;
unsigned char Data4[8];
} Struct1;
Struct1 var21[2] = { { 245, 13, 14, "deadbea" }, { 542, 0, 14, "deadflu" } };
Struct1* var11 = var21;
Struct1* win_Struct = var21;
GUID win_GUID ={ 245, 13, 14, "deadbea" };
double win_Double =34.04;
ULONG win_ULong = 34;
BOOL win_Boolean = FALSE;
unsigned __int64 win_UInt64 = 114;
unsigned int win_UInt32 = 4;
unsigned short win_UInt16 = 12;
unsigned char win_UInt8 = 9;
int win_Int32 = 12;
BYTE* win_Binary =(BYTE*)var21 ;
int __cdecl main(int argc, char **argv)
{
#if defined(FEATURE_PAL)
/* Initialize the PAL.
*/
if(0 != PAL_Initialize(argc, argv))
{
return FAIL;
}
#endif
ULONG Error = ERROR_SUCCESS;
#if defined(FEATURE_EVENT_TRACE)
Trace("\\n Starting functional eventing APIs tests \\n");
""")
Test_cpp.write(generateClralltestEvents(sClrEtwAllMan))
Test_cpp.write("""
if (Error != ERROR_SUCCESS)
{
Fail("One or more eventing Apis failed\\n ");
return FAIL;
}
Trace("\\n All eventing APIs were fired succesfully \\n");
#endif //defined(FEATURE_EVENT_TRACE)
#if defined(FEATURE_PAL)
/* Shutdown the PAL.
*/
PAL_Terminate();
#endif
return PASS;
}
""")
def generateEtmDummyHeader(sClrEtwAllMan,clretwdummy):
if not clretwdummy:
return
print(' Generating Dummy Event Headers')
tree = DOM.parse(sClrEtwAllMan)
incDir = os.path.dirname(os.path.realpath(clretwdummy))
if not os.path.exists(incDir):
os.makedirs(incDir)
with open_for_update(clretwdummy) as Clretwdummy:
Clretwdummy.write(stdprolog + "\n")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
#pal: create etmdummy.h
Clretwdummy.write(generateclrEtwDummy(eventNodes, allTemplates) + "\n")
def generatePlatformIndependentFiles(sClrEtwAllMan, incDir, etmDummyFile, extern, write_xplatheader):
generateEtmDummyHeader(sClrEtwAllMan,etmDummyFile)
tree = DOM.parse(sClrEtwAllMan)
if not incDir:
return
print(' Generating Event Headers')
if not os.path.exists(incDir):
os.makedirs(incDir)
# Write the main header for FireETW* functions
clrallevents = os.path.join(incDir, "clretwallmain.h")
with open_for_update(clrallevents) as Clrallevents:
Clrallevents.write(stdprolog)
Clrallevents.write("""
#include "clrxplatevents.h"
#include "clreventpipewriteevents.h"
""")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
#vm header:
Clrallevents.write(generateClrallEvents(eventNodes, allTemplates) + "\n")
clreventpipewriteevents = os.path.join(incDir, "clreventpipewriteevents.h")
with open_for_update(clreventpipewriteevents) as Clreventpipewriteevents:
Clreventpipewriteevents.write(stdprolog + "\n")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
#eventpipe: create clreventpipewriteevents.h
Clreventpipewriteevents.write(generateClrEventPipeWriteEvents(eventNodes, allTemplates, extern) + "\n")
# Write secondary headers for FireEtXplat* and EventPipe* functions
if write_xplatheader:
clrxplatevents = os.path.join(incDir, "clrxplatevents.h")
with open_for_update(clrxplatevents) as Clrxplatevents:
Clrxplatevents.write(stdprolog + "\n")
for providerNode in tree.getElementsByTagName('provider'):
templateNodes = providerNode.getElementsByTagName('template')
allTemplates = parseTemplateNodes(templateNodes)
eventNodes = providerNode.getElementsByTagName('event')
            #pal: create clrxplatevents.h
Clrxplatevents.write(generateClrXplatEvents(eventNodes, allTemplates, extern) + "\n")
import argparse
import sys
def main(argv):
#parse the command line
    parser = argparse.ArgumentParser(description="Generates the code required to instrument the LTTng logging mechanism")
required = parser.add_argument_group('required arguments')
required.add_argument('--man', type=str, required=True,
                          help='full path to manifest containing the description of events')
required.add_argument('--inc', type=str, default=None,
help='full path to directory where the header files will be generated')
required.add_argument('--dummy', type=str,default=None,
help='full path to file that will have dummy definitions of FireEtw functions')
required.add_argument('--testdir', type=str, default=None,
help='full path to directory where the test assets will be deployed' )
required.add_argument('--nonextern', action='store_true',
                          help='if specified, will not generate extern function stub headers' )
required.add_argument('--noxplatheader', action='store_true',
help='if specified, will not write a generated cross-platform header' )
args, unknown = parser.parse_known_args(argv)
if unknown:
print('Unknown argument(s): ', ', '.join(unknown))
return 1
sClrEtwAllMan = args.man
incdir = args.inc
etmDummyFile = args.dummy
testDir = args.testdir
extern = not args.nonextern
write_xplatheader = not args.noxplatheader
generatePlatformIndependentFiles(sClrEtwAllMan, incdir, etmDummyFile, extern, write_xplatheader)
generateSanityTest(sClrEtwAllMan, testDir)
if __name__ == '__main__':
return_code = main(sys.argv[1:])
sys.exit(return_code)
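# Example invocation (paths are illustrative; only --man is required):
#   python genEventing.py --man src/vm/ClrEtwAll.man --inc bin/inc --dummy bin/inc/etmdummy.h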
|
{
"content_hash": "3a9cefae24ef101c15f860aa34c1e75d",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 127,
"avg_line_length": 34.84841075794621,
"alnum_prop": 0.5873851119062653,
"repo_name": "JosephTremoulet/coreclr",
"id": "123ce81be4f35630921ed336f07c3f6e1689170c",
"size": "28508",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/scripts/genEventing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "939689"
},
{
"name": "Awk",
"bytes": "5861"
},
{
"name": "Batchfile",
"bytes": "151661"
},
{
"name": "C",
"bytes": "3031703"
},
{
"name": "C#",
"bytes": "134208091"
},
{
"name": "C++",
"bytes": "68871444"
},
{
"name": "CMake",
"bytes": "641475"
},
{
"name": "Groovy",
"bytes": "206586"
},
{
"name": "Makefile",
"bytes": "2736"
},
{
"name": "Objective-C",
"bytes": "471844"
},
{
"name": "PAWN",
"bytes": "903"
},
{
"name": "Perl",
"bytes": "23640"
},
{
"name": "PowerShell",
"bytes": "9319"
},
{
"name": "Python",
"bytes": "242544"
},
{
"name": "Roff",
"bytes": "529523"
},
{
"name": "Shell",
"bytes": "223880"
},
{
"name": "Smalltalk",
"bytes": "1162648"
},
{
"name": "SuperCollider",
"bytes": "4752"
},
{
"name": "XSLT",
"bytes": "1016"
},
{
"name": "Yacc",
"bytes": "157348"
}
],
"symlink_target": ""
}
|
import flyer
import pyuv
import signal
import socket
class PyuvAdaptor(object):
"""pyuv event loop adaptor."""
def __init__(self, listener):
self._listener = listener
self._is_connected = False
self._is_writable = False
self._loop = pyuv.Loop.default_loop()
return
def connect(self, host, port):
self._handle = pyuv.TCP(self._loop)
self._handle.connect((host, int(port)), self._on_connected)
self._handle.start_read(self._on_read)
self._sigint_handler = pyuv.Signal(self._loop)
self._sigint_handler.start(self._on_signal, signal.SIGINT)
return
def disconnect(self):
self._handle.shutdown(self._on_disconnected)
return
def send(self, buf, buflen):
self._handle.write(buf, self._on_written)
return buflen
def run(self):
self._loop.run()
return
def stop(self):
self._loop.stop()
return
def _on_connected(self, handle, error):
print("_on_connected")
self._listener.on_connected()
return
def _on_disconnected(self, handle, error):
print("_on_disconnected")
self._listener.on_disconnected()
return
def _on_read(self, handle, data, error):
print("_on_read", len(data) if data else 0)
self._listener.on_data(data)
return
def _on_written(self, handle, error):
print("_on_written")
return
def _on_signal(self, handler, signal):
print("_on_signal", int(signal))
self.stop()
return
class BasicExample(flyer.Listener):
def __init__(self, username, password):
self._username = username
self._password = password
self._manager = flyer.ApplicationManager(self, self)
self._adaptor = PyuvAdaptor(self)
return
def start(self):
self._adaptor.connect("127.0.0.1", 12917)
self._adaptor.run()
self._adaptor.disconnect()
return
# Flyer Sender interface.
def send_bytes(self, buf, buflen):
self._adaptor.send(buf, buflen)
return
def needs_flush(self):
# Not needed for PyUV, because write() guarantees to send the
# whole buffer.
return
# Flyer Listener interface
#
# Because we inherit from the interface, and because it provides a
# default, do-nothing implementation of all functions, we only
# need to implement those that we actually use.
def on_logon(self, event):
print("on_logon")
return
def on_disconnect(self):
print("on_disconnect")
self._adaptor.stop()
return
def on_heartbeat(self, event):
print("on_heartbeat")
return
def on_error(self, event):
print("on_error")
return
# Network adaptor callbacks
def on_data(self, data):
print("on_data")
self._manager.receive_bytes(data, len(data) if data else 0)
return
def on_connected(self):
print("TCP session connected.")
self._manager.logon(self._username, self._password)
return
def on_disconnected(self):
print("TCP session disconnected.")
return
if __name__ == "__main__":
example = BasicExample("guest", "guest")
example.start()
|
{
"content_hash": "6f253d8b351dbc476fdf62b9b04f7b31",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 70,
"avg_line_length": 23.39160839160839,
"alnum_prop": 0.5910313901345291,
"repo_name": "FIXFlyer/pyflyer",
"id": "cda8650d2cc952b68fb77dbf6825eb85ee551eeb",
"size": "3347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/basic/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58018"
}
],
"symlink_target": ""
}
|
import nltk
import os.path
from nltk.corpus import stopwords
from nltk.collocations import *
from urllib import urlopen
unnecessary = ["http", ":", "RT", "@", "&", ";", "I", "'m", "(", ")", "lt", "...", "n't", "``", "''", "gt", "!", "[", "]", "?", "#", "--", "-", "YouTube", ".", ",","=","'s","'ll" ]
url = "http://ils.unc.edu/~heejunk/tweets_1.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_1.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_1.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_1.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_1.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_1.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_1.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_1.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_1.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
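# The nine blocks below repeat the pipeline above verbatim for tweets_2.txt
# through tweets_10.txt. An equivalent loop would be (untested sketch; assumes
# the same imports, the 'unnecessary' list and save_path defined above, and
# writes through the os.path.join result instead of a hard-coded path):
#
#   measures = {'likelihood': bigram_measures.likelihood_ratio,
#               'pmi': bigram_measures.pmi,
#               'stu': bigram_measures.student_t,
#               'chi': bigram_measures.chi_sq}
#   for i in range(1, 11):
#       raw = urlopen("http://ils.unc.edu/~heejunk/tweets_%d.txt" % i).read()
#       tokens = nltk.word_tokenize(raw)
#       words = [w.lower() for w in tokens
#                if w not in stopwords.words('english') and w not in unnecessary]
#       finder = BigramCollocationFinder.from_words(words, window_size=7)
#       for label, measure in measures.items():
#           name = os.path.join(save_path, 'output_window7_%s_%d.txt' % (label, i))
#           with open(name, 'w') as f:
#               for pair in finder.nbest(measure, 100):
#                   f.write('\t '.join(pair) + '\n')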
url = "http://ils.unc.edu/~heejunk/tweets_2.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_2.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_2.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_2.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_2.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_2.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_2.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_2.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_2.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_3.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_3.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_3.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_3.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_3.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_3.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_3.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_3.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_3.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_4.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_4.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_4.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_4.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_4.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_4.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_4.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_4.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_4.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_5.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_5.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_5.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_5.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_5.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_5.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_5.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_5.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_5.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_6.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_6.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_6.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_6.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_6.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_6.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_6.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_6.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_6.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_7.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_7.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_7.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_7.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_7.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_7.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_7.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_7.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_7.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_8.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(
words, window_size = 7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_8.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_likelihood_8.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_8.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_pmi_8.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_8.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_stu_8.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_8.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open('D:\Dropbox\ILS890\collocation_result\output_window7_chi_8.txt', 'w')
for words in output:
output_file.write( '\t '.join(words)+'\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_9.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if not w in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if not w in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(words, window_size=7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_9.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_9.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_9.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_9.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
url = "http://ils.unc.edu/~heejunk/tweets_10.txt"
raw = urlopen(url).read()
tokens = nltk.word_tokenize(raw)
filtered_tokens = [w for w in tokens if w not in stopwords.words('english')]
filtered_tokens2 = [w for w in filtered_tokens if w not in unnecessary]
text = nltk.Text(filtered_tokens2)
words = [w.lower() for w in text]
bigram_measures = nltk.collocations.BigramAssocMeasures()
finder = BigramCollocationFinder.from_words(words, window_size=7)
output = finder.nbest(bigram_measures.likelihood_ratio, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_likelihood_10.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.pmi, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_pmi_10.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.student_t, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_stu_10.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
output = finder.nbest(bigram_measures.chi_sq, 100)
save_path = 'D:\Dropbox\ILS890\collocation_result'
file_name = 'output_window7_chi_10.txt'
complete_name = os.path.join(save_path, file_name)
output_file = open(complete_name, 'w')
for words in output:
    output_file.write('\t '.join(words) + '\n')
output_file.close()
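# Hedged aside (not part of the original script): each group of eight lines
# above repeats the same fetch-and-save pattern. A helper like the sketch
# below, assuming the same NLTK finder/measure objects and the same Windows
# output directory, would collapse each group into a single call. It is
# defined here for illustration only and never called.
def save_collocations(finder, measure, label, suffix,
                      save_path='D:\\Dropbox\\ILS890\\collocation_result'):
    # Take the 100 best collocations for the given association measure and
    # write them one bigram per line, tab-separated.
    output = finder.nbest(measure, 100)
    complete_name = os.path.join(save_path,
                                 'output_window7_%s_%s.txt' % (label, suffix))
    with open(complete_name, 'w') as output_file:
        for words in output:
            output_file.write('\t '.join(words) + '\n')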
|
{
"content_hash": "c92675873a950fde87e8cbf05452b4f5",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 180,
"avg_line_length": 45.760919540229885,
"alnum_prop": 0.7256103687330453,
"repo_name": "britth/inls890-microblog",
"id": "affba945bcf44682f0c2642fc9b366e0bb795edb",
"size": "19906",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/collocation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47135"
}
],
"symlink_target": ""
}
|
import logging
import datetime as dt
import numpy as np
from opyrant import (EndSession,
EndExperiment,
ComponentError,
InterfaceError,
utils)
logger = logging.getLogger(__name__)
class State(object):
""" States provide a nice interface for running experiments and transitioning to sleep/idle phases. By implementing __enter__ and __exit__ methods, they use the "with" statement construct that allows for simple error handling (e.g. session ends, keyboard interrupts to stop an experiment, etc.)
Parameters
----------
schedulers: list of scheduler objects
These determine whether or not the state should be running, using their check method
Methods
-------
check() - Check if the state should be active according to its schedulers
run() - Run the state (should be used within the "with" statement)
start() - wrapper for run that includes the "with" statement
"""
def __init__(self, experiment=None, schedulers=None):
if schedulers is None:
schedulers = list()
if not isinstance(schedulers, list):
schedulers = [schedulers]
self.schedulers = schedulers
self.experiment = experiment
def check(self):
""" Checks all of the states schedulers to see if the state should be active.
Returns
-------
True if the state should be active and False otherwise.
"""
# If any scheduler says not to run, then don't run
for scheduler in self.schedulers:
if not scheduler.check():
return False
return True
def __enter__(self):
""" Start all of the schedulers """
logger.info("Entering %s state" % self.__class__.__name__)
for scheduler in self.schedulers:
scheduler.start()
return self
def __exit__(self, type_, value, traceback):
""" Handles KeyboardInterrupt and EndExperiment exceptions to end the experiment, EndSession exceptions to end the session state, and logs all others.
"""
logger.info("Exiting %s state" % self.__class__.__name__)
# Stop the schedulers
for scheduler in self.schedulers:
scheduler.stop()
# Handle expected exceptions
if type_ in [KeyboardInterrupt, EndExperiment]:
logger.info("Finishing experiment")
self.experiment.end()
return True
elif type_ is EndSession:
logger.info("Session has ended")
return True
# Log all other exceptions and raise them
if isinstance(value, Exception):
if type_ in [InterfaceError, ComponentError]:
logger.critical("There was a critical error in communicating with the hardware!")
logger.critical(repr(value))
return False
def run(self):
pass
def start(self):
""" Implements the "with" context for this state """
with self as state:
state.run()
class Session(State):
""" Session state for running an experiment. Should be used with the "with" statement (see Examples).
Parameters
----------
schedulers: list of scheduler objects
These determine whether or not the state should be running, using their check method
experiment: an instance of a Behavior class
The experiment whose session methods should be run.
Methods
-------
check() - Check if the state should be active according to its schedulers
run() - Run the experiment's session_main method
start() - wrapper for run that includes the "with" statement
update() - Update schedulers at the end of the trial
Examples
--------
with Session(experiment=experiment) as state: # Runs experiment.session_pre
state.run() # Runs experiment.session_main
# Exiting with statement runs experiment.session_post
# "with" context is also implemented in the start() method
state = Session(experiment=experiment)
state.start()
"""
def __enter__(self):
self.experiment.session_pre()
for scheduler in self.schedulers:
scheduler.start()
return self
def run(self):
""" Runs session main """
self.experiment.session_main()
def __exit__(self, type_, value, traceback):
self.experiment.session_post()
return super(Session, self).__exit__(type_, value, traceback)
def update(self):
""" Updates all schedulers with information on the current trial """
if hasattr(self.experiment, "this_trial"):
for scheduler in self.schedulers:
scheduler.update(self.experiment.this_trial)
class Idle(State):
""" A simple idle state.
Parameters
----------
experiment: an instance of a Behavior class
The experiment whose session methods should be run.
poll_interval: int
The interval, in seconds, at which other states should be checked to run
Methods
-------
    run() - Poll the schedulers and launch the sleep or session state
"""
def __init__(self, experiment=None, poll_interval=60):
super(Idle, self).__init__(experiment=experiment,
schedulers=None)
self.poll_interval = poll_interval
def run(self):
""" Checks if the experiment should be sleeping or running a session and kicks off those states. """
while True:
if self.experiment.check_sleep_schedule():
return self.experiment._sleep.start()
elif self.experiment.check_session_schedule():
return self.experiment.session.start()
else:
logger.debug("idling...")
utils.wait(self.poll_interval)
class Sleep(State):
""" A panel sleep state. Turns off all outputs, checking every so often if it should wake up
Parameters
----------
experiment: an instance of a Behavior class
The experiment whose session methods should be run.
schedulers: an instance of TimeOfDayScheduler
The time of day scheduler to follow for when to sleep.
poll_interval: int
The interval, in seconds, at which other states should be checked to run
time_period: string or tuple
Either "night" or a tuple of "HH:MM" start and end times. Only used if scheduler is not provided.
Methods
-------
    run() - Put the panel to sleep, waking when the scheduler allows
"""
def __init__(self, experiment=None, schedulers=None, poll_interval=60,
time_period="night"):
if schedulers is None:
schedulers = TimeOfDayScheduler(time_period)
self.poll_interval = poll_interval
super(Sleep, self).__init__(experiment=experiment,
schedulers=schedulers)
def run(self):
""" Checks every poll interval whether the panel should be sleeping and puts it to sleep """
while True:
logger.debug("sleeping")
self.experiment.panel.sleep()
utils.wait(self.poll_interval)
if not self.check():
break
self.experiment.panel.wake()
class BaseScheduler(object):
""" Implements a base class for scheduling states
Summary
-------
    Schedulers allow the state to be started and stopped based on certain
    criteria. For instance, you can start the sleep state when the sun
    sets, or stop the session state after 100 trials.
Methods
-------
check() - Checks whether the state should be active
start() - Run when the state starts to initialize any variables
stop() - Run when the state finishes to close out any variables
update(trial) - Run after each trial to update the scheduler if necessary
"""
def __init__(self):
pass
def start(self):
pass
def stop(self):
pass
def update(self, trial):
pass
def check(self):
""" This should really be implemented by the subclass """
raise NotImplementedError("Scheduler %s does not have a check method" % self.__class__.__name__)
class TimeOfDayScheduler(BaseScheduler):
""" Schedule a state to start and stop depending on the time of day
Parameters
----------
time_periods: string or list
        The time periods in which this schedule should be active. The value
        of "sun" can be passed to use the current day-night schedule.
        Otherwise, pass a list of tuples (start, end)
        (e.g. [("5:00", "17:00")] for 5am to 5pm)
Methods
-------
check() - Returns True if the state should be active according to this schedule
"""
def __init__(self, time_periods="sun"):
# Any other sanitizations?
if isinstance(time_periods, tuple):
time_periods = [time_periods]
self.time_periods = time_periods
def check(self):
""" Returns True if the state should be active according to this schedule
"""
return utils.check_time(self.time_periods)
class TimeScheduler(BaseScheduler):
""" Schedules a state to start and stop based on how long the state has been active and how long since the state was previously active.
Parameters
----------
duration: int
The duration, in minutes, that the state should be active
interval: int
The time since the state was last active before it should become active again.
Methods
-------
start() - Stores the start time of the current state
stop() - Stores the end time of the current state
check() - Returns True if the state should activate
"""
def __init__(self, duration=None, interval=None):
self.duration = duration
self.interval = interval
self.start_time = None
self.stop_time = None
def start(self):
""" Stores the start time of the current state """
self.start_time = dt.datetime.now()
self.stop_time = None
def stop(self):
""" Stores the end time of the current state """
self.stop_time = dt.datetime.now()
self.start_time = None
def check(self):
""" Checks if the current time is greater than `duration` minutes after start time or `interval` minutes after stop time """
current_time = dt.datetime.now()
# If start_time is None, the state is not active. Should it be?
if self.start_time is None:
# No interval specified, always start
if self.interval is None:
return True
# The state hasn't activated yet, always start
if self.stop_time is None:
return True
# Has it been greater than interval minutes since the last time?
time_since = (current_time - self.stop_time).total_seconds() / 60.
if time_since < self.interval:
return False
# If stop_time is None, the state is currently active. Should it stop?
if self.stop_time is None:
# No duration specified, so do not stop
if self.duration is None:
return True
# Has the state been active for long enough?
time_since = (current_time - self.start_time).total_seconds() / 60.
if time_since >= self.duration:
return False
return True
class CountScheduler(BaseScheduler):
""" Schedules a state stop after a certain number of trials.
Parameters
----------
max_trials: int
The maximum number of trials
Methods
-------
check() - Returns True if the state has not yet reached max_trials
TODO: This could be expanded to include things like total number of rewards or correct responses.
"""
def __init__(self, max_trials=None):
self.max_trials = max_trials
self.trial_index = 0
def check(self):
""" Returns True if current trial index is less than max_trials """
if self.max_trials is None:
return True
return self.trial_index < self.max_trials
def stop(self):
""" Resets the trial index since the session is over """
self.trial_index = 0
def update(self, trial):
""" Updates the current trial index """
self.trial_index = trial.index
available_states = {"idle": Idle,
"session": Session,
"sleep": Sleep}
available_schedulers = {"day": TimeOfDayScheduler,
"timeofday": TimeOfDayScheduler,
"time": TimeScheduler}
|
{
"content_hash": "c9a30813b87888585cd1ff39b2bb17d2",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 298,
"avg_line_length": 31.48138957816377,
"alnum_prop": 0.614566091274533,
"repo_name": "opyrant/opyrant",
"id": "93a01dd6ef39afa3c2fee9cfff4d0de4baa1fb95",
"size": "12687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opyrant/states.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "1870"
},
{
"name": "C",
"bytes": "2143"
},
{
"name": "Makefile",
"bytes": "2445"
},
{
"name": "Python",
"bytes": "280220"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup, find_packages
version = '0.3.3'
def get_package_manifest(filename):
packages = []
with open(filename) as package_file:
        for line in package_file:
line = line.strip()
if not line:
continue
if line.startswith('#'):
# comment
continue
if line.startswith('-e '):
# not a valid package
continue
packages.append(line)
return packages
def get_install_requires():
"""
:returns: A list of packages required for installation.
"""
return get_package_manifest('requirements.txt')
def get_tests_requires():
"""
:returns: A list of packages required for running the tests.
"""
packages = get_package_manifest('requirements_dev.txt')
try:
from unittest import mock
except ImportError:
packages.append('mock')
if sys.version_info[:2] < (2, 7):
packages.append('unittest2')
return packages
def read(f):
with open(os.path.join(os.path.dirname(__file__), f)) as f:
return f.read().strip()
setup(
name='sockjs-gevent',
version=version,
    description='gevent-based sockjs server',
long_description='\n\n'.join((read('README.md'), read('CHANGES.txt'))),
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Internet :: WWW/HTTP",
'Topic :: Internet :: WWW/HTTP :: WSGI'
],
author='Nick Joyce',
author_email='nick.joyce@realkinetic.com',
url='https://github.com/njoyce/sockjs-gevent',
license='MIT',
install_requires=get_install_requires(),
tests_require=get_tests_requires(),
setup_requires=['nose>=1.0'],
test_suite='nose.collector',
    include_package_data=True,
    packages=find_packages(exclude=["examples", "tests"]),
    zip_safe=False,
)
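# Illustrative (hypothetical) requirements.txt content; get_package_manifest()
# would reduce the lines below to ['gevent>=1.0.1', 'greenlet>=0.4.2']:
#
#     # runtime dependencies
#     gevent>=1.0.1
#     -e git+https://github.com/example/pkg.git#egg=pkg
#     greenlet>=0.4.2
#
# Blank lines, comments, and editable ("-e") entries are skipped.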
|
{
"content_hash": "045e2d93ed3aa4c4b021eb1e497a5b11",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 75,
"avg_line_length": 25.305882352941175,
"alnum_prop": 0.594607159460716,
"repo_name": "njoyce/sockjs-gevent",
"id": "865849c6bd13fd18622333b7a97d4874136554a1",
"size": "2151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "116964"
}
],
"symlink_target": ""
}
|
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.views.generic import ListView, UpdateView
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404
from django.contrib.contenttypes.models import ContentType
from django.apps import apps
from reversion.models import Version, Revision
from reversion import revisions as reversion
from Lagerregal.utils import PaginationMixin
from devices.models import Device, Room, Manufacturer
from devicetypes.models import Type, TypeAttributeValue
from users.mixins import PermissionRequiredMixin
class Globalhistory(PermissionRequiredMixin, PaginationMixin, ListView):
    queryset = Revision.objects.select_related("user").prefetch_related(
        "version_set", "version_set__content_type").order_by("-date_created")
context_object_name = "revision_list"
template_name = 'history/globalhistory.html'
permission_required = 'devices.change_device'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["breadcrumbs"] = [(reverse("globalhistory"), _("Global edit history"))]
if context["is_paginated"] and context["page_obj"].number > 1:
context["breadcrumbs"].append(["", context["page_obj"].number])
return context
excluded_fields = ["currentlending", "created_at", "archived", "trashed", "inventoried", "bookmarks", "trashed",
"last_seen", "creator"]
def cleanup_fielddict(version):
del version.field_dict["id"]
if version.field_dict.get("devicetype") is not None:
try:
version.field_dict["devicetype"] = Type.objects.get(
pk=version.field_dict["devicetype"])
except Type.DoesNotExist:
version.field_dict["devicetype"] = "[deleted]"
if version.field_dict.get("manufacturer") is not None:
try:
version.field_dict["manufacturer"] = Manufacturer.objects.get(
pk=version.field_dict["manufacturer"])
except Manufacturer.DoesNotExist:
version.field_dict["manufacturer"] = "[deleted]"
if version.field_dict.get("room") is not None:
try:
version.field_dict["room"] = Room.objects.get(
pk=version.field_dict["room"])
except Room.DoesNotExist:
version.field_dict["room"] = "[deleted]"
if version.field_dict.get("device") is not None:
try:
version.field_dict["device"] = Device.objects.get(
pk=version.field_dict["device"])
except Device.DoesNotExist:
version.field_dict["device"] = "[deleted]"
for excluded_field in excluded_fields:
if excluded_field in version.field_dict:
del version.field_dict[excluded_field]
return version
class HistoryDetail(PermissionRequiredMixin, UpdateView):
model = Version
template_name = 'history/history_detail.html'
context_object_name = "this_version"
fields = "__all__"
permission_required = 'devices.change_device'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["current_version"] = get_object_or_404(
apps.get_model(context["this_version"].content_type.app_label, context["this_version"].content_type.model),
id=context["this_version"].object_id)
context["this_version"] = cleanup_fielddict(context["this_version"])
previous_version = Version.objects.filter(object_id=context["current_version"].pk,
revision__date_created__lt=context["this_version"].revision.date_created,
content_type_id=context["this_version"].content_type.id).order_by(
"-pk")
if len(previous_version) == 0:
context["previous_version"] = None
else:
context["previous_version"] = cleanup_fielddict(previous_version[0])
next_version = Version.objects.filter(object_id=context["current_version"].pk,
revision__date_created__gt=context["this_version"].revision.date_created,
content_type_id=context["this_version"].content_type.id).order_by("pk")
if len(next_version) == 0:
context["next_version"] = None
else:
context["next_version"] = next_version[0]
context["breadcrumbs"] = [
(reverse("{0}-list".format(context["this_version"].content_type.model)),
_(context["this_version"].content_type.name)),
(context["current_version"].get_absolute_url(), str(context["current_version"])),
(reverse("history-list", kwargs={"content_type_id": context["this_version"].content_type.id,
"object_id": context["this_version"].object_id}), _("History")),
("", _("Version {0}".format(context["this_version"].pk)))
]
return context
def post(self, request, *args, **kwargs):
self.object = self.get_object()
context = self.get_context_data(**kwargs)
version = context["this_version"]
object = context["current_version"]
for name, value in version.field_dict.items():
if value == "[deleted]":
setattr(object, name, None)
else:
setattr(object, name, value)
object.save()
if version.field_dict.get("devicetype") is not None:
TypeAttributeValue.objects.filter(device=version.object_id).delete()
reversion.set_comment("Reverted to version from {0}".format(version.revision.date_created))
messages.success(self.request,
_('Successfully reverted Device to revision {0}').format(version.revision.id))
return HttpResponseRedirect(object.get_absolute_url())
class HistoryList(PermissionRequiredMixin, ListView):
context_object_name = 'version_list'
template_name = 'history/history_list.html'
permission_required = 'devices.change_device'
def get_queryset(self):
object_id = self.kwargs["object_id"]
content_type_id = self.kwargs["content_type_id"]
self.content_type = get_object_or_404(ContentType, id=content_type_id)
self.object = get_object_or_404(apps.get_model(self.content_type.app_label, self.content_type.model), id=object_id)
return Version.objects.filter(object_id=self.object.id,
content_type_id=self.content_type.id).order_by("-pk")
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["breadcrumbs"] = [
(reverse("{0}-list".format(self.content_type.model)),
_(self.content_type.name)),
(self.object.get_absolute_url(), str(self.object)),
(reverse("history-list", kwargs={"content_type_id": self.content_type.id,
"object_id": self.object.id}), _("History"))
]
return context
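# Hedged aside (illustrative only, not used by the views above): the four
# near-identical try/except blocks in cleanup_fielddict() could be driven
# by a table mapping field names to their related models.
RELATED_MODELS = {
    "devicetype": Type,
    "manufacturer": Manufacturer,
    "room": Room,
    "device": Device,
}
def cleanup_fielddict_table_driven(version):
    """ Equivalent sketch of cleanup_fielddict() using RELATED_MODELS. """
    del version.field_dict["id"]
    for field, model in RELATED_MODELS.items():
        if version.field_dict.get(field) is not None:
            try:
                version.field_dict[field] = model.objects.get(
                    pk=version.field_dict[field])
            except model.DoesNotExist:
                version.field_dict[field] = "[deleted]"
    for excluded_field in excluded_fields:
        version.field_dict.pop(excluded_field, None)
    return version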
|
{
"content_hash": "dab776c0fa579552ae0374b2dcd59a42",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 123,
"avg_line_length": 45.161490683229815,
"alnum_prop": 0.6182093247146198,
"repo_name": "vIiRuS/Lagerregal",
"id": "6b48fb681e1e6383979d70b5c43e5de7b983504d",
"size": "7271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "history/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42486"
},
{
"name": "HTML",
"bytes": "264180"
},
{
"name": "JavaScript",
"bytes": "179557"
},
{
"name": "Python",
"bytes": "396530"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python3
import codecs
key_map = {
'COUNT': 'count',
'KEY': 'key',
'PLAINTEXT': 'plain',
'CIPHERTEXT': 'cipher',
}
def c_escaped_string(s, encoding='latin'):
if isinstance(s, str):
s = s.encode(encoding)
s = s.decode('latin')
result = ''
for c in s:
if c in '\\"':
result += '\\' + c
elif not (32 <= ord(c) < 127):
result += '\\%03o' % ord(c)
else:
result += c
return '"' + result + '"'
def byte_string_to_c_array_init(byte_string):
return ", ".join("0x{:02X}".format(c) for c in byte_string)
def vectors_iter(fileobj):
for line in fileobj:
line = line.strip()
if "[ENCRYPT]" in line:
break
for line in fileobj:
line = line.strip()
if "[DECRYPT]" in line:
break
if line.startswith("COUNT"):
parts = line.split("=")
count = int(parts[1].strip())
test_data = { 'count': count }
for line in fileobj:
line = line.strip()
if not line:
yield test_data
break
key, valuestr = [ a.strip() for a in line.split("=") ]
key = key_map.get(key, key)
value = codecs.decode(valuestr, "hex")
test_data[key] = value
def files_vectors_iter(filenames):
for setnum, filename in enumerate(filenames):
with open(filename, "r") as f:
for test_data in vectors_iter(f):
test_data['set'] = setnum
yield test_data
def main():
import os.path
import sys
filenames = sys.argv[1:]
vectors_list = []
set_names = list(filenames)
print("""
/* This file was generated by Python program {}
from the AES KAT (Known Answer Tests), using the following command:
{}
This file should not be edited manually.
It is used by tests/aes-vectors-test.c.
*/
""".format(os.path.basename(sys.argv[0]), " ".join(sys.argv)))
for test_data in files_vectors_iter(filenames):
vector_prefix = "set{}count{}".format(test_data['set'], test_data['count'])
for key in ('key', 'plain', 'cipher'):
if key in test_data:
array_data = byte_string_to_c_array_init(test_data[key])
print("const uint8_t {}{}[] = {{ {} }};".format(vector_prefix, key, array_data))
print("const vector_data_t {} = {{".format(vector_prefix))
print(" .set_num = {},".format(test_data['set']))
print(" .count = {},".format(test_data['count']))
for key in ('key', 'plain', 'cipher'):
if key in test_data:
print(" .{} = {}{},".format(key, vector_prefix, key))
else:
print(" .{} = NULL,".format(key))
print("};\n")
vectors_list.append(vector_prefix)
print("const vector_data_t * const test_vectors[] = {")
for vector_name in vectors_list:
print(" &{},".format(vector_name))
print("};\n")
print("const char * const set_names[] = {")
for set_name in set_names:
print(' {},'.format(c_escaped_string(set_name)))
print("};\n")
if __name__ == "__main__":
main()
|
{
"content_hash": "c596dc145f8899b25925ed33bc5717cb",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 96,
"avg_line_length": 30.486486486486488,
"alnum_prop": 0.5132978723404256,
"repo_name": "cmcqueen/aes-min",
"id": "763ecd1d0837498b9fca6b154b720094cfa49fc6",
"size": "3384",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/parse-vectors.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3417068"
},
{
"name": "M4",
"bytes": "1290"
},
{
"name": "Makefile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "8528"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
}
|
import argparse
import os
import shutil
import subprocess
import sys
from parse_link_map import parse_link_map
sys.path.append(
os.path.join(
os.path.dirname(sys.argv[0]), '..', '..', 'run_tests', 'python_utils'))
import comment_on_pr
# Only show diff 1KB or greater
diff_threshold = 1000
size_labels = ('Core', 'ObjC', 'BoringSSL', 'Protobuf', 'Total')
argp = argparse.ArgumentParser(
description='Binary size diff of gRPC Objective-C sample')
argp.add_argument(
'-d',
'--diff_base',
type=str,
help='Commit or branch to compare the current one to')
args = argp.parse_args()
def dir_size(dir):
total = 0
for dirpath, dirnames, filenames in os.walk(dir):
for f in filenames:
fp = os.path.join(dirpath, f)
total += os.stat(fp).st_size
return total
def get_size(where, frameworks):
build_dir = 'src/objective-c/examples/Sample/Build/Build-%s/' % where
if not frameworks:
link_map_filename = 'Build/Intermediates.noindex/Sample.build/Release-iphoneos/Sample.build/Sample-LinkMap-normal-arm64.txt'
return parse_link_map(build_dir + link_map_filename)
else:
framework_dir = 'Build/Products/Release-iphoneos/Sample.app/Frameworks/'
boringssl_size = dir_size(
build_dir + framework_dir + 'openssl.framework')
core_size = dir_size(build_dir + framework_dir + 'grpc.framework')
objc_size = dir_size(build_dir + framework_dir + 'GRPCClient.framework') + \
dir_size(build_dir + framework_dir + 'RxLibrary.framework') + \
dir_size(build_dir + framework_dir + 'ProtoRPC.framework')
protobuf_size = dir_size(
build_dir + framework_dir + 'Protobuf.framework')
app_size = dir_size(
build_dir + 'Build/Products/Release-iphoneos/Sample.app')
return core_size, objc_size, boringssl_size, protobuf_size, app_size
def build(where, frameworks):
subprocess.check_call(['make', 'clean'])
shutil.rmtree(
'src/objective-c/examples/Sample/Build/Build-%s' % where,
ignore_errors=True)
subprocess.check_call(
'CONFIG=opt EXAMPLE_PATH=src/objective-c/examples/Sample SCHEME=Sample FRAMEWORKS=%s ./build_one_example.sh'
% ('YES' if frameworks else 'NO'),
shell=True,
cwd='src/objective-c/tests')
os.rename('src/objective-c/examples/Sample/Build/Build',
'src/objective-c/examples/Sample/Build/Build-%s' % where)
text = 'Objective-C binary sizes\n'
for frameworks in [False, True]:
build('new', frameworks)
new_size = get_size('new', frameworks)
old_size = None
if args.diff_base:
old = 'old'
where_am_i = subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
subprocess.check_call(['git', 'checkout', '--', '.'])
subprocess.check_call(['git', 'checkout', args.diff_base])
subprocess.check_call(['git', 'submodule', 'update'])
try:
build('old', frameworks)
old_size = get_size('old', frameworks)
finally:
subprocess.check_call(['git', 'checkout', '--', '.'])
subprocess.check_call(['git', 'checkout', where_am_i])
subprocess.check_call(['git', 'submodule', 'update'])
text += ('***************FRAMEWORKS****************\n'
if frameworks else '*****************STATIC******************\n')
row_format = "{:>10}{:>15}{:>15}" + '\n'
text += row_format.format('New size', '', 'Old size')
    if old_size is None:
for i in range(0, len(size_labels)):
text += ('\n'
if i == len(size_labels) - 1 else '') + row_format.format(
'{:,}'.format(new_size[i]), size_labels[i], '')
else:
has_diff = False
for i in range(0, len(size_labels) - 1):
if abs(new_size[i] - old_size[i]) < diff_threshold:
continue
if new_size[i] > old_size[i]:
diff_sign = ' (>)'
else:
diff_sign = ' (<)'
has_diff = True
text += row_format.format('{:,}'.format(new_size[i]),
size_labels[i] + diff_sign, '{:,}'.format(
old_size[i]))
i = len(size_labels) - 1
if new_size[i] > old_size[i]:
diff_sign = ' (>)'
elif new_size[i] < old_size[i]:
diff_sign = ' (<)'
else:
diff_sign = ' (=)'
text += ('\n' if has_diff else '') + row_format.format(
'{:,}'.format(new_size[i]), size_labels[i] + diff_sign,
'{:,}'.format(old_size[i]))
if not has_diff:
text += '\n No significant differences in binary sizes\n'
text += '\n'
print text
comment_on_pr.comment_on_pr('```\n%s\n```' % text)
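# Illustrative output shape (all numbers made up); a component row is only
# printed when it differs from the diff base by at least diff_threshold:
#
#       New size                      Old size
#      2,345,678       Core (>)      2,300,123
#      9,876,543      Total (>)      9,800,000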
|
{
"content_hash": "f425f441c05ed387eddaf7d3eb1f0a9d",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 132,
"avg_line_length": 37,
"alnum_prop": 0.5554659136748689,
"repo_name": "thinkerou/grpc",
"id": "d4d134fef3725c41692e088fd9fdd6e021148c05",
"size": "5563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/profiling/ios_bin/binary_size.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "31177"
},
{
"name": "C",
"bytes": "1651907"
},
{
"name": "C#",
"bytes": "1723819"
},
{
"name": "C++",
"bytes": "29784655"
},
{
"name": "CMake",
"bytes": "548015"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "141732"
},
{
"name": "Go",
"bytes": "27069"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "6907"
},
{
"name": "JavaScript",
"bytes": "51278"
},
{
"name": "M4",
"bytes": "45321"
},
{
"name": "Makefile",
"bytes": "1143240"
},
{
"name": "Objective-C",
"bytes": "284919"
},
{
"name": "Objective-C++",
"bytes": "37952"
},
{
"name": "PHP",
"bytes": "468767"
},
{
"name": "Python",
"bytes": "2408483"
},
{
"name": "Ruby",
"bytes": "963396"
},
{
"name": "Shell",
"bytes": "403820"
},
{
"name": "Swift",
"bytes": "3435"
},
{
"name": "XSLT",
"bytes": "9673"
}
],
"symlink_target": ""
}
|
from . import opdict_data
from . import entities
import sys
opdict = {}
def _lookup_entity(content):
if content.startswith('&') and content.endswith(';'):
return entities.map[content[1:-1]]
else:
return content
for line in opdict_data.data.split('\n'):
tokens = line.split()
if not tokens: continue
try:
content = _lookup_entity(tokens[0][1:-1])
except KeyError:
continue
attrs = {}
for token in tokens[1:]:
name, value = token.split('=')
attrs[name] = value[1:-1] # removes quotes
form = sys.intern(attrs['form'])
del attrs['form']
if content in opdict:
opdict[content][form] = attrs
else:
opdict[content] = {form: attrs}
_default_attributes = {
"fence": "false",
"separator": "false",
"lspace": "thickmathspace",
"rspace": "thickmathspace",
"stretchy": "false",
"symmetric": "true",
"maxsize": "infinity",
"minsize": "1",
"largeop": "false",
"movablelimits": "false",
"accent": "false"
}
def lookup(content, form):
try:
entry = opdict[content]
except KeyError:
return _default_attributes
try:
return entry[form]
except KeyError:
try:
return entry['infix']
except KeyError:
try:
return entry['postfix']
except KeyError:
try:
return entry['prefix']
except KeyError:
return _default_attributes
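# Hedged aside (illustrative only, not used above): the nested try/except
# chain in lookup() is equivalent to walking a fixed fallback order.
def _lookup_with_fallbacks(content, form):
    entry = opdict.get(content)
    if entry is None:
        return _default_attributes
    # Try the requested form first, then the conventional fallbacks.
    for candidate in (form, 'infix', 'postfix', 'prefix'):
        if candidate in entry:
            return entry[candidate]
    return _default_attributes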
|
{
"content_hash": "4940749288fbbf817911a727eb309fdf",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 57,
"avg_line_length": 21.96923076923077,
"alnum_prop": 0.5980392156862745,
"repo_name": "ahjulstad/mathdom-python3",
"id": "274715ad12f32bc9b1db053d07a706d494596b90",
"size": "1428",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mathml/pmathml/opdict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "825"
},
{
"name": "Python",
"bytes": "295908"
},
{
"name": "XSLT",
"bytes": "145765"
}
],
"symlink_target": ""
}
|
import plot
import model
import dataset
import sys
import os.path as path
group_id = int(sys.argv[1])
cluster = model.load(path.realpath(sys.argv[2]))
articles = dataset.news.fetch(100000)
nodes = cluster['group'][group_id, 0:cluster['group_size'][group_id]]
print('nodes: ', nodes)
for node_id in nodes:
print("%6d | %s" % (node_id, articles[node_id]['title']))
|
{
"content_hash": "7ee8469bb366c1e9caad72b9adf9e3a2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 69,
"avg_line_length": 22.941176470588236,
"alnum_prop": 0.6974358974358974,
"repo_name": "AndreasMadsen/bachelor-code",
"id": "b1c781542043018bfd502a88bfeb338def604b4f",
"size": "391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plot/show-group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2889"
},
{
"name": "HTML",
"bytes": "3726"
},
{
"name": "JavaScript",
"bytes": "18060"
},
{
"name": "Makefile",
"bytes": "64"
},
{
"name": "Python",
"bytes": "144683"
},
{
"name": "Shell",
"bytes": "805"
}
],
"symlink_target": ""
}
|
from webob import Request
from fixtures import ServerFixture
class TestInput(ServerFixture):
def make_app(self):
def app(environ, start_response):
req = Request(environ)
status = "200 OK"
body =\
"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="text_input_form">
<input name="foo" type="text" value="bar">
<input name="button" type="submit" value="text">
</form>
<form method="POST" id="radio_input_form">
<input name="foo" type="radio" value="bar">
<input name="foo" type="radio" value="baz" checked>
<input name="button" type="submit" value="radio">
</form>
<form method="POST" id="checkbox_input_form">
<input name="foo" type="checkbox" value="bar" checked>
<input name="button" type="submit" value="text">
</form>
</body>
</html>
"""
headers = [
('Content-Type', 'text/html'),
('Content-Length', str(len(body)))]
start_response(status, headers)
return [body]
return app
def test_input(self):
res = self.app.get('/')
assert res.status_int == 200
assert res.headers['content-type'] == 'text/html'
assert res.content_type == 'text/html'
form = res.forms['text_input_form']
assert form['foo'].value == 'bar'
assert form.submit_fields() == [('foo', 'bar')]
form = res.forms['radio_input_form']
assert form['foo'].value == 'baz'
assert form.submit_fields() == [('foo', 'baz')]
form = res.forms['checkbox_input_form']
assert form['foo'].value == 'bar'
assert form.submit_fields() == [('foo', 'bar')]
class TestInputNoDefault(ServerFixture):
def make_app(self):
def app(environ, start_response):
req = Request(environ)
status = "200 OK"
body =\
"""
<html>
<head><title>form page</title></head>
<body>
<form method="POST" id="text_input_form">
<input name="foo" type="text">
<input name="button" type="submit" value="text">
</form>
<form method="POST" id="radio_input_form">
<input name="foo" type="radio" value="bar">
<input name="foo" type="radio" value="baz">
<input name="button" type="submit" value="radio">
</form>
<form method="POST" id="checkbox_input_form">
<input name="foo" type="checkbox" value="bar">
<input name="button" type="submit" value="text">
</form>
</body>
</html>
"""
headers = [
('Content-Type', 'text/html'),
('Content-Length', str(len(body)))]
start_response(status, headers)
return [body]
return app
def test_input_no_default(self):
res = self.app.get('/')
assert res.status_int == 200
assert res.headers['content-type'] == 'text/html'
assert res.content_type == 'text/html'
form = res.forms['text_input_form']
assert form['foo'].value == ''
assert form.submit_fields() == [('foo', '')]
form = res.forms['radio_input_form']
assert form['foo'].value is None
assert form.submit_fields() == []
form = res.forms['checkbox_input_form']
assert form['foo'].value is None
assert form.submit_fields() == []
|
{
"content_hash": "956bc93e2c65a196c356ef24230acb2c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 36.179245283018865,
"alnum_prop": 0.4863102998696219,
"repo_name": "storborg/livetest",
"id": "9ba3312c338c3346805cf356370181baa8e84a94",
"size": "3835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_input.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35175"
}
],
"symlink_target": ""
}
|