text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import __init__
import os, sys, time
import numpy as np
from commons.utils import logger
from commons import utils
from commons import dataloader
import evaluator
# parameter config area
# All knobs for the LN-LFM experiment live in this single dict; it is passed
# to utils/dataloader/evaluator unchanged.
para = {'dataPath': '../../../data/',
        'dataName': 'dataset#1',
        'dataType': 'tp',  # set the dataType as 'rt' or 'tp'
        'outPath': 'result/',
        'metrics': ['MAE', 'NMAE', 'RMSE', 'MRE', 'NPRE'],  # delete where appropriate
        'density': np.arange(0.05, 0.31, 0.05),  # matrix density
        'rounds': 20,  # how many runs are performed at each matrix density
        'dimension': 10,  # dimenisionality of the latent factors
        'eta': 0.0001,  # learning rate
        'alpha': 0.6,  # the combination coefficient
        'lambda': 5,  # regularization parameter
        'maxIter': 300,  # the max iterations
        'saveTimeInfo': False,  # whether to keep track of the running time
        'saveLog': True,  # whether to save log into file
        'debugMode': False,  # whether to record the debug info
        'parallelMode': True  # whether to leverage multiprocessing for speedup
        }

startTime = time.time()  # start timing
utils.setConfig(para)  # set configuration
logger.info('==============================================')
logger.info('LN-LFM: Latent Neighbor and Latent Factor Model')

# load the dataset
dataMatrix = dataloader.load(para)

# load the service location information
wsInfoList = dataloader.loadServInfo(para)

# evaluate QoS prediction algorithm
evaluator.execute(dataMatrix, wsInfoList, para)

# Fixed typo in the log message: "Elaspsed" -> "Elapsed".
logger.info('All done. Elapsed time: ' + utils.formatElapsedTime(time.time() - startTime))  # end timing
logger.info('==============================================')
|
{
"content_hash": "d45563271f72ea882e647c0073fd53c4",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 104,
"avg_line_length": 37.644444444444446,
"alnum_prop": 0.6422668240850059,
"repo_name": "wsdream/WS-DREAM",
"id": "43c502c2979e211b59335bfdd25b8f9b8561d9d5",
"size": "1977",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "benchmarks/model-based/LN_LFM/run_tp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7398"
},
{
"name": "C++",
"bytes": "78282"
},
{
"name": "Python",
"bytes": "131548"
}
],
"symlink_target": ""
}
|
# Text-adventure exercise (Python 2 syntax: `print` statement, raw_input).
# The player chooses between two doors; every branch prints an ending.
print "You enter a dark room with two doors. Do you go through door #1 or door #2?"
door = raw_input("> ")

if door == "1":
    # Door 1: the bear encounter — two listed choices plus a catch-all.
    print "There's a giant bear here eating a cheese cake. What do you do?"
    print "1. Take the cake."
    print "2. Scream at the bear."

    bear = raw_input("> ")

    if bear == "1":
        print "The bear eats your face off. Good job!"
    elif bear == "2":
        print "The bear eats your legs off. Good job!"
    else:
        # Any unrecognized answer is echoed back into the ending line.
        print "Well, doing %s is probably better. Bear runs away." % bear

elif door == "2":
    # Door 2: the Cthulhu encounter — answers "1" and "2" survive, anything
    # else (including the listed "3") does not.
    print "You stare into the endless abyss at Cthulhu's retina."
    print "1. Blueberries."
    print "2. Yellow jacket clothespins."
    print "3. Understanding revolvers yelling melodies."

    insanity = raw_input("> ")

    if insanity == "1" or insanity == "2":
        print "Your body survives powered by a mind of jello. Good job!"
    else:
        print "The insanity rots your eyes into a pool of muck. Good job!"

else:
    # Anything other than "1" or "2" at the first prompt ends the game here.
    print "You stumble around and fall on a knife and die. Good job!"
|
{
"content_hash": "f3ebe08cd653e2370821db2bb38f8b9d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 83,
"avg_line_length": 31.228571428571428,
"alnum_prop": 0.6010978956999085,
"repo_name": "cloudedge/LearningPython",
"id": "49f0ec71adeffd8e13afd6135635098ab55fb9d2",
"size": "1093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex31.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23065"
}
],
"symlink_target": ""
}
|
"""
"""
from gluegov.lib.tables import CSVTable
CSVTable(
"communications",
"broadbandcoverage",
"http://d2a9983j4okwzn.cloudfront.net/downloads/ofcom-uk-broadband-speed-data-2013.csv",
"broadband_coverage_2013.csv"
).parse(keyRow=1)
CSVTable(
"communications",
"mopbilecoverage",
"http://d2a9983j4okwzn.cloudfront.net/downloads/ofcom-uk-mobile-coverage-data-2012.csv",
"mobile_coverage_2012.csv"
).parse(keyRow=1)
#http://d2a9983j4okwzn.cloudfront.net/downloads/ofcom-uk-broadband-speed-data-2013.csv
#http://d2a9983j4okwzn.cloudfront.net/downloads/ofcom-uk-mobile-coverage-data-2012.csv
|
{
"content_hash": "3c28628ebaac16db2d19640b36662a36",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 92,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.7483974358974359,
"repo_name": "JoeReid/GlueGov",
"id": "2aa8f7bb829824b9cbe55e100dac4f5bdb8b2a69",
"size": "624",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/gluegov/data/communications.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2911"
},
{
"name": "HTML",
"bytes": "9082"
},
{
"name": "Makefile",
"bytes": "3603"
},
{
"name": "Mako",
"bytes": "1034"
},
{
"name": "Python",
"bytes": "17950"
}
],
"symlink_target": ""
}
|
"""
Bake and deploy test to see if Spinnaker can interoperate with Microsoft Azure.
Sample Usage:
Assuming you have created $CITEST_ROOT points to the root directory of this repository
(which is . if you execute this from the root)
PYTHONPATH=$CITEST_ROOT:$CITEST_ROOT/spinnaker_testing \
python $CITEST_ROOT/tests/azure_bake_and_deploy_test.py \
--azure_storage_account_name=$AZURE_STORAGE_ACCOUNT_NAME, \
--azure_storage_account_key=$AZURE_STORAGE_ACCOUNT_KEY, \
--spinnaker_azure_account=$SPINNAKER_AZURE_ACCOUNT, \
--test_azure_subscription_id=$TEST_AZURE_SUBSCRIPTION_ID, \
--test_azure_rg_location=$TEST_AZURE_RG_LOCATION, \
--test_azure_resource_group=$TEST_AZURE_RG, \
--test_azure_vnet=$TEST_AZURE_VNET_NAME, \
--test_azure_subnets=$TEST_AZURE_SUBNET1_NAME=$TEST_AZURE_SUBNET1_ADDRESS,\
$TEST_AZURE_SUBNET2_NAME=$TEST_AZURE_SUBNET1_ADDRESS \
--native_hostname=localhost, \
--native_platform=native, \
"""
# Standard python modules.
import sys
import json
# citest modules.
import citest.base
import citest.azure_testing as az
import citest.json_contract as jc
import citest.json_predicate as jp
import citest.service_testing as st
# Spinnaker modules.
import spinnaker_testing as sk
import spinnaker_testing.gate as gate
ov_factory = jc.ObservationPredicateFactory()
class AzureBakeAndDeployTestScenario(sk.SpinnakerTestScenario):
  """Defines the scenario for the test

  This scenario defines the different test operations.
  We're going to:
  Create a Spinnaker Application
  Create a LoadBalancer
  Create a pipeline to bake and deploy, which will create a server group
  Create a pipeline to disable and destroy the server group
  Trigger the bake_and_deploy pipeline
  Trigger the disable_and_destroy pipeline
  Delete the bake_and_deploy pipeline
  Delete the disable_and_destroy pipeline
  Delete the LoadBalancer
  Delete the Spinnaker Application
  """

  @classmethod
  def new_agent(cls, bindings):
    """Implements citest.service_testing.AgentTestScenario.new_agent."""
    agent = gate.new_agent(bindings)
    agent.default_max_wait_secs = 180
    return agent

  @classmethod
  def initArgumentParser(cls, parser, defaults=None):
    """Initialize command line argument parser.

    Args:
      parser: argparse.ArgumentParser
    """
    super(AzureBakeAndDeployTestScenario, cls).initArgumentParser(
        parser, defaults=defaults)
    defaults = defaults or {}
    parser.add_argument(
        '--test_namespace', default='default',
        help='The namespace to manage within the tests.')

  def __init__(self, bindings, agent=None):
    """Constructor.

    Args:
      bindings: [dict] The data bindings to use to configure the scenario.
      agent: [GateAgent] The agent for invoking the test operations on Gate.
    """
    # to avoid error when citest call binding
    bindings['GCE_PROJECT'] = None
    bindings['GOOGLE_PRIMARY_MANAGED_PROJECT_ID'] = None
    super(AzureBakeAndDeployTestScenario, self).__init__(bindings, agent)
    bindings = self.bindings

    self.TEST_APP = bindings['TEST_APP']
    self.ACCOUNT = bindings['SPINNAKER_AZURE_ACCOUNT']
    self.__rg_name = bindings['TEST_AZURE_RESOURCE_GROUP']
    self.__rg_location = bindings['TEST_AZURE_RG_LOCATION']
    self.__subscription_id = bindings['TEST_AZURE_SUBSCRIPTION_ID']
    self.__vnet_name = bindings['TEST_AZURE_VNET']
    # TEST_AZURE_SUBNETS is a comma-separated list of <name>=<address> pairs;
    # split it once into parallel name/address lists.
    self.__subnets = [sn.split('=')[0] for sn in bindings['TEST_AZURE_SUBNETS'].split(',')]
    self.__subnets_address = [sn.split('=')[1] for sn in bindings['TEST_AZURE_SUBNETS'].split(',')]
    self.__os_type = bindings['TEST_AZURE_OSTYPE']
    self.__base_os = bindings['TEST_AZURE_BASEOS']
    self.__stack = bindings['TEST_STACK']
    self.__detail = 'dt'
    self.__sku = dict(
        name=bindings['TEST_AZURE_VM_SKU'],
        tier='Standard',
        capacity=1
    )
    self.__full_lb_name = '{app}-{stack}-{detail}'.format(
        app=self.TEST_APP, stack=self.__stack,
        detail=self.__detail)
    # The scenario uses subnet index 0 for the server group and index 1
    # for the load balancer, so at least two of each must be configured.
    assert len(self.__subnets) >= 2
    assert len(self.__subnets_address) >= 2

  def create_app(self):
    """Creates OperationContract that creates a new Spinnaker Application."""
    return st.OperationContract(
        self.agent.make_create_app_operation(
            bindings=self.bindings, application=self.TEST_APP,
            account_name=self.ACCOUNT),
        contract=jc.Contract())

  def delete_app(self):
    """Creates OperationContract that deletes a new Spinnaker Application."""
    return st.OperationContract(
        self.agent.make_delete_app_operation(
            application=self.TEST_APP,
            account_name=self.ACCOUNT),
        contract=jc.Contract(),
        cleanup=self.delete_resource_group)

  def delete_resource_group(self, _unused_execution_context):
    """Deletes the Azure Resource Group created by this Spinnaker Application."""
    execution_context = citest.base.ExecutionContext()
    args = ['--name',
            '{app}-{rg}'.format(app=self.TEST_APP, rg=self.__rg_location),
            '--yes']  # wait until the Resource Group deleted
    cmd = self.az_observer.build_az_command_args('group', 'delete', args)
    self.az_observer.run(execution_context.eval(cmd))

  def create_load_balancer(self):
    """Create OperationContract that create a new Load Balancer

    To verify the operation, we just check that the spinnaker load balancer
    for the given application was created.
    """
    healthyCheck = [{
        "probeName": "{lb}-probe".format(lb=self.__full_lb_name),
        "probeProtocol": "HTTP",
        "probePort": "80",
        "probePath": "/",
        "probeInterval": 30,
        "unhealthyThreshold": 8,
        "timeout": 120
    }]
    rules = [{
        "ruleName": "{lb}-rule0".format(lb=self.__full_lb_name),
        "protocol": "HTTP",
        "externalPort": 80,
        "backendPort": 80,
        "probeName": "{lb}-probe".format(lb=self.__full_lb_name),
        "persistence": "None",
        "idleTimeout": 4
    }]
    subnets = [{
        "account": self.ACCOUNT,
        "addressPrefix": self.__subnets_address[1],
        "device": [],
        "id": '/subscriptions/{id}/resourceGroups/{rg}/providers/Microsoft.Network/virtualNetworks/{vnet}/subnets/{name}'.format(
            id=self.__subscription_id, rg=self.__rg_name,
            vnet=self.__vnet_name, name=self.__subnets[1]
        ),
        "name": self.__subnets[1],
        "purpose": 'TBD',
        "region": self.__rg_location,
        "type": 'azure',
        "vnet": self.__vnet_name
    }]
    vnet = {
        "account": self.ACCOUNT,
        "cloudProvider": "azure",
        "id": self.__vnet_name,
        "name": self.__vnet_name,
        "region": self.__rg_location,
        "resourceGroup": self.__rg_name,
        "subnets": subnets
    }
    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            "stack": self.__stack,
            "detail": self.__detail,
            "credentials": self.ACCOUNT,
            "region": self.__rg_location,
            "cloudProvider": "azure",
            "vnet": self.__vnet_name,
            "subnet": self.__subnets[1],
            "probes": healthyCheck,
            "securityGroups": [],
            "loadBalancingRules": rules,
            "name": self.__full_lb_name,
            "selectedVnet": vnet,
            "vnetResourceGroup": self.__rg_name,
            "selectedSubnet": subnets[0],
            "type": "upsertLoadBalancer",
            "loadBalancerType": "Azure Application Gateway",
            "appName": self.TEST_APP,
            "loadBalancerName": self.__full_lb_name,
            "user": "[anonymous]"
        }],
        description="Test - Create load balancer: {lb}".format(
            lb=self.__full_lb_name),
        application=self.TEST_APP)

    # Verify via `az network application-gateway list` that a gateway with
    # the expected name and vnet/subnet tags exists.
    builder = az.AzContractBuilder(self.az_observer)
    (builder.new_clause_builder(
        'Load Balancer Created', retryable_for_secs=30)
     .collect_resources(
         az_resource='network',
         command='application-gateway',
         args=['list', '--resource-group',
               '{app}-{rg}'.format(app=self.TEST_APP, rg=self.__rg_location)])
     .EXPECT(ov_factory.value_list_contains(
         jp.DICT_MATCHES({
             'name': jp.STR_EQ(self.__full_lb_name),
             'tags': jp.DICT_MATCHES({
                 'vnet': jp.STR_EQ(self.__vnet_name),
                 'subnet': jp.STR_EQ(self.__subnets[1])
             })
         }))))

    return st.OperationContract(
        self.new_post_operation(
            title="create_load_balancer", data=payload,
            path=('applications/{app}/tasks').format(app=self.TEST_APP),
            max_wait_secs=2400),
        contract=builder.build())

  def delete_load_balancer(self):
    """Create OperationContract that delete the Load Balancer

    To verify the operation, we just check that the spinnaker load balancer
    for the given application was deleted.
    """
    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            "cloudProvider": "azure",
            "loadBalancerName": self.__full_lb_name,
            "credentials": self.ACCOUNT,
            "region": self.__rg_location,
            "appName": self.TEST_APP,
            "type": "deleteLoadBalancer",
            "user": "[anonymous]"
        }],
        description="Test - Delete load balancer: {lb}".format(
            lb=self.__full_lb_name),
        application=self.TEST_APP)

    builder = az.AzContractBuilder(self.az_observer)
    (builder.new_clause_builder(
        'Load Balancer Deleted', retryable_for_secs=30)
     .collect_resources(
         az_resource='network',
         command='application-gateway',
         args=['list', '--resource-group',
               '{app}-{rg}'.format(app=self.TEST_APP, rg=self.__rg_location)])
     # expected no lb
     .EXPECT(ov_factory.error_list_contains(
         jp.ExceptionMatchesPredicate(
             klass=st.CliAgentRunError,
             regex=r'(?:.* operation: Cannot find .*)|(?:.*\(.*could not be found.\).*)')))
     # or no target lb
     .OR(ov_factory.value_list_path_excludes(
         'name', jp.STR_EQ(self.__full_lb_name)))
     )

    return st.OperationContract(
        self.new_post_operation(
            title='delete_load_balancer', data=payload,
            path=('applications/{app}/tasks').format(app=self.TEST_APP),
            max_wait_secs=1800),
        contract=builder.build())

  def make_bake_stage(self, providerType, package="", requisiteStage=None, **kwargs):
    """Builds the pipeline 'bake' stage definition dict.

    requisiteStage defaults to None (treated as []) instead of a shared
    mutable default list.  Extra keyword args override/extend the stage.
    """
    if requisiteStage is None:
      requisiteStage = []
    stage = {
        "refId": "BAKE",
        "requisiteStageRefIds": requisiteStage,
        "type": "bake",
        "name": "Bake",
        "cloudProviderType": providerType,
        "extendedAttributes": {},
        "regions": [self.__rg_location],
        "user": "[anonymous]",
        "osType": self.__os_type,
        "baseOs": self.__base_os,
        "baseLabel": "release",
        "package": package,
    }
    stage.update(kwargs)
    return stage

  def make_azure_deploy_stage(self, requisiteStage=None, **kwargs):
    """Builds the pipeline 'deploy' stage definition dict for Azure.

    requisiteStage defaults to None (treated as []) instead of a shared
    mutable default list.  Extra keyword args override/extend the stage.
    """
    if requisiteStage is None:
      requisiteStage = []
    # Empty image fields: the deploy stage consumes the image produced by
    # the preceding bake stage at execution time.
    image = {
        "imageName": "",
        "isCustom": "true",
        "publisher": "",
        "offer": "",
        "sku": "",
        "version": "",
        "region": self.__rg_location,
        "uri": "",
        "ostype": ""
    }
    clusters = [{
        "name": self.__full_lb_name,
        "cloudProvider": "azure",
        "application": self.TEST_APP,
        "stack": self.__stack,
        "strategy": "",
        "rollback": {
            "onFailure": None,
        },
        "allowDeleteActive": None,
        "allowScaleDownActive": None,
        "detail": self.__detail,
        "freeFormDetails": self.__detail,
        "account": self.ACCOUNT,
        "selectedProvider": "azure",
        "vnet": self.__vnet_name,
        "subnet": self.__subnets[0],
        "useSourceCapacity": False,
        "capacity": {
            "min": 1,
            "max": 1
        },
        "region": self.__rg_location,
        "loadBalancerName": self.__full_lb_name,
        "user": "[anonymous]",
        "upgradePolicy": "Manual",
        "type": "createServerGroup",
        "image": image,
        "sku": self.__sku,
        "instanceTags": {},
        "viewState": {
            "instanceProfile": "custom",
            "allImageSelection": None,
            "useAllImageSelection": False,
            "useSimpleCapacity": True,
            "usePreferredZones": True,
            "mode": "createPipeline",
            "disableStrategySelection": True,
            "loadBalancersConfigured": True,
            "networkSettingsConfigured": True,
            "securityGroupsConfigured": True,
            "disableImageSelection": True,
            "showImageSourceSelector": True,
            "expectedArtifacts": [],
            "imageId": None,
            "readOnlyFields": {},
            "submitButtonLabel": "Add",
            "hideClusterNamePreview": False,
            "templatingEnabled": True
        },
        "osConfig": {
            "customData": None
        },
        "customScriptsSettings": {
            "fileUris": None,
            "commandToExecute": ""
        },
        "zonesEnabled": False,
        "zones": [],
        "enableInboundNAT": False,
        "instanceType": self.__sku['name'],
        "interestingHealthProviderNames": []
    }]
    stage = {
        "refId": "DEPLOY",
        "requisiteStageRefIds": requisiteStage,
        "type": "deploy",
        "name": "Deploy",
        "clusters": clusters
    }
    stage.update(kwargs)
    return stage

  def make_azure_disable_group_stage(self, requisiteStage=None, **kwargs):
    """Builds the 'disableServerGroup' stage definition dict.

    requisiteStage defaults to None (treated as []) instead of a shared
    mutable default list.  Extra keyword args override/extend the stage.
    """
    if requisiteStage is None:
      requisiteStage = []
    moniker = {
        "app": self.TEST_APP,
        "cluster": self.__full_lb_name,
        "detail": self.__detail,
        "stack": self.__stack
    }
    stage = {
        "cloudProvider": "azure",
        "cloudProviderType": "azure",
        "cluster": self.__full_lb_name,
        "credentials": self.ACCOUNT,
        "moniker": moniker,
        "name": "Disable Server Group",
        "refId": "DISABLE",
        "regions": [self.__rg_location],
        "requisiteStageRefIds": requisiteStage,
        "target": "current_asg_dynamic",
        "type": "disableServerGroup"
    }
    stage.update(kwargs)
    return stage

  def make_azure_destroy_group_stage(self, requisiteStage=None, **kwargs):
    """Builds the 'destroyServerGroup' stage definition dict.

    requisiteStage defaults to None (treated as []) instead of a shared
    mutable default list.  Extra keyword args override/extend the stage.
    """
    if requisiteStage is None:
      requisiteStage = []
    moniker = {
        "app": self.TEST_APP,
        "cluster": self.__full_lb_name,
        "detail": self.__detail,
        "stack": self.__stack
    }
    stage = {
        "cloudProvider": "azure",
        "cloudProviderType": "azure",
        "cluster": self.__full_lb_name,
        "credentials": self.ACCOUNT,
        "interestingHealthProviderNames": [],
        "moniker": moniker,
        "name": "Destroy Server Group",
        "refId": "DESTROY",
        "regions": [self.__rg_location],
        "requisiteStageRefIds": requisiteStage,
        "target": "current_asg_dynamic",
        "type": "destroyServerGroup"
    }
    stage.update(kwargs)
    return stage

  def create_bake_and_deploy_pipeline(self):
    """Create OperationContract that create the bake and deploy pipeline

    To verify the operation, we just check that the bake and deploy pipeline
    with the default name was created.
    """
    name = 'BakeAndDeploy'
    # Remember the pipeline id so trigger/delete test cases can find it.
    self.bake_pipeline_id = name
    bake_stage = self.make_bake_stage(providerType='azure')
    deploy_stage = self.make_azure_deploy_stage(requisiteStage=['BAKE'])

    pipeline_spec = dict(
        stages=[bake_stage, deploy_stage],
        stageCounter=2,
        triggers=[],
        limitConcurrent=True,
        keepWaitingPipelines=False,
        name=name,
        application=self.TEST_APP
    )
    payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)

    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('Has Pipeline')
     .get_url_path(
         'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
     .contains_path_value(None, {"name": name})
     )

    return st.OperationContract(
        self.new_post_operation(
            title="create bake pipeline", data=payload, path='pipelines',
            status_class=st.SynchronousHttpOperationStatus),
        contract=builder.build())

  def create_disable_and_destroy_pipeline(self):
    """Create OperationContract that create the disable and destroy pipeline

    To verify the operation, we just check that the disable and destroy pipeline
    with the default name was created.
    """
    name = 'DisableAndDestroy'
    # Remember the pipeline id so trigger/delete test cases can find it.
    self.destroy_pipeline_id = name
    disable_stage = self.make_azure_disable_group_stage()
    destroy_stage = self.make_azure_destroy_group_stage(requisiteStage=["DISABLE"])

    pipeline_spec = dict(
        stages=[disable_stage, destroy_stage],
        stageCounter=2,
        triggers=[],
        limitConcurrent=True,
        keepWaitingPipelines=False,
        name=name,
        application=self.TEST_APP
    )
    payload = self.agent.make_json_payload_from_kwargs(**pipeline_spec)

    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('Has Pipeline')
     .get_url_path(
         'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
     .contains_path_value(None, {"name": name})
     )

    return st.OperationContract(
        self.new_post_operation(
            title="create destroy pipeline", data=payload, path='pipelines',
            status_class=st.SynchronousHttpOperationStatus),
        contract=builder.build())

  def delete_pipeline(self, pipeline_id):
    """Create OperationContract that delete target pipeline

    Args:
      pipeline_id: [str] The name of the pipeline to be delete

    To verify the operation, we just check that the pipeline
    with the given name was deleted.
    """
    builder = st.HttpContractBuilder(self.agent)
    (builder.new_clause_builder('Has Pipeline', retryable_for_secs=5)
     .get_url_path(
         'applications/{app}/pipelineConfigs'.format(app=self.TEST_APP))
     .excludes_path_value('name', pipeline_id))

    return st.OperationContract(
        self.new_delete_operation(
            title="delete pipeline", data="",
            path=('pipelines/{app}/{pl}'.format(
                app=self.TEST_APP,
                pl=pipeline_id
            )),
            status_class=st.SynchronousHttpOperationStatus),
        contract=builder.build())

  def trigger_bake_and_deploy_pipeline(self):
    """Create OperationContract that manually trigger the bake and deploy pipeline

    This create a new server group below the given load balancer.
    To verify the operation, we check that the spinnaker server group
    for the given load balancer was created in correct size.
    """
    pipeline_id = self.bake_pipeline_id
    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            "dryRun": False,
            "type": "manual",
            "user": "[anonymous]"
        }],
        description="Test - begin bake and deploy: {pl}".format(
            pl=pipeline_id),
        application=self.TEST_APP)

    # Verify via `az vmss list` that the v000 scale set exists with the
    # expected provisioning state, app-gateway tag, and SKU.
    builder = az.AzContractBuilder(self.az_observer)
    (builder.new_clause_builder(
        "Has Virtual Machine Scale Set", retryable_for_secs=30)
     .collect_resources(
         az_resource='vmss',
         command='list',
         args=['--resource-group',
               '{app}-{rg}'.format(app=self.TEST_APP, rg=self.__rg_location)])
     .EXPECT(ov_factory.value_list_contains(
         jp.DICT_MATCHES({
             "name": jp.STR_EQ('{lb}-v000'.format(lb=self.__full_lb_name)),
             "provisioningState": jp.STR_EQ('Succeeded'),
             "tags": jp.DICT_MATCHES({
                 "appGatewayName": jp.STR_EQ(self.__full_lb_name)
             }),
             "sku": jp.DICT_MATCHES({
                 "name": jp.STR_EQ(self.__sku['name']),
                 "tier": jp.STR_EQ(self.__sku['tier']),
                 "capacity": jp.NUM_EQ(self.__sku['capacity'])
             })
         })
     )))

    return st.OperationContract(
        self.new_post_operation(
            title='bake and deploy', data=payload,
            # TODO: cannot use v2 url: pipelines/v2/{app}/{pl}
            path='pipelines/{app}/{pl}'.format(
                app=self.TEST_APP, pl=pipeline_id),
            max_wait_secs=3600),
        contract=builder.build())

  def trigger_disable_and_destroy(self):
    """Create OperationContract that manually trigger the disable and destroy pipeline

    To verify the operation, we just check that the spinnaker server group
    for the given load balancer was deleted.
    """
    pipeline_id = self.destroy_pipeline_id
    payload = self.agent.make_json_payload_from_kwargs(
        job=[{
            "dryRun": False,
            "type": "manual",
            "user": "[anonymous]"
        }],
        description="Test - begin disable and destroy server group: {pl}".format(
            pl=pipeline_id),
        application=self.TEST_APP)

    builder = az.AzContractBuilder(self.az_observer)
    (builder.new_clause_builder(
        "Has No Virtual Machine Scale Set", retryable_for_secs=30)
     .collect_resources(
         az_resource='vmss',
         command='list',
         args=['--resource-group',
               '{app}-{rg}'.format(app=self.TEST_APP, rg=self.__rg_location)])
     # Either the whole resource group is already gone (CLI error) ...
     .EXPECT(ov_factory.error_list_contains(
         jp.ExceptionMatchesPredicate(
             klass=st.CliAgentRunError,
             regex=r'(?:.* operation: Cannot find .*)|(?:.*\(.*could not be found.\).*)')))
     # ... or the target scale set is absent from the listing.
     .OR(ov_factory.value_list_path_excludes(
         "name", jp.STR_EQ("{lb}-v000".format(lb=self.__full_lb_name))))
     )

    return st.OperationContract(
        self.new_post_operation(
            title='disable and destroy', data=payload,
            # TODO: cannot use v2 url: pipelines/v2/{app}/{pl}
            path='pipelines/{app}/{pl}'.format(
                app=self.TEST_APP, pl=pipeline_id),
            max_wait_secs=3600),
        contract=builder.build())
class AzureTest(st.AgentTestCase):
  """The test fixture for the AzureBakeAndDeployTest.

  This is implemented using citest OperationContract instances that are
  created by the AzureBakeAndDeployTestScenario.  Method names are
  alphabetized so the fixture runs the lifecycle in order: create app,
  create LB, create pipelines, trigger them, then tear everything down.
  """

  @property
  def scenario(self):
    runner = citest.base.TestRunner.global_runner()
    return runner.get_shared_data(AzureBakeAndDeployTestScenario)

  def test_a_create_app(self):
    contract = self.scenario.create_app()
    self.run_test_case(contract)

  def test_b_create_load_balancer(self):
    contract = self.scenario.create_load_balancer()
    self.run_test_case(contract)

  def test_c1_create_bake_and_deploy_pipeline(self):
    contract = self.scenario.create_bake_and_deploy_pipeline()
    self.run_test_case(contract)

  def test_c2_create_disable_and_destroy_pipeline(self):
    contract = self.scenario.create_disable_and_destroy_pipeline()
    self.run_test_case(contract)

  def test_d_trigger_bake_and_deploy_pipeline(self):
    contract = self.scenario.trigger_bake_and_deploy_pipeline()
    self.run_test_case(contract)

  def test_w_trigger_disable_and_destroy_pipeline(self):
    contract = self.scenario.trigger_disable_and_destroy()
    self.run_test_case(contract)

  def test_x1_delete_bake_and_deploy_pipeline(self):
    scenario = self.scenario
    self.run_test_case(scenario.delete_pipeline(scenario.bake_pipeline_id))

  def test_x2_delete_disable_and_destroy_pipeline(self):
    scenario = self.scenario
    self.run_test_case(scenario.delete_pipeline(scenario.destroy_pipeline_id))

  def test_y_delete_load_balancer(self):
    contract = self.scenario.delete_load_balancer()
    self.run_test_case(contract, max_retries=1)

  def test_z_delete_app(self):
    contract = self.scenario.delete_app()
    self.run_test_case(contract, retry_interval_secs=8, max_retries=8)
def main():
  """Implements the main method running this smoke test."""
  test_id = AzureBakeAndDeployTestScenario.DEFAULT_TEST_ID
  defaults = {
      'TEST_STACK': 'st',
      'TEST_APP': 'azurebaketest' + test_id,
  }
  return citest.base.TestRunner.main(
      parser_inits=[AzureBakeAndDeployTestScenario.initArgumentParser],
      default_binding_overrides=defaults,
      test_case_list=[AzureTest])


if __name__ == '__main__':
  sys.exit(main())
|
{
"content_hash": "c35bbd5388b4ad4af353757c650f89d3",
"timestamp": "",
"source": "github",
"line_count": 695,
"max_line_length": 133,
"avg_line_length": 37.90791366906475,
"alnum_prop": 0.5571244211645031,
"repo_name": "ewiseblatt/spinnaker",
"id": "6f2b62749bfcdc86125c72238fa3a5c38a013d68",
"size": "26946",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testing/citest/tests/azure_bake_and_deploy_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1832"
},
{
"name": "Go",
"bytes": "8690"
},
{
"name": "HTML",
"bytes": "614"
},
{
"name": "Jsonnet",
"bytes": "35034"
},
{
"name": "Makefile",
"bytes": "276"
},
{
"name": "Python",
"bytes": "1250457"
},
{
"name": "Shell",
"bytes": "185432"
},
{
"name": "Smarty",
"bytes": "2087"
}
],
"symlink_target": ""
}
|
'''
Free as freedom will be 26/8/2016
@author: luisza
'''
from django.conf.urls import url, include
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponseRedirect, HttpResponseForbidden
from django.urls.base import reverse_lazy, reverse
from django.urls.exceptions import NoReverseMatch
from django.views import View
from django.views.generic import (ListView, CreateView, DeleteView,
UpdateView, DetailView)
from cruds_adminlte import utils
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.db.models.query_utils import Q
from django.shortcuts import get_object_or_404
from cruds_adminlte.filter import get_filters
from django.template.loader import render_to_string
import types
class CRUDMixin(object):
    """Mixin applied to the generated CRUD class-based views.

    It supplies template resolution, permission checking, filter/search
    handling, and context building shared by the list/detail/create/
    update/delete views.  It expects the concrete view to define
    attributes such as ``model``, ``view_type``, ``namespace``,
    ``template_name``, ``perms``, ``all_perms``, ``views_available``,
    ``check_perms``, ``related_fields``, and ``list_filter``.
    """

    def get_template_names(self):
        # Candidate templates, most specific first:
        # <app_label>/<model>/<template_name>, then <template_name> itself,
        # then the generic cruds/<basename> fallback.
        dev = []
        base_name = "%s/%s/" % (self.model._meta.app_label,
                                self.model.__name__.lower())
        dev.append(base_name + self.template_name)
        dev.append(self.template_name)
        base = self.template_name.split("/")[-1]
        dev.append("cruds/" + base)
        return dev

    def get_search_fields(self, context):
        # Expose configured search fields; False when the view defines none.
        try:
            context['search'] = self.search_fields
        except AttributeError:
            context['search'] = False
        # Echo the current search query back to the list template.
        if self.view_type == 'list' and 'q' in self.request.GET:
            context['q'] = self.request.GET.get('q', '')

    def get_filters(self, context):
        # NOTE: the get_filters(...) call below resolves to the module-level
        # function imported from cruds_adminlte.filter, not to this method.
        filter_params = []
        if self.view_type == 'list' and self.list_filter:
            filters = get_filters(self.model, self.list_filter, self.request)
            context['filters'] = filters
            for filter in filters:
                param = filter.get_params(self.related_fields or [])
                if param:
                    filter_params += param
        # Append active filter params to the query string carried between views.
        if filter_params:
            if self.getparams:
                self.getparams += "&"
            self.getparams += "&".join(filter_params)

    def validate_user_perms(self, user, perm, view):
        """Check one permission entry: either a callable(user, view) predicate
        or a standard Django permission string."""
        if isinstance(perm, types.FunctionType):
            return perm(user, view)
        return user.has_perm(perm)

    def get_check_perms(self, context):
        # Build a {view_name: bool} map of what the current user may do.
        user = self.request.user
        available_perms = {}
        for perm in self.all_perms:
            if self.check_perms:
                # A view must be both enabled and fully permitted.
                if perm in self.views_available:
                    available_perms[perm] = all([
                        self.validate_user_perms(user, x, perm)
                        for x in self.all_perms[perm]])
                else:
                    available_perms[perm] = False
            else:
                # Permission checking disabled: everything is allowed.
                available_perms[perm] = True
        context['crud_perms'] = available_perms

    def get_urls_and_fields(self, context):
        """Populate context with the visible fields and the reverse()d URLs
        for every instance-level and list-level CRUD action."""
        include = None
        if hasattr(self, 'display_fields') and self.view_type == 'detail':
            include = getattr(self, 'display_fields')
        if hasattr(self, 'list_fields') and self.view_type == 'list':
            include = getattr(self, 'list_fields')

        context['fields'] = utils.get_fields(self.model, include=include)
        # Instance actions need an object pk; only resolve them when bound.
        if hasattr(self, 'object') and self.object:
            for action in utils.INSTANCE_ACTIONS:
                try:
                    nurl = utils.crud_url_name(self.model, action)
                    if self.namespace:
                        nurl = self.namespace + ':' + nurl
                    url = reverse(nurl, kwargs={'pk': self.object.pk})
                except NoReverseMatch:
                    # URL not registered for this action; template hides it.
                    url = None
                context['url_%s' % action] = url

        for action in utils.LIST_ACTIONS:
            try:
                nurl = utils.crud_url_name(self.model, action)
                if self.namespace:
                    nurl = self.namespace + ':' + nurl
                url = reverse(nurl)
            except NoReverseMatch:
                url = None
            context['url_%s' % action] = url

    def get_context_data(self, **kwargs):
        """
        Adds available urls and names.
        """
        context = super(CRUDMixin, self).get_context_data(**kwargs)
        context.update({
            'model_verbose_name': self.model._meta.verbose_name,
            'model_verbose_name_plural': self.model._meta.verbose_name_plural,
            'namespace': self.namespace
        })
        context.update({'blocks': self.template_blocks})
        if self.view_type in ['update', 'detail']:
            context['inlines'] = self.inlines
        # Fall back to the model class so templates can still introspect it.
        if 'object' not in context:
            context['object'] = self.model
        # These helpers mutate both context and self.getparams; the call
        # order matters (filters append to the params built in dispatch()).
        self.get_urls_and_fields(context)
        self.get_check_perms(context)
        self.get_search_fields(context)
        self.get_filters(context)
        context['views_available'] = self.views_available
        if self.view_type == 'list':
            context['paginate_template'] = self.paginate_template
            context['paginate_position'] = self.paginate_position
            context['template_father'] = self.template_father
        context.update(self.context_rel)
        # Always produce a usable query-string prefix ("?" or "?...&").
        context['getparams'] = "?" + self.getparams
        context['getparams'] += "&" if self.getparams else ""
        return context

    def dispatch(self, request, *args, **kwargs):
        """Resolve related-object GET params and enforce view permissions
        before delegating to the normal View.dispatch()."""
        self.related_fields = self.related_fields or []
        self.context_rel = {}
        getparams = []
        self.getparams = ''
        for related in self.related_fields:
            pk = self.request.GET.get(related, '')
            if pk:
                # 404 early if the referenced related object does not exist.
                Classrelated = utils.get_related_class_field(
                    self.model, related)
                self.context_rel[related] = get_object_or_404(
                    Classrelated, pk=pk)
                getparams.append("%s=%s" % (
                    related, str(self.context_rel[related].pk)))
        if getparams:
            self.getparams = "&".join(getparams)
        for perm in self.perms:
            if not self.validate_user_perms(request.user, perm,
                                            self.view_type):
                return HttpResponseForbidden(render_to_string(
                    'cruds/403.html', request=request
                ))
        return View.dispatch(self, request, *args, **kwargs)
class CRUDView(object):
    """
    CRUDView is a generic way to provide create, list, detail, update,
    delete views in one class; you can inherit from it and manage
    login_required, model perms, pagination, update and add forms.

    How to use, in views:

    .. code:: python

        from testapp.models import Customer
        from cruds_adminlte.crud import CRUDView

        class Myclass(CRUDView):
            model = Customer

    In urls.py:

    .. code:: python

        myview = Myclass()
        urlpatterns = [
            url('path', include(myview.get_urls()))  # also supports namespace
        ]

    The default behavior is check_login = True and check_perms = True, but
    you can turn them off with:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            check_login = False
            check_perms = False

    You can also define extra perms with:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            perms = {'create': ['applabel.mycustom_perm'],
                     'list': [],
                     'delete': [],
                     'update': [],
                     'detail': []}

    If check_perms = True the default django model perms are added
    (<applabel>.[add|change|delete|view]_<model>).

    You can also overwrite the add and update forms:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            add_form = MyFormClass
            update_form = MyFormClass

    And of course overwrite the base template name:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            template_name_base = "mybase"

    Remember basename is generated like app_label/modelname if
    template_name_base is set as None, and 'cruds' by default, so the
    template loader searches this structure:

        basename + '/create.html'
        basename + '/detail.html'
        basename + '/update.html'
        basename + '/list.html'
        basename + '/delete.html'

    Note: it also imports <applabel>/<model>/<basename>/<view type>.html

    Using namespace:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            namespace = "mynamespace"

        myview = Myclass()
        urlpatterns = [
            url('path', include(myview.get_urls(),
                                namespace="mynamespace"))
        ]

    If you want to filter views add a views_available list:

    .. code:: python

        class Myclass(CRUDView):
            model = Customer
            views_available = ['create', 'list', 'delete',
                               'update', 'detail']
    """
    model = None
    template_name_base = "cruds"
    template_blocks = {}
    namespace = None
    fields = '__all__'
    urlprefix = ""
    check_login = True
    check_perms = True
    paginate_by = 10
    paginate_template = 'cruds/pagination/prev_next.html'
    paginate_position = 'Bottom'
    update_form = None
    add_form = None
    display_fields = None
    list_fields = None
    inlines = None
    views_available = None
    template_father = "cruds/base.html"
    search_fields = None
    split_space_search = False
    related_fields = None
    list_filter = None
    mixin = CRUDMixin

    # When overridden, `perms` must keep this structure:
    # perms = {'create': [], 'list': [], 'delete': [], 'update': [],
    #          'detail': []}
    perms = None

    # ------------------------------------------------------------------
    # DECORATORS
    # ------------------------------------------------------------------
    def check_decorator(self, viewclass):
        """Wrap *viewclass* with login_required when check_login is on."""
        if self.check_login:
            return login_required(viewclass)
        return viewclass

    def decorator_create(self, viewclass):
        """Decorator hook for the create view; override to customize."""
        return self.check_decorator(viewclass)

    def decorator_detail(self, viewclass):
        """Decorator hook for the detail view; override to customize."""
        return self.check_decorator(viewclass)

    def decorator_list(self, viewclass):
        """Decorator hook for the list view; override to customize."""
        return self.check_decorator(viewclass)

    def decorator_update(self, viewclass):
        """Decorator hook for the update view; override to customize."""
        return self.check_decorator(viewclass)

    def decorator_delete(self, viewclass):
        """Decorator hook for the delete view; override to customize."""
        return self.check_decorator(viewclass)

    # ------------------------------------------------------------------
    # GET GENERIC CLASS
    # ------------------------------------------------------------------
    def get_create_view_class(self):
        """Base class for the create view; override to swap it out."""
        return CreateView

    def get_create_view(self):
        """Build the concrete create view class (mixin + generic view)."""
        CreateViewClass = self.get_create_view_class()

        class OCreateView(self.mixin, CreateViewClass):
            namespace = self.namespace
            perms = self.perms['create']
            all_perms = self.perms
            form_class = self.add_form
            view_type = 'create'
            views_available = self.views_available[:]
            check_perms = self.check_perms
            template_father = self.template_father
            template_blocks = self.template_blocks
            related_fields = self.related_fields

            def form_valid(self, form):
                if not self.related_fields:
                    return super(OCreateView, self).form_valid(form)
                # Attach the related objects resolved in dispatch() before
                # the new instance is persisted.
                self.object = form.save(commit=False)
                for key, value in self.context_rel.items():
                    setattr(self.object, key, value)
                self.object.save()
                return HttpResponseRedirect(self.get_success_url())

            def get_success_url(self):
                url = super(OCreateView, self).get_success_url()
                if (self.getparams):  # keep the filter querystring on redirect
                    url += '?' + self.getparams
                return url
        return OCreateView

    def get_detail_view_class(self):
        """Base class for the detail view; override to swap it out."""
        return DetailView

    def get_detail_view(self):
        """Build the concrete detail view class (mixin + generic view)."""
        ODetailViewClass = self.get_detail_view_class()

        class ODetailView(self.mixin, ODetailViewClass):
            namespace = self.namespace
            perms = self.perms['detail']
            all_perms = self.perms
            view_type = 'detail'
            display_fields = self.display_fields
            inlines = self.inlines
            views_available = self.views_available[:]
            check_perms = self.check_perms
            template_father = self.template_father
            template_blocks = self.template_blocks
            related_fields = self.related_fields

            def get_success_url(self):
                url = super(ODetailView, self).get_success_url()
                if (self.getparams):  # keep the filter querystring on redirect
                    url += '?' + self.getparams
                return url
        return ODetailView

    def get_update_view_class(self):
        """Base class for the update view; override to swap it out."""
        return UpdateView

    def get_update_view(self):
        """Build the concrete update view class (mixin + generic view)."""
        EditViewClass = self.get_update_view_class()

        class OEditView(self.mixin, EditViewClass):
            namespace = self.namespace
            perms = self.perms['update']
            form_class = self.update_form
            all_perms = self.perms
            view_type = 'update'
            inlines = self.inlines
            views_available = self.views_available[:]
            check_perms = self.check_perms
            template_father = self.template_father
            template_blocks = self.template_blocks
            related_fields = self.related_fields

            def form_valid(self, form):
                if not self.related_fields:
                    return super(OEditView, self).form_valid(form)
                # Re-attach the related objects resolved in dispatch().
                self.object = form.save(commit=False)
                for key, value in self.context_rel.items():
                    setattr(self.object, key, value)
                self.object.save()
                return HttpResponseRedirect(self.get_success_url())

            def get_success_url(self):
                url = super(OEditView, self).get_success_url()
                if (self.getparams):  # keep the filter querystring on redirect
                    url += '?' + self.getparams
                return url
        return OEditView

    def get_list_view_class(self):
        """Base class for the list view; override to swap it out."""
        return ListView

    def get_list_view(self):
        """Build the concrete list view class with search/filter support."""
        OListViewClass = self.get_list_view_class()

        class OListView(self.mixin, OListViewClass):
            namespace = self.namespace
            perms = self.perms['list']
            all_perms = self.perms
            list_fields = self.list_fields
            view_type = 'list'
            paginate_by = self.paginate_by
            views_available = self.views_available[:]
            check_perms = self.check_perms
            template_father = self.template_father
            template_blocks = self.template_blocks
            search_fields = self.search_fields
            split_space_search = self.split_space_search
            related_fields = self.related_fields
            paginate_template = self.paginate_template
            paginate_position = self.paginate_position
            list_filter = self.list_filter

            def get_listfilter_queryset(self, queryset):
                """Apply each configured list filter to *queryset*."""
                if self.list_filter:
                    filters = get_filters(
                        self.model, self.list_filter, self.request)
                    # Renamed loop variable so the builtin filter() is not
                    # shadowed.
                    for qs_filter in filters:
                        queryset = qs_filter.get_filter(queryset)
                return queryset

            def search_queryset(self, query):
                """Filter *query* by the ?q= search term(s) and by the
                related-field context resolved in dispatch()."""
                if self.split_space_search is True:
                    # True means "split on single spaces"; normalize to the
                    # separator token.
                    self.split_space_search = ' '
                if self.search_fields and 'q' in self.request.GET:
                    q = self.request.GET.get('q')
                    if self.split_space_search:
                        q = q.split(self.split_space_search)
                    elif q:
                        q = [q]
                    sfilter = None
                    # OR together one Q per (field, term) pair.
                    for field in self.search_fields:
                        for qsearch in q:
                            if field not in self.context_rel:
                                if sfilter is None:
                                    sfilter = Q(**{field: qsearch})
                                else:
                                    sfilter |= Q(**{field: qsearch})
                    if sfilter is not None:
                        query = query.filter(sfilter)
                if self.related_fields:
                    query = query.filter(**self.context_rel)
                return query

            def get_success_url(self):
                url = super(OListView, self).get_success_url()
                if (self.getparams):  # keep the filter querystring on redirect
                    url += '?' + self.getparams
                return url

            def get_queryset(self):
                queryset = super(OListView, self).get_queryset()
                queryset = self.search_queryset(queryset)
                queryset = self.get_listfilter_queryset(queryset)
                return queryset
        return OListView

    def get_delete_view_class(self):
        """Base class for the delete view; override to swap it out."""
        return DeleteView

    def get_delete_view(self):
        """Build the concrete delete view class (mixin + generic view)."""
        ODeleteClass = self.get_delete_view_class()

        class ODeleteView(self.mixin, ODeleteClass):
            namespace = self.namespace
            perms = self.perms['delete']
            all_perms = self.perms
            view_type = 'delete'
            views_available = self.views_available[:]
            check_perms = self.check_perms
            template_father = self.template_father
            template_blocks = self.template_blocks
            related_fields = self.related_fields

            def get_success_url(self):
                # Debug print() removed: it wrote to stdout on every delete.
                url = super(ODeleteView, self).get_success_url()
                if (self.getparams):  # keep the filter querystring on redirect
                    url += '?' + self.getparams
                return url
        return ODeleteView

    # ------------------------------------------------------------------
    # INITIALIZERS
    # ------------------------------------------------------------------
    def initialize_create(self, basename):
        """Build and store the decorated create view callable."""
        OCreateView = self.get_create_view()
        url = utils.crud_url_name(
            self.model, 'list', prefix=self.urlprefix)
        if self.namespace:
            url = self.namespace + ":" + url
        fields = self.fields
        if self.add_form:
            # A custom form class defines its own fields.
            fields = None
        self.create = self.decorator_create(OCreateView.as_view(
            model=self.model,
            fields=fields,
            success_url=reverse_lazy(url),
            template_name=basename
        ))

    def initialize_detail(self, basename):
        """Build and store the decorated detail view callable."""
        ODetailView = self.get_detail_view()
        self.detail = self.decorator_detail(
            ODetailView.as_view(
                model=self.model,
                template_name=basename
            ))

    def initialize_update(self, basename):
        """Build and store the decorated update view callable."""
        OUpdateView = self.get_update_view()
        url = utils.crud_url_name(
            self.model, 'list', prefix=self.urlprefix)
        if self.namespace:
            url = self.namespace + ":" + url
        fields = self.fields
        if self.update_form:
            # A custom form class defines its own fields.
            fields = None
        self.update = self.decorator_update(OUpdateView.as_view(
            model=self.model,
            fields=fields,
            success_url=reverse_lazy(url),
            template_name=basename
        ))

    def initialize_list(self, basename):
        """Build and store the decorated list view callable."""
        OListView = self.get_list_view()
        self.list = self.decorator_list(OListView.as_view(
            model=self.model,
            template_name=basename
        ))

    def initialize_delete(self, basename):
        """Build and store the decorated delete view callable."""
        ODeleteView = self.get_delete_view()
        url = utils.crud_url_name(
            self.model, 'list', prefix=self.urlprefix)
        if self.namespace:
            url = self.namespace + ":" + url
        self.delete = self.decorator_delete(ODeleteView.as_view(
            model=self.model,
            success_url=reverse_lazy(url),
            template_name=basename
        ))

    def get_base_name(self):
        """Return the template folder: template_name_base, or app/model
        when template_name_base is falsy."""
        ns = self.template_name_base
        if not self.template_name_base:
            ns = "%s/%s" % (
                self.model._meta.app_label,
                self.model.__name__.lower())
        return ns

    def check_create_perm(self, applabel, name):
        """Ensure a ``view_<model>`` Permission row exists for the model,
        creating it (and the content type) when missing."""
        notfollow = False
        try:
            model, created = ContentType.objects.get_or_create(
                app_label=applabel, model=name)
        except Exception:
            # Narrowed from a bare except: still best-effort, but no longer
            # swallows SystemExit/KeyboardInterrupt.
            notfollow = True
        if not notfollow and not Permission.objects.filter(
                content_type=model,
                codename="view_%s" % (name,)).exists():
            Permission.objects.create(
                content_type=model,
                codename="view_%s" % (name,),
                name=_("Can see available %s" % (name,)))

    def initialize_perms(self):
        """Populate self.perms, adding the default django model perms when
        check_perms is enabled."""
        if self.perms is None:
            self.perms = {
                'create': [],
                'list': [],
                'delete': [],
                'update': [],
                'detail': []
            }
        applabel = self.model._meta.app_label
        name = self.model.__name__.lower()
        if self.check_perms:
            self.check_create_perm(applabel, name)
            self.perms['create'].append("%s.add_%s" % (applabel, name))
            self.perms['update'].append("%s.change_%s" % (applabel, name))
            self.perms['delete'].append("%s.delete_%s" % (applabel, name))
            # maybe other default perm can be here
            self.perms['list'].append("%s.view_%s" % (applabel, name))
            self.perms['detail'].append("%s.view_%s" % (applabel, name))

    def initialize_views_available(self):
        """Default views_available to the full set when not provided."""
        if self.views_available is None:
            self.views_available = [
                'create', 'list', 'delete', 'update', 'detail']

    def __init__(
        self, namespace=None, model=None, template_name_base=None,
        mixin=None
    ):
        """Apply constructor overrides, then build every enabled view."""
        if namespace:
            self.namespace = namespace
        if model:
            self.model = model
        if template_name_base:
            self.template_name_base = template_name_base
        if mixin:
            self.mixin = mixin
        basename = self.get_base_name()
        self.initialize_views_available()
        self.initialize_perms()
        if 'create' in self.views_available:
            self.initialize_create(basename + '/create.html')
        if 'detail' in self.views_available:
            self.initialize_detail(basename + '/detail.html')
        if 'update' in self.views_available:
            self.initialize_update(basename + '/update.html')
        if 'list' in self.views_available:
            self.initialize_list(basename + '/list.html')
        if 'delete' in self.views_available:
            self.initialize_delete(basename + '/delete.html')

    def get_urls(self):
        """Return the url() patterns for every enabled view, plus inlines."""
        pre = ""
        try:
            if self.cruds_url:
                pre = "%s/" % self.cruds_url
        except AttributeError:
            pre = ""
        base_name = "%s%s/%s" % (pre, self.model._meta.app_label,
                                 self.model.__name__.lower())
        myurls = []
        if 'list' in self.views_available:
            myurls.append(url("^%s/list$" % (base_name,),
                              self.list,
                              name=utils.crud_url_name(
                                  self.model, 'list', prefix=self.urlprefix)))
        if 'create' in self.views_available:
            myurls.append(url("^%s/create$" % (base_name,),
                              self.create,
                              name=utils.crud_url_name(
                                  self.model, 'create', prefix=self.urlprefix))
                          )
        if 'detail' in self.views_available:
            myurls.append(url('^%s/(?P<pk>[^/]+)$' % (base_name,),
                              self.detail,
                              name=utils.crud_url_name(
                                  self.model, 'detail', prefix=self.urlprefix))
                          )
        if 'update' in self.views_available:
            myurls.append(url("^%s/(?P<pk>[^/]+)/update$" % (base_name,),
                              self.update,
                              name=utils.crud_url_name(
                                  self.model, 'update', prefix=self.urlprefix))
                          )
        if 'delete' in self.views_available:
            myurls.append(url(r"^%s/(?P<pk>[^/]+)/delete$" % (base_name,),
                              self.delete,
                              name=utils.crud_url_name(
                                  self.model, 'delete', prefix=self.urlprefix))
                          )
        myurls += self.add_inlines(base_name)
        return myurls

    def add_inlines(self, base_name):
        """Mount each inline CRUD under ^inline/ and return its patterns."""
        dev = []
        if self.inlines:
            for i, inline in enumerate(self.inlines):
                klass = inline
                if isinstance(klass, type):
                    # FIXME: This is a dirty hack to act on repeated calls to
                    # get_urls() as those mean that inline is a type instance
                    # not a class from the second run onwards.
                    klass = klass()
                    self.inlines[i] = klass
                if self.namespace:
                    dev.append(
                        url('^inline/',
                            include(klass.get_urls(),
                                    namespace=self.namespace))
                    )
                else:
                    dev.append(
                        url('^inline/', include(klass.get_urls()))
                    )
        return dev
class UserCRUDView(CRUDView):
    """CRUDView variant that stamps ``request.user`` onto objects saved by
    the create/update views and limits the list view to the requester's
    own rows (``filter(user=...)``)."""

    def get_create_view(self):
        parent = super(UserCRUDView, self).get_create_view()

        class UCreateView(parent):

            def form_valid(self, form):
                # Attach ownership before the new object is persisted.
                self.object = form.save(commit=False)
                self.object.user = self.request.user
                self.object.save()
                return HttpResponseRedirect(self.get_success_url())

        return UCreateView

    def get_update_view(self):
        parent = super(UserCRUDView, self).get_update_view()

        class UUpdateView(parent):

            def form_valid(self, form):
                # Re-stamp ownership on every update as well.
                self.object = form.save(commit=False)
                self.object.user = self.request.user
                self.object.save()
                return HttpResponseRedirect(self.get_success_url())

        return UUpdateView

    def get_list_view(self):
        parent = super(UserCRUDView, self).get_list_view()

        class UListView(parent):

            def get_queryset(self):
                # Restrict the base queryset to rows owned by the requester.
                base = super(UListView, self).get_queryset()
                return base.filter(user=self.request.user)

        return UListView
|
{
"content_hash": "844b5e53c822ecd3eac9f7506f291711",
"timestamp": "",
"source": "github",
"line_count": 791,
"max_line_length": 90,
"avg_line_length": 34.97597977243995,
"alnum_prop": 0.5278681414009976,
"repo_name": "oscarmlage/django-cruds-adminlte",
"id": "022508880ef79b547ae1fb7e7c78e64fbf84a382",
"size": "27685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cruds_adminlte/crud.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "246922"
},
{
"name": "HTML",
"bytes": "76561"
},
{
"name": "JavaScript",
"bytes": "246595"
},
{
"name": "Python",
"bytes": "70399"
}
],
"symlink_target": ""
}
|
import unittest
from apps.computeSPVariability2 import *
class PDBTestCase(unittest.TestCase):
    """Smoke test driving computeSPVariability2.operate on a local
    CATH alignment file."""

    def testOperate(self):
        """Run operate() end-to-end against the fixture alignment."""
        working_directory = "/home/jatienza/Desktop/cathAnalysis/eclipseProject/cathAnalysis/src/launchers/kk/"
        aln_file_name = working_directory + "1.10.10.180.mmult-FINAL.aln"
        operate(
            working_directory,
            aln_file_name,
            0.2,   # minimumPercentage
            True,  # verbose
        )
|
{
"content_hash": "713d875f5df1bba0c8fb5e98ebfe8d1a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 111,
"avg_line_length": 49.77777777777778,
"alnum_prop": 0.71875,
"repo_name": "julianah/cathAnalysis",
"id": "11e5acd1df9d68319a1cce1f7e30c752eaf31e38",
"size": "448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/computeSPVariabilityTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "139787"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_array_equal
from seaborn._core.groupby import GroupBy
from seaborn._stats.counting import Hist, Count
class TestCount:
    """Tests for the Count stat over single and multiple grouping columns."""

    @pytest.fixture
    def df(self, rng):
        # Note: the rng calls must stay in this order so the fixture data
        # is reproducible across the suite.
        size = 30
        return pd.DataFrame({
            "x": rng.uniform(0, 7, size).round(),
            "y": rng.normal(size=size),
            "color": rng.choice(["a", "b", "c"], size),
            "group": rng.choice(["x", "y"], size),
        })

    def get_groupby(self, df, orient):
        """GroupBy over every column except the value axis opposite *orient*."""
        value_var = {"x": "y", "y": "x"}[orient]
        return GroupBy([col for col in df if col != value_var])

    def test_single_grouper(self, df):
        orient = "x"
        data = df[["x"]]
        gb = self.get_groupby(data, orient)
        out = Count()(data, gb, orient, {})
        assert_array_equal(out.sort_values("x")["y"], data.groupby("x").size())

    def test_multiple_groupers(self, df):
        orient = "x"
        data = df[["x", "group"]].sort_values("group")
        gb = self.get_groupby(data, orient)
        out = Count()(data, gb, orient, {})
        expected = data.groupby(["x", "group"]).size()
        assert_array_equal(out.sort_values(["x", "group"])["y"], expected)
class TestHist:
    """Tests for the Hist stat: bin-parameter resolution, normalization
    stats, cumulative modes, and common_norm / common_bins grouping."""

    @pytest.fixture
    def single_args(self):
        # Minimal Hist call signature: group only by the implicit "group"
        # column, with a stand-in continuous Scale for x.
        groupby = GroupBy(["group"])

        class Scale:
            scale_type = "continuous"
        return groupby, "x", {"x": Scale()}

    @pytest.fixture
    def triple_args(self):
        # Like single_args, plus the "a" and "s" grouping variables used by
        # the common_norm / common_bins tests.
        groupby = GroupBy(["group", "a", "s"])

        class Scale:
            scale_type = "continuous"
        return groupby, "x", {"x": Scale()}

    # --- bin parameter resolution -------------------------------------

    def test_string_bins(self, long_df):
        h = Hist(bins="sqrt")
        bin_kws = h._define_bin_params(long_df, "x", "continuous")
        assert bin_kws["range"] == (long_df["x"].min(), long_df["x"].max())
        assert bin_kws["bins"] == int(np.sqrt(len(long_df)))

    def test_int_bins(self, long_df):
        n = 24
        h = Hist(bins=n)
        bin_kws = h._define_bin_params(long_df, "x", "continuous")
        assert bin_kws["range"] == (long_df["x"].min(), long_df["x"].max())
        assert bin_kws["bins"] == n

    def test_array_bins(self, long_df):
        bins = [-3, -2, 1, 2, 3]
        h = Hist(bins=bins)
        bin_kws = h._define_bin_params(long_df, "x", "continuous")
        assert_array_equal(bin_kws["bins"], bins)

    def test_binwidth(self, long_df):
        binwidth = .5
        h = Hist(binwidth=binwidth)
        bin_kws = h._define_bin_params(long_df, "x", "continuous")
        n_bins = bin_kws["bins"]
        left, right = bin_kws["range"]
        assert (right - left) / n_bins == pytest.approx(binwidth)

    def test_binrange(self, long_df):
        binrange = (-4, 4)
        h = Hist(binrange=binrange)
        bin_kws = h._define_bin_params(long_df, "x", "continuous")
        assert bin_kws["range"] == binrange

    def test_discrete_bins(self, long_df):
        # discrete=True: one unit-wide bin centered on each integer value.
        h = Hist(discrete=True)
        x = long_df["x"].astype(int)
        bin_kws = h._define_bin_params(long_df.assign(x=x), "x", "continuous")
        assert bin_kws["range"] == (x.min() - .5, x.max() + .5)
        assert bin_kws["bins"] == (x.max() - x.min() + 1)

    def test_discrete_bins_from_nominal_scale(self, rng):
        # A nominal scale should produce the same bin params as discrete=True.
        h = Hist()
        x = rng.randint(0, 5, 10)
        df = pd.DataFrame({"x": x})
        bin_kws = h._define_bin_params(df, "x", "nominal")
        assert bin_kws["range"] == (x.min() - .5, x.max() + .5)
        assert bin_kws["bins"] == (x.max() - x.min() + 1)

    # --- stat normalization -------------------------------------------

    def test_count_stat(self, long_df, single_args):
        h = Hist(stat="count")
        out = h(long_df, *single_args)
        assert out["y"].sum() == len(long_df)

    def test_probability_stat(self, long_df, single_args):
        h = Hist(stat="probability")
        out = h(long_df, *single_args)
        assert out["y"].sum() == 1

    def test_proportion_stat(self, long_df, single_args):
        # "proportion" is an alias with the same normalization as
        # "probability" (both sum to 1).
        h = Hist(stat="proportion")
        out = h(long_df, *single_args)
        assert out["y"].sum() == 1

    def test_percent_stat(self, long_df, single_args):
        h = Hist(stat="percent")
        out = h(long_df, *single_args)
        assert out["y"].sum() == 100

    def test_density_stat(self, long_df, single_args):
        # Density integrates to 1 over the bin widths ("space" column).
        h = Hist(stat="density")
        out = h(long_df, *single_args)
        assert (out["y"] * out["space"]).sum() == 1

    def test_frequency_stat(self, long_df, single_args):
        # Frequency integrates to the total observation count.
        h = Hist(stat="frequency")
        out = h(long_df, *single_args)
        assert (out["y"] * out["space"]).sum() == len(long_df)

    def test_invalid_stat(self):
        with pytest.raises(ValueError, match="The `stat` parameter for `Hist`"):
            Hist(stat="invalid")

    # --- cumulative mode ----------------------------------------------

    def test_cumulative_count(self, long_df, single_args):
        h = Hist(stat="count", cumulative=True)
        out = h(long_df, *single_args)
        assert out["y"].max() == len(long_df)

    def test_cumulative_proportion(self, long_df, single_args):
        h = Hist(stat="proportion", cumulative=True)
        out = h(long_df, *single_args)
        assert out["y"].max() == 1

    def test_cumulative_density(self, long_df, single_args):
        h = Hist(stat="density", cumulative=True)
        out = h(long_df, *single_args)
        assert out["y"].max() == 1

    # --- common_norm --------------------------------------------------

    def test_common_norm_default(self, long_df, triple_args):
        # Default: one shared normalization across all groups.
        h = Hist(stat="percent")
        out = h(long_df, *triple_args)
        assert out["y"].sum() == pytest.approx(100)

    def test_common_norm_false(self, long_df, triple_args):
        # common_norm=False: each (a, s) group normalizes independently.
        h = Hist(stat="percent", common_norm=False)
        out = h(long_df, *triple_args)
        for _, out_part in out.groupby(["a", "s"]):
            assert out_part["y"].sum() == pytest.approx(100)

    def test_common_norm_subset(self, long_df, triple_args):
        # common_norm=["a"]: normalization is shared within each "a" level.
        h = Hist(stat="percent", common_norm=["a"])
        out = h(long_df, *triple_args)
        for _, out_part in out.groupby("a"):
            assert out_part["y"].sum() == pytest.approx(100)

    def test_common_norm_warning(self, long_df, triple_args):
        # Referencing a variable not in the grouping should warn.
        h = Hist(common_norm=["b"])
        with pytest.warns(UserWarning, match=r"Undefined variable\(s\)"):
            h(long_df, *triple_args)

    # --- common_bins --------------------------------------------------

    def test_common_bins_default(self, long_df, triple_args):
        # Default: all groups share one set of bin edges.
        h = Hist()
        out = h(long_df, *triple_args)
        bins = []
        for _, out_part in out.groupby(["a", "s"]):
            bins.append(tuple(out_part["x"]))
        assert len(set(bins)) == 1

    def test_common_bins_false(self, long_df, triple_args):
        # common_bins=False: each (a, s) group gets its own bin edges.
        h = Hist(common_bins=False)
        out = h(long_df, *triple_args)
        bins = []
        for _, out_part in out.groupby(["a", "s"]):
            bins.append(tuple(out_part["x"]))
        assert len(set(bins)) == len(out.groupby(["a", "s"]))

    def test_common_bins_subset(self, long_df, triple_args):
        # NOTE(review): this uses common_bins=False (same setup as
        # test_common_bins_false) but groups the output by "a" only; the
        # name suggests common_bins=["a"] may have been intended — verify
        # against upstream before changing.
        h = Hist(common_bins=False)
        out = h(long_df, *triple_args)
        bins = []
        for _, out_part in out.groupby("a"):
            bins.append(tuple(out_part["x"]))
        assert len(set(bins)) == out["a"].nunique()

    def test_common_bins_warning(self, long_df, triple_args):
        # Referencing a variable not in the grouping should warn.
        h = Hist(common_bins=["b"])
        with pytest.warns(UserWarning, match=r"Undefined variable\(s\)"):
            h(long_df, *triple_args)

    # --- agreement with numpy ----------------------------------------

    def test_histogram_single(self, long_df, single_args):
        # Hist output must match numpy's histogram on the raw column.
        h = Hist()
        out = h(long_df, *single_args)
        hist, edges = np.histogram(long_df["x"], bins="auto")
        assert_array_equal(out["y"], hist)
        assert_array_equal(out["space"], np.diff(edges))

    def test_histogram_multiple(self, long_df, triple_args):
        # Per-group counts must match numpy using the shared bin edges.
        h = Hist()
        out = h(long_df, *triple_args)
        bins = np.histogram_bin_edges(long_df["x"], "auto")
        for (a, s), out_part in out.groupby(["a", "s"]):
            x = long_df.loc[(long_df["a"] == a) & (long_df["s"] == s), "x"]
            hist, edges = np.histogram(x, bins=bins)
            assert_array_equal(out_part["y"], hist)
            assert_array_equal(out_part["space"], np.diff(edges))
|
{
"content_hash": "6c96a8a99cd821a774bc6de90c26da90",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 80,
"avg_line_length": 31.149425287356323,
"alnum_prop": 0.5428044280442804,
"repo_name": "mwaskom/seaborn",
"id": "7656654492aa5ce56d47cbe9cf923376ed714643",
"size": "8131",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/_stats/test_counting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "211"
},
{
"name": "Python",
"bytes": "1688263"
},
{
"name": "Shell",
"bytes": "142"
}
],
"symlink_target": ""
}
|
__author__ = 'Pabitra'
from django.test import TestCase
from hs_core import hydroshare
from hs_core.models import AbstractResource
class TestGetResourceTypesAPI(TestCase):
    """Verify hydroshare.get_resource_types() yields AbstractResource types."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_get_resource_types(self):
        # The API under test.
        resource_types = hydroshare.get_resource_types()
        # Every advertised resource type must derive from AbstractResource.
        for resource_type in resource_types:
            self.assertEqual(issubclass(resource_type, AbstractResource), True)
        # not sure what else can be tested
|
{
"content_hash": "ddfeb8ff2c76be6aed42c836477bbb77",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 26.82608695652174,
"alnum_prop": 0.6807131280388979,
"repo_name": "hydroshare/hydroshare_temp",
"id": "994b81a7a6d4d96a45245a6e1bbc7cd68810fea4",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hs_core/tests/api/native/test_get_resource_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "173515"
},
{
"name": "C++",
"bytes": "4136"
},
{
"name": "CSS",
"bytes": "228598"
},
{
"name": "CoffeeScript",
"bytes": "34267"
},
{
"name": "JavaScript",
"bytes": "736373"
},
{
"name": "Python",
"bytes": "1870088"
},
{
"name": "Shell",
"bytes": "5335"
},
{
"name": "XSLT",
"bytes": "790987"
}
],
"symlink_target": ""
}
|
import argparse
from pipeline.backend.pipeline import PipeLine
from pipeline.component import DataTransform
from pipeline.component import HeteroFeatureBinning
from pipeline.component import Intersection
from pipeline.component import Reader
from pipeline.interface import Data
from pipeline.utils.tools import load_job_config
def main(config="../../config.yaml", namespace=""):
    """Build and fit a hetero feature-binning FATE pipeline:
    read guest/host tables, transform, intersect, then bin features."""
    # Accept either a config object or a path to the YAML job config.
    if isinstance(config, str):
        config = load_job_config(config)
    guest = config.parties.guest[0]
    host = config.parties.host[0]

    guest_train_data = {"name": "breast_hetero_guest",
                        "namespace": f"experiment{namespace}"}
    host_train_data = {"name": "breast_hetero_host",
                       "namespace": f"experiment{namespace}"}

    pipeline = (
        PipeLine()
        .set_initiator(role='guest', party_id=guest)
        .set_roles(guest=guest, host=host)
    )

    # Reader: each party points at its own uploaded table.
    reader_0 = Reader(name="reader_0")
    reader_0.get_party_instance(
        role='guest', party_id=guest).component_param(table=guest_train_data)
    reader_0.get_party_instance(
        role='host', party_id=host).component_param(table=host_train_data)

    # Data transform: only the guest side carries labels.
    data_transform_0 = DataTransform(name="data_transform_0")
    data_transform_0.get_party_instance(
        role='guest', party_id=guest).component_param(with_label=True)
    data_transform_0.get_party_instance(
        role='host', party_id=host).component_param(with_label=False)

    intersection_0 = Intersection(name="intersection_0")

    # Binning configuration (values preserved from the job definition).
    param = {
        "method": "quantile",
        "compress_thres": 10000,
        "head_size": 10000,
        "error": 0.001,
        "bin_num": 100,
        "bin_indexes": -1,
        "bin_names": None,
        "category_indexes": None,
        "category_names": None,
        "adjustment_factor": 0.5,
        "local_only": False,
        "transform_param": {
            "transform_cols": -1,
            "transform_names": None,
            "transform_type": "bin_num"
        }
    }
    hetero_feature_binning_0 = HeteroFeatureBinning(
        name="hetero_feature_binning_0", **param)

    # Wire the components in execution order.
    pipeline.add_component(reader_0)
    pipeline.add_component(
        data_transform_0, data=Data(data=reader_0.output.data))
    pipeline.add_component(
        intersection_0, data=Data(data=data_transform_0.output.data))
    pipeline.add_component(
        hetero_feature_binning_0, data=Data(data=intersection_0.output.data))

    pipeline.compile()
    pipeline.fit()
if __name__ == "__main__":
    # CLI entry: an optional -config path overrides the default job config.
    parser = argparse.ArgumentParser("PIPELINE DEMO")
    parser.add_argument("-config", type=str, help="config file")
    cli_args = parser.parse_args()
    if cli_args.config is None:
        main()
    else:
        main(cli_args.config)
|
{
"content_hash": "5e4450aadc33199d0151d2c12d4984fa",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 103,
"avg_line_length": 35.729729729729726,
"alnum_prop": 0.6656580937972768,
"repo_name": "FederatedAI/FATE",
"id": "061111e53b6da37a93ab2b26e9e11ade2553a058",
"size": "3261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/pipeline/hetero_feature_binning/pipeline-hetero-binning-large-bin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
class DummyPostData(dict):
    """Minimal stand-in for a multi-value form-data mapping: ``getlist``
    always yields a sequence for a key."""

    def getlist(self, key):
        """Return the value under *key*; scalars are wrapped in a list,
        existing lists/tuples are returned unchanged."""
        value = self[key]
        if isinstance(value, (list, tuple)):
            return value
        return [value]
|
{
"content_hash": "b66c3d3012f01716d8077f2b4728e85f",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 44,
"avg_line_length": 26.5,
"alnum_prop": 0.5094339622641509,
"repo_name": "wtforms/wtforms",
"id": "c8d28a0f34803d1ddfe6beb2e3ade4a03396a855",
"size": "159",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "222611"
}
],
"symlink_target": ""
}
|
"""
LlamaLab Automate notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.llamalab_automate/
"""
import logging
import requests
import voluptuous as vol
from homeassistant.components.notify import (BaseNotificationService,
PLATFORM_SCHEMA)
from homeassistant.const import CONF_API_KEY
from homeassistant.helpers import config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration keys accepted by this notify platform.
CONF_TO = 'to'
CONF_DEVICE = 'device'

# LlamaLab Automate cloud messaging endpoint used by send_message().
_RESOURCE = 'https://llamalab.com/automate/cloud/message'

# Extend the base notify schema: API key and recipient are required,
# the target device is optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_API_KEY): cv.string,
    vol.Required(CONF_TO): cv.string,
    vol.Optional(CONF_DEVICE): cv.string,
})
def get_service(hass, config):
    """Build the LlamaLab Automate notification service from platform config."""
    return AutomateNotificationService(
        config.get(CONF_API_KEY),
        config.get(CONF_TO),
        config.get(CONF_DEVICE),
    )
# pylint: disable=too-few-public-methods
class AutomateNotificationService(BaseNotificationService):
    """Implement the notification service for LlamaLab Automate."""

    def __init__(self, secret, recipient, device=None):
        """Initialize the service.

        secret: the account secret (CONF_API_KEY) sent with every message.
        recipient: the account the message is addressed to (CONF_TO).
        device: optional specific device of the recipient (CONF_DEVICE).
        """
        self._secret = secret
        self._recipient = recipient
        self._device = device

    def send_message(self, message="", **kwargs):
        """Send a message to a user via the Automate cloud endpoint."""
        _LOGGER.debug("Sending to: %s, %s", self._recipient, str(self._device))
        data = {
            "secret": self._secret,
            "to": self._recipient,
            "device": self._device,
            "payload": message,
        }
        # timeout so a stalled cloud endpoint cannot hang the caller forever
        response = requests.post(_RESOURCE, json=data, timeout=10)
        if response.status_code != 200:
            # Lazy %-formatting: message is only rendered if actually logged.
            _LOGGER.error("Error sending message: %s", response)
|
{
"content_hash": "6f5f2e006eca1652b1526134f76e56a3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 32.016666666666666,
"alnum_prop": 0.6631962519521083,
"repo_name": "Smart-Torvy/torvy-home-assistant",
"id": "7a00b5ba237e34b6e90b1c64d2c7469c7fadd05c",
"size": "1921",
"binary": false,
"copies": "5",
"ref": "refs/heads/torvy",
"path": "homeassistant/components/notify/llamalab_automate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1373149"
},
{
"name": "Python",
"bytes": "3734212"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
import sqlite3
import json
from flask import Flask, request, session, g, redirect, \
url_for, abort, render_template, flash
from contextlib import closing
import bcrypt
import game
# -- configuration -----------------------------------------------------
DATABASE = 'db.db'  # SQLite database file opened by connect_db()
# debug flag
DEBUG = True  # NOTE(review): disable before deploying; enables the debugger
app = Flask(__name__)
app.config.from_object(__name__)  # load the UPPERCASE settings defined above
# NOTE(review): the secret key is hard-coded in source control; load it from
# the environment and rotate it before any production deployment.
app.secret_key = b'\t\x00\x8dSAc\x1fM\x9e\x1d0!\x94\x90\xe0\x90\xda\xac\x1a\xdf\xaa3\xd5Q'
def connect_db():
    """Open a new connection to the app's configured SQLite database."""
    db_path = app.config['DATABASE']
    return sqlite3.connect(db_path)
@app.before_request
def before_request():
    """Open a database connection for the incoming request and stash it on g."""
    g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
    """Close the per-request database connection, if one was opened."""
    connection = getattr(g, 'db', None)
    if connection is not None:
        connection.close()
def get_username(uid):
    """Look up a user's name by id; return None when no such user exists."""
    rows = g.db.execute('select username from users where id=?',
                        (uid,)).fetchall()
    if not rows:
        return None
    return rows[0][0]
@app.route('/games')
def games():
    """List the logged-in user's games and whether they are on the waitlist."""
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    uname = session['username']
    uid = g.db.execute('select id from users where username=?',
                       (uname,)).fetchall()[0][0]
    rows = g.db.execute('select * from games where player1=? or player2=?',
                        (uid, uid)).fetchall()
    glist = []
    for row in rows:
        # Row layout (see play()/newgame()): (id, state, whose_turn,
        # player1, player2) — player1/player2 hold user *ids*.
        # BUG FIX: the original compared row[3] (an id) to the username
        # string, so the first branch could never match; compare ids, and
        # use a consistent "is it this user's turn" flag in both branches.
        if row[3] == uid:
            glist.append((get_username(row[4]), row[2] == uid, row[0]))
        else:
            glist.append((get_username(row[3]), row[2] == uid, row[0]))
    app.logger.debug(glist)
    games_waiting = g.db.execute('select * from waiting where player=?',
                                 (uid,)).fetchall()
    waiting = len(games_waiting) != 0
    app.logger.debug(waiting)
    return render_template('games.html', waiting=waiting, glist=glist)
@app.route('/newgame')
def newgame():
    """Pair the user with a waiting opponent, or put them on the waitlist."""
    if 'logged_in' not in session:
        return redirect(url_for("index"))
    app.logger.debug(session['username'])
    uid = g.db.execute('select id from users where username=?',
                       (session['username'],)).fetchall()[0][0]
    wait_entries = g.db.execute('select * from waiting').fetchall()
    # Already queued? Don't enqueue twice.
    for entry in wait_entries:
        if uid == entry[1]:
            return redirect(url_for("games"))
    # Take the first waiting opponent and start a game with them.
    for entry in wait_entries:
        opp_id = entry[1]
        if opp_id != uid:
            g.db.execute('delete from waiting where id=?', (entry[0],))
            g.db.execute('insert into games (player1, player2, whose_turn) values (?,?,?)',
                         (uid, opp_id, uid))
            g.db.commit()
            game_id = g.db.execute(
                'select id from games where player1=? and player2=?',
                (uid, opp_id)).fetchall()
            return redirect(url_for("play", game_id=game_id[0][0]))
    # Nobody available: join the waitlist.
    g.db.execute('insert into waiting (player) values (?)', (uid,))
    g.db.commit()
    return redirect(url_for("games"))
@app.route('/play/<int:game_id>')
def play(game_id):
    """Render the board for one game, coloring each cell by its owner."""
    g.game_id = game_id
    rows = g.db.execute('select * from games where id=?',
                        (game_id,)).fetchall()
    if rows == []:
        return redirect(url_for("games"))
    record = rows[0]
    # Row layout: (id, state, whose_turn, player1, player2).
    the_gm = game.Game(record[3], record[4], record[0])
    the_gm.import_string(record[1])
    app.logger.debug(record[1])
    uid = session['uid']
    waiting = record[2] != uid  # waiting while it is the opponent's turn
    # Color every cell: red for player 1, blue for player 2, yellow otherwise.
    g.color_table = {}
    for cell_group in game.cell_groups:
        for cell in cell_group:
            if cell in the_gm.p1_cells:
                g.color_table[cell] = "ff0000"
            elif cell in the_gm.p2_cells:
                g.color_table[cell] = "0000ff"
            else:
                g.color_table[cell] = "ffff00"
    return render_template('play.html', waiting=waiting)
@app.route('/forefeit/<int:game_id>')
def forefeit(game_id):
    """Forfeit a game: the opponent is credited a win, the caller a loss.

    BUG FIX: the original query was ``select (player1,player2) ...`` -- a
    row-value, which sqlite rejects as a result column ("row value misused")
    -- and it then read columns 2 and 3 of a result set that would only have
    had two columns.  Select the two columns explicitly and index them 0/1.
    """
    g_q = g.db.execute('select player1, player2 from games where id=?',
            (game_id,)).fetchall()
    p1 = g_q[0][0]
    p2 = g_q[0][1]
    # The forfeiting player (the caller) loses; the other player wins.
    if p1 == session['uid']:
        winner = p2
        loser = p1
    else:
        winner = p1
        loser = p2
    g.db.execute("delete from games where id=?",(game_id,))
    g.db.execute("update users set wins=wins+1 where id=?",(winner,))
    g.db.execute("update users set losses=losses+1 where id=?",(loser,))
    g.db.commit()
    return redirect(url_for("games"))
@app.route('/submit/<int:game_id>/<move>')
def submit(game_id, move):
    """Apply ``move`` for the current user; finish the game if it ends."""
    # get game from db
    db_gm = g.db.execute('select * from games where id=?',(game_id,))
    gdata = db_gm.fetchall()
    if gdata == []:
        return redirect(url_for("index"))
    # Row layout: (id, board, whose_turn, player1, player2).
    the_gm = game.Game(gdata[0][3],gdata[0][4],game_id)
    the_gm.import_string(gdata[0][1])
    # check right user
    curr_user = session['username']
    user_id_q = g.db.execute('select id from users where username=?',
            (curr_user,)).fetchall()
    if user_id_q == []:
        return redirect(url_for("play",game_id=game_id))
    app.logger.debug(move)
    uid = user_id_q[0][0]
    app.logger.debug(uid)
    app.logger.debug(gdata)
    # Not this player's turn: silently bounce back to the board.
    if uid!=gdata[0][2]: # don't need to do more than this?
        return redirect(url_for("play",game_id=game_id))
    # checks move valid
    app.logger.debug(the_gm.open_cells)
    if move not in the_gm.open_cells:
        return redirect(url_for("play",game_id=game_id))
    # Apply the move to the in-memory game state.
    the_gm.move(uid,move)
    app.logger.debug(the_gm.is_over())
    if the_gm.is_over():
        # Game over: record win/loss and drop the game row.
        winner = the_gm.get_winner()
        loser = the_gm.p1 if winner==the_gm.p2 else the_gm.p2
        g.db.execute('delete from games where id=?',
                (the_gm.g_id,))
        g.db.execute('update users set wins=wins+1 where id=?',
                (winner,))
        g.db.execute('update users set losses=losses+1 where id=?',
                (loser,))
        g.db.commit()
        return redirect(url_for("games"))
    # Persist the new board state and hand the turn to the opponent.
    estr = the_gm.export_string()
    app.logger.debug(game_id)
    opp_id = the_gm.p1 if the_gm.p2==uid else the_gm.p2
    g.db.execute('update games set board=? where id=?',(estr,game_id))
    g.db.execute('update games set whose_turn=? where id=?',
            (opp_id,game_id))
    g.db.commit()
    return redirect(url_for("play",game_id=game_id))
@app.route('/logout')
def logout():
    """Log the user out; pop() with a default makes this safe when not logged in."""
    session.pop('logged_in',None)
    return redirect(url_for("index"))
@app.route('/login', methods=['POST'])
def login():
    """Handle both login and registration posted from the same form.

    The submit button that was pressed ('login' vs 'register') selects the
    branch.
    """
    if request.method == 'POST':
        user = g.db.execute('select * from users where username=?',
                (request.form.get('username'),)).fetchall()
        user_exists = len(user)!=0
        if request.form.get('login')!=None:
            if request.form.get('username')==None:
                return redirect(url_for("index"))
            if not user_exists:
                flash(u'No account with this username exists','login error')
                return redirect(url_for("index"))
            pw_sql = g.db.execute('select pw_hash from users where username=?',
                    (request.form.get('username'),))
            pw_hash = pw_sql.fetchall()[0][0]
            pw_plain = request.form.get('password').encode('UTF-8')
            # bcrypt check: hashing the candidate with the stored value (which
            # embeds the salt) must reproduce the stored hash.
            # NOTE(review): `==` is not a constant-time comparison; consider
            # bcrypt.checkpw / hmac.compare_digest.
            if bcrypt.hashpw(pw_plain, pw_hash) == pw_hash:
                session['logged_in'] = True
                session['username'] = request.form.get('username')
                session['uid'] = user[0][0]
                return redirect(url_for("games"))
            else:
                flash(u'Wrong Password','login error')
                return redirect(url_for("index"))
        elif request.form.get('register')!=None:
            if(user_exists):
                flash(u'Username already taken','login error')
                return redirect(url_for("index"))
            pw_plain = request.form.get('password').encode('UTF-8')
            pw_hash = bcrypt.hashpw(pw_plain, bcrypt.gensalt())
            g.db.execute('insert into users (username, pw_hash) values (?, ?)',
                    (request.form.get('username'),pw_hash))
            g.db.commit()
            return redirect(url_for("games"))
        # NOTE(review): falling through (neither button present) returns None,
        # which Flask turns into a 500; confirm whether a redirect is intended.
@app.route('/')
def index():
    """Render the landing-page template."""
    return render_template('index.html')
if __name__ == '__main__':
    # BUG FIX: the original read `app.run` -- a bare reference to the bound
    # method without calling it -- so the development server never started.
    app.run()
|
{
"content_hash": "f610ffaa984612559211e6be83a274dc",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 112,
"avg_line_length": 34.239669421487605,
"alnum_prop": 0.5664977069756215,
"repo_name": "eli173/star",
"id": "7e62512775e3943b6acdd487870f84515b1f1f0e",
"size": "8368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "star.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "957"
},
{
"name": "HTML",
"bytes": "66737"
},
{
"name": "Python",
"bytes": "23599"
}
],
"symlink_target": ""
}
|
from flask_testing import TestCase
from fortunate.utils import make_app
class ViewsMixin(object):
    """View tests shared by every storage-backend TestCase subclass."""

    # Small fixed pool of distinct fortunes used across the tests.
    fortunes = list('abcdefg')

    def set_fortune(self, *args, **kwargs):
        # Helper: POST the kwargs as form data to the fortune endpoint.
        payload = kwargs
        response = self.client.post('/fortune/', data=payload)
        return response

    def test_token_get(self):
        response = self.client.get('/token/')
        self.assert200(response)
        self.assertEqual(len(response.json['token']), 16)
        # Tokens are not single-use: a second GET must also succeed.
        response = self.client.get('/token/')
        self.assert200(response)

    def test_fortune_post(self):
        response = self.client.get('/token/')
        result = self.set_fortune(token=response.json['token'], fortune=self.fortunes[0])
        self.assert200(result)
        self.assertEqual('a', result.json['fortune'])
        # A second fortune may be stored against the same token.
        result = self.set_fortune(token=response.json['token'], fortune=self.fortunes[1])
        self.assert200(result)

    def test_fortune_post_empty(self):
        # A POST with no fortune field must be rejected with 400/'Required'.
        response = self.client.get('/token/')
        result = self.set_fortune(token=response.json['token'])
        self.assert400(result)
        self.assertTrue('Required' in result.json['message'])

    def test_fortune_post_invalid_token(self):
        result = self.set_fortune(token='x', fortune='x')
        self.assert400(result)
        self.assertTrue('Invalid' in result.json['message'])

    def test_fortune_get(self):
        response = self.client.get('/token/')
        result = self.set_fortune(token=response.json['token'], fortune=self.fortunes[0])
        fortune = self.client.get('/fortune/', data={'token': response.json['token']})
        self.assert200(fortune)
        self.assertEqual('a', fortune.json['fortune'])

    def test_fortune_get_random(self):
        token = self.client.get('/token/').json
        results = [self.set_fortune(token=token['token'], fortune=x) for x in self.fortunes]
        fortune = self.client.get('/fortune/', data=token)
        self.assert200(fortune)
        # Any one of the submitted fortunes may come back.
        self.assertTrue(fortune.json['fortune'] in self.fortunes)

    def test_fortune_get_empty_token(self):
        response = self.client.get('/token/')
        result = self.set_fortune(token=response.json['token'], fortune=self.fortunes[0])
        fortune = self.client.get('/fortune/', data={'token': None})
        self.assert400(fortune)
        self.assertTrue('Required' in fortune.json['message'])

    def test_fortune_get_invalid_token(self):
        result = self.client.get('/fortune/', data={'token': 'x'})
        self.assert400(result)
        self.assertTrue('Invalid' in result.json['message'])

    def test_fortune_get_empty_fortune(self):
        # Valid token but no fortunes stored for it yet.
        token = self.client.get('/token/')
        result = self.client.get('/fortune/', data={'token': token.json['token']})
        self.assert400(result)
        self.assertTrue('Fortune' in result.json['message'])

    def test_index(self):
        resp = self.client.get('/')
        self.assert200(resp)
class TestSqlViews(TestCase, ViewsMixin):
    """Runs the shared ViewsMixin tests against the SQLAlchemy-backed app."""

    def create_app(self):
        app = make_app('fortunate.test_settings.test_sql')
        # test_index hits '/', which the app itself does not register.
        app.add_url_rule('/', view_func=lambda: 'test')
        return app

    def setUp(self):
        # Build a fresh schema for every test.
        from fortunate.models.sqlalchemy import db
        db.create_all()

    def tearDown(self):
        from fortunate.models.sqlalchemy import db
        db.session.remove()
        db.drop_all()
#class TestDictViews(TestCase, ViewsMixin):
# def create_app(self):
# app = make_app('fortunate.test_settings.test_dict')
# app.add_url_rule('/', view_func=lambda: 'test')
# return app
|
{
"content_hash": "4642b6412d7bc144d5aae2ef087c9a7c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 92,
"avg_line_length": 34.22429906542056,
"alnum_prop": 0.6234298197706172,
"repo_name": "kryptn/Fortunate",
"id": "0cc9d30c12789cf2891eb02ab72098ce41fdfe8a",
"size": "3662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fortunate/fortunate/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19215"
},
{
"name": "Shell",
"bytes": "208"
}
],
"symlink_target": ""
}
|
"""Print the login id and password of every voter with a given email address
for one election, as CSV rows on stdout.

Usage: python extract-passwords-for-email.py <election_uuid> <email>
"""
import sys
import csv
import os

import django

# Fail fast with a usage message instead of an IndexError on sys.argv,
# and before paying the cost of django.setup().
if len(sys.argv) < 3:
    sys.stderr.write("usage: %s <election_uuid> <email>\n" % sys.argv[0])
    sys.exit(1)

# Configure Django before importing any models.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
django.setup()

from helios.models import Election

election_uuid = sys.argv[1]
email = sys.argv[2]

csv_output = csv.writer(sys.stdout)

voters = Election.objects.get(uuid=election_uuid).voter_set.filter(voter_email=email)

for voter in voters:
    csv_output.writerow([voter.voter_login_id, voter.voter_password])
|
{
"content_hash": "6260e4004f9b3b065f7c3dcfc5772527",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 85,
"avg_line_length": 20.238095238095237,
"alnum_prop": 0.76,
"repo_name": "benadida/helios-server",
"id": "dc9ac956425b1145d5432b2d8db848737e5fff7a",
"size": "632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extract-passwords-for-email.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13437"
},
{
"name": "HTML",
"bytes": "162385"
},
{
"name": "Java",
"bytes": "2271"
},
{
"name": "JavaScript",
"bytes": "307826"
},
{
"name": "Procfile",
"bytes": "119"
},
{
"name": "Python",
"bytes": "483554"
},
{
"name": "Shell",
"bytes": "402"
}
],
"symlink_target": ""
}
|
"""Platform and system specific function."""
import getpass
import logging
import os
import pkgutil
import sys
log = logging.getLogger(__name__)
def is_tty():
    """
    Checks if the standard output is connected (is associated with a terminal device) to a tty(-like)
    device.
    """
    # Some stdout replacements (e.g. StringIO) lack isatty entirely.
    isatty = getattr(sys.stdout, "isatty", None)
    if isatty is None:
        return False
    return isatty()
def is_terminal_support_colors() -> bool:
    """Try to determine if the current terminal supports colors."""
    # Windows terminals are treated as colorless; so is anything not a tty.
    if sys.platform == "win32" or not is_tty():
        return False
    # COLORTERM is exported by most modern color-capable terminals.
    if "COLORTERM" in os.environ:
        return True
    term = os.environ.get("TERM", "dumb").lower()
    return term in ("xterm", "linux") or "color" in term
def get_airflow_git_version():
    """Returns the git commit hash representing the current version of the application."""
    try:
        # The 'git_version' resource file is baked into the package at build time.
        return str(pkgutil.get_data('airflow', 'git_version'), encoding="UTF-8")
    except Exception as e:  # pylint: disable=broad-except
        log.debug(e)
        return None
def getuser() -> str:
    """
    Gets the username associated with the current user, or error with a nice
    error message if there's no current user.

    We don't want to fall back to os.getuid() because not having a username
    probably means the rest of the user environment is wrong (e.g. no $HOME).
    Explicit failure is better than silently trying to work badly.
    """
    try:
        username = getpass.getuser()
    except KeyError:
        # Inner import to avoid circular import
        from airflow.exceptions import AirflowConfigException

        raise AirflowConfigException(
            "The user that Airflow is running as has no username; you must run"
            "Airflow as a full user, with a username and home directory, "
            "in order for it to function properly."
        )
    return username
|
{
"content_hash": "80f33a581ce0286a925bf2b58b44ef38",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 101,
"avg_line_length": 30,
"alnum_prop": 0.6538461538461539,
"repo_name": "sekikn/incubator-airflow",
"id": "73eb609cdbcc2c6e2c4b7452d96dfb4702a42afb",
"size": "2736",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/utils/platform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the shared character-builder terminal template object."""
    obj = Tangible()
    obj.template = "object/tangible/terminal/shared_terminal_character_builder.iff"
    obj.attribute_template_id = -1
    obj.stfName("terminal_name","terminal_character_builder")
    #### BEGIN MODIFICATIONS ####
    obj.setStringAttribute("radial_filename", "radials/blue_frog.py")
    #### END MODIFICATIONS ####
    return obj
|
{
"content_hash": "e1310f65a6de66d172d150adb5f4e152",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 83,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.7301980198019802,
"repo_name": "anhstudios/swganh",
"id": "2448215c416a6ee08a1167c9f886761d50648eff",
"size": "549",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/terminal/shared_terminal_character_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
""" Optimizations of the expression tree representation for better CSE
opportunities.
"""
from sympy import Add, Mul
from sympy.core.operations import AssocOp
from sympy.utilities.iterables import preorder_traversal
def assumed(e, name):
    """ Return True if the given assumption is true about the sympy expression.

    Examples
    --------

    >>> from sympy import symbols
    >>> from sympy.simplify.cse_opts import assumed
    >>> from sympy.abc import x, y

    >>> assumed(x+y, 'is_Add')
    True
    >>> assumed(x+y, 'is_Mul')
    False
    """
    # Objects without the assumption attribute are treated as "assumption false".
    if hasattr(e, name):
        return getattr(e, name)
    return False
class Sub(AssocOp):
    """ Stub of a Sub operator to replace Add(x, Mul(NegativeOne(-1), y)).
    """

    __slots__ = []

    # Masquerade as a non-Add node type so downstream CSE treats the
    # subtraction atomically instead of re-expanding it.
    is_Add = False
    is_Sub = True

    def _eval_subs(self, old, new):
        # Recurse through args when this node itself is not the target.
        if self == old:
            return new
        else:
            return self.__class__(*[s._eval_subs(old, new) for s in self.args ])
def sub_pre(e):
    """ Replace Add(x, Mul(NegativeOne(-1), y)) with Sub(x, y).
    """
    # Collect (node, replacement) pairs first; substituting while still
    # traversing the tree would be unsafe.
    replacements = []
    for node in preorder_traversal(e):
        if assumed(node, 'is_Add'):
            positives = []
            negatives = []
            for arg in node.args:
                if assumed(arg, 'is_Mul'):
                    a, b = arg.as_two_terms()
                    if (assumed(a, 'is_number') and
                        assumed(a, 'is_negative')):
                        # Negate the coefficient so the term is stored positive.
                        negatives.append(Mul(-a, b))
                        continue
                positives.append(arg)
            if len(negatives) > 0:
                # Rewrite as Sub(sum of positives, sum of negated negatives),
                # exposing more common subexpressions to CSE (module purpose).
                replacement = Sub(Add(*positives), Add(*negatives))
                replacements.append((node, replacement))
    for node, replacement in replacements:
        e = e.subs(node, replacement)
    return e
def sub_post(e):
    """ Replace Sub(x,y) with the canonical form Add(x, Mul(NegativeOne(-1), y)).
    """
    # Gather every Sub node with its canonical Add replacement up front,
    # then substitute in a second pass.
    pairs = [
        (node, Add(node.args[0], Mul(-1, node.args[1])))
        for node in preorder_traversal(e)
        if assumed(node, 'is_Sub')
    ]
    for node, replacement in pairs:
        e = e.subs(node, replacement)
    return e
# Each optimization is a (preprocess, postprocess) function pair applied
# around the common-subexpression-elimination pass.
default_optimizations = [
    (sub_pre, sub_post),
]
|
{
"content_hash": "1be4ad265d41e75bab0e4cbeec90366c",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 81,
"avg_line_length": 29.13157894736842,
"alnum_prop": 0.5564588979223125,
"repo_name": "minrk/sympy",
"id": "ec1d194b3fdb0aa1ce03b2dbe23d680d33122788",
"size": "2214",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "sympy/simplify/cse_opts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6803953"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "2734"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares the Vote.user foreign key
    # with on_delete=CASCADE and related_name='votes'. NOTE(review): likely a
    # metadata-only change -- confirm against migration 0003.

    dependencies = [
        ('votes', '0003_auto_20170928_2238'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vote',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='votes', to=settings.AUTH_USER_MODEL, verbose_name='user'),
        ),
    ]
|
{
"content_hash": "c68dffd12dfc0d18bbdf823e58046d8a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 153,
"avg_line_length": 27.05,
"alnum_prop": 0.6543438077634011,
"repo_name": "Angoreher/xcero",
"id": "5e74d758cacb023846f6c31392298b735bc29d91",
"size": "614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "votes/migrations/0004_auto_20170929_1948.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "505"
},
{
"name": "HTML",
"bytes": "31590"
},
{
"name": "JavaScript",
"bytes": "78"
},
{
"name": "Python",
"bytes": "166869"
},
{
"name": "Shell",
"bytes": "8487"
}
],
"symlink_target": ""
}
|
from django.utils.functional import LazyObject
__version__ = '1.1.0.internal.1'
default_app_config = 'constance.apps.ConstanceConfig'
class LazyConfig(LazyObject):
    # Defer constructing Config (and its .base import) until the first
    # attribute access, so importing this package stays side-effect free.
    def _setup(self):
        from .base import Config
        self._wrapped = Config()


# Module-level singleton; the wrapped Config is built lazily by _setup().
config = LazyConfig()
|
{
"content_hash": "445d722e41b7b98976b731a0b8768b16",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 53,
"avg_line_length": 21.46153846153846,
"alnum_prop": 0.6953405017921147,
"repo_name": "dmugtasimov/django-constance",
"id": "d563ea68adda77c4a989dbf702e2e5031298b07d",
"size": "279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "constance/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3371"
},
{
"name": "Python",
"bytes": "44223"
}
],
"symlink_target": ""
}
|
import logging
from pants.backend.python.targets.import_wheels_mixin import ImportWheelsMixin
from pants.base.deprecated import deprecated_conditional
from pants.base.payload import Payload
from pants.base.payload_field import PrimitiveField
from pants.build_graph.target import Target
from pants.util.collections import ensure_str_list
logger = logging.getLogger(__name__)
class UnpackedWheels(ImportWheelsMixin, Target):
    """A set of sources extracted from JAR files.

    NB: Currently, wheels are always resolved for the 'current' platform.

    :API: public
    """

    # Field names consumed by ImportWheelsMixin to locate the wheel targets.
    imported_target_kwargs_field = "libraries"
    imported_target_payload_field = "library_specs"

    @classmethod
    def alias(cls):
        """BUILD-file alias under which this target type is registered."""
        return "unpacked_whls"

    class ExpectedLibrariesError(Exception):
        """Thrown when the target has no libraries defined."""

        pass

    # TODO: consider introducing some form of source roots instead of the manual `within_data_subdir`
    # kwarg!
    def __init__(
        self,
        module_name,
        libraries=None,
        include_patterns=None,
        exclude_patterns=None,
        compatibility=None,
        within_data_subdir=None,
        payload=None,
        **kwargs
    ):
        """
        :param str module_name: The name of the specific python module containing headers and/or
                                libraries to extract (e.g. 'tensorflow').
        :param list libraries: addresses of python_requirement_library targets that specify the wheels
                               you want to unpack
        :param list include_patterns: fileset patterns to include from the archive
        :param list exclude_patterns: fileset patterns to exclude from the archive. Exclude patterns
                                      are processed before include_patterns.
        :param compatibility: Python interpreter constraints used to create the pex for the requirement
                              target. If unset, the default interpreter constraints are used. This
                              argument is unnecessary unless the native code depends on libpython.
        :param bool within_data_subdir: If True, descend into '<name>-<version>.data/' when matching
                                        `include_patterns`. For python wheels which declare any non-code
                                        data, this is usually needed to extract that without manually
                                        specifying the relative path, including the package version. For
                                        example, when `data_files` is used in a setup.py,
                                        `within_data_subdir=True` will allow specifying
                                        `include_patterns` matching exactly what is specified in the
                                        setup.py.
        """
        # Warn on legacy (non-boolean) values of `within_data_subdir`.
        deprecated_conditional(
            lambda: type(within_data_subdir) not in (bool, type(None)),
            removal_version="1.28.0.dev2",
            entity_description="A non-boolean value for `within_data_subdir`",
            hint_message="The location of the .data subdirectory will be inferred from the module name!",
        )
        payload = payload or Payload()
        payload.add_fields(
            {
                "library_specs": PrimitiveField(libraries or ()),
                "module_name": PrimitiveField(module_name),
                "include_patterns": PrimitiveField(include_patterns or ()),
                "exclude_patterns": PrimitiveField(exclude_patterns or ()),
                "compatibility": PrimitiveField(ensure_str_list(compatibility or ())),
                "within_data_subdir": PrimitiveField(within_data_subdir),
                # TODO: consider supporting transitive deps like UnpackedJars!
                # TODO: consider supporting `platforms` as in PythonBinary!
            }
        )
        super().__init__(payload=payload, **kwargs)
        # Validate after super().__init__ so self.address is available for the message.
        if not libraries:
            raise self.ExpectedLibrariesError(
                "Expected non-empty libraries attribute for {spec}".format(spec=self.address.spec)
            )

    @property
    def module_name(self):
        # Name of the python module whose files are extracted.
        return self.payload.module_name

    @property
    def compatibility(self):
        # Interpreter constraints recorded in the payload for this target.
        return self.payload.compatibility

    @property
    def within_data_subdir(self):
        # See the constructor docstring for the semantics of this flag.
        return self.payload.within_data_subdir
|
{
"content_hash": "55533a6b5b433502ef90dc3a7b1dd8a7",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 105,
"avg_line_length": 43.14705882352941,
"alnum_prop": 0.615087480118155,
"repo_name": "wisechengyi/pants",
"id": "45057e31d8840e2d2ed92d842246fb8cc5dd927d",
"size": "4533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/python/targets/unpacked_whls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "6634"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "507948"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7608990"
},
{
"name": "Rust",
"bytes": "1005243"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "105217"
},
{
"name": "Starlark",
"bytes": "489739"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
"""
Based on https://github.com/analytics-pros/universal-analytics-python
"""
|
{
"content_hash": "1596d50d54a14621679b96c4b817601e",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 69,
"avg_line_length": 26,
"alnum_prop": 0.7435897435897436,
"repo_name": "botstory/botstory",
"id": "425bb5467b595af8a4e67c2a0f8995f5cd10f77b",
"size": "78",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "botstory/integrations/ga/universal_analytics/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "340760"
},
{
"name": "Shell",
"bytes": "2009"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry import analytics
class BaseIncidentEvent(analytics.Event):
    # Attributes shared by every incident analytics event below.
    attributes = (
        analytics.Attribute("incident_id"),
        analytics.Attribute("organization_id"),
        analytics.Attribute("incident_type"),
    )
class IncidentCreatedEvent(BaseIncidentEvent):
    # Analytics event type identifier for incident creation.
    type = "incident.created"
class IncidentStatusUpdatedEvent(BaseIncidentEvent):
    type = "incident.status_change"
    # Extends the base attributes with the before/after status values.
    attributes = BaseIncidentEvent.attributes + (
        analytics.Attribute("prev_status"),
        analytics.Attribute("status"),
    )
class IncidentCommentCreatedEvent(BaseIncidentEvent):
    type = "incident.comment"
    # user_id/activity_id may be absent, hence required=False.
    attributes = BaseIncidentEvent.attributes + (
        analytics.Attribute("user_id", required=False),
        analytics.Attribute("activity_id", required=False),
    )
# Register the event classes with the analytics subsystem at import time.
analytics.register(IncidentCreatedEvent)
analytics.register(IncidentStatusUpdatedEvent)
analytics.register(IncidentCommentCreatedEvent)
|
{
"content_hash": "1539a8f52cd4fcd5749210d1f19bc83d",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 59,
"avg_line_length": 27.25,
"alnum_prop": 0.7339449541284404,
"repo_name": "mvaled/sentry",
"id": "c4f4f4bccda41fb8b1ca2ec94d901d5855e1e45d",
"size": "981",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/incidents/events.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaebusiness.business import CommandExecutionException
from tekton import router
from gaecookie.decorator import no_csrf
from livro_app import facade
from routes.livros import admin
@no_csrf
def index(livro_id):
    """Render the admin edit form pre-filled with the livro's current data."""
    livro = facade.get_livro_cmd(livro_id)()
    detail_form = facade.livro_detail_form()
    context = {'save_path': router.to_path(save, livro_id), 'livro': detail_form.fill_with_model(livro)}
    return TemplateResponse(context, 'livros/admin/form.html')
def save(_handler, livro_id, **livro_properties):
    """Persist form edits to a livro; re-render the form with errors on failure."""
    cmd = facade.update_livro_cmd(livro_id, **livro_properties)
    try:
        cmd()
    except CommandExecutionException:
        # Validation failed: show the form again with errors and user input.
        context = {'errors': cmd.errors,
                   'livro': cmd.form}
        return TemplateResponse(context, 'livros/admin/form.html')
    # Success: back to the admin listing.
    _handler.redirect(router.to_path(admin))
|
{
"content_hash": "92c89f60b6299efdc4aceab201be26c9",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 104,
"avg_line_length": 34.25,
"alnum_prop": 0.7174139728884255,
"repo_name": "RoAbreu/AulaJavaScripts",
"id": "a2f7ba17a124883270c01ab2b96e5dec2e60ce30",
"size": "983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PROJETO/backend/appengine/routes/livros/admin/edit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "222"
},
{
"name": "JavaScript",
"bytes": "10802"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "318194"
},
{
"name": "Shell",
"bytes": "2542"
}
],
"symlink_target": ""
}
|
__all__ = [
'CombineTables',
'ReshapeTable',
'ExtractArray',
'SplitTableOnArray',
'AppendTableToCellData',
]
__displayname__ = 'Table Operations'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
import pyvista as pv
from .. import _helpers, interface
from ..base import FilterBase, FilterPreserveTypeBase
###############################################################################
###############################################################################
class CombineTables(FilterBase):
    """Takes two tables and combines them if they have the same number of rows.

    Currently this cannot handle time varing tables as that gets complicated
    real quick if the tables do not have the same timestep values
    """

    __displayname__ = 'Combine Tables'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=2,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # Parameters... none

    # CRITICAL for multiple input ports
    def FillInputPortInformation(self, port, info):
        """Used by pipeline. Necessary when dealing with multiple input ports"""
        # all are tables so no need to check port
        info.Set(self.INPUT_REQUIRED_DATA_TYPE(), "vtkTable")
        return 1

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Inputs from different ports:
        pdi0 = self.GetInputData(inInfo, 0, 0)
        pdi1 = self.GetInputData(inInfo, 1, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Output starts as a copy of the first table; second table's columns
        # are appended onto it.
        pdo.DeepCopy(pdi0)
        # Tables must be congruent row-wise before columns can be merged.
        nrows = pdi0.GetNumberOfRows()
        nrows1 = pdi1.GetNumberOfRows()
        # Idiom fix: `nrows != nrows1` replaces `not (nrows == nrows1)`.
        if nrows != nrows1:
            raise AssertionError('Tables must have the same number of rows')
        for i in range(pdi1.GetRowData().GetNumberOfArrays()):
            arr = pdi1.GetRowData().GetArray(i)
            pdo.GetRowData().AddArray(arr)
        return 1

    def apply(self, table0, table1):
        """Run the algorithm on the two input tables"""
        self.SetInputDataObject(0, table0)
        self.SetInputDataObject(1, table1)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
# ---- Reshape Table ----#
class ReshapeTable(FilterBase):
    """This filter will take a ``vtkTable`` object and reshape it. This filter
    essentially treats ``vtkTable``s as 2D matrices and reshapes them using
    ``numpy.reshape`` in a C contiguous manner. Unfortunately, data fields will
    be renamed arbitrarily because VTK data arrays require a name.
    """

    __displayname__ = 'Reshape Table'
    __category__ = 'filter'

    def __init__(self, **kwargs):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # Parameters: target shape, output column names, and reshape order.
        self.__nrows = kwargs.get('nrows', 1)
        self.__ncols = kwargs.get('ncols', 1)
        self.__names = kwargs.get('names', [])
        self.__order = kwargs.get('order', 'F')

    def _reshape(self, pdi, pdo):
        """Internal helper to perfrom the reshape"""
        # Get number of columns
        cols = pdi.GetNumberOfColumns()
        # Get number of rows
        rows = pdi.GetColumn(0).GetNumberOfTuples()

        # Pad user-supplied names up to ncols, or reject too many names.
        if len(self.__names) != 0:
            num = len(self.__names)
            if num < self.__ncols:
                for i in range(num, self.__ncols):
                    self.__names.append('Field %d' % i)
            elif num > self.__ncols:
                raise _helpers.PVGeoError(
                    'Too many array names. `ncols` specified as %d and %d names given.'
                    % (self.__ncols, num)
                )
        else:
            self.__names = ['Field %d' % i for i in range(self.__ncols)]

        # Make a 2D numpy array and fill with data from input table
        data = np.empty((rows, cols))
        for i in range(cols):
            c = pdi.GetColumn(i)
            data[:, i] = interface.convert_array(c)

        # The reshape must preserve the total element count.
        if (self.__ncols * self.__nrows) != (cols * rows):
            raise _helpers.PVGeoError(
                'Total number of elements must remain %d. Check reshape dimensions.'
                % (cols * rows)
            )

        # Use numpy.reshape() to reshape data NOTE: only 2D because its a table
        # NOTE: column access of this reshape is not contigous
        data = np.array(
            np.reshape(data.flatten(), (self.__nrows, self.__ncols), order=self.__order)
        )
        pdo.SetNumberOfRows(self.__nrows)

        # Add new array to output table and assign incremental names (e.g. Field0)
        for i in range(self.__ncols):
            # Make a contigous array from the column we want
            col = np.array(data[:, i])
            # allow type to be determined by input
            # VTK arrays need a name. Set arbitrarily
            insert = interface.convert_array(
                col, name=self.__names[i]
            )  # array_type=vtk.VTK_FLOAT
            # pdo.AddColumn(insert) # these are not getting added to the output table
            # ... work around:
            pdo.GetRowData().AddArray(insert)  # NOTE: this is in the FieldData
        return pdo

    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline"""
        # Get input/output of Proxy
        pdi = self.GetInputData(inInfo, 0, 0)
        pdo = self.GetOutputData(outInfo, 0)
        # Perfrom task
        self._reshape(pdi, pdo)
        return 1

    #### Seters and Geters ####

    def set_names(self, names):
        """Set names using a semicolon (;) seperated string or a list of strings

        Args:
            names (string): a string of data array names for the reshaped table
                using a semicolon (;) to spearate
        """
        # parse the names (a semicolon seperated list of names)
        if isinstance(names, str):
            names = names.split(';')
        if self.__names != names:
            self.__names = names
            self.Modified()

    def add_name(self, name):
        """Use to append a name to the list of data array names for the output
        table.
        """
        self.__names.append(name)
        self.Modified()

    def get_names(self):
        """Returns a list of the names given to the new arrays"""
        return self.__names

    def set_number_of_columns(self, ncols):
        """Set the number of columns for the output ``vtkTable``"""
        if isinstance(ncols, float):
            ncols = int(ncols)
        if self.__ncols != ncols:
            self.__ncols = ncols
            self.Modified()

    def set_number_of_rows(self, nrows):
        """Set the number of rows for the output ``vtkTable``"""
        if isinstance(nrows, float):
            nrows = int(nrows)
        if self.__nrows != nrows:
            self.__nrows = nrows
            self.Modified()

    def set_order(self, order):
        """Set the reshape order (``'C'`` of ``'F'``)"""
        if self.__order != order:
            self.__order = order
            self.Modified()
###############################################################################
class ExtractArray(FilterBase):
    """Extract a single array from a ``vtkDataSet`` into a new ``vtkTable``."""

    __displayname__ = 'Extract Array'
    __category__ = 'filter'

    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkDataSet',
            nOutputPorts=1,
            outputType='vtkTable',
        )
        # [field association, array name] chosen for extraction
        self.__input_array = [None, None]

    def RequestData(self, request, inInfo, outInfo):
        """Pipeline hook: copy the selected array into the output table."""
        dataset = self.GetInputData(inInfo, 0, 0)
        table = self.GetOutputData(outInfo, 0)
        # Note: the user must select exactly one array to save out.
        field, name = self.__input_array
        vtkarr = _helpers.get_vtk_array(dataset, field, name)
        table.GetRowData().AddArray(vtkarr)
        return 1

    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Choose the input array to extract.

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (str): the name of the array
        """
        if field != self.__input_array[0]:
            self.__input_array[0] = field
            self.Modified()
        if name != self.__input_array[1]:
            self.__input_array[1] = name
            self.Modified()
        return 1

    def apply(self, input_data_object, array_name):
        """Run the algorithm on ``input_data_object``, extracting ``array_name``."""
        self.SetInputDataObject(input_data_object)
        # Locate which field association holds the named array.
        _, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class SplitTableOnArray(FilterBase):
    """A filter to separate table data based on the unique values of a given
    data array into a ``vtkMultiBlockDataSet``: one block per unique value,
    named ``<array name><value>``.
    """
    __displayname__ = 'Split Table On Array'
    __category__ = 'filter'
    def __init__(self):
        FilterBase.__init__(
            self,
            nInputPorts=1,
            inputType='vtkTable',
            nOutputPorts=1,
            outputType='vtkMultiBlockDataSet',
        )
        # [field association, array name] selected for the split
        self.__input_array = [None, None]
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Get input/output of Proxy
        table = self.GetInputData(inInfo, 0, 0)
        output = vtk.vtkMultiBlockDataSet.GetData(outInfo, 0)
        #### Perform task ####
        # Get the selected input array as a NumPy array so we can find its
        # unique values.
        field, name = self.__input_array[0], self.__input_array[1]
        wtbl = dsa.WrapDataObject(table)
        spliton = _helpers.get_numpy_array(wtbl, field, name)
        uniq = np.unique(spliton)
        # Round-trip through pandas so rows can be selected by value and each
        # subset written out as its own table block.
        df = interface.table_to_data_frame(table)
        blk = 0
        output.SetNumberOfBlocks(len(uniq))
        for val in uniq:
            temp = interface.data_frame_to_table(df[df[name] == val])
            output.SetBlock(blk, temp)
            # Label the block after the split array and value for the GUI.
            output.GetMetaData(blk).Set(
                vtk.vtkCompositeDataSet.NAME(), '{}{}'.format(name, val)
            )
            blk += 1
        return 1
    def SetInputArrayToProcess(self, idx, port, connection, field, name):
        """Used to set the input array(s)

        Args:
            idx (int): the index of the array to process
            port (int): input port (use 0 if unsure)
            connection (int): the connection on the port (use 0 if unsure)
            field (int): the array field (0 for points, 1 for cells, 2 for
                field, and 6 for row)
            name (int): the name of the array
        """
        if self.__input_array[0] != field:
            self.__input_array[0] = field
            self.Modified()
        if self.__input_array[1] != name:
            self.__input_array[1] = name
            self.Modified()
        return 1
    def apply(self, input_data_object, array_name):
        """Run the algorithm on the input data object, specifying the array name
        to use for the split.
        """
        self.SetInputDataObject(input_data_object)
        arr, field = _helpers.search_for_array(input_data_object, array_name)
        self.SetInputArrayToProcess(0, 0, 0, field, array_name)
        self.Update()
        return pv.wrap(self.GetOutput())
###############################################################################
class AppendTableToCellData(FilterPreserveTypeBase):
    """Takes two inputs, a dataset to preserve and a table of data, where the
    data in the table is appended to the CellData of the input dataset.

    The 0th port is the dataset to preserve and the 1st port is a table whos rows
    will be appended as CellData to the 0th port. The number of rows in the table
    MUST match the number of cells in the input dataset.
    """
    __displayname__ = 'Append Table to Cell Data'
    __category__ = 'filter'
    def __init__(self):
        FilterPreserveTypeBase.__init__(self, nInputPorts=2)
        self._preserve_port = 0 # ensure port 0's type is preserved
        # cached merged timestep values for both inputs; built lazily
        self.__timesteps = None
    def _update_time_steps(self):
        """For internal use only: appropriately sets the timesteps."""
        # Use the inputs' timesteps: this merges the timesteps values
        tsAll = _helpers.get_combined_input_time_steps(self)
        # Use both inputs' time steps
        self.__timesteps = _helpers.update_time_steps(self, tsAll, explicit=True)
        return 1
    def RequestData(self, request, inInfo, outInfo):
        """Used by pipeline to generate output"""
        # Inputs from different ports:
        pdi0 = self.GetInputData(inInfo, 0, 0) # Keep me!
        table = self.GetInputData(inInfo, 1, 0) # add my data to the input
        pdo = self.GetOutputData(outInfo, 0) # The output
        # Copy the preserved dataset, then bolt the table's rows onto it.
        pdo.DeepCopy(pdi0)
        # Each table row maps onto one cell, so the counts must agree.
        nrows = table.GetNumberOfRows()
        ncells = pdo.GetNumberOfCells()
        if nrows != ncells:
            raise _helpers.PVGeoError(
                'Number rows in table ({}) does not match number of cells ({})'.format(
                    nrows, ncells
                )
            )
        for i in range(table.GetRowData().GetNumberOfArrays()):
            arr = table.GetRowData().GetArray(i)
            pdo.GetCellData().AddArray(arr)
        return 1
    def RequestInformation(self, request, inInfo, outInfo):
        """Used by pipeline to handle time variance"""
        self._update_time_steps()
        return 1
    def apply(self, dataset, table):
        """Update the algorithm and get the output data object

        Args:
            dataset (vtkDataSet): Any dataset with CellData
            table (vtkTable): table of data values that will be appended to
                ``dataset``'s CellData

        Return:
            vtkDataSet: The appended dataset as a new object
        """
        self.SetInputDataObject(0, dataset)
        self.SetInputDataObject(1, table)
        self.Update()
        return pv.wrap(self.GetOutput())
    def get_time_step_values(self):
        """Use this in ParaView decorator to register timesteps."""
        # if unset, force at least one attempt to set the timesteps
        if self.__timesteps is None:
            self._update_time_steps()
        # self.__timesteps should already be of type list
        return self.__timesteps if self.__timesteps is not None else None
|
{
"content_hash": "044d62b47cd82bf30da0a457cd55606a",
"timestamp": "",
"source": "github",
"line_count": 437,
"max_line_length": 88,
"avg_line_length": 35.153318077803206,
"alnum_prop": 0.5621663845853404,
"repo_name": "banesullivan/ParaViewGeophysics",
"id": "bc46373c47326daf165af4cbb3ff80245ab39e5b",
"size": "15362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PVGeo/filters/tables.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "789"
},
{
"name": "Python",
"bytes": "191998"
},
{
"name": "Shell",
"bytes": "9602"
}
],
"symlink_target": ""
}
|
code_begin = """#!/usr/bin/python
from __future__ import print_function
import re
import pycurl
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
def main():
buffer = BytesIO()
curl_handler = pycurl.Curl()
curl_handler.setopt(curl_handler.URL, '{url}')
curl_handler.setopt(curl_handler.WRITEDATA, buffer)
curl_handler.setopt(curl_handler.HTTPHEADER, {headers})
# for verbosity
curl_handler.setopt(curl_handler.VERBOSE, True)
# Follow redirects
curl_handler.setopt(curl_handler.FOLLOWLOCATION, True)
# For older PycURL versions:
#curl_handler.setopt(curl_handler.WRITEFUNCTION, buffer.write)
"""
code_proxy = """
curl_handler.setopt(curl_handler.PROXY, '{proxy}')
"""
code_post = """
# Sets request method to POST
curl_handler.setopt(curl_handler.POSTFIELDS, "{data}") #expects body to urlencoded
"""
code_https = """
curl_handler.setopt(pycurl.SSL_VERIFYPEER, 1)
curl_handler.setopt(pycurl.SSL_VERIFYHOST, 2)
# If providing updated certs
# curl_handler.setopt(pycurl.CAINFO, "/path/to/updated-certificate-chain.crt")
"""
# Closing snippet for scripts that highlight/report a regex match in the
# response body. ``{search_string}`` is format-substituted.
#
# Fixes applied to the generated code:
#  * ``except pycurl.error, error:`` was Python-2-only syntax, contradicting
#    the template's own Py2/Py3 support (print_function, BytesIO fallback);
#    ``as error`` works on both.
#  * the loop iterated the matched strings but then indexed the list with
#    them (``match[item]``), a guaranteed TypeError; use ``item`` directly.
code_search = """
    try:
        curl_handler.perform()
    except pycurl.error as error:
        print('An error occurred: ', error)
    curl_handler.close()
    body = buffer.getvalue()
    # Body is a string on Python 2 and a byte string on Python 3.
    # If we know the encoding, we can always decode the body and
    # end up with a Unicode string.
    response = body.decode('iso-8859-1')
    match = re.findall(r"{search_string}", str(response))
    try:
        from termcolor import colored
        lib_available = True
    except ImportError:
        lib_available = False
    if match:
        for item in match:
            if lib_available:
                replace_string = colored(item, 'green')
                response = re.sub(item, replace_string, str(response))
            else:
                print("Matched item: ",item)
    print(response)
if __name__ == '__main__':
    main()
"""
# Closing snippet for scripts that simply print the response body.
# Fix: ``except pycurl.error, error:`` was Python-2-only syntax even though
# the template otherwise targets Py2/Py3; ``as error`` works on both.
code_nosearch = """
    try:
        curl_handler.perform()
    except pycurl.error as error:
        print('An error occurred: ', error)
    curl_handler.close()
    body = buffer.getvalue()
    # Body is a string on Python 2 and a byte string on Python 3.
    # If we know the encoding, we can always decode the body and
    # end up with a Unicode string.
    response = body.decode('iso-8859-1')
    print(response)
if __name__ == '__main__':
    main()
"""
|
{
"content_hash": "1c053f1fc55e1163b03f4ca7a7f2ccb0",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 87,
"avg_line_length": 26.20618556701031,
"alnum_prop": 0.6369000786782061,
"repo_name": "dhruvagarwal/http-request-translator",
"id": "d6d2ac584f51491cc1ecd8cb9c45e6fd7237721a",
"size": "2542",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "hrt/templates/python.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "70046"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.gs
Tool-specific initialization for Ghostscript.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gs.py 3603 2008/10/10 05:46:45 scons"
import SCons.Action
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()
gs = {'os2': 'gsos2', 'win32': 'gswin32c'}.get(platform, 'gs')

# Shared Action instance; created lazily by generate() on first use.
GhostscriptAction = None
def generate(env):
    """Add Builders and construction variables for Ghostscript to an
    Environment."""
    global GhostscriptAction
    # Create the shared Action lazily so repeated generate() calls reuse it.
    if GhostscriptAction is None:
        GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
    # Register the generic PDF builder, then teach it to turn .ps sources
    # into PDFs via Ghostscript.
    import pdf
    pdf.generate(env)
    bld = env['BUILDERS']['PDF']
    bld.add_action('.ps', GhostscriptAction)
    # Construction variables consumed by the $GSCOM command line.
    env['GS'] = gs
    env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
    env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
    """Return a usable Ghostscript executable, or a falsy value if absent.

    Prefers an explicitly configured ``$PS2PDF`` tool; otherwise looks for
    the platform-specific Ghostscript program on the PATH.
    """
    # dict-style membership works on Python 2 and 3 alike; the original
    # env.has_key() was removed in Python 3.
    if 'PS2PDF' in env:
        return env.Detect(env['PS2PDF'])
    else:
        return env.Detect(gs) or SCons.Util.WhereIs(gs)
|
{
"content_hash": "232fa7fc83a2c9e4877e13153db3d3d3",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 83,
"avg_line_length": 32.48,
"alnum_prop": 0.7253694581280788,
"repo_name": "frew/simpleproto",
"id": "24f985f3ebe775b0abd889345cd08fbfebd2b827",
"size": "2436",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scons-local-1.1.0/SCons/Tool/gs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C++",
"bytes": "30217"
},
{
"name": "Protocol Buffer",
"bytes": "1960"
},
{
"name": "Python",
"bytes": "1704215"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import logging
import socket
try:
import cmemcache as memcache
except ImportError:
try:
import memcache
except:
memcache = None
from django.conf import settings
from djblets.cache.forwarding_backend import DEFAULT_FORWARD_CACHE_ALIAS
def get_memcached_hosts():
    """Return the hosts currently configured for memcached.

    Returns ``None`` when no memcache client library could be imported, and
    an empty list when the configured cache backend is not memcached (or has
    no ``LOCATION``).
    """
    # NOTE(review): the None (no client library) vs. [] (non-memcached
    # backend) distinction matters to get_has_cache_stats(), which only
    # checks ``is not None`` -- confirm before unifying these returns.
    if not memcache:
        return None
    cache_info = settings.CACHES[DEFAULT_FORWARD_CACHE_ALIAS]
    backend = cache_info['BACKEND']
    locations = cache_info.get('LOCATION', [])
    if (not backend.startswith('django.core.cache.backends.memcached') or
        not locations):
        return []
    # LOCATION may be a single string; normalize to a list for callers.
    if not isinstance(locations, list):
        locations = [locations]
    return locations
def get_has_cache_stats():
    """Return whether or not cache stats are supported."""
    hosts = get_memcached_hosts()
    return hosts is not None
def get_cache_stats():
    """Return a list of (hostname, stats dict) pairs for each memcached host.

    This only supports memcache. Returns ``None`` when memcached is not
    configured; hosts that cannot be parsed or reached are skipped.
    """
    hostnames = get_memcached_hosts()
    if not hostnames:
        return None
    all_stats = []
    for hostname in hostnames:
        try:
            host, port = hostname.split(":")
        except ValueError:
            logging.error('Invalid cache hostname "%s"', hostname)
            continue
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((host, int(port)))
        except socket.error:
            s.close()
            continue
        s.send(b"stats\r\n")
        # memcached's stats output can easily exceed one recv() buffer, so
        # keep reading until the protocol's terminating "END\r\n" marker
        # (or the server closes the connection). The original single
        # recv(2048) silently truncated the stats.
        data = b''
        while not data.endswith(b'END\r\n'):
            chunk = s.recv(2048)
            if not chunk:
                break
            data += chunk
        s.close()
        stats = {}
        for line in data.decode('ascii').splitlines():
            info = line.split(" ")
            if info[0] == "STAT":
                # Values are numeric where possible, strings otherwise
                # (e.g. the "version" stat).
                try:
                    value = int(info[2])
                except ValueError:
                    value = info[2]
                stats[info[1]] = value
        # Guard against a truncated/empty response missing 'cmd_get'.
        cmd_get = stats.get('cmd_get', 0)
        if cmd_get == 0:
            stats['hit_rate'] = 0
            stats['miss_rate'] = 0
        else:
            stats['hit_rate'] = 100 * stats['get_hits'] / cmd_get
            stats['miss_rate'] = 100 * stats['get_misses'] / cmd_get
        all_stats.append((hostname, stats))
    return all_stats
|
{
"content_hash": "dce2ea971b196b9c62dea95651543704",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 77,
"avg_line_length": 24.54255319148936,
"alnum_prop": 0.5721716514954487,
"repo_name": "bkochendorfer/reviewboard",
"id": "e9f0ae38b33f39c171f6a24270c276905d028cab",
"size": "2307",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "reviewboard/admin/cache_stats.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "212721"
},
{
"name": "HTML",
"bytes": "179427"
},
{
"name": "JavaScript",
"bytes": "1463002"
},
{
"name": "Python",
"bytes": "3686542"
},
{
"name": "Shell",
"bytes": "20225"
}
],
"symlink_target": ""
}
|
from itertools import product
import random
class Mazegen(object):
    """
    Base class for generating random mazes; the default implementation is the
    Aldous-Broder algorithm.

    Aldous-Broder repeatedly picks a random neighbor (target) of the current
    position (source). If the target has not been visited yet it is added as a
    child of the source; otherwise nothing is recorded. Picking continues
    until every node known to the tree has been visited.

    ``self.tree`` maps node -> [parent, list of child directions, visited].
    """
    # Aldous-Broder algo
    def __init__(self, size):
        self.size = size
        self.tree = {}
    def generate_possible_choices(self, node):
        """Return the list of in-bounds neighbor cells of ``node``."""
        x, y = node
        limit = self.size  # renamed from ``max`` to avoid shadowing the builtin
        moves = []
        if x - 1 >= 0:
            moves.append((x - 1, y))
        if y - 1 >= 0:
            moves.append((x, y - 1))
        if x + 1 < limit:
            moves.append((x + 1, y))
        if y + 1 < limit:
            moves.append((x, y + 1))
        return moves
    def pick_a_move(self, node):
        """Pick a random neighbor of ``node``; return (target, source)."""
        return random.choice(self.generate_possible_choices(node)), node
    def is_visited(self, node):
        """Return the node's visited flag, registering unknown nodes as
        unvisited entries as a side effect."""
        if node in self.tree:
            return self.tree[node][2]
        self.tree.update({node: [None, [], False]})
        return False
    def set_parent(self, node, parent):
        """Record ``parent`` as the (single) parent of ``node``."""
        if self.tree[node][0]:
            raise Exception('%s Already have a parent %s received %s (%s) ' % (node, self.tree[node][0], parent, self.tree))
        self.tree[node][0] = parent
    def add_child_to_parent(self, node, parent):
        """Record the direction from ``parent`` toward its new child ``node``."""
        direction = self.get_direction(parent, node)
        if direction in self.tree[parent][1]:
            raise Exception('%s already a child of %s. %s' % (node, parent, self.tree))
        self.tree[parent][1].append(direction)
    def set_visited(self, node):
        """Mark ``node`` as visited, creating its tree entry if needed."""
        if node in self.tree:
            self.tree[node][2] = True
        else:
            self.tree.update({node: [None, [], True]})
    def visit_a_node(self, node, parent):
        """Attach ``node`` under ``parent`` unless it was already visited."""
        if self.is_visited(node):
            return
        self.set_parent(node, parent)
        self.add_child_to_parent(node, parent)
        self.set_visited(node)
    def is_finish(self):
        """Return True when every node known to the tree has been visited.

        Fix: the original used ``not filter(...)``, which is broken on
        Python 3 where ``filter`` returns a lazy, always-truthy iterator;
        ``all`` is equivalent on both Python 2 and 3.
        """
        return all(self.is_visited(n) for n in list(self.tree))
    def generate_maze(self):
        """Run the node-picking loop from (0, 0) until the maze is finished."""
        root = (0, 0)
        self.set_visited(root)
        current = root
        while not self.is_finish():
            next_node, current = self.pick_a_move(current)
            if next_node:
                self.visit_a_node(next_node, current)
                current = next_node
        return self.tree
    def get_classes_for_node(self, node):
        """Return the node's open-wall directions as a space-separated string."""
        # Fix: copy the child list -- the original appended the parent
        # direction directly onto the stored tree entry, corrupting the
        # tree on repeated calls.
        classes = list(self.tree[node][1])
        if self.tree[node][0]:
            classes.append(self.get_direction(node, self.tree[node][0]))
        return " ".join(classes)
    def draw_maze(self, filename):
        """Render the maze to ``filename`` as static HTML."""
        from renderer import render_static_html
        result = render_static_html(self)
        f = open(filename, mode='w')
        f.write(result.__unicode__())
        f.close()
    def get_direction(self, from_node, to_node):
        """Return the direction ('N'/'S'/'E'/'W') from one node to an
        adjacent node."""
        x1, y1 = from_node
        x2, y2 = to_node
        if x1 < x2:
            return 'S'
        if x1 > x2:
            return 'N'
        if y1 > y2:
            return 'E'
        if y1 < y2:
            return 'W'
class RecursiveBacktracker(Mazegen):
    """
    Generate a maze using the Recursive Backtracker algorithm.

    Visited nodes are pushed onto a stack; when the current node has no
    unvisited neighbors, nodes are popped until one with unvisited neighbors
    is found. Generation is complete when every pop has been exhausted.
    """
    def __init__(self, size):
        super(RecursiveBacktracker, self).__init__(size)
        self.stack = []
    def pick_a_move(self, node):
        """Return (target, source); backtrack via the stack when stuck."""
        # Fix: materialize the unvisited neighbors as a list -- the original
        # relied on Python 2 filter() returning a list; on Python 3 a filter
        # object is always truthy and not indexable by random.choice().
        choices = [c for c in self.generate_possible_choices(node)
                   if not self.is_visited(c)]
        if choices:
            self.stack.append(node)
            return random.choice(choices), node
        return self.pick_a_move(self.stack.pop())
class GrowingTree(Mazegen):
    """
    Generate a maze using the Growing Tree algorithm.

    Every visited node goes into an active set; when the current node has no
    unvisited neighbors it is removed from the set and another active node is
    chosen (randomly, oldest, or newest). Generation is complete when the
    active set is empty.
    """
    def __init__(self, size):
        super(GrowingTree, self).__init__(size)
        self.active_sets = set([])
    def pick_a_move(self, node):
        """Return (target, source), or (None, None) once the maze is done."""
        # Fix: materialize the unvisited neighbors as a list -- the original
        # relied on Python 2 filter() returning a list; on Python 3 a filter
        # object is always truthy and not usable with random.choice().
        choices = [c for c in self.generate_possible_choices(node)
                   if not self.is_visited(c)]
        if choices:
            return random.choice(choices), node
        self.active_sets.remove(node)
        if self.is_finish():
            return None, None
        # Pick one of the three classic growing-tree flavors at random:
        # random active cell, oldest cell, or newest cell.
        # (renamed from ``type`` to avoid shadowing the builtin)
        strategy = random.randint(0, 2)
        if strategy == 0:
            return self.pick_a_move(random.choice(list(self.active_sets)))
        if strategy == 1:
            return self.pick_a_move(list(self.active_sets)[0])
        return self.pick_a_move(list(self.active_sets)[-1])
    def set_visited(self, node):
        """Mark ``node`` visited and add it to the active set."""
        super(GrowingTree, self).set_visited(node)
        self.active_sets.add(node)
    def is_finish(self):
        """The maze is finished once the active set has been drained."""
        return len(self.active_sets) == 0
if __name__ == '__main__':
    from sys import argv, setrecursionlimit
    # GrowingTree.pick_a_move recurses while hunting for a usable active
    # cell, so raise the recursion ceiling for larger mazes.
    setrecursionlimit(2000)
    if not len(argv) == 3:
        # Parenthesized so this works both as a Py2 print statement and as
        # the Py3 print() function (the original bare ``print "..."`` was
        # Python-2-only syntax).
        print("Usage : mazegen [size] [filename]")
        exit(1)
    name, size, filename = argv
    maze_gen = GrowingTree(size=int(size))
    maze_gen.generate_maze()
    maze_gen.draw_maze(filename=filename)
|
{
"content_hash": "5ac1bf03c5348f0538da9d559753d172",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 181,
"avg_line_length": 33.72049689440994,
"alnum_prop": 0.5988211456990238,
"repo_name": "evostrov/python-maze-generator",
"id": "e158cf9c304dd74826660e1a4c58a085f0a2e9d4",
"size": "5429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mazegen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "508"
},
{
"name": "HTML",
"bytes": "4237"
},
{
"name": "JavaScript",
"bytes": "3966"
},
{
"name": "Python",
"bytes": "6661"
}
],
"symlink_target": ""
}
|
"""
System-level utilities and helper functions.
"""
import collections.abc
import math
import re
import unicodedata
import urllib
from oslo_utils._i18n import _
from oslo_utils import encodeutils
# Mapping of metric/binary unit prefixes to the exponent applied to the
# unit system's base (1000 or 1024) in string_to_bytes().
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, regex matching "<number><prefix><b|bit|B>").
# 'mixed' has no fixed base; string_to_bytes() chooses 1000 or 1024 per match.
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
    'mixed': (None, re.compile(r'(^[-+]?\d*\.?\d+)([kKMGT]i?)?(b|bit|B)$')),
}
# Lowercased values accepted by bool_from_string() / is_valid_boolstr().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
# Characters stripped, and whitespace/hyphen runs collapsed, by to_slug().
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
# NOTE(flaper87): The following globals are used by `mask_password` and
# `mask_dict_password`. They must all be lowercase.
_SANITIZE_KEYS = ['adminpass', 'admin_pass', 'password', 'admin_password',
                  'auth_token', 'new_pass', 'auth_password', 'secret_uuid',
                  'secret', 'sys_pswd', 'token', 'configdrive',
                  'chappassword', 'encrypted_key', 'private_key',
                  'fernetkey', 'sslkey', 'passphrase',
                  'cephclusterfsid', 'octaviaheartbeatkey', 'rabbitcookie',
                  'cephmanilaclientkey', 'pacemakerremoteauthkey',
                  'designaterndckey', 'cephadminkey', 'heatauthencryptionkey',
                  'cephclientkey', 'keystonecredential',
                  'barbicansimplecryptokek', 'cephrgwkey', 'swifthashsuffix',
                  'migrationsshkey', 'cephmdskey', 'cephmonkey']
# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
# key -> list of compiled patterns, split by how many capture groups the
# substitution template used in mask_password() needs.
_SANITIZE_PATTERNS_2 = {}
_SANITIZE_PATTERNS_1 = {}
_SANITIZE_PATTERNS_WILDCARD = {}
# NOTE(amrith): Some regular expressions have only one parameter, some
# have two parameters. Use different lists of patterns here.
_FORMAT_PATTERNS_1 = [r'(%(key)s[0-9]*\s*[=]\s*)[^\s^\'^\"]+']
_FORMAT_PATTERNS_2 = [r'(%(key)s[0-9]*\s*[=]\s*[\"\'])[^\"\']*([\"\'])',
                      r'(%(key)s[0-9]*\s*[=]\s*[\"])[^\"]*([\"])',
                      r'(%(key)s[0-9]*\s*[=]\s*[\'])[^\']*([\'])',
                      r'(%(key)s[0-9]*\s+[\"\'])[^\"\']*([\"\'])',
                      r'([-]{2}%(key)s[0-9]*\s+)[^\'^\"^=^\s]+([\s]*)',
                      r'(<%(key)s[0-9]*>)[^<]*(</%(key)s[0-9]*>)',
                      r'([\"\']%(key)s[0-9]*[\"\']\s*:\s*[\"\'])[^\"\']*'
                      r'([\"\'])',
                      r'([\'"][^"\']*%(key)s[0-9]*[\'"]\s*:\s*u?[\'"])[^\"\']*'
                      r'([\'"])',
                      r'([\'"][^\'"]*%(key)s[0-9]*[\'"]\s*,\s*\'--?[A-z]+'
                      r'\'\s*,\s*u?[\'"])[^\"\']*([\'"])',
                      r'(%(key)s[0-9]*\s*--?[A-z]+\s*)\S+(\s*)']
_FORMAT_PATTERNS_WILDCARD = [r'([\'\"][^\"\']*%(key)s[0-9]*[\'\"]\s*:\s*u?[\'\"].*[\'\"])[^\"\']*([\'\"])']  # noqa: E501
# NOTE(dhellmann): Keep a separate list of patterns by key so we only
# need to apply the substitutions for keys we find using a quick "in"
# test.
for key in _SANITIZE_KEYS:
    _SANITIZE_PATTERNS_1[key] = []
    _SANITIZE_PATTERNS_2[key] = []
    _SANITIZE_PATTERNS_WILDCARD[key] = []
    # Each format template is specialized to the concrete key, then compiled
    # case-insensitively (DOTALL so values spanning lines are still masked).
    for pattern in _FORMAT_PATTERNS_2:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL | re.IGNORECASE)
        _SANITIZE_PATTERNS_2[key].append(reg_ex)
    for pattern in _FORMAT_PATTERNS_1:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL | re.IGNORECASE)
        _SANITIZE_PATTERNS_1[key].append(reg_ex)
    for pattern in _FORMAT_PATTERNS_WILDCARD:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL | re.IGNORECASE)
        _SANITIZE_PATTERNS_WILDCARD[key].append(reg_ex)
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in:

        ('True', 'true', 'On', 'on', '1')

    is interpreted as a boolean True.

    Useful for JSON-decoded stuff and config file parsing
    """
    return 1 if bool_from_string(subject) else 0
def bool_from_string(subject, strict=False, default=False):
    """Interpret a subject as a boolean.

    A subject can be a boolean, a string or an integer. A boolean passes
    straight through; anything else is converted to a string and matched
    case-insensitively against the known true strings ('t', 'true', 'on',
    'y', 'yes', '1') and false strings ('f', 'false', 'off', 'n', 'no', '0').

    With ``strict=False`` (the default), an unrecognized value yields
    ``default``. With ``strict=True``, unrecognized values -- including
    None -- raise a ValueError, which is useful when validating values
    passed in from an API call.
    """
    if isinstance(subject, bool):
        return subject

    normalized = str(subject).strip().lower()

    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default

    # Strict mode: report the offending value alongside everything we would
    # have accepted.
    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def is_valid_boolstr(value):
    """Check if the provided string is a valid bool string or not.

    :param value: value to verify
    :type value: string
    :returns: true if value is boolean string, false otherwise

    .. versionadded:: 3.17
    """
    return str(value).lower() in (TRUE_STRINGS + FALSE_STRINGS)
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC / mixed::

        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB

    The units supported for SI ::

        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB

    SI units are interpreted as power-of-ten (e.g. 1kb = 1000b). Note
    that the SI unit system does not support capital letter 'K'

    IEC units are interpreted as power-of-two (e.g. 1MiB = 1MB =
    1024b)

    Mixed units interpret the "i" to mean IEC, and no "i" to mean SI
    (e.g. 1kb = 1000b, 1kib == 1024b). Additionaly, mixed units
    interpret 'K' as power-of-ten. This mode is not particuarly
    useful for new code, but can help with compatability for parsers
    such as GNU parted.

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        msg = _('Invalid unit system: "%s"') % unit_system
        raise ValueError(msg)
    match = reg_ex.match(text)
    if match:
        magnitude = float(match.group(1))
        unit_prefix = match.group(2)
        # A 'b'/'bit' suffix means the quantity was given in bits.
        if match.group(3) in ['b', 'bit']:
            magnitude /= 8
        # In the mixed matcher, IEC units (with a trailing 'i') are
        # interpreted as power-of-two, others as power-of-ten
        if unit_system == 'mixed':
            if unit_prefix and not unit_prefix.endswith('i'):
                # For maximum compatability in mixed mode, we understand
                # "K" (which is not strict SI) as "k".
                # Fix: the original compared the *bound method*
                # ``unit_prefix.startswith`` to 'K', which is always False.
                if unit_prefix.startswith('K'):
                    unit_prefix = 'k'
                base = 1000
            else:
                base = 1024
    else:
        msg = _('Invalid string format: %s') % text
        raise ValueError(msg)
    if not unit_prefix:
        res = magnitude
    else:
        res = magnitude * pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    if return_int:
        return int(math.ceil(res))
    return res
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert spaces
    to hyphens.

    Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    decoded = encodeutils.safe_decode(value, incoming, errors)
    # Reduce accents/compatibility characters to plain ASCII; the encoding
    # ("ascii") and error handling ("ignore") are fixed and the types known
    # (unicode in, str out), so plain encode/decode is safe here.
    ascii_text = unicodedata.normalize("NFKD", decoded)
    ascii_text = ascii_text.encode("ascii", "ignore").decode("ascii")
    stripped = SLUGIFY_STRIP_RE.sub("", ascii_text).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", stripped)
# NOTE(dhellmann): Before submitting a patch to add a new argument to
# this function to allow the caller to pass in "extra" or "additional"
# or "replacement" patterns to be masked out, please note that we have
# discussed that feature many times and always rejected it based on
# the desire to have Oslo functions behave consistently across all
# projects and *especially* to have security features work the same
# way no matter where they are used. If every project adopted its own
# set patterns for secret values, it would be very difficult to audit
# the logging to ensure that everything is properly masked. So, please
# either add your pattern to the module-level variables at the top of
# this file or, even better, pick an existing pattern or key to use in
# your application to ensure that the value is masked by this
# function.
def mask_password(message, secret="***"):  # nosec
    """Replace password with *secret* in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"

    .. versionadded:: 0.2

    .. versionchanged:: 1.1
       Replace also ``'auth_token'``, ``'new_pass'`` and ``'auth_password'``
       keys.

    .. versionchanged:: 1.1.1
       Replace also ``'secret_uuid'`` key.

    .. versionchanged:: 1.5
       Replace also ``'sys_pswd'`` key.

    .. versionchanged:: 2.6
       Replace also ``'token'`` key.

    .. versionchanged:: 2.7
       Replace also ``'secret'`` key.

    .. versionchanged:: 3.4
       Replace also ``'configdrive'`` key.

    .. versionchanged:: 3.8
       Replace also ``'CHAPPASSWORD'`` key.
    """
    try:
        message = str(message)
    except UnicodeDecodeError:  # nosec
        # NOTE(jecarey): Temporary fix to handle cases where message is a
        # byte string. A better solution will be provided in Kilo.
        pass
    # Substitution templates: keep the captured text around the secret
    # (group 1 before it, and -- for the two-group patterns -- group 2 after).
    substitute1 = r'\g<1>' + secret
    substitute2 = r'\g<1>' + secret + r'\g<2>'
    substitute_wildcard = r'\g<1>'
    # NOTE(ldbragst): Check to see if anything in message contains any key
    # specified in _SANITIZE_KEYS, if not then just return the message since
    # we don't have to mask any passwords.
    for key in _SANITIZE_KEYS:
        if key in message.lower():
            for pattern in _SANITIZE_PATTERNS_2[key]:
                message = re.sub(pattern, substitute2, message)
            for pattern in _SANITIZE_PATTERNS_1[key]:
                message = re.sub(pattern, substitute1, message)
            # NOTE(hberaud): Those case are poorly handled by previous
            # patterns. They are passwords with quotes or double quotes.
            # They also needs a different way to substitute group this is why
            # they aren't fix in the pattern 1 or 2.
            for pattern in _SANITIZE_PATTERNS_WILDCARD[key]:
                message = re.sub(pattern, substitute_wildcard, message)
    return message
def mask_dict_password(dictionary, secret="***"):  # nosec
    """Replace password with *secret* in a dictionary recursively.

    :param dictionary: The dictionary which includes secret information.
    :param secret: value with which to replace secret information.
    :returns: The dictionary with string substitutions.

    A dictionary (which may contain nested dictionaries) contains
    information (such as passwords) which should not be revealed, and
    this function helps detect and replace those with the 'secret'
    provided (or `***` if none is provided).

    Substitution happens in one of three situations: a key that looks
    secret-bearing has its whole value replaced with *secret*; a plain
    string value is masked with ``mask_password()``; and a nested
    dictionary is masked recursively.

    For example:

    >>> mask_dict_password({'password': 'd81juxmEW_',
    >>>                     'user': 'admin',
    >>>                     'home-dir': '/home/admin'},
    >>>                    '???')
    {'password': '???', 'user': 'admin', 'home-dir': '/home/admin'}

    For example (the value is masked using mask_password())

    >>> mask_dict_password({'password': '--password d81juxmEW_',
    >>>                     'user': 'admin',
    >>>                     'home-dir': '/home/admin'},
    >>>                    '???')
    {'password': '--password ???', 'user': 'admin',
     'home-dir': '/home/admin'}

    For example (a nested dictionary is masked):

    >>> mask_dict_password({"nested": {'password': 'd81juxmEW_',
    >>>                                'user': 'admin',
    >>>                                'home': '/home/admin'}},
    >>>                    '???')
    {"nested": {'password': '???', 'user': 'admin', 'home': '/home/admin'}}

    .. versionadded:: 3.4
    """
    if not isinstance(dictionary, collections.abc.Mapping):
        raise TypeError("Expected a Mapping, got %s instead."
                        % type(dictionary))
    masked = {}
    for key, value in dictionary.items():
        if isinstance(value, collections.abc.Mapping):
            # Nested mapping: mask it recursively.
            masked[key] = mask_dict_password(value, secret=secret)
        elif isinstance(key, str) and any(
                sani_key in key.lower() for sani_key in _SANITIZE_KEYS):
            # The key name itself indicates a secret: hide the whole value.
            masked[key] = secret
        elif isinstance(value, str):
            # Plain strings may still embed secrets, e.g. "--password foo".
            masked[key] = mask_password(value, secret=secret)
        else:
            # Anything else is passed through untouched.
            masked[key] = value
    return masked
def is_int_like(val):
    """Check if a value looks like an integer with base 10.

    :param val: Value to verify
    :type val: string
    :returns: bool

    .. versionadded:: 1.1
    """
    # A value "looks like" an int only when converting it to int and back
    # to a string reproduces the original string form exactly, so "01",
    # 1.5 and 1.0 all fail the check.
    try:
        as_int = int(val)
    except (TypeError, ValueError):
        return False
    return str(as_int) == str(val)
def check_string_length(value, name=None, min_length=0, max_length=None):
    """Check the length of specified string.

    :param value: the value of the string
    :param name: the name of the string
    :param min_length: the min_length of the string
    :param max_length: the max_length of the string
    :raises TypeError, ValueError: For any invalid input.

    .. versionadded:: 3.7
    """
    # When no name is supplied, error messages report the value itself.
    label = value if name is None else name
    if not isinstance(value, str):
        raise TypeError(_("%s is not a string or unicode") % label)
    actual_length = len(value)
    if actual_length < min_length:
        raise ValueError(_("%(name)s has %(length)s characters, less than "
                           "%(min_length)s.") % {'name': label,
                                                 'length': actual_length,
                                                 'min_length': min_length})
    # A falsy max_length (None or 0) disables the upper-bound check,
    # matching the historical truthiness test.
    if max_length and actual_length > max_length:
        raise ValueError(_("%(name)s has %(length)s characters, more than "
                           "%(max_length)s.") % {'name': label,
                                                 'length': actual_length,
                                                 'max_length': max_length})
def validate_integer(value, name, min_value=None, max_value=None):
    """Make sure that value is a valid integer, potentially within range.

    :param value: value of the integer
    :param name: name of the integer
    :param min_value: min_value of the integer
    :param max_value: max_value of the integer
    :returns: integer
    :raises: ValueError if value is an invalid integer

    .. versionadded:: 3.33
    """
    # Round-trip through str so that e.g. floats like 1.5 are rejected
    # rather than truncated.
    try:
        result = int(str(value))
    except (ValueError, UnicodeEncodeError):
        raise ValueError(_('%(value_name)s must be an integer'
                           ) % {'value_name': name})
    if min_value is not None and result < min_value:
        raise ValueError(_('%(value_name)s must be >= %(min_value)d'
                           ) % {'value_name': name, 'min_value': min_value})
    if max_value is not None and result > max_value:
        raise ValueError(_('%(value_name)s must be <= %(max_value)d'
                           ) % {'value_name': name, 'max_value': max_value})
    return result
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
    """Validate and split the given HTTP request path.

    **Examples**::

        ['a'] = _split_path('/a')
        ['a', None] = _split_path('/a', 1, 2)
        ['a', 'c'] = _split_path('/a/c', 1, 2)
        ['a', 'c', 'o/r'] = _split_path('/a/c/o/r', 1, 3, True)

    :param path: HTTP Request path to be split
    :param minsegs: Minimum number of segments to be extracted
    :param maxsegs: Maximum number of segments to be extracted
    :param rest_with_last: If True, trailing data will be returned as part
                           of last segment. If False, and there is
                           trailing data, raises ValueError.
    :returns: list of segments with a length of maxsegs (non-existent
              segments will return as None)
    :raises: ValueError if given an invalid path

    .. versionadded:: 3.11
    """
    if not maxsegs:
        maxsegs = minsegs
    if minsegs > maxsegs:
        raise ValueError(_('minsegs > maxsegs: %(min)d > %(max)d)') %
                         {'min': minsegs, 'max': maxsegs})
    if rest_with_last:
        # Split before adjusting the limits so that the final element keeps
        # any remaining '/'-joined trailing data.
        segs = path.split('/', maxsegs)
        # Bump both limits by one to account for the empty segment produced
        # by the required leading '/'.
        minsegs += 1
        maxsegs += 1
        count = len(segs)
        # Reject paths that don't start with '/', have too few/many
        # segments, or contain empty required segments (e.g. '//').
        if (segs[0] or count < minsegs or count > maxsegs or
                '' in segs[1:minsegs]):
            raise ValueError(_('Invalid path: %s') % urllib.parse.quote(path))
    else:
        minsegs += 1
        maxsegs += 1
        # Split one element past the requested maximum so unexpected
        # trailing data shows up as an extra, detectable segment.
        segs = path.split('/', maxsegs)
        count = len(segs)
        # Same checks as above, plus: a non-empty overflow segment means
        # there was trailing data, which is not allowed in this mode.
        if (segs[0] or count < minsegs or count > maxsegs + 1 or
                '' in segs[1:minsegs] or
                (count == maxsegs + 1 and segs[maxsegs])):
            raise ValueError(_('Invalid path: %s') % urllib.parse.quote(path))
    # Drop the leading empty segment and pad with None up to the requested
    # number of segments.
    segs = segs[1:maxsegs]
    segs.extend([None] * (maxsegs - 1 - len(segs)))
    return segs
def split_by_commas(value):
    """Split values by commas and quotes according to api-wg

    :param value: value to be split

    .. versionadded:: 3.17
    """
    # pyparsing is a slow import; defer loading until we need it
    import pyparsing as pp

    quoted = pp.QuotedString(quoteChar='"', escChar='\\')
    unquoted = pp.Word(pp.printables, excludeChars='",')
    token = quoted | unquoted
    # Anchor at both ends so the grammar must consume the whole input.
    grammar = pp.stringStart + pp.delimitedList(token) + pp.stringEnd
    try:
        return list(grammar.parseString(value))
    except pp.ParseException:
        raise ValueError("Invalid value: %s" % value)
|
{
"content_hash": "0aa47c8dcf478c08e0f302159faf3cac",
"timestamp": "",
"source": "github",
"line_count": 573,
"max_line_length": 121,
"avg_line_length": 36.410122164048865,
"alnum_prop": 0.5812682739778555,
"repo_name": "openstack/oslo.utils",
"id": "2bd89ee976a1c47c4fcef711012ed1c3ea1ad0d1",
"size": "21500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_utils/strutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "334957"
}
],
"symlink_target": ""
}
|
"""Support for the LiteJet lighting system."""
import logging
import pylitejet
from serial import SerialException
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PORT
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType
from .const import CONF_EXCLUDE_NAMES, CONF_INCLUDE_SWITCHES, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
# YAML configuration for this integration is deprecated (cv.deprecated);
# when present it is imported into a config entry by async_setup.
CONFIG_SCHEMA = vol.Schema(
    vol.All(
        cv.deprecated(DOMAIN),
        {
            DOMAIN: vol.Schema(
                {
                    # Serial port the LiteJet MCP is attached to.
                    vol.Required(CONF_PORT): cv.string,
                    # Names to exclude — presumably from entity creation;
                    # confirm against the platform code.
                    vol.Optional(CONF_EXCLUDE_NAMES): vol.All(
                        cv.ensure_list, [cv.string]
                    ),
                    vol.Optional(CONF_INCLUDE_SWITCHES, default=False): cv.boolean,
                }
            )
        },
    ),
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the LiteJet component."""
    if DOMAIN not in config:
        # Nothing in configuration.yaml; setup happens via config entries.
        return True
    if hass.config_entries.async_entries(DOMAIN):
        # A config entry already exists; don't import the YAML again.
        return True
    # Trigger the import flow to migrate the YAML config to a config entry.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=config[DOMAIN]
        )
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up LiteJet via a config entry."""
    device_port = entry.data[CONF_PORT]
    try:
        connection = pylitejet.LiteJet(device_port)
    except SerialException as err:
        # The serial device may be temporarily unavailable; ask Home
        # Assistant to retry the setup later.
        _LOGGER.error(
            "Error connecting to the LiteJet MCP at %s", device_port, exc_info=err
        )
        raise ConfigEntryNotReady from err

    # Share the single LiteJet connection with the entity platforms.
    hass.data[DOMAIN] = connection
    await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a LiteJet config entry."""
    unloaded = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unloaded:
        # Drop the shared connection and close the serial link.
        system = hass.data.pop(DOMAIN)
        system.close()
    return unloaded
|
{
"content_hash": "512176f4e35edf101af102ea5aaa1484",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 95,
"avg_line_length": 31.453333333333333,
"alnum_prop": 0.658753709198813,
"repo_name": "mezz64/home-assistant",
"id": "5131ee52e672bc6fae44ddca89a6133615e38558",
"size": "2359",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/litejet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from collections import deque
import sys
import time
from eventlet.pools import Pool
from eventlet import timeout
from eventlet import hubs
from eventlet.hubs.timer import Timer
from eventlet.greenthread import GreenThread
class ConnectTimeout(Exception):
    """Raised when establishing a new database connection takes longer
    than the pool's connect_timeout."""
    pass
class BaseConnectionPool(Pool):
    """Pool of database connections with idle- and age-based expiration."""

    def __init__(self, db_module,
                 min_size=0, max_size=4,
                 max_idle=10, max_age=30,
                 connect_timeout=5,
                 *args, **kwargs):
        """
        Constructs a pool with at least *min_size* connections and at most
        *max_size* connections.  Uses *db_module* to construct new connections.

        The *max_idle* parameter determines how long pooled connections can
        remain idle, in seconds.  After *max_idle* seconds have elapsed
        without the connection being used, the pool closes the connection.

        *max_age* is how long any particular connection is allowed to live.
        Connections that have been open for longer than *max_age* seconds are
        closed, regardless of idle time.  If *max_age* is 0, all connections are
        closed on return to the pool, reducing it to a concurrency limiter.

        *connect_timeout* is the duration in seconds that the pool will wait
        before timing out on connect() to the database.  If triggered, the
        timeout will raise a ConnectTimeout from get().

        The remainder of the arguments are used as parameters to the
        *db_module*'s connection constructor.
        """
        assert(db_module)
        self._db_module = db_module
        self._args = args
        self._kwargs = kwargs
        self.max_idle = max_idle
        self.max_age = max_age
        self.connect_timeout = connect_timeout
        self._expiration_timer = None
        super(BaseConnectionPool, self).__init__(min_size=min_size,
                                                 max_size=max_size,
                                                 order_as_stack=True)

    def _schedule_expiration(self):
        """ Sets up a timer that will call _expire_old_connections when the
        oldest connection currently in the free pool is ready to expire.  This
        is the earliest possible time that a connection could expire, thus, the
        timer will be running as infrequently as possible without missing a
        possible expiration.

        If this function is called when a timer is already scheduled, it does
        nothing.

        If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
        """
        # NOTE: this used to be "self.max_age is 0 or self.max_idle is 0".
        # Identity comparison against a literal relies on CPython's small-int
        # caching and silently misses 0.0, so use equality instead.
        if self.max_age == 0 or self.max_idle == 0:
            # expiration is unnecessary because all connections will be expired
            # on put
            return

        if (self._expiration_timer is not None
                and not getattr(self._expiration_timer, 'called', False)):
            # the next timer is already scheduled
            return

        try:
            now = time.time()
            self._expire_old_connections(now)
            # the last item in the list, because of the stack ordering,
            # is going to be the most-idle
            idle_delay = (self.free_items[-1][0] - now) + self.max_idle
            oldest = min([t[1] for t in self.free_items])
            age_delay = (oldest - now) + self.max_age

            next_delay = min(idle_delay, age_delay)
        except (IndexError, ValueError):
            # no free items, unschedule ourselves
            self._expiration_timer = None
            return

        if next_delay > 0:
            # set up a continuous self-calling loop
            self._expiration_timer = Timer(
                next_delay, GreenThread(hubs.get_hub().greenlet).switch,
                self._schedule_expiration, [], {})
            self._expiration_timer.schedule()

    def _expire_old_connections(self, now):
        """ Iterates through the open connections contained in the pool, closing
        ones that have remained idle for longer than max_idle seconds, or have
        been in existence for longer than max_age seconds.

        *now* is the current time, as returned by time.time().
        """
        original_count = len(self.free_items)
        expired = [
            conn
            for last_used, created_at, conn in self.free_items
            if self._is_expired(now, last_used, created_at)]

        new_free = [
            (last_used, created_at, conn)
            for last_used, created_at, conn in self.free_items
            if not self._is_expired(now, last_used, created_at)]
        self.free_items.clear()
        self.free_items.extend(new_free)

        # adjust the current size counter to account for expired
        # connections
        self.current_size -= original_count - len(self.free_items)

        for conn in expired:
            self._safe_close(conn, quiet=True)

    def _is_expired(self, now, last_used, created_at):
        """ Returns true if a connection with the given last-used/created
        timestamps should be expired.  (Does not close anything itself.)"""
        if (self.max_idle <= 0
                or self.max_age <= 0
                or now - last_used > self.max_idle
                or now - created_at > self.max_age):
            return True
        return False

    def _unwrap_connection(self, conn):
        """ If the connection was wrapped by a subclass of
        BaseConnectionWrapper and is still functional (as determined
        by the __nonzero__ method), returns the unwrapped connection.
        If anything goes wrong with this process, returns None.
        """
        base = None
        try:
            if conn:
                base = conn._base
                conn._destroy()
            else:
                base = None
        except AttributeError:
            pass
        return base

    def _safe_close(self, conn, quiet=False):
        """ Closes the (already unwrapped) connection, squelching any
        exceptions."""
        try:
            conn.close()
        except (KeyboardInterrupt, SystemExit):
            raise
        except AttributeError:
            pass  # conn is None, or junk
        except:
            if not quiet:
                print("Connection.close raised: %s" % (sys.exc_info()[1]))

    def get(self):
        """Check a connection out of the pool, wrapped so that close()
        returns it to the pool instead of really closing it."""
        conn = super(BaseConnectionPool, self).get()

        # None is a flag value that means that put got called with
        # something it couldn't use
        if conn is None:
            try:
                conn = self.create()
            except Exception:
                # unconditionally increase the free pool because
                # even if there are waiters, doing a full put
                # would incur a greenlib switch and thus lose the
                # exception stack
                self.current_size -= 1
                raise

        # if the call to get() draws from the free pool, it will come
        # back as a tuple
        if isinstance(conn, tuple):
            _last_used, created_at, conn = conn
        else:
            created_at = time.time()

        # wrap the connection so the consumer can call close() safely
        wrapped = PooledConnectionWrapper(conn, self)
        # annotating the wrapper so that when it gets put in the pool
        # again, we'll know how old it is
        wrapped._db_pool_created_at = created_at
        return wrapped

    def put(self, conn):
        """Return a connection to the pool, discarding it when it has
        expired or is no longer usable."""
        created_at = getattr(conn, '_db_pool_created_at', 0)
        now = time.time()
        conn = self._unwrap_connection(conn)

        if self._is_expired(now, now, created_at):
            self._safe_close(conn, quiet=False)
            conn = None
        else:
            # rollback any uncommitted changes, so that the next client
            # has a clean slate.  This also pokes the connection to see if
            # it's dead or None
            try:
                if conn:
                    conn.rollback()
            except KeyboardInterrupt:
                raise
            except:
                # we don't care what the exception was, we just know the
                # connection is dead
                print("WARNING: connection.rollback raised: %s"
                      % (sys.exc_info()[1]))
                conn = None

        if conn is not None:
            super(BaseConnectionPool, self).put((now, created_at, conn))
        else:
            # wake up any waiters with a flag value that indicates
            # they need to manufacture a connection
            if self.waiting() > 0:
                super(BaseConnectionPool, self).put(None)
            else:
                # no waiters -- just change the size
                self.current_size -= 1
        self._schedule_expiration()

    def clear(self):
        """ Close all connections that this pool still holds a reference to,
        and removes all references to them.
        """
        if self._expiration_timer:
            self._expiration_timer.cancel()
        free_items, self.free_items = self.free_items, deque()
        for item in free_items:
            # Free items created using min_size>0 are not tuples.
            conn = item[2] if isinstance(item, tuple) else item
            self._safe_close(conn, quiet=True)

    def __del__(self):
        self.clear()
class TpooledConnectionPool(BaseConnectionPool):
    """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
    connections.
    """
    def create(self):
        # Pool entries are (last_used, created_at, conn); both timestamps
        # start as "now" for a freshly created connection.
        now = time.time()
        conn = self.connect(self._db_module, self.connect_timeout,
                            *self._args, **self._kwargs)
        return now, now, conn

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        # Bound the blocking connect with an eventlet timeout that raises
        # ConnectTimeout when it fires.
        timer = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            from eventlet import tpool
            raw_conn = tpool.execute(db_module.connect, *args, **kw)
            # Proxy the connection (and any cursors it creates) through the
            # eventlet threadpool.
            return tpool.Proxy(raw_conn, autowrap_names=('cursor',))
        finally:
            timer.cancel()
class RawConnectionPool(BaseConnectionPool):
    """A pool which gives out plain database connections.
    """
    def create(self):
        # Pool entries are (last_used, created_at, conn); both timestamps
        # start as "now" for a freshly created connection.
        now = time.time()
        conn = self.connect(self._db_module, self.connect_timeout,
                            *self._args, **self._kwargs)
        return now, now, conn

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        # Bound the blocking connect with a timeout that raises
        # ConnectTimeout when it fires.
        timer = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            return db_module.connect(*args, **kw)
        finally:
            timer.cancel()
# The default ConnectionPool is the tpool-backed one, which runs blocking
# DB calls through the eventlet threadpool (see TpooledConnectionPool).
ConnectionPool = TpooledConnectionPool
class GenericConnectionWrapper(object):
    """Forwards the standard (MySQLdb-style) DB-API connection methods to
    a wrapped base connection held in ``self._base``."""

    def __init__(self, baseconn):
        self._base = baseconn

    # Context-manager protocol and repr delegate straight through.
    def __enter__(self):
        return self._base.__enter__()

    def __exit__(self, exc, value, tb):
        return self._base.__exit__(exc, value, tb)

    def __repr__(self):
        return self._base.__repr__()

    # One-for-one delegation of the connection API.
    def affected_rows(self):
        return self._base.affected_rows()

    def autocommit(self, *args, **kwargs):
        return self._base.autocommit(*args, **kwargs)

    def begin(self):
        return self._base.begin()

    def change_user(self, *args, **kwargs):
        return self._base.change_user(*args, **kwargs)

    def character_set_name(self, *args, **kwargs):
        return self._base.character_set_name(*args, **kwargs)

    def close(self, *args, **kwargs):
        return self._base.close(*args, **kwargs)

    def commit(self, *args, **kwargs):
        return self._base.commit(*args, **kwargs)

    def cursor(self, *args, **kwargs):
        return self._base.cursor(*args, **kwargs)

    def dump_debug_info(self, *args, **kwargs):
        return self._base.dump_debug_info(*args, **kwargs)

    def errno(self, *args, **kwargs):
        return self._base.errno(*args, **kwargs)

    def error(self, *args, **kwargs):
        return self._base.error(*args, **kwargs)

    def errorhandler(self, *args, **kwargs):
        return self._base.errorhandler(*args, **kwargs)

    def insert_id(self, *args, **kwargs):
        return self._base.insert_id(*args, **kwargs)

    def literal(self, *args, **kwargs):
        return self._base.literal(*args, **kwargs)

    def set_character_set(self, *args, **kwargs):
        return self._base.set_character_set(*args, **kwargs)

    def set_sql_mode(self, *args, **kwargs):
        return self._base.set_sql_mode(*args, **kwargs)

    def show_warnings(self):
        return self._base.show_warnings()

    def warning_count(self):
        return self._base.warning_count()

    def ping(self, *args, **kwargs):
        return self._base.ping(*args, **kwargs)

    def query(self, *args, **kwargs):
        return self._base.query(*args, **kwargs)

    def rollback(self, *args, **kwargs):
        return self._base.rollback(*args, **kwargs)

    def select_db(self, *args, **kwargs):
        return self._base.select_db(*args, **kwargs)

    def set_server_option(self, *args, **kwargs):
        return self._base.set_server_option(*args, **kwargs)

    def server_capabilities(self, *args, **kwargs):
        return self._base.server_capabilities(*args, **kwargs)

    def shutdown(self, *args, **kwargs):
        return self._base.shutdown(*args, **kwargs)

    def sqlstate(self, *args, **kwargs):
        return self._base.sqlstate(*args, **kwargs)

    def stat(self, *args, **kwargs):
        return self._base.stat(*args, **kwargs)

    def store_result(self, *args, **kwargs):
        return self._base.store_result(*args, **kwargs)

    def string_literal(self, *args, **kwargs):
        return self._base.string_literal(*args, **kwargs)

    def thread_id(self, *args, **kwargs):
        return self._base.thread_id(*args, **kwargs)

    def use_result(self, *args, **kwargs):
        return self._base.use_result(*args, **kwargs)
class PooledConnectionWrapper(GenericConnectionWrapper):
    """ A connection wrapper where:
    - the close method returns the connection to the pool instead of closing it directly
    - ``bool(conn)`` returns a reasonable value
    - returns itself to the pool if it gets garbage collected
    """
    def __init__(self, baseconn, pool):
        super(PooledConnectionWrapper, self).__init__(baseconn)
        self._pool = pool

    def __nonzero__(self):
        # Truthy only while we still hold a live base connection.
        return hasattr(self, '_base') and bool(self._base)

    def _destroy(self):
        # Sever the links to the pool and base connection so any further
        # use through this wrapper fails.
        self._pool = None
        try:
            del self._base
        except AttributeError:
            pass

    def close(self):
        """ Return the connection to the pool, and remove the
        reference to it so that you can't use it again through this
        wrapper object.
        """
        if self and self._pool:
            self._pool.put(self)
        self._destroy()

    def __del__(self):
        # Auto-returning the connection from __del__ caused problems when
        # it ran on the main coroutine, so garbage collection deliberately
        # does nothing here.
        return
class DatabaseConnector(object):
    """\
    This is an object which will maintain a collection of database
    connection pools on a per-host basis."""

    def __init__(self, module, credentials,
                 conn_pool=None, *args, **kwargs):
        """\
        constructor
        *module*
            Database module to use.
        *credentials*
            Mapping of hostname to connect arguments (e.g. username and password)"""
        assert(module)
        # Fall back to the module default pool class only when none given.
        self._conn_pool_class = (ConnectionPool if conn_pool is None
                                 else conn_pool)
        self._module = module
        self._args = args
        self._kwargs = kwargs
        # this is a map of hostname to username/password
        self._credentials = credentials
        self._databases = {}

    def credentials_for(self, host):
        """Return connect arguments for *host*, falling back to the
        'default' entry when the host has no explicit credentials."""
        if host in self._credentials:
            return self._credentials[host]
        return self._credentials.get('default', None)

    def get(self, host, dbname):
        """ Returns a ConnectionPool to the target host and schema. """
        key = (host, dbname)
        if key not in self._databases:
            # Lazily build one pool per (host, dbname) pair.
            pool_kwargs = self._kwargs.copy()
            pool_kwargs['db'] = dbname
            pool_kwargs['host'] = host
            pool_kwargs.update(self.credentials_for(host))
            self._databases[key] = self._conn_pool_class(
                self._module, *self._args, **pool_kwargs)
        return self._databases[key]
|
{
"content_hash": "9c44c13fe49fab01e7acdaaaee28f344",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 105,
"avg_line_length": 40.29573934837093,
"alnum_prop": 0.5971513869884314,
"repo_name": "ioram7/keystone-federado-pgid2013",
"id": "d388c19de2cd9f3f4fa73a0bc3a0f2a76990d524",
"size": "16078",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "build/eventlet/eventlet/db_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1841"
},
{
"name": "C",
"bytes": "10584735"
},
{
"name": "C++",
"bytes": "19231"
},
{
"name": "CSS",
"bytes": "172341"
},
{
"name": "JavaScript",
"bytes": "530938"
},
{
"name": "Python",
"bytes": "26306359"
},
{
"name": "Shell",
"bytes": "38138"
},
{
"name": "XSLT",
"bytes": "306125"
}
],
"symlink_target": ""
}
|
import os
import re
import time
import netaddr
import six
from six import moves
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils
from rally.deployment.serverprovider import provider
from rally import exceptions
LOG = logging.getLogger(__name__)

# Matches an "inet A.B.C.D/NN ..." line from `ip addr` output and captures
# the IPv4 address.
INET_ADDR_RE = re.compile(r" *inet ((\d+\.){3}\d+)\/\d+ .*")

# iptables DNAT rule template forwarding {host_ip}:{port} to a container's
# ssh port ({ip}:22); {action} is "I" to insert the rule or "D" to delete it.
IPT_PORT_TEMPLATE = ("iptables -t nat -{action} PREROUTING -d {host_ip}"
                     " -p tcp --syn --dport {port}"
                     " -j DNAT --to-destination {ip}:22")
def _get_script(filename):
    """Return the contents of the bundled "lxc" helper script *filename*
    as a binary, file-like object.

    The script is read eagerly and returned as an in-memory buffer so the
    underlying file handle is closed here rather than leaked to callers
    (the previous implementation returned the open file object, which was
    never closed).  Callers only use the result as a readable stream
    (``.read()`` / ssh stdin), which BytesIO satisfies.
    """
    path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                        "lxc", filename))
    with open(path, "rb") as script_file:
        return six.BytesIO(script_file.read())
def _get_script_from_template(template_filename, **kwargs):
    """Load the named helper script and substitute *kwargs* into its
    ``{placeholders}``, returning the result as a file-like object."""
    raw_template = _get_script(template_filename).read()
    return moves.StringIO(raw_template.format(**kwargs))
class LxcHost(object):
    """Represent lxc enabled host."""

    def __init__(self, server, config):
        """Initialize LxcHost object.

        :param server: Server object
        :param config: dictionary with following key/values:
            network     ipv4 network for containers
            lxc_bridge  bridge interface name (default lxcbr0)
            tunnel_to   ip address for make tunnel to
            forward_ssh use ssh port forwarding (do not use for
                        controller nodes)
        """
        self.config = config
        if "network" in config:
            self.network = netaddr.IPNetwork(config["network"])
        else:
            self.network = None
        self.server = server
        self.containers = []
        self.path = "/var/lib/lxc/"
        # container ip -> forwarded host ssh port; filled lazily by get_port()
        self._port_cache = {}

    def _get_updated_server(self, **kwargs):
        # Clone this host's server credentials with some fields overridden
        # (e.g. a different host or port).
        credentials = self.server.get_credentials()
        credentials.update(kwargs)
        return provider.Server.from_credentials(credentials)

    @property
    def backingstore(self):
        # Use btrfs snapshots when /var/lib/lxc lives on a btrfs filesystem;
        # the probe result is cached after the first call.
        if not hasattr(self, "_backingstore"):
            code = self.server.ssh.execute("df -t btrfs %s" % self.path)[0]
            self._backingstore = "" if code else "btrfs"
        return self._backingstore

    def prepare(self):
        """Write the lxc bridge/dhcp defaults, install lxc and set up
        the configured tunnels."""
        if self.network:
            dhcp_start = str(self.network.network + 2)
            dhcp_end = str(self.network.network + self.network.size - 2)
            dhcp_range = ",".join([dhcp_start, dhcp_end])
            values = {
                "USE_LXC_BRIDGE": "true",
                "LXC_BRIDGE": self.config.get("lxc_bridge", "lxcbr0"),
                "LXC_ADDR": self.network.network + 1,
                "LXC_NETMASK": self.network.netmask,
                "LXC_NETWORK": self.network,
                "LXC_DHCP_RANGE": dhcp_range,
                "LXC_DHCP_MAX": self.network.size - 3,
            }
            config = moves.StringIO()
            for name, value in six.iteritems(values):
                config.write("%(name)s=\"%(value)s\"\n" % {"name": name,
                                                           "value": value})
            config.seek(0)
            self.server.ssh.run("cat > /tmp/.lxc_default", stdin=config)

        self.server.ssh.run("/bin/sh", stdin=_get_script("lxc-install.sh"))
        self.create_local_tunnels()
        self.create_remote_tunnels()

    def create_local_tunnels(self):
        """Create tunnel on lxc host side."""
        for tunnel_to in self.config["tunnel_to"]:
            script = _get_script_from_template("tunnel-local.sh",
                                               net=self.network,
                                               local=self.server.host,
                                               remote=tunnel_to)
            self.server.ssh.run("/bin/sh", stdin=script)

    def create_remote_tunnels(self):
        """Create tunnel on remote side."""
        for tunnel_to in self.config["tunnel_to"]:
            script = _get_script_from_template("tunnel-remote.sh",
                                               net=self.network,
                                               local=tunnel_to,
                                               remote=self.server.host)
            server = self._get_updated_server(host=tunnel_to)
            server.ssh.run("/bin/sh", stdin=script)

    def delete_tunnels(self):
        """Remove the ip tunnels on both ends."""
        for tunnel_to in self.config["tunnel_to"]:
            remote_server = self._get_updated_server(host=tunnel_to)
            remote_server.ssh.execute("ip tun del t%s" % self.network.ip)
            self.server.ssh.execute("ip tun del t%s" % tunnel_to)

    def get_ip(self, name):
        """Get container's ip by name."""
        cmd = "lxc-attach -n %s ip addr list dev eth0" % name
        for attempt in range(1, 16):
            code, stdout = self.server.ssh.execute(cmd)[:2]
            if code:
                # Command failed (container may not be up yet); retry.
                continue
            for line in stdout.splitlines():
                m = INET_ADDR_RE.match(line)
                if m:
                    return m.group(1)
            # No address assigned yet; back off progressively.
            time.sleep(attempt)
        msg = _("Timeout waiting for ip address of container \"%s\"") % name
        raise exceptions.TimeoutException(msg)

    def get_port(self, ip):
        """Get forwarded ssh port for instance ip.

        Ssh port forwarding is used for containers access from outside.
        Any container is accessible by host's ip and forwarded port. E.g:

            6.6.6.6:10023 -> 10.1.1.11:22
            6.6.6.6:10024 -> 10.1.1.12:22
            6.6.6.6:10025 -> 10.1.1.13:22

        where 6.6.6.6 is host's ip.

        Ip->port association is stored in self._port_cache to reduce number
        of iptables calls.
        """
        if not self._port_cache:
            # Rebuild the cache from the existing iptables DNAT rules.
            self._port_cache = {}
            port_re = re.compile(r".+ tcp dpt:(\d+).*to:([\d\.]+)\:22")
            cmd = "iptables -n -t nat -L PREROUTING"
            code, out, err = self.server.ssh.execute(cmd)
            # NOTE: iterate over lines, not characters.  The previous code
            # did ``for l in out`` which walked the output string one
            # character at a time, so the regex never matched and the
            # cache was never populated from existing rules.
            for line in out.splitlines():
                m = port_re.match(line)
                if m:
                    self._port_cache[m.group(2)] = int(m.group(1))
        port = self._port_cache.get(ip)
        if port is None:
            # Allocate the next free port (starting at 1222) and install
            # the forwarding rule.
            if self._port_cache:
                port = max(self._port_cache.values()) + 1
            else:
                port = 1222
            self._port_cache[ip] = port
            cmd = IPT_PORT_TEMPLATE.format(host_ip=self.server.host, ip=ip,
                                           port=port, action="I")
            self.server.ssh.run(cmd)
        return port

    def create_container(self, name, distribution, release=None):
        """Create a new container from the given distribution template."""
        cmd = ["lxc-create"]
        if self.backingstore == "btrfs":
            cmd += ["-B", "btrfs"]
        cmd += ["-n", name, "-t", distribution]
        if release:
            if distribution == "ubuntu":
                cmd += ["--", "-r", release]
            elif distribution == "debian":
                # The debian template reads the release from $SUITE.
                cmd = ["SUITE=%s" % release] + cmd
        self.server.ssh.run(" ".join(cmd))
        self.configure_container(name)
        self.containers.append(name)

    def create_clone(self, name, source):
        """Clone an existing container (btrfs snapshot when available)."""
        cmd = ["lxc-clone"]
        if self.backingstore == "btrfs":
            cmd.append("--snapshot")
        cmd.extend(["-o", source, "-n", name])
        self.server.ssh.execute(" ".join(cmd))
        self.configure_container(name)
        self.containers.append(name)

    def configure_container(self, name):
        # Run the configuration script against the container's rootfs.
        path = os.path.join(self.path, name, "rootfs")
        conf_script = _get_script("configure_container.sh")
        self.server.ssh.run("/bin/sh -e -s %s" % path, stdin=conf_script)

    def start_containers(self):
        """Start all containers created by this host object."""
        for name in self.containers:
            self.server.ssh.run("lxc-start -d -n %s" % name)

    def stop_containers(self):
        """Stop all containers created by this host object."""
        for name in self.containers:
            self.server.ssh.run("lxc-stop -n %s" % name)

    def destroy_ports(self, ipports):
        """Delete the ssh port-forwarding rules for (ip, port) pairs."""
        script = ""
        for ip, port in ipports:
            cmd = IPT_PORT_TEMPLATE.format(action="D", port=port, ip=ip,
                                           host_ip=self.server.host)
            script += cmd + "\n"
        self.server.ssh.run("/bin/sh -e", stdin=script)

    def destroy_containers(self):
        """Stop and permanently delete all containers."""
        for name in self.containers:
            self.server.ssh.run("lxc-stop -n %s" % name)
            self.server.ssh.run("lxc-destroy -n %s" % name)

    def get_server_object(self, name, wait=True):
        """Create Server object for container."""
        ip = self.get_ip(name)
        if self.config.get("forward_ssh", False):
            # Reach the container through the host ip and a forwarded port.
            server = self._get_updated_server(port=self.get_port(ip))
        else:
            server = self._get_updated_server(host=ip)
        if wait:
            server.ssh.wait(timeout=300)
        return server

    def get_server_objects(self, wait=True):
        """Generate Server objects from all containers."""
        for name in self.containers:
            yield self.get_server_object(name, wait)
@provider.configure(name="LxcProvider")
class LxcProvider(provider.ProviderFactory):
    """Provide lxc container(s) on given host.

    Sample configuration:

    {
        "type": "LxcProvider",
        "distribution": "ubuntu",
        "start_lxc_network": "10.1.1.0/24",
        "containers_per_host": 32,
        "tunnel_to": ["10.10.10.10"],
        "forward_ssh": false,
        "container_name_prefix": "rally-multinode-02",
        "host_provider": {
            "type": "ExistingServers",
            "credentials": [{"user": "root", "host": "host.net"}]
        }
    }

    """

    CONFIG_SCHEMA = {
        "type": "object",
        "properties": {
            "type": {"type": "string"},
            "distribution": {"type": "string"},
            "release": {"type": "string"},
            "start_lxc_network": {"type": "string",
                                  "pattern": "^(\d+\.){3}\d+\/\d+$"},
            "containers_per_host": {"type": "integer"},
            "forward_ssh": {"type": "boolean"},
            # NOTE: JSON Schema uses "items" (not "elements") to describe
            # array members; with the old key the per-address pattern was
            # silently ignored by validation.
            "tunnel_to": {"type": "array",
                          "items": {"type": "string",
                                    "pattern": "^(\d+\.){3}\d+$"}},
            "container_name_prefix": {"type": "string"},
            "host_provider": {"type": "object",
                              "properties": {"type": {"type": "string"}}},
        },
        "required": ["type", "containers_per_host",
                     "container_name_prefix", "host_provider"],
    }

    def validate(self):
        """Ensure the requested network can hold containers_per_host
        addresses (minus the network, gateway and broadcast addresses)."""
        super(LxcProvider, self).validate()
        if "start_lxc_network" not in self.config:
            return
        lxc_net = netaddr.IPNetwork(self.config["start_lxc_network"])
        num_containers = self.config["containers_per_host"]
        if lxc_net.size - 3 < num_containers:
            message = _("Network size is not enough for %d hosts.")
            raise exceptions.InvalidConfigException(message % num_containers)

    def get_host_provider(self):
        """Instantiate the nested provider that supplies the lxc hosts."""
        return provider.ProviderFactory.get_provider(
            self.config["host_provider"], self.deployment)

    @utils.log_deploy_wrapper(LOG.info, _("Create containers on host"))
    def create_servers(self):
        """Create the hosts, then one template container plus clones on
        each host, and return Server objects for every container."""
        host_provider = self.get_host_provider()
        name_prefix = self.config["container_name_prefix"]
        hosts = []
        if "start_lxc_network" in self.config:
            network = netaddr.IPNetwork(self.config["start_lxc_network"])
        else:
            network = None
        distribution = self.config.get("distribution", "ubuntu")
        release = self.config.get("release", None)

        for server in host_provider.create_servers():
            config = {"tunnel_to": self.config.get("tunnel_to", []),
                      "forward_ssh": self.config.get("forward_ssh", False)}
            if network:
                config["network"] = str(network)
            host = LxcHost(server, config)
            host.prepare()
            ip = str(network.ip).replace(".", "-") if network else "0"
            # The first container is created from scratch; the remaining
            # ones are (snapshot) clones of it, which is much faster.
            first_name = "%s-000-%s" % (name_prefix, ip)
            host.create_container(first_name, distribution, release)
            for i in range(1, self.config.get("containers_per_host", 1)):
                name = "%s-%03d-%s" % (name_prefix, i, ip)
                host.create_clone(name, first_name)
            host.start_containers()
            hosts.append(host)

            if network:
                # Each host gets the next adjacent subnet.
                network += 1

        servers = []

        for host in hosts:
            for server in host.get_server_objects():
                servers.append(server)
            # Persist enough information to tear everything down later.
            info = {"host": host.server.get_credentials(),
                    "config": host.config,
                    "forwarded_ports": host._port_cache.items(),
                    "container_names": host.containers}
            self.resources.create(info)
        return servers

    @utils.log_deploy_wrapper(LOG.info, _("Destroy host(s)"))
    def destroy_servers(self):
        """Destroy all containers, port forwards and tunnels recorded in
        the deployment resources, then the hosts themselves."""
        for resource in self.resources.get_all():
            server = provider.Server.from_credentials(resource["info"]["host"])
            lxc_host = LxcHost(server, resource["info"]["config"])
            lxc_host.containers = resource["info"]["container_names"]
            lxc_host.destroy_containers()
            lxc_host.destroy_ports(resource["info"]["forwarded_ports"])
            lxc_host.delete_tunnels()
            self.resources.delete(resource["id"])
        host_provider = self.get_host_provider()
        host_provider.destroy_servers()
|
{
"content_hash": "e0957d23abe82fb2f23814f538fb2d86",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 79,
"avg_line_length": 38.30681818181818,
"alnum_prop": 0.534559477899733,
"repo_name": "shdowofdeath/rally",
"id": "347972769908467a4afd4c17d3f1cfe001dd802f",
"size": "14114",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rally/deployment/serverprovider/providers/lxc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "46737"
},
{
"name": "Python",
"bytes": "2421750"
},
{
"name": "Shell",
"bytes": "36795"
}
],
"symlink_target": ""
}
|
"""Testing utilities."""
import shutil
from six import iteritems
import tempfile
from django.conf import settings
from django.test.utils import override_settings
from django.utils.encoding import force_bytes
from django_downloadview.middlewares import is_download_response
from django_downloadview.response import (encode_basename_ascii,
encode_basename_utf8)
def setup_view(view, request, *args, **kwargs):
    """Attach request/URLconf data to ``view`` and return it, like ``as_view()``.

    Use this to obtain a view instance whose individual methods can be
    unit-tested directly. Early implementation of
    https://code.djangoproject.com/ticket/20456

    ``view``
      A view instance, e.g. ``TemplateView(template_name='dummy.html')``,
      built with the same arguments you would pass to ``as_view()``.

    ``request``
      A request object, typically built with
      :class:`~django.test.client.RequestFactory`.

    ``args`` and ``kwargs``
      "URLconf" positional and keyword arguments, the same you would pass
      to :func:`~django.core.urlresolvers.reverse`.
    """
    for attr_name, attr_value in (("request", request),
                                  ("args", args),
                                  ("kwargs", kwargs)):
        setattr(view, attr_name, attr_value)
    return view
class temporary_media_root(override_settings):
    """Temporarily override settings.MEDIA_ROOT with a temporary directory.

    The temporary directory is automatically created and destroyed.

    Use this function as a context manager:

    >>> from django_downloadview.test import temporary_media_root
    >>> from django.conf import settings  # NoQA
    >>> global_media_root = settings.MEDIA_ROOT
    >>> with temporary_media_root():
    ...     global_media_root == settings.MEDIA_ROOT
    False
    >>> global_media_root == settings.MEDIA_ROOT
    True

    Or as a decorator:

    >>> @temporary_media_root()
    ... def use_temporary_media_root():
    ...     return settings.MEDIA_ROOT
    >>> tmp_media_root = use_temporary_media_root()
    >>> global_media_root == tmp_media_root
    False
    >>> global_media_root == settings.MEDIA_ROOT
    True
    """

    def enable(self):
        """Point MEDIA_ROOT at a freshly created temporary directory."""
        self.options['MEDIA_ROOT'] = tempfile.mkdtemp()
        super(temporary_media_root, self).enable()

    def disable(self):
        """Remove the temporary directory, then restore the original
        MEDIA_ROOT setting."""
        # Delete while settings.MEDIA_ROOT still points at the temp dir.
        shutil.rmtree(settings.MEDIA_ROOT)
        super(temporary_media_root, self).disable()
class DownloadResponseValidator(object):
    """Utility class to validate DownloadResponse instances."""

    def __call__(self, test_case, response, **assertions):
        """Assert that ``response`` is a valid DownloadResponse instance.

        Optional ``assertions`` dictionary can be used to check additional
        items:

        * ``basename``: the basename of the file in the response.
        * ``content_type``: the value of "Content-Type" header.
        * ``mime_type``: the MIME type part of "Content-Type" header (without
          charset).
        * ``content``: the contents of the file.
        * ``attachment``: whether the file is returned as attachment or not.
        """
        self.assert_download_response(test_case, response)
        # Dispatch each keyword to the matching assert_<key> method; an
        # unknown key raises AttributeError, surfacing the typo to the test.
        for key, value in iteritems(assertions):
            assert_func = getattr(self, 'assert_%s' % key)
            assert_func(test_case, response, value)

    def assert_download_response(self, test_case, response):
        # Baseline check: the response must be a download response at all.
        test_case.assertTrue(is_download_response(response))

    def assert_basename(self, test_case, response, value):
        """Implies ``attachment is True``."""
        # Content-Disposition carries the name as plain ``filename="..."``
        # (ASCII) and/or RFC 5987 ``filename*=UTF-8''...`` (non-ASCII).
        # When the name is pure ASCII either form is acceptable, so only
        # check the form(s) the response actually emitted; a non-ASCII
        # name must always have the UTF-8 form.
        ascii_name = encode_basename_ascii(value)
        utf8_name = encode_basename_utf8(value)
        check_utf8 = False
        check_ascii = False
        if ascii_name == utf8_name:  # Only ASCII characters.
            check_ascii = True
            if "filename*=" in response['Content-Disposition']:
                check_utf8 = True
        else:
            check_utf8 = True
            if "filename=" in response['Content-Disposition']:
                check_ascii = True
        if check_ascii:
            test_case.assertIn('filename="{name}"'.format(
                name=ascii_name),
                response['Content-Disposition'])
        if check_utf8:
            test_case.assertIn(
                "filename*=UTF-8''{name}".format(name=utf8_name),
                response['Content-Disposition'])

    def assert_content_type(self, test_case, response, value):
        # Exact match, including any charset suffix.
        test_case.assertEqual(response['Content-Type'], value)

    def assert_mime_type(self, test_case, response, value):
        # Prefix match so a trailing "; charset=..." is tolerated.
        test_case.assertTrue(response['Content-Type'].startswith(value))

    def assert_content(self, test_case, response, value):
        """Assert value equals response's content (byte comparison)."""
        # Consume the streaming response; compare as bytes to avoid
        # str/bytes mismatches across Python versions.
        parts = [force_bytes(s) for s in response.streaming_content]
        test_case.assertEqual(b''.join(parts), force_bytes(value))

    def assert_attachment(self, test_case, response, value):
        # Attachment is signalled by "attachment;" in Content-Disposition;
        # a missing header counts as "not an attachment".
        if value:
            test_case.assertTrue(
                'attachment;' in response['Content-Disposition'])
        else:
            test_case.assertTrue(
                'Content-Disposition' not in response
                or 'attachment;' not in response['Content-Disposition'])
def assert_download_response(test_case, response, **assertions):
    """Make ``test_case`` assert that ``response`` meets ``assertions``.

    Functional shortcut around :class:`DownloadResponseValidator`.

    Optional ``assertions`` dictionary can be used to check additional items:

    * ``basename``: the basename of the file in the response.
    * ``content_type``: the value of "Content-Type" header.
    * ``mime_type``: the MIME type part of "Content-Type" header (without
      charset).
    * ``content``: the contents of the file.
    * ``attachment``: whether the file is returned as attachment or not.
    """
    return DownloadResponseValidator()(test_case, response, **assertions)
|
{
"content_hash": "7c4923bd69617255aeb61df315221ee4",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 78,
"avg_line_length": 34.87428571428571,
"alnum_prop": 0.6382107160412912,
"repo_name": "fladi/django-downloadview",
"id": "627adb5db8c09cc317c654581f707ac952237f79",
"size": "6103",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_downloadview/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "923"
},
{
"name": "Makefile",
"bytes": "2041"
},
{
"name": "Python",
"bytes": "145516"
}
],
"symlink_target": ""
}
|
import click
import os
from flask.cli import cli
@cli.command()
def clean():
    """Recursively remove *.pyc and *.pyo files."""
    for root, _dirs, files in os.walk('.'):
        for name in files:
            if name.endswith(('.pyc', '.pyo')):
                path = os.path.join(root, name)
                click.echo(f'Removing {path}')
                os.remove(path)
|
{
"content_hash": "17f95a702e079f56bdeb1dd207b5c2dc",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 29.2,
"alnum_prop": 0.6004566210045662,
"repo_name": "briancappello/flask-react-spa",
"id": "c0d4ed1a12b1f75505a876da77cf3d3a750e057b",
"size": "498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/commands/clean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8579"
},
{
"name": "Dockerfile",
"bytes": "1009"
},
{
"name": "HTML",
"bytes": "18126"
},
{
"name": "JavaScript",
"bytes": "169637"
},
{
"name": "Makefile",
"bytes": "685"
},
{
"name": "Mako",
"bytes": "509"
},
{
"name": "Python",
"bytes": "282850"
},
{
"name": "Ruby",
"bytes": "5065"
},
{
"name": "Shell",
"bytes": "5231"
}
],
"symlink_target": ""
}
|
import numpy as np
from robosuite.controllers.base_controller import Controller
from robosuite.utils.control_utils import *
# Impedance modes accepted by JointPositionController (validated in __init__).
IMPEDANCE_MODES = {"fixed", "variable", "variable_kp"}
class JointPositionController(Controller):
    """
    Controller for controlling robot arm via impedance control. Allows position control of the robot's joints.

    NOTE: Control input actions assumed to be taken relative to the current joint positions. A given action to this
    controller is assumed to be of the form: (dpos_j0, dpos_j1, ... , dpos_jn-1) for an n-joint robot

    Args:
        sim (MjSim): Simulator instance this controller will pull robot state updates from

        eef_name (str): Name of controlled robot arm's end effector (from robot XML)

        joint_indexes (dict): Each key contains sim reference indexes to relevant robot joint information, namely:

            :`'joints'`: list of indexes to relevant robot joints
            :`'qpos'`: list of indexes to relevant robot joint positions
            :`'qvel'`: list of indexes to relevant robot joint velocities

        actuator_range (2-tuple of array of float): 2-Tuple (low, high) representing the robot joint actuator range

        input_max (float or Iterable of float): Maximum above which an inputted action will be clipped. Can be either be
            a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the
            latter, dimension should be the same as the control dimension for this controller

        input_min (float or Iterable of float): Minimum below which an inputted action will be clipped. Can be either be
            a scalar (same value for all action dimensions), or a list (specific values for each dimension). If the
            latter, dimension should be the same as the control dimension for this controller

        output_max (float or Iterable of float): Maximum which defines upper end of scaling range when scaling an input
            action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for
            each dimension). If the latter, dimension should be the same as the control dimension for this controller

        output_min (float or Iterable of float): Minimum which defines upper end of scaling range when scaling an input
            action. Can be either be a scalar (same value for all action dimensions), or a list (specific values for
            each dimension). If the latter, dimension should be the same as the control dimension for this controller

        kp (float or Iterable of float): positional gain for determining desired torques based upon the joint pos error.
            Can be either be a scalar (same value for all action dims), or a list (specific values for each dim)

        damping_ratio (float or Iterable of float): used in conjunction with kp to determine the velocity gain for
            determining desired torques based upon the joint pos errors. Can be either be a scalar (same value for all
            action dims), or a list (specific values for each dim)

        impedance_mode (str): Impedance mode with which to run this controller. Options are {"fixed", "variable",
            "variable_kp"}. If "fixed", the controller will have fixed kp and damping_ratio values as specified by the
            @kp and @damping_ratio arguments. If "variable", both kp and damping_ratio will now be part of the
            controller action space, resulting in a total action space of num_joints * 3. If "variable_kp", only kp
            will become variable, with damping_ratio fixed at 1 (critically damped). The resulting action space will
            then be num_joints * 2.

        kp_limits (2-list of float or 2-list of Iterable of floats): Only applicable if @impedance_mode is set to either
            "variable" or "variable_kp". This sets the corresponding min / max ranges of the controller action space
            for the varying kp values. Can be either be a 2-list (same min / max for all kp action dims), or a 2-list
            of list (specific min / max for each kp dim)

        damping_ratio_limits (2-list of float or 2-list of Iterable of floats): Only applicable if @impedance_mode is
            set to "variable". This sets the corresponding min / max ranges of the controller action space for the
            varying damping_ratio values. Can be either be a 2-list (same min / max for all damping_ratio action dims),
            or a 2-list of list (specific min / max for each damping_ratio dim)

        policy_freq (int): Frequency at which actions from the robot policy are fed into this controller

        qpos_limits (2-list of float or 2-list of Iterable of floats): Limits (rad) below and above which the magnitude
            of a calculated goal joint position will be clipped. Can be either be a 2-list (same min/max value for all
            joint dims), or a 2-list of list (specific min/max values for each dim)

        interpolator (Interpolator): Interpolator object to be used for interpolating from the current joint position to
            the goal joint position during each timestep between inputted actions

        **kwargs: Does nothing; placeholder to "sink" any additional arguments so that instantiating this controller
            via an argument dict that has additional extraneous arguments won't raise an error

    Raises:
        AssertionError: [Invalid impedance mode]
    """

    def __init__(
        self,
        sim,
        eef_name,
        joint_indexes,
        actuator_range,
        input_max=1,
        input_min=-1,
        output_max=0.05,
        output_min=-0.05,
        kp=50,
        damping_ratio=1,
        impedance_mode="fixed",
        kp_limits=(0, 300),
        damping_ratio_limits=(0, 100),
        policy_freq=20,
        qpos_limits=None,
        interpolator=None,
        **kwargs,  # does nothing; used so no error raised when dict is passed with extra terms used previously
    ):
        super().__init__(
            sim,
            eef_name,
            joint_indexes,
            actuator_range,
        )

        # Control dimension
        self.control_dim = len(joint_indexes["joints"])

        # input and output max and min (allow for either explicit lists or single numbers)
        self.input_max = self.nums2array(input_max, self.control_dim)
        self.input_min = self.nums2array(input_min, self.control_dim)
        self.output_max = self.nums2array(output_max, self.control_dim)
        self.output_min = self.nums2array(output_min, self.control_dim)

        # limits
        self.position_limits = np.array(qpos_limits) if qpos_limits is not None else qpos_limits

        # kp kd (kd derived for the requested damping ratio)
        self.kp = self.nums2array(kp, self.control_dim)
        self.kd = 2 * np.sqrt(self.kp) * damping_ratio

        # kp and kd limits
        self.kp_min = self.nums2array(kp_limits[0], self.control_dim)
        self.kp_max = self.nums2array(kp_limits[1], self.control_dim)
        self.damping_ratio_min = self.nums2array(damping_ratio_limits[0], self.control_dim)
        self.damping_ratio_max = self.nums2array(damping_ratio_limits[1], self.control_dim)

        # Verify the proposed impedance mode is supported
        # FIX: error message previously said "OSC controller" (apparently
        # copied from the OSC controller) -- corrected to name this class.
        assert impedance_mode in IMPEDANCE_MODES, (
            "Error: Tried to instantiate JointPositionController for unsupported "
            "impedance mode! Inputted impedance mode: {}, Supported modes: {}".format(impedance_mode, IMPEDANCE_MODES)
        )

        # Impedance mode
        self.impedance_mode = impedance_mode

        # Add to control dim based on impedance_mode
        if self.impedance_mode == "variable":
            self.control_dim *= 3
        elif self.impedance_mode == "variable_kp":
            self.control_dim *= 2

        # control frequency
        self.control_freq = policy_freq

        # interpolator
        self.interpolator = interpolator

        # initialize
        self.goal_qpos = None

    def set_goal(self, action, set_qpos=None):
        """
        Sets goal based on input @action. If self.impedance_mode is not "fixed", then the input will be parsed into the
        delta values to update the goal position / pose and the kp and/or damping_ratio values to be immediately updated
        internally before executing the proceeding control loop.

        Note that @action expected to be in the following format, based on impedance mode!

            :Mode `'fixed'`: [joint pos command]
            :Mode `'variable'`: [damping_ratio values, kp values, joint pos command]
            :Mode `'variable_kp'`: [kp values, joint pos command]

        Args:
            action (Iterable): Desired relative joint position goal state
            set_qpos (Iterable): If set, overrides @action and sets the desired absolute joint position goal state

        Raises:
            AssertionError: [Invalid action dimension size]
        """
        # Update state
        self.update()

        # Parse action based on the impedance mode, and update kp / kd as necessary
        jnt_dim = len(self.qpos_index)
        if self.impedance_mode == "variable":
            damping_ratio, kp, delta = action[:jnt_dim], action[jnt_dim : 2 * jnt_dim], action[2 * jnt_dim :]
            self.kp = np.clip(kp, self.kp_min, self.kp_max)
            self.kd = 2 * np.sqrt(self.kp) * np.clip(damping_ratio, self.damping_ratio_min, self.damping_ratio_max)
        elif self.impedance_mode == "variable_kp":
            kp, delta = action[:jnt_dim], action[jnt_dim:]
            self.kp = np.clip(kp, self.kp_min, self.kp_max)
            self.kd = 2 * np.sqrt(self.kp)  # critically damped
        else:  # This is case "fixed"
            delta = action

        # Check to make sure delta is size self.joint_dim
        assert len(delta) == jnt_dim, "Delta qpos must be equal to the robot's joint dimension space!"

        if delta is not None:
            scaled_delta = self.scale_action(delta)
        else:
            scaled_delta = None

        self.goal_qpos = set_goal_position(
            scaled_delta, self.joint_pos, position_limit=self.position_limits, set_pos=set_qpos
        )

        if self.interpolator is not None:
            self.interpolator.set_goal(self.goal_qpos)

    def run_controller(self):
        """
        Calculates the torques required to reach the desired setpoint

        Returns:
             np.array: Command torques
        """
        # Make sure goal has been set
        if self.goal_qpos is None:
            self.set_goal(np.zeros(self.control_dim))

        # Update state
        self.update()

        desired_qpos = None

        # Only linear interpolator is currently supported
        if self.interpolator is not None:
            # Linear case
            if self.interpolator.order == 1:
                desired_qpos = self.interpolator.get_interpolated_goal()
            else:
                # Nonlinear case not currently supported
                pass
        else:
            desired_qpos = np.array(self.goal_qpos)

        # torques = pos_err * kp + vel_err * kd
        position_error = desired_qpos - self.joint_pos
        vel_pos_error = -self.joint_vel
        desired_torque = np.multiply(np.array(position_error), np.array(self.kp)) + np.multiply(vel_pos_error, self.kd)

        # Return desired torques plus gravity compensations
        self.torques = np.dot(self.mass_matrix, desired_torque) + self.torque_compensation

        # Always run superclass call for any cleanups at the end
        super().run_controller()

        return self.torques

    def reset_goal(self):
        """
        Resets joint position goal to be current position
        """
        self.goal_qpos = self.joint_pos

        # Reset interpolator if required
        if self.interpolator is not None:
            self.interpolator.set_goal(self.goal_qpos)

    @property
    def control_limits(self):
        """
        Returns the limits over this controller's action space, overrides the superclass property
        Returns the following (generalized for both high and low limits), based on the impedance mode:

            :Mode `'fixed'`: [joint pos command]
            :Mode `'variable'`: [damping_ratio values, kp values, joint pos command]
            :Mode `'variable_kp'`: [kp values, joint pos command]

        Returns:
            2-tuple:

                - (np.array) minimum action values
                - (np.array) maximum action values
        """
        if self.impedance_mode == "variable":
            low = np.concatenate([self.damping_ratio_min, self.kp_min, self.input_min])
            high = np.concatenate([self.damping_ratio_max, self.kp_max, self.input_max])
        elif self.impedance_mode == "variable_kp":
            low = np.concatenate([self.kp_min, self.input_min])
            high = np.concatenate([self.kp_max, self.input_max])
        else:  # This is case "fixed"
            low, high = self.input_min, self.input_max
        return low, high

    @property
    def name(self):
        return "JOINT_POSITION"
|
{
"content_hash": "71f42e4983816f90f26b30de0f2d25a7",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 120,
"avg_line_length": 45.666666666666664,
"alnum_prop": 0.6440845498783455,
"repo_name": "ARISE-Initiative/robosuite",
"id": "054f1c717b9bbc6ade1a165aadcada4d586b4547",
"size": "13152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "robosuite/controllers/joint_pos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CMake",
"bytes": "552"
},
{
"name": "Python",
"bytes": "1197777"
}
],
"symlink_target": ""
}
|
# Tell Django which AppConfig class to use for this provider package.
# NOTE(review): ``default_app_config`` is deprecated in modern Django
# (3.2+) -- confirm the project's Django version before removing.
default_app_config = 'providers.edu.chapman.apps.AppConfig'
|
{
"content_hash": "33a5d80be2af467b912e3f9a1d1e4caa",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 59,
"avg_line_length": 60,
"alnum_prop": 0.8,
"repo_name": "zamattiac/SHARE",
"id": "f3abca6ffae7694a429542fbc2e7be3db02580ad",
"size": "60",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/edu/chapman/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
}
|
"""Form file for editing."""
from django import forms
from django.contrib.auth.models import User
class ProfileForm(forms.ModelForm):
    """Form for photo addition.

    Bound to the built-in ``User`` model with no editable fields exposed
    (``fields`` is empty), so the form carries only the model instance.
    """

    class Meta:
        """Bind the form to ``User`` and expose no fields."""

        model = User
        fields = []
|
{
"content_hash": "f35c7fcdd26c85a55a96d8f4c03e88a2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 43,
"avg_line_length": 18.923076923076923,
"alnum_prop": 0.6097560975609756,
"repo_name": "cahudson94/django-imager",
"id": "835f651e5fac2bdb68b48ebff55f0e64cf87e092",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "49712"
},
{
"name": "HTML",
"bytes": "21382"
},
{
"name": "JavaScript",
"bytes": "97134"
},
{
"name": "Python",
"bytes": "75478"
}
],
"symlink_target": ""
}
|
"""add registry_trust_id to bay
Revision ID: adc3b7679ae
Revises: 40f325033343
Create Date: 2015-12-07 15:49:07.622122
"""
# revision identifiers, used by Alembic.
revision = 'adc3b7679ae'        # this migration
down_revision = '40f325033343'  # parent migration in the chain
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable ``registry_trust_id`` column to the ``bay`` table."""
    op.add_column('bay', sa.Column('registry_trust_id',
                                   sa.String(length=255), nullable=True))
|
{
"content_hash": "4339e52acdd01fc8dd476a356dffed2a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 56,
"avg_line_length": 21.105263157894736,
"alnum_prop": 0.7057356608478803,
"repo_name": "jay-lau/magnum",
"id": "c780d10f5e638052f99ea6962589f3329b31596e",
"size": "974",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "magnum/db/sqlalchemy/alembic/versions/adc3b7679ae_add_registry_trust_id_to_bay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "569"
},
{
"name": "Python",
"bytes": "393112"
}
],
"symlink_target": ""
}
|
import datetime
import re
import urlparse
import scrapy
from scrapy.exceptions import CloseSpider
from .base import BaseSpider
from ..items import Link, Loader
# Section landing pages crawled for article links after login succeeds.
FG_URLS = [
    'http://www.fangraphs.com/blogs/',
    'http://www.fangraphs.com/community/',
    'http://www.fangraphs.com/plus/',
]

# Matches a long-form date such as "January 5, 2016"; used to pull the
# publication date out of the post-meta text.
DATE_RE = re.compile(
    r'((?:January|February|March|April|May|June|July|August|September|October|November|December) [0-9]{1,2}, [0-9]{4})'
)
class FangraphsSpider(BaseSpider):
    """Authenticated scrapy spider for Fangraphs blog/community/plus posts."""

    name = 'fangraphs'
    allowed_domains = ['fangraphs.com']
    start_urls = ('http://www.fangraphs.com/blogs/wp-login.php', )

    def parse(self, response):
        """Creates authenticated Fangraphs session.

        Submits the WordPress login form using credentials from the
        ``FG_USERNAME`` / ``FG_PASSWORD`` settings; aborts the crawl if
        either is missing.
        """
        username = self.settings.get('FG_USERNAME')
        password = self.settings.get('FG_PASSWORD')
        if not (username and password):
            raise CloseSpider(reason='Please provide your Fangraphs login '
                                     'via `FG_USERNAME` and `FG_PASSWORD` '
                                     'settings.')
        yield scrapy.FormRequest.from_response(
            response,
            formdata={'log': username,
                      'pwd': password,
                      'rememberme': 'forever',
                      'wp-submit': 'Log In',
                      'redirect_to': 'http://www.fangraphs.com/index.aspx',
                      'testcookie': '1'},
            callback=self.post_login)

    def post_login(self, response):
        """Verify login succeeded, then kick off the section crawls."""
        # Landing back on the wp-login page means the credentials were
        # rejected; log the body for debugging and stop the crawl.
        bits = urlparse.urlparse(response.url)
        if '/blogs/wp-login.php' in bits.path:
            self.logger.warning(response.body)
            raise CloseSpider(reason='Fangraphs login failed. '
                                     'Please check your credentials and '
                                     'try again.')

        # authentication successful, crawl on
        for url in FG_URLS:
            yield scrapy.Request(url, callback=self.parse_list_pages)

    def parse_list_pages(self, response):
        """Request every article on a listing page, then follow pagination."""
        links = response.css('#blogcontent .post h2.posttitle a::attr(href)')
        for link in links.extract():
            url = urlparse.urljoin(response.url, link)
            self.logger.debug('Requesting article URL: {}'.format(url))
            yield scrapy.Request(url, callback=self.extract_links_from_source)

        # find next page
        pagination_href = (
            response.xpath('//a[contains(., "Next Page")]/@href')
            .extract_first())

        # continue to next page if possible
        if pagination_href:
            url = urlparse.urljoin(response.url, pagination_href)
            self.logger.debug('Requesting next article list URL: ' + url)
            yield scrapy.Request(url, callback=self.parse_list_pages)

    def extract_links_from_source(self, response):
        """Extracts links from an article page,
        then spawns extraction of all links found.

        Aborts the whole crawl if the article body contains the paywall
        teaser, which indicates the authenticated session did not unlock
        Fangraphs+ content.
        """
        paywalled = (
            response.xpath(
                '//div[@class="fullpostentry" and contains(., "to read the rest of this post or")]'
            )
            .extract_first())
        if paywalled:
            raise CloseSpider(reason='Paywall not broken. Check your '
                                     'Fangraphs credentials and try again.\n'
                                     '\t{}'.format(response.url))
        return super(FangraphsSpider, self).extract_links_from_source(response)

    def get_article_links(self, selector):
        # All anchors inside the full article body.
        return selector.css('.fullpostentry a::attr(href)').extract()

    def get_publisher_code(self):
        return self.settings['P_FG']

    def get_pub_date(self, response):
        """Return the article's publication date as YYYY-MM-DD, or None
        when no long-form date is found in the post-meta line."""
        value = response.css('div.post p.postmeta::text').extract_first()
        value = value.strip()
        value = DATE_RE.search(value)
        if value is not None:
            value = datetime.datetime.strptime(value.group(1), '%B %d, %Y')
            return value.strftime('%Y-%m-%d')
        return None
|
{
"content_hash": "6893c1410c4bae39ea785a9a5896b2c5",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 119,
"avg_line_length": 34.91228070175438,
"alnum_prop": 0.5723618090452262,
"repo_name": "mattdennewitz/baseball-pagerank",
"id": "adbb445e63cb646bc89463aba336d244b53c932f",
"size": "4005",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bbpr/bbpr/spiders/fangraphs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14196"
}
],
"symlink_target": ""
}
|
from .adaptor import Camera
from .feed import Feed
from .light_sensor import LightSensor
from .ocr import OCR
# Package version string for zorg_network_camera.
__version__ = '0.0.4'
|
{
"content_hash": "3f100adce866d8f830cb3c232837ae46",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 37,
"avg_line_length": 19.142857142857142,
"alnum_prop": 0.7388059701492538,
"repo_name": "zorg-framework/zorg-network-camera",
"id": "be7169a1a464c919f6578f16ffef8651e0339c24",
"size": "134",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zorg_network_camera/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13633"
}
],
"symlink_target": ""
}
|
import pandas as pd
import pytest
# noinspection PyUnresolvedReferences
from contrib.experimental.great_expectations_experimental.expectations.expect_queried_table_row_count_to_be import (
ExpectQueriedTableRowCountToBe,
)
from great_expectations.core.batch import BatchRequest, RuntimeBatchRequest
from great_expectations.data_context import DataContext
from great_expectations.self_check.util import build_spark_validator_with_data
from great_expectations.validator.validator import (
ExpectationValidationResult,
Validator,
)
# Runtime batch request: executes an inline SQL query (first 100 titanic
# rows) against the sqlite datasource; temp-table creation is disabled.
sqlite_runtime_batch_request: RuntimeBatchRequest = RuntimeBatchRequest(
    datasource_name="my_sqlite_db_datasource",
    data_connector_name="default_runtime_data_connector_name",
    data_asset_name="titanic",
    runtime_parameters={"query": "SELECT * FROM titanic LIMIT 100"},
    batch_identifiers={"default_identifier_name": "test_identifier"},
    batch_spec_passthrough={"create_temp_table": False},
)

# Inferred batch request: loads the full titanic asset from the same
# datasource (1313 rows, per the expected values in the tests below).
sqlite_batch_request: BatchRequest = BatchRequest(
    datasource_name="my_sqlite_db_datasource",
    data_connector_name="default_inferred_data_connector_name",
    data_asset_name="titanic",
    batch_spec_passthrough={"create_temp_table": False},
)
@pytest.mark.parametrize(
    "batch_request,success,value,observed,row_condition",
    [
        (sqlite_runtime_batch_request, True, 100, 100, None),
        (sqlite_batch_request, False, 100, 1313, None),
        (sqlite_batch_request, False, 100, 96, 'col("Age")<18'),
        (sqlite_runtime_batch_request, True, 70, 70, 'col("Age")>17'),
    ],
)
@pytest.mark.slow  # 4.32s
def test_expect_queried_column_value_frequency_to_meet_threshold_sqlite(
    batch_request,
    success,
    value,
    observed,
    row_condition,
    titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Run expect_queried_table_row_count_to_be against sqlite batches.

    NOTE(review): function name mentions column_value_frequency but this
    exercises expect_queried_table_row_count_to_be -- consider renaming.
    """
    data_context: DataContext = titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    sqlite_validator: Validator = data_context.get_validator(
        batch_request=batch_request
    )

    validation_result: ExpectationValidationResult = (
        sqlite_validator.expect_queried_table_row_count_to_be(
            value=value,
            row_condition=row_condition,
            condition_parser="great_expectations__experimental__",
        )
    )

    assert validation_result["success"] is success
    assert validation_result["result"]["observed_value"] == observed
@pytest.mark.parametrize(
    "batch_request,success,query,value,observed,row_condition",
    [
        (
            sqlite_runtime_batch_request,
            True,
            "SELECT COUNT(*) FROM titanic",
            1313,
            1313,
            None,
        ),
        (
            sqlite_batch_request,
            True,
            "SELECT COUNT (*) FROM (SELECT * FROM titanic LIMIT 100)",
            100,
            100,
            'col("Age")>17',
        ),
    ],
)
@pytest.mark.slow  # 1.59s
def test_expect_queried_column_value_frequency_to_meet_threshold_override_query_sqlite(
    batch_request,
    success,
    query,
    value,
    observed,
    row_condition,
    titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled,
):
    """Same as the sqlite test above, but with an explicit ``query`` override.

    NOTE(review): function name mentions column_value_frequency but this
    exercises expect_queried_table_row_count_to_be -- consider renaming.
    """
    data_context: DataContext = titanic_v013_multi_datasource_pandas_and_sqlalchemy_execution_engine_data_context_with_checkpoints_v1_with_empty_store_stats_enabled
    sqlite_validator: Validator = data_context.get_validator(
        batch_request=batch_request
    )

    validation_result: ExpectationValidationResult = (
        sqlite_validator.expect_queried_table_row_count_to_be(
            value=value,
            query=query,
            row_condition=row_condition,
            condition_parser="great_expectations__experimental__",
        )
    )

    assert validation_result["success"] is success
    assert validation_result["result"]["observed_value"] == observed
@pytest.mark.parametrize(
    "success,value,observed,row_condition",
    [
        (False, 100, 1313, None),
        (False, 100, 96, 'col("Age")<18'),
    ],
)
def test_expect_queried_column_value_frequency_to_meet_threshold_spark(
    success,
    value,
    observed,
    row_condition,
    spark_session,
    basic_spark_df_execution_engine,
    titanic_df,
):
    """Run expect_queried_table_row_count_to_be against a Spark validator.

    NOTE(review): function name mentions column_value_frequency but this
    exercises expect_queried_table_row_count_to_be -- consider renaming.
    """
    titanic_frame: pd.DataFrame = titanic_df
    spark_validator: Validator = build_spark_validator_with_data(
        titanic_frame, spark_session
    )

    validation_result: ExpectationValidationResult = (
        spark_validator.expect_queried_table_row_count_to_be(
            value=value,
            row_condition=row_condition,
            condition_parser="great_expectations__experimental__",
        )
    )

    assert validation_result["success"] is success
    assert validation_result["result"]["observed_value"] == observed
# The {active_batch} placeholder in the query is substituted by the expectation
# with the current batch's table/view.
@pytest.mark.parametrize(
    "success,query,value,observed,row_condition",
    [
        (
            True,
            "SELECT COUNT (*) FROM (SELECT * FROM {active_batch} LIMIT 100)",
            100,
            100,
            'col("Age")>17',
        ),
    ],
)
def test_expect_queried_column_value_frequency_to_meet_threshold_override_query_spark(
    success,
    query,
    value,
    observed,
    row_condition,
    spark_session,
    basic_spark_df_execution_engine,
    titanic_df,
):
    """Validate expect_queried_table_row_count_to_be with a query override on Spark."""
    df: pd.DataFrame = titanic_df
    validator: Validator = build_spark_validator_with_data(df, spark_session)
    result: ExpectationValidationResult = (
        validator.expect_queried_table_row_count_to_be(
            value=value,
            query=query,
            row_condition=row_condition,
            condition_parser="great_expectations__experimental__",
        )
    )
    assert (
        result["success"] is success and result["result"]["observed_value"] == observed
    )
|
{
"content_hash": "7c89f370a093f06fb98f0a70dd4b28f5",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 159,
"avg_line_length": 30.871657754010695,
"alnum_prop": 0.6563311969513251,
"repo_name": "great-expectations/great_expectations",
"id": "679c8b6ffbf12c8d7606a3604ca51611c89744a6",
"size": "5773",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/expectations/core/test_expect_queried_table_row_count_to_be.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23771"
},
{
"name": "Dockerfile",
"bytes": "2388"
},
{
"name": "HTML",
"bytes": "27311"
},
{
"name": "JavaScript",
"bytes": "45960"
},
{
"name": "Jinja",
"bytes": "66650"
},
{
"name": "Jupyter Notebook",
"bytes": "816323"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "Makefile",
"bytes": "657"
},
{
"name": "Python",
"bytes": "15728777"
},
{
"name": "Shell",
"bytes": "2930"
}
],
"symlink_target": ""
}
|
""" Module for translating ONNX operators into Mxnet operatoes"""
# pylint: disable=unused-argument,protected-access
from . import translation_utils
from .... import symbol
# Method definitions for the callable objects mapped in the import_helper module
def identity(attrs, inputs, cls):
    """Return the input unchanged (ONNX Identity)."""
    return 'identity', attrs, inputs
def random_uniform(attrs, inputs, cls):
    """Draw random samples from a uniform distribution."""
    # The MXNet operator takes no 'seed' attribute, so drop it here.
    new_attr = translation_utils._remove_attributes(attrs, ['seed'])
    return 'random_uniform', new_attr, inputs
def random_normal(attrs, inputs, cls):
    """Draw random samples from a Gaussian distribution.

    Drops the unsupported 'seed' attribute and renames ONNX 'mean' to
    MXNet's 'loc'.
    """
    new_attr = translation_utils._remove_attributes(attrs, ['seed'])
    new_attr = translation_utils._fix_attribute_names(new_attr, {'mean' : 'loc'})
    # BUG FIX: previously returned 'random_uniform', so ONNX RandomNormal
    # nodes silently produced uniform samples instead of Gaussian ones.
    return 'random_normal', new_attr, inputs
# Arithmetic Operations
def add(attrs, inputs, cls):
    """Adding two tensors"""
    new_attr = {}
    # ONNX's legacy 'broadcast' attribute == 1 selects the broadcasting
    # variant; _fix_bias_shape presumably reshapes a bias-like operand to
    # line up with the other input — see translation_utils.
    if 'broadcast' in attrs and attrs['broadcast'] == 1:
        op_value = translation_utils._fix_bias_shape('broadcast_add', inputs, cls)
        return op_value, new_attr, inputs
    return 'elemwise_add', new_attr, inputs
def subtract(attrs, inputs, cls):
    """Subtracting two tensors"""
    new_attr = {}
    if 'broadcast' in attrs and attrs['broadcast'] == 1:
        return 'broadcast_sub', new_attr, inputs
    return 'elemwise_sub', new_attr, inputs
def multiply(attrs, inputs, cls):
    """Multiply two tensors"""
    new_attr = {}
    if 'broadcast' in attrs and attrs['broadcast'] == 1:
        op_value = translation_utils._fix_bias_shape('broadcast_mul', inputs, cls)
        return op_value, new_attr, inputs
    return 'elemwise_mul', new_attr, inputs
def divide(attrs, inputs, cls):
    """Divide two tensors"""
    new_attr = {}
    if 'broadcast' in attrs and attrs['broadcast'] == 1:
        return 'broadcast_div', new_attr, inputs
    return 'elemwise_div', new_attr, inputs
def absolute(attrs, inputs, cls):
    """Returns element-wise absolute value of the input."""
    return 'abs', attrs, inputs
def negative(attrs, inputs, cls):
    """Negation of every element in a tensor"""
    return 'negative', attrs, inputs
def add_n(attrs, inputs, cls):
    """Elementwise sum of arrays"""
    return 'add_n', attrs, inputs
# Sorting and Searching
def argmax(attrs, inputs, cls):
    """Returns indices of the maximum values along an axis"""
    return 'argmax', attrs, inputs
def argmin(attrs, inputs, cls):
    """Returns indices of the minimum values along an axis."""
    return 'argmin', attrs, inputs
def maximum(attrs, inputs, cls):
    """Elementwise maximum of arrays.

    MXNet's maximum operator is binary while ONNX Max is n-ary, so the
    inputs are folded pairwise into a single result symbol.
    """
    mxnet_op = inputs[0]
    for op_input in inputs[1:]:
        mxnet_op = symbol.maximum(mxnet_op, op_input)
    return mxnet_op, attrs, inputs
def minimum(attrs, inputs, cls):
    """Elementwise minimum of arrays.

    MXNet's minimum operator is binary while ONNX Min is n-ary, so the
    inputs are folded pairwise into a single result symbol.
    """
    mxnet_op = inputs[0]
    for op_input in inputs[1:]:
        mxnet_op = symbol.minimum(mxnet_op, op_input)
    return mxnet_op, attrs, inputs
#Hyperbolic functions
def tanh(attrs, inputs, cls):
    """Returns the hyperbolic tangent of the input array."""
    return 'tanh', attrs, inputs
# Rounding
def ceil(attrs, inputs, cls):
    """ Calculate ceil value for input """
    return 'ceil', attrs, inputs
def floor(attrs, inputs, cls):
    """ Calculate floor value for input """
    return 'floor', attrs, inputs
# Joining and spliting
def concat(attrs, inputs, cls):
    """Join input arrays along a given axis (ONNX 'axis' -> MXNet 'dim')."""
    renamed_attrs = translation_utils._fix_attribute_names(attrs, {'axis': 'dim'})
    return 'concat', renamed_attrs, inputs
# Basic neural network functions
def sigmoid(attrs, inputs, cls):
    """Computes elementwise sigmoid of the input array"""
    return 'sigmoid', attrs, inputs
def relu(attrs, inputs, cls):
    """Computes rectified linear function."""
    return 'relu', attrs, inputs
def pad(attrs, inputs, cls):
    """ Add padding to input tensor"""
    # Rename ONNX attributes to their MXNet equivalents.
    new_attrs = translation_utils._fix_attribute_names(attrs, {'pads'  : 'pad_width',
                                                               'value' : 'constant_value'
                                                              })
    # _pad_sequence_fix presumably reorders the flat ONNX pads list into
    # MXNet's per-axis (before, after) layout — see translation_utils.
    new_attrs['pad_width'] = translation_utils._pad_sequence_fix(new_attrs.get('pad_width'))
    return 'pad', new_attrs, inputs
def matrix_multiplication(attrs, inputs, cls):
    """Performs general matrix multiplication"""
    return 'linalg_gemm2', attrs, inputs
def batch_norm(attrs, inputs, cls):
    """Batch normalization."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'})
    # ONNX-only attributes with no MXNet counterpart are dropped.
    new_attrs = translation_utils._remove_attributes(new_attrs,
                                                     ['spatial', 'is_test', 'consumed_inputs'])
    # cudnn_off=1: presumably forces the non-cuDNN implementation — TODO confirm.
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'cudnn_off': 1})
    return 'BatchNorm', new_attrs, inputs
def leaky_relu(attrs, inputs, cls):
    """Leaky Relu function"""
    if 'alpha' in attrs:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
    else:
        # Default slope when ONNX omits 'alpha'.
        new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 0.01})
    return 'LeakyReLU', new_attrs, inputs
def _elu(attrs, inputs, cls):
    """Elu function"""
    if 'alpha' in attrs:
        new_attrs = translation_utils._fix_attribute_names(attrs, {'alpha' : 'slope'})
    else:
        new_attrs = translation_utils._add_extra_attributes(attrs, {'slope': 1.0})
    # ELU is expressed through MXNet's LeakyReLU with act_type='elu'.
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'act_type': 'elu'})
    return 'LeakyReLU', new_attrs, inputs
def _prelu(attrs, inputs, cls):
    """PRelu function"""
    # PReLU is expressed through MXNet's LeakyReLU with act_type='prelu'.
    new_attrs = translation_utils._add_extra_attributes(attrs, {'act_type': 'prelu'})
    return 'LeakyReLU', new_attrs, inputs
def softmax(attrs, inputs, cls):
    """Softmax function."""
    if 'axis' not in attrs:
        # Supply axis=1 when the ONNX node omits it.
        attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
    return 'softmax', attrs, inputs
def conv(attrs, inputs, cls):
    """Compute N-D convolution on (N+2)-D input."""
    # Rename ONNX convolution attributes to their MXNet equivalents.
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    # num_group=1 is supplied as the default; presumably _add_extra_attributes
    # does not override an existing value — TODO confirm.
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Convolution', new_attrs, len(inputs))
    new_attrs = translation_utils._fix_channels('Convolution', new_attrs, inputs, cls)
    return 'Convolution', new_attrs, inputs
def deconv(attrs, inputs, cls):
    """Compute N-D transposed convolution (deconvolution) on (N+2)-D input."""
    # Rename ONNX attributes to their MXNet equivalents.
    new_attrs = translation_utils._fix_attribute_names(attrs, {'kernel_shape' : 'kernel',
                                                               'strides' : 'stride',
                                                               'pads': 'pad',
                                                               'dilations': 'dilate',
                                                               'group': 'num_group'})
    new_attrs = translation_utils._add_extra_attributes(new_attrs, {'num_group' : 1})
    new_attrs = translation_utils._fix_bias('Deconvolution', new_attrs, len(inputs))
    new_attrs = translation_utils._fix_channels('Deconvolution', new_attrs, inputs, cls)
    # BUG FIX: previously returned 'Convolution', so ONNX ConvTranspose nodes
    # were imported as a plain convolution despite the attrs being fixed up
    # for 'Deconvolution' above.
    return 'Deconvolution', new_attrs, inputs
def fully_connected(attrs, inputs, cls):
    """Applies a linear transformation: Y=XWT+b."""
    # MXNet FullyConnected has no 'axis' attribute.
    new_attrs = translation_utils._remove_attributes(attrs, ['axis'])
    new_attrs = translation_utils._fix_bias('FullyConnected', new_attrs, len(inputs))
    new_attrs = translation_utils._fix_channels('FullyConnected', new_attrs, inputs, cls)
    return 'FullyConnected', new_attrs, inputs
def global_maxpooling(attrs, inputs, cls):
    """Performs max pooling on the input."""
    # global_pool=True makes the kernel size irrelevant; (1, 1) is a placeholder.
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'max'})
    return 'pooling', new_attrs, inputs
def global_avgpooling(attrs, inputs, cls):
    """Performs avg pooling on the input."""
    new_attrs = translation_utils._add_extra_attributes(attrs, {'global_pool': True,
                                                                'kernel': (1, 1),
                                                                'pool_type': 'avg'})
    return 'pooling', new_attrs, inputs
def linalg_gemm(attrs, inputs, cls):
    """Performs general matrix multiplication and accumulation"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'transA': 'transpose_a',
                                                               'transB': 'transpose_b'})
    new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
    # _fix_gemm builds the final (op, attrs, inputs) triple itself.
    return translation_utils._fix_gemm('FullyConnected', inputs, new_attrs, cls)
def local_response_norm(attrs, inputs, cls):
    """Local Response Normalization.

    BUG FIX: the signature was ``(op_name, attrs, inputs)``, which is
    inconsistent with every other translator in this module — they are all
    invoked positionally as ``(attrs, inputs, cls)``, so this function
    previously received the attrs dict in ``op_name`` and the inputs in
    ``attrs``.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'bias': 'knorm',
                                                        'size' : 'nsize'})
    return 'LRN', new_attrs, inputs
def dropout(attrs, inputs, cls):
    """Dropout Regularization.

    BUG FIX: the signature was ``(op_name, attrs, inputs)``, inconsistent
    with the module-wide ``(attrs, inputs, cls)`` calling convention, so the
    attrs dict previously arrived in the ``op_name`` parameter.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'ratio': 'p'})
    # 'is_test' has no MXNet counterpart.
    new_attrs = translation_utils._remove_attributes(new_attrs, ['is_test'])
    return 'Dropout', new_attrs, inputs
# Changing shape and type.
def reshape(attrs, inputs, cls):
    """Reshape the given array by the shape attribute."""
    return 'reshape', attrs, inputs
def cast(attrs, inputs, cls):
    """ Cast input to a given dtype"""
    # ONNX 'to' becomes MXNet 'dtype'.
    new_attrs = translation_utils._fix_attribute_names(attrs, {'to' : 'dtype'})
    return 'cast', new_attrs, inputs
def split(attrs, inputs, cls):
    """Splits an array along a particular axis into multiple sub-arrays."""
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'split' : 'num_outputs'})
    return 'split', new_attrs, inputs
def _slice(attrs, inputs, cls):
    """Returns a slice of the input tensor along multiple axes.

    ONNX Slice handles several axes in one node while MXNet's slice_axis
    handles a single axis, so one slice_axis call is chained per axis.
    """
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis',
                                                        'ends' : 'end',
                                                        'starts' : 'begin'})
    begin = new_attrs.get('begin')
    end = new_attrs.get('end')
    # When ONNX omits 'axes', the starts/ends apply to the leading axes in order.
    axes = new_attrs.get('axis', tuple(range(len(begin))))
    slice_op = symbol.slice_axis(inputs[0], axis=axes[0], begin=begin[0], end=end[0])
    # BUG FIX: start from the second axis. The old loop enumerated ALL axes,
    # re-applying the axes[0] slice to the already-sliced symbol and thereby
    # shifting it by begin[0] a second time.
    for i, axis in enumerate(axes[1:], start=1):
        slice_op = symbol.slice_axis(slice_op, axis=axis, begin=begin[i], end=end[i])
    return slice_op, new_attrs, inputs
def transpose(attrs, inputs, cls):
    """Transpose the input array (ONNX 'perm' -> MXNet 'axes')."""
    renamed_attrs = translation_utils._fix_attribute_names(attrs, {'perm': 'axes'})
    return 'transpose', renamed_attrs, inputs
def squeeze(attrs, inputs, cls):
    """Remove single-dimensional entries from the shape of a tensor."""
    # MXNet doesnt have a squeeze operator.
    # Using "split" to perform similar operation.
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'axes' : 'axis'})
    axes = new_attrs.get('axis')
    mxnet_op = symbol.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
    # Removing an axis shifts the later axes left, hence i-1 below.
    # NOTE(review): this assumes `axes` is sorted ascending and that a
    # constant shift of one is sufficient for every later axis — for more
    # than two squeezed axes that looks suspect; confirm against callers.
    for i in axes[1:]:
        mxnet_op = symbol.split(mxnet_op, axis=i-1, num_outputs=1, squeeze_axis=1)
    return mxnet_op, new_attrs, inputs
#Powers
def reciprocal(attrs, inputs, cls):
    """Returns the reciprocal of the argument, element-wise."""
    return 'reciprocal', attrs, inputs
def squareroot(attrs, inputs, cls):
    """Returns element-wise square-root value of the input."""
    return 'sqrt', attrs, inputs
def power(attrs, inputs, cls):
    """Returns element-wise result of base element raised to powers from exp element."""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'exponent':'exp'})
    # Legacy ONNX broadcasting: broadcast==1 selects the broadcasting variant
    # and the attribute itself is dropped since MXNet has no equivalent.
    if 'broadcast' in attrs and attrs['broadcast'] == 1:
        new_attrs = translation_utils._remove_attributes(new_attrs, ['broadcast'])
        return 'broadcast_power', new_attrs, inputs
    return 'pow', new_attrs, inputs
def exponent(attrs, inputs, cls):
    """Elementwise exponent of input array."""
    return 'exp', attrs, inputs
def _log(attrs, inputs, cls):
    """Elementwise log of input array."""
    return 'log', attrs, inputs
# Reduce Functions
def reduce_max(attrs, inputs, cls):
    """Reduce the array along a given axis by maximum value"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'max', new_attrs, inputs
def reduce_mean(attrs, inputs, cls):
    """Reduce the array along a given axis by mean value"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'mean', new_attrs, inputs
def reduce_min(attrs, inputs, cls):
    """Reduce the array along a given axis by minimum value"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'min', new_attrs, inputs
def reduce_sum(attrs, inputs, cls):
    """Reduce the array along a given axis by sum of the values"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'sum', new_attrs, inputs
def reduce_prod(attrs, inputs, cls):
    """Reduce the array along a given axis by product of the values"""
    new_attrs = translation_utils._fix_attribute_names(attrs, {'axes':'axis'})
    return 'prod', new_attrs, inputs
def avg_pooling(attrs, inputs, cls):
    """ Average pooling"""
    # Rename ONNX pooling attributes to their MXNet equivalents.
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'kernel_shape': 'kernel',
                                                        'strides': 'stride',
                                                        'pads': 'pad',
                                                       })
    new_attrs = translation_utils._add_extra_attributes(new_attrs,
                                                        {'pool_type': 'avg',
                                                         'pooling_convention': 'valid'
                                                        })
    # _fix_pooling presumably builds the pooling symbol itself (to handle
    # padding semantics) and returns it in place of an op-name string.
    new_op = translation_utils._fix_pooling('avg', inputs, new_attrs)
    return new_op, new_attrs, inputs
def max_pooling(attrs, inputs, cls):
    """ Max pooling"""
    # Rename ONNX pooling attributes to their MXNet equivalents.
    new_attrs = translation_utils._fix_attribute_names(attrs,
                                                       {'kernel_shape': 'kernel',
                                                        'strides': 'stride',
                                                        'pads': 'pad',
                                                       })
    # BUG FIX: pool_type was 'avg' (copy-paste from avg_pooling above), which
    # contradicted the 'max' passed to _fix_pooling and made ONNX MaxPool
    # nodes request average pooling. The docstring said "Average pooling" too.
    new_attrs = translation_utils._add_extra_attributes(new_attrs,
                                                        {'pool_type': 'max',
                                                         'pooling_convention': 'valid'
                                                        })
    new_op = translation_utils._fix_pooling('max', inputs, new_attrs)
    return new_op, new_attrs, inputs
|
{
"content_hash": "c5be9ae111e7064165a519fb7de9e8f7",
"timestamp": "",
"source": "github",
"line_count": 393,
"max_line_length": 95,
"avg_line_length": 42.06870229007634,
"alnum_prop": 0.5763624266618279,
"repo_name": "jiajiechen/mxnet",
"id": "a67c18199eb811aaa6a460f13f27742473a98763",
"size": "17335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/mxnet/contrib/onnx/_import/op_translations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12255"
},
{
"name": "C",
"bytes": "109701"
},
{
"name": "C++",
"bytes": "4534403"
},
{
"name": "CMake",
"bytes": "73731"
},
{
"name": "Cuda",
"bytes": "779964"
},
{
"name": "Groovy",
"bytes": "217"
},
{
"name": "Java",
"bytes": "20406"
},
{
"name": "Jupyter Notebook",
"bytes": "1319611"
},
{
"name": "Makefile",
"bytes": "48611"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "1057595"
},
{
"name": "Perl 6",
"bytes": "4176"
},
{
"name": "Python",
"bytes": "4431956"
},
{
"name": "R",
"bytes": "287257"
},
{
"name": "Rebol",
"bytes": "353"
},
{
"name": "Scala",
"bytes": "910341"
},
{
"name": "Shell",
"bytes": "199166"
}
],
"symlink_target": ""
}
|
from django import template
from jmboyourwords.models import YourStoryCompetition
from django.contrib.sites.models import Site
register = template.Library()
@register.simple_tag(takes_context=True)
def get_your_words_competitions(context, limit=5, var_name='your_words_list'):
    """Store the current site's published competitions in the template context.

    NOTE(review): the ``limit`` parameter is accepted but never applied to
    the queryset — confirm whether truncation was intended.
    """
    competitions = (
        YourStoryCompetition.objects
        .filter(published=True)
        .filter(sites=Site.objects.get_current())
        .order_by('-publish_on')
    )
    context[var_name] = competitions
    return ""
|
{
"content_hash": "2b9ecbb53a167da142442129b7ab1e89",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 84,
"avg_line_length": 38.92857142857143,
"alnum_prop": 0.6513761467889908,
"repo_name": "praekelt/jmbo-your-words",
"id": "9bcee805c29c3d09086429eb9dc7ca34d05d4324",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "jmboyourwords/templatetags/your_words_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "211"
},
{
"name": "Python",
"bytes": "42584"
}
],
"symlink_target": ""
}
|
"""
:author: Damian Eads, 2009
:license: modified BSD
"""
import numpy as np
from scipy import ndimage
def square(width, dtype=np.uint8):
    """
    Generates a flat, square-shaped structuring element. Every pixel
    along the perimeter has a chessboard distance no greater than radius
    (radius=floor(width/2)) pixels.

    Parameters
    ----------
    width : int
        The width and height of the square

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width,) * 2
    return np.ones(shape, dtype=dtype)
def rectangle(width, height, dtype=np.uint8):
    """
    Generates a flat, rectangular-shaped structuring element of a
    given width and height. Every pixel in the rectangle belongs
    to the neighboorhood.

    NOTE: the first axis of the returned array has length ``width`` and the
    second has length ``height``, mirroring the argument order.

    Parameters
    ----------
    width : int
        The width of the rectangle
    height : int
        The height of the rectangle

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width, height)
    return np.ones(shape, dtype=dtype)
def diamond(radius, dtype=np.uint8):
    """
    Generates a flat, diamond-shaped structuring element of a given
    radius. A pixel is part of the neighborhood (i.e. labeled 1) if
    the city block/manhattan distance between it and the center of the
    neighborhood is no greater than radius.

    Parameters
    ----------
    radius : int
        The radius of the diamond-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # Per-axis distances from the center; their outer sum is the L1 distance.
    offsets = np.abs(np.arange(2 * radius + 1) - radius)
    manhattan = offsets[:, np.newaxis] + offsets[np.newaxis, :]
    return np.array(manhattan <= radius, dtype=dtype)
def disk(radius, dtype=np.uint8):
    """
    Generates a flat, disk-shaped structuring element of a given radius.
    A pixel is within the neighborhood if the euclidean distance between
    it and the origin is no greater than radius.

    Parameters
    ----------
    radius : int
        The radius of the disk-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    coords = np.linspace(-radius, radius, 2 * radius + 1)
    # Outer sum of squared per-axis offsets = squared euclidean distance.
    dist_sq = coords[:, np.newaxis] ** 2 + coords[np.newaxis, :] ** 2
    return np.array(dist_sq <= radius * radius, dtype=dtype)
def cube(width, dtype=np.uint8):
    """
    Generates a cube-shaped structuring element (the 3D equivalent of
    a square). Every pixel along the perimeter has a chessboard distance
    no greater than radius (radius=floor(width/2)) pixels.

    Parameters
    ----------
    width : int
        The width, height and depth of the cube

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        A structuring element consisting only of ones, i.e. every
        pixel belongs to the neighborhood.
    """
    shape = (width,) * 3
    return np.ones(shape, dtype=dtype)
def octahedron(radius, dtype=np.uint8):
    """
    Generates a octahedron-shaped structuring element of a given radius
    (the 3D equivalent of a diamond). A pixel is part of the
    neighborhood (i.e. labeled 1) if the city block/manhattan distance
    between it and the center of the neighborhood is no greater than
    radius.

    Parameters
    ----------
    radius : int
        The radius of the octahedron-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # mgrid with a complex step count accepts non-integer radii,
    # unlike diamond() above.
    n = 2 * radius + 1
    Z, Y, X = np.mgrid[-radius:radius:n * 1j,
                       -radius:radius:n * 1j,
                       -radius:radius:n * 1j]
    manhattan = np.abs(X) + np.abs(Y) + np.abs(Z)
    return (manhattan <= radius).astype(dtype)
def ball(radius, dtype=np.uint8):
    """
    Generates a ball-shaped structuring element of a given radius (the
    3D equivalent of a disk). A pixel is within the neighborhood if the
    euclidean distance between it and the origin is no greater than
    radius.

    Parameters
    ----------
    radius : int
        The radius of the ball-shaped structuring element.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    n = 2 * radius + 1
    Z, Y, X = np.mgrid[-radius:radius:n * 1j,
                       -radius:radius:n * 1j,
                       -radius:radius:n * 1j]
    dist_sq = X ** 2 + Y ** 2 + Z ** 2
    return (dist_sq <= radius * radius).astype(dtype)
def octagon(m, n, dtype=np.uint8):
    """
    Generates an octagon shaped structuring element with a given size of
    horizontal and vertical sides and a given height or width of slanted
    sides. The slanted sides are 45 or 135 degrees to the horizontal axis
    and hence the widths and heights are equal.

    Parameters
    ----------
    m : int
        The size of the horizontal and vertical sides.
    n : int
        The height or width of the slanted sides.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # Imported lazily, presumably to avoid a circular import with the
    # package __init__ — TODO confirm.
    from . import convex_hull_image
    selem = np.zeros((m + 2*n, m + 2*n))
    # Mark the eight corner points of the octagon; the convex hull fill
    # below turns them into the solid shape.
    selem[0, n] = 1
    selem[n, 0] = 1
    selem[0, m + n - 1] = 1
    selem[m + n - 1, 0] = 1
    selem[-1, n] = 1
    selem[n, -1] = 1
    selem[-1, m + n - 1] = 1
    selem[m + n - 1, -1] = 1
    selem = convex_hull_image(selem).astype(dtype)
    return selem
def star(a, dtype=np.uint8):
    """
    Generates a star shaped structuring element that has 8 vertices and is an
    overlap of square of size `2*a + 1` with its 45 degree rotated version.
    The slanted sides are 45 or 135 degrees to the horizontal axis.

    Parameters
    ----------
    a : int
        Parameter deciding the size of the star structural element. The side
        of the square array returned is `2*a + 1 + 2*floor(a / 2)`.

    Other Parameters
    ----------------
    dtype : data-type
        The data type of the structuring element.

    Returns
    -------
    selem : ndarray
        The structuring element where elements of the neighborhood
        are 1 and 0 otherwise.
    """
    # Imported lazily, presumably to avoid a circular import with the
    # package __init__ — TODO confirm.
    from . import convex_hull_image
    if a == 1:
        # Degenerate case: the star collapses to a full 3x3 square.
        bfilter = np.zeros((3, 3), dtype)
        bfilter[:] = 1
        return bfilter
    m = 2 * a + 1
    n = a // 2
    # Axis-aligned square centered in the padded array.
    selem_square = np.zeros((m + 2 * n, m + 2 * n))
    selem_square[n: m + n, n: m + n] = 1
    # 45-degree rotated square: mark the four extreme points and fill
    # their convex hull.
    c = (m + 2 * n - 1) // 2
    selem_rotated = np.zeros((m + 2 * n, m + 2 * n))
    selem_rotated[0, c] = selem_rotated[-1, c] = selem_rotated[c, 0] = selem_rotated[c, -1] = 1
    selem_rotated = convex_hull_image(selem_rotated).astype(int)
    # The star is the union (logical OR) of the two squares.
    selem = selem_square + selem_rotated
    selem[selem > 0] = 1
    return selem.astype(dtype)
def _default_selem(ndim):
"""
Generates a cross-shaped structuring element (connectivity=1). This is the
default structuring element (selem) if no selem was specified.
Parameters
----------
ndim : int
Number of dimensions of the image.
Returns
-------
selem : ndarray
The structuring element where elements of the neighborhood
are 1 and 0 otherwise.
"""
return ndimage.morphology.generate_binary_structure(ndim, 1)
|
{
"content_hash": "e4f63a06b979a0c05610d2dc66c83bcc",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 95,
"avg_line_length": 27.268370607028753,
"alnum_prop": 0.6032806092560047,
"repo_name": "SamHames/scikit-image",
"id": "5e0606c087767fd0cc7da0cf4e061bbb62d221f6",
"size": "8535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "skimage/morphology/selem.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "70225"
},
{
"name": "CSS",
"bytes": "3629"
},
{
"name": "JavaScript",
"bytes": "787"
},
{
"name": "Python",
"bytes": "2219600"
},
{
"name": "Shell",
"bytes": "3346"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import itertools
import os.path
import shutil
import pytest
from pre_commit import git
from pre_commit.staged_files_only import staged_files_only
from pre_commit.util import cmd_output
from testing.auto_namedtuple import auto_namedtuple
from testing.fixtures import git_dir
from testing.util import cwd
from testing.util import get_resource_path
from testing.util import git_commit
# Contents written to the `foo` fixture file: the digits 1-8, one per line.
FOO_CONTENTS = '\n'.join(('1', '2', '3', '4', '5', '6', '7', '8', ''))
@pytest.fixture
def patch_dir(tempdir_factory):
    # Fresh temporary directory for staged_files_only() to write its patch into.
    return tempdir_factory.get()
def get_short_git_status():
    """Return a mapping of filename -> short status code from `git status -s`."""
    output = cmd_output('git', 'status', '-s')[1]
    status_by_file = {}
    for line in output.splitlines():
        code, filename = line.split()
        status_by_file[filename] = code
    return status_by_file
@pytest.fixture
def foo_staged(in_git_dir):
    # Create `foo` with known contents and stage it; yields the repo path and
    # the absolute filename so tests can modify/inspect the file.
    foo = in_git_dir.join('foo')
    foo.write(FOO_CONTENTS)
    cmd_output('git', 'add', 'foo')
    yield auto_namedtuple(path=in_git_dir.strpath, foo_filename=foo.strpath)
def _test_foo_state(
    path,
    foo_contents=FOO_CONTENTS,
    status='A',
    encoding='UTF-8',
):
    """Assert that `foo` exists with the expected contents and git status."""
    assert os.path.exists(path.foo_filename)
    with open(path.foo_filename, encoding=encoding) as f:
        contents = f.read()
    assert contents == foo_contents
    assert get_short_git_status()['foo'] == status
def test_foo_staged(foo_staged):
    # Sanity check: the fixture itself leaves foo staged and unmodified.
    _test_foo_state(foo_staged)
def test_foo_nothing_unstaged(foo_staged, patch_dir):
    # With no unstaged changes, the context manager must be a no-op.
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged)
def test_foo_something_unstaged(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write('herp\nderp\n')
    _test_foo_state(foo_staged, 'herp\nderp\n', 'AM')
    # Unstaged edits are stashed inside the context and restored afterwards.
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged, 'herp\nderp\n', 'AM')
def test_does_not_crash_patch_dir_does_not_exist(foo_staged, patch_dir):
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write('hello\nworld\n')
    # Delete the patch dir: stashing must recreate it rather than crash.
    shutil.rmtree(patch_dir)
    with staged_files_only(patch_dir):
        pass
def test_something_unstaged_ext_diff_tool(foo_staged, patch_dir, tmpdir):
    # Configure an external diff tool; it must not corrupt the stashed patch.
    diff_tool = tmpdir.join('diff-tool.sh')
    diff_tool.write('#!/usr/bin/env bash\necho "$@"\n')
    cmd_output('git', 'config', 'diff.external', diff_tool.strpath)
    test_foo_something_unstaged(foo_staged, patch_dir)
def test_foo_something_unstaged_diff_color_always(foo_staged, patch_dir):
    # Colorized diff output must not corrupt the stashed patch.
    cmd_output('git', 'config', '--local', 'color.diff', 'always')
    test_foo_something_unstaged(foo_staged, patch_dir)
def test_foo_both_modify_non_conflicting(foo_staged, patch_dir):
    # Unstaged change appends a line at the end of the file.
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write(f'{FOO_CONTENTS}9\n')
    _test_foo_state(foo_staged, f'{FOO_CONTENTS}9\n', 'AM')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
        # Modify the file as part of the "pre-commit"
        with open(foo_staged.foo_filename, 'w') as foo_file:
            foo_file.write(FOO_CONTENTS.replace('1', 'a'))
        _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')
    # Non-conflicting edits merge: both the hook's change and the stashed
    # trailing line survive.
    _test_foo_state(foo_staged, f'{FOO_CONTENTS.replace("1", "a")}9\n', 'AM')
def test_foo_both_modify_conflicting(foo_staged, patch_dir):
    # Unstaged change touches the first line.
    with open(foo_staged.foo_filename, 'w') as foo_file:
        foo_file.write(FOO_CONTENTS.replace('1', 'a'))
    _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
        # Modify in the same place as the stashed diff
        with open(foo_staged.foo_filename, 'w') as foo_file:
            foo_file.write(FOO_CONTENTS.replace('1', 'b'))
        _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'b'), 'AM')
    # On conflict, the stashed (user's) version wins over the hook's edit.
    _test_foo_state(foo_staged, FOO_CONTENTS.replace('1', 'a'), 'AM')
@pytest.fixture
def img_staged(in_git_dir):
    # Stage a binary (JPEG) file to exercise the binary-diff code path.
    img = in_git_dir.join('img.jpg')
    shutil.copy(get_resource_path('img1.jpg'), img.strpath)
    cmd_output('git', 'add', 'img.jpg')
    yield auto_namedtuple(path=in_git_dir.strpath, img_filename=img.strpath)
def _test_img_state(path, expected_file='img1.jpg', status='A'):
    """Assert img.jpg byte-matches the given resource and has the git status."""
    assert os.path.exists(path.img_filename)
    with open(path.img_filename, 'rb') as f1:
        with open(get_resource_path(expected_file), 'rb') as f2:
            assert f1.read() == f2.read()
    actual_status = get_short_git_status()['img.jpg']
    assert status == actual_status
def test_img_staged(img_staged):
    # Sanity check: the fixture leaves the image staged and unmodified.
    _test_img_state(img_staged)
def test_img_nothing_unstaged(img_staged, patch_dir):
    # With no unstaged changes, the context manager must be a no-op.
    with staged_files_only(patch_dir):
        _test_img_state(img_staged)
    _test_img_state(img_staged)
def test_img_something_unstaged(img_staged, patch_dir):
    # An unstaged binary change must be stashed and then restored.
    shutil.copy(get_resource_path('img2.jpg'), img_staged.img_filename)
    _test_img_state(img_staged, 'img2.jpg', 'AM')
    with staged_files_only(patch_dir):
        _test_img_state(img_staged)
    _test_img_state(img_staged, 'img2.jpg', 'AM')
def test_img_conflict(img_staged, patch_dir):
    """Admittedly, this shouldn't happen, but just in case."""
    shutil.copy(get_resource_path('img2.jpg'), img_staged.img_filename)
    _test_img_state(img_staged, 'img2.jpg', 'AM')
    with staged_files_only(patch_dir):
        _test_img_state(img_staged)
        # The hook rewrites the image inside the context.
        shutil.copy(get_resource_path('img3.jpg'), img_staged.img_filename)
        _test_img_state(img_staged, 'img3.jpg', 'AM')
    # On binary conflict, the stashed (user's) version wins.
    _test_img_state(img_staged, 'img2.jpg', 'AM')
@pytest.fixture
def repo_with_commits(tempdir_factory):
    # A throwaway repo with two commits; yields the path plus both revisions
    # so tests can move a submodule pointer between them.
    path = git_dir(tempdir_factory)
    with cwd(path):
        open('foo', 'a+').close()
        cmd_output('git', 'add', 'foo')
        git_commit()
        rev1 = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()
        git_commit()
        rev2 = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()
        yield auto_namedtuple(path=path, rev1=rev1, rev2=rev2)
def checkout_submodule(rev):
    # Check out `rev` inside the 'sub' submodule of the current directory.
    cmd_output('git', 'checkout', rev, cwd='sub')
@pytest.fixture
def sub_staged(repo_with_commits, tempdir_factory):
    # A repo containing `repo_with_commits` as staged submodule 'sub',
    # pinned at rev1.
    path = git_dir(tempdir_factory)
    with cwd(path):
        open('bar', 'a+').close()
        cmd_output('git', 'add', 'bar')
        git_commit()
        cmd_output(
            'git', 'submodule', 'add', repo_with_commits.path, 'sub',
        )
        checkout_submodule(repo_with_commits.rev1)
        cmd_output('git', 'add', 'sub')
        yield auto_namedtuple(
            path=path,
            sub_path=os.path.join(path, 'sub'),
            submodule=repo_with_commits,
        )
def _test_sub_state(path, rev='rev1', status='A'):
    """Assert the submodule HEAD matches the named rev and git status."""
    assert os.path.exists(path.sub_path)
    with cwd(path.sub_path):
        actual_rev = cmd_output('git', 'rev-parse', 'HEAD')[1].strip()
    assert actual_rev == getattr(path.submodule, rev)
    actual_status = get_short_git_status()['sub']
    assert actual_status == status
def test_sub_staged(sub_staged):
_test_sub_state(sub_staged)
def test_sub_nothing_unstaged(sub_staged, patch_dir):
with staged_files_only(patch_dir):
_test_sub_state(sub_staged)
_test_sub_state(sub_staged)
def test_sub_something_unstaged(sub_staged, patch_dir):
checkout_submodule(sub_staged.submodule.rev2)
_test_sub_state(sub_staged, 'rev2', 'AM')
with staged_files_only(patch_dir):
# This is different from others, we don't want to touch subs
_test_sub_state(sub_staged, 'rev2', 'AM')
_test_sub_state(sub_staged, 'rev2', 'AM')
def test_submodule_does_not_discard_changes(sub_staged, patch_dir):
    """Unstaged parent edits are stashed; the submodule worktree is untouched."""
    with open('bar', 'w') as f:
        f.write('unstaged changes')
    foo_path = os.path.join(sub_staged.sub_path, 'foo')
    with open(foo_path, 'w') as f:
        f.write('foo contents')

    def read(path):
        with open(path) as f:
            return f.read()

    with staged_files_only(patch_dir):
        assert read('bar') == ''
        assert read(foo_path) == 'foo contents'
    assert read('bar') == 'unstaged changes'
    assert read(foo_path) == 'foo contents'
def test_submodule_does_not_discard_changes_recurse(sub_staged, patch_dir):
    """Re-run test_submodule_does_not_discard_changes with submodule.recurse=1."""
    cmd_output('git', 'config', 'submodule.recurse', '1', cwd=sub_staged.path)
    test_submodule_does_not_discard_changes(sub_staged, patch_dir)
def test_stage_utf8_changes(foo_staged, patch_dir):
    """UTF-8 content round-trips through the stash/restore cycle."""
    snowman = '\u2603'
    with open('foo', 'w', encoding='UTF-8') as f:
        f.write(snowman)
    _test_foo_state(foo_staged, snowman, 'AM')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged, snowman, 'AM')
def test_stage_non_utf8_changes(foo_staged, patch_dir):
    """Latin-1 (non-UTF-8) content survives the stash/restore cycle."""
    contents = 'ú'
    # Produce a latin-1 diff
    with open('foo', 'w', encoding='latin-1') as foo_file:
        foo_file.write(contents)
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
def test_non_utf8_conflicting_diff(foo_staged, patch_dir):
    """Regression test for #397: a latin-1 patch that fails to re-apply
    cleanly must be rolled back without a decode crash."""
    # The trailing whitespace is important here, this triggers git to produce
    # an error message which looks like:
    #
    # ...patch1471530032:14: trailing whitespace.
    # [[unprintable character]][[space character]]
    # error: patch failed: foo:1
    # error: foo: patch does not apply
    #
    # Previously, the error message (though discarded immediately) was being
    # decoded with the UTF-8 codec (causing a crash)
    contents = 'ú \n'
    with open('foo', 'w', encoding='latin-1') as foo_file:
        foo_file.write(contents)
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
    with staged_files_only(patch_dir):
        _test_foo_state(foo_staged)
        # Create a conflicting diff that will need to be rolled back
        with open('foo', 'w') as foo_file:
            foo_file.write('')
    _test_foo_state(foo_staged, contents, 'AM', encoding='latin-1')
def _write(b):
    """Overwrite ./foo with the raw bytes *b*."""
    with open('foo', 'wb') as f:
        f.write(b)
def assert_no_diff():
    """Fail (via non-zero exit) if the working tree differs from the index."""
    tree = cmd_output('git', 'write-tree')[1].strip()
    cmd_output('git', 'diff-index', tree, '--exit-code')
# All four combinations of (crlf_before, crlf_after).
bool_product = tuple(itertools.product((True, False), repeat=2))


@pytest.mark.parametrize(('crlf_before', 'crlf_after'), bool_product)
@pytest.mark.parametrize('autocrlf', ('true', 'false', 'input'))
def test_crlf(in_git_dir, patch_dir, crlf_before, crlf_after, autocrlf):
    """Stash/restore must be clean for every line-ending x autocrlf combo."""
    cmd_output('git', 'config', '--local', 'core.autocrlf', autocrlf)
    before, after = b'1\n2\n', b'3\n4\n\n'
    # Optionally convert either version to CRLF line endings.
    before = before.replace(b'\n', b'\r\n') if crlf_before else before
    after = after.replace(b'\n', b'\r\n') if crlf_after else after
    _write(before)
    cmd_output('git', 'add', 'foo')
    _write(after)
    with staged_files_only(patch_dir):
        assert_no_diff()
def test_whitespace_errors(in_git_dir, patch_dir):
    """apply.whitespace=error must not break patch re-application."""
    cmd_output('git', 'config', '--local', 'apply.whitespace', 'error')
    test_crlf(in_git_dir, patch_dir, True, True, 'true')
def test_autocrlf_committed_crlf(in_git_dir, patch_dir):
    """Regression test for #570: CRLF committed with autocrlf off, then
    modified with autocrlf on, must survive the stash/restore cycle."""
    cmd_output('git', 'config', '--local', 'core.autocrlf', 'false')
    _write(b'1\r\n2\r\n')
    cmd_output('git', 'add', 'foo')
    git_commit()

    cmd_output('git', 'config', '--local', 'core.autocrlf', 'true')
    _write(b'1\r\n2\r\n\r\n\r\n\r\n')

    with staged_files_only(patch_dir):
        assert_no_diff()
def test_intent_to_add(in_git_dir, patch_dir):
    """Regression test for #881: intent-to-add entries survive the round trip."""
    _write(b'hello\nworld\n')
    cmd_output('git', 'add', '--intent-to-add', 'foo')

    assert git.intent_to_add_files() == ['foo']
    with staged_files_only(patch_dir):
        assert_no_diff()
    assert git.intent_to_add_files() == ['foo']
|
{
"content_hash": "5b35a22b575f9f251cb072861ea45be3",
"timestamp": "",
"source": "github",
"line_count": 384,
"max_line_length": 78,
"avg_line_length": 31.096354166666668,
"alnum_prop": 0.6353739217820953,
"repo_name": "pre-commit/pre-commit",
"id": "a91f315198230d9da603573fedc3f9695e7fa0f8",
"size": "11943",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/staged_files_only_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "753"
},
{
"name": "Dart",
"bytes": "142"
},
{
"name": "Dockerfile",
"bytes": "508"
},
{
"name": "Go",
"bytes": "240"
},
{
"name": "JavaScript",
"bytes": "128"
},
{
"name": "Lua",
"bytes": "513"
},
{
"name": "Perl",
"bytes": "532"
},
{
"name": "PowerShell",
"bytes": "744"
},
{
"name": "Python",
"bytes": "511310"
},
{
"name": "R",
"bytes": "24268"
},
{
"name": "Ruby",
"bytes": "829"
},
{
"name": "Rust",
"bytes": "56"
},
{
"name": "Shell",
"bytes": "3952"
},
{
"name": "Swift",
"bytes": "181"
}
],
"symlink_target": ""
}
|
import logging
import os
import time
import zopkio.adhoc_deployer as adhoc_deployer
from zopkio.remote_host_helper import get_ssh_client, exec_with_env
import zopkio.runtime as runtime
logger = logging.getLogger(__name__)
class StreamProcessor:
    """
    Represents a standalone StreamProcessor that uses zookeeper for coordination.
    Used in standalone failure tests to manage the lifecycle of the linux process
    (start, kill, pause) associated with the StreamProcessor.
    """

    def __init__(self, host_name, processor_id):
        """
        :param host_name: Represents the host name in which this StreamProcessor will run.
        :param processor_id: Represents the processor_id of StreamProcessor.
        """
        # processor_id is reused as the log dir, the samza processor.id and
        # the log file name (the three {0} placeholders below).
        start_cmd = 'export SAMZA_LOG_DIR=\"deploy/{0}\"; export JAVA_OPTS=\"$JAVA_OPTS -Xmx2G\"; ./bin/run-class.sh org.apache.samza.test.integration.LocalApplicationRunnerMain --config-path ./config/standalone.failure.test.properties --operation run --config processor.id={0} >> /tmp/{0}.log &'
        self.username = runtime.get_username()
        self.password = runtime.get_password()
        self.processor_id = processor_id
        self.host_name = host_name
        self.processor_start_command = start_cmd.format(self.processor_id)
        logger.info('Running processor start command: {0}'.format(self.processor_start_command))
        # Config consumed by zopkio's SSHDeployer.
        self.deployment_config = {
            'install_path': os.path.join(runtime.get_active_config('remote_install_path'), 'deploy/{0}'.format(self.processor_id)),
            'executable': 'samza-test_2.11-1.6.0-SNAPSHOT.tgz',
            'post_install_cmds': [],
            'start_command': self.processor_start_command,
            'stop_command': '',
            'extract': True,
            'sync': True,
        }
        self.deployer = adhoc_deployer.SSHDeployer(self.processor_id, self.deployment_config)

    def start(self):
        """
        Submits the StreamProcessor for execution on a host: host_name.
        """
        logger.info("Starting processor with id: {0}.".format(self.processor_id))
        self.deployer.start(self.processor_id, {'hostname': self.host_name})

    def get_processor_id(self):
        """
        Returns the processorId of the StreamProcessor.
        """
        return self.processor_id

    def kill(self):
        """
        Kills the StreamProcessor process through SIGKILL signal.
        """
        self.__send_signal_to_processor("SIGKILL")

    def pause(self):
        """
        Pauses the StreamProcessor process through SIGSTOP signal.
        """
        self.__send_signal_to_processor("SIGSTOP")

    def resume(self):
        """
        Resumes the stream processor process through SIGCONT signal.
        """
        self.__send_signal_to_processor("SIGCONT")

    def __send_signal_to_processor(self, signal):
        """
        Sends a signal(:param signal) to the linux process of the StreamProcessor.
        """
        linux_process_pids = self.__get_pid()
        # There may be zero, one or several matching pids.
        for linux_process_pid in linux_process_pids:
            command = "kill -{0} {1}".format(signal, linux_process_pid)
            result = self.__execute_command(command)
            logger.info("Result of {0} is: {1}.".format(command, result))

    def __get_pid(self):
        """
        Determines the linux process id associated with this StreamProcessor.
        """
        ps_command = "ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'".format(self.processor_id)
        # grep exits 1 when nothing matches; treat exit codes 0 and 1 as
        # success so "no process found" is not reported as a failure.
        non_failing_command = "{0}; if [ $? -le 1 ]; then true; else false; fi;".format(ps_command)
        logger.info("Executing command: {0}.".format(non_failing_command))
        full_output = self.__execute_command(non_failing_command)
        pids = []
        if len(full_output) > 0:
            pids = [int(pid_str) for pid_str in full_output.split('\n') if pid_str.isdigit()]
        return pids

    def __execute_command(self, command):
        """
        Executes the :param command on host: self.host_name.
        """
        with get_ssh_client(self.host_name, username=self.username, password=self.password) as ssh:
            chan = exec_with_env(ssh, command, msg="Failed to get PID", env={})
        execution_result = ''
        while True:
            # NOTE(review): chan.recv returns bytes on Python 3, which would
            # fail the str += below — presumably this targets Python 2; confirm.
            result_buffer = chan.recv(16)
            if len(result_buffer) == 0:
                break
            execution_result += result_buffer
        return execution_result
|
{
"content_hash": "e8d011d9cedd1cf7451eb68d37822a10",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 297,
"avg_line_length": 42.51428571428571,
"alnum_prop": 0.6176075268817204,
"repo_name": "lhaiesp/samza",
"id": "f42590b30c28ee85fb1e39a97a3ba613a2cfba03",
"size": "5249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samza-test/src/main/python/stream_processor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4148"
},
{
"name": "HTML",
"bytes": "13819"
},
{
"name": "Java",
"bytes": "7499803"
},
{
"name": "JavaScript",
"bytes": "3373"
},
{
"name": "Python",
"bytes": "88154"
},
{
"name": "Scala",
"bytes": "1144335"
},
{
"name": "Shell",
"bytes": "48633"
},
{
"name": "XSLT",
"bytes": "7116"
}
],
"symlink_target": ""
}
|
"""Hello world Lambda function for mu testing.
"""
import json
import sys
def main(event, context):
    """Lambda entry point: echo the triggering event to stdout as JSON."""
    from c7n.utils import parse_cidr
    parse_cidr('10.0.0.0/24')  # while we're here, ensure ipaddress availability
    json.dump(event, sys.stdout)
def get_function(session_factory, name, role, events):
    """Package this module as a 'Hello World' c7n Lambda function.

    :param session_factory: unused here; kept for the common call signature.
    :param name: Lambda function name.
    :param role: IAM role ARN the function executes under.
    :param events: event sources to wire up.
    :return: a c7n ``LambdaFunction`` wrapping this file.
    """
    from c7n.mu import LambdaFunction, custodian_archive

    settings = {
        'name': name,
        'handler': 'helloworld.main',
        'runtime': 'python2.7',
        'memory_size': 512,
        'timeout': 15,
        'role': role,
        'description': 'Hello World',
        'events': events,
    }

    pkg = custodian_archive()
    pkg.add_py_file(__file__)
    pkg.close()
    return LambdaFunction(settings, pkg)
|
{
"content_hash": "c954f5aad3cfa64233b976311a6ea0d6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 24.233333333333334,
"alnum_prop": 0.6478679504814305,
"repo_name": "alfredgamulo/cloud-custodian",
"id": "46af14eac5c0a2e7e26feda0885e9b9f25a3ae9a",
"size": "806",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/data/helloworld.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2126"
},
{
"name": "Go",
"bytes": "146637"
},
{
"name": "HCL",
"bytes": "33977"
},
{
"name": "Jinja",
"bytes": "19775"
},
{
"name": "Makefile",
"bytes": "14242"
},
{
"name": "PowerShell",
"bytes": "1804"
},
{
"name": "Python",
"bytes": "6579430"
},
{
"name": "Shell",
"bytes": "15323"
},
{
"name": "Smarty",
"bytes": "359"
}
],
"symlink_target": ""
}
|
r"""volume.py tests"""
from corelib.units.volume import m3
def test_volume():
    r"""1000 litres must equal one cubic metre, within 1e-10."""
    expected = 1.
    tolerance = 1e-10
    assert abs(m3(litre=1000.) - expected) <= tolerance
|
{
"content_hash": "9bad34c30096c806a84eab8196d457ca",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 76,
"avg_line_length": 23,
"alnum_prop": 0.6478260869565218,
"repo_name": "fullmar/corelib",
"id": "b55ec2de8859b3ef593718547a8e9c926b1e86d9",
"size": "269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_units/test_volume.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47676"
}
],
"symlink_target": ""
}
|
from functools import wraps
import flask
from zeeguu.model.user import User
# we define the blueprint here, and extended it in several files
account = flask.Blueprint("account", __name__)
@account.before_request
def setup():
    """Attach the logged-in User (or None) to flask.g before each request."""
    flask.g.user = (
        User.query.get(flask.session["user_id"])
        if "user_id" in flask.session
        else None
    )
def login_first(fun):
    """
    Function Wrapper.

    Makes sure that the user is logged in.  Anonymous visitors are
    redirected to the login page, with the originally requested url
    appended so they come back after logging in.
    """
    @wraps(fun)
    def decorated_function(*args, **kwargs):
        flask.g.user = (
            User.query.get(flask.session["user_id"])
            if "user_id" in flask.session
            else None
        )
        if not flask.g.user:
            next_url = flask.request.url
            login_url = '%s?next=%s' % (flask.url_for('account.login'), next_url)
            return flask.redirect(login_url)
        return fun(*args, **kwargs)
    return decorated_function
from . import bookmarks
from . import home
from . import login
from . import reading
from . import reset_pass
from . import static_pages
from . import user_stats
from . import teacher
from . import watch_connect
|
{
"content_hash": "6c342570798c2162d160079e0b17c3a8",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 81,
"avg_line_length": 23.92452830188679,
"alnum_prop": 0.6356466876971609,
"repo_name": "MrAlexDeluxe/Zeeguu-Web",
"id": "fc1452fb2c541a9b91b86a09720efe93eddeaf35",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zeeguu_web/account/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53899"
},
{
"name": "HTML",
"bytes": "49472"
},
{
"name": "JavaScript",
"bytes": "176094"
},
{
"name": "Python",
"bytes": "20781"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
'''
Cockcoin base58 encoding and decoding.
Based on https://cockcointalk.org/index.php?topic=1026.0 (public domain)
'''
import hashlib
# for compatibility with following code...
class SHA256:
    """Hash factory exposing a .new() constructor, as the code below expects."""
    new = hashlib.sha256
if str != bytes:
    # Python 3.x: iterating bytes yields ints, so redefine ord/chr
    # (module-wide) to keep the byte-oriented code below working unchanged.
    def ord(c):
        return c
    def chr(n):
        return bytes( (n,) )
# Base58 alphabet: digits and letters minus the easily confused 0, O, I, l.
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
__b58base = len(__b58chars)
b58chars = __b58chars  # public alias
def b58encode(v):
    """Encode v, a string of bytes, to a base58 string.

    Leading zero bytes in the input are encoded as leading '1'
    characters (bitcoin-style leading-zero compression).
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256**i) * ord(c)

    result = ''
    while long_value >= __b58base:
        div, mod = divmod(long_value, __b58base)
        result = __b58chars[mod] + result
        long_value = div
    result = __b58chars[long_value] + result

    # Leading-zero compression: leading 0-bytes become leading '1's.
    # Compare via ord() so this works on Python 2 (iteration yields
    # 1-char strings) and Python 3 (iteration yields ints — the original
    # test `c == '\0'` could never match there, dropping the padding).
    nPad = 0
    for c in v:
        if ord(c) == 0:
            nPad += 1
        else:
            break

    return (__b58chars[0] * nPad) + result
def b58decode(v, length = None):
    """Decode base58 string *v* into bytes.

    If *length* is given and the decoded result is not exactly that many
    bytes, return None instead.
    """
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += __b58chars.find(c) * (__b58base**i)

    result = bytes()
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        # chr is the module-level shim: builds bytes on Python 3, str on 2.
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result

    # Leading '1' characters represent leading zero bytes.
    nPad = 0
    for c in v:
        if c == __b58chars[0]: nPad += 1
        else: break
    result = chr(0)*nPad + result
    if length is not None and len(result) != length:
        return None

    return result
def checksum(v):
    """Return the 32-bit (4-byte) double-SHA256 checksum of *v*."""
    inner = SHA256.new(v).digest()
    return SHA256.new(inner).digest()[:4]
def b58encode_chk(v):
    """b58encode *v* with its 32-bit checksum appended."""
    return b58encode(v + checksum(v))
def b58decode_chk(v):
    """Decode a base58 string, verify and strip the 4-byte checksum.

    Returns the payload, or None if decoding fails or the checksum
    does not match.
    """
    result = b58decode(v)
    if result is None:
        return None
    payload, expected = result[:-4], result[-4:]
    # Compute the checksum once (the original computed it twice and left
    # the first result, h3, unused — a dead store doubling the hashing).
    if expected == checksum(payload):
        return payload
    return None
def get_bcaddress_version(strAddress):
    """ Returns None if strAddress is invalid. Otherwise returns integer version of address. """
    decoded = b58decode_chk(strAddress)
    if decoded is None:
        return None
    if len(decoded) != 21:
        return None
    # First payload byte is the version (ord is the cross-version shim).
    return ord(decoded[0])
if __name__ == '__main__':
    # Test case (from http://gitorious.org/cockcoin/python-base58.git)
    # Use == rather than `is`: identity comparison against an int literal
    # is implementation-dependent and a SyntaxWarning on Python >= 3.8.
    assert get_bcaddress_version('15VjRaDX9zpbA8LVnbrCAFzrVzN7ixHNsC') == 0
    _ohai = 'o hai'.encode('ascii')
    _tmp = b58encode(_ohai)
    assert _tmp == 'DYB3oMS'
    assert b58decode(_tmp, 5) == _ohai
    print("Tests passed")
|
{
"content_hash": "705d6644ed69ce5e1db39531d35e3b7c",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 27.134615384615383,
"alnum_prop": 0.6080793763288448,
"repo_name": "cockcoin/cockcoin",
"id": "b121be88ce092375f1c4fd2223b2a2d70c8a53fe",
"size": "2822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/testgen/base58.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from hidparser import DeviceBuilder
from hidparser.Item import ItemType, Item, ValueItem
from hidparser.Device import Unit as _Unit
from hidparser.UsagePage import UsagePage
class UsagePageItem(ValueItem):
    """GLOBAL Usage Page item (tag 0x04): selects the current usage page."""

    usage_page = None

    def __init__(self, *args, **kwargs):
        # Usage page identifiers are unsigned.
        kwargs["signed"] = False
        # Forward *args as well, for consistency with the other ValueItem
        # subclasses in this module (the original silently dropped them).
        super(UsagePageItem, self).__init__(*args, **kwargs)
        # A usage page id is one or two bytes.
        if len(self.data) not in [1, 2]:
            raise ValueError("UsagePage has invalid length")
        self.usage_page = UsagePage.find_usage_page(self.value)

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_usage_page(UsagePage.find_usage_page(self.value))

    @classmethod
    def _get_tag(cls):
        return 0x04

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL

    def __repr__(self):
        return "<{}: {}>".format(self.__class__.__name__, self.usage_page.__name__)
class LogicalMinimumItem(ValueItem):
    """GLOBAL item, tag 0x14: sets the logical-range minimum on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_logical_range(minimum=self.value)

    @classmethod
    def _get_tag(cls):
        return 0x14

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class LogicalMaximumItem(ValueItem):
    """GLOBAL item, tag 0x24: sets the logical-range maximum on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_logical_range(maximum=self.value)

    @classmethod
    def _get_tag(cls):
        return 0x24

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class PhysicalMinimumItem(ValueItem):
    """GLOBAL item, tag 0x34: sets the physical-range minimum on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_physical_range(minimum=self.value)

    @classmethod
    def _get_tag(cls):
        return 0x34

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class PhysicalMaximumItem(ValueItem):
    """GLOBAL item, tag 0x44: sets the physical-range maximum on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_physical_range(maximum=self.value)

    @classmethod
    def _get_tag(cls):
        return 0x44

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class UnitExponentItem(ValueItem):
    """GLOBAL item, tag 0x54: stores the unit exponent on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.unit_exponent = self.value

    @classmethod
    def _get_tag(cls):
        return 0x54

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class UnitItem(Item):
    """GLOBAL item, tag 0x64: parses the raw bytes into a Unit on the builder."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.unit = _Unit.from_bytes(self.data)

    @classmethod
    def _get_tag(cls):
        return 0x64

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class ReportSizeItem(ValueItem):
    """GLOBAL item, tag 0x74: sets report_size on the builder (unsigned value)."""

    def __init__(self, *args, **kwargs):
        # Report sizes are unsigned.
        kwargs["signed"] = False
        super(ReportSizeItem, self).__init__(*args, **kwargs)

    def visit(self, descriptor: DeviceBuilder):
        descriptor.report_size = self.value

    @classmethod
    def _get_tag(cls):
        return 0x74

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class ReportIdItem(ValueItem):
    """GLOBAL item, tag 0x84: selects the active report id (unsigned value)."""

    def __init__(self, *args, **kwargs):
        # Report ids are unsigned.
        kwargs["signed"] = False
        super(ReportIdItem, self).__init__(*args, **kwargs)

    def visit(self, descriptor: DeviceBuilder):
        descriptor.set_report_id(self.value)

    @classmethod
    def _get_tag(cls):
        return 0x84

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class ReportCountItem(ValueItem):
    """GLOBAL item, tag 0x94: sets report_count on the builder (unsigned value)."""

    def __init__(self, *args, **kwargs):
        # Report counts are unsigned.
        kwargs["signed"] = False
        super(ReportCountItem, self).__init__(*args, **kwargs)

    def visit(self, descriptor: DeviceBuilder):
        descriptor.report_count = self.value

    @classmethod
    def _get_tag(cls):
        return 0x94

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class PushItem(Item):
    """GLOBAL item, tag 0xA4: delegates to the builder's push()."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.push()

    @classmethod
    def _get_tag(cls):
        return 0xA4

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
class PopItem(Item):
    """GLOBAL item, tag 0xB4: delegates to the builder's pop()."""

    def visit(self, descriptor: DeviceBuilder):
        descriptor.pop()

    @classmethod
    def _get_tag(cls):
        return 0xB4

    @classmethod
    def _get_type(cls):
        return ItemType.GLOBAL
|
{
"content_hash": "4a684658e5452de29799e6f2cdb4f1b1",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 83,
"avg_line_length": 22.510752688172044,
"alnum_prop": 0.6393599235729639,
"repo_name": "NZSmartie/PyHIDParser",
"id": "9b2901eb6d64fb7ee4636618c241ae38681dec98",
"size": "4187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hidparser/ItemGlobal.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127050"
}
],
"symlink_target": ""
}
|
""" Test the Cookiecutter template.
A template project is created in a temporary directory, the application is
installed into a self-contained venv environment, and the application test
suite is run.
"""
from json import loads
from pathlib import Path
from shlex import split
from shutil import which
from subprocess import check_call
from tempfile import TemporaryDirectory
from venv import create
from cookiecutter.main import cookiecutter
def main() -> int:
    """ Execute the test.

    Renders the template into a temp dir, installs the generated project
    into a fresh venv, and runs that project's pytest suite.
    """
    # TODO: Convert to f-strings when Python 3.5 support is dropped.
    template = Path(__file__).resolve().parents[1]
    defaults = loads(template.joinpath("cookiecutter.json").read_text())
    with TemporaryDirectory() as tmpdir:
        cookiecutter(str(template), no_input=True, output_dir=tmpdir)
        cwd = Path(tmpdir) / defaults["project_slug"]
        create(str(cwd / "venv"), with_pip=True)
        bin = str(cwd / "venv" / "bin")  # TODO: Python 3.5 workaround
        pip = which("pip", path=bin) or "pip"  # Travis CI workaround
        install = "{:s} install .".format(pip)
        # Also install every requirements.txt found in the generated project.
        for req in cwd.glob("**/requirements.txt"):
            install = " ".join((install, "--requirement={!s}".format(req)))
        cwd = str(cwd)  # TODO: Python 3.5 workaround
        check_call(split(install), cwd=cwd)
        pytest = which("pytest", path=bin) or "pytest"  # Travis CI workaround
        check_call(split("{:s} --verbose test".format(pytest)), cwd=cwd)
    return 0
# Run the test when executed as a script.
if __name__ == "__main__":
    raise SystemExit(main())
|
{
"content_hash": "a5297aed66943bad5422bb8cb6379544",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 35.31111111111111,
"alnum_prop": 0.657016991818754,
"repo_name": "mdklatt/cookiecutter-python-lib",
"id": "5b53d17962f36f5341074b595f312027b9a75671",
"size": "1589",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/test_template.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13589"
}
],
"symlink_target": ""
}
|
import os
import sys
import pygame
import copy
import math
from common.constants import *
from client.constants import *
class RezSpark:
    """Expanding-circle effect shown while a character is rezzing.

    While a character reference is held, a new circle is spawned every
    REZ_SPARK_TICK_MAX updates; when the character stops rezzing the
    reference is dropped and a final two-circle burst plays out with
    larger radii and a faster fade.
    """

    def __init__(self, inCharacter):
        self.character = inCharacter
        # Anchor the effect at the character's position at creation time.
        self.pos = [int(self.character.precisePos[0]),
                    int(self.character.precisePos[1])]
        self.color = TERRITORY_DOT_COLORS[self.character.team+1]
        self.circles = []  # current circle radii
        # Start at the limit so the first update spawns a circle immediately.
        self.tick = REZ_SPARK_TICK_MAX

    def update(self):
        if (not self.character is None) and (not self.character.rezzing):
            # Character finished rezzing: detach and seed the final burst.
            self.character = None
            self.circles = [1, (1+REZ_SPARK_TICK_MAX)]

        # Grow every circle and keep only those that are still visible.
        newList = []
        for i in range(len(self.circles)):
            self.circles[i] += REZ_SPARK_INCREASE
            c = self.circles[i]
            alpha = self.getAlpha(c)
            if alpha > 0:
                newList.append(c)
        self.circles = newList

        if not self.character is None:
            # Still attached: spawn a new circle every REZ_SPARK_TICK_MAX ticks.
            self.tick += 1
            if self.tick >= REZ_SPARK_TICK_MAX:
                self.tick = 0
                self.circles.append(1)
        else:
            self.tick = 0

    def draw(self, screen, inZoom, inOffset):
        # World -> screen coordinate transform.
        pos = ( int((self.pos[0] * inZoom) + inOffset[0]),
                int((self.pos[1] * inZoom) + inOffset[1]) )

        for c in self.circles:
            # NOTE(review): alpha/color are computed but unused below — the
            # circle is drawn with self.color instead.  Looks like a leftover
            # from an alpha-blended variant; confirm intent before removing.
            alpha = self.getAlpha(c)
            color = [REZ_SPARK_COLOR[0], REZ_SPARK_COLOR[1],
                     REZ_SPARK_COLOR[2], alpha]
            size = c
            if self.character is None:
                # Detached burst circles are drawn larger.
                size *= REZ_SPARK_BIG_MULT
            pygame.draw.circle(screen, self.color, pos, size, 1)

    def getAlpha(self, val):
        # Larger circles fade out; the detached burst fades faster.
        if self.character is None:
            mult = REZ_SPARK_BIG_ALPHA_MULT
        else:
            mult = REZ_SPARK_ALPHA_MULT
        result = 255 - (val * mult)
        if (result < 0):
            result = 0
        return result

    def isRemovable(self):
        """True once the effect is detached and all circles have faded."""
        return ((self.character is None) and (len(self.circles) == 0))
|
{
"content_hash": "b87a464562105a3dce8b8cb03822f68f",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 73,
"avg_line_length": 26.69736842105263,
"alnum_prop": 0.5347461803844258,
"repo_name": "Wopple/fimbulvetr",
"id": "5427b5a3177546c3ccdb634f953325fbef8799ec",
"size": "2029",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/client/rezspark.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "621329"
},
{
"name": "Shell",
"bytes": "85"
}
],
"symlink_target": ""
}
|
import sys
def update_status(text):
    """Emit an inline, self-removing script that renders *text* into #status.

    Newlines in *text* become <br /> tags; the text is embedded as a
    Python repr, which doubles as a JS string literal here.
    """
    payload = repr(text.replace('\n', '<br />'))
    script = (
        '<script id="___autoremovable">\n'
        '$("#status").html(' + payload + ');\n'
        '$("#___autoremovable").remove();\n'
        '</script>'
    )
    print(script)
    sys.stdout.flush()
|
{
"content_hash": "252a07d935841812f4f4e4892e737b35",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 74,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.5223367697594502,
"repo_name": "Alexponomarev7/plotter",
"id": "3a0cf08c6fa74581822ce31a9dc71b74d456c7f8",
"size": "315",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgi-bin/lib/web.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "997"
},
{
"name": "JavaScript",
"bytes": "3236"
},
{
"name": "Python",
"bytes": "38113"
},
{
"name": "Shell",
"bytes": "233"
}
],
"symlink_target": ""
}
|
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from entertainment_tonight.models import Event
class EventCreate(CreateView):
    """Django CreateView exposing a form for adding a new Event."""
    # trying to create a new event
    model = Event
    fields = ['event_name', 'event_location', 'event_address', 'event_type', 'upload_photo', 'event_creator','created_at','event_date']
|
{
"content_hash": "193c4d9eb6728a2d45e5861c97276cc8",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 135,
"avg_line_length": 38,
"alnum_prop": 0.7368421052631579,
"repo_name": "ashleyf1996/OOP_Web_Application_Assignment3",
"id": "b4d7d77be9890874a568894fdf06e3c1b9cc7182",
"size": "343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entertainment_tonight/static/entertainment_tonight/form.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "18889"
},
{
"name": "JavaScript",
"bytes": "42"
},
{
"name": "Python",
"bytes": "29498"
}
],
"symlink_target": ""
}
|
import numpy as np
import theano
import theano.tensor as T
import theano.tensor.nnet as nnet
from mlbase.layers import layer
from mlbase.util import floatX
__all__ = [
'UpConv2d',
]
class UpConv2d(layer.Layer):
    """
    Transposed 2d convolution ("upconvolution") layer.

    Theano explanation of the ops can be found at:
    http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html

    The example code is from
    https://github.com/Lasagne/Lasagne/blob/master/lasagne/layers/conv.py#L613

    Same idea can be seen in
    https://github.com/mila-udem/blocks/blob/master/blocks/bricks/conv.py#L266-L268

    Example of using dnn from cudnn is from here:
    https://github.com/Newmu/dcgan_code/blob/master/lib/ops.py#L85

    Note that this op has multiple names:

    * transposed convolution
    * deconvolution (right term?)
    * upconvolution
    """
    debugname = 'upconv2d'
    LayerTypeName = 'UpConv2d'
    yaml_tag = u'!UpConv2d'

    def __init__(self, filter_size=(2,2),
                 input_feature_map_dim=None,
                 output_feature_map_dim=None,
                 feature_map_multiplier=1,
                 subsample=(2,2), border='valid'):
        """
        The default configuration will upsample input by 2x2 for each feature map.

        Note: channel names are w.r.t. the underlying convolution, which
        runs in the opposite direction of this layer.
        """
        super(UpConv2d, self).__init__()

        self.filterSize = filter_size
        self.inputFeatureDim = input_feature_map_dim
        self.outputFeatureDim = output_feature_map_dim
        self.mapMulti = feature_map_multiplier
        self.border = border
        self.subsample = subsample

        # Resolved lazily in forwardSize().
        self.batchSize = None
        self.dataSize = None
        self.isAfterFullConn = False

        self.w = None

    def getpara(self):
        """Return the trainable parameters (filter weights)."""
        return [self.w]

    def forward(self, inputtensor):
        """Upsample the input via the gradient of a strided convolution."""
        x = inputtensor[0]

        if self.isAfterFullConn:
            # The previous layer was fully connected, so x is 2D
            # (batch, features); reshape to a (batch, channels, 1, 1) map.
            # Bug fix: the original referenced self.outputFeature, an
            # attribute that is never assigned anywhere in this class
            # (AttributeError at graph construction); forwardSize() sets
            # self.outputFeatureDim for this case.
            x = T.reshape(x, (T.shape(x)[0], self.outputFeatureDim, 1, 1))

        # All input/output shapes below are in terms of the underlying
        # convolutional operator, so think of them in the opposite way.
        y = T.nnet.abstract_conv.conv2d_grad_wrt_inputs(
            x, self.w,
            input_shape=(None,
                         self.inputFeatureDim,
                         int(self.dataSize[0]*self.subsample[0]),
                         int(self.dataSize[1]*self.subsample[1])),
            filter_shape=(self.outputFeatureDim,
                          self.inputFeatureDim,
                          *self.filterSize),
            border_mode='valid',
            subsample=self.subsample)

        return (y,)

    # Prediction uses the same computation as training.
    predictForward = forward

    def forwardSize(self, inputsize):
        """Resolve channel counts, create weights and report the output size."""
        isize = inputsize[0]

        # Accept 4D conv-style input or 2D fully-connected-style input.
        if not (len(isize) == 4 or len(isize) == 2):
            raise IndexError

        self.batchSize = isize[0]
        if len(isize) == 2:
            # Input from a fully connected layer: treat each feature as a
            # 1x1 map.
            self.isAfterFullConn = True
            self.dataSize = (1,1,)
            self.outputFeatureDim = isize[1]
            self.inputFeatureDim = isize[1] // self.mapMulti
        elif len(isize) == 4:
            if self.mapMulti is None and isize[1] != self.outputFeatureDim:
                raise IndexError
            self.dataSize = isize[2:]
            if self.mapMulti is not None:
                self.outputFeatureDim = isize[1]
                self.inputFeatureDim = isize[1] // self.mapMulti

        initweight = floatX(np.random.randn(self.outputFeatureDim,
                                            self.inputFeatureDim,
                                            *self.filterSize) * 0.01)
        self.w = theano.shared(initweight, borrow=True)

        retSize = None
        if self.border == 'valid':
            # Use self.dataSize rather than isize[2]/isize[3]: identical for
            # 4D input, and the only valid option for 2D input (where the
            # original indexing raised IndexError).
            retSize = [(isize[0],
                        self.inputFeatureDim,
                        int(self.dataSize[0]*self.subsample[0]),
                        int(self.dataSize[1]*self.subsample[1]))]
        else:
            raise NotImplementedError

        return retSize

    def fillToObjMap(self):
        """Serialize layer state into a plain dict (for YAML dumping)."""
        objDict = super(UpConv2d, self).fillToObjMap()
        objDict['w'] = self.w
        objDict['inputFeatureDim'] = self.inputFeatureDim
        objDict['outputFeatureDim'] = self.outputFeatureDim
        objDict['filterSize'] = self.filterSize
        objDict['dataSize'] = self.dataSize
        objDict['subsample'] = self.subsample

        return objDict

    def loadFromObjMap(self, objDict):
        """Restore layer state from a dict produced by fillToObjMap()."""
        super(UpConv2d, self).loadFromObjMap(objDict)
        self.w = objDict['w']
        self.inputFeatureDim = objDict['inputFeatureDim']
        self.outputFeatureDim = objDict['outputFeatureDim']
        self.filterSize = objDict['filterSize']
        self.dataSize = objDict['dataSize']
        self.subsample = objDict['subsample']

        return

    @classmethod
    def to_yaml(cls, dumper, data):
        objDict = data.fillToObjMap()
        node = dumper.represent_mapping(UpConv2d.yaml_tag, objDict)
        return node

    @classmethod
    def from_yaml(cls, loader, node):
        objDict = loader.construct_mapping(node)
        ret = UpConv2d()
        ret.loadFromObjMap(objDict)
        return ret
|
{
"content_hash": "68d0100a2a65322036f4c0c414967760",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 112,
"avg_line_length": 36.65868263473054,
"alnum_prop": 0.5313622999019928,
"repo_name": "aissehust/sesame-paste-noodle",
"id": "fbc878cec7d282242a4cc9f09c2885167a122f85",
"size": "6122",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlbase/layers/generative.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "190535"
}
],
"symlink_target": ""
}
|
"""
This program shows the basic usage of several features in the
sys and os modules. You may need to change the values of the
ds and di variables to get a meaningful result.
"""
import os, sys
ds = '/home/student/pydata/words.txt'
di = '/home/student/pydata'
# ds = 'c:/pydata/words.txt'
# di = 'c:/pydata'
print sys.argv
print os.getcwd()
print os.path.exists(ds), os.path.isdir(di), os.path.isfile(ds)
print os.path.isdir(ds), os.path.isfile(di)
print os.path.basename(ds), os.path.dirname(ds)
for item in os.environ:
print item, os.environ[item]
sys.exit()
print 'Did we get this far?'
|
{
"content_hash": "3d8f21b974126b92a57c246efd3f73fc",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 29.428571428571427,
"alnum_prop": 0.6877022653721683,
"repo_name": "jeremyprice/PythonForDevs",
"id": "2f3dc1e1d55fc0d2b5073c6365576f0ca723683c",
"size": "618",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LabsDone/lab22_os_sys.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8247"
},
{
"name": "HTML",
"bytes": "40466"
},
{
"name": "JavaScript",
"bytes": "14304"
},
{
"name": "Python",
"bytes": "48537"
}
],
"symlink_target": ""
}
|
import time
from datetime import datetime
from database.model import Session
from database.message_model import Message
from monitor.system.control import shutdown, restart
from monitor.system.update import preform_update
def check_for_messages():
    """Poll the database for pending 'monitor' messages and dispatch them.

    Expired messages (valid_to in the past) are purged first; the purge is
    committed explicitly so it persists even when no control message follows
    (previously the delete was only committed as a side effect of
    process_control committing). The session is always closed, even if
    dispatch raises.
    """
    session = Session()
    try:
        current_time = datetime.now()
        # delete all expired messages and persist the purge immediately
        session.query(Message).filter(Message.valid_to <= current_time).delete()
        session.commit()
        messages = session.query(Message).filter((Message.destination == 'monitor') &
                                                 (Message.valid_from <= current_time)).all()
        for message in messages:
            if message.classification == 'control':
                process_control(session, message)
    finally:
        session.close()
    return
def process_control(session, message):
    """Execute one control message, removing it from the queue afterwards.

    Recognised commands: 'shutdown', 'restart', 'update'. Anything else is
    ignored and left untouched in the queue.
    """
    command = message.payload['command']
    if command not in ('shutdown', 'restart', 'update'):
        return
    if command == 'update':
        # apply the update first, then acknowledge the message
        preform_update(message.payload['update_id'], 'farm_monitor')
        session.delete(message)
        session.commit()
        return
    # shutdown / restart: acknowledge the message first, then act
    session.delete(message)
    session.commit()
    if command == 'shutdown':
        shutdown()
    else:
        restart()
    return
def controller_manager():
    """Run the message-polling loop until interrupted.

    Polls the database every 5 seconds; Ctrl-C (KeyboardInterrupt)
    exits cleanly.
    NOTE(review): any other exception raised by check_for_messages()
    propagates and stops the loop - confirm that is intended.
    """
    while True:
        try:
            time.sleep(5)
            check_for_messages()
        except KeyboardInterrupt:
            return
    return
if __name__ == '__main__':
    # run the poller when executed as a script
    controller_manager()
|
{
"content_hash": "9f7beea758d0bdc5428298c897bd6505",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 88,
"avg_line_length": 22.84285714285714,
"alnum_prop": 0.5878674171357098,
"repo_name": "nstoik/sensor_monitor",
"id": "e7bdb3b0c3881c7e018ec63e5d4067febc464cb4",
"size": "1599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monitor/controller/manager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import functools
# third-party imports
import click
import pytest
from click.testing import CliRunner
# local imports
from shpkpr.cli import options
@pytest.fixture
def runner():
    """Return a callable that invokes a stub command decorated with the
    marathon_client option group; call it with CLI args (and optional
    ``input=``) to get a click Result."""
    @click.command()
    @options.marathon_client
    def _test(marathon_client, **kw):
        pass
    runner = CliRunner()
    return functools.partial(runner.invoke, _test)
def test_marathon_client_fails_with_no_args(runner):
    """With no --marathon-url, the command exits with a usage error (2)."""
    assert runner().exit_code == 2
def test_marathon_client_succeeds_with_marathon_url(runner):
    """A bare --marathon-url is sufficient for the command to run."""
    outcome = runner(["--marathon-url", "http://example.com"])
    assert outcome.exit_code == 0
def test_marathon_client_succeeds_with_marathon_url_and_basic_auth(runner):
    """An https URL combined with username/password is accepted."""
    args = [
        "--marathon-url", "https://example.com",
        "--username", "someuser",
        "--password", "somepassword",
    ]
    assert runner(args).exit_code == 0
def test_marathon_client_fails_with_marathon_url_and_basic_auth_without_ssl(runner):
    """Basic auth combined with a plain-http URL exits with a usage error."""
    args = [
        "--marathon-url", "http://example.com",
        "--username", "someuser",
        "--password", "somepassword",
    ]
    assert runner(args).exit_code == 2
def test_marathon_client_prompts_for_basic_auth_password(runner):
    """When --password is omitted it is collected from the interactive prompt."""
    args = ["--marathon-url", "https://example.com", "--username", "someuser"]
    outcome = runner(args, input="somepassword\n")
    assert outcome.exit_code == 0
def test_marathon_client_succeeds_fails_when_missing_basic_auth_username(runner):
    """A password without a matching username is rejected with a usage error."""
    args = [
        "--marathon-url", "https://example.com",
        "--password", "somepassword",
    ]
    assert runner(args).exit_code == 2
|
{
"content_hash": "4339a37d35fdaa86f34552a01ebaec43",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 84,
"avg_line_length": 24.014705882352942,
"alnum_prop": 0.6466625842008573,
"repo_name": "shopkeep/shpkpr",
"id": "b2945372becffa25f820e564067d6a33500882a6",
"size": "1650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cli/test_options.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "172"
},
{
"name": "Makefile",
"bytes": "2358"
},
{
"name": "Python",
"bytes": "139438"
},
{
"name": "Shell",
"bytes": "1111"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine.resources import wait_condition as wc_base
from heat.engine import support
LOG = logging.getLogger(__name__)
class HeatWaitCondition(resource.Resource):
    """Native OS::Heat::WaitCondition resource.

    Pauses stack create/update until the referenced wait condition handle
    has received ``count`` success signals, or fails when the timeout
    elapses or any non-success signal arrives.
    """
    support_status = support.SupportStatus(version='2014.2')
    PROPERTIES = (
        HANDLE, TIMEOUT, COUNT,
    ) = (
        'handle', 'timeout', 'count',
    )
    ATTRIBUTES = (
        DATA,
    ) = (
        'data',
    )
    properties_schema = {
        HANDLE: properties.Schema(
            properties.Schema.STRING,
            _('A reference to the wait condition handle used to signal this '
              'wait condition.'),
            required=True
        ),
        TIMEOUT: properties.Schema(
            properties.Schema.NUMBER,
            _('The number of seconds to wait for the correct number of '
              'signals to arrive.'),
            required=True,
            constraints=[
                constraints.Range(1, 43200),
            ]
        ),
        COUNT: properties.Schema(
            properties.Schema.INTEGER,
            _('The number of success signals that must be received before '
              'the stack creation process continues.'),
            constraints=[
                constraints.Range(min=1),
            ],
            default=1,
            update_allowed=True
        ),
    }
    attributes_schema = {
        DATA: attributes.Schema(
            _('JSON string containing data associated with wait '
              'condition signals sent to the handle.'),
            cache_mode=attributes.Schema.CACHE_NONE,
            type=attributes.Schema.STRING
        ),
    }
    def __init__(self, name, definition, stack):
        super(HeatWaitCondition, self).__init__(name, definition, stack)
    def _get_handle_resource(self):
        # Resolve the handle property (a resource reference) to the actual
        # wait condition handle resource in this stack.
        return self.stack.resource_by_refid(self.properties[self.HANDLE])
    def _wait(self, handle, started_at, timeout_in):
        """Poll-step helper: True when enough success signals arrived,
        False to keep waiting; raises on timeout or on a failure signal."""
        if timeutils.is_older_than(started_at, timeout_in):
            exc = wc_base.WaitConditionTimeout(self, handle)
            LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
                     {'name': str(self), 'timeout': str(exc)})
            raise exc
        handle_status = handle.get_status()
        # any single non-success signal fails the whole wait condition
        if any(s != handle.STATUS_SUCCESS for s in handle_status):
            failure = wc_base.WaitConditionFailure(self, handle)
            LOG.info(_LI('%(name)s Failed (%(failure)s)'),
                     {'name': str(self), 'failure': str(failure)})
            raise failure
        if len(handle_status) >= self.properties[self.COUNT]:
            LOG.info(_LI("%s Succeeded"), str(self))
            return True
        return False
    def handle_create(self):
        # Return the polling state consumed by check_create_complete().
        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])
    def check_create_complete(self, data):
        return self._wait(*data)
    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        # Re-resolve properties (COUNT is update_allowed) and restart the wait.
        if prop_diff:
            self.properties = json_snippet.properties(self.properties_schema,
                                                      self.context)
        handle = self._get_handle_resource()
        started_at = timeutils.utcnow()
        return handle, started_at, float(self.properties[self.TIMEOUT])
    def check_update_complete(self, data):
        return self._wait(*data)
    def handle_delete(self):
        # Clear accumulated signal data on the handle, if it still exists.
        handle = self._get_handle_resource()
        if handle:
            handle.metadata_set({})
    def _resolve_attribute(self, key):
        # Only the DATA attribute is resolvable; other keys fall through
        # and implicitly return None.
        handle = self._get_handle_resource()
        if key == self.DATA:
            meta = handle.metadata_get(refresh=True)
            # map each signal id to the data payload it carried
            res = {k: meta[k][handle.DATA] for k in meta}
            LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
                      % {'name': self.name,
                         'key': key,
                         'res': res})
            return six.text_type(jsonutils.dumps(res))
def resource_mapping():
    """Expose the resource-type-name to class mapping for Heat's plugin loader."""
    return {'OS::Heat::WaitCondition': HeatWaitCondition}
|
{
"content_hash": "9beeab9b90eb0b0e052894c2959640f5",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 77,
"avg_line_length": 31.963768115942027,
"alnum_prop": 0.5753797324869644,
"repo_name": "srznew/heat",
"id": "21f5a316d6cfe74568ae8adfe4350c1fd4b163b1",
"size": "4986",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/heat/wait_condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6529810"
},
{
"name": "Shell",
"bytes": "33395"
}
],
"symlink_target": ""
}
|
__author__ = 'himanshu'
import json
import requests
def _setup_request_data(filename):
    """Parse an email file into a dict of lowercased headers plus 'message'.

    Header lines look like ``Key: value`` and end at the first blank line;
    everything after that blank line becomes the 'message' entry.
    The file handle is now managed by a context manager so it is closed
    even if parsing raises (the original leaked it on error).
    """
    data = {}
    with open(filename, 'rb') as source:
        lines = source.readlines()
    lineno = 0
    for l in lines:
        lineno += 1
        # blank line separates headers from the message body
        if l == "\n" or l == "":
            break
        parts = l.split(":")
        if len(parts) > 1:
            key = parts[0].rstrip('\n').lower()
            data[key] = parts[1].rstrip('\n')
    message = '\n'.join(lines[lineno:]).rstrip('\n')
    # Python 2 decode, kept for compatibility with this py2 script
    data['message'] = unicode(message, 'utf8', 'ignore')
    return data
def test(filename, full_report=False, teach=False):
    """Post the parsed email file to the local spam service (Python 2 script).

    full_report requests a detailed report; teach flags the message as
    spam and posts it to the /teach endpoint so the service can learn.
    """
    data = _setup_request_data(filename)
    if teach:
        data['is_spam']=True
    data = json.dumps(data)
    if full_report:
        r = requests.post("http://localhost:8000?full_report=true", data=data)
        print r.text
    elif teach:
        r = requests.post("http://localhost:8000/teach", data=data)
        print r.text
    else:
        r = requests.post("http://localhost:8000", data=data)
        print r.text
# test("./spam")
# test("./long_ham.txt")
# test("./spam", full_report=True)
# test("./spam", teach=True)
|
{
"content_hash": "2426da4c5354a955e16480d9bb6feeb7",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 25.84,
"alnum_prop": 0.521671826625387,
"repo_name": "himanshuo/spamassassin",
"id": "4cd1f65d5a95cbb330d437dff6ebc680db5b16ed",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35814"
},
{
"name": "Shell",
"bytes": "17688"
}
],
"symlink_target": ""
}
|
from src.base.solution import Solution
from src.tests.part1.q290_test_word_pattern import WordPatternTestCases
"""
https://leetcode.com/problems/word-pattern/#/description
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space.
"""
class WordPattern(Solution):
    """Solution runner for LeetCode 290: word pattern bijection check."""
    def verify_output(self, test_output, output):
        """A test passes when actual and expected outputs are equal."""
        return test_output == output
    def run_test(self, input):
        """input is a (pattern, str) pair."""
        return self.wordPattern(input[0], input[1])
    def gen_test_cases(self):
        return WordPatternTestCases()
    def print_output(self, output):
        super(WordPattern, self).print_output(output)
    def wordPattern(self, pattern, str):
        """Return True when there is a bijection between the letters of
        *pattern* and the space-separated words of *str*.

        :type pattern: str
        :type str: str
        :rtype: bool
        """
        words = str.split(" ")
        if len(pattern) != len(words):
            return False
        char_to_word = dict()
        word_to_char = dict()
        # Walk both sequences in lockstep; setdefault records the first
        # mapping seen and exposes any later conflict in either direction,
        # replacing the original manual index loop.
        for ch, word in zip(pattern, words):
            if char_to_word.setdefault(ch, word) != word:
                return False
            if word_to_char.setdefault(word, ch) != ch:
                return False
        return True
if __name__ == '__main__':
    # run the bundled test cases when executed directly
    solution = WordPattern()
    solution.run_tests()
|
{
"content_hash": "91f36c50f11a76addc927d1d108cc6c4",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 119,
"avg_line_length": 26.753246753246753,
"alnum_prop": 0.5733009708737864,
"repo_name": "hychrisli/PyAlgorithms",
"id": "ebae037a464bdbae5299c83808e35e104ca02eee",
"size": "2060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/solutions/part1/q290_word_pattern.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "201747"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import discord
from decimal import Decimal
from discord.ext import commands
from django.db import transaction
from django.utils import timezone
from orm.models import RaidMessage
class Rsvp(commands.Cog):
    """Reservation system"""
    def __init__(self, bot):
        self.bot = bot
    # NOTE(review): the double-underscore name is mangled to _Rsvp__after_invoke;
    # confirm discord.py actually invokes this hook (the Cog-era hook is
    # cog_after_invoke).
    async def __after_invoke(self, ctx):
        # Clean up the invoking message when the command ran in a guild channel.
        if isinstance(ctx.channel, discord.TextChannel):
            await ctx.message.delete()
    @commands.command()
    async def join(self, ctx, raid_id: str, party_size: str = '1', *notes: str):
        """Used to indicate that you wish to attend a raid. Can also provide the size of your party (including you) and any notes."""
        author = ctx.author
        # collapse the *notes varargs into a single string, or None if absent
        if len(notes) == 0:
            notes = None
        else:
            notes = ' '.join(str(x) for x in notes)
        # If the message is coming from PM we want to use the server's version of the user.
        if isinstance(ctx.channel, discord.abc.PrivateChannel):
            author = ctx.bot_guild.get_member(author.id)
        raid = ctx.raids.get_raid(raid_id)
        await self.add_user_to_raid(raid, self.bot, ctx.channel, author, party_size, notes)
    @staticmethod
    async def add_user_to_raid(raid, bot, origin_channel, user, party_size: str = '1', notes: str = None):
        """Register *user* for *raid*, lazily creating the raid's private
        channel on first join, then refresh every raid card embed."""
        private_raid_channel = raid.private_discord_channel
        if private_raid_channel is None:
            # First participant: create the private channel, hidden from
            # everyone except the bot until members are added explicitly.
            overwrites = {
                bot.bot_guild.default_role: bot.private_channel_no_access,
                bot.bot_guild.me: bot.private_channel_access
            }
            if raid.is_exclusive:
                private_raid_channel = await bot.bot_guild.create_text_channel(f'ex-raid-{raid.display_id}-chat',
                                                                               overwrites=overwrites)
            else:
                private_raid_channel = await bot.bot_guild.create_text_channel(f'raid-{raid.display_id}-chat',
                                                                               overwrites=overwrites)
            if bot.config.discord_raid_category is not None:
                await private_raid_channel.edit(category=bot.config.discord_raid_category)
            raid.private_discord_channel = private_raid_channel
            # Send the raid card to the top of the channel.
            private_raid_card = await raid.private_discord_channel.send(embed=raid.embed)
            # Add reaction to allow for easy leaving the raid.
            if not bot.raids.logging_out:
                await private_raid_card.add_reaction('❌')
                await private_raid_card.add_reaction('1⃣')
                await private_raid_card.add_reaction('2⃣')
                await private_raid_card.add_reaction('3⃣')
                await private_raid_card.add_reaction('4⃣')
            bot.raids.private_channel_raids[private_raid_card.id] = raid
            raid.messages.append(private_raid_card)
            # persist channel id and card message atomically
            with transaction.atomic():
                raid.private_channel = private_raid_channel.id
                raid.save()
                RaidMessage(raid=raid, channel=private_raid_channel.id, message=private_raid_card.id).save()
        # Add this user to the raid and update all the embeds for the raid.
        result_tuple = bot.raids.add_participant(raid, user.id, user.display_name, party_size, notes)
        # NOTE(review): removing from raid.messages while iterating it can
        # skip the following element - confirm/iterate over a copy.
        for msg in raid.messages:
            try:
                await msg.edit(embed=raid.embed)
            except discord.errors.NotFound:
                raid.messages.remove(msg)
                pass
        # Add the user to the private channel for the raid
        await raid.private_discord_channel.set_permissions(user, overwrite=bot.private_channel_access)
        await raid.private_discord_channel.send(f'{user.mention}{result_tuple[0].details()}')
        # Send message to the RSVP channel if the command was invoked publicly
        if bot.rsvp_channel is not None and isinstance(origin_channel, discord.abc.GuildChannel):
            await bot.rsvp_channel.send(result_tuple[1])
    @commands.command()
    async def leave(self, ctx, raid_id: str):
        """Used to indicate to others that you are no longer attending the indicated raid."""
        author = ctx.author
        # If the message is coming from PM we want to use the server's version of the user.
        if isinstance(ctx.channel, discord.abc.PrivateChannel):
            author = ctx.bot_guild.get_member(author.id)
        raid = ctx.raids.get_raid(raid_id)
        await self.remove_user_from_raid(raid, self.bot, ctx.channel, author)
    @staticmethod
    async def remove_user_from_raid(raid, bot, origin_channel, user):
        """Drop *user* from *raid*; no-op when the user was not attending."""
        display_msg = bot.raids.remove_participant(raid, user.id, user.display_name)
        if display_msg is not None:
            # Remove the user to the private channel for the raid
            await raid.private_discord_channel.set_permissions(user, overwrite=None)
            await raid.private_discord_channel.send(f'**{user.display_name}** is no longer attending')
            for msg in raid.messages:
                await msg.edit(embed=raid.embed)
            if bot.rsvp_channel is not None and isinstance(origin_channel, discord.abc.GuildChannel):
                await bot.rsvp_channel.send(display_msg)
    @commands.command()
    async def who(self, ctx, raid_id: str):
        """Used to get a listing of who is attend the provided raid."""
        raid = ctx.raids.get_raid(raid_id)
        msg = ctx.raids.get_participant_printout(raid)
        await ctx.author.send(msg)
    @commands.command(aliases=['raid'])
    async def details(self, ctx, raid_id: str):
        """Used to get the raid card PM to you by the bot. Useful for quickly learning the location of a particular raid."""
        raid = ctx.raids.get_raid(raid_id)
        await ctx.author.send(embed=raid.embed)
    @commands.command(aliases=['reportegg'])
    @commands.has_role('Raid Reporter')
    async def report_egg(self, ctx, gym_name: str, level: int, latitude: Decimal, longitude: Decimal,
                         minutes_remaining: int):
        """Creates an raid that user can join via the RSVP commands.
        If a gym name is multiple words wrap the gym name with double quotes.
        """
        hatch_time = timezone.localtime(timezone.now()) + timedelta(minutes=minutes_remaining)
        raid = await ctx.raids.create_manual_raid(ctx.author.id, gym_name=gym_name, raid_level=level,
                                                  latitude=latitude, longitude=longitude,
                                                  expiration=hatch_time,
                                                  is_egg=True)
        await self.finish_reporting_manual_raid(ctx, raid)
    @commands.command(aliases=['reportraid'])
    @commands.has_role('Raid Reporter')
    async def report_raid(self, ctx, gym_name: str, level: int, pokemon_name: str, latitude: Decimal,
                          longitude: Decimal,
                          minutes_remaining: int):
        """Creates an raid that user can join via the RSVP commands.
        If a gym name is multiple words wrap the gym name with double quotes.
        """
        expiration = timezone.localtime(timezone.now()) + timedelta(minutes=minutes_remaining)
        raid = await ctx.raids.create_manual_raid(ctx.author.id, gym_name=gym_name,
                                                  raid_level=level, pokemon_name=pokemon_name,
                                                  latitude=latitude, longitude=longitude,
                                                  expiration=expiration, is_egg=False)
        await self.finish_reporting_manual_raid(ctx, raid)
    # Handles duplicate management as well as communicating the raid to the appropriate channels.
    async def finish_reporting_manual_raid(self, ctx, raid):
        hash_val = hash(raid)
        if hash_val in ctx.raids.hashed_active_raids:
            # duplicate report: point the reporter at the existing raid
            raid = ctx.raids.hashed_active_raids[hash_val]
            await ctx.author.send(f"This raid was already reported. It's number is {raid.display_id}")
        else:
            ctx.raids.track_raid(raid)
            raid.embed = ctx.raids.build_manual_raid_embed(raid)
            objects_to_save = await ctx.zones.send_to_raid_zones(raid, ctx.bot)
            RaidMessage.objects.bulk_create(objects_to_save)
            await ctx.send(f'Created raid #{raid.display_id}')
    @commands.command(aliases=['hatch'])
    @commands.has_role('Raid Reporter')
    async def hatched(self, ctx, raid_id: str, pokemon_name: str):
        """Reports what pokemon hatched from a raid egg. Existing cards for the raid will be deleted and new ones sent out.
        If a pokemon name is multiple words wrap it with double quotes.
        """
        raid = ctx.raids.get_raid(raid_id)
        if raid.is_exclusive:
            await ctx.author.send(f"This operation cannot be performed on EX raids")
        elif not raid.is_egg:
            await ctx.author.send(f"This raid has already hatched.")
        else:
            raid.is_egg = False
            raid.pokemon_name = pokemon_name
            raid.save()
            raid.embed = ctx.raids.build_manual_raid_embed(raid)
            # delete every existing raid card and drop its lookup entries
            for m in raid.messages:
                try:
                    if not ctx.bot.raids.logging_out:
                        if m.id in ctx.bot.raids.message_to_raid:
                            del ctx.bot.raids.message_to_raid[m.id]
                        elif m.id in ctx.bot.raids.private_channel_raids:
                            del ctx.bot.raids.private_channel_raids[m.id]
                    await m.delete()
                except discord.NotFound:
                    pass
            raid.messages = []
            if len(raid.participants) > 0:
                ctx.bot.raids.update_embed_participants(raid)
            # Send the new embed to the private channel
            if raid.private_channel is not None:
                private_raid_card = await raid.private_discord_channel.send(embed=raid.embed)
                # Add reaction to allow for easy leaving the raid.
                if not ctx.bot.raids.logging_out:
                    await private_raid_card.add_reaction('❌')
                    await private_raid_card.add_reaction('1⃣')
                    await private_raid_card.add_reaction('2⃣')
                    await private_raid_card.add_reaction('3⃣')
                    await private_raid_card.add_reaction('4⃣')
                ctx.bot.raids.private_channel_raids[private_raid_card.id] = raid
                raid.messages.append(private_raid_card)
            objects_to_save = await ctx.zones.send_to_raid_zones(raid, ctx.bot)
            RaidMessage.objects.bulk_create(objects_to_save)
            await ctx.send(f'Hatched raid #{raid.display_id}')
def setup(bot):
    """discord.py extension entry point: register the Rsvp cog."""
    bot.add_cog(Rsvp(bot))
|
{
"content_hash": "03de45ee76c6ba3de5331daf1223dc0e",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 133,
"avg_line_length": 45.79497907949791,
"alnum_prop": 0.5988122430333486,
"repo_name": "peter-obrien/organizer",
"id": "995494d21ca2b98584e51696cb06bb5d78c24357",
"size": "10965",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "cogs/rsvp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104167"
}
],
"symlink_target": ""
}
|
class Solution(object):
    def detectCapitalUse(self, word):
        """Return True when capitals are used correctly in *word*.

        Correct usage means either the whole word is upper case, or
        everything after the first character is lower case (the first
        character itself may be either case).

        :type word: str
        :rtype: bool
        """
        if word == word.upper():
            return True
        return word[1:] == word.lower()[1:]
|
{
"content_hash": "751a45fb3e97d06dcbea4e0f03846c90",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 42,
"avg_line_length": 24.583333333333332,
"alnum_prop": 0.4576271186440678,
"repo_name": "njfahey/LeetCode",
"id": "481b005b1ac0fc28c5a7c0db024253408170f9dd",
"size": "295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Problems/520/520 Detect Capital.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "8857"
},
{
"name": "Python",
"bytes": "10932"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the boolean ``processed`` flag (default False) to the Report model.
    dependencies = [
        ('reports', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='report',
            name='processed',
            field=models.BooleanField(default=False),
        ),
    ]
|
{
"content_hash": "52745220994906ad7a88aa2a1acc7b87",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 20.61111111111111,
"alnum_prop": 0.5902964959568733,
"repo_name": "ben174/bart-crime",
"id": "3e736edd17dfc196a8c4fd31c53e4cffac377b77",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reports/migrations/0002_report_processed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "18475"
},
{
"name": "Python",
"bytes": "55686"
}
],
"symlink_target": ""
}
|
import httplib as http
import importlib
import pkgutil
import pytest
from pytz import utc
from datetime import datetime
import urllib
from nose.tools import * # noqa:
import re
from tests.base import ApiTestCase, DbTestCase
from osf_tests import factories
from tests.utils import make_drf_request_with_version
from api.base.settings.defaults import API_BASE
from api.base.serializers import JSONAPISerializer, BaseAPISerializer
from api.base import serializers as base_serializers
from api.nodes.serializers import NodeSerializer, RelationshipField
from api.waffle.serializers import WaffleSerializer, BaseWaffleSerializer
from api.registrations.serializers import RegistrationSerializer
# Collect every per-app serializers module under api/ (skipping 'base' and
# 'test'); apps without an importable serializers module are silently ignored.
SER_MODULES = []
for loader, name, _ in pkgutil.iter_modules(['api']):
    if name != 'base' and name != 'test':
        try:
            SER_MODULES.append(
                importlib.import_module(
                    'api.{}.serializers'.format(name)
                )
            )
        except ImportError:
            pass
# Gather every concrete serializer class from those modules, skipping the
# abstract JSONAPI/BaseAPI bases; issubclass raises TypeError for attributes
# that are not classes, which we ignore.
SER_CLASSES = []
for mod in SER_MODULES:
    for name, val in mod.__dict__.items():
        try:
            if issubclass(val, BaseAPISerializer):
                if 'JSONAPI' in name or 'BaseAPI' in name:
                    continue
                SER_CLASSES.append(val)
        except TypeError:
            pass
class FakeModel(object):
    """Minimal stand-in model used to exercise FakeSerializer's link fields."""
    def null_field(self):
        # link getter that resolves to nothing; the serializer should omit it
        return None
    def valued_field(self):
        return 'Some'
    # 'null' and 'foo' back the '<null>' / '<foo>' lookups in FakeSerializer
    null = None
    foo = 'bar'
    pk = '1234'
class FakeSerializer(base_serializers.JSONAPISerializer):
    """Serializer under test: pairs an always-null and an always-valued
    entry for both plain links and relationship links."""
    class Meta:
        type_ = 'foos'
    links = base_serializers.LinksField({
        'null_field': 'null_field',
        'valued_field': 'valued_field',
    })
    # '<null>' / '<foo>' are resolved against attributes of the serialized
    # object (FakeModel.null / FakeModel.foo) - presumably by RelationshipField;
    # a None value should drop the relationship from the output.
    null_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<null>'},
    )
    valued_link_field = base_serializers.RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<foo>'},
    )
def null_field(*args, **kwargs):
    """Module-level link getter stub that always resolves to no value."""
    return None
def valued_field(*args, **kwargs):
    """Module-level link getter stub that always resolves to a fixed URL."""
    return 'http://foo.com'
class TestSerializerMetaType(ApiTestCase):
    def test_expected_serializers_have_meta_types(self):
        """Every discovered serializer must declare Meta with either a
        static type_ or a get_type() hook."""
        for ser in SER_CLASSES:
            assert hasattr(
                ser, 'Meta'
            ), 'Serializer {} has no Meta'.format(ser)
            assert hasattr(
                ser.Meta, 'type_'
            ) or hasattr(
                ser.Meta, 'get_type'
            ), 'Serializer {} has no Meta.type_ or Meta.get_type()'.format(ser)
class TestNodeSerializerAndRegistrationSerializerDifferences(ApiTestCase):
    """
    All fields on the Node Serializer other than the few we can serialize for withdrawals must be redeclared on the
    Registration Serializer and wrapped in HideIfWithdrawal
    HideIfRegistration fields should not be serialized on registrations.
    """
    def setUp(self):
        super(TestNodeSerializerAndRegistrationSerializerDifferences, self).setUp()
        # one public project and a public registration made from it
        self.node = factories.ProjectFactory(is_public=True)
        self.registration = factories.RegistrationFactory(
            project=self.node, is_public=True)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
        self.reg_url = '/{}registrations/{}/'.format(
            API_BASE, self.registration._id)
    def test_registration_serializer(self):
        """Every NodeSerializer field must exist on RegistrationSerializer,
        wrapped appropriately unless it is visible on withdrawals or is a
        node-only field."""
        # fields that are visible for withdrawals
        visible_on_withdrawals = [
            'contributors',
            'bibliographic_contributors',
            'implicit_contributors',
            'date_created',
            'date_modified',
            'description',
            'id',
            'links',
            'registration',
            'article_doi',
            'title',
            'type',
            'category',
            'root',
            'parent',
            'affiliated_institutions',
            'identifiers',
            'current_user_can_comment',
            'current_user_is_contributor',
            'current_user_is_contributor_or_group_member',
            'preprint',
            'subjects',
            'wiki_enabled']
        # fields that do not appear on registrations
        non_registration_fields = ['registrations', 'draft_registrations', 'templated_by_count', 'settings', 'children', 'groups']
        for field in NodeSerializer._declared_fields:
            assert_in(field, RegistrationSerializer._declared_fields)
            reg_field = RegistrationSerializer._declared_fields[field]
            if field not in visible_on_withdrawals and field not in non_registration_fields:
                assert_true(
                    isinstance(reg_field, base_serializers.HideIfWithdrawal) or
                    isinstance(reg_field, base_serializers.ShowIfVersion) or
                    isinstance(reg_field, base_serializers.ShowIfAdminScopeOrAnonymous)
                )
    def test_hide_if_registration_fields(self):
        """HideIfRegistration relationships appear on nodes but never on
        registrations."""
        node_res = self.app.get(self.url)
        node_relationships = node_res.json['data']['relationships']
        registration_res = self.app.get(self.reg_url)
        registration_relationships = registration_res.json['data']['relationships']
        hide_if_registration_fields = [
            field for field in NodeSerializer._declared_fields if isinstance(
                NodeSerializer._declared_fields[field],
                base_serializers.HideIfRegistration)]
        for field in hide_if_registration_fields:
            assert_in(field, node_relationships)
            assert_not_in(field, registration_relationships)
class TestNullLinks(ApiTestCase):
    def test_null_links_are_omitted(self):
        """Links and relationships whose getter resolves to None are
        dropped from the serialized output entirely."""
        req = make_drf_request_with_version(version='2.0')
        rep = FakeSerializer(FakeModel, context={'request': req}).data['data']
        assert_not_in('null_field', rep['links'])
        assert_in('valued_field', rep['links'])
        assert_not_in('null_link_field', rep['relationships'])
class TestApiBaseSerializers(ApiTestCase):
    """Behavior of the base serializer machinery: get_absolute_url contract,
    related_counts / embed query-parameter handling."""
    def setUp(self):
        super(TestApiBaseSerializers, self).setUp()
        self.user = factories.AuthUserFactory()
        self.auth = factories.Auth(self.user)
        # a public project with five public children and one linked node
        self.node = factories.ProjectFactory(is_public=True)
        for i in range(5):
            factories.ProjectFactory(is_public=True, parent=self.node)
        self.linked_node = factories.NodeFactory(
            creator=self.user, is_public=True)
        self.node.add_pointer(self.linked_node, auth=self.auth)
        self.url = '/{}nodes/{}/'.format(API_BASE, self.node._id)
    def test_serializers_have_get_absolute_url_method(self):
        """Every concrete subclass must override get_absolute_url."""
        serializers = JSONAPISerializer.__subclasses__()
        base_get_absolute_url = JSONAPISerializer.get_absolute_url
        for serializer in serializers:
            # Waffle endpoints are nonstandard
            if serializer == WaffleSerializer or serializer == BaseWaffleSerializer:
                continue
            # skip serializers declared inside test modules
            if not re.match('^(api_test|test).*', serializer.__module__):
                assert hasattr(
                    serializer, 'get_absolute_url'
                ), 'No get_absolute_url method'
                assert_not_equal(
                    serializer.get_absolute_url,
                    base_get_absolute_url
                )
    def test_counts_not_included_in_link_fields_by_default(self):
        res = self.app.get(self.url)
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {'data': None}:
                continue
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    link_meta = link.get('meta', {})
                    assert_not_in('count', link_meta)
            else:
                link = relation['links'].values()[0]
                link_meta = link.get('meta', {})
                assert_not_in('count', link_meta)
    def test_counts_included_in_link_fields_with_related_counts_query_param(
            self):
        res = self.app.get(self.url, params={'related_counts': True})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            # unwrap conditional wrappers (HideIf.../ShowIf...) to the real field
            if getattr(field, 'field', None):
                field = field.field
            related_meta = getattr(field, 'related_meta', {})
            if related_meta and related_meta.get('count', False):
                link = relation['links'].values()[0]
                assert_in('count', link['meta'], field)
    def test_related_counts_excluded_query_param_false(self):
        res = self.app.get(self.url, params={'related_counts': False})
        relationships = res.json['data']['relationships']
        for relation in relationships.values():
            if relation == {'data': None}:
                continue
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    link_meta = link.get('meta', {})
                    assert_not_in('count', link_meta)
            else:
                link = relation['links'].values()[0]
                link_meta = link.get('meta', {})
                assert_not_in('count', link_meta)
    def test_invalid_related_counts_value_raises_bad_request(self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'fish'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
    def test_invalid_embed_value_raise_bad_request(self):
        res = self.app.get(
            self.url,
            params={'embed': 'foo'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(
            res.json['errors'][0]['detail'],
            'The following fields are not embeddable: foo'
        )
    def test_embed_does_not_remove_relationship(self):
        res = self.app.get(self.url, params={'embed': 'root'})
        assert_equal(res.status_code, 200)
        assert_in(
            self.url,
            res.json['data']['relationships']['root']['links']['related']['href']
        )
    def test_counts_included_in_children_field_with_children_related_counts_query_param(
            self):
        res = self.app.get(self.url, params={'related_counts': 'children'})
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    related_meta = getattr(field, 'related_meta', {})
                    if related_meta and related_meta.get('count', False) and key == 'children':
                        assert_in('count', link['meta'])
                    else:
                        assert_not_in('count', link.get('meta', {}))
            elif relation != {'data': None}:
                link = relation['links'].values()[0]
                related_meta = getattr(field, 'related_meta', {})
                if related_meta and related_meta.get('count', False) and key == 'children':
                    assert_in('count', link['meta'])
                else:
                    assert_not_in('count', link.get('meta', {}))
    def test_counts_included_in_children_and_contributors_fields_with_field_csv_related_counts_query_param(
            self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'children,contributors'}
        )
        relationships = res.json['data']['relationships']
        for key, relation in relationships.items():
            if relation == {}:
                continue
            field = NodeSerializer._declared_fields[key]
            if getattr(field, 'field', None):
                field = field.field
            if isinstance(relation, list):
                for item in relation:
                    link = item['links'].values()[0]
                    related_meta = getattr(field, 'related_meta', {})
                    # NOTE(review): 'and' binds tighter than 'or', so this parses as
                    # (...and key == 'children') or key == 'contributors' - confirm
                    # the intended grouping is key in ('children', 'contributors').
                    if related_meta and related_meta.get('count', False) and key == 'children' or key == 'contributors':
                        assert_in('count', link['meta'])
                    else:
                        assert_not_in('count', link.get('meta', {}))
            elif relation != {'data': None}:
                link = relation['links'].values()[0]
                related_meta = getattr(field, 'related_meta', {})
                # NOTE(review): same precedence concern as above.
                if related_meta and related_meta.get('count', False) and key == 'children' or key == 'contributors':
                    assert_in('count', link['meta'])
                else:
                    assert_not_in('count', link.get('meta', {}))
    def test_error_when_requesting_related_counts_for_attribute_field(self):
        res = self.app.get(
            self.url,
            params={'related_counts': 'title'},
            expect_errors=True
        )
        assert_equal(res.status_code, http.BAD_REQUEST)
        assert_equal(
            res.json['errors'][0]['detail'],
            "Acceptable values for the related_counts query param are 'true', 'false', or any of the relationship fields; got 'title'"
        )
@pytest.mark.django_db
class TestRelationshipField:
    """Unit tests for ``RelationshipField`` link, meta, and filter rendering."""

    # We need a Serializer to test the Relationship field (needs context)
    class BasicNodeSerializer(JSONAPISerializer):
        # Plain to-one relationship: rendered as a related link only.
        parent = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'}
        )

        # Related link that carries meta computed by the serializer methods
        # named in ``related_meta`` (``get_count`` / ``get_extra`` below).
        parent_with_meta = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            related_meta={'count': 'get_count', 'extra': 'get_extra'},
        )

        # Relationship exposing both a self link and a related link.
        self_and_related_field = RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            self_view='nodes:node-contributors',
            self_view_kwargs={'node_id': '<_id>'},
        )

        two_url_kwargs = RelationshipField(
            # fake url, for testing purposes
            related_view='nodes:node-pointer-detail',
            related_view_kwargs={'node_id': '<_id>', 'node_link_id': '<_id>'},
        )

        # If related_view_kwargs is a callable, this field _must_ match the property name on
        # the target record
        registered_from = RelationshipField(
            related_view=lambda n: 'registrations:registration-detail' if n and n.is_registration else 'nodes:node-detail',
            related_view_kwargs=lambda n: {'node_id': '<registered_from._id>'})

        # Related link that appends filter query parameters to the href.
        field_with_filters = base_serializers.RelationshipField(
            related_view='nodes:node-detail',
            related_view_kwargs={'node_id': '<_id>'},
            filter={'target': 'hello', 'woop': 'yea'}
        )

        class Meta:
            type_ = 'nodes'

        def get_count(self, obj):
            # Fixed count consumed by ``parent_with_meta``.
            return 1

        def get_extra(self, obj):
            # Fixed extra meta value consumed by ``parent_with_meta``.
            return 'foo'

    # TODO: Expand tests

    # Regression test for https://openscience.atlassian.net/browse/OSF-4832
    def test_serializing_meta(self):
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        meta = data['relationships']['parent_with_meta']['links']['related']['meta']
        # No related_counts were requested, so 'count' must be withheld while
        # the plain 'extra' meta value still renders.
        assert_not_in('count', meta)
        assert_in('extra', meta)
        assert_equal(meta['extra'], 'foo')

    def test_serializing_empty_to_one(self):
        req = make_drf_request_with_version(version='2.2')
        node = factories.NodeFactory()
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        # This node is not registered_from another node hence it is an empty-to-one.
        assert 'registered_from' not in data['relationships']

        # In 2.9, API returns null for empty relationships
        # https://openscience.atlassian.net/browse/PLAT-840
        req = make_drf_request_with_version(version='2.9')
        node = factories.NodeFactory()
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        assert data['relationships']['registered_from']['data'] is None

    def test_self_and_related_fields(self):
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        relationship_field = data['relationships']['self_and_related_field']['links']
        # The self link targets the contributors view, the related link the node itself.
        assert_in(
            '/v2/nodes/{}/contributors/'.format(node._id),
            relationship_field['self']['href']
        )
        assert_in(
            '/v2/nodes/{}/'.format(node._id),
            relationship_field['related']['href']
        )

    def test_field_with_two_kwargs(self):
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        field = data['relationships']['two_url_kwargs']['links']
        # Both url kwargs resolve from the same record (<_id> twice).
        assert_in(
            '/v2/nodes/{}/node_links/{}/'.format(node._id, node._id),
            field['related']['href']
        )

    def test_field_with_two_filters(self):
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']

        field = data['relationships']['field_with_filters']['links']
        # Filters are url-encoded into the related href's query string.
        assert_in(
            urllib.quote('filter[target]=hello', safe='?='),
            field['related']['href']
        )
        assert_in(
            urllib.quote('filter[woop]=yea', safe='?='),
            field['related']['href']
        )

    def test_field_with_callable_related_attrs(self):
        req = make_drf_request_with_version(version='2.0')
        project = factories.ProjectFactory()
        node = factories.NodeFactory(parent=project)
        data = self.BasicNodeSerializer(
            node, context={'request': req}
        ).data['data']
        # A plain node has no registered_from, so the field is omitted.
        assert_not_in('registered_from', data['relationships'])

        registration = factories.RegistrationFactory(project=node)
        data = self.BasicNodeSerializer(
            registration, context={
                'request': req}
        ).data['data']
        field = data['relationships']['registered_from']['links']
        # The callable related_view picks the view from the record; the
        # href points at the node this registration was registered from.
        assert_in('/v2/nodes/{}/'.format(node._id), field['related']['href'])
class TestShowIfVersion(ApiTestCase):
    """The node_links relationship is shown only for permitted API versions."""

    def setUp(self):
        super(TestShowIfVersion, self).setUp()
        self.node = factories.NodeFactory()
        self.registration = factories.RegistrationFactory()

    def _relationships(self, serializer_class, instance, version):
        # Serialize ``instance`` at the given API version and return the
        # relationships dict of the resulting payload.
        request = make_drf_request_with_version(version=version)
        serialized = serializer_class(instance, context={'request': request})
        return serialized.data['data']['relationships']

    def test_node_links_allowed_version_node_serializer(self):
        relationships = self._relationships(NodeSerializer, self.node, '2.0')
        assert_in('node_links', relationships)

    def test_node_links_bad_version_node_serializer(self):
        relationships = self._relationships(NodeSerializer, self.node, '2.1')
        assert_not_in('node_links', relationships)

    def test_node_links_allowed_version_registration_serializer(self):
        relationships = self._relationships(
            RegistrationSerializer, self.registration, '2.0')
        assert_in('node_links', relationships)

    def test_node_links_bad_version_registration_serializer(self):
        relationships = self._relationships(
            RegistrationSerializer, self.registration, '2.1')
        assert_not_in('node_links', relationships)

    def test_node_links_withdrawn_registration(self):
        factories.WithdrawnRegistrationFactory(
            registration=self.registration)
        # Withdrawn registrations hide node_links at every version.
        for version in ('2.0', '2.1'):
            relationships = self._relationships(
                RegistrationSerializer, self.registration, version)
            assert_not_in('node_links', relationships)
class VersionedDateTimeField(DbTestCase):
    """date_modified renders in the legacy format (no trailing 'Z') for API
    versions below 2.2 and in the 'Z'-suffixed format for 2.2 and above,
    whether the stored datetime is naive (pre django-osf) or timezone-aware.

    The original class repeated the same serialize-and-compare body in eight
    test methods; it is factored into ``_assert_date_modified`` so each case
    states only its version, input date, and expected format.
    """

    def setUp(self):
        super(VersionedDateTimeField, self).setUp()
        self.node = factories.NodeFactory()
        self.old_date = datetime.utcnow()  # naive dates before django-osf
        self.old_date_without_microseconds = self.old_date.replace(
            microsecond=0)
        self.new_date = datetime.utcnow().replace(
            tzinfo=utc)  # non-naive after django-osf
        self.new_date_without_microseconds = self.new_date.replace(
            microsecond=0)
        self.old_format = '%Y-%m-%dT%H:%M:%S.%f'
        self.old_format_without_microseconds = '%Y-%m-%dT%H:%M:%S'
        self.new_format = '%Y-%m-%dT%H:%M:%S.%fZ'

    def _assert_date_modified(self, version, date, date_format):
        # Serialize self.node at the given API version with ``date`` as its
        # last_logged value and check date_modified uses ``date_format``.
        req = make_drf_request_with_version(version=version)
        setattr(self.node, 'last_logged', date)
        data = NodeSerializer(self.node, context={'request': req}).data['data']
        assert_equal(
            datetime.strftime(date, date_format),
            data['attributes']['date_modified']
        )

    def test_old_date_formats_to_old_format(self):
        self._assert_date_modified('2.0', self.old_date, self.old_format)

    def test_old_date_without_microseconds_formats_to_old_format(self):
        self._assert_date_modified(
            '2.0',
            self.old_date_without_microseconds,
            self.old_format_without_microseconds
        )

    def test_old_date_formats_to_new_format(self):
        self._assert_date_modified('2.2', self.old_date, self.new_format)

    def test_old_date_without_microseconds_formats_to_new_format(self):
        self._assert_date_modified(
            '2.2',
            self.old_date_without_microseconds,
            self.new_format
        )

    def test_new_date_formats_to_old_format(self):
        self._assert_date_modified('2.0', self.new_date, self.old_format)

    def test_new_date_without_microseconds_formats_to_old_format(self):
        self._assert_date_modified(
            '2.0',
            self.new_date_without_microseconds,
            self.old_format_without_microseconds
        )

    def test_new_date_formats_to_new_format(self):
        self._assert_date_modified('2.2', self.new_date, self.new_format)

    def test_new_date_without_microseconds_formats_to_new_format(self):
        self._assert_date_modified(
            '2.2',
            self.new_date_without_microseconds,
            self.new_format
        )

    # regression test for https://openscience.atlassian.net/browse/PLAT-1350
    # VersionedDateTimeField was treating version 2.10 and higher as decimals,
    # less than 2.2
    def test_old_date_formats_to_new_format_with_2_10(self):
        self._assert_date_modified('2.10', self.old_date, self.new_format)
|
{
"content_hash": "edb45955ac542ac8c40155a71ff603db",
"timestamp": "",
"source": "github",
"line_count": 687,
"max_line_length": 134,
"avg_line_length": 38.36535662299855,
"alnum_prop": 0.5848541184505065,
"repo_name": "mattclark/osf.io",
"id": "7ff4d4d74ceb4ad44198731ff3bd7afc40b5ee2b",
"size": "26381",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_tests/base/test_serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "8456"
},
{
"name": "HTML",
"bytes": "317371"
},
{
"name": "JavaScript",
"bytes": "1792241"
},
{
"name": "Mako",
"bytes": "654772"
},
{
"name": "Python",
"bytes": "10166997"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
'''
--------------------------------------------------------------
Before running this Airflow module...
Install StarThinker in cloud composer ( recommended ):
From Release: pip install starthinker
From Open Source: pip install git+https://github.com/google/starthinker
Or push local code to the cloud composer plugins directory ( if pushing local code changes ):
source install/deploy.sh
4) Composer Menu
l) Install All
--------------------------------------------------------------
If any recipe task has "auth" set to "user" add user credentials:
1. Ensure a RECIPE['setup']['auth']['user'] = [User Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_user", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/deploy_commandline.md#optional-setup-user-credentials
--------------------------------------------------------------
If any recipe task has "auth" set to "service" add service credentials:
1. Ensure a RECIPE['setup']['auth']['service'] = [Service Credentials JSON]
OR
1. Visit Airflow UI > Admin > Connections.
2. Add an Entry called "starthinker_service", fill in the following fields. Last step paste JSON from authentication.
- Conn Type: Google Cloud Platform
- Project: Get from https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md
- Keyfile JSON: Get from: https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md
--------------------------------------------------------------
Line Item To BigQuery Via Query
Move line items to BigQuery using an ID query.
- Specify the query that will pull the lineitem ids to download.
- Specify the dataset and table where the lineitems will be written.
- The schema will match <a href='https://developers.google.com/bid-manager/guides/entity-write/format' target='_blank'>Entity Write Format</a>.
--------------------------------------------------------------
This StarThinker DAG can be extended with any additional tasks from the following sources:
- https://google.github.io/starthinker/
- https://github.com/google/starthinker/tree/master/dags
'''
from starthinker.airflow.factory import DAG_Factory
# Recipe input fields; see the module docstring for what each value controls.
INPUTS = {
    'auth_read': 'user',  # Credentials used for reading data.
    'id_dataset': '',  # BigQuery dataset holding the line item id query source.
    'id_query': 'SELECT * FROM `Dataset.Table`;',  # Query that selects the line item ids to download.
    'id_legacy': False,  # presumably: run the id query as BigQuery legacy SQL — confirm against lineitem task docs
    'destination_dataset': '',  # BigQuery dataset where the line items are written.
    'destination_table': '',  # BigQuery table where the line items are written.
}
# StarThinker recipe: a single 'lineitem' task that reads line items selected
# by the id query and writes them to the destination BigQuery table.  Each
# leaf 'field' dict is a placeholder resolved from INPUTS at run time.
RECIPE = {
    'tasks': [
        {
            'lineitem': {
                # Credentials placeholder resolved from INPUTS['auth_read'].
                'auth': {
                    'field': {
                        'name': 'auth_read',
                        'kind': 'authentication',
                        'order': 1,
                        'default': 'user',
                        'description': 'Credentials used for reading data.'
                    }
                },
                'read': {
                    # Where the line item ids come from: a BigQuery query.
                    'line_items': {
                        'single_cell': True,
                        'bigquery': {
                            'dataset': {
                                'field': {
                                    'name': 'id_dataset',
                                    'kind': 'string',
                                    'order': 1,
                                    'default': ''
                                }
                            },
                            'query': {
                                'field': {
                                    'name': 'id_query',
                                    'kind': 'string',
                                    'order': 2,
                                    'default': 'SELECT * FROM `Dataset.Table`;'
                                }
                            },
                            'legacy': {
                                'field': {
                                    'name': 'id_legacy',
                                    'kind': 'boolean',
                                    'order': 3,
                                    'default': False
                                }
                            }
                        }
                    },
                    # Where the downloaded line items are written.
                    'out': {
                        'bigquery': {
                            'dataset': {
                                'field': {
                                    'name': 'destination_dataset',
                                    'kind': 'string',
                                    'order': 4,
                                    'default': ''
                                }
                            },
                            'table': {
                                'field': {
                                    'name': 'destination_table',
                                    'kind': 'string',
                                    'order': 5,
                                    'default': ''
                                }
                            }
                        }
                    }
                }
            }
        }
    ]
}
# Build the Airflow DAG from the recipe; Airflow discovers the module-level
# ``dag`` object by name, so it must stay at top level.
dag_maker = DAG_Factory('lineitem_read_to_bigquery_via_query', RECIPE, INPUTS)
dag = dag_maker.generate()

# When executed directly, print the equivalent StarThinker command line
# instead of registering with Airflow.
if __name__ == "__main__":
    dag_maker.print_commandline()
|
{
"content_hash": "457e6590d43340306590c87c94768a14",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 145,
"avg_line_length": 31.231292517006803,
"alnum_prop": 0.49705946416902635,
"repo_name": "google/starthinker",
"id": "88b5a8bd498e51679ea7e1f36fd5112f44928731",
"size": "5333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dags/lineitem_read_to_bigquery_via_query_dag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "89775"
},
{
"name": "Jupyter Notebook",
"bytes": "1088964"
},
{
"name": "Python",
"bytes": "2356647"
},
{
"name": "Shell",
"bytes": "89492"
}
],
"symlink_target": ""
}
|
from string import Template
from code_generator import CodeGenerator
from collections import OrderedDict
from java_class import InternalJavaFileData
class WrapperGenerator(CodeGenerator):
  """Generator class that generates wrapper layer code.

  Fixes applied versus the original: ``dict.has_key()`` (removed in
  Python 3) replaced with the ``in`` operator, which behaves identically on
  Python 2, and two stray statement-terminating semicolons removed.
  """

  def __init__(self, java_data, class_loader):
    super(WrapperGenerator, self).__init__(java_data, class_loader)

  def RunTask(self):
    # Entry point: emit the wrapper class source for this java_data.
    self._generated_class_name = self._java_data.wrapper_name
    self._generated_code = self.GenerateWrapperClass()

  def GenerateWrapperClass(self):
    """Assemble the complete wrapper class source from its sections."""
    wrapper_template = Template("""\
${PACKAGE_SECTION}
${IMPORT_SECTION}
${DOC_SECTION}
public ${CLASS_MODIFIER}class ${CLASS_NAME} \
${CLASS_EXTENDS} ${CLASS_IMPLEMENTS} {
${FIELDS_SECTION}
${ENUMS_SECTION}
${MEMBERSS_SECTION}
${INTERNALLY_CONSTRUCOR_SECTION}
${METHODS_SECTION}
${REFLECTION_SECTION}
}
""")
    import_string = self.GenerateImportRules()
    import_string += "import java.util.ArrayList;\n"
    value = {'PACKAGE_SECTION': self.GeneratePackage(),
             'IMPORT_SECTION': import_string,
             'DOC_SECTION': self.GenerateDoc(self._java_data.class_doc),
             'CLASS_MODIFIER': self.GenerateClassModifier(),
             'CLASS_NAME': self._java_data.wrapper_name,
             'CLASS_EXTENDS': self.GenerateClassExtends(),
             'CLASS_IMPLEMENTS': self.GenerateClassImplements(),
             'ENUMS_SECTION': self.GenerateEnums(),
             'FIELDS_SECTION': self.GenerateFields(),
             'MEMBERSS_SECTION': self.GenerateMembers(),
             'INTERNALLY_CONSTRUCOR_SECTION':
                 self.GenerateInternallyConstructor(),
             'METHODS_SECTION': self.GenerateMethods(),
             'REFLECTION_SECTION': self.GenerateReflectionInit()}
    return wrapper_template.substitute(value)

  def GeneratePackage(self):
    # Remove the 'internal' folder from internal package.
    package_name = self._java_data.package_name.replace('.internal', '')
    return 'package %s;\n' % (package_name)

  def GenerateClassModifier(self):
    # A wrapper containing any abstract method must itself be abstract.
    for method in self._java_data.methods:
      if method.is_abstract:
        return 'abstract '
    return ''

  def GenerateClassExtends(self):
    annotations = self._java_data.class_annotations
    # 'in' replaces has_key(): identical behavior, Python 3 compatible.
    if InternalJavaFileData.ANNOTATION_EXTEND_CLASS in annotations:
      to_extend = annotations[InternalJavaFileData.ANNOTATION_EXTEND_CLASS]
      return ' extends %s ' % (to_extend.replace('.class', ''))
    return ''

  def GenerateClassImplements(self):
    annotations = self._java_data.class_annotations
    # 'in' replaces has_key(): identical behavior, Python 3 compatible.
    if InternalJavaFileData.ANNOTATION_IMPL in annotations:
      to_implement = annotations[InternalJavaFileData.ANNOTATION_IMPL]
      impl_interface = to_implement.replace('.class', '')
      # Internal interfaces are exposed under their wrapper name.
      if self.IsInternalClass(impl_interface):
        impl_interface = self.GetJavaData(impl_interface).wrapper_name
      return ' implements %s ' % (impl_interface)
    return ''

  def GenerateFields(self):
    """Emit public constants plus the bookkeeping members used at
    construction time (constructor args and post-construction hook)."""
    field_template = Template("""\
${DOC}
public final static ${TYPE} ${NAME} = ${VALUE};
""")
    fields_string = ''
    for field in self._java_data.fields:
      value = {'DOC': self.GenerateDoc(field.field_doc),
               'TYPE': field.field_type,
               'NAME': field.field_name,
               'VALUE': field.field_value}
      fields_string += field_template.substitute(value)
    # Instance-capable wrappers track constructor arguments so the bridge
    # object can be created lazily in reflectionInit().
    if not self._java_data.HasNoInstanceAnnotation():
      fields_string += " private ArrayList<Object> constructorTypes;\n"
      fields_string += " private ArrayList<Object> constructorParams;\n"
      fields_string += " private ReflectMethod postWrapperMethod;\n"
    return fields_string

  def GenerateEnums(self):
    """Emit wrapper-side enums plus a converter that maps each wrapper enum
    value onto the corresponding bridge enum via reflection."""
    enum_template = Template("""\
${DOC}
public enum ${NAME} {${CONTENT}
}
private ReflectMethod ${ENUM_VALUE_OF_METHOD} = new ReflectMethod();
private Object Convert${NAME}(${NAME} type) {
return ${ENUM_VALUE_OF_METHOD}.invoke(type.toString());
}
""")
    enums_string = ''
    for enum in self._java_data.enums.values():
      value = {'DOC': self.GenerateDoc(enum.enum_doc),
               'NAME': enum.enum_name.replace('Internal', ''),
               'CONTENT': enum.enum_declaration,
               'ENUM_VALUE_OF_METHOD': enum.EnumMethodValueOfName()}
      enums_string += enum_template.substitute(value)
    return enums_string

  def GenerateMembers(self):
    # No-instance wrappers only need the shared core; instance wrappers also
    # hold their bridge peer.
    if self._java_data.HasNoInstanceAnnotation():
      members_string = """\
private static XWalkCoreWrapper coreWrapper;
"""
    else:
      members_string = """\
private XWalkCoreWrapper coreWrapper;
private Object bridge;
Object getBridge() {
return bridge;
}
"""
    return members_string

  def GenerateInternallyConstructor(self):
    """For create-internally classes, emit the constructor that adopts an
    existing bridge object instead of creating one."""
    if self._java_data.HasNoInstanceAnnotation():
      return ''
    if not self._java_data.HasCreateInternallyAnnotation():
      return ''
    constructor_template = Template("""\
public ${CLASS_NAME}(Object bridge) {
this.bridge = bridge;
reflectionInit();
}
""")
    value = {'CLASS_NAME': self._java_data.wrapper_name}
    return constructor_template.substitute(value)

  def GenerateWrapperDefaultConstructor(self):
    template = Template("""\
public ${NAME}() {
${WRAP_LINES}
reflectionInit();
}
""")
    wrap_string = " constructorTypes = new ArrayList<Object>();\n"
    wrap_string += " constructorParams = new ArrayList<Object>();\n"
    value = {'NAME': self._java_data.wrapper_name,
             'WRAP_LINES': wrap_string}
    return template.substitute(value)

  def GenerateMethods(self):
    methods_string = ''
    # Generate method definitions.
    if self._java_data.need_default_constructor:
      methods_string += self.GenerateWrapperDefaultConstructor()
    for method in self._java_data.methods:
      methods_string += method.GenerateMethodsStringForWrapper()
    return methods_string

  def GenerateReflectionInit(self):
    """Emit reflectionInit(), which binds this wrapper to its bridge class
    (static wrappers) or bridge instance (instance wrappers)."""
    if self._java_data.HasNoInstanceAnnotation():
      # Static-only wrapper: bind static methods against the bridge class.
      ref_methods_string = ''
      ref_method_template = Template("""\
${METHOD_DECLARE_NAME}.init(null, bridgeClass, "${METHOD_NAME}"${PARAMS});
""")
      for method in self._java_data.methods:
        if not method.is_static:
          continue
        value = {'METHOD_DECLARE_NAME': method._method_declare_name,
                 'METHOD_NAME': method.method_name,
                 'PARAMS': method._wrapper_params_declare_for_bridge}
        ref_methods_string += ref_method_template.substitute(value)

      ref_init_template = Template("""\
static void reflectionInit() {
if (coreWrapper != null) return;
XWalkCoreWrapper.initEmbeddedMode();
coreWrapper = XWalkCoreWrapper.getInstance();
if (coreWrapper == null) {
XWalkCoreWrapper.reserveReflectClass(${CLASS_NAME}.class);
return;
}
Class<?> bridgeClass = coreWrapper.getBridgeClass("${BRIDGE_NAME}");
${REF_METHODS} }
""")
      value = {'CLASS_NAME': self._java_data.wrapper_name,
               'BRIDGE_NAME': self._java_data._bridge_name,
               'REF_METHODS': ref_methods_string}
      return ref_init_template.substitute(value)

    ref_init_string = """\
coreWrapper = XWalkCoreWrapper.getInstance();
if (coreWrapper == null) {
XWalkCoreWrapper.reserveReflectObject(this);
return;
}
"""
    # Unless created internally, the bridge instance is constructed here from
    # the recorded constructor types/params, with 'this' appended.
    if not self._java_data.HasCreateInternallyAnnotation():
      init_templete = Template("""
int length = constructorTypes.size();
Class<?>[] paramTypes = new Class<?>[length+1];
for (int i = 0; i < length; ++i) {
Object type = constructorTypes.get(i);
if (type instanceof String) {
paramTypes[i] = coreWrapper.getBridgeClass((String) type);
constructorParams.set(i, \
coreWrapper.getBridgeObject(constructorParams.get(i)));
} else if (type instanceof Class<?>) {
paramTypes[i] = (Class<?>) type;
} else {
assert(false);
}
}
paramTypes[length] = Object.class;
constructorParams.add(this);
ReflectConstructor constructor = new ReflectConstructor(
coreWrapper.getBridgeClass(\"${BRIDGE_NAME}\"), paramTypes);
bridge = constructor.newInstance(constructorParams.toArray());
if (postWrapperMethod != null) postWrapperMethod.invoke();
""")
      value = {'BRIDGE_NAME': self._java_data._bridge_name}
      ref_init_string += init_templete.substitute(value)

    # Bind each enum's valueOf() on the bridge enum class.
    ref_enum_template = Template("""\
${METHOD}.init(null,
coreWrapper.getBridgeClass("${ENUM}"), "valueOf", String.class);
""")
    ref_methods_string = ''
    for enum in self._java_data.enums.values():
      value = {'METHOD': enum.EnumMethodValueOfName(),
               'ENUM': self._java_data.GetBridgeName(enum.enum_name)}
      ref_methods_string += ref_enum_template.substitute(value)

    # Bind each concrete instance method onto its bridge '...Super' method.
    ref_method_template = Template("""\
${METHOD_DECLARE_NAME}.init(bridge, null,
"${METHOD}Super"${PARAMS});
""")
    if (ref_methods_string != ''):
      ref_methods_string += "\n"
    for method in self._java_data.methods:
      if method.is_constructor or method.is_static or method.is_abstract \
          or method.is_delegate or method.disable_reflect_method:
        continue
      value = {'METHOD_DECLARE_NAME': method._method_declare_name,
               'METHOD': method.method_name,
               'PARAMS': method._wrapper_params_declare_for_bridge}
      ref_methods_string += ref_method_template.substitute(value)

    ref_init_template = Template("""\
void reflectionInit() {
XWalkCoreWrapper.initEmbeddedMode();
${REF_INIT}
${REF_METHODS} }
""")
    value = {'REF_INIT': ref_init_string,
             'REF_METHODS': ref_methods_string}
    return ref_init_template.substitute(value)
|
{
"content_hash": "e7ad0e8584e0b560bea22df725170fca",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 82,
"avg_line_length": 33.92176870748299,
"alnum_prop": 0.6359169758347538,
"repo_name": "Bysmyyr/crosswalk",
"id": "5e406173e7ce064eb1cef50a86dd5aaa7bbb4c9f",
"size": "10160",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/reflection_generator/wrapper_generator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "36483"
},
{
"name": "C#",
"bytes": "10797"
},
{
"name": "C++",
"bytes": "1709108"
},
{
"name": "CSS",
"bytes": "1224"
},
{
"name": "HTML",
"bytes": "113954"
},
{
"name": "Java",
"bytes": "1384913"
},
{
"name": "JavaScript",
"bytes": "116287"
},
{
"name": "Objective-C",
"bytes": "2491"
},
{
"name": "Objective-C++",
"bytes": "16628"
},
{
"name": "Python",
"bytes": "137130"
},
{
"name": "Shell",
"bytes": "2686"
}
],
"symlink_target": ""
}
|
'''
Python and Ansible for Network Engineers
Week 4, Exercise 1
Use Paramiko to retrieve the entire 'show version' output.
'''
import paramiko
from getpass import getpass
import time
MAX_BUFFER = 65535
def disable_paging(ssh_conn):
    """Turn off output paging on the remote device so long command output
    is not interrupted by '--More--' prompts."""
    command = 'terminal length 0\n'
    ssh_conn.send(command)
def send_command(ssh_conn, cmd):
    """Write *cmd* to the SSH channel and pause briefly so the remote
    device has time to produce its output (read it with recv() afterwards)."""
    ssh_conn.send('%s\n' % cmd)
    time.sleep(1)
def main():
'''
Use Paramiko to retrieve the entire 'show version' output.
'''
# ip_addr = raw_input("enter IP address: ")
ip_addr = '184.105.247.71'
username = 'pyclass'
password = getpass()
port = 22
ssh_conn_pre = paramiko.SSHClient()
ssh_conn_pre.load_system_host_keys()
ssh_conn_pre.connect(ip_addr, username=username, password=password,
look_for_keys=False, allow_agent=False)
ssh_conn = ssh_conn_pre.invoke_shell()
ssh_conn.settimeout(6.0)
time.sleep(1)
disable_paging(ssh_conn)
output = send_command(ssh_conn, cmd='show version')
output = ssh_conn.recv(MAX_BUFFER)
print output
if __name__ == "__main__":
main()
|
{
"content_hash": "b8cc5bdbec21cd4b805113c720beaa9b",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 71,
"avg_line_length": 20.60377358490566,
"alnum_prop": 0.6401098901098901,
"repo_name": "ibyt32/pynet_test",
"id": "87e179ad59b7f2a311db0d596dc945a39a350a8f",
"size": "1114",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "w4e1_paramiko.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23461"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from collections import namedtuple
import io
import os
import struct
import uuid
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import algorithms, Cipher, modes
from .exceptions import (
InvalidCiphertextException,
AccessDeniedException,
NotFoundException,
)
MASTER_KEY_LEN = 32
KEY_ID_LEN = 36
IV_LEN = 12
TAG_LEN = 16
HEADER_LEN = KEY_ID_LEN + IV_LEN + TAG_LEN
# NOTE: This is just a simple binary format. It is not what KMS actually does.
CIPHERTEXT_HEADER_FORMAT = ">{key_id_len}s{iv_len}s{tag_len}s".format(
key_id_len=KEY_ID_LEN, iv_len=IV_LEN, tag_len=TAG_LEN
)
Ciphertext = namedtuple("Ciphertext", ("key_id", "iv", "ciphertext", "tag"))
def generate_key_id():
return str(uuid.uuid4())
def generate_data_key(number_of_bytes):
"""Generate a data key."""
return os.urandom(number_of_bytes)
def generate_master_key():
"""Generate a master key."""
return generate_data_key(MASTER_KEY_LEN)
def _serialize_ciphertext_blob(ciphertext):
"""Serialize Ciphertext object into a ciphertext blob.
NOTE: This is just a simple binary format. It is not what KMS actually does.
"""
header = struct.pack(
CIPHERTEXT_HEADER_FORMAT,
ciphertext.key_id.encode("utf-8"),
ciphertext.iv,
ciphertext.tag,
)
return header + ciphertext.ciphertext
def _deserialize_ciphertext_blob(ciphertext_blob):
"""Deserialize ciphertext blob into a Ciphertext object.
NOTE: This is just a simple binary format. It is not what KMS actually does.
"""
header = ciphertext_blob[:HEADER_LEN]
ciphertext = ciphertext_blob[HEADER_LEN:]
key_id, iv, tag = struct.unpack(CIPHERTEXT_HEADER_FORMAT, header)
return Ciphertext(
key_id=key_id.decode("utf-8"), iv=iv, ciphertext=ciphertext, tag=tag
)
def _serialize_encryption_context(encryption_context):
"""Serialize encryption context for use a AAD.
NOTE: This is not necessarily what KMS does, but it retains the same properties.
"""
aad = io.BytesIO()
for key, value in sorted(encryption_context.items(), key=lambda x: x[0]):
aad.write(key.encode("utf-8"))
aad.write(value.encode("utf-8"))
return aad.getvalue()
def encrypt(master_keys, key_id, plaintext, encryption_context):
    """Encrypt plaintext under the moto master key named by ``key_id``.

    NOTE: This is not necessarily what KMS does, but it retains the same properties.
    NOTE: This function is NOT compatible with KMS APIs.

    :param dict master_keys: Mapping of a KmsBackend's known master keys
    :param str key_id: Key ID of moto master key
    :param bytes plaintext: Plaintext data to encrypt
    :param dict[str, str] encryption_context: KMS-style encryption context
    :returns: Moto-structured ciphertext blob encrypted under a moto master key in master_keys
    :rtype: bytes
    """
    if key_id not in master_keys:
        looks_like_alias = key_id.startswith("alias/") or ":alias/" in key_id
        raise NotFoundException(
            "{id_type} {key_id} is not found.".format(
                id_type="Alias" if looks_like_alias else "keyId", key_id=key_id
            )
        )
    key = master_keys[key_id]

    iv = os.urandom(IV_LEN)
    aad = _serialize_encryption_context(encryption_context=encryption_context)

    # AES-GCM: the context is authenticated (as AAD) but not encrypted.
    cipher = Cipher(
        algorithms.AES(key.key_material), modes.GCM(iv), backend=default_backend()
    )
    encryptor = cipher.encryptor()
    encryptor.authenticate_additional_data(aad)
    encrypted = encryptor.update(plaintext) + encryptor.finalize()

    return _serialize_ciphertext_blob(
        ciphertext=Ciphertext(
            key_id=key_id, iv=iv, ciphertext=encrypted, tag=encryptor.tag
        )
    )
def decrypt(master_keys, ciphertext_blob, encryption_context):
    """Decrypt a ciphertext blob using a master key material.

    NOTE: This is not necessarily what KMS does, but it retains the same properties.
    NOTE: This function is NOT compatible with KMS APIs.

    :param dict master_keys: Mapping of a KmsBackend's known master keys
    :param bytes ciphertext_blob: moto-structured ciphertext blob encrypted under a moto master key in master_keys
    :param dict[str, str] encryption_context: KMS-style encryption context
    :returns: plaintext bytes and moto key ID
    :rtype: bytes and str
    """
    # Any structural problem with the blob is surfaced as invalid ciphertext.
    try:
        ciphertext = _deserialize_ciphertext_blob(ciphertext_blob=ciphertext_blob)
    except Exception:
        raise InvalidCiphertextException()

    aad = _serialize_encryption_context(encryption_context=encryption_context)

    if ciphertext.key_id not in master_keys:
        raise AccessDeniedException(
            "The ciphertext refers to a customer master key that does not exist, "
            "does not exist in this region, or you are not allowed to access."
        )
    key = master_keys[ciphertext.key_id]

    # GCM verifies the tag over both payload and AAD; a wrong key, tag, or
    # encryption context shows up here as a decryption failure.
    try:
        cipher = Cipher(
            algorithms.AES(key.key_material),
            modes.GCM(ciphertext.iv, ciphertext.tag),
            backend=default_backend(),
        )
        decryptor = cipher.decryptor()
        decryptor.authenticate_additional_data(aad)
        plaintext = decryptor.update(ciphertext.ciphertext) + decryptor.finalize()
    except Exception:
        raise InvalidCiphertextException()

    return plaintext, ciphertext.key_id
|
{
"content_hash": "80c5df04d515d0c3db19f88e5fd525f8",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 114,
"avg_line_length": 33.18012422360248,
"alnum_prop": 0.685698240359416,
"repo_name": "william-richard/moto",
"id": "4eacba1a62e1dbfaaeb47bb24cb5c706f3a305eb",
"size": "5342",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/kms/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
}
|
from .nbytes import nbytes
from .score import Scorer
from heapdict import heapdict
import time
def cost(nbytes, time):
    """Default cost function: seconds spent per byte stored, scaled by 1e9."""
    seconds = float(time)
    return seconds / nbytes / 1e9
def memo_key(args, kwargs):
    """Build a hashable cache key for a function call.

    Falls back to argument identities plus the kwargs repr when the
    arguments themselves are not hashable.
    """
    key = (args, frozenset(kwargs.items()))
    try:
        hash(key)
        return key
    except TypeError:
        return tuple(map(id, args)), str(kwargs)
class Cache(object):
    """ A cache that prefers long-running cheap-to-store computations

    This cache prefers computations that have the following properties:

    1.  Costly to compute (seconds)
    2.  Cheap to store (bytes)
    3.  Frequently used
    4.  Recently used

    Parameters
    ----------
    available_bytes: int
        The number of bytes of data to keep in the cache
    limit: float
        The minimum cost something must be to consider to keep in the cache
    scorer: optional with halflife
        A Scorer object (see cachey/scorer.py)
    halflife: int, optional with scorer
        The halflife in number of touches of the score of a piece of data
    nbytes: function (defaults to cachey/nbytes.py)
        Function to compute the number of bytes of an input.
    cost: function (defaults to cost())
        Determine cost from nbytes and time
    hit: function, optional
        Callback ``hit(key, value)`` invoked on every cache hit
    miss: function, optional
        Callback ``miss(key)`` invoked on every cache miss

    Example
    -------

    >>> from cachey import Cache
    >>> c = Cache(1e9, 10)  # 1GB of space, costs must be 10 or higher

    >>> c.put('x', 1, cost=50)
    >>> c.get('x')
    1

    >>> def inc(x):
    ...     return x + 1

    >>> memo_inc = c.memoize(inc)  # Memoize functions
    """
    def __init__(self, available_bytes, limit=0, scorer=None, halflife=1000,
                 nbytes=nbytes, cost=cost, hit=None, miss=None):
        if scorer is None:
            scorer = Scorer(halflife)
        self.scorer = scorer
        self.available_bytes = available_bytes
        self.limit = limit
        self.get_nbytes = nbytes
        self.cost = cost
        self.hit = hit
        self.miss = miss

        self.data = dict()       # key -> cached value
        self.heap = heapdict()   # key -> score; min-heap, cheapest entry evicted first
        self.nbytes = dict()     # key -> stored size in bytes
        self.total_bytes = 0

    def put(self, key, value, cost, nbytes=None):
        """ Put key-value data into cache with associated cost

        >>> c = Cache(1e9, 10)
        >>> c.put('x', 10, cost=50)
        >>> c.get('x')
        10
        """
        if nbytes is None:
            nbytes = self.get_nbytes(value)

        # Only admit entries that are costly enough and can physically fit.
        if cost >= self.limit and nbytes < self.available_bytes:
            score = self.scorer.touch(key, cost)

            # Admit when there is free room, or when this entry outscores the
            # current cheapest entry (which shrink() would then evict).
            if (nbytes + self.total_bytes < self.available_bytes or
                not self.heap or score > self.heap.peekitem()[1]):
                self.data[key] = value
                self.heap[key] = score
                self.nbytes[key] = nbytes
                self.total_bytes += nbytes
                self.shrink()

    def get(self, key, default=None):
        """ Get value associated with key.  Returns None if not present

        >>> c = Cache(1e9, 10)
        >>> c.put('x', 10, cost=50)
        >>> c.get('x')
        10
        """
        # Touch the scorer even on a miss so repeated requests raise the score.
        score = self.scorer.touch(key)

        if key in self.data:
            value = self.data[key]
            if self.hit is not None:
                self.hit(key, value)
            self.heap[key] = score
            return value
        else:
            if self.miss is not None:
                self.miss(key)
            return default

    def retire(self, key):
        """ Retire/remove a key from the cache

        See Also:
            shrink
        """
        self.data.pop(key)
        self.total_bytes -= self.nbytes.pop(key)
        # BUG FIX: also drop the key's heap entry if still present, so that a
        # direct call to retire() does not leave a stale key in the heap
        # (which would make a later _shrink_one() raise KeyError).  This is a
        # no-op when called from _shrink_one(), which pops the heap first.
        self.heap.pop(key, None)

    def _shrink_one(self):
        # Evict the entry with the lowest score.
        key, score = self.heap.popitem()
        self.retire(key)

    def shrink(self):
        """ Retire keys from the cache until we're under bytes budget

        See Also:
            retire
        """
        if self.total_bytes <= self.available_bytes:
            return

        while self.total_bytes > self.available_bytes:
            self._shrink_one()

    def __contains__(self, key):
        return key in self.data

    def clear(self):
        while self:
            self._shrink_one()

    def __nonzero__(self):
        return not not self.data

    # BUG FIX: Python 3 ignores __nonzero__; without __bool__ every instance
    # is truthy, which turns clear()'s ``while self`` into an infinite loop
    # once the cache is empty.
    __bool__ = __nonzero__

    def memoize(self, func, key=memo_key):
        """ Create a cached function

        >>> def inc(x):
        ...     return x + 1

        >>> c = Cache(1e9)
        >>> memo_inc = c.memoize(inc)
        >>> memo_inc(1)  # computes first time
        2
        >>> memo_inc(1)  # uses cached result (if computation has a high score)
        2
        """
        def cached_func(*args, **kwargs):
            k = (func, key(args, kwargs))
            result = self.get(k)
            # NOTE: a legitimately-None result is indistinguishable from a
            # miss and will be recomputed on every call.
            if result is None:
                start = time.time()
                result = func(*args, **kwargs)
                end = time.time()
                # BUG FIX: honor the nbytes/cost functions this Cache was
                # configured with, instead of the module-level defaults.
                nb = self.get_nbytes(result)
                self.put(k, result, self.cost(nb, end - start), nbytes=nb)
            return result
        return cached_func
|
{
"content_hash": "c813f62f36dbe4c4a7737579b3bf3b6a",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 79,
"avg_line_length": 27.926966292134832,
"alnum_prop": 0.540132770066385,
"repo_name": "Winterflower/cachey",
"id": "b8ad6bdb4e9094a010e4e3066b86a13bb0d279e1",
"size": "4971",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cachey/cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10711"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
import datetime
from django.utils import timezone
from .models import Question
from django.core.urlresolvers import reverse
# Create your tests here.
class QuestionMethodTests(TestCase):
    """Unit tests for ``Question.was_published_recently()``."""

    def test_was_published_recently_with_future_question(self):
        """A question whose pub_date is in the future is not recent."""
        pub_date = timezone.now() + datetime.timedelta(days=30)
        question = Question(pub_date=pub_date)
        self.assertEqual(question.was_published_recently(), False)

    def test_was_published_recently_with_old_question(self):
        """A question published 30 days ago is not recent."""
        pub_date = timezone.now() - datetime.timedelta(days=30)
        question = Question(pub_date=pub_date)
        self.assertEqual(question.was_published_recently(), False)

    def test_was_published_recently_with_recently_question(self):
        """A question published within the last hour is recent."""
        pub_date = timezone.now() - datetime.timedelta(hours=1)
        question = Question(pub_date=pub_date)
        self.assertEqual(question.was_published_recently(), True)
def create_question(question_text, days):
    """Create and save a Question offset by ``days`` from now (negative = past)."""
    pub_date = timezone.now() + datetime.timedelta(days=days)
    return Question.objects.create(question_text=question_text, pub_date=pub_date)
class QuestionViewTests(TestCase):
    """Integration tests for the polls index view.

    NOTE(review): the context key 'lastest_question_list' (sic) must match the
    name the index view puts into the template context — confirm against the
    view before renaming it here.
    """

    def test_index_view_with_no_question(self):
        """With no questions, a friendly message and an empty list are shown."""
        response = self.client.get(reverse('polls:index'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "No polls are available.")
        self.assertQuerysetEqual(response.context['lastest_question_list'], [])

    def test_index_view_with_past_question(self):
        """A question with a past pub_date is displayed."""
        create_question(question_text="Past Question.", days=-30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['lastest_question_list'], ['<Question: Past Question.>'])

    def test_index_view_with_future_question(self):
        """A question with a future pub_date is not displayed."""
        create_question(question_text="Future Question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertContains(response, "No polls are available.", status_code=200)
        self.assertQuerysetEqual(response.context['lastest_question_list'], [])

    def test_index_view_with_future_and_past_question(self):
        """Only the past question is displayed when both exist."""
        create_question(question_text="Past Question.", days=-30)
        create_question(question_text="Future Question.", days=30)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['lastest_question_list'], ['<Question: Past Question.>'])

    def test_index_view_with_two_past_questions(self):
        """Multiple past questions are listed newest-first.

        BUG FIX: this method previously reused the name
        ``test_index_view_with_future_and_past_question``; the duplicate
        definition silently shadowed the test above so it never ran.
        """
        create_question(question_text="Past Question 1.", days=-30)
        create_question(question_text="Past Question 2.", days=-5)
        response = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(response.context['lastest_question_list'], ['<Question: Past Question 2.>', '<Question: Past Question 1.>'])
|
{
"content_hash": "2d0e80ded611710f6199d6f4cde72604",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 141,
"avg_line_length": 46.435483870967744,
"alnum_prop": 0.7061479680444599,
"repo_name": "LoopSun/PythonWay",
"id": "1c6ee4a7947ef225223b5f88701e1e1afb00d6d2",
"size": "2879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "GetYourStar/DjangoProj/packets/polls/tests.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "222"
},
{
"name": "HTML",
"bytes": "3355"
},
{
"name": "Python",
"bytes": "59983"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Instantiate the shared Endor catwalk building template object."""
    building = Building()

    building.template = "object/building/endor/shared_endor_catwalk.iff"
    building.attribute_template_id = -1
    building.stfName("building_name","base_filler_building")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return building
|
{
"content_hash": "6f08ba730d8d935722f4e4c77bd42a1a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 67,
"avg_line_length": 24,
"alnum_prop": 0.7019230769230769,
"repo_name": "anhstudios/swganh",
"id": "5e11cee4a913b1a82ca31153ba9d78aa6e79510a",
"size": "457",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/building/endor/shared_endor_catwalk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from JumpScale import j
# --- jumpscript metadata, read by the JumpScale jumpscript runner ---
descr = """
create and start a routeros image
"""

organization = "jumpscale"
author = "deboeckj@codescalers.com"
license = "bsd"
version = "1.0"
category = "deploy.routeros"
period = 0 # always in sec
enable = True
# NOTE: 'async' became a reserved keyword in Python 3.7; this jumpscript is
# Python 2 code (it uses print statements below).
async = True
queue = 'hypervisor'  # worker queue on which this jumpscript runs
def action(networkid, publicip, publicgwip, publiccidr, password):
    """Create and boot a RouterOS VFW (virtual firewall) VM for a network.

    Destroys any previous VM/image for this network id, clones the RouterOS
    template image, defines a libvirt VM on a VXLAN-backed private network,
    assigns its internal address over the serial console, then configures it
    through the RouterOS API (addresses, PPP, service ports, admin password)
    and finally reboots it and verifies reachability.

    :param networkid: numeric network id; its 4-digit hex form names the
        VM, the libvirt network, and the image directory
    :param publicip: address to assign to the router's 'public' interface
    :param publicgwip: public default gateway, also used for the ping check
    :param publiccidr: CIDR prefix length for the public address
    :param password: new RouterOS admin / PPP 'admin' secret
    :returns: dict with nid, gid, username, password and internalip
    """
    import JumpScale.lib.routeros
    import pexpect
    import netaddr
    import libvirt
    import JumpScale.lib.ovsnetconfig
    import time
    import netaddr

    # Global VFW settings from the JumpScale config.
    DEFAULTGWIP = j.application.config.get("vfw.default.ip")
    BACKPLANE = 'vxbackend'
    netrange = j.application.config.get("vfw.netrange.internal")
    defaultpasswd=j.application.config.get("vfw.admin.passwd")
    nc = j.system.ovsnetconfig
    con = libvirt.open()
    data = {'nid': j.application.whoAmI.nid,
            'gid': j.application.whoAmI.gid,
            'username': 'vscalers',
            'password': defaultpasswd
            }
    j.packages.findNewest('', 'routeros_config').install()
    # Derived names: everything for this VFW is keyed on the hex network id.
    # The internal ip is netrange's base address offset by the network id.
    networkidHex = '%04x' % int(networkid)
    internalip = str(netaddr.IPAddress(netaddr.IPNetwork(netrange).first + int(networkid)))
    networkname = "space_%s" % networkidHex
    name = 'routeros_%s' % networkidHex
    destinationdir = '/mnt/vmstor/routeros/%s' % networkidHex

    def cleanup():
        # Best-effort teardown: destroy/undefine the VM and drop its image dir.
        print "CLEANUP: %s/%s"%(networkid,networkidHex)
        try:
            dom = con.lookupByName(name)
            dom.destroy()
            dom.undefine()
        except libvirt.libvirtError:
            pass
        j.system.fs.removeDirTree(destinationdir)

    def deleteNet(net):
        # Best-effort removal of a libvirt network object.
        try:
            net.destroy()
        except:
            pass
        try:
            net.undefine()
        except:
            pass

    # Remove any leftover libvirt network and VM from a previous run.
    try:
        for net in con.listAllNetworks():
            if net.name() == networkname:
                deleteNet(net)
                break
    except:
        pass
    cleanup()

    # From here on, any failure triggers cleanup() and re-raises (see the
    # bare except at the bottom of the function).
    try:
        #setup network vxlan
        nc.ensureVXNet(int(networkid), BACKPLANE)
        xml = ''' <network>
            <name>%(networkname)s</name>
            <forward mode="bridge"/>
            <bridge name='%(networkname)s'/>
            <virtualport type='openvswitch'/>
            </network>''' % {'networkname': networkname}
        private = con.networkDefineXML(xml)
        private.create()
        private.setAutostart(True)

        # Clone the template qcow2 and define the VM from the template XML,
        # substituting this network's hex id.
        j.system.fs.createDir(destinationdir)
        destinationfile = 'routeros-small-%s.qcow2' % networkidHex
        destinationfile = j.system.fs.joinPaths(destinationdir, destinationfile)
        imagedir = j.system.fs.joinPaths(j.dirs.baseDir, 'apps/routeros_template/routeros_template_backup')
        imagefile = j.system.fs.joinPaths(imagedir, 'routeros-small-NETWORK-ID.qcow2')
        xmlsource = j.system.fs.fileGetContents(j.system.fs.joinPaths(imagedir, 'routeros-template.xml'))
        xmlsource = xmlsource.replace('NETWORK-ID', networkidHex)
        j.system.fs.copyFile(imagefile, destinationfile)
        try:
            dom = con.defineXML(xmlsource)
            dom.create()
        except libvirt.libvirtError, e:
            cleanup()
            raise RuntimeError("Could not create VFW vm from template, network id:%s:%s\n%s"%(networkid,networkidHex,e))
        data['internalip'] = internalip
        # The internal ip must not answer on ssh yet, otherwise another
        # router already owns it.
        if not j.system.net.tcpPortConnectionTest(internalip,22):
            print "OK no other router found."
        else:
            raise RuntimeError("IP conflict there is router with %s"%internalip)

        # Assign the internal address over the VM's serial console, since the
        # router is not reachable over the network yet.
        try:
            run = pexpect.spawn("virsh console %s" % name)
            print "Waiting to attach to console"
            run.expect("Connected to domain", timeout=10)
            run.sendline() #first enter to clear welcome message of kvm console
            print 'Waiting for Login'
            run.expect("Login:", timeout=60)
            run.sendline("vscalers")
            run.expect("Password:", timeout=2)
            run.sendline(defaultpasswd)
            print 'waiting for prompt'
            run.expect("\] >", timeout=60) # wait for primpt
            run.send("/ip addr add address=%s/22 interface=internal\r\n" % internalip)
            print 'waiting for end of command'
            run.expect("\] >", timeout=2) # wait for primpt
            run.send("\r\n")
            run.close()
        except Exception,e:
            cleanup()
            raise RuntimeError("Could not set internal ip on VFW, network id:%s:%s\n%s"%(networkid,networkidHex,e))
        print "wait max 30 sec on tcp port 22 connection to '%s'"%internalip
        if j.system.net.waitConnectionTest(internalip,22,timeout=30):
            print "Router is accessible, initial configuration probably ok."
        else:
            raise RuntimeError("Could not connect to router on %s"%internalip)

        # Switch to the RouterOS API client for the rest of the configuration.
        ro=j.clients.routeros.get(internalip,"vscalers",defaultpasswd)
        try:
            ro.ipaddr_remove(DEFAULTGWIP)
            ro.resetMac("internal")
        except Exception,e:
            raise RuntimeError("Could not cleanup VFW temp ip addr, network id:%s:%s\n%s"%(networkid,networkidHex,e))
        ro.do("/system/identity/set",{"name":"%s/%s"%(networkid,networkidHex)})
        # Remove leftover .backup files from the template image.
        toremove=[ item for item in ro.list("/") if item.find('.backup')<>-1]
        for item in toremove:
            ro.delfile(item)
        if not "skins" in ro.list("/"):
            ro.mkdir("/skins")
        ro.uploadFilesFromDir("keys")
        ro.uploadFilesFromDir("skins","/skins")
        time.sleep(10)
        # Drop the template's leftover addresses, then push the scripted
        # configuration and the public/cloudspace addresses.
        ro.executeScript("/ip address remove numbers=[/ip address find network=192.168.1.0]")
        ro.executeScript("/ip address remove numbers=[/ip address find network=192.168.103.0]")
        ro.uploadExecuteScript("basicnetwork")
        ro.ipaddr_set('public', "%s/%s" % (publicip, publiccidr), single=True)
        ipaddr=[]
        for item in ro.ipaddr_getall():
            if item["interface"]=="public":
                ipaddr.append(item["ip"])
        if not ipaddr:
            raise RuntimeError("Each VFW needs to have 1 public ip addr at this state, this vfw has not")
        ro.ipaddr_set('cloudspace-bridge', '192.168.103.1/24',single=True)
        ro.uploadExecuteScript("route", vars={'$gw': publicgwip})
        ro.uploadExecuteScript("ppp")
        ro.uploadExecuteScript("customer")
        ro.uploadExecuteScript("systemscripts")
        cmd="/certificate import file-name=ca.crt passphrase='123456'"
        #ro.executeScript(cmd)
        #import file-name=RB450.crt passphrase="123456"
        #import file-name=RB450.pem passphrase="123456"
        # Set the real admin password and recreate the admin PPP secret.
        cmd="/user set numbers=[/user find name=admin] password=\"%s\""% password
        ro.executeScript(cmd)
        cmd="/ppp secret remove numbers=[/ppp secret find name=admin]"
        ro.executeScript(cmd)
        cmd="/ppp secret add name=admin service=pptp password=\"%s\" profile=default"%password
        ro.executeScript(cmd)
        cmd="/ip neighbor discovery set [ /interface ethernet find name=public ] discover=no"
        ro.executeScript(cmd)
        # Move management services off their default ports and disable telnet.
        print "change port for www"
        ro.executeScript("/ip service set port=9080 numbers=[/ip service find name=www]")
        print "disable telnet"
        ro.executeScript("/ip service disable numbers=[/ip service find name=telnet]")
        print "change port for ftp"
        ro.executeScript("/ip service set port=9021 numbers=[/ip service find name=ftp]")
        print "change port for ssh"
        ro.executeScript("/ip service set port=9022 numbers=[/ip service find name=ssh]")
        print "reboot of router"
        cmd="/system reboot"
        # The reboot drops the API connection, so an exception here is expected.
        try:
            ro.executeScript(cmd)
        except Exception,e:
            pass
        print "reboot busy"
        # Poll for up to 60s until the router is back and can ping the gateway.
        start = time.time()
        timeout = 60
        while time.time() - start < timeout:
            try:
                ro=j.clients.routeros.get(internalip,"vscalers",defaultpasswd)
                if ro.ping(publicgwip):
                    # NOTE(review): this branch means the ping SUCCEEDED;
                    # the message text below is misleading.
                    print "Failed to ping %s waiting..." % publicgwip
                    break
            except:
                print 'Failed to connect will try again in 3sec'
            time.sleep(3)
        else:
            raise RuntimeError("Could not ping to:%s for VFW %s"%(publicgwip, networkid))
        print "wait max 2 sec on tcp port 9022 connection to '%s'"%internalip
        if j.system.net.waitConnectionTest(internalip,9022,timeout=2):
            print "Router is accessible, configuration probably ok."
        else:
            raise RuntimeError("Internal ssh is not accsessible.")
    except:
        cleanup()
        raise
    return data
|
{
"content_hash": "6c6cfcdf4691d6e4130b62d0549b247d",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 120,
"avg_line_length": 37.05106382978723,
"alnum_prop": 0.6038819340760307,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "40d83a1dfb7a6b23a4a9ddb8b915e83930ba563d",
"size": "8707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/vfw/jumpscripts/vfw/vfs_create_routeros.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
import os
import sys
import subprocess
if __name__ == '__main__':
    # Run the bundled fabfile that is installed next to the `fab` executable,
    # forwarding all extra CLI arguments to it.

    # Let the fabfile import project modules from the current directory.
    os.environ['PYTHONPATH'] = '.'

    fab_location = subprocess.check_output('which fab'.split(' '))
    # BUG FIX: on Python 3 check_output returns bytes; decode before mixing
    # with str (str.format on bytes would embed a literal "b'...'").
    if isinstance(fab_location, bytes):
        fab_location = fab_location.decode('utf-8')
    fab_location = fab_location.strip()

    fab_deploy_cmd = []
    fab_deploy_cmd.append(fab_location)
    fab_deploy_cmd.append('-f')
    fab_deploy_cmd.append('{0}/django_deploys_fabfile.py'.format(
        os.path.dirname(fab_location)))
    fab_deploy_cmd.extend(sys.argv[1:])

    # BUG FIX: pass the argument list directly instead of space-joining it and
    # invoking a shell via os.system — arguments containing spaces or shell
    # metacharacters are now forwarded verbatim.
    subprocess.call(fab_deploy_cmd)
|
{
"content_hash": "348a86d268e990eb08e843535025f90d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 32.9375,
"alnum_prop": 0.6432637571157496,
"repo_name": "rpq/django_deploys",
"id": "b83f6955f82d1c1d0dd829e9e78f47eaccc3cb3e",
"size": "576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/scripts/django_deploys.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8052"
}
],
"symlink_target": ""
}
|
from .utils.module_loading import import_module
from .conversation import Statement
class ChatBot(object):
    """A conversational bot assembled from three pluggable adapters:

    storage (persists statements), logic (selects the closest known match
    for an input) and io (reads input / writes responses).
    """

    def __init__(self, name, **kwargs):
        """
        :param name: display name for this bot instance
        :param kwargs: may override ``storage_adapter``, ``logic_adapter`` and
            ``io_adapter`` with dotted import paths; all kwargs are also
            forwarded to the storage adapter's constructor.
        """
        self.name = name

        # Dotted import paths of the three adapter classes (defaults below).
        storage_adapter = kwargs.get("storage_adapter",
            "chatterbot.adapters.storage.JsonDatabaseAdapter"
        )

        logic_adapter = kwargs.get("logic_adapter",
            "chatterbot.adapters.logic.ClosestMatchAdapter"
        )

        io_adapter = kwargs.get("io_adapter",
            "chatterbot.adapters.io.TerminalAdapter"
        )

        # Resolve each adapter class from its dotted path and instantiate it.
        StorageAdapter = import_module(storage_adapter)
        self.storage = StorageAdapter(**kwargs)

        LogicAdapter = import_module(logic_adapter)
        self.logic = LogicAdapter()

        IOAdapter = import_module(io_adapter)
        self.io = IOAdapter()

        # Statements handled this session (both inputs and responses).
        self.recent_statements = []

    def get_last_statement(self):
        """
        Returns the last statement that was issued to the chat bot.
        If there was no last statement then return None.
        """
        if len(self.recent_statements) == 0:
            return None
        return self.recent_statements[-1]

    def get_most_frequent_response(self, response_list):
        """
        Returns the statement with the greatest number of occurrences.
        """
        # Initialize the matching response to the first response.
        # This will be returned in the case that no match can be found.
        matching_response = response_list[0]

        # The statement passed in must be an existing statement within the database
        found_statement = self.storage.find(matching_response.text)
        occurrence_count = found_statement.get_occurrence_count()

        for statement in response_list:
            statement_data = self.storage.find(statement.text)
            statement_occurrence_count = statement_data.get_occurrence_count()

            # Keep the more common statement (ties keep the later statement).
            if statement_occurrence_count >= occurrence_count:
                matching_response = statement
                occurrence_count = statement_occurrence_count

        # Choose the most commonly occurring matching response
        return matching_response

    def get_first_response(self, response_list):
        """
        Return the first statement in the response list.
        """
        return response_list[0]

    def get_random_response(self, response_list):
        """
        Choose a random response from the selection.
        """
        from random import choice
        return choice(response_list)

    def get_response(self, input_text):
        """
        Return the bot's response based on the input.
        """
        input_statement = Statement(input_text)

        # If no responses exist, use the input text
        if not self.storage.count():
            response = Statement(input_text)
            self.storage.update(response)
            self.recent_statements.append(response)

            # Process the response output with the IO adapter
            response = self.io.process_response(response)

            return response

        all_statements = self.storage.filter()
        text_of_all_statements = []
        for statement in all_statements:
            text_of_all_statements.append(statement.text)

        # Select the closest match to the input statement
        closest_match = self.logic.get(
            input_text, text_of_all_statements
        )
        closest_match = self.storage.find(closest_match)

        # Check if the closest match is an exact match
        if closest_match == input_statement:
            input_statement = closest_match

        # Get all statements that are in response to the closest match
        response_list = self.storage.filter(
            in_response_to__contains=closest_match.text
        )

        if response_list:
            response = self.get_most_frequent_response(response_list)
            #response = self.get_first_response(response_list)
            #response = self.get_random_response(response_list)
        else:
            # Nothing is known to follow the match; fall back to any statement.
            response = self.storage.get_random()

        # Record that the input followed the previous statement in the session.
        previous_statement = self.get_last_statement()
        if previous_statement:
            input_statement.add_response(previous_statement)

        input_statement.update_occurrence_count()

        # Update the database after selecting a response
        self.storage.update(input_statement)
        self.recent_statements.append(response)

        # Process the response output with the IO adapter
        response = self.io.process_response(response)

        return response

    def get_input(self):
        """Read the next input statement via the IO adapter."""
        return self.io.process_input()

    def train(self, conversation):
        """
        Update or create the data for a statement.
        """
        for text in conversation:

            statement = self.storage.find(text)

            # Create the statement if a match was not found
            if not statement:
                statement = Statement(text)
            else:
                statement.update_occurrence_count()

            # Link each statement to the one preceding it in the conversation.
            previous_statement = self.get_last_statement()
            if previous_statement:
                statement.add_response(previous_statement)

            self.recent_statements.append(statement)
            self.storage.update(statement)
|
{
"content_hash": "3346b75de43089e5e383effc0e6393b1",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 83,
"avg_line_length": 31.672619047619047,
"alnum_prop": 0.6203721105055441,
"repo_name": "iAmMrinal0/ChatterBot",
"id": "36cfa41424ab6f26753de8d6e7e880228be1f6ec",
"size": "5321",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "chatterbot/chatterbot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "70077"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from builtins import open
from textwrap import dedent
from pants.base.build_environment import get_buildroot
from pants.fs.archive import ZIP
from pants.java.distribution.distribution import DistributionLocator
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
from pants_test.subsystem.subsystem_util import init_subsystem
class ShaderIntegrationTest(PantsRunIntegrationTest):
  """End-to-end tests for jar shading: build real testproject targets and
  inspect the produced jars' class names, bundle layout, and permissions."""

  def test_shader_project(self):
    """Test that the binary target at the ``shading_project`` can be built and run.

    Explicitly checks that the classes end up with the correct shaded fully qualified classnames.
    """
    shading_project = 'testprojects/src/java/org/pantsbuild/testproject/shading'
    self.assert_success(self.run_pants(['clean-all']))
    self.assert_success(self.run_pants(['binary', shading_project]))

    expected_classes = {
      # Explicitly excluded by a shading_exclude() rule.
      'org/pantsbuild/testproject/shadingdep/PleaseDoNotShadeMe.class',
      # Not matched by any rule, so stays the same.
      'org/pantsbuild/testproject/shading/Main.class',
      # Shaded with the target_id prefix, along with the default pants prefix.
      ('__shaded_by_pants__/org/pantsbuild/testproject/shadingdep/otherpackage/'
       'ShadeWithTargetId.class'),
      # Also shaded with the target_id prefix and default pants prefix, but for a different target
      # (so the target_id is different).
      ('__shaded_by_pants__/org/pantsbuild/testproject/shading/ShadeSelf.class'),
      # All these are shaded by the same shading_relocate_package(), which is recursive by default.
      '__shaded_by_pants__/org/pantsbuild/testproject/shadingdep/subpackage/Subpackaged.class',
      '__shaded_by_pants__/org/pantsbuild/testproject/shadingdep/SomeClass.class',
      '__shaded_by_pants__/org/pantsbuild/testproject/shadingdep/Dependency.class',
      # Shaded by a shading_relocate() that completely renames the package and class name.
      'org/pantsbuild/testproject/foo/bar/MyNameIsDifferentNow.class',
    }

    path = os.path.join('dist', 'shading.jar')
    init_subsystem(DistributionLocator)
    execute_java = DistributionLocator.cached(minimum_version='1.6').execute_java
    # Both the original and the fully-renamed main classes must be runnable.
    self.assertEqual(0, execute_java(classpath=[path],
                                     main='org.pantsbuild.testproject.shading.Main'))
    self.assertEqual(0, execute_java(classpath=[path],
                                     main='org.pantsbuild.testproject.foo.bar.MyNameIsDifferentNow'))

    # Collect every .class entry from the jar and compare against the
    # expected (post-shading) fully qualified names.
    received_classes = set()
    with temporary_dir() as tempdir:
      ZIP.extract(path, tempdir, filter_func=lambda f: f.endswith('.class'))
      for root, dirs, files in os.walk(tempdir):
        for name in files:
          received_classes.add(os.path.relpath(os.path.join(root, name), tempdir))

    self.assertEqual(expected_classes, received_classes)

  def test_no_deployjar_run(self):
    """Shading continues to work with --no-deployjar.

    All jars including the main jar as well as libraries will run through shader.
    """
    # The bundled binary prints a JSON map of class name -> (possibly shaded)
    # fully qualified name, which we parse and compare.
    self.assertEqual({
        'Gson': 'moc.elgoog.nosg.Gson',
        'Third': 'org.pantsbuild.testproject.shading.Third',
        'Second': 'hello.org.pantsbuild.testproject.shading.Second',
      },
      json.loads(self.bundle_and_run(
        'testprojects/src/java/org/pantsbuild/testproject/shading:third',
        'testprojects.src.java.org.pantsbuild.testproject.shading.third',
        bundle_jar_name='third',
        bundle_options=['--no-deployjar'],
        # The shaded jars are no longer symlinks to .pants.d, they are actual files.
        library_jars_are_symlinks=False,
        expected_bundle_content=[
          'libs/3rdparty.gson-0.jar',
          'libs/testprojects.src.java.org.pantsbuild.testproject.shading.third_lib-0.jar',
          'third.jar']).strip()))

  def test_deployjar_run(self):
    """Same expectations with --deployjar: everything is folded into one jar."""
    self.assertEqual({
        'Gson': 'moc.elgoog.nosg.Gson',
        'Third': 'org.pantsbuild.testproject.shading.Third',
        'Second': 'hello.org.pantsbuild.testproject.shading.Second',
      },
      json.loads(self.bundle_and_run(
        'testprojects/src/java/org/pantsbuild/testproject/shading:third',
        'testprojects.src.java.org.pantsbuild.testproject.shading.third',
        bundle_jar_name='third',
        bundle_options=['--deployjar'],
        expected_bundle_content=[
          'third.jar']).strip()))

  def test_shading_does_not_influence_permissions(self):
    """Building the same target with and without shading_rules must yield a
    jar with identical file permissions."""
    # Generate a throwaway java_library + jvm_binary inside the build root.
    with temporary_dir(root_dir=get_buildroot()) as tmpdir:
      tmp_name = os.path.basename(tmpdir)
      with open(os.path.join(tmpdir, 'Foo.java'), 'w+') as f:
        f.write('public class Foo {}\n')

      with open(os.path.join(tmpdir, 'BUILD.lib'), 'w+') as f:
        f.write(dedent("""
          java_library(name='lib',
            sources=['Foo.java'],
          )
        """))

      with open(os.path.join(tmpdir, 'BUILD'), 'w+') as f:
        f.write(dedent("""
          jvm_binary(name='{name}',
            basename='{name}',
            dependencies=[
              ':lib',
            ],
          )
        """).format(name=tmp_name))

      jar_path = os.path.join('dist', '{}.jar'.format(tmp_name))

      # Build a binary with no shading, and record the permissions.
      self.run_pants(['clean-all'])
      self.assert_success(self.run_pants(['binary', tmpdir]))
      permissions = os.stat(jar_path).st_mode

      # Rewrite the BUILD file with shading_rules added.
      with open(os.path.join(tmpdir, 'BUILD'), 'w') as f:
        f.write(dedent("""
          jvm_binary(name='{name}',
            basename='{name}',
            dependencies=[
              ':lib',
            ],
            shading_rules=[
              shading_relocate_package('org.foo.bar'),
            ],
          )
        """).format(name=tmp_name))

      # Build the binary again with shading; permissions shouldn't be different.
      self.run_pants(['clean-all'])
      os.remove(jar_path)
      self.assert_success(self.run_pants(['binary', tmpdir]))
      self.assertEqual(permissions, os.stat(jar_path).st_mode)
|
{
"content_hash": "20c6f656bc4c23d939cc95bc67af0b39",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 102,
"avg_line_length": 43.32867132867133,
"alnum_prop": 0.6528405422853454,
"repo_name": "twitter/pants",
"id": "effcb2e1efd81f706c6f63abd2ea5374262ea298",
"size": "6343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/subsystems/test_shader_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5639"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "85294"
},
{
"name": "Java",
"bytes": "498956"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "6700799"
},
{
"name": "Rust",
"bytes": "765598"
},
{
"name": "Scala",
"bytes": "89346"
},
{
"name": "Shell",
"bytes": "94395"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``surface.visible`` property
    (accepts True, False or "legendonly")."""

    def __init__(self, plotly_name="visible", parent_name="surface", **kwargs):
        # Pop validator settings out of kwargs so overrides win over defaults.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        values = kwargs.pop("values", [True, False, "legendonly"])
        super(VisibleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
{
"content_hash": "db6bcec23b85de6787bdf92f4f6dabce",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 39.61538461538461,
"alnum_prop": 0.6058252427184466,
"repo_name": "plotly/python-api",
"id": "a4ef75a333189a0d1ba62e361ae3920ce0150aac",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/surface/_visible.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from django.db import models
class Konkurencja(models.Model):
    """A competition ('konkurencja'), identified by its full name."""
    # Full competition name; also the primary key.
    Nazwa = models.CharField(max_length=20,primary_key=True,unique=True,default='')
    # Short (max 4 chars) unique code for the competition.
    Kon = models.CharField(max_length=4,primary_key=False,unique=True,default='')
    class Meta:
        verbose_name_plural = 'Konkurencje'
        ordering = ('Nazwa',)
    def __unicode__(self):
        # Python 2 / legacy-Django string representation.
        return self.Nazwa
class Kategoria(models.Model):
    """A category ("kategoria"), keyed by its short code."""

    # Short category code (max 4 chars); primary key.
    Kat = models.CharField(max_length=4,primary_key=True,unique=True,default='')
    class Meta:
        verbose_name_plural = 'Kategorie'
        ordering = ('Kat',)
    def __unicode__(self):
        return self.Kat
class Uzytkownik(models.Model):
    """An application user ("użytkownik"): a trainer (T) or a judge (S)."""

    # First and last name.
    Imie = models.CharField(max_length=20,default='')
    Nazwisko = models.CharField(max_length=30,default='')
    # Login name; primary key.
    login = models.CharField(max_length=30,primary_key=True,unique=True)
    # 40-character string of 0/1 flags -- presumably one flag per
    # permission; TODO confirm the flag layout against the views.
    uprawnienia = models.CharField(max_length=40,primary_key=False,default='0000000000000000000000000000000000000000')
    # Role: 'T' = trainer ("Trener"), 'S' = judge ("Sędzia"); defaults to judge.
    TS = models.CharField(max_length=3,default='S',choices=(('T','Trener'),('S','Sędzia')))
    # Team name; may be blank (e.g. for judges).
    Team = models.CharField(max_length=20,primary_key=False,blank=True)
    class Meta:
        verbose_name_plural = 'Użytkownicy'
        ordering = ('login',)
    def __unicode__(self):
        return self.login
class Druzyna(models.Model):
    """A team ("drużyna") with one trainer, competing in one competition/category."""

    # Team name; primary key.
    Nazwa = models.CharField(max_length=20,primary_key=True,unique=True,default='')
    # Exactly one trainer per team (and vice versa).
    Trener = models.OneToOneField(Uzytkownik,primary_key=False)
    # Competition and category may be unset while the team is being registered.
    Konkurencja = models.ForeignKey(Konkurencja,primary_key=False,null=True,blank=True)
    Kategoria = models.ForeignKey(Kategoria,primary_key=False,null=True,blank=True)
    # Scores with two decimal places; presumably Wynik_K is the competition
    # score and Wynik_S the spontaneous score -- TODO confirm.
    Wynik_K = models.DecimalField(default=0.0,max_digits=4,decimal_places=2)
    Wynik_S = models.DecimalField(default=0.0,max_digits=4,decimal_places=2)
    class Meta:
        verbose_name_plural = 'Drużyny'
        # Highest Wynik_S first.
        ordering = ('-Wynik_S',)
    def __unicode__(self):
        return self.Nazwa
class Uczestnik(models.Model):
    """A team participant ("uczestnik")."""

    ID = models.AutoField(primary_key=True,unique=True)
    # The team this person belongs to.
    Druzyna = models.ForeignKey(Druzyna,primary_key=False)
    Imie = models.CharField(max_length=20,default='')
    Nazwisko = models.CharField(max_length=30,default='')
    # Whether this participant is the team leader.
    Lider = models.BooleanField(default=False)
    class Meta:
        verbose_name_plural = 'Uczestnicy'
        ordering = ('Nazwisko','Imie',)
    def __unicode__(self):
        return self.Imie + ' ' + self.Nazwisko
class Ocena(models.Model):
    """A score ("ocena") given to a team by a judge on a given date."""

    ID = models.AutoField(primary_key=True,unique=True)
    Druzyna = models.ForeignKey(Druzyna,primary_key=False)
    # The judge who awarded the score.
    Sedzia = models.ForeignKey(Uzytkownik,primary_key=False)
    # Date the score was given.
    Czas = models.DateField()
    # True when this is a "spontaneous" score rather than one for the
    # team's registered competition (see __unicode__ below).
    Spontan = models.BooleanField(default=False)
    # Integer score 0-10.  NOTE: the field deliberately shares the class name.
    Ocena = models.IntegerField(choices=((0,'0'),(1,'1'),(2,'2'),(3,'3'),(4,'4'),(5,'5'),(6,'6'),(7,'7'),(8,'8'),(9,'9'),(10,'10')))
    class Meta:
        verbose_name_plural = 'Oceny'
        ordering = ('Czas','Druzyna',)
    def __unicode__(self):
        # Label spontaneous scores explicitly; otherwise show the
        # team's registered competition.
        if self.Spontan:
            kon = 'Spontan'
        else:
            kon = str(self.Druzyna.Konkurencja)
        return str(self.Druzyna) + ' - ' + kon + ': ' + str(self.Ocena) + ' (' + str(self.Sedzia) + ')'
|
{
"content_hash": "40bf519138c20a7bf7eb1a64fdc9aefa",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 132,
"avg_line_length": 36.22222222222222,
"alnum_prop": 0.6343558282208589,
"repo_name": "superdyzio/PWR-Stuff",
"id": "63052b327985c018a1a1522593eab465a3aea926",
"size": "3279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIR-ARR/Bazy Danych/odyseja/oceny/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "17829"
},
{
"name": "Batchfile",
"bytes": "1042"
},
{
"name": "C",
"bytes": "2403055"
},
{
"name": "C#",
"bytes": "625528"
},
{
"name": "C++",
"bytes": "3066245"
},
{
"name": "CMake",
"bytes": "983251"
},
{
"name": "CSS",
"bytes": "218848"
},
{
"name": "Common Lisp",
"bytes": "378578"
},
{
"name": "HTML",
"bytes": "4999679"
},
{
"name": "Java",
"bytes": "475300"
},
{
"name": "JavaScript",
"bytes": "266296"
},
{
"name": "M",
"bytes": "2385"
},
{
"name": "M4",
"bytes": "3010"
},
{
"name": "Makefile",
"bytes": "3734730"
},
{
"name": "Matlab",
"bytes": "160418"
},
{
"name": "OCaml",
"bytes": "2021"
},
{
"name": "PHP",
"bytes": "10629"
},
{
"name": "Perl",
"bytes": "7551"
},
{
"name": "PowerShell",
"bytes": "31323"
},
{
"name": "Python",
"bytes": "607184"
},
{
"name": "QMake",
"bytes": "1211"
},
{
"name": "Scala",
"bytes": "4781"
},
{
"name": "Shell",
"bytes": "1550640"
},
{
"name": "Tcl",
"bytes": "4143"
},
{
"name": "q",
"bytes": "1050"
}
],
"symlink_target": ""
}
|
from captcha.fields import ReCaptchaField
from django import forms
from django.conf import settings
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.urls import reverse
from allauth.account.forms import (
ChangePasswordForm as AllauthChangePasswordForm,
LoginForm as AllauthLoginForm,
ResetPasswordForm as AllauthResetPasswordForm,
SignupForm as AllauthSignupForm,
)
from allauth.account.models import EmailAddress
from allauth.account.utils import filter_users_by_email
from allauth.socialaccount.forms import SignupForm as AllauthSocialSignupForm
from .models import Member
def _clean_password(child_class, self_instance, password_field_name):
    """
    Shared password validator enforcing the configured minimum length.

    Delegates to the parent class's ``clean_<field>`` hook when one
    exists, then rejects passwords shorter than
    ``settings.ACCOUNT_PASSWORD_MIN_LENGTH``.
    """
    minimum_length = settings.ACCOUNT_PASSWORD_MIN_LENGTH

    # Also use parent method if django-user-accounts ever implements it.
    parent_cleaner = getattr(
        super(child_class, self_instance), "clean_" + password_field_name, None
    )
    if parent_cleaner is not None:
        parent_cleaner()

    password = self_instance.cleaned_data[password_field_name]
    if len(password) < minimum_length:
        raise forms.ValidationError(
            "Password should be at least " + "%d characters long." % minimum_length
        )
    return password
class MemberLoginForm(AllauthLoginForm):
    """
    A subclass of django-allauth's form that checks user is a Member.
    """

    authentication_fail_message = (
        "Your password didn't match the " + "username or email you provided."
    )

    def clean(self):
        """Check that the user is a Member."""
        cleaned_data = super().clean()

        # Bail out early if allauth's own validation already failed.
        if self._errors:
            return

        if self.user:
            try:
                Member.objects.get(user=self.user)
            except Member.DoesNotExist:
                # Valid credentials, but the account has no Member role.
                raise forms.ValidationError("This account doesn't have a Member role.")

        return cleaned_data
class MemberSignupForm(AllauthSignupForm):
    """
    A subclass of django-allauth's SignupForm with additions.

    A `terms` field is added for the Terms of Use checkbox, a `name` field
    is added to store a Member's username, and additional validation is
    added for passwords to impose a minimum length.
    """

    name = forms.CharField(max_length=30)
    terms = forms.BooleanField()

    class Meta:  # noqa: D101
        fields = "__all__"

    def clean_password(self):
        # NOTE(review): this passes AllauthSignupForm (the parent class)
        # rather than MemberSignupForm, so super() inside _clean_password
        # resolves one level higher than in the other callers of
        # _clean_password -- confirm this is intentional.
        return _clean_password(AllauthSignupForm, self, "password")
class ChangePasswordForm(AllauthChangePasswordForm):
    """
    A subclass of account's ChangePasswordForm that checks password length.
    """

    def clean_password_new(self):
        # Enforce ACCOUNT_PASSWORD_MIN_LENGTH on the new password.
        return _clean_password(ChangePasswordForm, self, "password_new")
class PasswordResetForm(forms.Form):
    """
    Ask for the new password twice and verify that both entries match.

    Matches our template better than the form class shipped by allauth.
    """

    password = forms.CharField(
        label="New Password", widget=forms.PasswordInput(render_value=False)
    )
    password_confirm = forms.CharField(
        label="New Password (again)", widget=forms.PasswordInput(render_value=False)
    )

    def clean(self):
        super().clean()

        # Field-level validation errors short-circuit the match check.
        if self._errors:
            return

        data = self.cleaned_data
        if ("password" in data and "password_confirm" in data
                and data["password"] != data["password_confirm"]):
            self.add_error(
                "password_confirm", "You must type the same password each time."
            )
        return data

    def clean_password(self):
        # Enforce ACCOUNT_PASSWORD_MIN_LENGTH on the new password.
        return _clean_password(PasswordResetForm, self, "password")
class MemberProfileEditForm(forms.ModelForm):
    """
    A form for editing a member's profile information.
    """

    class Meta:  # noqa: D101
        model = Member
        # Only the user-editable profile fields are exposed.
        fields = ("profile_image", "about_me")
class MemberContactSettingsEditForm(forms.ModelForm):
    """
    A form for editing a member's contact preferences.
    """

    class Meta:  # noqa: D101
        model = Member
        # Opt-in flags for the newsletter and user-to-user messages.
        fields = ("newsletter", "allow_user_messages")
class MemberChangeNameForm(forms.ModelForm):
    """
    A form for editing a member's name.
    """

    class Meta:  # noqa: D101
        model = Member
        fields = ("name",)
class ActivityMessageForm(forms.Form):
    """
    A form that allows a user to send a message to a project.
    """

    message = forms.CharField(widget=forms.Textarea)

    # Skip the captcha when running in local development.
    if not settings.DEBUG:
        captcha = ReCaptchaField()

    def send_mail(self, project_member_id, project):
        """Render and send the message to the project's contact address."""
        params = {
            "message": self.cleaned_data["message"],
            "project_member_id": project_member_id,
            "project": project,
        }

        plain = render_to_string("email/activity-message.txt", params)
        html = render_to_string("email/activity-message.html", params)

        # NOTE(review): this method shadows the name of Django's send_mail,
        # but the call below resolves to the module-level import (method
        # names are not in scope inside the body).  The hard-coded
        # no-reply@example.com sender looks like a placeholder -- confirm.
        send_mail(
            "Open Humans: message from project member {}".format(project_member_id),
            plain,
            "no-reply@example.com",
            [project.contact_email],
            html_message=html,
        )
class EmailUserForm(forms.Form):
    """
    A form that allows one user to email another user.
    """

    message = forms.CharField(widget=forms.Textarea)
    captcha = ReCaptchaField()

    def send_mail(self, sender, receiver):
        """Render and send the user-to-user message email."""
        params = {
            "message": self.cleaned_data["message"],
            "sender": sender,
            "receiver": receiver,
        }

        plain = render_to_string("email/user-message.txt", params)
        html = render_to_string("email/user-message.html", params)

        # The call resolves to Django's module-level send_mail, not this method.
        send_mail(
            "Open Humans: message from {} ({})".format(
                sender.member.name, sender.username
            ),
            plain,
            sender.member.primary_email.email,
            [receiver.member.primary_email.email],
            html_message=html,
        )
class ResetPasswordForm(AllauthResetPasswordForm):
    """
    Subclass django-allauths's ResetPasswordForm to capture the bit where we
    say what the return uri is.
    """

    def save(self, request, **kwargs):
        # Remember where the user should land after resetting; fall back
        # to the configured login redirect when nothing was stashed.
        next_url = request.session.pop("next_url", reverse(settings.LOGIN_REDIRECT_URL))
        ret = super().save(request, **kwargs)
        # Use the lookup method allauth uses to get relevant members.
        users = filter_users_by_email(ret)
        for user in users:
            # Persist the redirect on each matching member so the reset
            # confirmation view can send them back where they came from.
            member = Member.objects.get(user=user)
            member.password_reset_redirect = next_url
            member.save()
        return ret
class SocialSignupForm(AllauthSocialSignupForm):
    """
    Add in extra form bits that we need that allauth's social account signup
    form does not provide by default.
    """

    name = forms.CharField(
        max_length=60,
        widget=forms.TextInput(attrs={"placeholder": "Write your name here"}),
    )
    newsletter = forms.BooleanField(required=False)
    allow_contact = forms.BooleanField(required=False)
    terms = forms.BooleanField()

    def save(self, request):
        """
        Make sure to also populate the member table
        """
        user = super().save(request)
        # Mirror the extra signup fields onto a new Member row.
        member = Member(user=user)
        member.name = self.cleaned_data["name"]
        member.newsletter = self.cleaned_data["newsletter"]
        member.allow_user_messages = self.cleaned_data["allow_contact"]
        member.save()
        # And, populate the email field in the user table
        account_emailaddress = EmailAddress.objects.get(
            email=self.cleaned_data["email"]
        )
        user.email = account_emailaddress.email
        user.save()
        # We are trusting emails provided by Facebook and Google
        account_emailaddress.verified = True
        account_emailaddress.save()
        return user
|
{
"content_hash": "533fe502c898581866846f7ee28879e5",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 88,
"avg_line_length": 30.216730038022813,
"alnum_prop": 0.6398640996602492,
"repo_name": "PersonalGenomesOrg/open-humans",
"id": "770e7652214b5f9dbcc1f829ddd40d5d697bd1c2",
"size": "7947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "open_humans/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19829"
},
{
"name": "HTML",
"bytes": "296839"
},
{
"name": "JavaScript",
"bytes": "25622"
},
{
"name": "Python",
"bytes": "435909"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
}
|
""" routes.py - NumberToStringApi - A REST API for translating numbers to strings
"""
# Python stdlib
import ConfigParser
import json
import logging
import pprint as pp
import sys
# 3rd Party Modules
from flask import request
from flask import abort
from flask import make_response
# Codero Modules
import NumberToString
from NumberToString.tests.NumberToStringTests import list_installed_locales
class NumberToStringAPI():
    """ class NumberToStringAPI

        A small Flask REST API exposing number-to-string translation for
        every installed locale.

        Methods:
            __init__() - constructor
                - loads config values, sets up logging, loads one
                  NumberToString machine per installed locale
            loadRoutes()
                - Iterates over the routes table, adding each route to the flask context.
            get_translation()
                - Route handler translating a number for a given locale.
    """

    # Routes: maps a URL suffix to its HTTP methods and handler method name.
    routes = {
        'translation/<locale>/<int:number>' : {
            'methods' : ['GET'],
            'function' : 'get_translation',
        },
    }
    # END routes

    def __init__(self, flask, config_file='NumberToStringAPI.config'):
        """ __init__ - NumberToStringAPI constructor

            1. Save flask context        - self.flask
            2. load config file          - values saved to self.config dict
            3. Set up logging            - self.LOGGER
            4. load NumberToString machines, one per locale - self.machines
        """
        # 1. Save flask context
        self.flask = flask

        # 2. Load Config
        conf_parser = ConfigParser.SafeConfigParser()
        conf_parser.read(config_file)
        self.config = dict(conf_parser.items('NumberToString'))

        # 3. Set up logger
        logging.basicConfig(stream=sys.stderr,
            format='%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s')
        logging.getLogger("NumberToStringAPI").setLevel(logging.INFO)
        self.LOGGER = logging.getLogger("NumberToStringAPI")
        try:
            self.LOGGER.info("START NumberToStringAPI")
        except Exception as e:
            sys.stderr.write("Fatal Error: Could not instantiate LOGGER\n")
            raise

        # 4. Load a machine for each available locale
        self.config['locales'] = list_installed_locales()
        self.machines = {}
        for locale in self.config['locales']:
            self.machines[locale] = NumberToString.NumberToStringMachine(locale=locale)
    # END __init__()

    def loadRoutes(self):
        """ loadRoutes() - Iterate over the routes table, adding each route to the flask context.
        """
        self.LOGGER.info("Entered loadRoutes")
        # All routes are namespaced under /<api_name>/<api_version>/.
        api_route_prefix = '/%s/%s/' % (self.config['api_name'], self.config['api_version'])
        for route, attributes in self.routes.items():
            self.flask.add_url_rule(api_route_prefix + route,
                                    attributes['function'],
                                    getattr(self, attributes['function']),
                                    methods=attributes['methods'])
    # END loadRoutes()

    def get_translation(self, locale, number):
        """ get_translation() - handler for GET translation/<locale>/<number>

            URL parameters:
                string locale: locale to translate to, e.g. en_US
                int number: the number to translate
            GET variables:
                string pretty: 'true' to pretty-print the JSON inside <pre> tags

            @return Response: json body containing 'status' and 'result'
        """
        self.LOGGER.info("Entered get_translation")

        # Optional ?pretty=true toggles indented JSON wrapped in <pre> tags.
        pretty = request.args.get('pretty', False)
        if pretty and pretty == 'true':
            pretty_print = {'sort_keys' : True,
                            'indent' : 4,
                           }
            pretty_print_html_begin = '<pre>'
            pretty_print_html_end = '</pre>'
        else:
            pretty_print = {}
            pretty_print_html_begin = ""
            pretty_print_html_end = ""

        try:
            result = self.machines[locale].translate(number)
            status = 'SUCCESS'
        except KeyError as e:
            # No machine was loaded for this locale.
            self.LOGGER.error("%r", e)
            result = "invalid locale"
            status = 'FAILURE'
        except OverflowError as e:
            self.LOGGER.error("%r", e)
            # BUGFIX: the original message never filled the %d placeholder.
            result = "OverflowError: %d is too large to translate" % number
            status = 'FAILURE'
        except Exception as e:
            self.LOGGER.error("%r", e)
            result = "UNKNOWN ERROR: %r" % e
            status = 'FAILURE'

        response_str = pretty_print_html_begin + \
                       json.dumps({ 'status' : status,
                                    'result' : result,
                                  }, **pretty_print
                       ) + \
                       pretty_print_html_end
        response = make_response(response_str)
        # Allow cross-origin use of the API from any domain.
        response.headers['Access-Control-Allow-Origin'] = "*"
        return response
    # END get_translation()
# END class NumberToStringAPI
|
{
"content_hash": "310fca13cc68d564632c2f7d9642ee5c",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 124,
"avg_line_length": 33.951388888888886,
"alnum_prop": 0.5655553282879935,
"repo_name": "kedron/python-number-to-string",
"id": "970101abbabd271dc9caa9834d94602bea995f37",
"size": "4889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/flask/NumberToStringAPI.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Puppet",
"bytes": "1328"
},
{
"name": "Python",
"bytes": "19632"
},
{
"name": "Ruby",
"bytes": "1556"
},
{
"name": "Shell",
"bytes": "2462"
}
],
"symlink_target": ""
}
|
"""
Tv Shows related regex.
"""
from dsl import *
from refo import Plus, Question
from quepy.dsl import HasKeyword
from quepy.parsing import Lemma, Lemmas, Pos, QuestionTemplate, Particle
nouns = Plus(Pos("NN") | Pos("NNS") | Pos("NNP") | Pos("NNPS"))
class TvShow(Particle):
    """Particle matching a TV show name: optional determiner plus nouns."""

    regex = Plus(Question(Pos("DT")) + nouns)

    def interpret(self, match):
        # Build the DSL expression "a TV show named <matched tokens>".
        name = match.words.tokens
        return IsTvShow() + HasName(name)
class Actor(Particle):
    """Particle matching a person's name as a run of nouns."""

    regex = nouns

    def interpret(self, match):
        # Build the DSL expression "a person named <matched tokens>".
        name = match.words.tokens
        return IsPerson() + HasName(name)
class CastOfQuestion(QuestionTemplate):
    """
    Ex: "What is the cast of Friends?"
        "Who works in Breaking Bad?"
        "List actors of Seinfeld"
    """

    regex = (Question(Lemmas("what be") + Pos("DT")) +
             Lemma("cast") + Pos("IN") + TvShow() + Question(Pos("."))) | \
            (Lemmas("who works") + Pos("IN") + TvShow() +
             Question(Pos("."))) | \
            (Lemmas("list actor") + Pos("IN") + TvShow())

    def interpret(self, match):
        # Answer with the names of the people acting in the show's cast.
        cast = CastOf(match.tvshow)
        actor = IsPerson() + IsActorOf(cast)
        name = NameOf(actor)
        return name
class ListTvShows(QuestionTemplate):
    """
    Ex: "List TV shows"
    """

    regex = Lemmas("list tv show")

    def interpret(self, match):
        # Answer with the name of every known TV show.
        return NameOf(IsTvShow())
class EpisodeCountQuestion(QuestionTemplate):
    """
    Ex: "How many episodes does Seinfeld have?"
        "Number of episodes of Seinfeld"
    """

    regex = ((Lemmas("how many episode do") + TvShow() + Lemma("have")) |
             (Lemma("number") + Pos("IN") + Lemma("episode") +
              Pos("IN") + TvShow())) + \
            Question(Pos("."))

    def interpret(self, match):
        # Answer with the episode count of the matched show.
        number_of_episodes = NumberOfEpisodesIn(match.tvshow)
        return number_of_episodes
class ShowsWithQuestion(QuestionTemplate):
    """
    Ex: "List shows with Hugh Laurie"
        "In what shows does Jennifer Aniston appears?"
        "Shows with Matt LeBlanc"
    """

    regex = (Lemmas("list show") + Pos("IN") + Actor()) | \
            (Pos("IN") + (Lemma("what") | Lemma("which")) + Lemmas("show do") +
             Actor() + (Lemma("appear") | Lemma("work")) +
             Question(Pos("."))) | \
            ((Lemma("show") | Lemma("shows")) + Pos("IN") + Actor())

    def interpret(self, match):
        # Answer with the names of shows whose cast includes the actor.
        cast = HasActor(match.actor)
        show = IsTvShow() + HasCast(cast)
        show_name = NameOf(show)
        return show_name
class CreatorOfQuestion(QuestionTemplate):
    """
    Ex: "Who is the creator of Breaking Bad?"
        "Who are the creators of Friends?"
    """

    regex = Question(Lemmas("who be") + Pos("DT")) + \
            Lemma("creator") + Pos("IN") + TvShow() + Question(Pos("."))

    def interpret(self, match):
        # Answer with the name(s) of whoever created the matched show.
        return NameOf(CreatorOf(match.tvshow))
|
{
"content_hash": "0af0eac01a54daa3b2779f6cf2f4db81",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 79,
"avg_line_length": 27.12727272727273,
"alnum_prop": 0.5650134048257373,
"repo_name": "emoron/quepy",
"id": "1f7f09652a323a32928cff461ee963830b6ad63b",
"size": "3001",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "examples/freebase/freebase/tvshows.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "83138"
}
],
"symlink_target": ""
}
|
import os
import platform
import sys
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
from uuid import uuid4
from flask import current_app
from alerta.app import db
from alerta.database.base import Query
from alerta.utils.format import DateTime
from alerta.utils.response import absolute_url
JSON = Dict[str, Any]
class HeartbeatStatus(str, Enum):
    """Derived liveness state of a heartbeat (str-valued for JSON output)."""

    OK = 'ok'
    Slow = 'slow'
    Expired = 'expired'  # aka 'stale'
class Heartbeat:
    """A periodic liveness signal sent by an origin (service or script).

    Status is derived, not stored: ``expired`` when no heartbeat arrived
    within ``timeout`` seconds, ``slow`` when the create-to-receive latency
    exceeds ``max_latency`` milliseconds, otherwise ``ok``.
    """

    def __init__(self, origin: str = None, tags: List[str] = None, create_time: datetime = None, timeout: int = None, customer: str = None, **kwargs) -> None:
        # Attribute keys containing '.' or '$' are rejected (they are not
        # storable as document keys).
        if any(['.' in key for key in kwargs.get('attributes', dict()).keys()]) \
                or any(['$' in key for key in kwargs.get('attributes', dict()).keys()]):
            raise ValueError('Attribute keys must not contain "." or "$"')

        timeout = timeout if timeout is not None else current_app.config['HEARTBEAT_TIMEOUT']
        max_latency = current_app.config['HEARTBEAT_MAX_LATENCY']

        try:
            timeout = int(timeout)
        except ValueError:
            raise ValueError(f"Could not convert 'timeout' value of '{timeout}' to an integer")
        if timeout < 0:
            raise ValueError(f"Invalid negative 'timeout' value ({timeout})")

        try:
            max_latency = int(max_latency)
        except ValueError:
            # BUGFIX: this message previously reported the 'timeout' value.
            raise ValueError(f"Could not convert 'max_latency' value of '{max_latency}' to an integer")
        # BUGFIX: this previously re-checked 'timeout' instead of 'max_latency',
        # so a negative max_latency was silently accepted.
        if max_latency < 0:
            raise ValueError(f"Invalid negative 'max_latency' value ({max_latency})")

        self.id = kwargs.get('id') or str(uuid4())
        # Default origin: "<program name>/<hostname>".
        self.origin = origin or f'{os.path.basename(sys.argv[0])}/{platform.uname()[1]}'
        self.tags = tags or list()
        self.attributes = kwargs.get('attributes', None) or dict()
        self.event_type = kwargs.get('event_type', kwargs.get('type', None)) or 'Heartbeat'
        self.create_time = create_time or datetime.utcnow()
        self.timeout = timeout          # seconds until the heartbeat is 'expired'
        self.max_latency = max_latency  # milliseconds until it is 'slow'
        self.receive_time = kwargs.get('receive_time', None) or datetime.utcnow()
        # Latency between creation (sender clock) and receipt, in milliseconds.
        self.latency = int((self.receive_time - self.create_time).total_seconds() * 1000)
        self.since = datetime.utcnow() - self.receive_time
        self.customer = customer

    @property
    def status(self) -> str:
        """Current liveness state, derived from timeout and latency."""
        if self.since.total_seconds() > self.timeout:
            return HeartbeatStatus.Expired
        elif self.latency > self.max_latency:
            return HeartbeatStatus.Slow
        return HeartbeatStatus.OK

    @classmethod
    def parse(cls, json: JSON) -> 'Heartbeat':
        """Build a Heartbeat from an API request body, validating types."""
        if not isinstance(json.get('tags', []), list):
            raise ValueError('tags must be a list')
        if not isinstance(json.get('timeout') if json.get('timeout', None) is not None else 0, int):
            raise ValueError('timeout must be an integer')
        if not isinstance(json.get('attributes', {}), dict):
            raise ValueError('attributes must be a JSON object')
        if json.get('customer', None) == '':
            raise ValueError('customer must not be an empty string')

        return Heartbeat(
            id=json.get('id', None),
            origin=json.get('origin', None),
            tags=json.get('tags', list()),
            attributes=json.get('attributes', dict()),
            create_time=DateTime.parse(json['createTime']) if 'createTime' in json else None,
            timeout=json.get('timeout', None),
            customer=json.get('customer', None)
        )

    @property
    def serialize(self) -> Dict[str, Any]:
        """JSON-serializable representation used by the API."""
        return {
            'id': self.id,
            'href': absolute_url('/heartbeat/' + self.id),
            'origin': self.origin,
            'tags': self.tags,
            'attributes': self.attributes,
            'type': self.event_type,
            'createTime': self.create_time,
            'timeout': self.timeout,
            'maxLatency': self.max_latency,
            'receiveTime': self.receive_time,
            'customer': self.customer,
            'latency': self.latency,
            'since': self.since,
            'status': self.status
        }

    def __repr__(self) -> str:
        return 'Heartbeat(id={!r}, origin={!r}, create_time={!r}, timeout={!r}, customer={!r})'.format(
            self.id, self.origin, self.create_time, self.timeout, self.customer)

    @classmethod
    def from_document(cls, doc: Dict[str, Any]) -> 'Heartbeat':
        """Build a Heartbeat from a document-store record (dict)."""
        return Heartbeat(
            id=doc.get('id', None) or doc.get('_id'),
            origin=doc.get('origin', None),
            tags=doc.get('tags', list()),
            attributes=doc.get('attributes', dict()),
            event_type=doc.get('type', None),
            create_time=doc.get('createTime', None),
            timeout=doc.get('timeout', None),
            receive_time=doc.get('receiveTime', None),
            latency=doc.get('latency', None),
            since=doc.get('since', None),
            customer=doc.get('customer', None)
        )

    @classmethod
    def from_record(cls, rec) -> 'Heartbeat':
        """Build a Heartbeat from a SQL row (attribute-accessed record)."""
        return Heartbeat(
            id=rec.id,
            origin=rec.origin,
            tags=rec.tags,
            attributes=dict(getattr(rec, 'attributes') or ()),
            event_type=rec.type,
            create_time=rec.create_time,
            timeout=rec.timeout,
            receive_time=rec.receive_time,
            latency=rec.latency,
            since=rec.since,
            customer=rec.customer
        )

    @classmethod
    def from_db(cls, r: Union[Dict, Tuple]) -> 'Heartbeat':
        """Dispatch on the database driver's return type (dict or tuple)."""
        if isinstance(r, dict):
            return cls.from_document(r)
        elif isinstance(r, tuple):
            return cls.from_record(r)

    # create/update a heartbeat
    def create(self) -> 'Heartbeat':
        return Heartbeat.from_db(db.upsert_heartbeat(self))

    # retrieve an heartbeat
    @staticmethod
    def find_by_id(id: str, customers: List[str] = None) -> Optional['Heartbeat']:
        return Heartbeat.from_db(db.get_heartbeat(id, customers))

    # search heartbeats
    @staticmethod
    def find_all(query: Query = None, page: int = 1, page_size: int = 1000) -> List['Heartbeat']:
        return [Heartbeat.from_db(heartbeat) for heartbeat in db.get_heartbeats(query, page, page_size)]

    @staticmethod
    def find_all_by_status(status: List[str] = None, query: Query = None, page: int = 1, page_size: int = 1000) -> List['Heartbeat']:
        return [Heartbeat.from_db(heartbeat) for heartbeat in db.get_heartbeats_by_status(status, query, page, page_size)]

    @staticmethod
    def count(query: Query = None) -> int:
        return db.get_heartbeats_count(query)

    # delete a heartbeat
    def delete(self) -> bool:
        return db.delete_heartbeat(self.id)
|
{
"content_hash": "abc75c4f8a124a3bf3e147b080f56ecb",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 158,
"avg_line_length": 38.37988826815643,
"alnum_prop": 0.5953420669577875,
"repo_name": "guardian/alerta",
"id": "5e7908fa6fc043698d21a44e1d0b21d0baa572e7",
"size": "6870",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alerta/models/heartbeat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5143"
},
{
"name": "JavaScript",
"bytes": "2971"
},
{
"name": "Makefile",
"bytes": "842"
},
{
"name": "Python",
"bytes": "355607"
},
{
"name": "Shell",
"bytes": "2090"
}
],
"symlink_target": ""
}
|
import binascii
import errno
import logging
import sys
import usb.core
import usb.util
from scapy.layers.bluetooth import *
from scapy.supersocket import SuperSocket
# See BT 4.2 Spec, Vol 4, Part B, "USB Transport Layer".
# Used for "Single Function Primary Controller" devices:
USB_DEVICE_CLASS_WIRELESS_CONTROLLER = 0xFF
USB_DEVICE_SUB_CLASS_RF_CONTROLLER = 0xBB
USB_DEVICE_PROTOCOL_BLUETOOTH = 0xBB
# Used for composite devices:
USB_DEVICE_CLASS_MISCELLANEOUS = 0xEF
USB_DEVICE_SUB_CLASS_COMMON_CLASS = 0x02
USB_DEVICE_PROTOCOL_IAD = 0x01
USB_ENDPOINT_HCI_CMD = 0x00
USB_ENDPOINT_HCI_EVT = 0x81
USB_HCI_CMD_REQUEST_PARAMS = {
"bmRequestType": 0x20, "bRequest": 0x00, "wValue": 0x00, "wIndex": 0x00
}
LOG = logging.getLogger("pybluetooth")
class PyUSBBluetoothUserSocketException(Exception):
    """Raised when a USB transfer on a pyUSB Bluetooth socket fails."""
    pass
class PyUSBBluetoothL2CAPSocket(SuperSocket):
    desc = "Read/write Bluetooth L2CAP with pyUSB"

    def __init__(self, pyusb_dev):
        # Placeholder: L2CAP over USB is not yet implemented.
        raise Exception("NYI")
class PyUSBBluetoothHCISocket(SuperSocket):
    """Scapy SuperSocket speaking HCI to a USB Bluetooth controller.

    Commands go out via a control transfer; events are read from the
    interrupt IN endpoint (USB_ENDPOINT_HCI_EVT).
    """

    desc = "Read/write Bluetooth HCI with pyUSB"

    def __init__(self, pyusb_dev):
        self.pyusb_dev = pyusb_dev

        # Drain any data that was already pending:
        while self.recv(timeout_secs=0.001):
            pass

    def __del__(self):
        # Always try to do a HCI Reset to stop any on-going
        # Bluetooth activity:
        try:
            self.hci_reset()
        except:
            pass
        # Release the device, so it can be claimed again immediately when
        # this object gets free'd.
        try:
            usb.util.dispose_resources(self.pyusb_dev)
        except:
            LOG.warn("Couldn't dispose %s" % self.pyusb_dev)
            pass

    def hci_reset(self):
        """Send an HCI Reset command to the controller."""
        self.send(HCI_Hdr() / HCI_Command_Hdr() / HCI_Cmd_Reset())

    def recv(self, x=512, timeout_secs=10.0):
        """Read one HCI event; return a scapy HCI_Hdr packet or None on timeout."""
        # FIXME: Don't know how many bytes to expect here,
        # using 512 bytes -- will this fly if there's another event right
        # after it? Or is each event guaranteed to be put in a USB packet of
        # its own?
        try:
            data_array = self.pyusb_dev.read(
                USB_ENDPOINT_HCI_EVT, 512, int(timeout_secs * 1000.0))
        except usb.core.USBError as e:
            # A timeout just means "no event yet"; anything else is fatal.
            if e.errno == errno.ETIMEDOUT:
                return None
            else:
                raise e
        data = ''.join([chr(c) for c in data_array])  # Ugh.. array return val
        data = "\4" + data  # Prepend H4 'Event' packet indicator
        scapy_packet = HCI_Hdr(data)
        LOG.debug("recv %s" % scapy_packet.lastlayer().summary())
        LOG.debug("recv bytes: " + binascii.hexlify(data))
        return scapy_packet

    def send(self, scapy_packet):
        """Send one HCI command packet via a control transfer.

        Raises:
            PyUSBBluetoothUserSocketException: if fewer bytes were
                transferred than the packet contains.
        """
        data = str(scapy_packet)
        LOG.debug("send %s" % scapy_packet.lastlayer().summary())
        LOG.debug("send bytes: " + binascii.hexlify(data))
        data = data[1:]  # Cut off the H4 'Command' packet indicator (0x02)
        sent_len = self.pyusb_dev.ctrl_transfer(
            data_or_wLength=data, **USB_HCI_CMD_REQUEST_PARAMS)
        l = len(data)
        if sent_len != l:
            raise PyUSBBluetoothUserSocketException(
                "Send failure. Sent %u instead of %u bytes" % (sent_len, l))
def find_all_bt_adapters():
    """Return the set of attached USB Bluetooth adapters (pyusb devices).

    A device matches when it advertises itself either as a single-function
    Bluetooth primary controller, or as a composite (IAD) device with a
    Bluetooth RF-controller interface; any CUSTOM_USB_DEVICE_MATCHER hits
    are included as well.
    """
    def bt_adapter_matcher(d):
        # Check if the device is a "Single Function Primary Controller":
        if (d.bDeviceClass == USB_DEVICE_CLASS_WIRELESS_CONTROLLER and
            d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_RF_CONTROLLER and
                d.bDeviceProtocol == USB_DEVICE_PROTOCOL_BLUETOOTH):
            return True

        # Check if it's a composite device:
        if not (d.bDeviceClass == USB_DEVICE_CLASS_MISCELLANEOUS and
                d.bDeviceSubClass == USB_DEVICE_SUB_CLASS_COMMON_CLASS and
                d.bDeviceProtocol == USB_DEVICE_PROTOCOL_IAD):
            return False

        # Composite device: look for a Bluetooth interface in any config.
        for cfg in d:
            bt_intf_descr = {
                "bInterfaceClass": USB_DEVICE_CLASS_WIRELESS_CONTROLLER,
                "bInterfaceSubClass": USB_DEVICE_SUB_CLASS_RF_CONTROLLER,
                "bInterfaceProtocol": USB_DEVICE_PROTOCOL_BLUETOOTH,
            }
            intf = usb.util.find_descriptor(cfg, **bt_intf_descr)
            if intf is not None:
                return True

        return False

    devs = set()
    matchers = [CUSTOM_USB_DEVICE_MATCHER, bt_adapter_matcher]
    for matcher in matchers:
        if not matcher:
            continue
        devs |= set(usb.core.find(find_all=True, custom_match=matcher))

    # Unfortunately, usb.core.Device doesn't implement __eq__(),
    # see https://github.com/walac/pyusb/issues/147.
    # So filter out dupes here:
    # Two device objects are duplicates when bus and address match;
    # keep only one of each pair.
    devs_deduped = set(devs)
    for d in devs:
        for dd in devs:
            if d == dd:
                continue
            if d not in devs_deduped:
                continue
            if d.bus == dd.bus and d.address == dd.address:
                devs_deduped.remove(dd)
    return devs_deduped
class PyUSBBluetoothNoAdapterFoundException(Exception):
    """Raised when no (usable) USB Bluetooth adapter is attached."""
    pass
def find_first_bt_adapter_pyusb_device_or_raise():
    """Return the first *usable* Bluetooth adapter as a pyusb device.

    Raises:
        PyUSBBluetoothNoAdapterFoundException: when no adapter is attached,
            or none of the attached ones can be configured and reset.
    """
    pyusb_devs = find_all_bt_adapters()
    if len(pyusb_devs) == 0:
        raise PyUSBBluetoothNoAdapterFoundException(
            "No Bluetooth adapters found!")

    def _is_usable_device(pyusb_dev):
        # An adapter counts as usable when it can be configured and
        # accepts an HCI Reset.
        try:
            pyusb_dev.set_configuration()
            PyUSBBluetoothHCISocket(pyusb_dev).hci_reset()
            return True
        except:
            return False

    # BUGFIX: materialize the filter result. On Python 3 filter() returns
    # an iterator, which supports neither len() nor indexing below; list()
    # is also needed because find_all_bt_adapters() returns a set.
    pyusb_devs = list(filter(_is_usable_device, pyusb_devs))
    if len(pyusb_devs) == 0:
        raise PyUSBBluetoothNoAdapterFoundException(
            "No Bluetooth *usable* adapters found!")
    if len(pyusb_devs) > 1:
        LOG.warn("More than 1 Bluetooth adapters found, "
                 "using the first one...")
    pyusb_dev = pyusb_devs[0]
    return pyusb_dev
def find_first_bt_adapter_pyusb_device():
    """Like the _or_raise variant, but returns None when no adapter is found."""
    try:
        return find_first_bt_adapter_pyusb_device_or_raise()
    except PyUSBBluetoothNoAdapterFoundException:
        return None
def has_bt_adapter():
    """Return True when at least one usable Bluetooth adapter is attached."""
    return find_first_bt_adapter_pyusb_device() is not None
def pebble_usb_class_matcher(d):
    """ USB device class matcher for Pebble's Test Automation dongles """
    # Vendor-specific (class, subclass, protocol) triple used by the dongles.
    PEBBLE_DESCRIPTOR = (0xFF, 0xBB, 0xBB)
    actual = (d.bDeviceClass, d.bDeviceSubClass, d.bDeviceProtocol)
    return actual == PEBBLE_DESCRIPTOR
CUSTOM_USB_DEVICE_MATCHER = pebble_usb_class_matcher
def set_custom_matcher(matcher_func):
    """Install a custom USB device matcher used by find_all_bt_adapters().

    BUGFIX: without the ``global`` declaration the assignment only bound a
    function-local name, so the module-level matcher was never replaced.
    """
    global CUSTOM_USB_DEVICE_MATCHER
    CUSTOM_USB_DEVICE_MATCHER = matcher_func
|
{
"content_hash": "53dfc99ab344cf85dd54888de5a752fb",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 78,
"avg_line_length": 31.669767441860465,
"alnum_prop": 0.6222646497283008,
"repo_name": "pebble/pybluetooth",
"id": "9a5c21c27496e7aa42b56d0d2cd5bf63ddb4ff6f",
"size": "6809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybluetooth/pyusb_bt_sockets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "44095"
}
],
"symlink_target": ""
}
|
"""This module contains static methods for validating different kinds of data.
"""
__copyright__ = 'Copyright (C) 2009, Purdue University'
__license__ = 'BSD'
__version__ = '#TRUNK#'
import sys
import cPickle
import datetime
import re
import IPy
import constants
import errors
import helpers_lib
from roster_core import punycode_lib
class DataValidation(object):
  def __init__(self, reserved_words, group_permissions):
    """Constructor.

    Inputs:
      reserved_words: iterable of words that must not appear in strings
      group_permissions: iterable of valid group permission names
    """
    self.reserved_words = reserved_words
    self.group_permissions = group_permissions
  def isUnicodeString(self, u_string):
    """Checks that a string is unicode.

    Inputs:
      u_string: unicode string

    Raises:
      ReservedWordError: Reserved word found, unable to complete request.

    Outputs:
      bool: bool if string or not
    """
    if( not isinstance(u_string, unicode) ):
      return False
    # Case-insensitive substring scan: a reserved word appearing anywhere
    # inside the string (not just as a whole word) triggers the error.
    for word in self.reserved_words:
      if( u_string.lower().find(word.lower()) != -1 ):
        raise errors.ReservedWordError('Reserved word %s found, unable '
                                       'to complete request' % word)
    return True
def isReservedWord(self, u_string):
"""Checks that a string is unicode. Ignores reserved words.
Inputs:
u_string: unicode string
Outputs:
bool: bool if string or not
"""
if( not isinstance(u_string, unicode) ):
return False
return True
def isGroupPermission(self, group_permission):
"""Checks to make sure that the string is a valid group permission.
Inputs:
group_permission: unicode string of a group permission
Outputs:
bool: if group permission is valid or not
"""
group_permissions_list = [
permission.lower() for permission in self.group_permissions]
if( self.isUnicodeString(group_permission) and
group_permission.lower() in group_permissions_list ):
return True
return False
def isAccessLevel(self, access_level):
"""Checks to make sure that the string is a valid access level.
Inputs:
access_level: unisgned int that is in constants.ACCESS_LEVELS
Outputs:
bool: if access level is valid or not
"""
if( self.isUnsignedInt(access_level) and access_level in
constants.ACCESS_LEVELS.values()):
return True
return False
def isIPv4IPAddress(self, ip_address):
"""Checks that a string is an ipv4 IP Address.
Inputs:
ip_address: string of an ipv4 ip address
Outputs:
bool: if string is valid ip address
"""
if( not isinstance(ip_address, basestring) or
re.search(r"\b(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\."
r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\."
r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\."
r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b",
ip_address) is None ):
return False
return True
def isIPv6IPAddress(self, ip_address):
"""Checks that a string is a fully enumerated ipv6 IP Address.
Inputs:
ip_address: string of ipv6 ip address
Outputs:
bool: if string is valid ip address
"""
if( not isinstance(ip_address, basestring) or
not ip_address.find('/') == -1 ):
return False
try:
ip = IPy.IP(ip_address)
except ValueError:
return False
if( not ip.strFullsize() == ip_address ):
return False
if( not str(ip.netmask()) == 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' or
not ip.version() == 6 ):
return False
return True
def isCIDRBlock(self, cidr_block):
"""Checks that a string is a CIDR block.
http://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing
Inputs:
cidr_block: string of CIDR block
Outputs:
bool: if it is valid CIDR block
"""
if( not isinstance(cidr_block, basestring) or
cidr_block.isdigit() ):
return False
try:
IPy.IP(cidr_block)
except ValueError:
return False
return True
def isIntBool(self, int_bool):
"""Checks that int_bool is only 1 or 0 and nothing else.
Inputs:
int_bool: 1 or 0
Outputs:
bool: if it is a valid int bool
"""
if( int_bool in (0, 1) and not isinstance(int_bool, bool) ):
return True
return False
def isUnsignedInt(self, unsigned_int):
"""Checks that unsigned_int is of int class and is 0 or higher.
Inputs:
unsigned_int: integer
Outputs:
bool: if it is a valid unsigned int
"""
if( (isinstance(unsigned_int, int) or isinstance(unsigned_int, long)) and
unsigned_int >= 0 and not isinstance(unsigned_int, bool) ):
return True
return False
def isTarget(self, target):
"""Checks that a target and it's components have the correct length
Inputs:
target: target string
Outputs:
bool: if it is a valid target"""
target = punycode_lib.Uni2Puny(unicode(target))
if( len(target) > 255 ):
return False
for component in target.split('.'):
if( len(component) > 63 ):
return False
return True
def isHostname(self, host_name):
"""Checks that is a unicode string and that is properly dotted.
Inputs:
host_name: string of properly dotted time stamp
Outputs:
bool: if it is a valid hostname
"""
if( host_name == '.' ):
return True
if( self.isUnicodeStringNoSpaces(host_name) and
host_name.endswith('.') and
host_name.split('.') > 2 and
not host_name.startswith('.') and
self.isTarget(host_name) ):
return True
return False
def isUnicodeStringNoSpaces(self, string):
"""Checks that string is unicode and contains no spaces
Inputs:
string: string to validate
Outputs:
bool: if it is a valid unicode string with no spaces
"""
if( self.isUnicodeString(string) and ' ' not in string ):
return True
return False
def isUnicodeString255(self, string):
"""Checks that is a unicode string and that is less than 256 characters
long
Inputs:
string: string to validate
Outputs:
bool: if it is a valid unicode string of correct length"""
if( self.isUnicodeString(string) and len(string) < 256 ):
return True
return False
def isDateTime(self, date_time):
"""Checks that is a unicode string and that is a valid time stamp.
Inputs:
date_time: string of date in format YYYY-MM-DD HH:MM:SS
Outputs:
bool: if it is a valid date
"""
if( isinstance(date_time, datetime.datetime) ):
return True
return False
def isPickleString(self, pickle_string):
"""Checks that the string can be unpickled.
Inputs:
pickle_string: string to be unpickled.
Outputs:
bool: if it is a valid pickle string
"""
try:
cPickle.loads(pickle_string)
except (cPickle.PickleError, TypeError):
return False
return True
def isUnixDirectory(self, directoryString):
"""Checks a unicode string for valid Unix directory format.
Inputs:
string: string to be checked
Outpus:
bool: if it is a valid Unix directory format
"""
if( self.isUnicodeString(directoryString) and
directoryString.startswith('/') and
directoryString.endswith('/') ):
return True
return False
def ListGroupPermissions(self):
"""Returns a list of group permissions pulled from the database
Outputs:
list: list of string group permissions
"""
return self.group_permissions
def ValidateRowDict(self, table_name, row_dict, none_ok=False,
all_none_ok=False):
"""Checks row dictionaries for correctness in reference to know data types
and column names in the coresponding table.
Input:
table_name: string of table name
row_dict: dict of row
none_ok: bool of allowance of None as a value in the dict
all_none_ok: bool of allowance of None as every value in the dict
Raises:
UnexpectedDataError: Missing key in dictionary
UnexpectedDataError: Dictionary has extra key that is not used.
FunctionError: No Function to check data type
UnexpectedDataError: Invalid data type
UnexpectedDataError: Need to fill out at least one value in dict
"""
main_dict = helpers_lib.GetRowDict(table_name)
for key in main_dict.iterkeys():
if( key not in row_dict ):
raise errors.UnexpectedDataError('Missing key %s in dictionary' % key)
for key, value in row_dict.iteritems():
if( key not in main_dict ):
raise errors.UnexpectedDataError('Dictionary has extra key that is not '
'used: %s' % key)
if( not 'is%s' % main_dict[key] in dir(self) ):
raise errors.FunctionError('No function to check data '
'type: %s' % main_dict[key])
if( not getattr(self, 'is%s' % main_dict[key])(value) ):
if( (not none_ok and not key.endswith('_id')) or
(none_ok and value is not None) ):
raise errors.UnexpectedDataError('Invalid data type %s for %s: %s' % (
main_dict[key], key, value))
if( none_ok and not all_none_ok ):
for value in row_dict.values():
if( value is not None ):
return
raise errors.UnexpectedDataError('Need to fill out at least one value '
'in dict')
|
{
"content_hash": "55befdb572952a7e1dc75511a1a7ec35",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 80,
"avg_line_length": 27.455072463768115,
"alnum_prop": 0.6245777027027027,
"repo_name": "stephenlienharrell/roster-dns-management",
"id": "2ab4b8bb54794201ea4760c4875b0087fc09a3ab",
"size": "11025",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roster-core/roster_core/data_validation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2339145"
},
{
"name": "Shell",
"bytes": "90"
}
],
"symlink_target": ""
}
|
import os
import shutil
import envoy
import tempfile
from django.conf import settings
from collections import defaultdict
from allmychanges.downloaders.utils import normalize_url
from allmychanges.utils import cd, do, log
def guess(source, discovered=None):
    """Probe whether `source` is a cloneable Mercurial repository.

    Performs a throw-away test clone; on success returns a defaultdict
    with a 'changelog' source entry (and 'params' username/repo when the
    URL yields them), otherwise an empty defaultdict.  The temporary
    clone directory is always removed.

    discovered: unused; kept for interface compatibility.  Bug fix: the
    original default was a mutable `{}`, which is shared across calls
    (the classic mutable-default pitfall) -- replaced with None.
    """
    with log.name_and_fields('vcs.hg'):
        result = defaultdict(dict)
        source, username, repo = normalize_url(source)
        path = ''
        try:
            path = download(source)
            # if everything is OK, start populating result
            result['changelog']['source'] = source
            if username and repo:
                result['params'].update(dict(username=username, repo=repo))
        except Exception:
            # Deliberate best-effort: failures here almost always mean
            # `hg clone` could not handle a strange URL, so report
            # "no match" via an empty result instead of raising.
            pass
        finally:
            if os.path.exists(path):
                shutil.rmtree(path)
        return result
def download(source, **params):
    """Clone the Mercurial repository at `source` into a fresh temp dir.

    The 'hg+' URL-scheme prefix is stripped before cloning.  Returns the
    path of the temporary clone directory (caller is responsible for
    removing it); raises RuntimeError if `hg clone` exits non-zero, in
    which case the temp dir is removed first.  `params` is accepted but
    unused here -- presumably for downloader-interface compatibility
    (TODO confirm against the other downloaders).
    """
    with log.name_and_fields('vcs.hg'):
        path = tempfile.mkdtemp(dir=settings.TEMP_DIR)
        url = source.replace('hg+', '')
        with cd(path):
            # TODO: make this configurable via an environment variable
            # Dead debug branch (never taken): simulates a slow clone by
            # sleeping and copying a canned local repository.
            if False:
                import time
                time.sleep(20)
                envoy.run('cp -r /app/fake/anyjson ./')
                return path
            response = do('hg clone {url} {path}'.format(url=url,
                                                         path=path),
                          timeout=60)
            if response.status_code != 0:
                # Clean up the temp dir before reporting the failure.
                if os.path.exists(path):
                    shutil.rmtree(path)
                raise RuntimeError('Bad status_code from hg clone: {0}. '
                                   'Mercurial\'s stderr: {1}'.format(
                                       response.status_code, response.std_err))
        return path
|
{
"content_hash": "4ec13e3c9a27f829db121524ea261218",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 75,
"avg_line_length": 32.266666666666666,
"alnum_prop": 0.5428719008264463,
"repo_name": "AllMyChanges/allmychanges.com",
"id": "ca26c6fae4359c484e3a432f72c557b1c75592a0",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allmychanges/downloaders/vcs/hg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "147634"
},
{
"name": "Dockerfile",
"bytes": "735"
},
{
"name": "Emacs Lisp",
"bytes": "905"
},
{
"name": "HTML",
"bytes": "96639"
},
{
"name": "JavaScript",
"bytes": "2645620"
},
{
"name": "Makefile",
"bytes": "7806"
},
{
"name": "Python",
"bytes": "752509"
},
{
"name": "Shell",
"bytes": "1426"
},
{
"name": "Stylus",
"bytes": "58519"
}
],
"symlink_target": ""
}
|
from io import StringIO
from mako.pygen import adjust_whitespace
from mako.pygen import PythonPrinter
from mako.testing.assertions import eq_
class GeneratePythonTest:
    """Tests for mako.pygen.PythonPrinter's indentation-aware code
    generation (writeline / write_indented_block).

    NOTE: the triple-quoted literals in these tests are
    whitespace-significant fixtures -- do not re-indent them.
    """
    def test_generate_normal(self):
        # writeline(None) pops one indentation level.
        stream = StringIO()
        printer = PythonPrinter(stream)
        printer.writeline("import lala")
        printer.writeline("for x in foo:")
        printer.writeline("print x")
        printer.writeline(None)
        printer.writeline("print y")
        assert (
            stream.getvalue()
            == """import lala
for x in foo:
    print x
print y
"""
        )
    def test_generate_adjusted(self):
        # write_indented_block normalizes the block's common leading
        # whitespace.
        block = """
        x = 5 +6
        if x > 7:
            for y in range(1,5):
                print "<td>%s</td>" % y
"""
        stream = StringIO()
        printer = PythonPrinter(stream)
        printer.write_indented_block(block)
        printer.close()
        # print stream.getvalue()
        assert (
            stream.getvalue()
            == """
x = 5 +6
if x > 7:
    for y in range(1,5):
        print "<td>%s</td>" % y
"""
        )
    def test_generate_combo(self):
        # Mixes writeline-driven indentation with an indented block; the
        # block is re-indented to the current writeline level.
        block = """
                x = 5 +6
                if x > 7:
                    for y in range(1,5):
                        print "<td>%s</td>" % y
                print "hi"
                print "there"
                foo(lala)
"""
        stream = StringIO()
        printer = PythonPrinter(stream)
        printer.writeline("import lala")
        printer.writeline("for x in foo:")
        printer.writeline("print x")
        printer.write_indented_block(block)
        printer.writeline(None)
        printer.writeline("print y")
        printer.close()
        # print "->" + stream.getvalue().replace(' ', '#') + "<-"
        eq_(
            stream.getvalue(),
            """import lala
for x in foo:
    print x
    x = 5 +6
    if x > 7:
        for y in range(1,5):
            print "<td>%s</td>" % y
    print "hi"
    print "there"
    foo(lala)
print y
""",
        )
    def test_multi_line(self):
        # Content inside a multi-line string literal must keep its
        # original (column-0) indentation.
        block = """
            if test:
                print ''' this is a block of stuff.
this is more stuff in the block.
and more block.
'''
                do_more_stuff(g)
"""
        stream = StringIO()
        printer = PythonPrinter(stream)
        printer.write_indented_block(block)
        printer.close()
        # print stream.getvalue()
        assert (
            stream.getvalue()
            == """
if test:
    print ''' this is a block of stuff.
this is more stuff in the block.
and more block.
'''
    do_more_stuff(g)
"""
        )
    def test_false_unindentor(self):
        # "elsemyvar" merely *starts with* the unindent keyword "else";
        # it must not be treated as an unindentor.
        stream = StringIO()
        printer = PythonPrinter(stream)
        for line in [
            "try:",
            "elsemyvar = 12",
            "if True:",
            "print 'hi'",
            None,
            "finally:",
            "dosomething",
            None,
        ]:
            printer.writeline(line)
        assert (
            stream.getvalue()
            == """try:
    elsemyvar = 12
    if True:
        print 'hi'
finally:
    dosomething
"""
        ), stream.getvalue()
    def test_backslash_line(self):
        # A trailing backslash continues the logical line; the
        # continuation stays with its statement's indentation.
        block = """
            # comment
    if test:
        if (lala + hoho) + \\
(foobar + blat) == 5:
            print "hi"
print "more indent"
"""
        stream = StringIO()
        printer = PythonPrinter(stream)
        printer.write_indented_block(block)
        printer.close()
        assert (
            stream.getvalue()
            == """
# comment
if test:
    if (lala + hoho) + \\
    (foobar + blat) == 5:
        print "hi"
    print "more indent"
"""
        )
class WhitespaceTest:
    """Tests for mako.pygen.adjust_whitespace, which strips a code
    block's common leading indentation.

    NOTE: the triple-quoted literals below are whitespace-significant
    fixtures -- do not re-indent them.
    """
    def test_basic(self):
        text = """
        for x in range(0,15):
            print x
        print "hi"
        """
        assert (
            adjust_whitespace(text)
            == """
for x in range(0,15):
    print x
print "hi"
"""
        )
    def test_blank_lines(self):
        text = """
    print "hi" # a comment
# more comments
    print g
"""
        assert (
            adjust_whitespace(text)
            == """
print "hi" # a comment
# more comments
print g
"""
        )
    def test_open_quotes_with_pound(self):
        # '#' inside an open triple-quoted string is not a comment.
        text = '''
    print """ this is text
    # and this is text
    # and this is too """
'''
        assert (
            adjust_whitespace(text)
            == '''
print """ this is text
    # and this is text
    # and this is too """
'''
        )
    def test_quote_with_comments(self):
        # Lines inside the ''' string keep their indentation; real
        # comment lines outside it are dedented.
        text = """
            print 'hi'
            # this is a comment
            # another comment
            x = 7 # someone's '''comment
            print '''
        there
        '''
            # someone else's comment
"""
        assert (
            adjust_whitespace(text)
            == """
print 'hi'
# this is a comment
# another comment
x = 7 # someone's '''comment
print '''
        there
        '''
# someone else's comment
"""
        )
    def test_quotes_with_pound(self):
        # A one-line triple-quoted string containing '#'.
        text = '''
       if True:
           """#"""
       elif False:
           "bar"
'''
        assert (
            adjust_whitespace(text)
            == '''
if True:
    """#"""
elif False:
    "bar"
'''
        )
    def test_quotes(self):
        text = """
        print ''' aslkjfnas kjdfn
askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
asdkfjnads kfajns '''
        if x:
            print y
"""
        assert (
            adjust_whitespace(text)
            == """
print ''' aslkjfnas kjdfn
askdjfnaskfd fkasnf dknf sadkfjn asdkfjna sdakjn
asdkfjnads kfajns '''
if x:
    print y
"""
        )
|
{
"content_hash": "a0ab4661b59999646d6c944999af5ef0",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 65,
"avg_line_length": 20.534296028880867,
"alnum_prop": 0.47134317862165964,
"repo_name": "sqlalchemy/mako",
"id": "8adc14263b408b26d76a4abb8562060ead6202b0",
"size": "5688",
"binary": false,
"copies": "9",
"ref": "refs/heads/main",
"path": "test/test_pygen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "61408"
},
{
"name": "Mako",
"bytes": "2482"
},
{
"name": "Python",
"bytes": "529246"
}
],
"symlink_target": ""
}
|
def check_imports(*names):
    """Verify that every named package is importable; if any are
    missing, print pip install instructions and exit."""
    not_found = []
    for package in names:
        try:
            __import__(package)
        except ImportError:
            not_found.append(package)
    if not_found:
        print(('\nauv-docker requires these packages: {}\n'
               + 'Install with "pip3 install {}"\n')
              .format(', '.join(not_found), ' '.join(not_found)))
        quit()
check_imports('docker', 'clize')
import os
from pathlib import Path
import socket
import subprocess
import time
import clize
import docker
from config import get_config
WORKSPACE_DIRECTORY=get_config("WORKSPACE_DIRECTORY")
CONTAINER_WORKSPACE_DIRECTORY=get_config("CONTAINER_WORKSPACE_DIRECTORY")
REPO_URL=get_config("GIT_REPO_URL")
BRANCH=get_config("BRANCH")
DOCKER_REPO=get_config("DOCKER_REPO")
DOCKER_REPO_JETSON=get_config("DOCKER_REPO_JETSON")
GROUP_ID=get_config("GROUP_ID")
AUV_ENV_ALIAS=get_config("AUV_ENV_ALIAS")
GUARD_DIRECTORY = WORKSPACE_DIRECTORY / ".guards"
REPO_PATH = WORKSPACE_DIRECTORY / "repo"
CONFIGS_DIRECTORY = WORKSPACE_DIRECTORY / "configs"
WORKTREES_DIRECTORY = WORKSPACE_DIRECTORY / "worktrees"
LOGS_DIRECTORY = WORKSPACE_DIRECTORY / "logs"
VIDEOS_DIRECTORY = WORKSPACE_DIRECTORY / "videos"
STORAGE_DIRECTORY = WORKSPACE_DIRECTORY / "container_storage"
NAME_CONFIG_PATH = CONFIGS_DIRECTORY / "name"
EMAIL_CONFIG_PATH = CONFIGS_DIRECTORY / "email"
CUAUV_CONTAINER_PREFIX = 'cuauv-workspace-'
client = docker.from_env()
def guarded_call(name, function, message=None):
    """
    Run `function` at most once, using a guard file named `name` under
    GUARD_DIRECTORY to remember that it already ran.

    name: guard-file name (also used in the default progress message)
    function: zero-argument callable to execute on first run
    message: optional progress message printed before the first run

    Bug fix: `message` used to be ignored entirely -- the default
    "Running <name>" line was printed only when *no* message was given
    (inverted condition), so the descriptive messages every caller
    passes were never shown.  Now the supplied message (or the default)
    is printed before the first run.
    """
    GUARD_DIRECTORY.mkdir(parents=True, exist_ok=True)
    guard_file = GUARD_DIRECTORY / name
    if not guard_file.exists():
        if message is None:
            message = "Running {}".format(name)
        print(message)
        function()
        guard_file.touch()
def remove_guard(name):
    """
    Delete the guard file created via guarded_call(), if it exists,
    so the guarded step will run again next time.
    """
    guard = GUARD_DIRECTORY / name
    if guard.exists():
        guard.unlink()
def get_worktree_guard(branch: str) -> str:
    """Name of the guard file recording that `branch`'s worktree was set up."""
    return "worktree_" + branch
def check_output(args, cwd):
    """Run `args` in directory `cwd` and return its stripped stdout as text."""
    raw = subprocess.check_output(args, cwd=cwd)
    return raw.decode("utf-8").strip()
def get_docker_name(branch: str, vehicle: bool):
    """Container name for `branch`'s workspace, or the single fixed
    name used for the on-vehicle container."""
    if vehicle:
        return "cuauv_vehicle"
    return "{}{}".format(CUAUV_CONTAINER_PREFIX, branch)
def get_containers(docker_name: str):
    """List running containers whose name matches `docker_name` exactly
    (the regex is anchored on both ends)."""
    name_filter = {"name": "^/{}$".format(docker_name)}
    return client.containers.list(filters=name_filter)
def init(*, on_vehicle=False, set_permissions=False):
    """
    Initialize the CUAUV workspaces filesystem structure. This should be run
    before any other workspace command.
    on_vehicle: If True, the workspace will be structured to be run
    directly on a vehicle.
    set_permissions: If True, apply shared-group ACLs (GID from
    config.py) to the workspace and the cloned repo.
    """
    # Each setup step is wrapped in guarded_call(), so re-running init
    # only performs the steps that have not completed before.
    def create_directories():
        # One-time creation of the full on-disk workspace layout.
        WORKSPACE_DIRECTORY.mkdir(exist_ok=True)
        GUARD_DIRECTORY.mkdir(exist_ok=True)
        WORKTREES_DIRECTORY.mkdir(exist_ok=True)
        LOGS_DIRECTORY.mkdir(exist_ok=True)
        VIDEOS_DIRECTORY.mkdir(exist_ok=True)
        CONFIGS_DIRECTORY.mkdir(exist_ok=True)
        STORAGE_DIRECTORY.mkdir(exist_ok=True)
        # Adds a user group to be shared both inside and outside the docker
        # file and changes the workspace directory group ownership
        if set_permissions:
            # getent exits 0 when the GID already exists on the host.
            group_exists = subprocess.run(
                ["getent", "group", str(GROUP_ID)],
                stdout=subprocess.PIPE,
                encoding="utf-8"
            )
            if group_exists.returncode == 0:
                print(("GID {} already exists on the system. Are you sure you "
                       "want the workspace owned by this GID? [y/n]").format(str(GROUP_ID)))
                if input() != "y":
                    # NOTE(review): bare `raise Exception` aborts init with
                    # no message -- consider a more descriptive error.
                    raise Exception
            # Default ACL (-d) covers future files; the plain ACL covers
            # everything that already exists.
            subprocess.run(
                ["setfacl", "-dR", "-m", "g:{}:rwX".format(str(GROUP_ID)), str(WORKSPACE_DIRECTORY)],
                check=True
            )
            subprocess.run(
                ["setfacl", "-R", "-m", "g:{}:rwX".format(str(GROUP_ID)), str(WORKSPACE_DIRECTORY)],
                check=True
            )
            print(("The workspace is now owned by GID {}. To use permissions, "
                   "create a group with that GID and add yourself to it.").format(str(GROUP_ID)))
    guarded_call(
        "create_workspace_directory",
        create_directories,
        "Creating CUAUV Docker Workspace directory"
    )
    if not on_vehicle:
        def get_initial_configs():
            # Prompt for the git identity; stored under configs/ and
            # applied to the cloned repo by clone_repo() below.
            confirmed = False
            while not confirmed:
                name = input("Enter your name: ")
                email = input("Enter your email (including @cornell.edu): ")
                print()
                print("Name: {}".format(name))
                print("Email: {}".format(email))
                confirmed = input("Is this information correct? [yn]") == "y"
            NAME_CONFIG_PATH.write_text(name)
            EMAIL_CONFIG_PATH.write_text(email)
        guarded_call(
            "get_initial_configs",
            get_initial_configs,
            "Prompting user for initial configurations"
        )
    def clone_repo():
        cwd = os.path.dirname(os.path.realpath(__file__))
        # If this script already lives inside a checkout of the main
        # repo, reuse (move) that checkout instead of cloning again.
        try:
            current_git_repo = check_output(["git", "rev-parse", "--show-toplevel"], cwd)
        except subprocess.CalledProcessError:
            current_git_repo = None
        if current_git_repo and check_output(["git", "remote", "get-url", "origin"], cwd) == REPO_URL:
            # If already in main repository, then move it to repo path
            subprocess.run(
                ["mv", current_git_repo, str(REPO_PATH)],
                check=True
            )
            print("mv {} {}".format(current_git_repo, str(REPO_PATH)))
        else:
            # Otherwise, clone main repository
            subprocess.run(
                ["git", "clone", REPO_URL, str(REPO_PATH)],
                check=True
            )
        if not on_vehicle:
            # Configure user name and email for git in repo directory
            subprocess.run(
                ["git", "config", "user.name", "\"{}\"".format(NAME_CONFIG_PATH.read_text())],
                cwd=str(REPO_PATH),
                check=True
            )
            subprocess.run(
                ["git", "config", "user.email", "\"{}\"".format(EMAIL_CONFIG_PATH.read_text())],
                cwd=str(REPO_PATH),
                check=True
            )
        if set_permissions:
            # Same shared-group ACLs as above, applied to the checkout.
            subprocess.run(
                ["setfacl", "-dR", "-m", "g:{}:rwX".format(str(GROUP_ID)), str(REPO_PATH)],
                check=True
            )
            subprocess.run(
                ["setfacl", "-R", "-m", "g:{}:rwX".format(str(GROUP_ID)), str(REPO_PATH)],
                check=True
            )
    guarded_call(
        "clone_repo",
        clone_repo,
        "Cloning repo"
    )
    def set_git_configs():
        # Repo-local git defaults: simple push, rebase-on-pull with
        # autostash.
        subprocess.run(
            ["git", "config", "push.default", "simple"],
            cwd=str(REPO_PATH),
            check=True
        )
        subprocess.run(
            ["git", "config", "pull.rebase", "true"],
            cwd=str(REPO_PATH),
            check=True
        )
        subprocess.run(
            ["git", "config", "rebase.autostash", "true"],
            cwd=str(REPO_PATH),
            check=True
        )
    guarded_call(
        "set_git_configs",
        set_git_configs,
        "Setting Git configs"
    )
def create_worktree(branch=BRANCH, print_help=True, *, b=False):
    """
    Sets up a worktree directory for a branch.
    branch: Branch workspace to use.
    print_help: Defaults to True. If False, will not print help afterwards.
    b: True to create and push a new branch.
    """
    # If using master branch, then simply symlink to the existing clone
    branch_directory = WORKTREES_DIRECTORY / branch
    def _create_worktree():
        if branch == "master":
            def symlink_master():
                branch_directory.symlink_to("../repo", target_is_directory=True)
            guarded_call("symlink_master", symlink_master, "Symlinking workspace for master")
        else:
            if b:
                # New branch: add the worktree on a fresh local branch
                # and publish it upstream.
                subprocess.run(
                    ["git", "worktree", "add", str(branch_directory), "-b", branch],
                    cwd=str(REPO_PATH),
                    check=True,
                )
                subprocess.run(
                    ["git", "push", "-u", "origin", branch],
                    cwd=str(REPO_PATH),
                    check=True,
                )
            else:
                # Existing branch: fetch it into a local branch, add the
                # worktree, and set the upstream tracking branch.
                subprocess.run(
                    ["git", "fetch", "origin", "{}:{}".format(branch, branch), "--"],
                    cwd=str(REPO_PATH),
                    check=True,
                )
                subprocess.run(
                    ["git", "worktree", "add", str(branch_directory), branch],
                    cwd=str(REPO_PATH),
                    check=True,
                )
                subprocess.run(
                    ["git", "branch", "-u", "origin/{}".format(branch), branch],
                    cwd=str(REPO_PATH),
                    check=True,
                )
            # Change git paths to relative paths so they work inside the docker container
            (branch_directory / ".git").write_text("gitdir: ../../repo/.git/worktrees/{}".format(branch))
            (REPO_PATH / ".git" / "worktrees" / branch / "gitdir").write_text("../worktrees/{}".format(branch))
    guarded_call(
        get_worktree_guard(branch),
        _create_worktree,
        "Creating workspace for branch {}".format(branch)
    )
    if print_help:
        print('\nYou can now run this command to move to the worktree:\n\n' +
              'cd {}\n\n'.format(branch_directory) +
              'Add this line to your .bashrc or .zshrc for a shortcut:\n\n' +
              'ccd() {\n' +
              '    $HOME/cuauv/workspaces/repo/docker/auv-docker.py create-worktree $1 False\n' +
              '    cd $HOME/cuauv/workspaces/worktrees/$1\n' +
              '}\n'
              )
def start(*, branch:"b"=BRANCH, gpu=True, env=None, vehicle=False):
"""
Starts a Docker container with the proper configuration. This does not
currently recreate a container if different configurations options are
passed.
branch: Branch workspace to use.
gpu: If True, the GPU device will be mounted into the container and
all windows will be rendered directly to the host X server (bypassing SSH X
forwarding).
env: Extra environment variables to inject into the container.
vehicle: Indicates the container should be configured to run
directly on a vehicle.
"""
create_worktree(branch, print_help=False)
docker_name = get_docker_name(branch, vehicle)
running = get_containers(docker_name)
if not running:
print("Starting new container")
software_path = CONTAINER_WORKSPACE_DIRECTORY / "worktrees" / branch
docker_args = {
"image": "{}:{}".format(DOCKER_REPO, branch),
"command": "/sbin/my_init",
"user": "root",
"detach": True,
"environment": {
"software_path": str(software_path),
"CUAUV_SOFTWARE": "{}/".format(software_path),
"CUAUV_LOCALE": "simulator",
"CUAUV_VEHICLE": "odysseus",
"CUAUV_VEHICLE_TYPE": "mainsub",
"CUAUV_CONTEXT": "development",
"VISION_TEST_PATH": str(CONTAINER_WORKSPACE_DIRECTORY / "videos"),
"CUAUV_LOG": str(CONTAINER_WORKSPACE_DIRECTORY / "logs"),
"TERM": "xterm",
"AUV_ENV_ALIAS": AUV_ENV_ALIAS,
},
"hostname": docker_name,
"name": docker_name,
"remove": True,
"volumes": {
str(WORKSPACE_DIRECTORY): {
"bind": str(CONTAINER_WORKSPACE_DIRECTORY),
"mode": "rw",
},
},
"devices": [],
"shm_size": "7G",
"ports": {},
"security_opt": ["seccomp=unconfined"], # for gdb
}
if gpu:
subprocess.run(["xhost", "+local:"])
docker_args["environment"]["DISPLAY"] = os.getenv("DISPLAY")
docker_args["volumes"]["/tmp/.X11-unix/X0"] = {
"bind": "/tmp/.X11-unix/X0",
"mode": "rw",
}
docker_args["devices"] += ["/dev/dri:/dev/dri:rw"]
if vehicle:
docker_args["image"] = "{}:{}".format(DOCKER_REPO_JETSON, branch)
docker_args["volumes"]["/dev"] = {
"bind": "/dev",
"mode": "rw",
}
docker_args["volumes"]["/home/software/sdcard"] = {
"bind": "/home/software/sdcard",
"mode": "rw",
}
nv_path = str(Path("~/.nv").expanduser())
docker_args["volumes"][nv_path] = {
"bind": "/home/software/.nv",
"mode": "rw",
}
docker_args["network_mode"] = "host"
docker_args["privileged"] = True
docker_args["hostname"] = env["CUAUV_VEHICLE"]
if env:
docker_args["environment"].update(env)
container = client.containers.run(**docker_args)
time.sleep(5)
env_parts = ["export {}={}".format(key, value) for key, value in docker_args["environment"].items()]
envs = "bash -c 'printf \"{}\\n\" > /home/software/.env'".format("\\n".join(env_parts))
container.exec_run(envs, user="software")
container.exec_run("sudo groupadd -g {} cuauv".format(str(GROUP_ID)))
container.exec_run("sudo usermod -aG {} software".format(str(GROUP_ID)))
container.exec_run("chmod +x /home/software/.env", user="software")
container.exec_run("rm /home/software/.zshrc_user", user="software")
container.exec_run("ln -s {} /home/software/.zshrc_user".format(software_path / "install/zshrc"), user="software")
container.exec_run("sudo rmdir /home/software/cuauv/software", user="software")
container.exec_run("sudo ln -s {} /home/software/cuauv/software".format(software_path), workdir="/", user="software")
else:
container = running[0]
return container
def cdw(branch=BRANCH):
    """
    Enter the workspace container for a branch, creating and starting a
    workspace/container as needed.
    branch: Branch workspace to enter (and possibly create/start).
    """
    container = start(branch=branch)
    # Resolve the container's bridge-network IP and SSH straight in.
    inspection = client.api.inspect_container(container.id)
    ip = inspection["NetworkSettings"]["Networks"]["bridge"]["IPAddress"]
    ssh_command = [
        "ssh",
        "software@{}".format(ip),
        "-p", "22",
        "-A",
        "-o", "StrictHostKeyChecking no",
        "-o", "UserKnownHostsFile=/dev/null",
        "-o", "ForwardX11Timeout 596h",
    ]
    subprocess.run(ssh_command)
def stop(branch=BRANCH, vehicle=False):
    """
    Stop a running container for a branch.
    branch: Branch workspace to clean up.
    vehicle: Whether running on the vehicle.
    """
    docker_name = get_docker_name(branch, vehicle)
    matches = get_containers(docker_name)
    if matches:
        matches[0].stop()
    else:
        print("No container for branch={}, vehicle={}".format(branch, vehicle))
def destroy(branch=BRANCH, vehicle=False):
    """
    Remove a container for a branch and clean up the worktree for the branch.
    branch: Branch workspace to clean up.
    vehicle: Whether running on the vehicle.
    """
    # Remove container
    docker_name = get_docker_name(branch, vehicle)
    matches = get_containers(docker_name)
    if matches:
        matches[0].remove(force=True)
        print("Removed container for branch={}, vehicle={}".format(branch, vehicle))
    else:
        print("No container for branch={}, vehicle={}".format(branch, vehicle))
    # Delete image for branch
    image_name = "{}:{}".format(DOCKER_REPO, branch)
    try:
        client.images.remove(image_name)
    except docker.errors.ImageNotFound:
        print("No image {}".format(image_name))
    else:
        print("Deleted image {}".format(image_name))
    # Delete worktree
    subprocess.run(
        ["rm", "-rf", branch],
        cwd=str(WORKTREES_DIRECTORY),
        check=True,
    )
    subprocess.run(
        ["git", "worktree", "prune"],
        cwd=str(REPO_PATH),
        check=True,
    )
    print("Deleted worktree {}/{}".format(WORKTREES_DIRECTORY, branch))
    # Remove guard file created for the worktree branch
    remove_guard(get_worktree_guard(branch))
def vehicle(*, branch:"b"="master", vehicle:"v"=None):
"""
Starts a container on a vehicle.
branch: Branch workspace to be used. You probably shouldn't change this...
"""
if vehicle is None:
vehicle = socket.gethostname()
vehicle_types = {
"odysseus": "mainsub",
"ajax": "minisub",
}
vehicle_type = vehicle_types[vehicle]
env = {
"CUAUV_LOCALE": "teagle",
"CUAUV_VEHICLE": vehicle,
"CUAUV_VEHICLE_TYPE": vehicle_type,
"CUAUV_CONTEXT": "vehicle",
}
start(vehicle=True, branch=branch, gpu=False, env=env)
def set_permissions():
    """
    Sets group permissions for the workspace using ACL.
    The GID of the cuauv group can be changed in config.py.
    """
    acl_spec = "g:{}:rwX".format(str(GROUP_ID))
    # First pass (-d) sets the default ACL for future files; the second
    # applies the same ACL to everything that already exists.
    for scope_flag in ("-dR", "-R"):
        subprocess.run(
            ["sudo", "setfacl", scope_flag, "-m", acl_spec, str(WORKSPACE_DIRECTORY)],
            check=True
        )
def get_running_containers():
    """
    Get all running CUAUV containers (those whose name carries the
    workspace prefix).
    """
    return [c for c in client.containers.list()
            if c.name.startswith(CUAUV_CONTAINER_PREFIX)]
def _list():
    """
    List all branches with currently running containers.
    """
    containers = get_running_containers()
    if not containers:
        print('No running containers!')
        return
    print('Running containers:')
    prefix_len = len(CUAUV_CONTAINER_PREFIX)
    for container in containers:
        print(' {}'.format(container.name[prefix_len:]))
def stop_all():
    """
    Stop all running branch containers.
    """
    containers = get_running_containers()
    if not containers:
        print('No running containers!')
        return
    prefix_len = len(CUAUV_CONTAINER_PREFIX)
    for container in containers:
        print('Stopping {}'.format(container.name[prefix_len:]))
        container.stop()
# Expose every workspace subcommand through clize's CLI dispatcher.
clize.run(init, start, create_worktree, cdw, _list, stop, stop_all, destroy, vehicle, set_permissions)
|
{
"content_hash": "3de4e7c7694c7f60310827c1e3bd7235",
"timestamp": "",
"source": "github",
"line_count": 583,
"max_line_length": 163,
"avg_line_length": 32.315608919382505,
"alnum_prop": 0.560828025477707,
"repo_name": "cuauv/software",
"id": "31aeac5b533c1f8e4a2af483ed65146daa5eb7d6",
"size": "18915",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docker/auv-docker.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "271780"
},
{
"name": "C++",
"bytes": "2831785"
},
{
"name": "CMake",
"bytes": "5365"
},
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "Dockerfile",
"bytes": "2758"
},
{
"name": "Emacs Lisp",
"bytes": "19028"
},
{
"name": "GLSL",
"bytes": "6783"
},
{
"name": "HTML",
"bytes": "3642"
},
{
"name": "Haskell",
"bytes": "4770"
},
{
"name": "JavaScript",
"bytes": "113413"
},
{
"name": "Makefile",
"bytes": "12887"
},
{
"name": "Nix",
"bytes": "16335"
},
{
"name": "OCaml",
"bytes": "3804"
},
{
"name": "PureBasic",
"bytes": "58"
},
{
"name": "Python",
"bytes": "2141765"
},
{
"name": "Scheme",
"bytes": "129544"
},
{
"name": "Shell",
"bytes": "68820"
},
{
"name": "TeX",
"bytes": "25243"
},
{
"name": "Vim script",
"bytes": "125505"
}
],
"symlink_target": ""
}
|
import account_wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "8ae796b1174734a8240621a49f6339c4",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 65,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.8202247191011236,
"repo_name": "ntiufalara/openerp7",
"id": "a295fe1bd739f67c17519a2dc19b3b7896dd0c31",
"size": "1050",
"binary": false,
"copies": "430",
"ref": "refs/heads/master",
"path": "openerp/addons/l10n_at/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "C#",
"bytes": "93691"
},
{
"name": "C++",
"bytes": "108790"
},
{
"name": "CSS",
"bytes": "583265"
},
{
"name": "Groff",
"bytes": "8138"
},
{
"name": "HTML",
"bytes": "125159"
},
{
"name": "JavaScript",
"bytes": "5109152"
},
{
"name": "Makefile",
"bytes": "14036"
},
{
"name": "NSIS",
"bytes": "14114"
},
{
"name": "PHP",
"bytes": "14033"
},
{
"name": "Python",
"bytes": "9373763"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "6430"
},
{
"name": "XSLT",
"bytes": "156761"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
# Compatibility shims (appears to be pyuic-generated boilerplate, given
# the Ui_MainWindow/setupUi pattern below): fall back to no-op/legacy
# helpers when this PyQt build lacks QString.fromUtf8 or UnicodeUTF8.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        # No QString on this build: strings are already unicode.
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer API: translate() takes no encoding argument.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(855, 471)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.List = QtGui.QVBoxLayout()
self.List.setObjectName(_fromUtf8("List"))
self.listItem_3 = QtGui.QWidget(self.centralwidget)
self.listItem_3.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_3.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_3.setObjectName(_fromUtf8("listItem_3"))
self.horizontalLayout_5 = QtGui.QHBoxLayout(self.listItem_3)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.nameLabel_3 = QtGui.QLabel(self.listItem_3)
self.nameLabel_3.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_3.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_3.setFont(font)
self.nameLabel_3.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_3.setObjectName(_fromUtf8("nameLabel_3"))
self.horizontalLayout_5.addWidget(self.nameLabel_3)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.List.addWidget(self.listItem_3)
self.listItem_6 = QtGui.QWidget(self.centralwidget)
self.listItem_6.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_6.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_6.setObjectName(_fromUtf8("listItem_6"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.listItem_6)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.nameLabel_7 = QtGui.QLabel(self.listItem_6)
self.nameLabel_7.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_7.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_7.setFont(font)
self.nameLabel_7.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_7.setObjectName(_fromUtf8("nameLabel_7"))
self.horizontalLayout_7.addWidget(self.nameLabel_7)
self.nameLabel_8 = QtGui.QLabel(self.listItem_6)
self.nameLabel_8.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_8.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_8.setFont(font)
self.nameLabel_8.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_8.setObjectName(_fromUtf8("nameLabel_8"))
self.horizontalLayout_7.addWidget(self.nameLabel_8)
self.mEsperaPerfuracaoA = QtGui.QLabel(self.listItem_6)
self.mEsperaPerfuracaoA.setMinimumSize(QtCore.QSize(100, 0))
self.mEsperaPerfuracaoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.mEsperaPerfuracaoA.setFont(font)
self.mEsperaPerfuracaoA.setText(_fromUtf8(""))
self.mEsperaPerfuracaoA.setAlignment(QtCore.Qt.AlignCenter)
self.mEsperaPerfuracaoA.setObjectName(_fromUtf8("mEsperaPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.mEsperaPerfuracaoA)
self.nameLabel_10 = QtGui.QLabel(self.listItem_6)
self.nameLabel_10.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_10.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_10.setFont(font)
self.nameLabel_10.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_10.setObjectName(_fromUtf8("nameLabel_10"))
self.horizontalLayout_7.addWidget(self.nameLabel_10)
self.utilPerfuracaoA = QtGui.QLabel(self.listItem_6)
self.utilPerfuracaoA.setMinimumSize(QtCore.QSize(100, 0))
self.utilPerfuracaoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.utilPerfuracaoA.setFont(font)
self.utilPerfuracaoA.setText(_fromUtf8(""))
self.utilPerfuracaoA.setAlignment(QtCore.Qt.AlignCenter)
self.utilPerfuracaoA.setObjectName(_fromUtf8("utilPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.utilPerfuracaoA)
self.nameLabel_13 = QtGui.QLabel(self.listItem_6)
self.nameLabel_13.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_13.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_13.setFont(font)
self.nameLabel_13.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_13.setObjectName(_fromUtf8("nameLabel_13"))
self.horizontalLayout_7.addWidget(self.nameLabel_13)
self.compPerfuracaoA = QtGui.QLabel(self.listItem_6)
self.compPerfuracaoA.setMinimumSize(QtCore.QSize(100, 0))
self.compPerfuracaoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.compPerfuracaoA.setFont(font)
self.compPerfuracaoA.setText(_fromUtf8(""))
self.compPerfuracaoA.setAlignment(QtCore.Qt.AlignCenter)
self.compPerfuracaoA.setObjectName(_fromUtf8("compPerfuracaoA"))
self.horizontalLayout_7.addWidget(self.compPerfuracaoA)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_7.addItem(spacerItem1)
self.List.addWidget(self.listItem_6)
self.listItem_7 = QtGui.QWidget(self.centralwidget)
self.listItem_7.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_7.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_7.setObjectName(_fromUtf8("listItem_7"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.listItem_7)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.nameLabel_11 = QtGui.QLabel(self.listItem_7)
self.nameLabel_11.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_11.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_11.setFont(font)
self.nameLabel_11.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_11.setObjectName(_fromUtf8("nameLabel_11"))
self.horizontalLayout_8.addWidget(self.nameLabel_11)
self.nameLabel_12 = QtGui.QLabel(self.listItem_7)
self.nameLabel_12.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_12.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_12.setFont(font)
self.nameLabel_12.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_12.setObjectName(_fromUtf8("nameLabel_12"))
self.horizontalLayout_8.addWidget(self.nameLabel_12)
self.mEsperaPolimentoA = QtGui.QLabel(self.listItem_7)
self.mEsperaPolimentoA.setMinimumSize(QtCore.QSize(100, 0))
self.mEsperaPolimentoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.mEsperaPolimentoA.setFont(font)
self.mEsperaPolimentoA.setText(_fromUtf8(""))
self.mEsperaPolimentoA.setAlignment(QtCore.Qt.AlignCenter)
self.mEsperaPolimentoA.setObjectName(_fromUtf8("mEsperaPolimentoA"))
self.horizontalLayout_8.addWidget(self.mEsperaPolimentoA)
self.nameLabel_14 = QtGui.QLabel(self.listItem_7)
self.nameLabel_14.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_14.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_14.setFont(font)
self.nameLabel_14.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_14.setObjectName(_fromUtf8("nameLabel_14"))
self.horizontalLayout_8.addWidget(self.nameLabel_14)
self.utilPolimentoA = QtGui.QLabel(self.listItem_7)
self.utilPolimentoA.setMinimumSize(QtCore.QSize(100, 0))
self.utilPolimentoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.utilPolimentoA.setFont(font)
self.utilPolimentoA.setText(_fromUtf8(""))
self.utilPolimentoA.setAlignment(QtCore.Qt.AlignCenter)
self.utilPolimentoA.setObjectName(_fromUtf8("utilPolimentoA"))
self.horizontalLayout_8.addWidget(self.utilPolimentoA)
self.nameLabel_15 = QtGui.QLabel(self.listItem_7)
self.nameLabel_15.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_15.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_15.setFont(font)
self.nameLabel_15.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_15.setObjectName(_fromUtf8("nameLabel_15"))
self.horizontalLayout_8.addWidget(self.nameLabel_15)
self.compPolimentoA = QtGui.QLabel(self.listItem_7)
self.compPolimentoA.setMinimumSize(QtCore.QSize(100, 0))
self.compPolimentoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.compPolimentoA.setFont(font)
self.compPolimentoA.setText(_fromUtf8(""))
self.compPolimentoA.setAlignment(QtCore.Qt.AlignCenter)
self.compPolimentoA.setObjectName(_fromUtf8("compPolimentoA"))
self.horizontalLayout_8.addWidget(self.compPolimentoA)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_8.addItem(spacerItem2)
self.List.addWidget(self.listItem_7)
self.listItem_10 = QtGui.QWidget(self.centralwidget)
self.listItem_10.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_10.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_10.setObjectName(_fromUtf8("listItem_10"))
self.horizontalLayout_9 = QtGui.QHBoxLayout(self.listItem_10)
self.horizontalLayout_9.setObjectName(_fromUtf8("horizontalLayout_9"))
self.nameLabel_24 = QtGui.QLabel(self.listItem_10)
self.nameLabel_24.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_24.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_24.setFont(font)
self.nameLabel_24.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_24.setObjectName(_fromUtf8("nameLabel_24"))
self.horizontalLayout_9.addWidget(self.nameLabel_24)
self.nameLabel_25 = QtGui.QLabel(self.listItem_10)
self.nameLabel_25.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_25.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_25.setFont(font)
self.nameLabel_25.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_25.setObjectName(_fromUtf8("nameLabel_25"))
self.horizontalLayout_9.addWidget(self.nameLabel_25)
self.atendidosPerfuracaoA = QtGui.QLabel(self.listItem_10)
self.atendidosPerfuracaoA.setMinimumSize(QtCore.QSize(100, 0))
self.atendidosPerfuracaoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.atendidosPerfuracaoA.setFont(font)
self.atendidosPerfuracaoA.setText(_fromUtf8(""))
self.atendidosPerfuracaoA.setAlignment(QtCore.Qt.AlignCenter)
self.atendidosPerfuracaoA.setObjectName(_fromUtf8("atendidosPerfuracaoA"))
self.horizontalLayout_9.addWidget(self.atendidosPerfuracaoA)
self.nameLabel_26 = QtGui.QLabel(self.listItem_10)
self.nameLabel_26.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_26.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_26.setFont(font)
self.nameLabel_26.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_26.setObjectName(_fromUtf8("nameLabel_26"))
self.horizontalLayout_9.addWidget(self.nameLabel_26)
self.atendidosPolimentoA = QtGui.QLabel(self.listItem_10)
self.atendidosPolimentoA.setMinimumSize(QtCore.QSize(100, 0))
self.atendidosPolimentoA.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.atendidosPolimentoA.setFont(font)
self.atendidosPolimentoA.setText(_fromUtf8(""))
self.atendidosPolimentoA.setAlignment(QtCore.Qt.AlignCenter)
self.atendidosPolimentoA.setObjectName(_fromUtf8("atendidosPolimentoA"))
self.horizontalLayout_9.addWidget(self.atendidosPolimentoA)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_9.addItem(spacerItem3)
self.List.addWidget(self.listItem_10)
self.line_2 = QtGui.QFrame(self.centralwidget)
self.line_2.setMinimumSize(QtCore.QSize(5, 0))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.line_2.setFont(font)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.List.addWidget(self.line_2)
self.listItem_4 = QtGui.QWidget(self.centralwidget)
self.listItem_4.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_4.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_4.setObjectName(_fromUtf8("listItem_4"))
self.horizontalLayout_6 = QtGui.QHBoxLayout(self.listItem_4)
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.nameLabel_4 = QtGui.QLabel(self.listItem_4)
self.nameLabel_4.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_4.setMaximumSize(QtCore.QSize(75, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_4.setFont(font)
self.nameLabel_4.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_4.setObjectName(_fromUtf8("nameLabel_4"))
self.horizontalLayout_6.addWidget(self.nameLabel_4)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem4)
self.List.addWidget(self.listItem_4)
self.listItem_9 = QtGui.QWidget(self.centralwidget)
self.listItem_9.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_9.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_9.setObjectName(_fromUtf8("listItem_9"))
self.horizontalLayout_13 = QtGui.QHBoxLayout(self.listItem_9)
self.horizontalLayout_13.setObjectName(_fromUtf8("horizontalLayout_13"))
self.nameLabel_36 = QtGui.QLabel(self.listItem_9)
self.nameLabel_36.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_36.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_36.setFont(font)
self.nameLabel_36.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_36.setObjectName(_fromUtf8("nameLabel_36"))
self.horizontalLayout_13.addWidget(self.nameLabel_36)
self.nameLabel_20 = QtGui.QLabel(self.listItem_9)
self.nameLabel_20.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_20.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_20.setFont(font)
self.nameLabel_20.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_20.setObjectName(_fromUtf8("nameLabel_20"))
self.horizontalLayout_13.addWidget(self.nameLabel_20)
self.mEsperaPerfuracaoB = QtGui.QLabel(self.listItem_9)
self.mEsperaPerfuracaoB.setMinimumSize(QtCore.QSize(100, 0))
self.mEsperaPerfuracaoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.mEsperaPerfuracaoB.setFont(font)
self.mEsperaPerfuracaoB.setText(_fromUtf8(""))
self.mEsperaPerfuracaoB.setAlignment(QtCore.Qt.AlignCenter)
self.mEsperaPerfuracaoB.setObjectName(_fromUtf8("mEsperaPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.mEsperaPerfuracaoB)
self.nameLabel_38 = QtGui.QLabel(self.listItem_9)
self.nameLabel_38.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_38.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_38.setFont(font)
self.nameLabel_38.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_38.setObjectName(_fromUtf8("nameLabel_38"))
self.horizontalLayout_13.addWidget(self.nameLabel_38)
self.utilPerfuracaoB = QtGui.QLabel(self.listItem_9)
self.utilPerfuracaoB.setMinimumSize(QtCore.QSize(100, 0))
self.utilPerfuracaoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.utilPerfuracaoB.setFont(font)
self.utilPerfuracaoB.setText(_fromUtf8(""))
self.utilPerfuracaoB.setAlignment(QtCore.Qt.AlignCenter)
self.utilPerfuracaoB.setObjectName(_fromUtf8("utilPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.utilPerfuracaoB)
self.nameLabel_16 = QtGui.QLabel(self.listItem_9)
self.nameLabel_16.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_16.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_16.setFont(font)
self.nameLabel_16.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_16.setObjectName(_fromUtf8("nameLabel_16"))
self.horizontalLayout_13.addWidget(self.nameLabel_16)
self.compPerfuracaoB = QtGui.QLabel(self.listItem_9)
self.compPerfuracaoB.setMinimumSize(QtCore.QSize(100, 0))
self.compPerfuracaoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.compPerfuracaoB.setFont(font)
self.compPerfuracaoB.setText(_fromUtf8(""))
self.compPerfuracaoB.setAlignment(QtCore.Qt.AlignCenter)
self.compPerfuracaoB.setObjectName(_fromUtf8("compPerfuracaoB"))
self.horizontalLayout_13.addWidget(self.compPerfuracaoB)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_13.addItem(spacerItem5)
self.List.addWidget(self.listItem_9)
self.listItem_8 = QtGui.QWidget(self.centralwidget)
self.listItem_8.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_8.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_8.setObjectName(_fromUtf8("listItem_8"))
self.horizontalLayout_10 = QtGui.QHBoxLayout(self.listItem_8)
self.horizontalLayout_10.setObjectName(_fromUtf8("horizontalLayout_10"))
self.nameLabel_19 = QtGui.QLabel(self.listItem_8)
self.nameLabel_19.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_19.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_19.setFont(font)
self.nameLabel_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_19.setObjectName(_fromUtf8("nameLabel_19"))
self.horizontalLayout_10.addWidget(self.nameLabel_19)
self.nameLabel_21 = QtGui.QLabel(self.listItem_8)
self.nameLabel_21.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_21.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_21.setFont(font)
self.nameLabel_21.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_21.setObjectName(_fromUtf8("nameLabel_21"))
self.horizontalLayout_10.addWidget(self.nameLabel_21)
self.mEsperaPolimentoB = QtGui.QLabel(self.listItem_8)
self.mEsperaPolimentoB.setMinimumSize(QtCore.QSize(100, 0))
self.mEsperaPolimentoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.mEsperaPolimentoB.setFont(font)
self.mEsperaPolimentoB.setText(_fromUtf8(""))
self.mEsperaPolimentoB.setAlignment(QtCore.Qt.AlignCenter)
self.mEsperaPolimentoB.setObjectName(_fromUtf8("mEsperaPolimentoB"))
self.horizontalLayout_10.addWidget(self.mEsperaPolimentoB)
self.nameLabel_40 = QtGui.QLabel(self.listItem_8)
self.nameLabel_40.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_40.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_40.setFont(font)
self.nameLabel_40.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_40.setObjectName(_fromUtf8("nameLabel_40"))
self.horizontalLayout_10.addWidget(self.nameLabel_40)
self.utilPolimentoB = QtGui.QLabel(self.listItem_8)
self.utilPolimentoB.setMinimumSize(QtCore.QSize(100, 0))
self.utilPolimentoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.utilPolimentoB.setFont(font)
self.utilPolimentoB.setText(_fromUtf8(""))
self.utilPolimentoB.setAlignment(QtCore.Qt.AlignCenter)
self.utilPolimentoB.setObjectName(_fromUtf8("utilPolimentoB"))
self.horizontalLayout_10.addWidget(self.utilPolimentoB)
self.nameLabel_17 = QtGui.QLabel(self.listItem_8)
self.nameLabel_17.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_17.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_17.setFont(font)
self.nameLabel_17.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_17.setObjectName(_fromUtf8("nameLabel_17"))
self.horizontalLayout_10.addWidget(self.nameLabel_17)
self.compPolimentoB = QtGui.QLabel(self.listItem_8)
self.compPolimentoB.setMinimumSize(QtCore.QSize(100, 0))
self.compPolimentoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.compPolimentoB.setFont(font)
self.compPolimentoB.setText(_fromUtf8(""))
self.compPolimentoB.setAlignment(QtCore.Qt.AlignCenter)
self.compPolimentoB.setObjectName(_fromUtf8("compPolimentoB"))
self.horizontalLayout_10.addWidget(self.compPolimentoB)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_10.addItem(spacerItem6)
self.List.addWidget(self.listItem_8)
self.listItem_12 = QtGui.QWidget(self.centralwidget)
self.listItem_12.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_12.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_12.setObjectName(_fromUtf8("listItem_12"))
self.horizontalLayout_14 = QtGui.QHBoxLayout(self.listItem_12)
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.nameLabel_32 = QtGui.QLabel(self.listItem_12)
self.nameLabel_32.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_32.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_32.setFont(font)
self.nameLabel_32.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_32.setObjectName(_fromUtf8("nameLabel_32"))
self.horizontalLayout_14.addWidget(self.nameLabel_32)
self.nameLabel_33 = QtGui.QLabel(self.listItem_12)
self.nameLabel_33.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_33.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_33.setFont(font)
self.nameLabel_33.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_33.setObjectName(_fromUtf8("nameLabel_33"))
self.horizontalLayout_14.addWidget(self.nameLabel_33)
self.atendidosPerfuracaoB = QtGui.QLabel(self.listItem_12)
self.atendidosPerfuracaoB.setMinimumSize(QtCore.QSize(100, 0))
self.atendidosPerfuracaoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.atendidosPerfuracaoB.setFont(font)
self.atendidosPerfuracaoB.setText(_fromUtf8(""))
self.atendidosPerfuracaoB.setAlignment(QtCore.Qt.AlignCenter)
self.atendidosPerfuracaoB.setObjectName(_fromUtf8("atendidosPerfuracaoB"))
self.horizontalLayout_14.addWidget(self.atendidosPerfuracaoB)
self.nameLabel_34 = QtGui.QLabel(self.listItem_12)
self.nameLabel_34.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_34.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_34.setFont(font)
self.nameLabel_34.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_34.setObjectName(_fromUtf8("nameLabel_34"))
self.horizontalLayout_14.addWidget(self.nameLabel_34)
self.atendidosPolimentoB = QtGui.QLabel(self.listItem_12)
self.atendidosPolimentoB.setMinimumSize(QtCore.QSize(100, 0))
self.atendidosPolimentoB.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.atendidosPolimentoB.setFont(font)
self.atendidosPolimentoB.setText(_fromUtf8(""))
self.atendidosPolimentoB.setAlignment(QtCore.Qt.AlignCenter)
self.atendidosPolimentoB.setObjectName(_fromUtf8("atendidosPolimentoB"))
self.horizontalLayout_14.addWidget(self.atendidosPolimentoB)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_14.addItem(spacerItem7)
self.List.addWidget(self.listItem_12)
self.line = QtGui.QFrame(self.centralwidget)
self.line.setMinimumSize(QtCore.QSize(0, 5))
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.List.addWidget(self.line)
self.listItem_11 = QtGui.QWidget(self.centralwidget)
self.listItem_11.setMinimumSize(QtCore.QSize(0, 0))
self.listItem_11.setMaximumSize(QtCore.QSize(10000, 100))
self.listItem_11.setObjectName(_fromUtf8("listItem_11"))
self.horizontalLayout_12 = QtGui.QHBoxLayout(self.listItem_11)
self.horizontalLayout_12.setObjectName(_fromUtf8("horizontalLayout_12"))
self.nameLabel_23 = QtGui.QLabel(self.listItem_11)
self.nameLabel_23.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_23.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_23.setFont(font)
self.nameLabel_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_23.setObjectName(_fromUtf8("nameLabel_23"))
self.horizontalLayout_12.addWidget(self.nameLabel_23)
self.nameLabel_22 = QtGui.QLabel(self.listItem_11)
self.nameLabel_22.setMinimumSize(QtCore.QSize(100, 0))
self.nameLabel_22.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_22.setFont(font)
self.nameLabel_22.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_22.setObjectName(_fromUtf8("nameLabel_22"))
self.horizontalLayout_12.addWidget(self.nameLabel_22)
self.mEsperaEnvernizamento = QtGui.QLabel(self.listItem_11)
self.mEsperaEnvernizamento.setMinimumSize(QtCore.QSize(100, 0))
self.mEsperaEnvernizamento.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.mEsperaEnvernizamento.setFont(font)
self.mEsperaEnvernizamento.setText(_fromUtf8(""))
self.mEsperaEnvernizamento.setAlignment(QtCore.Qt.AlignCenter)
self.mEsperaEnvernizamento.setObjectName(_fromUtf8("mEsperaEnvernizamento"))
self.horizontalLayout_12.addWidget(self.mEsperaEnvernizamento)
self.nameLabel_46 = QtGui.QLabel(self.listItem_11)
self.nameLabel_46.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_46.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_46.setFont(font)
self.nameLabel_46.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_46.setObjectName(_fromUtf8("nameLabel_46"))
self.horizontalLayout_12.addWidget(self.nameLabel_46)
self.utilEnvernizamento = QtGui.QLabel(self.listItem_11)
self.utilEnvernizamento.setMinimumSize(QtCore.QSize(100, 0))
self.utilEnvernizamento.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.utilEnvernizamento.setFont(font)
self.utilEnvernizamento.setText(_fromUtf8(""))
self.utilEnvernizamento.setAlignment(QtCore.Qt.AlignCenter)
self.utilEnvernizamento.setObjectName(_fromUtf8("utilEnvernizamento"))
self.horizontalLayout_12.addWidget(self.utilEnvernizamento)
self.nameLabel_18 = QtGui.QLabel(self.listItem_11)
self.nameLabel_18.setMinimumSize(QtCore.QSize(125, 0))
self.nameLabel_18.setMaximumSize(QtCore.QSize(125, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nameLabel_18.setFont(font)
self.nameLabel_18.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_18.setObjectName(_fromUtf8("nameLabel_18"))
self.horizontalLayout_12.addWidget(self.nameLabel_18)
self.compEnvernizamento = QtGui.QLabel(self.listItem_11)
self.compEnvernizamento.setMinimumSize(QtCore.QSize(100, 0))
self.compEnvernizamento.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.compEnvernizamento.setFont(font)
self.compEnvernizamento.setText(_fromUtf8(""))
self.compEnvernizamento.setAlignment(QtCore.Qt.AlignCenter)
self.compEnvernizamento.setObjectName(_fromUtf8("compEnvernizamento"))
self.horizontalLayout_12.addWidget(self.compEnvernizamento)
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_12.addItem(spacerItem8)
self.List.addWidget(self.listItem_11)
self.verticalLayout_4.addLayout(self.List)
self.footer = QtGui.QWidget(self.centralwidget)
self.footer.setMaximumSize(QtCore.QSize(100000, 50))
self.footer.setObjectName(_fromUtf8("footer"))
self.horizontalLayout = QtGui.QHBoxLayout(self.footer)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.nameLabel_30 = QtGui.QLabel(self.footer)
self.nameLabel_30.setMinimumSize(QtCore.QSize(115, 0))
self.nameLabel_30.setMaximumSize(QtCore.QSize(115, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_30.setFont(font)
self.nameLabel_30.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.nameLabel_30.setObjectName(_fromUtf8("nameLabel_30"))
self.horizontalLayout.addWidget(self.nameLabel_30)
self.clientesAtendidos = QtGui.QLabel(self.footer)
self.clientesAtendidos.setMinimumSize(QtCore.QSize(100, 0))
self.clientesAtendidos.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.clientesAtendidos.setFont(font)
self.clientesAtendidos.setText(_fromUtf8(""))
self.clientesAtendidos.setAlignment(QtCore.Qt.AlignCenter)
self.clientesAtendidos.setObjectName(_fromUtf8("clientesAtendidos"))
self.horizontalLayout.addWidget(self.clientesAtendidos)
self.nameLabel_28 = QtGui.QLabel(self.footer)
self.nameLabel_28.setMinimumSize(QtCore.QSize(115, 0))
self.nameLabel_28.setMaximumSize(QtCore.QSize(115, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_28.setFont(font)
self.nameLabel_28.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_28.setObjectName(_fromUtf8("nameLabel_28"))
self.horizontalLayout.addWidget(self.nameLabel_28)
self.tempoSimulacao = QtGui.QLabel(self.footer)
self.tempoSimulacao.setMinimumSize(QtCore.QSize(100, 0))
self.tempoSimulacao.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.tempoSimulacao.setFont(font)
self.tempoSimulacao.setText(_fromUtf8(""))
self.tempoSimulacao.setAlignment(QtCore.Qt.AlignCenter)
self.tempoSimulacao.setObjectName(_fromUtf8("tempoSimulacao"))
self.horizontalLayout.addWidget(self.tempoSimulacao)
self.nameLabel_35 = QtGui.QLabel(self.footer)
self.nameLabel_35.setMinimumSize(QtCore.QSize(115, 0))
self.nameLabel_35.setMaximumSize(QtCore.QSize(115, 16777215))
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
self.nameLabel_35.setFont(font)
self.nameLabel_35.setAlignment(QtCore.Qt.AlignCenter)
self.nameLabel_35.setObjectName(_fromUtf8("nameLabel_35"))
self.horizontalLayout.addWidget(self.nameLabel_35)
self.nRepeticoes = QtGui.QLabel(self.footer)
self.nRepeticoes.setMinimumSize(QtCore.QSize(100, 0))
self.nRepeticoes.setMaximumSize(QtCore.QSize(100, 16777215))
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
self.nRepeticoes.setFont(font)
self.nRepeticoes.setText(_fromUtf8(""))
self.nRepeticoes.setAlignment(QtCore.Qt.AlignCenter)
self.nRepeticoes.setObjectName(_fromUtf8("nRepeticoes"))
self.horizontalLayout.addWidget(self.nRepeticoes)
self.verticalLayout_4.addWidget(self.footer)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Resultados da simulaçao", None))
self.nameLabel_3.setText(_translate("MainWindow", "Peças grandes (A)", None))
self.nameLabel_7.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_8.setText(_translate("MainWindow", "Media espera", None))
self.nameLabel_10.setText(_translate("MainWindow", "Utilizacao media", None))
self.nameLabel_13.setText(_translate("MainWindow", "Comp medio fila", None))
self.nameLabel_11.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_12.setText(_translate("MainWindow", "Media espera", None))
self.nameLabel_14.setText(_translate("MainWindow", "Utilizacao media", None))
self.nameLabel_15.setText(_translate("MainWindow", "Comp medio fila", None))
self.nameLabel_24.setText(_translate("MainWindow", "Clientes atendidos", None))
self.nameLabel_25.setText(_translate("MainWindow", "Perfuracao", None))
self.nameLabel_26.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_4.setText(_translate("MainWindow", "Peças grandes (B)", None))
self.nameLabel_36.setText(_translate("MainWindow", "Perfuraçao", None))
self.nameLabel_20.setText(_translate("MainWindow", "Media espera", None))
self.nameLabel_38.setText(_translate("MainWindow", "Utilizacao media", None))
self.nameLabel_16.setText(_translate("MainWindow", "Comp medio fila", None))
self.nameLabel_19.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_21.setText(_translate("MainWindow", "Media espera", None))
self.nameLabel_40.setText(_translate("MainWindow", "Utilizacao media", None))
self.nameLabel_17.setText(_translate("MainWindow", "Comp medio fila", None))
self.nameLabel_32.setText(_translate("MainWindow", "Clientes atendidos", None))
self.nameLabel_33.setText(_translate("MainWindow", "Perfuracao", None))
self.nameLabel_34.setText(_translate("MainWindow", "Polimento", None))
self.nameLabel_23.setText(_translate("MainWindow", "Envernizamento", None))
self.nameLabel_22.setText(_translate("MainWindow", "Media espera", None))
self.nameLabel_46.setText(_translate("MainWindow", "Utilizacao media", None))
self.nameLabel_18.setText(_translate("MainWindow", "Comp medio fila", None))
self.nameLabel_30.setText(_translate("MainWindow", "Clientes atendidos", None))
self.nameLabel_28.setText(_translate("MainWindow", "Tempo simulacao", None))
self.nameLabel_35.setText(_translate("MainWindow", "Nº repetiçoes", None))
|
{
"content_hash": "08186f77fda61940fda642807e788df6",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 105,
"avg_line_length": 54.84604715672677,
"alnum_prop": 0.698715355047542,
"repo_name": "Goamaral/SCC",
"id": "06da5b9b17166680d428d0b665b4c84a55fa3243",
"size": "39752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "outputWindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99067"
}
],
"symlink_target": ""
}
|
"""Configure routing between VLANs."""
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2018 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytricia
from faucet.conf import Conf, test_config_condition
class Router(Conf):
    """Implement FAUCET configuration for a router."""

    defaults = {
        'vlans': None,
    }
    defaults_types = {
        'vlans': list,
    }

    def __init__(self, _id, dp_id, conf):
        self.vlans = []
        self.vip_map_by_ipv = {}
        super(Router, self).__init__(_id, dp_id, conf)

    def __str__(self):
        return self._id

    def check_config(self):
        super(Router, self).check_config()
        # A router only makes sense when it joins two or more VLANs.
        vlans_configured = isinstance(self.vlans, list) and len(self.vlans) > 1
        test_config_condition(not vlans_configured, (
            'router %s must have at least 2 VLANs configured' % self))

    def vip_map(self, ipa):
        """Return VIP for IP address, if any."""
        vip_table = self.vip_map_by_ipv.get(ipa.version)
        if vip_table is not None:
            entry = vip_table.get(ipa)
            if entry:
                return entry
        return (None, None)

    def finalize(self):
        # Build one longest-prefix-match table per IP version, mapping each
        # FAUCET VIP network to its (vlan, vip) pair.
        for vlan in self.vlans:
            for faucet_vip in vlan.faucet_vips:
                version = faucet_vip.version
                vip_table = self.vip_map_by_ipv.get(version)
                if vip_table is None:
                    vip_table = pytricia.PyTricia(faucet_vip.ip.max_prefixlen)
                    self.vip_map_by_ipv[version] = vip_table
                vip_table[faucet_vip.network] = (vlan, faucet_vip)
        super(Router, self).finalize()
|
{
"content_hash": "10ce57ff0635c3f3c26490582d5d7d6e",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 91,
"avg_line_length": 33.676923076923075,
"alnum_prop": 0.620831429876656,
"repo_name": "trentindav/faucet",
"id": "6981d9dcac5c0929144e905ece15e77f9f3854ce",
"size": "2189",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "faucet/router.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2538"
},
{
"name": "Python",
"bytes": "1126434"
},
{
"name": "Shell",
"bytes": "19845"
}
],
"symlink_target": ""
}
|
import logging
import datetime
import decimal
from random import uniform
import discord
from discord.ext import commands
from .common import Cog, CoinConverter
from .utils import Table
from .coins import AccountType
log = logging.getLogger(__name__)
# steal constants: success chance is (BASE_CHANCE + target_wallet/amount) * STEAL_CONSTANT
BASE_CHANCE = decimal.Decimal('1')
STEAL_CONSTANT = decimal.Decimal('0.42')
# 6 hours in jail by default
# 9 hours for steal-point regeneration
DEFAULT_ARREST = 6
DEFAULT_REGEN = 9
# how many hours a grace period lasts after a user is robbed
GRACE_PERIOD = 5
class CooldownTypes:
    """Values stored in the ``ctype`` column of the steal_cooldown table."""
    prison = 'prison'
    points = 'points'
class CooldownError(Exception):
    """Exception type for cooldown-related failures."""
def fmt_tdelta(delta):
"""Remove the microseconds from a timedelta object."""
return datetime.timedelta(days=delta.days, seconds=delta.seconds)
class CoinsExt(Cog, requires=['coins']):
    # Extra JoséCoin commands: leaderboards (top), pricing info, the steal
    # mini-game (cooldowns/grace periods backed by Postgres) and tax returns.
    @property
    def coins2(self):
        # Lazily resolve the "Coins2" cog from the bot.
        return self.bot.get_cog('Coins2')
    async def show(self, ctx, accounts, *, field='amount', limit=10):
        """Show a list of accounts"""
        filtered = []
        for idx, account in enumerate(accounts):
            # Resolve a display name for each account and keep at most `limit`.
            name = self.jcoin.get_name(account['account_id'], account=account)
            account['_name'] = name
            filtered.append(account)
            if len(filtered) == limit:
                break
        table = Table('pos', 'name', 'account id', field)
        for idx, account in enumerate(filtered):
            table.add_row(
                str(idx + 1), account['_name'], str(account['account_id']),
                str(account[field]))
        rendered = await table.render(loop=self.loop)
        # 1993 leaves room for the surrounding code-fence characters —
        # presumably tied to Discord's 2000-char message limit (TODO confirm).
        if len(rendered) > 1993:
            await ctx.send(f'very big cant show: {len(rendered)}')
        else:
            await ctx.send(f'```\n{rendered}```')
    @commands.command()
    async def top(self, ctx, mode: str = 'g', limit: int = 10):
        """Show accounts by specific criteria.
        'global' means all accounts in josé.
        'local' means the accounts in the server/guild.
        modes:
        - g: global accounts ordered by amount.
        - l: local accounts ordered by amount.
        - t: tax, global accounts ordered by tax paid.
        - b: taxbanks, all taxbanks ordered by amount
        - p: global poorest.
        - lp: local poorest.
        """
        if limit > 30 or limit < 1:
            raise self.SayException('invalid limit')
        if mode == 'g':
            accounts = await self.coins.jc_get(
                '/wallets', {
                    'key': 'global',
                    'reverse': True,
                    'type': self.coins.AccountType.USER,
                    'limit': limit,
                })
        elif mode == 'l':
            accounts = await self.coins.jc_get(
                '/wallets', {
                    'key': 'local',
                    'guild_id': ctx.guild.id,
                    'reverse': True,
                    'limit': limit
                })
        elif mode == 't':
            accounts = await self.coins.jc_get('/wallets', {
                'key': 'taxpaid',
                'reverse': True,
                'limit': limit,
            })
            # 't' mode renders a different column, so it returns early.
            return await self.show(ctx, accounts, field='taxpaid', limit=limit)
        elif mode == 'b':
            accounts = await self.coins.jc_get('/wallets', {
                'key': 'taxbanks',
                'reverse': True,
                'limit': limit,
            })
        elif mode == 'p':
            # no 'reverse' key: ascending order == poorest first
            accounts = await self.coins.jc_get('/wallets', {
                'key': 'global',
                'type': AccountType.USER,
                'limit': limit,
            })
        elif mode == 'lp':
            accounts = await self.coins.jc_get('/wallets', {
                'key': 'local',
                'guild_id': ctx.guild.id,
                'limit': limit,
            })
        else:
            raise self.SayException('mode not found')
        await self.show(ctx, accounts, limit=limit)
    @commands.command(name='prices')
    async def _prices(self, ctx):
        """Show price information about commands."""
        em = discord.Embed(title='Pricing', color=discord.Color(0x2192bc))
        # category -> (description, tuple of example commands)
        descriptions = {
            'OPR': (
                'Operational tax for high-load commands',
                ('yt', 'datamosh'),
            ),
            'API': ('API tax (includes the NSFW commands)',
                    ('xkcd', 'wolframalpha', 'weather', 'money', 'urban')),
            'TRN': ('Translation tax', ('translate', )),
        }
        for category in self.prices:
            price = self.prices[category]
            em.add_field(
                name=f'Category: {category}, Price: {price}',
                value=f'{descriptions[category][0]}: '
                f'{", ".join(descriptions[category][1])}',
                inline=False)
        await ctx.send(embed=em)
    @commands.command(name='taxes')
    @commands.guild_only()
    async def taxes(self, ctx):
        """Show your taxbank's wallet."""
        await self.coins.ensure_taxbank(ctx)
        acc = await self.coins.get_account(ctx.guild.id)
        await ctx.send(f'`{self.coins.get_name(ctx.guild)}: {acc["amount"]}`')
    async def add_cooldown(self,
                           user,
                           c_type: str = 'prison',
                           hours: int = DEFAULT_ARREST) -> int:
        """Add a steal cooldown to a user.
        """
        # NOTE: `hours` is f-string-interpolated into the SQL interval below.
        # This is only acceptable because every caller passes an int computed
        # in this module — never route user-supplied input into this parameter.
        await self.pool.execute(f"""
        INSERT INTO steal_cooldown (user_id, ctype, finish)
        VALUES ($1, $2, now() + interval '{hours} hours')
        """, user.id, c_type)
        return hours
    async def remove_cooldown(self, user, c_type: CooldownTypes):
        """Remove a cooldown from a user.
        This resets the user's steal points if we are
        removing a points cooldown.
        """
        user_id = user.id
        res = await self.pool.execute("""
        DELETE FROM steal_cooldown
        WHERE user_id=$1 AND ctype=$2
        """, user_id, c_type)
        log.debug(f'Removing cooldown type {c_type} for {user!s}[{user.id}]')
        # asyncpg returns a status string like 'DELETE <n>'; take the count.
        _, deleted = res.split()
        deleted = int(deleted)
        if deleted and c_type == CooldownTypes.points:
            await self.pool.execute("""
            UPDATE steal_points
            SET points=3
            WHERE user_id=$1
            """, user_id)
    async def check_cooldowns(self, thief: discord.User):
        """Check if the current thief is with its cooldowns
        checked up.
        """
        now = datetime.datetime.utcnow()
        cooldowns = await self.pool.fetch("""
        SELECT ctype, finish FROM steal_cooldown
        WHERE user_id=$1
        """, thief.id)
        for cooldown in cooldowns:
            c_type, c_finish = cooldown['ctype'], cooldown['finish']
            if now >= c_finish:
                # expired: clean it up and keep checking the rest
                await self.remove_cooldown(thief, c_type)
                continue
            # in the case the cooldown isnt finished
            remaining = c_finish - now
            if c_type == 'prison':
                raise self.SayException('\N{POLICE CAR} You are still in '
                                        'prison, wait '
                                        f'{fmt_tdelta(remaining)} hours')
            elif c_type == 'points':
                raise self.SayException('\N{DIAMOND SHAPE WITH A DOT INSIDE}'
                                        ' You are waiting for steal points. '
                                        f'Wait {fmt_tdelta(remaining)} hours')
    async def check_grace(self, target: discord.User):
        """Check if the target is in grace period."""
        now = datetime.datetime.utcnow()
        grace = await self.pool.fetchrow("""
        SELECT finish FROM steal_grace
        WHERE user_id = $1
        """, target.id)
        if not grace:
            return
        if now < grace['finish']:
            remaining = grace['finish'] - now
            raise self.SayException('\N{BABY ANGEL} Your target is in'
                                    ' grace period. it will expire in'
                                    f' {fmt_tdelta(remaining)} hours')
    async def check_points(self, thief: discord.User):
        """Check if current thief has enough steal points,
        decrement 1 from them if thief has enough,
        puts cooldown if it reaches 0."""
        points = await self.pool.fetchrow("""
        SELECT points FROM steal_points
        WHERE user_id = $1
        """, thief.id)
        if not points:
            # first steal ever: create the row (DB default is presumably
            # 3 points, mirrored locally below — TODO confirm schema default)
            await self.pool.execute("""
            INSERT INTO steal_points (user_id)
            VALUES ($1)
            """, thief.id)
            points = {'points': 3}
        if points['points'] < 1:
            await self.add_cooldown(thief, CooldownTypes.points, DEFAULT_REGEN)
            raise self.SayException('\N{FACE WITH TEARS OF JOY}'
                                    ' You ran out of stealing points!'
                                    f' wait {DEFAULT_REGEN} hours.')
        await self.pool.execute("""
        UPDATE steal_points
        SET points = points - 1
        WHERE user_id = $1
        """, thief.id)
        # if this spend used the last point, start the regen cooldown now
        if (points['points'] - 1) < 1:
            await self.add_cooldown(thief, CooldownTypes.points, DEFAULT_REGEN)
    async def add_grace(self, target: discord.User, hours: int):
        """Add a grace period to the target.
        Removes an existing grace period.
        """
        grace = await self.pool.fetch("""
        SELECT finish FROM steal_grace
        WHERE user_id = $1
        """, target.id)
        if grace:
            await self.pool.execute("""
            DELETE FROM steal_grace
            WHERE user_id = $1
            """, target.id)
        # NOTE: `hours` is f-string-interpolated into the SQL interval; safe
        # only because callers pass module constants/ints — never user input.
        await self.pool.execute(f"""
        INSERT INTO steal_grace (user_id, finish)
        VALUES ($1, now() + interval '{hours} hours')
        """, target.id)
    async def arrest(self, ctx, amount: decimal.Decimal) -> tuple:
        """Arrest the thief.
        Returns
        -------
        tuple
            with information about the arrest,
            and how many hours does the thief get
            in prison.
        """
        thief = ctx.author
        guild = ctx.guild
        # the arrest fee is half of the attempted steal amount
        fee = amount / 2
        # maintain sanity
        await self.coins.ensure_ctx(ctx)
        log.debug(f'arresting {thief}[{thief.id}]')
        try:
            transfer_info = await self.coins.transfer_str(thief, guild, fee)
            # fee is paid, jail.
            hours = await self.add_cooldown(thief)
        except self.coins.ConditionError:
            # fee is not paid, BIG JAIL.
            thief_acc = await self.coins.get_account(thief)
            amnt = thief_acc['amount']
            # zero the wallet, convert 1jc to extra hour in jail
            transfer_info = await self.coins.zero(thief, ctx.guild.id)
            hours = await self.add_cooldown(thief, CooldownTypes.prison,
                                            DEFAULT_ARREST + int(amnt))
        return hours, transfer_info
    def info_arrest(self, hours: int, transfer_info: str, message: str):
        """Generate a SayException with the information on the autojail."""
        raise self.SayException(f'\N{POLICE OFFICER} You got '
                                f'arrested! {message}\n{hours}h in jail.\n'
                                f'`{transfer_info}`')
    def steal_info(self,
                   res: float,
                   chance: float,
                   transfer_info: str,
                   hours: int = None):
        """Show the information about the success / failure of a steal."""
        msg_res = [f'`[chance: {chance} | res: {res}]`']
        # a roll below the chance threshold is a successful steal
        if res < chance:
            msg_res.append(f'Congrats!')
        else:
            msg_res.append(f'\N{POLICE OFFICER} Arrested!')
            msg_res.append(f'{hours}h in jail.')
        msg_res.append(transfer_info)
        raise self.SayException('\n'.join(msg_res))
    @commands.command(name='steal')
    @commands.guild_only()
    async def steal(self, ctx, target: discord.User, *, amount: CoinConverter):
        """Steal JoséCoins from someone.
        Obviously this does not have a 100% success rate,
        the best success you can achieve is a 50% rate.
        The probability of success depends on four factors:
        the base chance, the wallet of your target,
        the amount you want to steal from the wallet,
        and the steal constant.
        Rules:
        - You can't steal less than 0.01JC.
        - You can't steal if you have less than 6JC.
        - You can't steal more than double your current wallet.
        - You can't steal from targets who are in grace period.
        - You, by default, have 3 stealing points, and you lose
        one each time you use the steal command successfully.
        - You can't steal from José, it will automatically jail you.
        - You are automatically jailed if you try to steal
        more than your target's wallet.
        """
        c2 = self.coins
        await c2.ensure_ctx(ctx)
        thief = ctx.author
        if thief == target:
            raise self.SayException('You can not steal from yourself')
        # make sure both have accounts
        try:
            thief_acc = await c2.get_account(thief.id)
            target_acc = await c2.get_account(target.id)
        except c2.AccountNotFoundError:
            raise self.SayException("One of you don't have a JoséCoin wallet")
        if amount <= 0.01:
            raise self.SayException('\N{LOW BRIGHTNESS SYMBOL} '
                                    'Stealing too low.')
        if thief_acc['amount'] < 6:
            raise self.SayException("You have less than `6JC`, "
                                    "can't use the steal command")
        if target_acc['amount'] < 3:
            raise self.SayException('Target has less than `3JC`, '
                                    'cannot steal them')
        if amount > 2 * thief_acc['amount']:
            raise self.SayException('You can not steal more than double '
                                    'your current wallet amount.')
        try:
            await c2.lock(thief.id, target.id)
            await self.check_cooldowns(thief)
            await self.check_grace(target)
            await self.check_points(thief)
            await c2.jc_post(f'/wallets/{thief.id}/steal_use')
            await c2.unlock(thief.id, target.id)
            # checking for other stuff that cause autojail
            # NOTE(review): -69 looks like a sentinel for protected accounts
            # (the docstring says stealing from José autojails) — confirm.
            if target_acc['amount'] == -69:
                hours, transfer_info = await self.arrest(ctx, amount)
                self.info_arrest(hours, transfer_info,
                                 'You can not steal from this account.')
            t_amnt = target_acc['amount']
            if amount > t_amnt:
                hours, transfer_info = await self.arrest(ctx, amount)
                self.info_arrest(hours, transfer_info,
                                 'Trying to steal more than the target')
            # roll in [0, 10) against the capped chance value
            chance = (BASE_CHANCE + (t_amnt / amount)) * STEAL_CONSTANT
            if chance > 5:
                chance = 5
            chance = round(chance, 3)
            res = uniform(0, 10)
            res = round(res, 3)
            log.info(f'[steal] chance={chance} res={res} amount={amount}'
                     f' t_amnt={t_amnt} '
                     f'thief={thief}[{thief.id}] target={target}[{target.id}]')
            success = res < chance
            # log steal
            await self.pool.execute("""
            insert into steal_history (thief, target, target_before,
                amount, success, chance, res)
            values ($1, $2, $3, $4, $5, $6, $7)
            """, thief.id, target.id, t_amnt, amount, success, chance, res)
            if success:
                # success
                await c2.jc_post(f'/wallets/{thief.id}/steal_success')
                transfer_info = await c2.transfer_str(target.id, thief.id,
                                                      amount)
                grace = GRACE_PERIOD
                if t_amnt > 200:
                    # decrease grace period the richer you are
                    temp = t_amnt * (decimal.Decimal('0.3') * t_amnt)
                    grace -= (temp / amount) * decimal.Decimal(0.001)
                try:
                    grace_s = f'{grace}h grace period' if grace > 0 else \
                        '(NO GRACE AVAILABLE)'
                    await target.send(':gun: **You were robbed!** '
                                      f'The thief(`{thief}`) stole '
                                      f'{amount} from you. '
                                      f'{grace_s}')
                # bare except: DMing the target is best-effort and is
                # deliberately ignored when it fails — TODO narrow this.
                except:
                    pass
                # NOTE(review): the DM above reports the reduced `grace`,
                # but the full GRACE_PERIOD is granted here — confirm whether
                # add_grace should receive `grace` instead.
                if grace > 0:
                    await self.add_grace(target, GRACE_PERIOD)
                self.steal_info(res, chance, transfer_info)
            else:
                # jail
                hours, transfer_info = await self.arrest(ctx, amount)
                self.steal_info(res, chance, transfer_info, hours)
        finally:
            await c2.unlock(thief.id, target.id)
    @commands.command(name='stealstate', aliases=['stealstatus'])
    async def stealstate(self, ctx):
        """Show your current stealing state."""
        author = ctx.author
        now = datetime.datetime.utcnow()
        em = discord.Embed(title=f'Steal state for {author}')
        cooldowns = await self.pool.fetch("""
        SELECT ctype, finish FROM steal_cooldown
        WHERE user_id = $1
        """, author.id)
        grace = await self.pool.fetchrow("""
        SELECT finish FROM steal_grace
        WHERE user_id = $1
        """, author.id)
        points = await self.pool.fetchrow("""
        SELECT points FROM steal_points
        WHERE user_id = $1
        """, author.id)
        if points:
            em.add_field(
                name='remaining stealing points',
                value=points['points'],
                inline=False)
        for idx, cooldown in enumerate(cooldowns):
            c_type = cooldown['ctype']
            c_type_str = 'jail' if c_type == 'prison' else 'steal points regen'
            remaining = cooldown['finish'] - now
            r_sec = remaining.total_seconds()
            expired = ' [EXPIRED]' if r_sec < 0 else ''
            em.add_field(
                name=f'cooldown {idx}{expired}',
                value=f'{c_type_str}: `{fmt_tdelta(remaining)}`',
                inline=False)
        if grace:
            # get timedelta
            remaining = grace['finish'] - now
            r_sec = remaining.total_seconds()
            expired = '[EXPIRED] ' if r_sec < 0 else ''
            em.add_field(
                name=f'{expired}grace period',
                value=f'`{fmt_tdelta(remaining)}`',
                inline=False)
        await ctx.send(embed=em)
    @commands.command(name='stealreset')
    @commands.is_owner()
    async def stealreset(self, ctx, *people: discord.User):
        """Reset people's steal states."""
        for person in people:
            # NOTE: person.id is interpolated into the SQL; discord user IDs
            # are numeric snowflakes, so this cannot carry SQL text.
            res = await self.pool.execute(f"""
            DELETE FROM steal_points WHERE user_id = {person.id};
            DELETE FROM steal_cooldown WHERE user_id = {person.id};
            DELETE FROM steal_grace WHERE user_id = {person.id};
            """)
            self.loop.create_task(ctx.send(f'`{person}: {res}`'))
    @commands.group(aliases=['txr'], invoke_without_command=True)
    async def taxreturn(self, ctx):
        """Manage tax returns.
        Depending on how much tax you've paid, you
        can request a tax return.
        The returned money is calculated based on all your
        transactions done to taxbanks.
        NOTE: Only transactions that were done at the
        time v3 was deployed are valid.
        Of course, not all transactions will apply
        to the tax return.
        Only tax transactions which are above 5JC will count
        to the total available tax return money.
        Plus, not all the "available tax return" money will
        be promptly available to withdraw, once you withdraw,
        only 10% of that amount is given to your wallet.
        """
        await ctx.invoke(self.bot.get_command('help'), 'txr')
    async def txr_transactions(self, user: discord.User) -> list:
        """Get all tax transactions done by the user.
        Only transactions that are above average count torwards
        the list.
        Returns
        -------
        list[asyncpg.Record]
            List of transactions that satisfy the criteria.
        """
        # account_type=1 selects taxbank accounts; only >= 5JC transactions
        # that have not already been used in a tax return qualify.
        return await self.pool.fetch("""
        select *
        from transactions
        join accounts on transactions.receiver = accounts.account_id
        where transactions.sender=$1
        and accounts.account_type=1
        and transactions.amount >= 5
        and transactions.taxreturn_used = false
        """, user.id)
    async def txr_total(self, user: discord.User) -> decimal.Decimal:
        """Get the total amount of tax that is available
        to be returned to the user.
        """
        # NOTE(review): the SQL applies 25% (sum * 25/100) while the group
        # docstring advertises 10% — confirm which figure is intended.
        return await self.pool.fetchval("""
        select sum(transactions.amount) * 25/100
        from transactions
        join accounts on transactions.receiver = accounts.account_id
        where transactions.sender=$1
        and accounts.account_type=1
        and transactions.amount >= 5
        and transactions.taxreturn_used = false
        """, user.id)
    async def txr_not_total(self, user: discord.User) -> decimal.Decimal:
        # Same criteria as txr_total, but without the return-rate applied:
        # the raw sum of qualifying tax transactions.
        return await self.pool.fetchval("""
        select sum(transactions.amount)
        from transactions
        join accounts on transactions.receiver = accounts.account_id
        where transactions.sender=$1
        and accounts.account_type=1
        and transactions.amount >= 5
        and transactions.taxreturn_used = false
        """, user.id)
    @taxreturn.command(name='query', aliases=['q'])
    async def taxreturn_check(self, ctx):
        """Check your tax return situation.
        Please, **PLEASE**, do 'j!help taxreturn' to
        understand how this works.
        Not reading the documentation then asking me
        how does it work will grant you a very angery girl,
        looking straight in your eyes, with fury in her eyes,
        wanting to kill you, with an AK-47.
        """
        total_avail = await self.txr_total(ctx.author)
        total_criteria = await self.txr_not_total(ctx.author)
        total_trans = await self.txr_transactions(ctx.author)
        em = discord.Embed(
            title='Tax return situation', color=discord.Color.gold())
        if not total_criteria:
            raise self.SayException("You don't have any transactions "
                                    "that meet criteria")
        em.add_field(
            name='Money that fits the criteria',
            value=f'`{round(total_criteria, 2)}JC`')
        em.add_field(
            name='Withdrawable money', value=f'`{round(total_avail, 2)}JC`')
        em.add_field(
            name='Tax transactions that meet criteria',
            value=f'{len(total_trans)}')
        await ctx.send(embed=em)
    @taxreturn.command(name='withdraw', aliases=['w'])
    async def taxreturn_withdraw(self, ctx):
        """Withdraw your available tax return money.
        This command has a cooldown of a week.
        """
        finish = await self.pool.fetchval("""
        select finish
        from taxreturn_cooldown
        where user_id = $1
        """, ctx.author.id)
        now = datetime.datetime.utcnow()
        if finish and finish > now:
            delta = finish - now
            raise self.SayException('\N{MANTELPIECE CLOCK}'
                                    f'You have to wait {fmt_tdelta(delta)}.')
        else:
            # expired (or absent) cooldown: clear it before proceeding
            await self.pool.execute("""
            delete from taxreturn_cooldown
            where user_id = $1
            """, ctx.author.id)
        transactions = await self.txr_transactions(ctx.author)
        success, error = 0, 0
        sent = decimal.Decimal(0)
        log.debug(f'[txr] processing {ctx.author}, ' f'{len(transactions)}')
        for trans in transactions:
            # apply the return rate (25% here — NOTE(review): the group
            # docstring says 10%; confirm the intended rate)
            # theres transactions.amount and accounts.amount
            # we do a filter to get the right one
            items = trans.items()
            t_amount = next(
                v for k, v in items if isinstance(v, decimal.Decimal))
            applied = t_amount * decimal.Decimal('0.25')
            # the reverse transaction, as tax return
            try:
                # NOTE(review): uses self.jcoin here but self.coins elsewhere
                # in this class — presumably aliases of the same cog; confirm.
                await self.jcoin.transfer(trans['receiver'], trans['sender'],
                                          applied)
                await self.pool.execute("""
                update transactions
                set taxreturn_used=true
                where idx=$1
                """, trans['idx'])
                success += 1
                sent += applied
            except self.coins.TransferError as err:
                log.exception('error on tax return reverse transfer op')
                await ctx.send('Error while transferring from '
                               f'`{self.jcoin.get_name(trans["receiver"])}` '
                               f'amount: `{applied}JC` '
                               f'`{err!r}`')
                error += 1
        sent = round(sent, 2)
        log.debug(f'[txr] {ctx.author}, {success} succ, '
                  f'{error} err, {sent}jc total')
        await ctx.send(f'{success} success transactions, '
                       f'{error} raised errors.\n'
                       f'You got `{sent}JC` from tax returns.')
        await self.pool.execute("""
        insert into taxreturn_cooldown (user_id, finish)
        values ($1, now() + interval '1 week')
        """, ctx.author.id)
def setup(bot):
    """Register the CoinsExt cog with the given bot."""
    bot.add_jose_cog(CoinsExt)
|
{
"content_hash": "14841a2c5112c5248bc03b4d7ab61ac9",
"timestamp": "",
"source": "github",
"line_count": 750,
"max_line_length": 79,
"avg_line_length": 34.797333333333334,
"alnum_prop": 0.5302705188137022,
"repo_name": "lnmds/jose",
"id": "5b4e71fe6ad1e82b4d80297f7444a5c8c9194aba",
"size": "26102",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ext/coins+.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "420261"
}
],
"symlink_target": ""
}
|
import datetime
import email.utils as eut
import json
import os
import re
from functools import wraps
import msrest.polling
from azure_serializer import AzureSerializer
from c7n_azure import utils, constants
from c7n_azure.session import Session
from c7n_azure.utils import ThreadHelper
from mock import patch
from msrest.pipeline import ClientRawResponse
from msrest.serialization import Model
from msrest.service_client import ServiceClient
from vcr_unittest import VCRTestCase
from c7n.resources import load_resources
from c7n.schema import generate
from c7n.testing import TestUtils
# Register all c7n resource types and capture the policy schema once at
# import time, for use across the test suite.
load_resources()
C7N_SCHEMA = generate()
# Stable placeholder IDs and keys substituted into recorded cassettes so that
# real subscription/tenant identifiers and secrets never reach source control
# (see the _replace_* helpers in AzureVCRBaseTest).
DEFAULT_SUBSCRIPTION_ID = 'ea42f556-5106-4743-99b0-c129bfa71a47'
CUSTOM_SUBSCRIPTION_ID = '00000000-5106-4743-99b0-c129bfa71a47'
DEFAULT_USER_OBJECT_ID = '00000000-0000-0000-0000-000000000002'
DEFAULT_TENANT_ID = '00000000-0000-0000-0000-000000000003'
DEFAULT_INSTRUMENTATION_KEY = '00000000-0000-0000-0000-000000000004'
DEFAULT_STORAGE_KEY = 'DEC0DEDITtVwMoyAuTz1LioKkC+gB/EpRlQKNIaszQEhVidjWyP1kLW1z+jo'\
                      '/MGFHKc+t+M20PxoraNCslng9w=='
# Canned payload substituted for AD Graph directoryObjects responses
# (see AzureVCRBaseTest._response_substitutions).
GRAPH_RESPONSE = {
    "value": [
        {
            "NOTE": "THIS RESPONSE FAKED BY AZURE_COMMON.PY",
            "odata.type": "Microsoft.DirectoryServices.User",
            "objectType": "User",
            "objectId": DEFAULT_USER_OBJECT_ID,
            "displayName": "John Doe",
            "mail": "john@doe.com",
            "refreshTokensValidFromDateTime": "2018-08-22T20:37:43Z",
            "userPrincipalName": "john@doe.com"
        }
    ]
}
# Canned payload substituted for Activity Log event responses.
ACTIVITY_LOG_RESPONSE = {
    "value": [
        {
            "caller": "john@doe.com",
            "id": "/subscriptions/ea42f556-5106-4743-99b0-c129bfa71a47/resourcegroups/"
                  "TEST_VM/providers/Microsoft.Compute/virtualMachines/cctestvm/events/"
                  "37bf930a-fbb8-4c8c-9cc7-057cc1805c04/ticks/636923208048336028",
            "operationName": {
                "value": "Microsoft.Compute/virtualMachines/write",
                "localizedValue": "Create or Update Virtual Machine"
            },
            "eventTimestamp": "2019-05-01T15:20:04.8336028Z"
        }
    ]
}
class AzureVCRBaseTest(VCRTestCase):
TEST_DATE = None
FILTERED_HEADERS = ['authorization',
'accept-encoding',
'client-request-id',
'retry-after',
'strict-transport-security',
'server',
'user-Agent',
'accept-language',
'connection',
'x-ms-client-request-id',
'x-ms-correlation-request-id',
'x-ms-keyvault-service-version',
'x-ms-keyvault-network-info',
'x-ms-keyvault-region',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-request-id',
'x-ms-routing-request-id',
'x-ms-gateway-service-instanceid',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-served-by',
'x-ms-cosmos-llsn',
'x-ms-last-state-change-utc',
'x-ms-xp-role',
'x-ms-gatewayversion',
'x-ms-global-committed-lsn',
'x-aspnet-version',
'x-content-type-options',
'x-powered-by',
'ocp-aad-diagnostics-server-name',
'ocp-aad-session-key',
'vary',
'pragma',
'transfer-encoding',
'expires',
'content-location']
def is_playback(self):
# You can't do this in setup because it is actually required by the base class
# setup (via our callbacks), but it is also not possible to do until the base class setup
# has completed initializing the cassette instance.
return not hasattr(self, 'cassette') or os.path.isfile(self.cassette._path)
def _get_cassette_name(self):
test_method = getattr(self, self._testMethodName)
name_override = getattr(test_method, 'cassette_name', None)
method_name = name_override or self._testMethodName
return '{0}.{1}.yaml'.format(self.__class__.__name__,
method_name)
def _get_vcr_kwargs(self):
return super(VCRTestCase, self)._get_vcr_kwargs(
before_record_request=self._request_callback,
before_record_response=self._response_callback,
decode_compressed_response=True
)
def _get_vcr(self, **kwargs):
myvcr = super(VCRTestCase, self)._get_vcr(**kwargs)
myvcr.register_matcher('azure-matcher', self._azure_matcher)
myvcr.match_on = ['azure-matcher', 'method']
myvcr.register_serializer('azure-json', AzureSerializer())
myvcr.serializer = 'azure-json'
myvcr.path_transformer = AzureVCRBaseTest._json_extension
# Block recording when using fake token (generally only used on build servers)
if os.environ.get(constants.ENV_ACCESS_TOKEN) == "fake_token":
myvcr.record_mode = 'none'
return myvcr
def _azure_matcher(self, r1, r2):
"""Replace all subscription ID's and ignore api-version"""
if [k for k in set(r1.query) if k[0] != 'api-version'] != [
k for k in set(r2.query) if k[0] != 'api-version']:
return False
r1_path = AzureVCRBaseTest._replace_subscription_id(r1.path)
r2_path = AzureVCRBaseTest._replace_subscription_id(r2.path)
r1_path = r1_path.replace('//', '/').lower()
r2_path = r2_path.replace('//', '/').lower()
return r1_path == r2_path
def _request_callback(self, request):
"""Modify requests before saving"""
request.uri = AzureVCRBaseTest._replace_subscription_id(request.uri)
request.uri = AzureVCRBaseTest._replace_tenant_id(request.uri)
if request.body:
request.body = b'mock_body'
# Request headers serve no purpose as only URI is read during a playback.
request.headers = None
if re.match('https://login.microsoftonline.com/([^/]+)/oauth2/token', request.uri):
return None
if re.match('https://login.microsoftonline.com/([^/]+)/oauth2/token', request.uri):
return None
return request
def _response_callback(self, response):
if self.is_playback():
if 'data' in response['body']:
body = json.dumps(response['body']['data'])
response['body']['string'] = body.encode('utf-8')
response['headers']['content-length'] = [str(len(body))]
return response
response['headers'] = {k.lower(): v for (k, v) in
response['headers'].items()
if k.lower() not in self.FILTERED_HEADERS}
content_type = response['headers'].get('content-type', (None,))[0]
if not content_type or 'application/json' not in content_type:
return response
body = response['body'].pop('string').decode('utf-8')
# Clean up subscription IDs and storage keys
body = AzureVCRBaseTest._replace_tenant_id(body)
body = AzureVCRBaseTest._replace_subscription_id(body)
body = AzureVCRBaseTest._replace_storage_keys(body)
body = AzureVCRBaseTest._replace_instrumentation_key(body)
try:
response['body']['data'] = json.loads(body)
except json.decoder.JSONDecodeError:
self.fail("AzureVCRBaseTest could not parse JSON response body "
"while attempting to record cassette. Body:\n%s" % body)
# Replace some API responses entirely
response = AzureVCRBaseTest._response_substitutions(response)
return response
@staticmethod
def _response_substitutions(response):
data = response['body']['data']
if isinstance(data, dict):
# Replace AD graph responses
odata_metadata = data.get('odata.metadata')
if odata_metadata and "directoryObjects" in odata_metadata:
response['body']['data'] = GRAPH_RESPONSE
return response
# Replace Activity Log API responses
value_array = data.get('value', [])
if value_array and \
isinstance(value_array[0], dict) and \
value_array[0].get('eventTimestamp'):
response['body']['data'] = ACTIVITY_LOG_RESPONSE
return response
if 'authorizations' in data:
response['body']['data']['authorizations'] = []
# Real resource type responses are critical to catching
# API version failures, but we can get rid of extra fields
# and save a lot of space
if 'resourceTypes' in data:
response['body']['data']['resourceTypes'] = \
[{
'resourceType': r['resourceType'],
'apiVersions': [next(iter(r['apiVersions']))]
} for r in data['resourceTypes']]
return response
@staticmethod
def _replace_subscription_id(s):
    """Scrub subscription GUIDs from *s*, keeping the surrounding context.

    Matches both URL path segments (``/subscriptions/<guid>``, URL-encoded
    or not) and JSON ``"subscription": "<guid>"`` fields, substituting the
    shared DEFAULT_SUBSCRIPTION_ID placeholder.
    """
    guid = r"[\da-zA-Z]{8}-([\da-zA-Z]{4}-){3}[\da-zA-Z]{12}"
    contexts = ['(/|%2F)?subscriptions(/|%2F)',
                '"subscription":\\s*"']
    alternatives = '|'.join('(%s)' % c for c in contexts)
    pattern = r"(?P<prefix>(%s))" % alternatives + guid
    return re.sub(pattern, r"\g<prefix>" + DEFAULT_SUBSCRIPTION_ID, s)
@staticmethod
def _replace_tenant_id(s):
    """Scrub tenant GUIDs from *s*, preserving the matched context prefix.

    Covers graph.windows.net URL segments and JSON ``tenantId``/``TenantId``
    fields; the GUID itself is replaced with DEFAULT_TENANT_ID.
    """
    guid = r"[\da-zA-Z]{8}-([\da-zA-Z]{4}-){3}[\da-zA-Z]{12}"
    contexts = ['(/|%2F)graph.windows.net(/|%2F)',
                '"(t|T)enantId":\\s*"']
    alternatives = '|'.join('(%s)' % c for c in contexts)
    pattern = r"(?P<prefix>(%s))" % alternatives + guid
    return re.sub(pattern, r"\g<prefix>" + DEFAULT_TENANT_ID, s)
@staticmethod
def _replace_storage_keys(s):
    """Mask base64-looking storage keys in *s* with DEFAULT_STORAGE_KEY."""
    # Cheap pre-filter: all usages of storage keys have the word "key"
    # somewhere, so skip the regex pass for unrelated bodies.
    if "key" not in s.lower():
        return s
    base64_value = r"(?P<prefix>=|\"|:)(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==)"
    return re.sub(base64_value, r"\g<prefix>" + DEFAULT_STORAGE_KEY, s)
@staticmethod
def _replace_instrumentation_key(s):
    """Mask App Insights instrumentation key GUIDs in *s*."""
    guid = r"[\da-zA-Z]{8}-([\da-zA-Z]{4}-){3}[\da-zA-Z]{12}"
    pattern = r'(?P<prefix>(("InstrumentationKey":\s*")))' + guid
    return re.sub(pattern, r"\g<prefix>" + DEFAULT_INSTRUMENTATION_KEY, s)
@staticmethod
def _json_extension(path):
# A simple transformer keeps the native
# cassette naming logic in place
return path[:-4] + "json"
class BaseTest(TestUtils, AzureVCRBaseTest):
    """Azure base testing class.

    Installs the patches every Azure test needs: frozen dates (so recorded
    URLs containing dates match on replay), disabled multi-threading, and
    - unless a test opts in via requires_arm_polling - a no-op LRO poller
    so tests never block on long-running ARM operations.
    """
    def __init__(self, *args, **kwargs):
        super(BaseTest, self).__init__(*args, **kwargs)
        # Flipped on by the requires_arm_polling class decorator for tests
        # that must actually wait on ARM long-running operations.
        self._requires_polling = False

    def setUp(self):
        super(BaseTest, self).setUp()
        ThreadHelper.disable_multi_threading = True
        # We always patch the date so URLs that involve dates match up
        self._utc_patch = patch.object(utils, 'utcnow', self.get_test_date)
        self._utc_patch.start()
        self.addCleanup(self._utc_patch.stop)
        self._now_patch = patch.object(utils, 'now', self.get_test_date)
        self._now_patch.start()
        self.addCleanup(self._now_patch.stop)
        if not self._requires_polling:
            # Patch Poller with constructor that always disables polling
            # This breaks blocking on long running operations (resource creation).
            self._lro_patch = patch.object(msrest.polling.LROPoller, '__init__', BaseTest.lro_init)
            self._lro_patch.start()
            self.addCleanup(self._lro_patch.stop)
        if self.is_playback():
            if self._requires_polling:
                # If using polling we need to monkey patch the timeout during playback
                # or we'll have long sleeps introduced into our test runs
                Session._old_client = Session.client
                Session.client = BaseTest.session_client_wrapper
                self.addCleanup(BaseTest.session_client_cleanup)
            if constants.ENV_ACCESS_TOKEN in os.environ:
                # Raw access tokens carry no tenant/subscription identity,
                # so pin both to the scrubbed defaults used in cassettes.
                self._tenant_patch = patch('c7n_azure.session.Session.get_tenant_id',
                                           return_value=DEFAULT_TENANT_ID)
                self._tenant_patch.start()
                self.addCleanup(self._tenant_patch.stop)
                self._subscription_patch = patch('c7n_azure.session.Session.get_subscription_id',
                                                 return_value=DEFAULT_SUBSCRIPTION_ID)
                self._subscription_patch.start()
                self.addCleanup(self._subscription_patch.stop)

    def get_test_date(self, tz=None):
        """Return a deterministic 'now' pinned to the cassette's date.

        Uses the Date header of the first recorded response when available,
        falling back to the wall clock; always normalized to 23:59:59 so
        repeated runs within a day produce identical URLs.
        """
        header_date = self.cassette.responses[0]['headers'].get('date') \
            if self.cassette.responses else None
        if header_date:
            test_date = datetime.datetime(*eut.parsedate(header_date[0])[:6])
        else:
            test_date = datetime.datetime.now()
        return test_date.replace(hour=23, minute=59, second=59, microsecond=0)

    @staticmethod
    def setup_account():
        # Find actual name of storage account provisioned in our test environment
        s = Session()
        client = s.client('azure.mgmt.storage.StorageManagementClient')
        accounts = list(client.storage_accounts.list())
        matching_account = [a for a in accounts if a.name.startswith("cctstorage")]
        return matching_account[0]

    @staticmethod
    def sign_out_patch():
        # Clears every Azure credential variable so code under test behaves
        # as if no principal is signed in.
        return patch.dict(os.environ,
                          {
                              constants.ENV_TENANT_ID: '',
                              constants.ENV_SUB_ID: '',
                              constants.ENV_CLIENT_ID: '',
                              constants.ENV_CLIENT_SECRET: ''
                          }, clear=True)

    @staticmethod
    def lro_init(self, client, initial_response, deserialization_callback, polling_method):
        """Replacement for msrest LROPoller.__init__ that forces NoPolling.

        Note: this is patched in as an instance __init__, so the first
        parameter really is the poller instance despite the @staticmethod.
        The supplied polling_method argument is deliberately ignored.
        """
        self._client = client if isinstance(client, ServiceClient) else client._client
        self._response = initial_response.response if \
            isinstance(initial_response, ClientRawResponse) else \
            initial_response
        self._callbacks = []  # type List[Callable]
        self._polling_method = msrest.polling.NoPolling()
        if isinstance(deserialization_callback, type) and \
                issubclass(deserialization_callback, Model):
            deserialization_callback = deserialization_callback.deserialize
        # Might raise a CloudError
        self._polling_method.initialize(self._client, self._response, deserialization_callback)
        self._thread = None
        self._done = None
        self._exception = None

    @staticmethod
    def session_client_cleanup():
        # Undo the playback-time wrapper installed in setUp.
        Session.client = Session._old_client

    @staticmethod
    def session_client_wrapper(self, client):
        """Wrap Session.client to zero LRO timeouts during playback."""
        client = Session._old_client(self, client)
        client.config.long_running_operation_timeout = 0
        return client
def arm_template(template):
    """Decorator: fail a test unless its ARM template file is present.

    Before the wrapped test runs, checks for ``<this dir>/templates/<template>``;
    if the file is missing the test is failed via the TestCase instance
    (the first positional argument).
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            path = os.path.dirname(__file__) + "/templates/" + template
            if os.path.isfile(path):
                return func(*args, **kwargs)
            return args[0].fail("ARM template {} is not found".format(path))
        return wrapper
    return decorator
def cassette_name(name):
    """Decorator that pins an explicit VCR cassette name on a test function."""
    def tag(func):
        # Annotate only; the function itself is returned unmodified.
        func.cassette_name = name
        return func
    return tag
def requires_arm_polling(cls):
    """Class decorator: mark every instance as needing real ARM polling."""
    # Keep a reference to the pre-decoration constructor so the wrapper can
    # delegate without recursing into itself.
    wrapped_init = cls.__init__

    def patched_init(self, *args, **kws):
        wrapped_init(self, *args, **kws)
        # Seen by BaseTest.setUp, which then skips the no-op LRO patch.
        self._requires_polling = True

    cls.__init__ = patched_init
    return cls
|
{
"content_hash": "92864e02c6efe79d8f14facc05b421fa",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 99,
"avg_line_length": 39.26603325415677,
"alnum_prop": 0.5705039017603291,
"repo_name": "ewbankkit/cloud-custodian",
"id": "7fb4f91b58ad105f3d988ef45bbb28a38e5c2c82",
"size": "17121",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/c7n_azure/tests/azure_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "145643"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "4913354"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
}
|
from .ortho_display_ascii import AsciiDisplay
from .ortho_display_image import ImageDisplay
|
{
"content_hash": "84b2779606f01bfe2c761160d25a3715",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 45,
"avg_line_length": 46,
"alnum_prop": 0.8478260869565217,
"repo_name": "VaysseB/PySimpleMazeGenerator",
"id": "096a9666d9e7c46d7e9724603f6cf512eb784ed1",
"size": "117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "maze/ortho/display/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22131"
}
],
"symlink_target": ""
}
|
"""
Example that trains an LSTM or GRU networks for sentiment analysis
Reference:
See J.Li et al, EMNLP2015 - http://arxiv.org/pdf/1503.00185v5.pdf
$ python examples/imdb_lstm.py -e 2 -eval 1 --rlayer_type lstm
"""
from neon.backends import gen_backend
from neon.data.dataloaders import load_imdb
from neon.data.dataiterator import ArrayIterator
from neon.data.text_preprocessing import pad_data
from neon.initializers import Uniform, GlorotUniform
from neon.layers import (GeneralizedCost, LSTM, Affine, Dropout, LookupTable,
RecurrentSum, Recurrent, DeepBiLSTM, DeepBiRNN)
from neon.models import Model
from neon.optimizers import Adagrad
from neon.transforms import Logistic, Tanh, Softmax, CrossEntropyMulti, Accuracy
from neon.callbacks.callbacks import Callbacks
from neon.util.argparser import NeonArgparser, extract_valid_args
# parse the command line arguments
parser = NeonArgparser(__doc__)
parser.add_argument('--rlayer_type', default='lstm',
choices=['bilstm', 'lstm', 'birnn', 'rnn'],
help='type of recurrent layer to use (lstm, bilstm, rnn, birnn)')
args = parser.parse_args(gen_be=False)
# hyperparameters from the reference
args.batch_size = 128
gradient_clip_value = 15
vocab_size = 20000
sentence_length = 128
embedding_dim = 128
hidden_size = 128
reset_cells = True
# setup backend
be = gen_backend(**extract_valid_args(args, gen_backend))
# make dataset
path = load_imdb(path=args.data_dir)
(X_train, y_train), (X_test, y_test), nclass = pad_data(path,
vocab_size=vocab_size,
sentence_length=sentence_length)
print "Vocab size - ", vocab_size
print "Sentence Length - ", sentence_length
print "# of train sentences", X_train.shape[0]
print "# of test sentence", X_test.shape[0]
train_set = ArrayIterator(X_train, y_train, nclass=2)
valid_set = ArrayIterator(X_test, y_test, nclass=2)
# weight initialization
uni = Uniform(low=-0.1/embedding_dim, high=0.1/embedding_dim)
g_uni = GlorotUniform()
if args.rlayer_type == 'lstm':
rlayer = LSTM(hidden_size, g_uni, activation=Tanh(),
gate_activation=Logistic(), reset_cells=True)
elif args.rlayer_type == 'bilstm':
rlayer = DeepBiLSTM(hidden_size, g_uni, activation=Tanh(), depth=1,
gate_activation=Logistic(), reset_cells=True)
elif args.rlayer_type == 'rnn':
rlayer = Recurrent(hidden_size, g_uni, activation=Tanh(), reset_cells=True)
elif args.rlayer_type == 'birnn':
rlayer = DeepBiRNN(hidden_size, g_uni, activation=Tanh(), depth=1, reset_cells=True)
layers = [
LookupTable(vocab_size=vocab_size, embedding_dim=embedding_dim, init=uni),
rlayer,
RecurrentSum(),
Dropout(keep=0.5),
Affine(2, g_uni, bias=g_uni, activation=Softmax())
]
model = Model(layers=layers)
cost = GeneralizedCost(costfunc=CrossEntropyMulti(usebits=True))
optimizer = Adagrad(learning_rate=0.01, gradient_clip_value=gradient_clip_value)
# configure callbacks
callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)
# train model
model.fit(train_set, optimizer=optimizer, num_epochs=args.epochs, cost=cost, callbacks=callbacks)
# eval model
print "Train Accuracy - ", 100 * model.eval(train_set, metric=Accuracy())
print "Test Accuracy - ", 100 * model.eval(valid_set, metric=Accuracy())
|
{
"content_hash": "5d31ff2b28bb008e069d11bb5f4aec34",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 97,
"avg_line_length": 35.65625,
"alnum_prop": 0.6958808063102542,
"repo_name": "dongjoon-hyun/neon",
"id": "061ef37e6f4c5cd531048fff3b41b1c005a52da2",
"size": "4186",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/imdb_lstm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6534"
},
{
"name": "C++",
"bytes": "67757"
},
{
"name": "CSS",
"bytes": "927996"
},
{
"name": "Cuda",
"bytes": "14937"
},
{
"name": "Makefile",
"bytes": "11069"
},
{
"name": "Python",
"bytes": "1560785"
}
],
"symlink_target": ""
}
|
from micropython import const
import struct
import bluetooth
# Advertising payloads are repeated packets of the following form:
#   1 byte data length (N + 1)
#   1 byte type (see constants below)
#   N bytes type-specific data
# The type codes below are GAP AD-type assigned numbers.
_ADV_TYPE_FLAGS = const(0x01)
_ADV_TYPE_NAME = const(0x09)  # Complete Local Name
_ADV_TYPE_UUID16_COMPLETE = const(0x3)
_ADV_TYPE_UUID32_COMPLETE = const(0x5)
_ADV_TYPE_UUID128_COMPLETE = const(0x7)
_ADV_TYPE_UUID16_MORE = const(0x2)
_ADV_TYPE_UUID32_MORE = const(0x4)
_ADV_TYPE_UUID128_MORE = const(0x6)
_ADV_TYPE_APPEARANCE = const(0x19)
# Generate a payload to be passed to gap_advertise(adv_data=...).
def advertising_payload(limited_disc=False, br_edr=False, name=None, services=None, appearance=0):
    """Build a BLE advertising payload as a bytearray of AD structures.

    Each field is emitted as <length><type><value>.  *name* may be str or
    bytes; *services* is an iterable of bluetooth.UUID; *appearance* is a
    16-bit GAP appearance code.
    """
    payload = bytearray()

    def _append(adv_type, value):
        nonlocal payload
        payload += struct.pack("BB", len(value) + 1, adv_type) + value

    _append(
        _ADV_TYPE_FLAGS,
        struct.pack("B", (0x01 if limited_disc else 0x02) + (0x18 if br_edr else 0x04)),
    )

    if name:
        # bytes + str concatenation raises TypeError, so encode str names;
        # bytes input is passed through unchanged (previous behavior).
        _append(_ADV_TYPE_NAME, name.encode() if isinstance(name, str) else name)

    if services:
        for uuid in services:
            b = bytes(uuid)
            if len(b) == 2:
                _append(_ADV_TYPE_UUID16_COMPLETE, b)
            elif len(b) == 4:
                _append(_ADV_TYPE_UUID32_COMPLETE, b)
            elif len(b) == 16:
                _append(_ADV_TYPE_UUID128_COMPLETE, b)

    # See org.bluetooth.characteristic.gap.appearance.xml
    if appearance:
        # "<H": appearance is an unsigned 16-bit value; the signed "<h"
        # raised struct.error for codes >= 0x8000 and packs identical
        # bytes for everything below that.
        _append(_ADV_TYPE_APPEARANCE, struct.pack("<H", appearance))

    return payload
def decode_field(payload, adv_type):
    """Return the value bytes of every AD structure of type *adv_type*.

    Walks the length-prefixed fields of an advertising payload; malformed
    trailing bytes are simply ignored.
    """
    found = []
    i = 0
    total = len(payload)
    while i + 1 < total:
        field_len = payload[i]
        if payload[i + 1] == adv_type:
            found.append(payload[i + 2 : i + field_len + 1])
        i += 1 + field_len
    return found
def decode_name(payload):
    """Extract the advertised Complete Local Name, or "" if absent."""
    names = decode_field(payload, _ADV_TYPE_NAME)
    if not names:
        return ""
    return str(names[0], "utf-8")
def decode_services(payload):
    """Return the list of advertised service UUIDs (16-, 32- and 128-bit)."""
    services = []
    for u in decode_field(payload, _ADV_TYPE_UUID16_COMPLETE):
        # "<H": UUID16 values are unsigned; the signed "<h" yielded negative
        # numbers for UUIDs >= 0x8000, which cannot construct a valid UUID.
        services.append(bluetooth.UUID(struct.unpack("<H", u)[0]))
    for u in decode_field(payload, _ADV_TYPE_UUID32_COMPLETE):
        # "<I": a 32-bit UUID field is 4 bytes.  The original "<d" asked
        # struct for an 8-byte double, so this branch always raised
        # struct.error (and would have produced a float, not an int).
        services.append(bluetooth.UUID(struct.unpack("<I", u)[0]))
    for u in decode_field(payload, _ADV_TYPE_UUID128_COMPLETE):
        services.append(bluetooth.UUID(u))
    return services
def demo():
    """Build a sample payload and round-trip it through the decoders."""
    payload = advertising_payload(
        name="micropython",
        services=[bluetooth.UUID(0x181A), bluetooth.UUID("6E400001-B5A3-F393-E0A9-E50E24DCCA9E")],
    )
    print(payload)
    print(decode_name(payload))
    print(decode_services(payload))


if __name__ == "__main__":
    demo()
|
{
"content_hash": "d63135be64392818cd752ccc969440f2",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 98,
"avg_line_length": 29.24175824175824,
"alnum_prop": 0.6260804208944006,
"repo_name": "iabdalkader/openmv",
"id": "e520af57cd58798efc7154c436692330cfa70779",
"size": "3847",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/libraries/ble_advertising.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "569030"
},
{
"name": "C",
"bytes": "100413378"
},
{
"name": "C++",
"bytes": "97780"
},
{
"name": "CMake",
"bytes": "10173"
},
{
"name": "Dockerfile",
"bytes": "874"
},
{
"name": "Makefile",
"bytes": "72669"
},
{
"name": "Python",
"bytes": "1197447"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management import *
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions.default import default
from slider import slider
from ambari_commons import OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from resource_management.core.exceptions import ClientComponentHasNoStatus
class SliderClient(Script):
    """Base Ambari command script for the Slider client component."""
    def status(self, env):
        # Client components have no daemon process, so there is no status
        # to report; Ambari expects exactly this exception from clients.
        raise ClientComponentHasNoStatus()
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class SliderClientLinux(SliderClient):
    """Linux implementation of the Slider client commands."""
    def get_component_name(self):
        # Name used by the stack-select tooling to identify this component.
        return "slider-client"

    def pre_upgrade_restart(self, env, upgrade_type=None):
        """Repoint slider (and hadoop client) configs/binaries at the
        target stack version ahead of a rolling-upgrade restart."""
        import params
        env.set_params(params)

        if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
            conf_select.select(params.stack_name, "slider", params.version)
            stack_select.select("slider-client", params.version)

            # also set all of the hadoop clients since slider client is upgraded as
            # part of the final "CLIENTS" group and we need to ensure that
            # hadoop-client is also set
            conf_select.select(params.stack_name, "hadoop", params.version)
            stack_select.select("hadoop-client", params.version)

    def install(self, env):
        # Install OS packages, then lay down configuration files.
        self.install_packages(env)
        self.configure(env)

    def configure(self, env):
        import params
        env.set_params(params)
        slider()
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class SliderClientWindows(SliderClient):
    """Windows implementation of the Slider client commands."""
    def install(self, env):
        import params
        # Only run package installation when slider is not already present.
        if params.slider_home is None:
            self.install_packages(env)
        # NOTE(review): configure() is not defined on this class or on
        # SliderClient as visible here - presumably provided by the Script
        # base class or OS-family dispatch; confirm before relying on it.
        self.configure(env)
if __name__ == "__main__":
SliderClient().execute()
|
{
"content_hash": "2b6214000759db98bebeff77315c3fc5",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 92,
"avg_line_length": 37.263888888888886,
"alnum_prop": 0.7711516958628402,
"repo_name": "alexryndin/ambari",
"id": "08c856963dff740be3a18560de3102c27e0239a2",
"size": "2705",
"binary": false,
"copies": "2",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/ADH/1.5/services/SLIDER/package/scripts/slider_client.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
"""
Weather: A simple weather plugin.
"""
import sys
if sys.version_info <= (3, 6):
raise RuntimeError("This plugin requires Python 3.6 or above.")
import supybot
from supybot import world
# Use this for the version of this plugin.
__version__ = "15092021"
# XXX Replace this with an appropriate author or supybot.Author instance.
__author__ = supybot.Author('Barry Suridge', 'Alcheri',
'barry.suridge@outlook.com')
# This is a dictionary mapping supybot.Author instances to lists of
# contributions.
__contributors__ = {}
# This is a url where the most recent plugin package can be downloaded.
__url__ = 'https://github.com/Alcheri/Plugins/tree/master/Weather'
from . import config
from . import plugin
if sys.version_info >= (3, 4):
from importlib import reload
else:
from imp import reload
# In case we're being reloaded.
reload(config)
reload(plugin)
# Add more reloads here if you add third-party modules and want them to be
# reloaded when this plugin is reloaded. Don't forget to import them as well!
if world.testing:
from . import test
Class = plugin.Class
configure = config.configure
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
{
"content_hash": "72c0d9440a0d8d66103bb5780b1b692c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 27.431818181818183,
"alnum_prop": 0.7174813587406794,
"repo_name": "Alcheri/Plugins",
"id": "3a3bc405f28312e2a9b6532750729cc2a95bb18f",
"size": "2789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Weather/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "86460"
}
],
"symlink_target": ""
}
|
from pymongo import MongoClient
from patterny.bugzilla.comment import Comment
from patterny.bugzilla.bug import Bug
import abc
class MongoDB(object):
    """Base DAO over a MongoDB collection.

    Subclasses supply collection() (the collection name) and convert()
    (raw-cursor to domain-object mapping).

    NOTE(review): the @abc.abstractmethod decorators below are not enforced
    because this class does not use abc.ABCMeta as its metaclass - MongoDB
    can still be instantiated directly.  Also, the `client` property opens a
    brand-new MongoClient on every access (and `db` accesses it each time),
    which creates a connection pool per call; confirm this is intended.
    """
    def __init__(self, config):
        self.url = config.mongo_url
        self.db = config.mongo_db

    @property
    def url(self):
        return self._url

    @url.setter
    def url(self, url):
        self._url = url

    @property
    def db(self):
        # Resolves the database through a fresh client (see class note).
        return self.client[self._db]

    @db.setter
    def db(self, db):
        # Stores only the database *name*; the handle is built on access.
        self._db = db

    @property
    def client(self):
        return MongoClient(self.url)

    @abc.abstractmethod
    def collection(self):
        # Subclasses: return the collection name as a string.
        return

    @abc.abstractmethod
    def convert(self, cursor):
        # Subclasses: map a pymongo cursor to domain objects.
        return

    def insert(self, data):
        """Insert one document; returns its inserted _id."""
        collection = self.collection()
        result = self.db[collection].insert_one(data)
        return result.inserted_id

    def insert_many(self, data):
        """Insert many documents; returns the list of inserted _ids."""
        collection = self.collection()
        result = self.db[collection].insert_many(data)
        return result.inserted_ids

    def find(self, query=None):
        """Query the collection (all documents when query is None) and
        return the convert()-ed results."""
        collection = self.collection()
        if query:
            cursor = self.db[collection].find(query)
        else:
            cursor = self.db[collection].find()
        return self.convert(cursor)
class Bugs(MongoDB):
    """DAO for the 'bugs' collection; raw documents become Bug objects."""

    def __init__(self, config):
        super(Bugs, self).__init__(config)

    def collection(self):
        return 'bugs'

    def convert(self, cursor):
        # A falsy cursor maps to None (not an empty list), matching callers
        # that distinguish "no result set" from "empty result set".
        return [Bug(document) for document in cursor] if cursor else None
class Comments(MongoDB):
    """DAO for the 'comments' collection; raw documents become Comment objects."""

    def __init__(self, config):
        super(Comments, self).__init__(config)

    def collection(self):
        return 'comments'

    def convert(self, cursor):
        # Falsy cursor -> None, mirroring the Bugs DAO convention.
        return [Comment(document) for document in cursor] if cursor else None
|
{
"content_hash": "1f1b694b0cdc6f4c12ef48e4b7b4348e",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 54,
"avg_line_length": 22.072289156626507,
"alnum_prop": 0.5960698689956332,
"repo_name": "marquesarthur/BugAnalysisRecommender",
"id": "3469e3a98fd735f5f81b5b5c8bb8641cbaa8d42c",
"size": "1832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patterny/patterny/db/dao.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "16154"
},
{
"name": "Python",
"bytes": "529600"
}
],
"symlink_target": ""
}
|
'''
InfPcdObject
'''
import os
import re
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library import DataType as DT
from Library.Misc import Sdict
from Library.Misc import GetHelpStringByRemoveHashKey
from Library.ParserValidate import IsValidPcdType
from Library.ParserValidate import IsValidCVariableName
from Library.ParserValidate import IsValidPcdValue
from Library.ParserValidate import IsValidArch
from Library.CommentParsing import ParseComment
from Library.String import GetSplitValueList
from Library.String import IsHexDigitUINT32
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Parser.InfAsBuiltProcess import GetPackageListInfo
from Parser.DecParser import Dec
from Object.Parser.InfPackagesObject import InfPackageItem
def ValidateArch(ArchItem, PcdTypeItem1, LineNo, SupArchDict, SupArchList):
    """Validate an arch string for one PCD section key and accumulate it.

    Non-FeaturePcd sections get their (space-separated) arch list validated
    and stored in SupArchDict keyed by PCD type; FeaturePcd sections have
    the raw arch appended to SupArchList.  Returns the updated pair.
    """
    #
    # Validate Arch
    #
    if (ArchItem == '' or ArchItem == None):
        ArchItem = 'COMMON'

    # TODO(review): `PcdTypeItem1.upper` is the bound method object, not a
    # call - comparing it to a string is always True, so the else branch is
    # unreachable.  The same `.upper` (missing parentheses) pattern exists in
    # InfPcdObject.SetPcds; both sites must be fixed together or the
    # SupArchDict/SupArchList handoff between them breaks.
    if PcdTypeItem1.upper != DT.TAB_INF_FEATURE_PCD.upper():
        ArchList = GetSplitValueList(ArchItem, ' ')
        for ArchItemNew in ArchList:
            if not IsValidArch(ArchItemNew):
                Logger.Error("InfParser",
                             ToolError.FORMAT_INVALID,
                             ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID%(ArchItemNew),
                             File=GlobalData.gINF_MODULE_NAME,
                             Line=LineNo,
                             ExtraData=ArchItemNew)
        SupArchDict[PcdTypeItem1] = ArchList
    else:
        SupArchList.append(ArchItem)

    return SupArchList, SupArchDict
def ParsePcdComment(CommentList, PcdTypeItem, PcdItemObj):
    """Parse the comment lines attached to a PCD into usage/help entries.

    Each produced InfPcdItemCommentContent pairs a usage token with its help
    text; the list is stored on PcdItemObj via SetHelpStringList.  FeaturePcd
    comments are collapsed into a single CONSUMES entry.

    BlockFlag appears to encode the comment-block state machine (inferred
    from the code flow; confirm against the UPT comment spec):
      -1 = no block open, 1/2 = one/two generic lines pending,
       3 = flush pending generic text plus the current usage line,
       4 = flush the current line as a complete entry.
    """
    CommentInsList = []
    PreUsage = None
    PreHelpText = ''
    BlockFlag = -1
    FFEHelpText = ''
    CommentItemHelpText = ''
    Count = 0
    for CommentItem in CommentList:
        Count = Count + 1
        CommentItemUsage, CommentType, CommentString, CommentItemHelpText = ParseComment(CommentItem,
                                                                                         DT.ALL_USAGE_TOKENS,
                                                                                         {},
                                                                                         [],
                                                                                         False)
        # CommentType/CommentString are unpacked but deliberately unused.
        if CommentType and CommentString:
            pass

        # FeaturePcd: force CONSUMES and accumulate all lines into one
        # help string, only emitting on the final comment line.
        if PcdTypeItem == 'FeaturePcd':
            CommentItemUsage = DT.USAGE_ITEM_CONSUMES
            if CommentItemHelpText == None:
                CommentItemHelpText = ''
            if Count == 1:
                FFEHelpText = CommentItemHelpText
            else:
                FFEHelpText = FFEHelpText + DT.END_OF_LINE + CommentItemHelpText
            if Count == len(CommentList):
                CommentItemHelpText = FFEHelpText
                BlockFlag = 4
            else:
                continue

        if CommentItemHelpText == None:
            CommentItemHelpText = ''
            if Count == len(CommentList) and CommentItemUsage == DT.ITEM_UNDEFINED:
                CommentItemHelpText = DT.END_OF_LINE

        # Close out a pending generic block when the last line is reached.
        if Count == len(CommentList) and (BlockFlag == 1 or BlockFlag == 2):
            if CommentItemUsage == DT.ITEM_UNDEFINED:
                BlockFlag = 4
            else:
                BlockFlag = 3
        elif BlockFlag == -1 and Count == len(CommentList):
            BlockFlag = 4

        # Advance the state machine for non-final lines.
        if BlockFlag == -1 or BlockFlag == 1 or BlockFlag == 2:
            if CommentItemUsage == DT.ITEM_UNDEFINED:
                if BlockFlag == -1:
                    BlockFlag = 1
                elif BlockFlag == 1:
                    BlockFlag = 2
            else:
                if BlockFlag == 1 or BlockFlag == 2:
                    BlockFlag = 3
                elif BlockFlag == -1:
                    BlockFlag = 4

        #
        # Combine two comment line if they are generic comment
        #
        if CommentItemUsage == PreUsage == DT.ITEM_UNDEFINED:
            CommentItemHelpText = PreHelpText + DT.END_OF_LINE + CommentItemHelpText
            PreHelpText = CommentItemHelpText

        if BlockFlag == 4:
            # Emit the current line as a complete usage/help entry.
            CommentItemIns = InfPcdItemCommentContent()
            CommentItemIns.SetUsageItem(CommentItemUsage)
            CommentItemIns.SetHelpStringItem(CommentItemHelpText)
            CommentInsList.append(CommentItemIns)

            BlockFlag = -1
            PreUsage = None
            PreHelpText = ''

        elif BlockFlag == 3:
            #
            # Add previous help string
            #
            CommentItemIns = InfPcdItemCommentContent()
            CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
            if PreHelpText == '' or PreHelpText.endswith(DT.END_OF_LINE):
                PreHelpText += DT.END_OF_LINE
            CommentItemIns.SetHelpStringItem(PreHelpText)
            CommentInsList.append(CommentItemIns)
            #
            # Add Current help string
            #
            CommentItemIns = InfPcdItemCommentContent()
            CommentItemIns.SetUsageItem(CommentItemUsage)
            CommentItemIns.SetHelpStringItem(CommentItemHelpText)
            CommentInsList.append(CommentItemIns)

            BlockFlag = -1
            PreUsage = None
            PreHelpText = ''

        else:
            # Keep accumulating: remember this line for the next iteration.
            PreUsage = CommentItemUsage
            PreHelpText = CommentItemHelpText

    PcdItemObj.SetHelpStringList(CommentInsList)
    return PcdItemObj
class InfPcdItemCommentContent():
    """Holder for one parsed PCD comment: a usage token plus its help text."""

    def __init__(self):
        # Usage token, e.g. "## SOMETIMES_CONSUMES ## HelpString".
        self.UsageItem = ''
        # Free-form help string attached to the usage.
        self.HelpStringItem = ''

    def GetUsageItem(self):
        return self.UsageItem

    def SetUsageItem(self, UsageItem):
        self.UsageItem = UsageItem

    def GetHelpStringItem(self):
        return self.HelpStringItem

    def SetHelpStringItem(self, HelpStringItem):
        self.HelpStringItem = HelpStringItem
## InfPcdItem
#
# This class defined Pcd item used in Module files
#
# @param CName:                Input value for CName, default is ''
# @param Token:                Input value for Token, default is ''
# @param TokenSpaceGuidCName:  Input value for TokenSpaceGuidCName, default
#                              is ''
# @param DatumType:            Input value for DatumType, default is ''
# @param MaxDatumSize:         Input value for MaxDatumSize, default is ''
# @param DefaultValue:         Input value for DefaultValue, default is ''
# @param ItemType:             Input value for ItemType, default is ''
# @param ValidUsage:           Input value for ValidUsage, default is []
# @param SkuInfoList:          Input value for SkuInfoList, default is {}
# @param SupModuleList:        Input value for SupModuleList, default is []
#
class InfPcdItem():
    """Value object describing one PCD entry from an INF module file.

    Pure data holder: the attributes initialized below are populated via
    the mechanical Set*/Get* accessors that follow.
    """
    def __init__(self):
        self.CName = ''                  # PCD C variable name
        self.Token = ''                  # token number (from the DEC)
        self.TokenSpaceGuidCName = ''    # token space GUID C name
        self.TokenSpaceGuidValue = ''    # token space GUID value
        self.DatumType = ''              # e.g. UINT32 / BOOLEAN / VOID*
        self.MaxDatumSize = ''
        self.DefaultValue = ''
        self.Offset = ''
        self.ValidUsage = ''
        self.ItemType = ''               # PCD section type (FeaturePcd, ...)
        self.SupModuleList = []
        self.HelpStringList = []         # InfPcdItemCommentContent entries
        self.FeatureFlagExp = ''
        self.SupArchList = []
        self.PcdErrorsList = []

    def SetCName(self, CName):
        self.CName = CName

    def GetCName(self):
        return self.CName

    def SetToken(self, Token):
        self.Token = Token

    def GetToken(self):
        return self.Token

    def SetTokenSpaceGuidCName(self, TokenSpaceGuidCName):
        self.TokenSpaceGuidCName = TokenSpaceGuidCName

    def GetTokenSpaceGuidCName(self):
        return self.TokenSpaceGuidCName

    def SetTokenSpaceGuidValue(self, TokenSpaceGuidValue):
        self.TokenSpaceGuidValue = TokenSpaceGuidValue

    def GetTokenSpaceGuidValue(self):
        return self.TokenSpaceGuidValue

    def SetDatumType(self, DatumType):
        self.DatumType = DatumType

    def GetDatumType(self):
        return self.DatumType

    def SetMaxDatumSize(self, MaxDatumSize):
        self.MaxDatumSize = MaxDatumSize

    def GetMaxDatumSize(self):
        return self.MaxDatumSize

    def SetDefaultValue(self, DefaultValue):
        self.DefaultValue = DefaultValue

    def GetDefaultValue(self):
        return self.DefaultValue

    def SetPcdErrorsList(self, PcdErrorsList):
        self.PcdErrorsList = PcdErrorsList

    def GetPcdErrorsList(self):
        return self.PcdErrorsList

    def SetItemType(self, ItemType):
        self.ItemType = ItemType

    def GetItemType(self):
        return self.ItemType

    def SetSupModuleList(self, SupModuleList):
        self.SupModuleList = SupModuleList

    def GetSupModuleList(self):
        return self.SupModuleList

    def SetHelpStringList(self, HelpStringList):
        self.HelpStringList = HelpStringList

    def GetHelpStringList(self):
        return self.HelpStringList

    def SetFeatureFlagExp(self, FeatureFlagExp):
        self.FeatureFlagExp = FeatureFlagExp

    def GetFeatureFlagExp(self):
        return self.FeatureFlagExp

    def SetSupportArchList(self, ArchList):
        self.SupArchList = ArchList

    def GetSupportArchList(self):
        return self.SupArchList

    def SetOffset(self, Offset):
        self.Offset = Offset

    def GetOffset(self):
        return self.Offset
## InfPcdObject
#
# Container for all PCD entries parsed from one INF file, keyed by
# (PCD section type, item) in an ordered Sdict.
#
class InfPcdObject():
    def __init__(self, FileName):
        self.Pcds = Sdict()
        self.FileName = FileName

    def SetPcds(self, PcdContent, KeysList = None, PackageInfo = None):
        """Validate and store PCD entries for the given section keys.

        KeysList items are (PcdType, Arch, LineNo) tuples; PcdContent items
        are (fields, comment-list, current-line) tuples.  Binary (as-built)
        INFs are delegated to SetAsBuildPcds.  Returns True on success,
        False on validation failure (most errors Logger.Error out instead).
        """
        if GlobalData.gIS_BINARY_INF:
            self.SetAsBuildPcds(PcdContent, KeysList, PackageInfo)
            return True
        #
        # Validate Arch
        #
        SupArchList = []
        SupArchDict = {}
        PcdTypeItem = ''
        for (PcdTypeItem1, ArchItem, LineNo) in KeysList:
            SupArchList, SupArchDict = ValidateArch(ArchItem, PcdTypeItem1, LineNo, SupArchDict, SupArchList)
            #
            # Validate PcdType
            #
            if (PcdTypeItem1 == '' or PcdTypeItem1 == None):
                return False
            else:
                if not IsValidPcdType(PcdTypeItem1):
                    Logger.Error("InfParser",
                                 ToolError.FORMAT_INVALID,
                                 ST.ERR_INF_PARSER_PCD_SECTION_TYPE_ERROR%(DT.PCD_USAGE_TYPE_LIST_OF_MODULE),
                                 File=GlobalData.gINF_MODULE_NAME,
                                 Line=LineNo,
                                 ExtraData=PcdTypeItem1)
                    return False

            PcdTypeItem = PcdTypeItem1

            for PcdItem in PcdContent:
                PcdItemObj = InfPcdItem()
                CommentList = PcdItem[1]
                CurrentLineOfPcdItem = PcdItem[2]
                PcdItem = PcdItem[0]

                # Attach parsed usage/help comments, or a single
                # undefined-usage placeholder when none were written.
                if CommentList != None and len(CommentList) != 0:
                    PcdItemObj = ParsePcdComment(CommentList, PcdTypeItem, PcdItemObj)
                else:
                    CommentItemIns = InfPcdItemCommentContent()
                    CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
                    PcdItemObj.SetHelpStringList([CommentItemIns])

                # A PCD line has 1-3 '|'-separated fields:
                # name [| value [| feature-flag-expression]]
                if len(PcdItem) >= 1 and len(PcdItem) <= 3:
                    PcdItemObj = SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj)

                if len(PcdItem) >= 2 and len(PcdItem) <= 3:
                    #
                    # Contain PcdName and Value, validate value.
                    #
                    if IsValidPcdValue(PcdItem[1]) or PcdItem[1].strip() == "":
                        PcdItemObj.SetDefaultValue(PcdItem[1])
                    else:
                        Logger.Error("InfParser",
                                     ToolError.FORMAT_INVALID,
                                     ST.ERR_INF_PARSER_PCD_VALUE_INVALID,
                                     File=CurrentLineOfPcdItem[2],
                                     Line=CurrentLineOfPcdItem[1],
                                     ExtraData=PcdItem[1])

                if len(PcdItem) == 3:
                    #
                    # Contain PcdName, value, and FeatureFlag express
                    #
                    #
                    # Validate Feature Flag Express
                    #
                    if PcdItem[2].strip() == '':
                        Logger.Error("InfParser",
                                     ToolError.FORMAT_INVALID,
                                     ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
                                     File=CurrentLineOfPcdItem[2],
                                     Line=CurrentLineOfPcdItem[1],
                                     ExtraData=CurrentLineOfPcdItem[0])
                    #
                    # Validate FFE
                    #
                    FeatureFlagRtv = IsValidFeatureFlagExp(PcdItem[2].strip())
                    if not FeatureFlagRtv[0]:
                        Logger.Error("InfParser",
                                     ToolError.FORMAT_INVALID,
                                     ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID%(FeatureFlagRtv[1]),
                                     File=CurrentLineOfPcdItem[2],
                                     Line=CurrentLineOfPcdItem[1],
                                     ExtraData=CurrentLineOfPcdItem[0])
                    PcdItemObj.SetFeatureFlagExp(PcdItem[2])

                if len(PcdItem) < 1 or len(PcdItem) > 3:
                    Logger.Error("InfParser",
                                 ToolError.FORMAT_INVALID,
                                 ST.ERR_INF_PARSER_PCD_SECTION_CONTENT_ERROR,
                                 File=CurrentLineOfPcdItem[2],
                                 Line=CurrentLineOfPcdItem[1],
                                 ExtraData=CurrentLineOfPcdItem[0])
                    return False

                # TODO(review): `.upper` is the bound method, not a call, so
                # this condition is always True - paired with the identical
                # bug in ValidateArch; fix both sites together so the
                # SupArchDict/SupArchList handoff stays consistent.
                if PcdTypeItem.upper != DT.TAB_INF_FEATURE_PCD.upper():
                    PcdItemObj.SetSupportArchList(SupArchDict[PcdTypeItem])
                else:
                    PcdItemObj.SetSupportArchList(SupArchList)

                # Python 2 idiom (has_key); group items per (type, item) key.
                if self.Pcds.has_key((PcdTypeItem, PcdItemObj)):
                    PcdsList = self.Pcds[PcdTypeItem, PcdItemObj]
                    PcdsList.append(PcdItemObj)
                    self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
                else:
                    PcdsList = []
                    PcdsList.append(PcdItemObj)
                    self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList

        return True

    def SetAsBuildPcds(self, PcdContent, KeysList = None, PackageInfo = None):
        """Store PCD entries for a binary (as-built) INF.

        Help text is the concatenation of the raw comment lines; value,
        datum type, max size and token are recovered from the declaring
        DEC package via SetValueDatumTypeMaxSizeToken.
        """
        for PcdItem in PcdContent:
            PcdItemObj = InfPcdItem()
            CommentList = PcdItem[1]
            CurrentLineOfPcdItem = PcdItem[2]
            PcdItem = PcdItem[0]
            CommentString = ''
            for CommmentLine in CommentList:
                CommentString += GetHelpStringByRemoveHashKey(CommmentLine)
            PcdItemObj.SetHelpStringList(CommentString)
            PcdItemObj.SetItemType(KeysList[0][0])
            #
            # Set PcdTokenSpaceCName and CName
            #
            PcdItemObj = SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj)
            #
            # Set Value/DatumType/MaxDatumSize/Token
            #
            PcdItemObj = SetValueDatumTypeMaxSizeToken(PcdItem,
                                                       CurrentLineOfPcdItem,
                                                       PcdItemObj,
                                                       KeysList[0][1],
                                                       PackageInfo)
            PcdTypeItem = KeysList[0][0]
            if self.Pcds.has_key((PcdTypeItem, PcdItemObj)):
                PcdsList = self.Pcds[PcdTypeItem, PcdItemObj]
                PcdsList.append(PcdItemObj)
                self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
            else:
                PcdsList = []
                PcdsList.append(PcdItemObj)
                self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList

    def GetPcds(self):
        return self.Pcds
def ParserPcdInfoInDec(String):
    """
    Split a DEC PCD declaration on the value separator (at most 4 fields)
    and return the (DatumType, Token) pair.
    """
    TokenFields = GetSplitValueList(String, DT.TAB_VALUE_SPLIT, 3)
    #
    # DatumType, Token
    #
    return TokenFields[2], TokenFields[3]
def SetValueDatumTypeMaxSizeToken(PcdItem, CurrentLineOfPcdItem, PcdItemObj, Arch, PackageInfo = None):
    """
    Populate an as-built PCD object with its Token and DatumType (looked up in
    the declaring DEC file), the token-space GUID value, the validated default
    value, and - for patchable PCDs - the patch offset.

    @param PcdItem: split entry; [0] PCD name, [1] value, [2] offset (PatchPcd)
    @param CurrentLineOfPcdItem: (LineContent, LineNo, FileName) for error reporting
    @param PcdItemObj: InfPcdItem instance to fill in
    @param Arch: architecture string the current PCD section applies to
    @param PackageInfo: pre-built package list; parsed from the INF file when None
    @return: the populated PcdItemObj; fatal problems go through Logger.Error
    """
    #
    # Package information not been generated currently, we need to parser INF file to get information.
    #
    if not PackageInfo:
        PackageInfo = []
        InfFileName = CurrentLineOfPcdItem[2]
        PackageInfoList = GetPackageListInfo(InfFileName, GlobalData.gWORKSPACE, -1)
        for PackageInfoListItem in PackageInfoList:
            PackageInfoIns = InfPackageItem()
            PackageInfoIns.SetPackageName(PackageInfoListItem)
            PackageInfo.append(PackageInfoIns)
    # NOTE(review): this flag is checked below to stop the package scan early,
    # but it is never assigned True anywhere, so every package is always
    # scanned and a later DEC can overwrite earlier results - confirm intent.
    PcdInfoInDecHasFound = False
    for PackageItem in PackageInfo:
        if PcdInfoInDecHasFound:
            break
        PackageName = PackageItem.PackageName
        #
        # Open DEC file to get information
        #
        FullFileName = os.path.normpath(os.path.realpath(os.path.join(GlobalData.gWORKSPACE, PackageName)))
        DecParser = Dec(FullFileName)
        #
        # Find PCD information.
        #
        DecPcdsDict = DecParser.GetPcdSectionObject().ValueDict
        for Key in DecPcdsDict.keys():
            # Dynamic-ex PCDs: copy Token and DatumType from a DEC entry whose
            # section architecture matches this one (or is COMMON).
            if (Key[0] == 'PCDSDYNAMICEX' and PcdItemObj.GetItemType() == 'PcdEx') and \
            (Key[1] == 'COMMON' or Key[1] == Arch):
                for PcdInDec in DecPcdsDict[Key]:
                    if PcdInDec.TokenCName == PcdItemObj.CName and \
                    PcdInDec.TokenSpaceGuidCName == PcdItemObj.TokenSpaceGuidCName:
                        PcdItemObj.SetToken(PcdInDec.TokenValue)
                        PcdItemObj.SetDatumType(PcdInDec.DatumType)
                        PcdItemObj.SetSupportArchList([Arch])
            # Patchable-in-module PCDs: same lookup, plus a MaxDatumSize for
            # VOID* values derived from the number of bytes in the value list.
            if (Key[0] == 'PCDSPATCHABLEINMODULE' and PcdItemObj.GetItemType() == 'PatchPcd') and \
            (Key[1] == 'COMMON' or Key[1] == Arch):
                for PcdInDec in DecPcdsDict[Key]:
                    if PcdInDec.TokenCName == PcdItemObj.CName and \
                    PcdInDec.TokenSpaceGuidCName == PcdItemObj.TokenSpaceGuidCName:
                        PcdItemObj.SetToken(PcdInDec.TokenValue)
                        PcdItemObj.SetDatumType(PcdInDec.DatumType)
                        PcdItemObj.SetSupportArchList([Arch])
                        if PcdItemObj.GetDatumType() == 'VOID*':
                            # Max size = count of comma-separated items in the value.
                            PcdItemObj.SetMaxDatumSize('%s'%(len(GetSplitValueList(PcdItem[1], DT.TAB_COMMA_SPLIT))))
        # Resolve the token-space GUID C name to its GUID string from the
        # DEC [Guids] section of a matching (or COMMON) architecture.
        DecGuidsDict = DecParser.GetGuidSectionObject().ValueDict
        for Key in DecGuidsDict.keys():
            if Key == 'COMMON' or Key == Arch:
                for GuidInDec in DecGuidsDict[Key]:
                    if GuidInDec.GuidCName == PcdItemObj.TokenSpaceGuidCName:
                        PcdItemObj.SetTokenSpaceGuidValue(GuidInDec.GuidString)
    #
    # Validate Value.
    #
    if ValidatePcdValueOnDatumType(PcdItem[1], PcdItemObj.GetDatumType()):
        PcdItemObj.SetDefaultValue(PcdItem[1])
    else:
        Logger.Error("InfParser",
                     ToolError.FORMAT_INVALID,
                     ST.ERR_ASBUILD_PCD_VALUE_INVALID%("\"" + PcdItem[1] + "\"", "\"" +
                     PcdItemObj.GetDatumType() + "\""),
                     File=CurrentLineOfPcdItem[2],
                     Line=CurrentLineOfPcdItem[1],
                     ExtraData=CurrentLineOfPcdItem[0])
    #
    # validate offset
    #
    if PcdItemObj.GetItemType().upper() == DT.TAB_INF_PATCH_PCD.upper():
        # The patch offset must fit in a UINT32 hex literal.
        if not IsHexDigitUINT32(PcdItem[2]):
            Logger.Error("InfParser",
                         ToolError.FORMAT_INVALID,
                         ST.ERR_ASBUILD_PCD_OFFSET_FORMAT_INVALID%("\"" + PcdItem[2] + "\""),
                         File=CurrentLineOfPcdItem[2],
                         Line=CurrentLineOfPcdItem[1],
                         ExtraData=CurrentLineOfPcdItem[0])
        PcdItemObj.SetOffset(PcdItem[2])
    # Empty Token or DatumType means no DEC declared this PCD at all.
    if PcdItemObj.GetToken() == '' or PcdItemObj.GetDatumType() == '':
        Logger.Error("InfParser",
                     ToolError.FORMAT_INVALID,
                     ST.ERR_ASBUILD_PCD_DECLARITION_MISS%("\"" + PcdItem[0] + "\""),
                     File=CurrentLineOfPcdItem[2],
                     Line=CurrentLineOfPcdItem[1],
                     ExtraData=CurrentLineOfPcdItem[0])
    return PcdItemObj
def ValidatePcdValueOnDatumType(Value, Type):
    """
    Check that an as-built PCD value string is well formed for its datum type.

    @param Value: value string from the INF line (surrounding blanks ignored)
    @param Type:  datum type from the DEC file: 'BOOLEAN', 'VOID*', 'UINT8',
                  'UINT16', 'UINT32' or 'UINT64'.  Any other type is accepted
                  because the DEC file is assumed correct.
    @return: True if the value is valid for the type, otherwise False.
    """
    Value = Value.strip()
    #
    # Boolean type only allow 0x00 or 0x01 as value per INF spec
    #
    if Type == 'BOOLEAN':
        if not (Value == '0x00' or Value == '0x01'):
            return False
    elif Type == 'VOID*':
        # Must be a {}-wrapped, comma-separated list of one/two-digit hex bytes.
        if not Value.startswith("{"):
            return False
        if not Value.endswith("}"):
            return False
        #
        # Strip "{" at head and "}" at tail.
        #
        Value = Value[1:-1]
        ValueList = GetSplitValueList(Value, DT.TAB_COMMA_SPLIT)
        ReIsValidHexByte = re.compile("^0x[0-9a-f]{1,2}$", re.IGNORECASE)
        for ValueItem in ValueList:
            if not ReIsValidHexByte.match(ValueItem):
                return False
    elif Type in ('UINT8', 'UINT16', 'UINT32', 'UINT64'):
        #
        # Exact zero-padded width required: 2/4/8/16 hex digits respectively.
        # BUGFIX: the previous patterns used the character class [x|X], which
        # also accepted a literal '|' after the leading 0 (e.g. '0|12' passed
        # as UINT8); the class is now [xX].
        #
        HexDigitCount = {'UINT8': 2, 'UINT16': 4, 'UINT32': 8, 'UINT64': 16}[Type]
        if not re.match('^0[xX][a-fA-F0-9]{%d}$' % HexDigitCount, Value):
            return False
    else:
        #
        # Since we assume the DEC file always correct, should never go to here.
        #
        pass
    return True
def SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj):
    """
    Split '<TokenSpaceGuidCName>.<TokenCName>' out of an INF PCD entry and
    store both halves on PcdItemObj after validating each as a C variable
    name.  Malformed input is reported through Logger.Error.
    """
    NameParts = GetSplitValueList(PcdItem[0], DT.TAB_SPLIT)
    if len(NameParts) != 2:
        Logger.Error("InfParser",
                     ToolError.FORMAT_INVALID,
                     ST.ERR_INF_PARSER_PCD_NAME_FORMAT_ERROR,
                     File=CurrentLineOfPcdItem[2],
                     Line=CurrentLineOfPcdItem[1],
                     ExtraData=CurrentLineOfPcdItem[0])
    else:
        # Both halves must be legal C identifiers; the GUID half is checked
        # first, matching the original report order.
        for Part, ErrorString in ((NameParts[0], ST.ERR_INF_PARSER_PCD_CVAR_GUID),
                                  (NameParts[1], ST.ERR_INF_PARSER_PCD_CVAR_PCDCNAME)):
            if not IsValidCVariableName(Part):
                Logger.Error("InfParser",
                             ToolError.FORMAT_INVALID,
                             ErrorString,
                             File=CurrentLineOfPcdItem[2],
                             Line=CurrentLineOfPcdItem[1],
                             ExtraData=Part)
        PcdItemObj.SetTokenSpaceGuidCName(NameParts[0])
        PcdItemObj.SetCName(NameParts[1])
    return PcdItemObj
|
{
"content_hash": "5706e6665e50b2e27ef7ef8189356a25",
"timestamp": "",
"source": "github",
"line_count": 626,
"max_line_length": 109,
"avg_line_length": 39.86741214057508,
"alnum_prop": 0.5299515166085668,
"repo_name": "egraba/vbox_openbsd",
"id": "f69e056ae33e4f5c04110ce0d7ed72f1463fce83",
"size": "25564",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "VirtualBox-5.0.0/src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Object/Parser/InfPcdObject.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "88714"
},
{
"name": "Assembly",
"bytes": "4303680"
},
{
"name": "AutoIt",
"bytes": "2187"
},
{
"name": "Batchfile",
"bytes": "95534"
},
{
"name": "C",
"bytes": "192632221"
},
{
"name": "C#",
"bytes": "64255"
},
{
"name": "C++",
"bytes": "83842667"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "6041"
},
{
"name": "CSS",
"bytes": "26756"
},
{
"name": "D",
"bytes": "41844"
},
{
"name": "DIGITAL Command Language",
"bytes": "56579"
},
{
"name": "DTrace",
"bytes": "1466646"
},
{
"name": "GAP",
"bytes": "350327"
},
{
"name": "Groff",
"bytes": "298540"
},
{
"name": "HTML",
"bytes": "467691"
},
{
"name": "IDL",
"bytes": "106734"
},
{
"name": "Java",
"bytes": "261605"
},
{
"name": "JavaScript",
"bytes": "80927"
},
{
"name": "Lex",
"bytes": "25122"
},
{
"name": "Logos",
"bytes": "4941"
},
{
"name": "Makefile",
"bytes": "426902"
},
{
"name": "Module Management System",
"bytes": "2707"
},
{
"name": "NSIS",
"bytes": "177212"
},
{
"name": "Objective-C",
"bytes": "5619792"
},
{
"name": "Objective-C++",
"bytes": "81554"
},
{
"name": "PHP",
"bytes": "58585"
},
{
"name": "Pascal",
"bytes": "69941"
},
{
"name": "Perl",
"bytes": "240063"
},
{
"name": "PowerShell",
"bytes": "10664"
},
{
"name": "Python",
"bytes": "9094160"
},
{
"name": "QMake",
"bytes": "3055"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "1460572"
},
{
"name": "SourcePawn",
"bytes": "4139"
},
{
"name": "TypeScript",
"bytes": "142342"
},
{
"name": "Visual Basic",
"bytes": "7161"
},
{
"name": "XSLT",
"bytes": "1034475"
},
{
"name": "Yacc",
"bytes": "22312"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.