text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import factory.fuzzy
from rest_framework.reverse import reverse
from waldur_core.structure import models as structure_models
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.users import models
class InvitationBaseFactory(factory.DjangoModelFactory):
    """Common base for invitation factories; provides REST URL helpers."""

    email = factory.Sequence(lambda n: 'test%s@invitation.com' % n)

    @classmethod
    def get_list_url(cls, action=None):
        """Return the invitation list URL, optionally suffixed with an action."""
        base = 'http://testserver' + reverse('user-invitation-list')
        if action is None:
            return base
        return base + action + '/'

    @classmethod
    def get_url(cls, invitation=None, action=None):
        """Return the detail URL for `invitation` (a new one is created if omitted)."""
        if invitation is None:
            invitation = cls()
        base = 'http://testserver' + reverse(
            'user-invitation-detail', kwargs={'uuid': invitation.uuid.hex}
        )
        if action is None:
            return base
        return base + action + '/'
class ProjectInvitationFactory(InvitationBaseFactory):
    """Invitation that grants the MANAGER role on a generated project."""

    class Meta:
        model = models.Invitation

    # customer is derived from the generated project's customer
    customer = factory.SelfAttribute('project.customer')
    project = factory.SubFactory(structure_factories.ProjectFactory)
    project_role = structure_models.ProjectRole.MANAGER
class CustomerInvitationFactory(InvitationBaseFactory):
    """Invitation that grants the OWNER role on a generated customer."""

    class Meta:
        model = models.Invitation

    customer = factory.SubFactory(structure_factories.CustomerFactory)
    customer_role = structure_models.CustomerRole.OWNER
class GroupInvitationBaseFactory(factory.DjangoModelFactory):
    """Common base for group-invitation factories; provides REST URL helpers."""

    @classmethod
    def get_list_url(cls, action=None):
        """Return the group-invitation list URL, optionally suffixed with an action."""
        base = 'http://testserver' + reverse('user-group-invitation-list')
        if action is None:
            return base
        return base + action + '/'

    @classmethod
    def get_url(cls, invitation=None, action=None):
        """Return the detail URL for `invitation` (a new one is created if omitted)."""
        if invitation is None:
            invitation = cls()
        base = 'http://testserver' + reverse(
            'user-group-invitation-detail', kwargs={'uuid': invitation.uuid.hex}
        )
        if action is None:
            return base
        return base + action + '/'
class ProjectGroupInvitationFactory(GroupInvitationBaseFactory):
    """Group invitation that grants the MANAGER role on a generated project."""

    class Meta:
        model = models.GroupInvitation

    # customer is derived from the generated project's customer
    customer = factory.SelfAttribute('project.customer')
    project = factory.SubFactory(structure_factories.ProjectFactory)
    project_role = structure_models.ProjectRole.MANAGER
class CustomerGroupInvitationFactory(GroupInvitationBaseFactory):
    """Group invitation that grants the OWNER role on a generated customer."""

    class Meta:
        model = models.GroupInvitation

    customer = factory.SubFactory(structure_factories.CustomerFactory)
    customer_role = structure_models.CustomerRole.OWNER
class PermissionRequestFactory(factory.DjangoModelFactory):
    """Factory producing pending PermissionRequest objects with URL helpers."""

    class Meta:
        model = models.PermissionRequest

    invitation = factory.SubFactory(CustomerGroupInvitationFactory)
    created_by = factory.SubFactory(structure_factories.UserFactory)
    state = models.PermissionRequest.States.PENDING

    @classmethod
    def get_list_url(cls, action=None):
        """Return the permission-request list URL, optionally suffixed with an action."""
        base = 'http://testserver' + reverse('user-permission-request-list')
        if action is None:
            return base
        return base + action + '/'

    @classmethod
    def get_url(cls, request=None, action=None):
        """Return the detail URL for `request` (a new one is created if omitted)."""
        if request is None:
            request = cls()
        base = 'http://testserver' + reverse(
            'user-permission-request-detail', kwargs={'uuid': request.uuid.hex}
        )
        if action is None:
            return base
        return base + action + '/'
|
{
"content_hash": "c5210c407ba6f4c4ed776fca5d7b819c",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 80,
"avg_line_length": 34.670103092783506,
"alnum_prop": 0.6993755575379126,
"repo_name": "opennode/waldur-mastermind",
"id": "c53af7af58087fe07586a3a0fe988277d9e4f677",
"size": "3363",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/waldur_core/users/tests/factories.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
}
|
"""Tests for occupancy_flow_grids."""
import tensorflow as tf
from waymo_open_dataset.utils import occupancy_flow_data
from waymo_open_dataset.utils import occupancy_flow_grids
from waymo_open_dataset.utils import occupancy_flow_renderer
from waymo_open_dataset.utils import occupancy_flow_test_util
class OccupancyFlowGridsTest(tf.test.TestCase):
  """Tests for occupancy_flow_grids on one small rendered data batch."""

  def setUp(self):
    super().setUp()
    self.batch_size = 8
    # Build one deterministic batch and add the SDC (self-driving car) fields
    # that the grid-creation helpers expect.
    inputs = occupancy_flow_test_util.make_one_data_batch(
        batch_size=self.batch_size)
    self.inputs = occupancy_flow_data.add_sdc_fields(inputs)
    self.config = occupancy_flow_test_util.make_test_config()

  def test_create_ground_truth_timestep_grids(self):
    """Per-timestep occupancy/flow grids have the expected shapes and ranges."""
    timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(
        inputs=self.inputs, config=self.config)
    batch_size = self.batch_size
    height = self.config.grid_height_cells
    width = self.config.grid_width_cells
    num_past_steps = self.config.num_past_steps
    num_future_steps = self.config.num_future_steps
    # Number of timesteps between consecutive waypoints.
    waypoint_size = num_future_steps // self.config.num_waypoints
    num_all_steps = occupancy_flow_renderer._get_num_steps_from_times(
        times=['past', 'current', 'future'], config=self.config)
    # Flow fields look back `waypoint_size` steps, hence fewer flow steps.
    num_flow_steps = num_all_steps - waypoint_size
    for object_type in occupancy_flow_data.ALL_AGENT_TYPES:
      # Occupancy.
      self.assertEqual(
          timestep_grids.view(object_type).past_occupancy.shape,
          (batch_size, height, width, num_past_steps))
      self.assertEqual(
          timestep_grids.view(object_type).current_occupancy.shape,
          (batch_size, height, width, 1))
      self.assertEqual(
          timestep_grids.view(object_type).future_observed_occupancy.shape,
          (batch_size, height, width, num_future_steps))
      self.assertEqual(
          timestep_grids.view(object_type).future_occluded_occupancy.shape,
          (batch_size, height, width, num_future_steps))
      # All occupancy for flow origin.
      self.assertEqual(
          timestep_grids.view(object_type).all_occupancy.shape,
          (batch_size, height, width, num_all_steps))
      # Flow.
      self.assertEqual(
          timestep_grids.view(object_type).all_flow.shape,
          (batch_size, height, width, num_flow_steps, 2))
      # The test scene contains all agent classes. Verify some values too.
      current_occupancy = timestep_grids.view(object_type).current_occupancy
      all_flow = timestep_grids.view(object_type).all_flow
      # Occupancy is binary; flow contains displacements in both directions.
      self.assertEqual(tf.reduce_min(current_occupancy), 0)
      self.assertEqual(tf.reduce_max(current_occupancy), 1)
      self.assertLess(tf.reduce_min(all_flow), 0)
      self.assertGreater(tf.reduce_max(all_flow), 0)

  def test_create_ground_truth_waypoint_grids(self):
    """Waypoint grids contain one grid of the right shape per waypoint."""
    timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(
        inputs=self.inputs, config=self.config)
    true_waypoints = occupancy_flow_grids.create_ground_truth_waypoint_grids(
        timestep_grids=timestep_grids, config=self.config)
    batch_size = self.batch_size
    height = self.config.grid_height_cells
    width = self.config.grid_width_cells
    num_waypoints = self.config.num_waypoints
    for object_type in occupancy_flow_data.ALL_AGENT_TYPES:
      # Each field is a list with one entry per waypoint.
      self.assertLen(
          true_waypoints.view(object_type).observed_occupancy, num_waypoints)
      self.assertLen(
          true_waypoints.view(object_type).occluded_occupancy, num_waypoints)
      self.assertLen(
          true_waypoints.view(object_type).flow_origin_occupancy, num_waypoints)
      self.assertLen(true_waypoints.view(object_type).flow, num_waypoints)
      self.assertEqual(
          true_waypoints.view(object_type).observed_occupancy[0].shape,
          (batch_size, height, width, 1))
      self.assertEqual(
          true_waypoints.view(object_type).occluded_occupancy[0].shape,
          (batch_size, height, width, 1))
      self.assertEqual(
          true_waypoints.view(object_type).flow_origin_occupancy[0].shape,
          (batch_size, height, width, 1))
      self.assertEqual(
          true_waypoints.view(object_type).flow[0].shape,
          (batch_size, height, width, 2))

  def test_create_ground_truth_vis_grids(self):
    """Visualization grids (roadgraph, agent trails) are single-channel."""
    timestep_grids = occupancy_flow_grids.create_ground_truth_timestep_grids(
        inputs=self.inputs, config=self.config)
    vis_grids = occupancy_flow_grids.create_ground_truth_vis_grids(
        inputs=self.inputs,
        timestep_grids=timestep_grids,
        config=self.config,
    )
    batch_size = self.batch_size
    height = self.config.grid_height_cells
    width = self.config.grid_width_cells
    self.assertEqual(vis_grids.roadgraph.shape, (batch_size, height, width, 1))
    self.assertEqual(vis_grids.agent_trails.shape,
                     (batch_size, height, width, 1))
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "dcbe3ed3ac5227c9681a68aad68ace02",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 80,
"avg_line_length": 41.389830508474574,
"alnum_prop": 0.6852989352989353,
"repo_name": "waymo-research/waymo-open-dataset",
"id": "3385e7e4d62abd3ecab31b37398c0293bce5fdb1",
"size": "5580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "waymo_open_dataset/utils/occupancy_flow_grids_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "680892"
},
{
"name": "Dockerfile",
"bytes": "6981"
},
{
"name": "Jupyter Notebook",
"bytes": "6236449"
},
{
"name": "Python",
"bytes": "480022"
},
{
"name": "Shell",
"bytes": "12226"
},
{
"name": "Smarty",
"bytes": "439"
},
{
"name": "Starlark",
"bytes": "61788"
}
],
"symlink_target": ""
}
|
import os
import mock
from oslo.config import cfg
from poppy.transport import pecan
from tests.unit import base
class PecanTransportDriverTest(base.TestCase):
    """Unit tests for the pecan transport driver."""

    def test_listen(self):
        """listen() builds a WSGI server and calls its serve_forever()."""
        # Walk four directories up from this file to reach the tests root.
        path = __file__
        for _ in range(4):
            path = os.path.dirname(path)
        tests_path = os.path.abspath(path)
        conf_path = os.path.join(tests_path, 'etc', 'default_functional.conf')
        cfg.CONF(args=[], default_config_files=[conf_path])

        mock_path = 'poppy.transport.pecan.driver.simple_server'
        with mock.patch(mock_path) as server_module:
            fake_server = mock.Mock()
            server_module.make_server = mock.Mock(return_value=fake_server)
            driver = pecan.Driver(cfg.CONF, None)
            driver.listen()
            self.assertTrue(fake_server.serve_forever.called)
|
{
"content_hash": "ea08cbd3acb19ba100b48654ddc5940a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 78,
"avg_line_length": 35.5,
"alnum_prop": 0.5937161430119177,
"repo_name": "amitgandhinz/cdn",
"id": "2c1fea62768b62cc76eda5b4138b0bde1ee4b5e3",
"size": "1508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/transport/pecan/test_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "303888"
},
{
"name": "Shell",
"bytes": "4071"
}
],
"symlink_target": ""
}
|
import optparse
import os
import signal
import subprocess
import sys
import utils
# Platform key ('win32', 'linux', 'macos') used to index the tables below.
os_name = utils.GuessOS()

# ps invocation that prints the full command line of a single pid (POSIX).
POSIX_INFO = 'ps -p %s -o args'

# Logical process name -> platform-specific executable name, per platform.
# Kill() refuses to act on names missing from the current platform's table.
EXECUTABLE_NAMES = {
  'win32': {
    'chrome': 'chrome.exe',
    'dart': 'dart.exe',
    'dart_precompiled_runtime': 'dart_precompiled_runtime.exe',
    'firefox': 'firefox.exe',
    'gen_snapshot': 'gen_snapshot.exe',
    'git': 'git.exe',
    'iexplore': 'iexplore.exe',
    'vctip': 'vctip.exe',
    'mspdbsrv': 'mspdbsrv.exe',
  },
  'linux': {
    'chrome': 'chrome',
    'dart': 'dart',
    'dart_precompiled_runtime': 'dart_precompiled_runtime',
    'firefox': 'firefox',
    'gen_snapshot': 'gen_snapshot',
    'flutter_tester': 'flutter_tester',
    'git': 'git',
  },
  'macos': {
    'chrome': 'Chrome',
    'chrome_helper': 'Chrome Helper',
    'dart': 'dart',
    'dart_precompiled_runtime': 'dart_precompiled_runtime',
    'firefox': 'firefox',
    'gen_snapshot': 'gen_snapshot',
    'git': 'git',
    'safari': 'Safari',
  }
}

# Command used to fetch the command line of a pid, per platform.
INFO_COMMAND = {
  'win32': 'wmic process where Processid=%s get CommandLine',
  'macos': POSIX_INFO,
  'linux': POSIX_INFO,
}

# Command used to dump a native stack for a pid (not available on windows).
STACK_INFO_COMMAND = {
  'win32': None,
  'macos': '/usr/bin/sample %s 1 4000 -mayDie',
  'linux': '/usr/bin/eu-stack -p %s',
}
def GetOptions():
  """Parse the command line flags that select which process groups to kill."""
  parser = optparse.OptionParser('usage: %prog [options]')
  bool_choices = ['True', 'False']
  # Every flag is a 'True'/'False' choice; declare them in one table.
  flag_specs = [
      ("--kill_dart", 'True', "Kill all dart processes"),
      ("--kill_vc", 'True', "Kill all git processes"),
      ("--kill_vsbuild", 'False',
       "Kill all visual studio build related processes"),
      ("--kill_browsers", 'False', "Kill all browser processes"),
  ]
  for flag, default, help_text in flag_specs:
    parser.add_option(
        flag,
        default=default,
        type='choice',
        choices=bool_choices,
        help=help_text)
  options, _ = parser.parse_args()
  return options
def GetPidsPosix(process_name):
  """Return pids (as strings) of processes whose command ends with process_name."""
  # One posix command that works on both linux and mac; on linux alone we
  # could simply run: pidof process_name
  cmd = 'ps -e -o pid= -o comm='
  # Sample output:
  # 1 /sbin/launchd
  # 80943 /Applications/Safari.app/Contents/MacOS/Safari
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
  output, _ = proc.communicate()
  pids = []
  for line in output.splitlines():
    fields = line.split()
    if len(fields) < 2:
      continue
    # On mac this ps command actually gives us the full path to non
    # system binaries, so compare against the tail of the command.
    command = " ".join(fields[1:])
    if command.endswith(process_name):
      pids.append(fields[0])
  return pids
def GetPidsWindows(process_name):
  """Return pids (as strings) of processes named process_name, via tasklist."""
  cmd = 'tasklist /FI "IMAGENAME eq %s" /NH' % process_name
  # Sample output:
  # dart.exe    4356 Console    1    6,800 K
  proc = subprocess.Popen(
      cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
  output, _ = proc.communicate()
  pids = []
  for line in output.splitlines():
    fields = line.split()
    if len(fields) > 2 and fields[0] == process_name:
      pids.append(fields[1])
  return pids
def GetPids(process_name):
  """Dispatch to the platform-specific pid lookup."""
  lookup = GetPidsWindows if os_name == "win32" else GetPidsPosix
  return lookup(process_name)
def PrintPidStackInfo(pid):
  # Dump a native stack trace for `pid` using the platform's sampling tool.
  # STACK_INFO_COMMAND maps win32 to None, so this is a no-op on windows.
  command_pattern = STACK_INFO_COMMAND.get(os_name, False)
  if command_pattern:
    p = subprocess.Popen(
        command_pattern % pid,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True)
    stdout, stderr = p.communicate()
    stdout = stdout.splitlines()
    stderr = stderr.splitlines()

    print " Stack:"
    for line in stdout:
      print " %s" % line
    # Some tools report (parts of) the stack on stderr; show that too.
    if stderr:
      print " Stack (stderr):"
      for line in stderr:
        print " %s" % line
def PrintPidInfo(pid, dump_stacks):
  # Print the command line of `pid` (and its native stack if dump_stacks).
  # We assume that the list command will return lines in the format:
  # EXECUTABLE_PATH ARGS
  # There may be blank strings in the output
  p = subprocess.Popen(
      INFO_COMMAND[os_name] % pid,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      shell=True)
  output, stderr = p.communicate()
  lines = output.splitlines()

  # Pop the header
  lines.pop(0)
  print "Hanging process info:"
  print " PID: %s" % pid
  for line in lines:
    # wmic will output a bunch of empty strings, we ignore these
    if line: print " Command line: %s" % line

  if dump_stacks:
    PrintPidStackInfo(pid)
def KillPosix(pid):
  """Force-kill `pid` with SIGKILL, ignoring processes that are already gone.

  `pid` is a string as harvested from ps output; a malformed pid string is
  ignored the same way an already-dead process is.
  """
  try:
    os.kill(int(pid), signal.SIGKILL)
  except (OSError, ValueError):
    # Ignore this: the process is already dead (e.g. from killing another
    # process), or the pid string was garbage. The previous bare `except:`
    # also swallowed KeyboardInterrupt/SystemExit, which we must not do.
    pass
def KillWindows(pid):
  """Force-kill a pid via taskkill (os.kill is not available until python 2.7)."""
  taskkill_cmd = "taskkill /F /PID %s" % pid
  subprocess.Popen(
      taskkill_cmd,
      stdout=subprocess.PIPE,
      stderr=subprocess.PIPE,
      shell=True).communicate()
def Kill(name, dump_stacks=False):
  """Kill every process mapped to `name` on this platform; return the count."""
  # Names without a mapping for this platform are silently skipped.
  if name not in EXECUTABLE_NAMES[os_name]:
    return 0
  print("***************** Killing %s *****************" % name)
  platform_name = EXECUTABLE_NAMES[os_name][name]
  pids = GetPids(platform_name)
  killer = KillWindows if os_name == "win32" else KillPosix
  for pid in pids:
    # Log diagnostics before the process disappears.
    PrintPidInfo(pid, dump_stacks)
    killer(pid)
    print("Killed pid: %s" % pid)
  if not pids:
    print(" No %s processes found." % name)
  return len(pids)
def KillBrowsers():
  """Kill browser processes; chrome kill failures are intentionally ignored."""
  status = Kill('firefox')
  # We don't give error on killing chrome. It happens quite often that the
  # browser controller fails in killing chrome, so we silently do it here.
  Kill('chrome')
  for browser in ('chrome_helper', 'iexplore', 'safari'):
    status += Kill(browser)
  return status
def KillVCSystems():
  """Kill version-control helper processes (currently just git)."""
  return Kill('git')
def KillVSBuild():
  """Kill Visual Studio build helper processes; return the kill count."""
  total = Kill('vctip')
  total += Kill('mspdbsrv')
  return total
def KillDart():
  """Kill Dart-related processes, dumping stacks for hang diagnostics."""
  total = 0
  for name in ("dart", "gen_snapshot",
               "dart_precompiled_runtime", "flutter_tester"):
    total += Kill(name, dump_stacks=True)
  return total
def Main():
  """Kill the process groups selected by the flags; return the total count."""
  options = GetOptions()
  status = 0
  if options.kill_dart == 'True':
    dart_status = KillDart()
    # TODO(24086): Add result of KillDart into status once pub hang is fixed.
    # Until then windows ignores the dart kill count.
    if os_name != "win32":
      status += dart_status
  if options.kill_vc == 'True':
    status += KillVCSystems()
  if options.kill_vsbuild == 'True' and os_name == 'win32':
    status += KillVSBuild()
  if options.kill_browsers == 'True':
    status += KillBrowsers()
  return status
if __name__ == '__main__':
sys.exit(Main())
|
{
"content_hash": "1e0efdd1cef7ae2b27fb0ddffaa22422",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 85,
"avg_line_length": 27.420454545454547,
"alnum_prop": 0.5824008841000138,
"repo_name": "dartino/dart-sdk",
"id": "3c36acc5620ebb82825bc7ef9406066e31c3063a",
"size": "7585",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/task_kill.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "5209"
},
{
"name": "Batchfile",
"bytes": "49645"
},
{
"name": "C",
"bytes": "169243"
},
{
"name": "C++",
"bytes": "17828371"
},
{
"name": "CMake",
"bytes": "1598"
},
{
"name": "CSS",
"bytes": "96163"
},
{
"name": "Common Lisp",
"bytes": "234"
},
{
"name": "Dart",
"bytes": "83151790"
},
{
"name": "GAP",
"bytes": "37395"
},
{
"name": "HTML",
"bytes": "679631"
},
{
"name": "Java",
"bytes": "627371"
},
{
"name": "JavaScript",
"bytes": "157014"
},
{
"name": "Makefile",
"bytes": "8113"
},
{
"name": "Python",
"bytes": "1203692"
},
{
"name": "Shell",
"bytes": "140264"
},
{
"name": "TeX",
"bytes": "271705"
}
],
"symlink_target": ""
}
|
"""Contains DiscoElastigroup class that orchestrates AWS Spotinst Elastigroups"""
import copy
import logging
import time
import os
from base64 import b64encode
from itertools import groupby
import boto3
from disco_aws_automation.resource_helper import throttled_call, tag2dict
from .spotinst_client import SpotinstClient
from .base_group import BaseGroup
from .exceptions import TooManyAutoscalingGroups, SpotinstException, TimeoutError
# Module-level logger for elastigroup orchestration messages.
logger = logging.getLogger(__name__)

# max time to wait in seconds for instances to become healthy after a roll
GROUP_ROLL_TIMEOUT = 1200
class DiscoElastigroup(BaseGroup):
"""Class orchestrating elastigroups"""
def __init__(self, environment_name):
self.environment_name = environment_name
if os.environ.get('SPOTINST_TOKEN'):
self.spotinst_client = SpotinstClient(
token=os.environ.get('SPOTINST_TOKEN'),
environment_name=environment_name
)
else:
self.spotinst_client = None
logger.warn('Create environment variable "SPOTINST_TOKEN" in order to use SpotInst')
super(DiscoElastigroup, self).__init__()
def is_spotinst_enabled(self):
"""Return True if SpotInst should be used"""
# if the spotinst client doesn't exist (meaning the token is missing) then don't use spotinst
return self.spotinst_client is not None
def _get_new_groupname(self, hostclass):
"""Returns a new elastigroup name when given a hostclass"""
return self.environment_name + '_' + hostclass + "_" + str(int(time.time()))
def _filter_by_environment(self, groups):
"""Filters elastigroups by environment"""
return [
group for group in groups
if group['name'].startswith("{0}_".format(self.environment_name))
]
def _get_hostclass(self, group_name):
"""Returns the hostclass when given an elastigroup name"""
# group names follow a <env>_hostclass_<id> pattern. hostclass names could have underscores
# so we need to be careful about how we split out the hostclass name
parts = group_name.split('_')[1:-1]
return '_'.join(parts)
def _get_spotinst_groups(self, hostclass=None, group_name=None):
groups = self.spotinst_client.get_groups()
session = boto3.session.Session()
return [group for group in groups
if group['name'].startswith(self.environment_name) and
(not group_name or group['name'] == group_name) and
(not hostclass or self._get_hostclass(group['name']) == hostclass) and
session.region_name in group['compute']['availabilityZones'][0]['name']]
    def get_existing_groups(self, hostclass=None, group_name=None):
        """Return elastigroups as dicts shaped like DiscoAutoscale's groups,
        sorted newest-first (names end with a creation timestamp).
        """
        # get a dict for each group that matches the structure that would be returned by DiscoAutoscale
        # this dict needs to have at least all the fields that the interface specifies
        groups = []
        for group in self._get_spotinst_groups(hostclass, group_name):
            launch_spec = group['compute']['launchSpecification']
            # Need this mess because loadBalancersConfig could be missing or might return None
            load_balancer_configs = launch_spec.get('loadBalancersConfig', {}).get('loadBalancers', []) or []
            groups.append({
                'name': group['name'],
                'min_size': group['capacity']['minimum'],
                'max_size': group['capacity']['maximum'],
                'desired_capacity': group['capacity']['target'],
                # elastigroups have no launch configs or termination policies
                'launch_config_name': None,
                'termination_policies': [],
                # flatten all subnets across availability zones into a CSV string
                'vpc_zone_identifier': ','.join(
                    subnet for zone in group['compute']['availabilityZones'] for subnet in zone['subnetIds']
                ),
                'load_balancers': [elb['name'] for elb in load_balancer_configs
                                   if elb['type'] == 'CLASSIC'],
                'target_groups': [tg['arn'] for tg in load_balancer_configs
                                  if tg['type'] == 'TARGET_GROUP'],
                'image_id': launch_spec['imageId'],
                'id': group['id'],
                'type': 'spot',
                # blockDeviceMappings will be None instead of a empty list if there is no ELB
                'blockDeviceMappings': (launch_spec.get('blockDeviceMappings') or []),
                'scheduling': group.get('scheduling', {'tasks': []}),
                'tags': {tag['tagKey']: tag['tagValue'] for tag in launch_spec.get('tags', [])}
            })
        groups.sort(key=lambda grp: grp['name'], reverse=True)
        return groups
def get_existing_group(self, hostclass=None, group_name=None, throw_on_two_groups=True):
"""
Returns the elastigroup dict for the given hostclass or group name, or None if
no elastigroup exists.
If two or more elastigroups exist for a hostclass, then this method will throw an exception,
unless 'throw_on_two_groups' is False. Then if there are two groups the most recently created
elastigroup will be returned. If there are more than two elastigroups, this method will
always throw an exception.
"""
groups = self.get_existing_groups(hostclass=hostclass, group_name=group_name)
if not groups:
return None
elif len(groups) == 1 or (len(groups) == 2 and not throw_on_two_groups):
return groups[0]
else:
raise TooManyAutoscalingGroups("There are too many elastigroups for {}.".format(hostclass))
def _get_group_instances(self, group_id):
"""Returns list of instance ids in a group"""
return self.spotinst_client.get_group_status(group_id)
    def get_instances(self, hostclass=None, group_name=None):
        """Returns elastigroup instances for hostclass in the current environment"""
        next_token = None
        instances = []
        # Instances are discovered through EC2 tags rather than the SpotInst
        # API; the 'spotinst' marker tag is set by _create_elastigroup_tags().
        filters = [{
            'Name': 'tag:spotinst',
            'Values': ['True']
        }, {
            'Name': 'tag:environment',
            'Values': [self.environment_name]
        }, {
            'Name': 'instance-state-name',
            'Values': ['pending', 'running', 'shutting-down', 'stopping', 'stopped']
        }]
        if hostclass:
            filters.append({
                'Name': 'tag:hostclass',
                'Values': [hostclass]
            })
        if group_name:
            filters.append({
                'Name': 'tag:group_name',
                'Values': [group_name]
            })
        # Paginate through describe_instances using NextToken.
        # NOTE(review): self.boto3_ec looks like an EC2 client supplied by
        # BaseGroup -- confirm against base_group.py.
        while True:
            args = {'Filters': filters}
            if next_token:
                args['NextToken'] = next_token
            response = throttled_call(self.boto3_ec.describe_instances, **args)
            for reservation in response.get('Reservations'):
                for instance in reservation.get('Instances'):
                    instances.append({
                        'instance_id': instance['InstanceId'],
                        'group_name': tag2dict(instance['Tags']).get('group_name')
                    })
            next_token = response.get('NextToken')
            if not next_token:
                break
        return instances
def list_groups(self):
"""Returns list of objects for display purposes for all groups"""
groups = self.get_existing_groups()
return [
{
'name': group['name'],
'image_id': group['image_id'],
'group_cnt': len(self._get_group_instances(group['id'])),
'min_size': group['min_size'],
'desired_capacity': group['desired_capacity'],
'max_size': group['max_size'],
'type': group['type'],
'tags': group['tags']
}
for group in groups
]
    def _create_elastigroup_config(self, desired_size, min_size, max_size, instance_type,
                                   subnets, load_balancers, target_groups, security_groups,
                                   instance_monitoring, ebs_optimized, image_id, key_name,
                                   associate_public_ip_address, user_data, tags, instance_profile_name,
                                   block_device_mappings, group_name, spotinst_reserve):
        # Pylint thinks this function has too many arguments and too many local variables (it does)
        # pylint: disable=too-many-arguments, too-many-locals
        """Create new elastigroup configuration"""
        strategy = {
            'availabilityVsCost': "equalAzDistribution",
            'utilizeReservedInstances': True,
            'fallbackToOd': True,
            "revertToSpot": {
                "performAt": "timeWindow",
                # time is in UTC. This is 6AM EST
                "timeWindows": [
                    "Sun:10:00-Sun:11:00",
                    "Mon:10:00-Mon:11:00",
                    "Tue:10:00-Tue:11:00",
                    "Wed:10:00-Wed:11:00",
                    "Thu:10:00-Thu:11:00",
                    "Fri:10:00-Fri:11:00",
                    "Sat:10:00-Sat:11:00"
                ]
            }
        }
        # spotinst_reserve drives the on-demand vs spot mix via the risk config
        strategy.update(self._get_risk_config(spotinst_reserve))

        _min_size = min_size or 0
        # maximum must cover every other size value that was supplied
        _max_size = max([min_size, max_size, desired_size, 0])
        _desired_capacity = desired_size or max_size
        capacity = {
            'target': _desired_capacity,
            'minimum': _min_size,
            'maximum': _max_size,
            'unit': "instance"
        }

        compute = {
            "instanceTypes": self._get_instance_type_config(instance_type),
            "product": "Linux/UNIX"
        }
        # Group subnet ids by availability zone.
        # NOTE(review): itertools.groupby only groups adjacent items, so this
        # assumes `subnets` is sorted by AvailabilityZone -- confirm callers.
        compute['availabilityZones'] = [
            {
                'name': zone,
                'subnetIds': [subnet['SubnetId'] for subnet in zone_subnets]
            }
            for zone, zone_subnets in groupby(subnets, key=lambda subnet: subnet['AvailabilityZone'])
        ] if subnets else None

        bdms = self._get_block_device_config(block_device_mappings)
        network_interfaces = [
            {"deleteOnTermination": True,
             "deviceIndex": 0,
             "associatePublicIpAddress": associate_public_ip_address}
        ] if associate_public_ip_address else None
        launch_specification = {
            "loadBalancersConfig": self._get_load_balancer_config(load_balancers, target_groups),
            "securityGroupIds": security_groups,
            "monitoring": instance_monitoring,
            "ebsOptimized": ebs_optimized,
            "imageId": image_id,
            "keyPair": key_name,
            "blockDeviceMappings": bdms or None,
            "networkInterfaces": network_interfaces,
            # SpotInst expects the user data base64-encoded
            "userData": b64encode(str(user_data)) if user_data else None,
            "iamRole": {
                "name": instance_profile_name
            } if instance_profile_name else None
        }

        # group_name is injected as a tag so get_instances() can filter on it
        tags = tags or {}
        tags['group_name'] = group_name
        launch_specification['tags'] = self._create_elastigroup_tags(tags)
        compute["launchSpecification"] = launch_specification

        group = {
            "name": group_name,
            "description": "Spotinst elastigroup: {}".format(group_name),
            "strategy": strategy,
            "capacity": capacity,
            "compute": compute
        }

        logger.info("Creating elastigroup config for elastigroup '%s'", group_name)

        elastigroup_config = {"group": group}
        return elastigroup_config
def _create_elastigroup_tags(self, tags):
"""Given a python dictionary, it returns a list of elastigroup tags"""
spotinst_tags = [{'tagKey': key, 'tagValue': str(value)}
for key, value in tags.iteritems()] if tags else []
spotinst_tags.append({'tagKey': 'spotinst', 'tagValue': 'True'})
return spotinst_tags
    def create_or_update_group(self, hostclass, desired_size=None, min_size=None, max_size=None,
                               instance_type=None, load_balancers=None, target_groups=None, subnets=None,
                               security_groups=None, instance_monitoring=None, ebs_optimized=None,
                               image_id=None, key_name=None, associate_public_ip_address=None, user_data=None,
                               tags=None, instance_profile_name=None, block_device_mappings=None,
                               group_name=None, create_if_exists=False, termination_policies=None,
                               spotinst=False, spotinst_reserve=None):
        # Pylint thinks this function has too many arguments and too many local variables
        # pylint: disable=R0913, R0914
        """Updates an existing elastigroup if it exists, otherwise this creates a new elastigroup.

        Returns a dict with the name of the group that was created or updated.
        Raises SpotinstException if called with spotinst=False, since this
        driver only manages SpotInst groups.
        """
        if not spotinst:
            raise SpotinstException('DiscoElastiGroup must be used for creating SpotInst groups')

        existing_groups = self._get_spotinst_groups(hostclass, group_name)
        if existing_groups and not create_if_exists:
            # update in place; only the first matching group is modified
            group = existing_groups[0]
            self._modify_group(
                group, desired_size=desired_size, min_size=min_size, max_size=max_size,
                image_id=image_id, tags=tags, instance_profile_name=instance_profile_name,
                block_device_mappings=block_device_mappings, spotinst_reserve=spotinst_reserve,
                load_balancers=load_balancers, target_groups=target_groups, instance_type=instance_type,
                user_data=user_data
            )
            return {'name': group['name']}

        # no existing group (or create_if_exists was requested): make a new one
        return self._create_group(
            desired_size=desired_size,
            min_size=min_size,
            max_size=max_size,
            instance_type=instance_type,
            load_balancers=load_balancers,
            target_groups=target_groups,
            subnets=subnets,
            security_groups=security_groups,
            instance_monitoring=instance_monitoring,
            ebs_optimized=ebs_optimized,
            image_id=image_id,
            key_name=key_name,
            associate_public_ip_address=associate_public_ip_address,
            user_data=user_data,
            tags=tags,
            instance_profile_name=instance_profile_name,
            block_device_mappings=block_device_mappings,
            group_name=self._get_new_groupname(hostclass),
            spotinst_reserve=spotinst_reserve
        )
    # pylint: disable=too-many-arguments, too-many-locals
    def _create_group(self, desired_size=None, min_size=None, max_size=None,
                      instance_type=None, load_balancers=None, target_groups=None, subnets=None,
                      security_groups=None, instance_monitoring=None, ebs_optimized=None, image_id=None,
                      key_name=None, associate_public_ip_address=None, user_data=None, tags=None,
                      instance_profile_name=None, block_device_mappings=None, group_name=None,
                      spotinst_reserve=None):
        """Create a brand new elastigroup from the given settings and return
        a dict containing the new group's name."""
        group_config = self._create_elastigroup_config(
            desired_size=desired_size,
            min_size=min_size,
            max_size=max_size,
            instance_type=instance_type,
            load_balancers=load_balancers,
            target_groups=target_groups,
            subnets=subnets,
            security_groups=security_groups,
            instance_monitoring=instance_monitoring,
            ebs_optimized=ebs_optimized,
            image_id=image_id,
            key_name=key_name,
            associate_public_ip_address=associate_public_ip_address,
            user_data=user_data,
            tags=tags,
            instance_profile_name=instance_profile_name,
            block_device_mappings=block_device_mappings,
            group_name=group_name,
            spotinst_reserve=spotinst_reserve
        )
        new_group = self.spotinst_client.create_group(group_config)
        new_group_name = new_group['name']
        return {'name': new_group_name}
    def _modify_group(self, existing_group, desired_size=None, min_size=None, max_size=None,
                      image_id=None, tags=None, instance_profile_name=None, block_device_mappings=None,
                      spotinst_reserve=None, load_balancers=None, target_groups=None, instance_type=None,
                      user_data=None):
        """Update an existing elastigroup in place.

        Only the arguments passed as non-None are changed; everything else
        keeps the existing group's configuration. ELB/target-group changes
        are applied through a separate update_elb call at the end.
        """
        # deep-copy so the caller's group dict is never mutated
        new_config = copy.deepcopy(existing_group)
        if min_size is not None:
            new_config['capacity']['minimum'] = min_size
        if max_size is not None:
            new_config['capacity']['maximum'] = max_size
        if desired_size is not None:
            new_config['capacity']['target'] = desired_size
        if spotinst_reserve is not None:
            new_config['strategy'] = self._get_risk_config(spotinst_reserve)
        if tags is not None:
            # keep the group_name tag in sync with the actual group name
            tags['group_name'] = existing_group['name']
            new_config['compute']['launchSpecification']['tags'] = self._create_elastigroup_tags(tags)
        if image_id is not None:
            new_config['compute']['launchSpecification']['imageId'] = image_id
        if block_device_mappings is not None:
            launch_spec = new_config['compute']['launchSpecification']
            launch_spec['blockDeviceMappings'] = self._get_block_device_config(block_device_mappings)
        if instance_profile_name is not None:
            new_config['compute']['launchSpecification']['iamRole'] = {
                'name': instance_profile_name
            }
        if instance_type is not None:
            new_config['compute']['instanceTypes'] = self._get_instance_type_config(instance_type)
        if user_data is not None:
            # SpotInst expects the user data base64-encoded
            new_config['compute']['launchSpecification']['userData'] = b64encode(str(user_data))

        # remove fields that can't be updated
        new_config['capacity'].pop('unit', None)
        new_config['compute'].pop('product', None)
        new_config.pop('createdAt', None)
        new_config.pop('updatedAt', None)
        group_id = new_config.pop('id')

        self.spotinst_client.update_group(group_id, {'group': new_config})

        if load_balancers or target_groups:
            self.update_elb(load_balancers, target_groups, group_name=existing_group['name'])
def _delete_group(self, group_id):
"""Delete an elastigroup by group id"""
self.spotinst_client.delete_group(group_id)
def delete_groups(self, hostclass=None, group_name=None, force=False):
"""Delete all elastigroups based on hostclass"""
# We need argument `force` to match method in autoscale
# pylint: disable=unused-argument
groups = self.get_existing_groups(hostclass=hostclass, group_name=group_name)
for group in groups:
logger.info("Deleting group %s", group['name'])
self._delete_group(group_id=group['id'])
def scaledown_groups(self, hostclass=None, group_name=None, wait=False, noerror=False):
    """Scale the matching elastigroups down to zero instances.

    When ``wait`` is True, block until the instances terminate; the
    termination waiter decides (via ``noerror``) whether a timeout raises.
    """
    matching_groups = self.get_existing_groups(hostclass=hostclass, group_name=group_name)
    for group in matching_groups:
        zero_capacity = {
            "group": {
                "capacity": {
                    "minimum": 0,
                    "maximum": 0,
                    "target": 0
                }
            }
        }
        logger.info("Scaling down group %s", group['name'])
        self.spotinst_client.update_group(group['id'], zero_capacity)
        if wait:
            self.wait_instance_termination(group_name=group_name, group=group, noerror=noerror)
def terminate(self, instance_id, decrement_capacity=True):
    """
    Terminates an instance using the autoscaling API.

    When decrement_capacity is True this allows us to avoid
    autoscaling immediately replacing a terminated instance.

    NOTE(review): this implementation is intentionally a no-op -- no SpotInst
    call is made here; presumably kept for interface compatibility with the
    autoscale counterpart -- confirm before relying on termination happening.
    """
    pass
def delete_all_recurring_group_actions(self, hostclass=None, group_name=None):
    """Remove every recurring scheduled action from the matching elastigroups."""
    for group in self.get_existing_groups(hostclass=hostclass, group_name=group_name):
        logger.info("Deleting scheduled actions for autoscaling group %s", group['name'])
        # Sending a null 'scheduling' section clears all scheduled tasks.
        clear_schedule = {
            'group': {
                'scheduling': None
            }
        }
        self.spotinst_client.update_group(group['id'], clear_schedule)
def create_recurring_group_action(self, recurrance, min_size=None, desired_capacity=None, max_size=None,
                                  hostclass=None, group_name=None):
    """Add a cron-style recurring scale task to each matching elastigroup.

    Tasks identical to an already-scheduled one are skipped.
    """
    for group in self.get_existing_groups(hostclass=hostclass, group_name=group_name):
        logger.info("Creating scheduled action for hostclass %s, group_name %s", hostclass, group_name)
        task = {
            'cronExpression': recurrance,
            'taskType': 'scale'
        }
        # Only include the capacity bounds the caller actually supplied.
        for key, value in (('scaleMinCapacity', min_size),
                           ('scaleMaxCapacity', max_size),
                           ('scaleTargetCapacity', desired_capacity)):
            if value is not None:
                task[key] = value
        schedule = group['scheduling']
        if task in schedule['tasks']:
            # An identical task is already scheduled; nothing to do.
            continue
        schedule['tasks'].append(task)
        self.spotinst_client.update_group(group['id'], {'group': {'scheduling': schedule}})
def update_elb(self, elb_names, target_groups, hostclass=None, group_name=None):
    """Updates an existing autoscaling group to use a different set of load balancers.

    Returns a 4-tuple (new_lbs, extra_lbs, new_tgs, extra_tgs): the ELB names
    added and no longer wanted, and the target group ARNs added and no longer
    wanted.
    """
    # pylint: disable=arguments-differ
    existing_group = self.get_existing_group(hostclass=hostclass, group_name=group_name)
    if not existing_group:
        logger.warning(
            "Auto Scaling group %s does not exist. Cannot change %s ELB(s)",
            hostclass or group_name,
            ', '.join(elb_names)
        )
        # BUG FIX: this early return used to produce a 2-tuple while the
        # success path returns a 4-tuple, breaking callers that unpack four
        # values. Return a consistent 4-tuple of empty sets.
        return set(), set(), set(), set()
    new_lbs = set(elb_names) - set(existing_group['load_balancers'])
    extra_lbs = set(existing_group['load_balancers']) - set(elb_names)
    if new_lbs or extra_lbs:
        logger.info(
            "Updating ELBs for group %s from [%s] to [%s]",
            existing_group['name'],
            ", ".join(existing_group['load_balancers']),
            ", ".join(elb_names)
        )
    elb_configs = [
        {
            'name': elb,
            'type': 'CLASSIC'
        } for elb in elb_names
    ]
    new_tgs = set(target_groups) - set(existing_group['target_groups'])
    extra_tgs = set(existing_group['target_groups']) - set(target_groups)
    if new_tgs or extra_tgs:
        logger.info(
            "Updating Target Groups for group %s from [%s] to [%s]",
            existing_group['name'],
            ", ".join(existing_group['target_groups']),
            ", ".join(target_groups)
        )
    target_group_configs = [
        {
            'arn': target_group,
            'type': 'TARGET_GROUP'
        } for target_group in target_groups
    ]
    # Classic ELBs and ALB target groups share a single loadBalancers list in
    # the SpotInst launch specification.
    new_configs = elb_configs + target_group_configs
    group_config = {
        'group': {
            'compute': {
                'launchSpecification': {
                    'loadBalancersConfig': {
                        'loadBalancers': new_configs
                    }
                }
            }
        }
    }
    self.spotinst_client.update_group(existing_group['id'], group_config)
    return new_lbs, extra_lbs, new_tgs, extra_tgs
def get_launch_config(self, hostclass=None, group_name=None):
    """Return launch config info (instance_type) for a hostclass, or None."""
    groups = self._get_spotinst_groups(hostclass=hostclass, group_name=group_name)
    if not groups:
        return None
    instance_types = groups[0]['compute']['instanceTypes']
    # The first spot type always duplicates the on-demand type, so drop it to
    # avoid returning it twice.
    spot_tail = instance_types['spot'][1:]
    return {
        'instance_type': ':'.join([instance_types['ondemand']] + spot_tail)
    }
def clean_configs(self):
    """Delete unused Launch Configurations in current environment.

    Not supported: elastigroups do not use launch configurations.
    """
    # NotImplementedError (an Exception subclass) keeps existing broad
    # handlers working while signalling the unimplemented operation idiomatically.
    raise NotImplementedError("Elastigroups don't have launch configs")
def get_configs(self, names=None):
    """Returns Launch Configurations in current environment.

    Not supported: elastigroups do not use launch configurations.
    """
    # NotImplementedError is an Exception subclass, so existing callers that
    # catch Exception are unaffected.
    raise NotImplementedError("Elastigroups don't have launch configs")
def delete_config(self, config_name):
    """Delete a specific Launch Configuration.

    Not supported: elastigroups do not use launch configurations.
    """
    # NotImplementedError is an Exception subclass, so existing callers that
    # catch Exception are unaffected.
    raise NotImplementedError("Elastigroups don't have launch configs")
def list_policies(self, group_name=None, policy_types=None, policy_names=None):
    """Returns all autoscaling policies.

    Not supported: scaling policies are not implemented for elastigroups.
    """
    # NotImplementedError is an Exception subclass, so existing callers that
    # catch Exception are unaffected.
    raise NotImplementedError('Scaling for Elastigroups is not implemented')
# pylint: disable=too-many-arguments
def create_policy(self, group_name, policy_name, policy_type="SimpleScaling", adjustment_type=None,
                  min_adjustment_magnitude=None, scaling_adjustment=None, cooldown=600,
                  metric_aggregation_type=None, step_adjustments=None, estimated_instance_warmup=None):
    """
    Creates a new autoscaling policy, or updates an existing one if the autoscaling group name and
    policy name already exist.

    Not supported: scaling policies are not implemented for elastigroups.
    """
    # NotImplementedError is an Exception subclass, so existing callers that
    # catch Exception are unaffected.
    raise NotImplementedError('Scaling for Elastigroups is not implemented')
def delete_policy(self, policy_name, group_name):
    """Deletes an autoscaling policy.

    Not supported: scaling policies are not implemented for elastigroups.
    """
    # NotImplementedError is an Exception subclass, so existing callers that
    # catch Exception are unaffected.
    raise NotImplementedError('Scaling for Elastigroups is not implemented')
def update_snapshot(self, snapshot_id, snapshot_size, hostclass=None, group_name=None):
    """Point the matching elastigroup's snapshot-backed volume at a new snapshot.

    Raises if no group matches, if the group mounts no snapshot-backed device,
    or if it mounts more than one. A no-op when the snapshot is already current.
    """
    group = self.get_existing_group(hostclass, group_name)
    if not group:
        raise Exception(
            'Elastigroup for %s hostclass and %s group name does not exist' %
            (str(hostclass), str(group_name))
        )
    mappings = group['blockDeviceMappings']
    # Collect the EBS sections that reference a snapshot; exactly one is supported.
    snapshot_ebs_sections = [mapping['ebs'] for mapping in mappings
                             if mapping.get('ebs', {}).get('snapshotId')]
    if not snapshot_ebs_sections:
        raise Exception("Hostclass %s does not mount a snapshot" % hostclass)
    if len(snapshot_ebs_sections) > 1:
        raise Exception(
            "Unsupported configuration: hostclass %s has multiple snapshot based devices." % hostclass
        )
    device = snapshot_ebs_sections[0]
    previous_snapshot_id = device['snapshotId']
    if previous_snapshot_id == snapshot_id:
        logger.debug(
            "Autoscaling group %s is already referencing latest snapshot %s",
            hostclass or group_name,
            snapshot_id
        )
        return
    device['snapshotId'] = snapshot_id
    device['volumeSize'] = snapshot_size
    logger.info(
        "Updating %s group's snapshot from %s to %s",
        hostclass or group_name,
        previous_snapshot_id,
        snapshot_id
    )
    self.spotinst_client.update_group(
        group['id'],
        {
            'group': {
                'compute': {
                    'launchSpecification': {
                        'blockDeviceMappings': mappings
                    }
                }
            }
        }
    )
def _roll_group(self, group_id, batch_percentage=100, grace_period=GROUP_ROLL_TIMEOUT,
                health_check_type='EC2', wait=False):
    """
    Recreate the instances in a Elastigroup
    :param group_id (str): Elastigroup ID to roll
    :param batch_percentage (int): Percentage of instances to roll at a time (0-100)
    :param grace_period (int): Time in seconds to wait for new instances to become healthy
    :param health_check_type (str): Health check type passed through to the roll request
    :param wait (boolean): True to wait for roll operation to finish
    :raises TimeoutError if grace_period has expired
    """
    self.spotinst_client.roll_group(group_id, batch_percentage, grace_period, health_check_type)
    if wait:
        # wait for the deploy to appear in list
        time.sleep(10)
        deployments = self.spotinst_client.get_deployments(group_id)
        # NOTE(review): assumes the newest deployment is the last list entry --
        # confirm the API's ordering guarantee.
        deploy_id = deployments[-1]['id']
        current_time = time.time()
        # wait an extra amount of time after grace_period has ended to give time for roll to finish
        stop_time = current_time + grace_period + 300
        while current_time < stop_time:
            roll_status = self.spotinst_client.get_roll_status(group_id, deploy_id)
            # Any status other than in_progress/starting is terminal; only
            # 'finished' counts as success.
            if roll_status['status'] not in ('in_progress', 'starting'):
                if roll_status['status'] != 'finished':
                    logger.error("Roll of group %s did not complete successfully with status %s",
                                 group_id, roll_status['status'])
                break
            logger.info("Waiting for %s group to roll in order to update settings", group_id)
            time.sleep(10)
            current_time = time.time()
        # Reaching the deadline without a terminal status is a timeout.
        if current_time >= stop_time:
            raise TimeoutError(
                "Timed out after waiting %s seconds for rolling deploy of %s" %
                (grace_period, group_id)
            )
def _get_instance_type_config(self, instance_types):
return {
"ondemand": instance_types.split(':')[0],
"spot": instance_types.split(':')
}
def _get_load_balancer_config(self, load_balancers, target_groups):
lbs = [{"name": elb, "type": "CLASSIC"} for elb in load_balancers] if load_balancers else []
tgs = [{"arn": tg, "type": "TARGET_GROUP"} for tg in target_groups] if target_groups else []
return {
"loadBalancers": lbs + tgs
} if load_balancers or target_groups else None
def _get_risk_config(self, spotinst_reserve):
if not spotinst_reserve:
return {
'risk': 100,
'onDemandCount': None
}
if str(spotinst_reserve).endswith('%'):
return {
'risk': 100 - int(spotinst_reserve.strip('%')),
'onDemandCount': None
}
return {
'risk': None,
'onDemandCount': int(spotinst_reserve)
}
def _get_block_device_config(self, block_device_mappings):
bdms = []
for block_device_mapping in block_device_mappings or []:
for name, device in block_device_mapping.iteritems():
if device.ephemeral_name:
bdms.append({
'deviceName': name,
'virtualName': device.ephemeral_name
})
elif any([device.size, device.iops, device.snapshot_id]):
bdm = {'deviceName': name, 'ebs': {'deleteOnTermination': device.delete_on_termination}}
if device.size:
bdm['ebs']['volumeSize'] = device.size
if device.iops:
bdm['ebs']['iops'] = device.iops
if device.volume_type:
bdm['ebs']['volumeType'] = device.volume_type
if device.snapshot_id:
bdm['ebs']['snapshotId'] = device.snapshot_id
bdms.append(bdm)
return bdms or None
|
{
"content_hash": "84ce73e4b45c8af521ab7d63b9f76922",
"timestamp": "",
"source": "github",
"line_count": 785,
"max_line_length": 110,
"avg_line_length": 42.21528662420382,
"alnum_prop": 0.5738555780198558,
"repo_name": "amplifylitco/asiaq",
"id": "a861aab686dc1a7e936c8911b2e812e3f415905f",
"size": "33139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disco_aws_automation/disco_elastigroup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Groovy",
"bytes": "509"
},
{
"name": "Python",
"bytes": "1389525"
},
{
"name": "Ruby",
"bytes": "42453"
},
{
"name": "Shell",
"bytes": "164839"
}
],
"symlink_target": ""
}
|
"""Demonstrate that a Model can inherite from more abstract models."""
from django.conf import settings
import salesforce
from salesforce import models
from salesforce.models import SalesforceModel
# All demo models simplified for readability, except tested features
class User(SalesforceModel):
    """Minimal Salesforce User model used as a ForeignKey target below."""
    username = models.CharField(max_length=80)
    email = models.CharField(max_length=100)
class DefaultMixin(SalesforceModel):
    """Common fields used in most SFDC models."""
    # Flagged read-only on the Salesforce side (sf_read_only) and auto-updated locally.
    last_modified_date = models.DateTimeField(sf_read_only=models.READ_ONLY, auto_now=True)
    # Callable default evaluated per instance; 'DEFAULT' pk is presumably a
    # django-salesforce placeholder resolved at save time -- TODO confirm.
    owner = models.ForeignKey(User, on_delete=models.DO_NOTHING,
                              default=lambda:User(pk='DEFAULT'))  # db_column='OwnerId'

    class Meta:
        abstract = True
class CommonAccount(DefaultMixin, SalesforceModel):
    """Common fields of Salesforce Account model."""
    description = models.TextField()
    phone = models.CharField(max_length=255)

    class Meta:
        abstract = True
class CoreAccount(SalesforceModel):
    """Fields specific to standard Account only."""
    name = models.CharField(max_length=255)

    class Meta:
        abstract = True
class PersonAccount(SalesforceModel):
    """Fields specific to Account after activating "Person Account"."""
    # Field names use SFDC-style capitalization (no db_column mapping needed).
    LastName = models.CharField(max_length=80)
    FirstName = models.CharField(max_length=40)
    # Computed/read-only on the Salesforce side (sf_read_only flags).
    Name = models.CharField(max_length=255, sf_read_only=models.READ_ONLY)
    IsPersonAccount = models.BooleanField(default=False, sf_read_only=models.READ_ONLY)
    PersonEmail = models.CharField(max_length=100)

    class Meta:
        abstract = True
# Compose the concrete Account model from different abstract mixins depending
# on whether the settings declare "Person Accounts" activated in the org.
if getattr(settings, 'PERSON_ACCOUNT_ACTIVATED', False):
    class Account(CommonAccount, PersonAccount):
        pass
else:
    class Account(CommonAccount, CoreAccount):
        pass
class DummyMixin(object):
    """Plain (non-model) mixin used as an extra base class on Contact."""
    def some_overridden_method(self):
        pass
class DummyMixin2(object):
    """Empty plain mixin placed after SalesforceModel in Contact's bases."""
    pass
class Contact(DummyMixin, DefaultMixin, SalesforceModel, DummyMixin2):
    """Contact model mixing plain mixins on both sides of SalesforceModel."""
    # Read-only on the Salesforce side (sf_read_only flag).
    name = models.CharField(max_length=255, sf_read_only=models.READ_ONLY)
    last_name = models.CharField(max_length=80)
    first_name = models.CharField(max_length=40, blank=True)
    # Uses salesforce.fields.ForeignKey directly (vs models.ForeignKey above).
    account = salesforce.fields.ForeignKey(Account, on_delete=salesforce.models.DO_NOTHING)
class ProxyContact(Contact):
    """Proxy of Contact (no new fields, same underlying object)."""
    class Meta:
        proxy = True
class Proxy2Contact(ProxyContact):
    """Proxy of a proxy model, testing two levels of proxying."""
    class Meta:
        proxy = True
|
{
"content_hash": "1c8207015e96c045f9199f78b8811b55",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 88,
"avg_line_length": 28.2375,
"alnum_prop": 0.769809650287738,
"repo_name": "philchristensen/django-salesforce",
"id": "9f36d9ad0e79727f624bb64fe549fe28a6349822",
"size": "2259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_mixin/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1215"
},
{
"name": "Python",
"bytes": "202579"
},
{
"name": "Shell",
"bytes": "2666"
}
],
"symlink_target": ""
}
|
from manilaclient import base
from manilaclient import utils
class Extension(utils.HookableMixin):
    """Extension descriptor.

    Wraps an imported extension module: registers any supported hook
    callables the module defines and records its Manager subclass (if any).
    """

    SUPPORTED_HOOKS = ('__pre_parse_args__', '__post_parse_args__')

    def __init__(self, name, module):
        self.name = name
        self.module = module
        self._parse_extension_module()

    def _parse_extension_module(self):
        """Scan the module namespace for hooks and a Manager subclass."""
        self.manager_class = None
        for member_name, member in self.module.__dict__.items():
            if member_name in self.SUPPORTED_HOOKS:
                self.add_hook(member_name, member)
            elif utils.safe_issubclass(member, base.Manager):
                self.manager_class = member

    def __repr__(self):
        return "<Extension '%s'>" % self.name
|
{
"content_hash": "26a57d8e87532e8fe680dc27129af36a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 67,
"avg_line_length": 31.791666666666668,
"alnum_prop": 0.6094364351245085,
"repo_name": "bswartz/python-manilaclient",
"id": "82fa8f182d841080f58ffc8f0e94611b4005abe9",
"size": "1393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "manilaclient/extension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "183311"
},
{
"name": "Shell",
"bytes": "5576"
}
],
"symlink_target": ""
}
|
"""Utilities to handle tensor tracer parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
import re
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
# Trace modes: what gets recorded for each traced tensor.
TRACE_MODE_PART_TENSOR = 'part-tensor'
TRACE_MODE_FULL_TENSOR = 'full-tensor'
TRACE_MODE_FULL_TENSOR_SUMMARY = 'full_tensor_summary'
TRACE_MODE_NAN_INF = 'nan-inf'
TRACE_MODE_NORM = 'norm'
TRACE_MODE_MAX_ABS = 'max-abs'
TRACE_MODE_SUMMARY = 'summary'
# Summary mode collects a finite set of signatures for each traced tensor
# (such as norm, max, min, mean) and dumps them using tb summaries.
# Full tensor mode dumps the whole tensor values for the traced tensors without
# any processing on them; using tb summaries.

_SUBMODE_BRIEF = 'brief'
_SUBMODE_DETAILED = 'detailed'

# Patterns for parsing --name='value', --name="value", --name=value, and
# bare --name flag forms inside the TENSOR_TRACER_FLAGS environment variable.
_FLAG_SINGLE_QUOTE_PAT = re.compile(r"\s*--([^=]+)='([^']*)'")
_FLAG_DOUBLE_QUOTE_PAT = re.compile(r'\s*--([^=]+)="([^"]*)"')
_FLAG_NO_QUOTE_PAT = re.compile(r'\s*--([^=]+)=(\S*)')
_FLAG_NO_EQUAL_PAT = re.compile(r'\s*--([^=]+)\s*')

# Environment variable carrying all flags, and the individual flag names.
FLAGS_ENV_VAR = 'TENSOR_TRACER_FLAGS'
FLAG_NAME_ENABLE = 'enable'
FLAG_NAME_TRACE_MODE = 'trace_mode'
FLAG_NAME_TRACE_SCALAR_OPS = 'trace_scalar'
FLAG_NAME_SUBMODE = 'submode'
FLAG_NAME_EXCLUDED_OPNAMES = 'excluded_opnames'
FLAG_NAME_EXCLUDED_OPTYPES = 'excluded_optypes'
FLAG_NAME_INCLUDED_OPNAMES = 'included_opnames'
FLAG_NAME_INCLUDED_OPTYPES = 'included_optypes'
FLAG_NAME_TRACE_LEVEL = 'trace_level'
FLAG_NAME_TRACE_DIR = 'trace_dir'
FLAG_NAME_REPORT_FILE = 'report_file'
FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR = 'use_test_undeclared_outputs_dir'
FLAG_NAME_OP_RANGE = 'op_range'
# Folder to dump the pre (before tensor tracer updates) and post graphs (after
# tensor tracer updates).
FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS = 'dump_graphs'
FLAG_NAME_SUMMARY_SIGNATURES = 'signatures'
FLAG_NAME_SUMMARY_PER_CORE = 'collect_summary_per_core'
FLAG_NAME_TEMP_CACHE_VAR = 'use_temp_cache'
FLAG_NAME_INSPECT_TRACE = 'inspect_trace'
FLAG_NAME_FINGERPRINT_DIR = 'use_fingerprint_subdirectory'
FLAG_FLUSH_SUMMARY = 'flush_summaries'
# Flag used in v2 only.
FLAG_SUMMARY_MODE_TYPE = 'summary_mode'
UI_MODE = 'ui'
TEXT_MODE = 'text'

_OP_RANGE_PAT = re.compile(r'(\d+):(\d+)')
_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR = 'TEST_UNDECLARED_OUTPUTS_DIR'
_TT_DEFAULT_TRACE_LEVEL = 3

# Signature names for 'summary' trace mode, namespaced with a common prefix.
_TT_PREFIX = 'tensor_tracer'
_TT_NORM = 'norm'
_TT_MAX = 'max'
_TT_MAX_ABS = 'max-abs'
_TT_MIN = 'min'
_TT_MEAN = 'mean'
_TT_VAR = 'var'
_TT_SIZE = 'size'
TT_SUMMARY_NORM = '%s_%s' % (_TT_PREFIX, _TT_NORM)
TT_SUMMARY_MAX = '%s_%s' % (_TT_PREFIX, _TT_MAX)
TT_SUMMARY_MAX_ABS = '%s_%s' % (_TT_PREFIX, _TT_MAX_ABS)
TT_SUMMARY_MIN = '%s_%s' % (_TT_PREFIX, _TT_MIN)
TT_SUMMARY_MEAN = '%s_%s' % (_TT_PREFIX, _TT_MEAN)
TT_SUMMARY_VAR = '%s_%s' % (_TT_PREFIX, _TT_VAR)
TT_SUMMARY_SIZE = '%s_%s' % (_TT_PREFIX, _TT_SIZE)
TT_SUMMARY_SIGNATURES = (TT_SUMMARY_NORM, TT_SUMMARY_MAX, TT_SUMMARY_MIN,
                         TT_SUMMARY_MEAN, TT_SUMMARY_VAR, TT_SUMMARY_SIZE,
                         TT_SUMMARY_MAX_ABS)
class TTParameters(object):
"""A class that handles the parameters of Tensor Tracer."""
def __init__(self, env=None):
    """Parse and validate Tensor Tracer flags from ``env`` (default os.environ)."""
    if env:
        self._env = env
    else:
        self._env = os.environ
    # Reject unknown flag names before interpreting any individual values.
    self._validate_flag_names()
    self.trace_mode = self._get_trace_mode()
    self.submode = self._get_submode()
    self.trace_dir = self._get_trace_dir()
    self.report_file_path = self._get_report_filepath()
    self.op_range = self._get_op_range()
    # Regex filters selecting which ops are included in/excluded from tracing.
    self.excluded_opname_re_list = self._flag_value_to_re_list(
        FLAG_NAME_EXCLUDED_OPNAMES)
    self.excluded_optype_re_list = self._flag_value_to_re_list(
        FLAG_NAME_EXCLUDED_OPTYPES)
    self.included_opname_re_list = self._flag_value_to_re_list(
        FLAG_NAME_INCLUDED_OPNAMES)
    self.included_optype_re_list = self._flag_value_to_re_list(
        FLAG_NAME_INCLUDED_OPTYPES)
    self.trace_scalar_ops = self.is_flag_on(FLAG_NAME_TRACE_SCALAR_OPS)
    # Compact tracing is used for the modes that reduce each tensor to a
    # small, fixed number of values.
    self.use_compact_trace = self.trace_mode in (TRACE_MODE_NAN_INF,
                                                 TRACE_MODE_NORM,
                                                 TRACE_MODE_MAX_ABS,
                                                 TRACE_MODE_SUMMARY)
    self.use_temp_cache_var = self.is_flag_on(FLAG_NAME_TEMP_CACHE_VAR)
    self.inspect_trace = self.is_flag_on(FLAG_NAME_INSPECT_TRACE)
    self.use_fingerprint_subdir = self.is_flag_on(FLAG_NAME_FINGERPRINT_DIR)
    _, self.graph_dump_path = self.get_flag_value(
        FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS)
    self.trace_level = self._get_flag_int_value(FLAG_NAME_TRACE_LEVEL,
                                                _TT_DEFAULT_TRACE_LEVEL)
    self.summary_signatures = self._get_summary_signatures()
    self.collect_summary_per_core = self.is_flag_on(FLAG_NAME_SUMMARY_PER_CORE)
    self.flush_summaries_with_outside_compile = self.is_flag_on(
        FLAG_FLUSH_SUMMARY)
    self.summary_mode = self._get_summary_mode()
    # Cross-flag consistency checks run last, once everything is parsed.
    self._check_flag_errors()
def _check_flag_errors(self):
    """Check cross-flag consistency: summary modes require an explicit trace_dir."""
    summary_modes = (TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY)
    if self.trace_mode in summary_modes and not self.trace_dir:
        raise ValueError('trace_dir must be explicitly provided in '
                         'TENSOR_TRACER_FLAGS when summary mode is used.')
def _get_report_filepath(self):
    """Return the report file path, relocated under the test outputs dir if requested."""
    found, report_path = self.get_flag_value(FLAG_NAME_REPORT_FILE)
    if not (found and report_path and self.use_test_undeclared_outputs_dir()):
        return report_path
    # Relative paths are rebased onto the test-undeclared-outputs directory.
    if os.path.isabs(report_path):
        raise ValueError('If use_test_undeclared_outputs_dir is set,'
                         'report_file_path cannot be an absolute path (%s)'
                         %report_path)
    outputs_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
    return os.path.join(outputs_dir, report_path)
def _get_op_range(self):
    """Return the (first, last) op index range to trace; (-1, -1) means all ops."""
    found, op_range = self.get_flag_value(FLAG_NAME_OP_RANGE)
    if not found or not op_range:
        return (-1, -1)  # flag absent: include all ops
    match = _OP_RANGE_PAT.match(op_range)
    if not match:
        return (-1, -1)  # malformed value: include all ops
    return (int(match.group(1)), int(match.group(2)))
def _get_trace_dir(self):
    """Return the trace directory, honoring the test-undeclared-outputs option."""
    found, trace_dir = self.get_flag_value(FLAG_NAME_TRACE_DIR)
    use_test_dir = self.use_test_undeclared_outputs_dir()
    if found and trace_dir and use_test_dir:
        # The two options are mutually exclusive ways of choosing a directory.
        raise ValueError(
            'Cannot not use --%s and --%s at the same time' %
            (FLAG_NAME_TRACE_DIR, FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR))
    if use_test_dir:
        trace_dir = self._env.get(_TEST_UNDECLARED_OUTPUTS_DIR_ENV_VAR)
    return trace_dir
def _get_trace_mode(self):
    """Return the configured trace mode (default 'norm'), raising for unknown modes."""
    found, trace_mode = self.get_flag_value(FLAG_NAME_TRACE_MODE)
    if not found or not trace_mode:
        trace_mode = TRACE_MODE_NORM
    valid_trace_modes = [
        TRACE_MODE_NAN_INF, TRACE_MODE_PART_TENSOR, TRACE_MODE_FULL_TENSOR,
        TRACE_MODE_NORM, TRACE_MODE_MAX_ABS,
        TRACE_MODE_SUMMARY, TRACE_MODE_FULL_TENSOR_SUMMARY
    ]
    if trace_mode in valid_trace_modes:
        return trace_mode
    raise ValueError('Invalid trace mode "%s" given to the Tensor_Tracer.'
                     'Valid trace modes are: %s'%(trace_mode,
                                                  valid_trace_modes))
def is_brief_mode(self):
    """Return True when the configured submode is brief."""
    return self.submode == _SUBMODE_BRIEF
def _get_submode(self):
    """Return the submode (default 'detailed'), raising for invalid values."""
    found, submode = self.get_flag_value(FLAG_NAME_SUBMODE)
    if not found or not submode:
        submode = _SUBMODE_DETAILED
    # (Removed a dead 'if not submode: return' branch: submode is always
    # truthy here because the default was just applied above.)
    valid_submodes = [_SUBMODE_DETAILED, _SUBMODE_BRIEF]
    if submode not in valid_submodes:
        raise ValueError('Invalid submode "%s" given to the Tensor_Tracer.'
                         'Valid submodes are: %s'%(submode,
                                                   valid_submodes))
    return submode
@staticmethod
def match_next_flag(flags, pos):
    """Returns the match for the next TensorTracer flag.

    Args:
      flags: a string that contains the flags.
      pos: where in flags to start the search.

    Returns:
      A pair where the first element is the regular-expression
      match found and the second element indicates if the match
      has a value.
    """
    # Try the value-carrying flag forms first, in quoting-specificity order.
    for pattern in (_FLAG_DOUBLE_QUOTE_PAT, _FLAG_SINGLE_QUOTE_PAT,
                    _FLAG_NO_QUOTE_PAT):
        match = pattern.match(flags, pos)
        if match:
            return match, True
    # A flag given without '=value'.
    match = _FLAG_NO_EQUAL_PAT.match(flags, pos)
    if match:
        return match, False
    # No flag found at this position.
    return None, False
def _validate_flag_names(self):
    """Raise ValueError if TENSOR_TRACER_FLAGS contains an unknown flag name."""
    valid_flag_names = [
        FLAG_NAME_ENABLE, FLAG_NAME_TRACE_MODE,
        FLAG_NAME_TRACE_SCALAR_OPS,
        FLAG_NAME_SUBMODE, FLAG_NAME_EXCLUDED_OPNAMES,
        FLAG_NAME_EXCLUDED_OPTYPES, FLAG_NAME_INCLUDED_OPNAMES,
        FLAG_NAME_INCLUDED_OPTYPES, FLAG_NAME_TRACE_DIR,
        FLAG_NAME_REPORT_FILE,
        FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR,
        FLAG_NAME_OP_RANGE,
        FLAG_NAME_DUMP_BEFORE_AFTER_GRAPHS, FLAG_NAME_TRACE_LEVEL,
        FLAG_NAME_SUMMARY_SIGNATURES, FLAG_NAME_SUMMARY_PER_CORE,
        FLAG_NAME_TEMP_CACHE_VAR, FLAG_NAME_FINGERPRINT_DIR,
        FLAG_NAME_INSPECT_TRACE, FLAG_FLUSH_SUMMARY, FLAG_SUMMARY_MODE_TYPE
    ]
    flags_str = self._env.get(FLAGS_ENV_VAR)
    if not flags_str:
        return
    # Walk the flag string flag-by-flag, checking each parsed name.
    pos = 0
    while True:
        match, _ = TTParameters.match_next_flag(flags_str, pos)
        if not match:
            return
        parsed_name = match.group(1)
        if parsed_name not in valid_flag_names:
            raise ValueError(
                'The flag name "%s" passed via the environment variable "%s" '
                'is invalid. Valid flag names are:'
                '\n%s' % (parsed_name, FLAGS_ENV_VAR, valid_flag_names))
        pos = match.end()
def _supported_signatures(self):
    """Returns a tuple of supported signatures."""
    # Module-level tuple of the prefixed signature names (norm, max, min, ...).
    return TT_SUMMARY_SIGNATURES
def _get_summary_signatures(self):
    """Verifies and returns the summary signatures.

    Returns:
      A dictionary of the signature identifiers {signature: index} that will be
      computed when trace_mode is summary.
    """
    requested = self._flag_value_as_list(FLAG_NAME_SUMMARY_SIGNATURES)
    supported = self._supported_signatures()
    accepted = []
    for requested_name in requested:
        # Accept either the bare name or its tensor_tracer-prefixed form.
        prefixed_name = '%s_%s' % (_TT_PREFIX, requested_name)
        if requested_name in supported:
            accepted.append(requested_name)
        elif prefixed_name in supported:
            accepted.append(prefixed_name)
        else:
            logging.warning('Unknown signature:%s. Supported signatures: %s' %
                            (requested_name, supported))
    if not accepted:
        # Default case collects norm and max only.
        return {TT_SUMMARY_MAX_ABS: 0, TT_SUMMARY_NORM: 1}
    return {signature: idx for idx, signature in enumerate(accepted)}
def get_signature_to_agg_fn_map(self):
    """Returns a map that contains the aggregate function for each signature."""
    # Maps each trace mode / summary signature name to the TF op used to
    # aggregate the traced tensor values.
    return {TRACE_MODE_NORM: linalg_ops.norm,
            TRACE_MODE_MAX_ABS: math_ops.reduce_max,
            TRACE_MODE_NAN_INF: math_ops.reduce_max,
            TT_SUMMARY_NORM: linalg_ops.norm,
            TT_SUMMARY_MAX: math_ops.reduce_max,
            TT_SUMMARY_MAX_ABS:
                lambda t, axis=0: math_ops.reduce_max(math_ops.abs(t),  # pylint: disable=g-long-lambda
                                                      axis=axis),
            TT_SUMMARY_MIN: math_ops.reduce_min,
            TT_SUMMARY_MEAN: math_ops.reduce_mean,
            TT_SUMMARY_VAR: math_ops.reduce_max,  # Simply reduce max variance.
            TT_SUMMARY_SIZE: math_ops.reduce_sum}
def _flag_value_as_list(self, wanted_flag_name):
"""Returns the string list of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
The list value of the flag.
"""
string_value_list = []
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
string_value_list = flag_value.split(',')
return string_value_list
def _flag_value_as_int_list(self, wanted_flag_name):
"""Returns the integer list of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
Returns:
the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
"""
int_list = []
found, flag_value = self.get_flag_value(wanted_flag_name)
if found and flag_value:
try:
integer_values = flag_value.split(',')
int_list = [int(int_val) for int_val in integer_values]
except ValueError:
logging.warning('Cannot convert %s to int for flag %s', int_list,
wanted_flag_name)
return int_list
def _get_flag_int_value(self, wanted_flag_name, default_value):
"""Returns the int value of a TensorTracer flag.
Args:
wanted_flag_name: the name of the flag we are looking for.
default_value: the default value for the flag, if not provided.
Returns:
the value of the flag.
Raises:
RuntimeError: If supposedly deadcode is reached.
"""
flag_int_value = default_value
found, flag_value = self.get_flag_value(wanted_flag_name)
if found:
try:
flag_int_value = int(flag_value)
except ValueError:
logging.warning('Cannot convert %s to int for flag %s' % (
flag_int_value, wanted_flag_name))
return flag_int_value
def get_flag_value(self, wanted_flag_name):
    """Returns the value of a TensorTracer flag.

    Args:
      wanted_flag_name: the name of the flag we are looking for.

    Returns:
      A pair where the first element indicates if the flag is
      found and the second element is the value of the flag.
    """
    tensor_tracer_flags = self._env.get(FLAGS_ENV_VAR)
    if not tensor_tracer_flags:
        return False, None
    pos = 0  # current search position within the flags string
    while True:
        match, has_value = TTParameters.match_next_flag(
            tensor_tracer_flags, pos)
        if not match:
            # Exhausted the flag string without finding the wanted flag.
            return False, None
        flag_name = match.group(1)
        flag_value = match.group(2) if has_value else None
        if flag_name == wanted_flag_name:
            return True, flag_value
        pos = match.end()
    # Note: the loop above always returns; the previous trailing
    # 'raise RuntimeError' was unreachable dead code and has been removed.
def _flag_value_to_re_list(self, flag_name):
"""Converts list of strings to compiled RE."""
re_list = []
found, flag_value = self.get_flag_value(flag_name)
if not found or not flag_value:
return re_list
list_of_values = flag_value.split(',')
for v in list_of_values:
r = re.compile(v)
re_list.append(r)
return re_list
def is_flag_on(self, flag_name):
"""Returns True if the given flag is on."""
found, flag_value = self.get_flag_value(flag_name)
if not found:
return False
if flag_value is None:
return True
# Depends on the flag value.
flag_value = flag_value.lower()
enabled = flag_value in ['1', 't', 'true', 'y', 'yes']
return enabled
def is_enabled(self):
    """Return True (logging the active flags) iff the 'enable' flag is on."""
    if not self.is_flag_on(FLAG_NAME_ENABLE):
        return False
    logging.debug('Tensor Tracer is enabled with flags %s.',
                  self._env.get(FLAGS_ENV_VAR))
    return True
def use_test_undeclared_outputs_dir(self):
    """Decides the output directory of the report and trace files.

    Returns:
      True if the output files should be written to the
      test-undeclared-outputs-directory defined via an
      env variable.
    """
    return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)
def _get_summary_mode(self):
    """Return the summary mode (default 'ui'), raising for invalid values."""
    found, summary_mode = self.get_flag_value(FLAG_SUMMARY_MODE_TYPE)
    if not found:
        summary_mode = UI_MODE
    valid_summary_modes = [UI_MODE, TEXT_MODE]
    if summary_mode in valid_summary_modes:
        return summary_mode
    raise ValueError('Invalid summary mode "%s" given to the Tensor_Tracer.'
                     'Valid submodes are: %s'%(summary_mode,
                                               valid_summary_modes))
|
{
"content_hash": "402e9e0ff072010f34c58aa2bddb6f57",
"timestamp": "",
"source": "github",
"line_count": 478,
"max_line_length": 103,
"avg_line_length": 35.89748953974895,
"alnum_prop": 0.6373914563785769,
"repo_name": "frreiss/tensorflow-fred",
"id": "b45f9fc6409464f41f8092fb1cf7da926f84093f",
"size": "17842",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/tpu/tensor_tracer_flags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from analyze_sessions.commands import AnalyzeSessionsCommand as Command
|
{
"content_hash": "5084fba2e6cfc6e90a0807aba8235361",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 71,
"avg_line_length": 72,
"alnum_prop": 0.8888888888888888,
"repo_name": "isnotajoke/django-analyze-sessions",
"id": "dadd9fe6b50633efec12fa15016914b8fa600510",
"size": "200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyze_sessions/management/commands/analyze-sessions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8386"
}
],
"symlink_target": ""
}
|
import json
import os.path
import time
import subprocess
import sys
# Filesystem locations used by the controller (absolute paths on the Pi).
CONFIG_FILE_NAME = "/opt/pi/CheeseCave/config.json"
STATE_FILE_NAME = "/var/lib/CheeseCave/sensor1-state.json"
RELAY_EXECUTABLE = "/opt/pi/CheeseCave/relay"
LOG_FILE_NAME = "/var/lib/CheeseCave/control-log.json"
# Module-level state refreshed by load_config(); a pin value of 0 means
# "not configured" throughout this script.
config_mod_time = 0
config_compressor_pin = 0
config_humidity_pin = 0
def load_config():
    """Read CONFIG_FILE_NAME and refresh the module-level globals.

    Updates config_mod_time (for change detection), config_humidity_pin
    and config_compressor_pin, then returns the (config, targets) dicts
    parsed from the JSON file.
    """
    global config_mod_time
    global config_compressor_pin
    global config_humidity_pin
    with open(CONFIG_FILE_NAME, 'r') as f:
        data = json.load(f)
        # Record the mtime so config_has_changed() can detect edits.
        config_mod_time = os.path.getmtime(CONFIG_FILE_NAME)
        print " load config data is ",data
        config_humidity_pin = data['config']['humidity_pin']
        print "new humidity pin ",config_humidity_pin
        config_compressor_pin = data['config']['compressor_pin']
        print "new compressor pin ",config_compressor_pin
        return data['config'], data['targets']
def config_has_changed():
    """Report whether CONFIG_FILE_NAME was modified since load_config() ran."""
    current_mtime = os.path.getmtime(CONFIG_FILE_NAME)
    return current_mtime != config_mod_time
def log_state(relay, state):
    """Append one JSON-formatted line recording a relay state change."""
    record = '{"epoch":%d, "relay":"%s", "state":"%s"}\n' % (
        int(time.time()), relay, state)
    with open(LOG_FILE_NAME, 'a') as out:
        out.write(record)
# returns "on" or "off"
def get_pin_state(pin):
    """Ask the relay helper binary for the current state of *pin*."""
    cmd = ["sudo", RELAY_EXECUTABLE, "state", str(pin)]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout
def reverse(s):
    """Toggle between the two relay state strings: "off" -> "on", else "off"."""
    return "on" if s == "off" else "off"
# set to "on" or "off"
def set_pin_state(pin, state):
    """Drive *pin* to *state* ("on"/"off") via the relay helper binary."""
    print "setting pin ",pin," to ",state
    # Fire-and-forget: the Popen is never waited on, so a failing relay
    # command goes unnoticed here.
    subprocess.Popen(["sudo", RELAY_EXECUTABLE, state, str(pin)])
def get_compressor_state():
    """Return the compressor relay state, or "off" if no pin is configured."""
    if config_compressor_pin != 0:
        return get_pin_state(config_compressor_pin)
    return "off"
def set_compressor_state(state):
    """Log and apply *state* to the compressor relay, if one is configured."""
    if config_compressor_pin != 0:
        log_state('compressor', state)
        set_pin_state(config_compressor_pin, state)
# the polarity of the humidity pin is backward
def get_humidity_state():
    """Return the logical humidifier state; the pin itself is wired inverted."""
    if config_humidity_pin == 0:
        return "off"
    raw = get_pin_state(config_humidity_pin)
    return reverse(raw)
def set_humidity_state(state):
    """Log the logical *state*, then drive the inverted humidity pin."""
    if config_humidity_pin != 0:
        # Log the logical state; the pin level written is the opposite.
        log_state('humidity', state)
        set_pin_state(config_humidity_pin, reverse(state))
def get_current_sensors():
    """Load the latest sensor readings from STATE_FILE_NAME.

    Returns the parsed JSON dict written by the sensor-reader process
    (keys 'temperature' and 'humidity' are read by the main loop).
    """
    # Use a context manager so the file handle is always closed; the
    # original opened the file and never closed it, leaking a handle on
    # every poll iteration.
    with open(STATE_FILE_NAME, 'r') as state:
        return json.load(state)
if __name__ == "__main__":
    # Control loop: poll sensors, drive the compressor and humidifier
    # relays toward the configured targets, and re-read config on change.
    first_time = True
    compressor_end = 0  # epoch after which the compressor may be re-evaluated
    humidity_end = 0    # epoch after which the humidifier may be re-evaluated
    config, targets = load_config()
    # special case: force both relays off so the loop starts from a known state
    if get_compressor_state() == 'on' :
        print "turning off compressor for initial state"
        set_compressor_state('off')
    if get_humidity_state() == 'on' :
        print "turning off humidity for initial state"
        set_humidity_state('off')
    while True:
        # get configuration if we need to
        if (config_has_changed() or first_time):
            first_time = False
            config, targets = load_config()
            print "config ",config
            print "targets ",targets
        # get relay state(s)
        compressor = get_compressor_state()
        print "compressor state (",compressor,")"
        humidity = get_humidity_state()
        print "humidity state (",humidity,")"
        # get current sensor values (temperature, humidity)
        sensors = get_current_sensors()
        print "sensors ",sensors
        # modify compressor relay based on desired conditions,
        # current stats, and relay state.
        # For the moment, we only do the compressor relay
        # The *_delay holdoff keeps the compressor from short-cycling.
        if compressor_end > 0 :
            if time.time() > compressor_end :
                if sensors['temperature'] > targets['temperature']:
                    # add another quanta of time without bumming out compressor
                    compressor_end = time.time() + config['temperature_delay']
                    print "adding more time to the compressor end"
                else:
                    set_compressor_state("off")
                    compressor_end = 0
                    print "turning off compressor after delay: temp",sensors['temperature']
            else :
                print "compressor staying on"
        elif ( sensors['temperature'] > targets['temperature'] ) and ( compressor == 'off' ) :
            set_compressor_state('on')
            compressor_end = time.time() + config['temperature_delay']
            print time.strftime("%b %d %Y %I:%M%p %Z: turned temp relay ON")
            sys.stdout.flush()
        else:
            print " compressor relay is (",compressor,")"
            print " sensor is (",sensors['temperature'],")"
            print " target temp is (",targets['temperature'],")"
            print "no temp change needed"
        # modify humidity relay based on desired conditions,
        # current current value, and relay state.
        # Only do if we have humidity configured.
        if config_humidity_pin :
            if humidity_end > 0 :
                if time.time() > humidity_end :
                    if sensors['humidity'] < targets['humidity']:
                        # add another quanta of time without bumming out device
                        humidity_end = time.time() + config['humidity_delay']
                        print "adding more time to the humidity end"
                    else:
                        set_humidity_state('off')
                        humidity_end = 0
                        print "turning off humidifier after delay: humid",sensors['humidity']
                else :
                    print "humidifier staying on"
            elif (sensors['humidity'] < targets['humidity']) and humidity == 'off':
                set_humidity_state('on')
                humidity_end = time.time() + config['humidity_delay']
                print time.strftime("%b %d %Y %I:%M%p %Z: turned humidity ON")
                sys.stdout.flush()
            else:
                print " humidity relay is (",humidity,")"
                print " humidity value is (",sensors['humidity'],")"
                print " target humidity is (",targets['humidity'],")"
                print "no humidity change needed"
        time.sleep(config['sleep_delay'])
|
{
"content_hash": "a3321d886e66a97cf74ee5bd0fe66e85",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 109,
"avg_line_length": 30.72,
"alnum_prop": 0.6795014880952381,
"repo_name": "cswales/CheeseCave",
"id": "35d380fdeeab62e10bca351d2d0a7532c57f2ac9",
"size": "5395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cheesecave_ctrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "29625"
},
{
"name": "Go",
"bytes": "7447"
},
{
"name": "HTML",
"bytes": "7266"
},
{
"name": "Makefile",
"bytes": "194"
},
{
"name": "Python",
"bytes": "21667"
},
{
"name": "Shell",
"bytes": "554"
}
],
"symlink_target": ""
}
|
'''@file encoder_decoder_reconstructor.py
contains the EncoderDecoderReconstructor class'''
import tensorflow as tf
from nabu.neuralnetworks.classifiers import classifier
from nabu.neuralnetworks.classifiers.asr.encoders import encoder_factory
from nabu.neuralnetworks.classifiers.asr.asr_decoders import asr_decoder_factory
from nabu.neuralnetworks.classifiers.asr.reconstructors \
import reconstructor_factory
class EncoderDecoderReconstructor(classifier.Classifier):
    '''a general class for an encoder-decoder-reconstructor ASR system:
    an encoder produces high-level features, an ASR decoder predicts text
    logits from them, and a reconstructor predicts quantized audio logits'''
    def __init__(self, conf, output_dim, name=None):
        '''Constructor for this kind of object
        Args:
            conf: The classifier configuration
            output_dim: the classifier output dimension as a tuple of the
                dimension of the actual outputs and the dimension of the
                quantized reconstructed audio samples
            name: the classifier name
        '''
        super(EncoderDecoderReconstructor, self).__init__(
            conf, output_dim, name)
        #create the listener (encoder)
        self.encoder = encoder_factory.factory(conf)
        #create the speller (text decoder); output_dim[0] is the text dim
        self.decoder = asr_decoder_factory.factory(conf, self.output_dim[0])
        #create the reconstructor; output_dim[1] is the audio dim
        self.reconstructor = reconstructor_factory.factory(conf,
                                                           self.output_dim[1])
    def _get_outputs(self, inputs, input_seq_length, targets=None,
                     target_seq_length=None, is_training=False):
        '''
        Add the neural net variables and operations to the graph
        Args:
            inputs: the inputs to the neural network, this is a
                [batch_size x max_input_length x feature_dim] tensor
            input_seq_length: The sequence lengths of the input utterances, this
                is a [batch_size] vector
            targets: the targets to the neural network, this is a tuple of
                [batch_size x max_output_length] tensors. The targets can be
                used during training. The first element are the text targets,
                the second element are the quantized audio samples
            target_seq_length: The sequence lengths of the target utterances,
                this is a tuple [batch_size] vectors. First element is for the
                text targets, second is for the quantized audio samples
            is_training: whether or not the network is in training mode
        Returns:
            A pair containing:
                - output logits as a tuple of text targets and reconstructed
                    audio samples
                - the output logits sequence lengths as a tuple of two vectors
        '''
        #add input noise (regularization during training only)
        std_input_noise = float(self.conf['std_input_noise'])
        if is_training and std_input_noise > 0:
            noisy_inputs = inputs + tf.random_normal(
                inputs.get_shape(), stddev=std_input_noise)
        else:
            noisy_inputs = inputs
        #compute the high level features
        hlfeat = self.encoder(
            inputs=noisy_inputs,
            sequence_lengths=input_seq_length,
            is_training=is_training)
        #prepend a sequence border label to the targets to get the encoder
        #inputs, the label is the last label (index output_dim[0]-1)
        batch_size = int(targets[0].get_shape()[0])
        s_labels = tf.constant(self.output_dim[0]-1,
                               dtype=tf.int32,
                               shape=[batch_size, 1])
        encoder_inputs = tf.concat([s_labels, targets[0]], 1)
        #compute the text output logits
        text_logits, _ = self.decoder(
            hlfeat=hlfeat,
            encoder_inputs=encoder_inputs,
            initial_state=self.decoder.zero_state(batch_size),
            first_step=True,
            is_training=is_training)
        #compute the reconstructed audio logits
        audio_logits = self.reconstructor(
            hlfeat=hlfeat,
            reconstructor_inputs=targets[1],
            is_training=is_training)
        # adapt the sequence length of the logits in the correct way:
        # plus one if the target length was not zero, because an eos label will
        # be added; remain zero when the target length was also zero.
        empty_targets = tf.equal(target_seq_length[0], 0)
        zeros = tf.zeros([target_seq_length[0].get_shape()[0]], dtype=tf.int32)
        logit_seq_length1 = tf.where(empty_targets, zeros,
                                     target_seq_length[0]+1)
        #assemble the two kinds of logits and lengths in tuples
        logits = (text_logits, audio_logits)
        logits_lengths = (logit_seq_length1, target_seq_length[1])
        return logits, logits_lengths
|
{
"content_hash": "cc3622529ed195d714a0889f33fe0a21",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 80,
"avg_line_length": 42.68468468468468,
"alnum_prop": 0.6135500211059519,
"repo_name": "JeroenBosmans/nabu",
"id": "efe8d686717b319e9c78c9d666e241b4041b05b9",
"size": "4738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nabu/neuralnetworks/classifiers/asr/encoder_decoder_reconstructor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "395778"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
}
|
from time import sleep
from UUGear import *
import sys
import pygame
import datetime
import os
# Jukebox driver: play/pause an MP3 based on the relative voltage of two
# analog inputs on a UUGear Arduino bridge, logging a timestamp each poll.
print 'starting init'
pygame.init()
UUGearDevice.setShowLogs(0)
# Create (touch) a per-run log file named after the start time.
fname = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M') + "-log.txt"
f_new = open(fname, 'a')
f_new.close()
try:
    device = UUGearDevice('UUGear-Arduino-8483-2314')
    if device.isValid():
        pygame.mixer.music.load('track01.mp3')
        pygame.mixer.music.play()
        pygame.mixer.music.pause()
        while True:
            # Music plays while analog channel 3 reads above channel 4.
            if float(device.analogRead(3)) > float(device.analogRead(4)):
                print "On"
                pygame.mixer.music.unpause()
            else:
                print "Off"
                pygame.mixer.music.pause()
            # 10-bit ADC with a 3.3V reference -> volts.
            print "Device 1: %0.2f" % (float(device.analogRead(3)) * 3.3 / 1024), "V"
            print "Device 2: %0.2f" % (float(device.analogRead(4)) * 3.3 / 1024), "V"
            f = open(fname, 'ab')
            f.write(datetime.datetime.now().strftime('%Y-%m-%dT%H:%M.%S') + "\n")
            f.close()
            sleep(0.5)
            # Channel 4 dropping near 0V is the shutdown signal.
            if float(device.analogRead(4)) * 3.3 / 1024 < 0.1:
                print("Stopping script")
                pygame.mixer.music.stop()
                device.detach()
                device.stopDaemon()
                pygame.quit()
                print("Script Stopped")
                raise SystemExit
        pygame.mixer.music.stop()
        device.detach()
        device.stopDaemon()
        pygame.quit()
        print("Script Finished")
        raise SystemExit
    else:
        print 'UUGear device is not currently installed'
# NOTE(review): bare except also catches the SystemExit raised above and
# re-runs the detach/stop sequence; and if UUGearDevice() itself raised,
# `device` is unbound here and the handler will NameError.
except:
    print "Unexpected error:", sys.exc_info()[0]
    device.detach()
    device.stopDaemon()
    print("Script Finished")
    raise SystemExit
|
{
"content_hash": "96c09ce5838a7fea30b0b9a535ca0e02",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 85,
"avg_line_length": 29.1,
"alnum_prop": 0.5572737686139748,
"repo_name": "TimSmith714/Pi-Arduino-MusicJokeBox",
"id": "c770a949f17f632a3cfde48fe6327fa78a2d02b6",
"size": "1746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1746"
}
],
"symlink_target": ""
}
|
'''
usage: 4up.py my.pdf
Creates 4up.my.pdf
'''
import sys
import os
import find_pdfrw
from pdfrw import PdfReader, PdfWriter, PdfDict, PdfName, PdfArray
from pdfrw.buildxobj import pagexobj
def get4(allpages):
    """Consume up to four pages from *allpages* and compose them on one sheet.

    The first four entries are removed from the list and placed at half
    scale in a 2x2 grid (0: top-left, 1: top-right, 2: bottom-left,
    3: bottom-right) on a page sized like the largest input page.
    """
    batch = [pagexobj(p) for p in allpages[:4]]
    allpages[:4] = []
    width = max(p.BBox[2] for p in batch)
    height = max(p.BBox[3] for p in batch)
    content = []
    resources = PdfDict()
    for slot, page in enumerate(batch):
        x_off = width * (slot & 1) / 2.0
        y_off = height * (slot <= 1) / 2.0
        tag = '/P%s' % slot
        # Half-scale transform, translate to the slot, draw the XObject.
        content.append('q 0.5 0 0 0.5 %s %s cm %s Do Q\n' % (x_off, y_off, tag))
        resources[tag] = page
    return PdfDict(
        Type=PdfName.Page,
        Contents=PdfDict(stream=''.join(content)),
        MediaBox=PdfArray([0, 0, width, height]),
        Resources=PdfDict(XObject=resources),
    )
def go(inpfn, outfn):
    """Read *inpfn*, pack its pages four-up, and write the result to *outfn*."""
    remaining = PdfReader(inpfn, decompress=False).pages
    writer = PdfWriter()
    while remaining:
        # Each call removes up to four pages and yields one composed page.
        writer.addpage(get4(remaining))
    writer.write(outfn)
if __name__ == '__main__':
    # Exactly one argument is expected: the input PDF path.
    (inpfn,) = sys.argv[1:]
    go(inpfn, '4up.' + os.path.basename(inpfn))
|
{
"content_hash": "c050c2b60a61745fec7173848d099dc4",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 73,
"avg_line_length": 25.510204081632654,
"alnum_prop": 0.5968,
"repo_name": "kulbirsaini/pdfrw-fork",
"id": "0f1fd3b70c9888d77a8874478df7ecdf447108ae",
"size": "1273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/4up.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76482"
}
],
"symlink_target": ""
}
|
"""
Reinforcement Learning (DQN) tutorial
=====================================
**Author**: `Adam Paszke <https://github.com/apaszke>`_
This tutorial shows how to use PyTorch to train a Deep Q Learning (DQN) agent
on the CartPole-v0 task from the `OpenAI Gym <https://gym.openai.com/>`__.
**Task**
The agent has to decide between two actions - moving the cart left or
right - so that the pole attached to it stays upright. You can find an
official leaderboard with various algorithms and visualizations at the
`Gym website <https://gym.openai.com/envs/CartPole-v0>`__.
.. figure:: /_static/img/cartpole.gif
:alt: cartpole
cartpole
As the agent observes the current state of the environment and chooses
an action, the environment *transitions* to a new state, and also
returns a reward that indicates the consequences of the action. In this
task, the environment terminates if the pole falls over too far.
The CartPole task is designed so that the inputs to the agent are 4 real
values representing the environment state (position, velocity, etc.).
However, neural networks can solve the task purely by looking at the
scene, so we'll use a patch of the screen centered on the cart as an
input. Because of this, our results aren't directly comparable to the
ones from the official leaderboard - our task is much harder.
Unfortunately this does slow down the training, because we have to
render all the frames.
Strictly speaking, we will present the state as the difference between
the current screen patch and the previous one. This will allow the agent
to take the velocity of the pole into account from one image.
**Packages**
First, let's import needed packages. Firstly, we need
`gym <https://gym.openai.com/docs>`__ for the environment
(Install using `pip install gym`).
We'll also use the following from PyTorch:
- neural networks (``torch.nn``)
- optimization (``torch.optim``)
- automatic differentiation (``torch.autograd``)
- utilities for vision tasks (``torchvision`` - `a separate
package <https://github.com/pytorch/vision>`__).
"""
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
# .unwrapped bypasses the TimeLimit wrapper so episodes are not truncated.
env = gym.make('CartPole-v0').unwrapped
# set up matplotlib (interactive mode; detect inline backend in notebooks)
is_ipython = 'inline' in matplotlib.get_backend()
if is_ipython:
    from IPython import display
plt.ion()
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################
# Replay Memory
# -------------
#
# We'll be using experience replay memory for training our DQN. It stores
# the transitions that the agent observes, allowing us to reuse this data
# later. By sampling from it randomly, the transitions that build up a
# batch are decorrelated. It has been shown that this greatly stabilizes
# and improves the DQN training procedure.
#
# For this, we're going to need two classes:
#
# - ``Transition`` - a named tuple representing a single transition in
# our environment
# - ``ReplayMemory`` - a cyclic buffer of bounded size that holds the
# transitions observed recently. It also implements a ``.sample()``
# method for selecting a random batch of transitions for training.
#
# One environment step: (state, action) -> (next_state, reward).
Transition = namedtuple('Transition',
                        ('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
    """Fixed-capacity cyclic buffer of observed transitions.

    Once `capacity` entries are stored, new pushes overwrite the oldest
    slot; `sample` draws a decorrelated random minibatch.
    """

    def __init__(self, capacity):
        self.capacity = capacity
        self.memory = []
        self.position = 0

    def push(self, *args):
        """Store one transition, overwriting the oldest entry when full."""
        if len(self.memory) < self.capacity:
            # Still growing: make room for the new slot first.
            self.memory.append(None)
        self.memory[self.position] = Transition(*args)
        # Advance the write cursor cyclically.
        self.position = (self.position + 1) % self.capacity

    def sample(self, batch_size):
        """Return `batch_size` transitions drawn uniformly without replacement."""
        return random.sample(self.memory, batch_size)

    def __len__(self):
        """Number of transitions currently stored."""
        return len(self.memory)
######################################################################
# Now, let's define our model. But first, let quickly recap what a DQN is.
#
# DQN algorithm
# -------------
#
# Our environment is deterministic, so all equations presented here are
# also formulated deterministically for the sake of simplicity. In the
# reinforcement learning literature, they would also contain expectations
# over stochastic transitions in the environment.
#
# Our aim will be to train a policy that tries to maximize the discounted,
# cumulative reward
# :math:`R_{t_0} = \sum_{t=t_0}^{\infty} \gamma^{t - t_0} r_t`, where
# :math:`R_{t_0}` is also known as the *return*. The discount,
# :math:`\gamma`, should be a constant between :math:`0` and :math:`1`
# that ensures the sum converges. It makes rewards from the uncertain far
# future less important for our agent than the ones in the near future
# that it can be fairly confident about.
#
# The main idea behind Q-learning is that if we had a function
# :math:`Q^*: State \times Action \rightarrow \mathbb{R}`, that could tell
# us what our return would be, if we were to take an action in a given
# state, then we could easily construct a policy that maximizes our
# rewards:
#
# .. math:: \pi^*(s) = \arg\!\max_a \ Q^*(s, a)
#
# However, we don't know everything about the world, so we don't have
# access to :math:`Q^*`. But, since neural networks are universal function
# approximators, we can simply create one and train it to resemble
# :math:`Q^*`.
#
# For our training update rule, we'll use a fact that every :math:`Q`
# function for some policy obeys the Bellman equation:
#
# .. math:: Q^{\pi}(s, a) = r + \gamma Q^{\pi}(s', \pi(s'))
#
# The difference between the two sides of the equality is known as the
# temporal difference error, :math:`\delta`:
#
# .. math:: \delta = Q(s, a) - (r + \gamma \max_a Q(s', a))
#
# To minimise this error, we will use the `Huber
# loss <https://en.wikipedia.org/wiki/Huber_loss>`__. The Huber loss acts
# like the mean squared error when the error is small, but like the mean
# absolute error when the error is large - this makes it more robust to
# outliers when the estimates of :math:`Q` are very noisy. We calculate
# this over a batch of transitions, :math:`B`, sampled from the replay
# memory:
#
# .. math::
#
# \mathcal{L} = \frac{1}{|B|}\sum_{(s, a, s', r) \ \in \ B} \mathcal{L}(\delta)
#
# .. math::
#
# \text{where} \quad \mathcal{L}(\delta) = \begin{cases}
# \frac{1}{2}{\delta^2} & \text{for } |\delta| \le 1, \\
# |\delta| - \frac{1}{2} & \text{otherwise.}
# \end{cases}
#
# Q-network
# ^^^^^^^^^
#
# Our model will be a convolutional neural network that takes in the
# difference between the current and previous screen patches. It has two
# outputs, representing :math:`Q(s, \mathrm{left})` and
# :math:`Q(s, \mathrm{right})` (where :math:`s` is the input to the
# network). In effect, the network is trying to predict the *quality* of
# taking each action given the current input.
#
class DQN(nn.Module):
    """Convolutional Q-network mapping a screen-difference image to the
    two action values Q(s, left) and Q(s, right)."""

    def __init__(self):
        super(DQN, self).__init__()
        # Three conv/batch-norm stages, then a linear head over the
        # flattened 448-element feature map (fixed by the input patch size).
        self.conv1 = nn.Conv2d(3, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)
        self.head = nn.Linear(448, 2)

    def forward(self, x):
        """Return a (batch, 2) tensor of action values."""
        for conv, bn in ((self.conv1, self.bn1),
                         (self.conv2, self.bn2),
                         (self.conv3, self.bn3)):
            x = F.relu(bn(conv(x)))
        return self.head(x.view(x.size(0), -1))
######################################################################
# Input extraction
# ^^^^^^^^^^^^^^^^
#
# The code below are utilities for extracting and processing rendered
# images from the environment. It uses the ``torchvision`` package, which
# makes it easy to compose image transforms. Once you run the cell it will
# display an example patch that it extracted.
#
# Preprocessing: ndarray -> PIL -> 40px-high resize -> float tensor in [0,1].
resize = T.Compose([T.ToPILImage(),
                    T.Resize(40, interpolation=Image.CUBIC),
                    T.ToTensor()])
# This is based on the code from gym.
screen_width = 600
def get_cart_location():
    """Map the cart's world x-position to a horizontal pixel coordinate."""
    pixels_per_unit = screen_width / (env.x_threshold * 2)
    # MIDDLE OF CART
    return int(env.state[0] * pixels_per_unit + screen_width / 2.0)
def get_screen():
    """Render the env and return a 1xCxHxW float tensor patch centered
    on the cart, ready to feed the DQN."""
    screen = env.render(mode='rgb_array').transpose(
        (2, 0, 1))  # transpose into torch order (CHW)
    # Strip off the top and bottom of the screen
    screen = screen[:, 160:320]
    view_width = 320
    cart_location = get_cart_location()
    # Clamp the viewing window so it never runs off either screen edge.
    if cart_location < view_width // 2:
        slice_range = slice(view_width)
    elif cart_location > (screen_width - view_width // 2):
        slice_range = slice(-view_width, None)
    else:
        slice_range = slice(cart_location - view_width // 2,
                            cart_location + view_width // 2)
    # Strip off the edges, so that we have a square image centered on a cart
    screen = screen[:, :, slice_range]
    # Convert to float, rescale, convert to torch tensor
    # (this doesn't require a copy)
    screen = np.ascontiguousarray(screen, dtype=np.float32) / 255
    screen = torch.from_numpy(screen)
    # Resize, and add a batch dimension (BCHW)
    return resize(screen).unsqueeze(0).to(device)
# Show one example extracted patch so the preprocessing can be eyeballed.
env.reset()
plt.figure()
plt.imshow(get_screen().cpu().squeeze(0).permute(1, 2, 0).numpy(),
           interpolation='none')
plt.title('Example extracted screen')
plt.show()
######################################################################
# Training
# --------
#
# Hyperparameters and utilities
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# This cell instantiates our model and its optimizer, and defines some
# utilities:
#
# - ``select_action`` - will select an action accordingly to an epsilon
# greedy policy. Simply put, we'll sometimes use our model for choosing
# the action, and sometimes we'll just sample one uniformly. The
# probability of choosing a random action will start at ``EPS_START``
# and will decay exponentially towards ``EPS_END``. ``EPS_DECAY``
# controls the rate of the decay.
# - ``plot_durations`` - a helper for plotting the durations of episodes,
# along with an average over the last 100 episodes (the measure used in
# the official evaluations). The plot will be underneath the cell
# containing the main training loop, and will update after every
# episode.
#
# Training hyperparameters.
BATCH_SIZE = 128    # transitions per optimization step
GAMMA = 0.999       # discount factor
EPS_START = 0.9     # initial exploration probability
EPS_END = 0.05      # floor of the exploration probability
EPS_DECAY = 200     # steps controlling the exponential epsilon decay
TARGET_UPDATE = 10  # episodes between target-network syncs
# Two copies of the network: the policy net is trained; the target net is
# a frozen snapshot used to compute V(s_{t+1}) for stability.
policy_net = DQN().to(device)
target_net = DQN().to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.RMSprop(policy_net.parameters())
memory = ReplayMemory(10000)
steps_done = 0
def select_action(state):
    """Epsilon-greedy action choice: exploit the policy net or explore."""
    global steps_done
    decay = math.exp(-1. * steps_done / EPS_DECAY)
    threshold = EPS_END + (EPS_START - EPS_END) * decay
    steps_done += 1
    if random.random() <= threshold:
        # Explore: uniform random action (0 or 1).
        return torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)
    # Exploit: greedy action from the policy network.
    with torch.no_grad():
        return policy_net(state).max(1)[1].view(1, 1)
# Episode lengths collected by the training loop, plotted live below.
episode_durations = []
def plot_durations():
    """Plot episode durations plus a 100-episode running average."""
    plt.figure(2)
    plt.clf()
    durations_t = torch.tensor(episode_durations, dtype=torch.float)
    plt.title('Training...')
    plt.xlabel('Episode')
    plt.ylabel('Duration')
    plt.plot(durations_t.numpy())
    # Take 100 episode averages and plot them too
    if len(durations_t) >= 100:
        means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
        # Pad with zeros so the average aligns with the episode axis.
        means = torch.cat((torch.zeros(99), means))
        plt.plot(means.numpy())
    plt.pause(0.001)  # pause a bit so that plots are updated
    if is_ipython:
        display.clear_output(wait=True)
        display.display(plt.gcf())
######################################################################
# Training loop
# ^^^^^^^^^^^^^
#
# Finally, the code for training our model.
#
# Here, you can find an ``optimize_model`` function that performs a
# single step of the optimization. It first samples a batch, concatenates
# all the tensors into a single one, computes :math:`Q(s_t, a_t)` and
# :math:`V(s_{t+1}) = \max_a Q(s_{t+1}, a)`, and combines them into our
# loss. By definition we set :math:`V(s) = 0` if :math:`s` is a terminal
# state. We also use a target network to compute :math:`V(s_{t+1})` for
# added stability. The target network has its weights kept frozen most of
# the time, but is updated with the policy network's weights every so often.
# This is usually a set number of steps but we shall use episodes for
# simplicity.
#
def optimize_model():
    """Run one DQN optimization step on a random replay minibatch.

    No-ops until the replay memory holds at least BATCH_SIZE transitions.
    """
    if len(memory) < BATCH_SIZE:
        return
    transitions = memory.sample(BATCH_SIZE)
    # Transpose the batch (see http://stackoverflow.com/a/19343/3343043 for
    # detailed explanation).
    batch = Transition(*zip(*transitions))
    # Compute a mask of non-final states and concatenate the batch elements
    # (next_state is None exactly for terminal transitions).
    non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
                                            batch.next_state)), device=device, dtype=torch.uint8)
    non_final_next_states = torch.cat([s for s in batch.next_state
                                       if s is not None])
    state_batch = torch.cat(batch.state)
    action_batch = torch.cat(batch.action)
    reward_batch = torch.cat(batch.reward)
    # Compute Q(s_t, a) - the model computes Q(s_t), then we select the
    # columns of actions taken
    state_action_values = policy_net(state_batch).gather(1, action_batch)
    # Compute V(s_{t+1}) for all next states (0 for terminal states);
    # the frozen target_net is used here for stability.
    next_state_values = torch.zeros(BATCH_SIZE, device=device)
    next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0].detach()
    # Compute the expected Q values
    expected_state_action_values = (next_state_values * GAMMA) + reward_batch
    # Compute Huber loss
    loss = F.smooth_l1_loss(state_action_values, expected_state_action_values.unsqueeze(1))
    # Optimize the model
    optimizer.zero_grad()
    loss.backward()
    # Clip each gradient element to [-1, 1] before stepping.
    for param in policy_net.parameters():
        param.grad.data.clamp_(-1, 1)
    optimizer.step()
######################################################################
#
# Below, you can find the main training loop. At the beginning we reset
# the environment and initialize the ``state`` Tensor. Then, we sample
# an action, execute it, observe the next screen and the reward (always
# 1), and optimize our model once. When the episode ends (our model
# fails), we restart the loop.
#
# Below, `num_episodes` is set small. You should download
# the notebook and run a lot more episodes.
#
# Main training loop: interact with the environment, store transitions,
# optimize after every step, and periodically sync the target network.
num_episodes = 50
for i_episode in range(num_episodes):
    # Initialize the environment and state
    env.reset()
    last_screen = get_screen()
    current_screen = get_screen()
    # State is the difference of two consecutive screen patches.
    state = current_screen - last_screen
    for t in count():
        # Select and perform an action
        action = select_action(state)
        _, reward, done, _ = env.step(action.item())
        reward = torch.tensor([reward], device=device)
        # Observe new state
        last_screen = current_screen
        current_screen = get_screen()
        if not done:
            next_state = current_screen - last_screen
        else:
            # Terminal state is represented as None in replay memory.
            next_state = None
        # Store the transition in memory
        memory.push(state, action, next_state, reward)
        # Move to the next state
        state = next_state
        # Perform one step of the optimization (on the policy network)
        optimize_model()
        if done:
            episode_durations.append(t + 1)
            plot_durations()
            break
    # Update the target network with the policy network's weights
    if i_episode % TARGET_UPDATE == 0:
        target_net.load_state_dict(policy_net.state_dict())
print('Complete')
env.render()
env.close()
plt.ioff()
plt.show()
|
{
"content_hash": "0677272319e4cf528b93a55dffecedd6",
"timestamp": "",
"source": "github",
"line_count": 464,
"max_line_length": 95,
"avg_line_length": 34.72629310344828,
"alnum_prop": 0.6500961956184448,
"repo_name": "jt120/start-ml",
"id": "a1301c1becf55a917e07a1870c8e18fde1b10c27",
"size": "16137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorch/reinforcement_q_learning.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3223"
},
{
"name": "Jupyter Notebook",
"bytes": "1307290"
},
{
"name": "Python",
"bytes": "32655"
}
],
"symlink_target": ""
}
|
"""
constants.py: Scientific constants in SI units.
Included constants:
* **g0** : standard acceleration of gravity
* **r_earth** : mean radius of Earth
* **Omega** : angular velocity of Earth
* **Rd** : specific gas constant for dry air
* **Rv** : specific gas constant for water vapor
* **Cpd** : specific heat capacity of dry air at constant pressure at 300K
* **Cl** : specific heat capacity of liquid water
* **Gammad** : dry adiabatic lapse rate
* **Lv0** : latent heat of vaporization for water at 0C
"""
from numpy import pi
g0 = 9.80665 # standard gravitational acceleration (m/s^2)
stefan = 5.67e-8 # Stefan-Boltzmann constant (W/m^2/K^4)
r_earth = 6370000. # Mean radius of Earth (m)
Omega = 7.2921159e-5 # Angular velocity of Earth (rad/s)
Rd = 287.04 # Specific gas constant for dry air (J/kg/K)
Rv = 461.50 # Specific gas constant for water vapor (J/kg/K)
Cpd = 1005. # Specific heat of dry air at constant pressure (J/kg/K)
Cl = 4186. # Specific heat of liquid water (J/kg/K)
Gammad = g0/Cpd # Dry adiabatic lapse rate (K/m)
Lv0 = 2.501e6 # Latent heat of vaporization for water at 0 Celsius (J/kg)
|
{
"content_hash": "93dace580f893ea6843e88e98ad16f51",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 39.629629629629626,
"alnum_prop": 0.688785046728972,
"repo_name": "mcgibbon/atmos",
"id": "2efd475a376d7e434484027b0ad324f415cde291",
"size": "1094",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "atmos/constants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "105"
},
{
"name": "Python",
"bytes": "157515"
},
{
"name": "Shell",
"bytes": "70"
}
],
"symlink_target": ""
}
|
from typing import Any, Dict, List, Optional, Type, Union
from flask_appbuilder.models.filters import BaseFilter
from flask_appbuilder.models.sqla import Model
from flask_appbuilder.models.sqla.interface import SQLAInterface
from sqlalchemy.exc import SQLAlchemyError, StatementError
from sqlalchemy.orm import Session
from superset.dao.exceptions import (
DAOConfigError,
DAOCreateFailedError,
DAODeleteFailedError,
DAOUpdateFailedError,
)
from superset.extensions import db
class BaseDAO:
    """
    Base DAO, implement base CRUD sqlalchemy operations
    """
    model_cls: Optional[Type[Model]] = None
    """
    Child classes need to state the Model class so they don't need to implement basic
    create, update and delete methods
    """
    base_filter: Optional[Type[BaseFilter]] = None
    """
    Child classes can register base filtering to be applied to all filter methods
    """
    # Name of the primary-key column used by find_by_id/find_by_ids.
    id_column_name = "id"
    @classmethod
    def find_by_id(
        cls,
        model_id: Union[str, int],
        session: Optional[Session] = None,
        skip_base_filter: bool = False,
    ) -> Optional[Model]:
        """
        Find a model by id, if defined applies `base_filter`

        :param model_id: value of the configured id column to look up
        :param session: SQLAlchemy session to use; defaults to the global `db.session`
        :param skip_base_filter: when True, bypass `base_filter` even if one is set
        :return: the matching model, or None when not found (or filtered out,
            or the id value cannot be coerced to the column type)
        """
        session = session or db.session
        query = session.query(cls.model_cls)
        if cls.base_filter and not skip_base_filter:
            data_model = SQLAInterface(cls.model_cls, session)
            query = cls.base_filter( # pylint: disable=not-callable
                cls.id_column_name, data_model
            ).apply(query, None)
        id_filter = {cls.id_column_name: model_id}
        try:
            return query.filter_by(**id_filter).one_or_none()
        except StatementError:
            # can happen if int is passed instead of a string or similar
            return None
    @classmethod
    def find_by_ids(cls, model_ids: Union[List[str], List[int]]) -> List[Model]:
        """
        Find a List of models by a list of ids, if defined applies `base_filter`
        """
        id_col = getattr(cls.model_cls, cls.id_column_name, None)
        if id_col is None:
            # model has no column with the configured id name; nothing can match
            return []
        query = db.session.query(cls.model_cls).filter(id_col.in_(model_ids))
        if cls.base_filter:
            data_model = SQLAInterface(cls.model_cls, db.session)
            query = cls.base_filter( # pylint: disable=not-callable
                cls.id_column_name, data_model
            ).apply(query, None)
        return query.all()
    @classmethod
    def find_all(cls) -> List[Model]:
        """
        Get all that fit the `base_filter`
        """
        query = db.session.query(cls.model_cls)
        if cls.base_filter:
            data_model = SQLAInterface(cls.model_cls, db.session)
            query = cls.base_filter( # pylint: disable=not-callable
                cls.id_column_name, data_model
            ).apply(query, None)
        return query.all()
    @classmethod
    def find_one_or_none(cls, **filter_by: Any) -> Optional[Model]:
        """
        Get the first that fit the `base_filter`
        """
        query = db.session.query(cls.model_cls)
        if cls.base_filter:
            data_model = SQLAInterface(cls.model_cls, db.session)
            query = cls.base_filter( # pylint: disable=not-callable
                cls.id_column_name, data_model
            ).apply(query, None)
        return query.filter_by(**filter_by).one_or_none()
    @classmethod
    def create(cls, properties: Dict[str, Any], commit: bool = True) -> Model:
        """
        Generic for creating models

        :param properties: attribute name -> value mapping set on the new model
        :param commit: when False, the caller is responsible for committing
        :raises: DAOCreateFailedError
        """
        if cls.model_cls is None:
            raise DAOConfigError()
        model = cls.model_cls() # pylint: disable=not-callable
        for key, value in properties.items():
            setattr(model, key, value)
        try:
            db.session.add(model)
            if commit:
                db.session.commit()
        except SQLAlchemyError as ex: # pragma: no cover
            # roll back so the session stays usable after a failed insert
            db.session.rollback()
            raise DAOCreateFailedError(exception=ex) from ex
        return model
    @classmethod
    def save(cls, instance_model: Model, commit: bool = True) -> Model:
        """
        Generic for saving models

        :param instance_model: an already-constructed instance of `model_cls`
        :raises: DAOCreateFailedError
        """
        if cls.model_cls is None:
            raise DAOConfigError()
        if not isinstance(instance_model, cls.model_cls):
            raise DAOCreateFailedError(
                "the instance model is not a type of the model class"
            )
        try:
            db.session.add(instance_model)
            if commit:
                db.session.commit()
        except SQLAlchemyError as ex: # pragma: no cover
            db.session.rollback()
            raise DAOCreateFailedError(exception=ex) from ex
        return instance_model
    @classmethod
    def update(
        cls, model: Model, properties: Dict[str, Any], commit: bool = True
    ) -> Model:
        """
        Generic update a model

        :param properties: attribute name -> value mapping applied to `model`
        :raises: DAOUpdateFailedError
        """
        for key, value in properties.items():
            setattr(model, key, value)
        try:
            # merge (rather than add) so a detached instance is reconciled
            # with any copy already present in the session
            db.session.merge(model)
            if commit:
                db.session.commit()
        except SQLAlchemyError as ex: # pragma: no cover
            db.session.rollback()
            raise DAOUpdateFailedError(exception=ex) from ex
        return model
    @classmethod
    def delete(cls, model: Model, commit: bool = True) -> Model:
        """
        Generic delete a model

        :raises: DAODeleteFailedError
        """
        try:
            db.session.delete(model)
            if commit:
                db.session.commit()
        except SQLAlchemyError as ex: # pragma: no cover
            db.session.rollback()
            raise DAODeleteFailedError(exception=ex) from ex
        return model
|
{
"content_hash": "720de019c122e1dd5a18e3dff6072e03",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 85,
"avg_line_length": 34.092485549132945,
"alnum_prop": 0.5879959308240081,
"repo_name": "zhouyao1994/incubator-superset",
"id": "c6890e53a5ce483a9bd1fe5e128fa5e68ccbe775",
"size": "6743",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "superset/dao/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4776"
},
{
"name": "Dockerfile",
"bytes": "6940"
},
{
"name": "HTML",
"bytes": "1243911"
},
{
"name": "JavaScript",
"bytes": "2445349"
},
{
"name": "Jinja",
"bytes": "5542"
},
{
"name": "Jupyter Notebook",
"bytes": "1925627"
},
{
"name": "Less",
"bytes": "106438"
},
{
"name": "Makefile",
"bytes": "3946"
},
{
"name": "Mako",
"bytes": "1197"
},
{
"name": "Pug",
"bytes": "2969"
},
{
"name": "Python",
"bytes": "6296253"
},
{
"name": "Shell",
"bytes": "56211"
},
{
"name": "Smarty",
"bytes": "4298"
},
{
"name": "TypeScript",
"bytes": "6909337"
}
],
"symlink_target": ""
}
|
import unittest
import re
import os.path
import codecs
from mock import MagicMock, call, patch
from uiautomator import AutomatorDevice, Selector
class TestDevice(unittest.TestCase):
    """Unit tests for AutomatorDevice.

    The device's jsonrpc server is replaced with MagicMocks, so each test
    only verifies that the expected RPC method is invoked with the expected
    arguments and that its return value is passed through unchanged.
    """
    def setUp(self):
        self.device = AutomatorDevice()
        self.device.server = MagicMock()
        self.device.server.jsonrpc = MagicMock()
        self.device.server.jsonrpc_wrap = MagicMock()
    def test_info(self):
        self.device.server.jsonrpc.deviceInfo = MagicMock()
        self.device.server.jsonrpc.deviceInfo.return_value = {}
        self.assertEqual(self.device.info, {})
        self.device.server.jsonrpc.deviceInfo.assert_called_once_with()
    def test_click(self):
        self.device.server.jsonrpc.click = MagicMock()
        self.device.server.jsonrpc.click.return_value = True
        self.assertEqual(self.device.click(1, 2), True)
        self.device.server.jsonrpc.click.assert_called_once_with(1, 2)
    def test_swipe(self):
        self.device.server.jsonrpc.swipe = MagicMock()
        self.device.server.jsonrpc.swipe.return_value = True
        self.assertEqual(self.device.swipe(1, 2, 3, 4, 100), True)
        self.device.server.jsonrpc.swipe.assert_called_once_with(1, 2, 3, 4, 100)
    def test_long_click(self):
        # long_click is implemented as a 100ms-step swipe to an adjacent pixel
        self.device.server.jsonrpc.swipe = MagicMock()
        self.device.server.jsonrpc.swipe.return_value = True
        x, y = 100, 200
        self.assertEqual(self.device.long_click(x, y), True)
        self.device.server.jsonrpc.swipe.assert_called_once_with(x, y, x+1, y+1, 100)
    def test_drag(self):
        self.device.server.jsonrpc.drag = MagicMock()
        self.device.server.jsonrpc.drag.return_value = True
        self.assertEqual(self.device.drag(1, 2, 3, 4, 100), True)
        self.device.server.jsonrpc.drag.assert_called_once_with(1, 2, 3, 4, 100)
    def test_dump(self):
        self.device.server.jsonrpc.dumpWindowHierarchy = MagicMock()
        with codecs.open(os.path.join(os.path.dirname(__file__), "res", "layout.xml"), "r", encoding="utf8") as f:
            xml = f.read()
        self.device.server.jsonrpc.dumpWindowHierarchy.return_value = xml
        self.assertEqual(self.device.dump("/tmp/test.xml"), xml)
        self.device.server.jsonrpc.dumpWindowHierarchy.assert_called_once_with(True, None)
        self.assertEqual(self.device.dump("/tmp/test.xml", False), xml)
        # strip inter-tag whitespace, then check dump() re-pretty-prints it
        raw_xml = "".join(re.split(r"\n[ ]*", xml))
        self.device.server.jsonrpc.dumpWindowHierarchy.return_value = raw_xml
        self.assertTrue("\n " in self.device.dump("/tmp/test.xml"))
    def test_screenshot(self):
        self.device.server.jsonrpc.takeScreenshot = MagicMock()
        self.device.server.jsonrpc.takeScreenshot.return_value = "1.png"
        self.device.server.adb.cmd = cmd = MagicMock()
        self.device.server.screenshot = MagicMock()
        self.device.server.screenshot.return_value = None
        cmd.return_value.returncode = 0
        self.assertEqual(self.device.screenshot("a.png", 1.0, 99), "a.png")
        self.device.server.jsonrpc.takeScreenshot.assert_called_once_with("screenshot.png", 1.0, 99)
        # the on-device file must be pulled locally and then removed
        self.assertEqual(cmd.call_args_list, [call("pull", "1.png", "a.png"), call("shell", "rm", "1.png")])
        self.device.server.jsonrpc.takeScreenshot.return_value = None
        self.assertEqual(self.device.screenshot("a.png", 1.0, 100), None)
    def test_freeze_rotation(self):
        self.device.server.jsonrpc.freezeRotation = MagicMock()
        self.device.freeze_rotation(True)
        self.device.freeze_rotation(False)
        self.assertEqual(self.device.server.jsonrpc.freezeRotation.call_args_list, [call(True), call(False)])
    def test_orientation(self):
        self.device.server.jsonrpc.deviceInfo = MagicMock()
        orientation = {
            0: "natural",
            1: "left",
            2: "upsidedown",
            3: "right"
        }
        for i in range(4):
            self.device.server.jsonrpc.deviceInfo.return_value = {"displayRotation": i}
            self.assertEqual(self.device.orientation, orientation[i])
        # set
        # each alias in a row (index, long name, short name, degrees) must map
        # to the same canonical long name when assigned
        orientations = [
            (0, "natural", "n", 0),
            (1, "left", "l", 90),
            (2, "upsidedown", "u", 180),
            (3, "right", "r", 270)
        ]
        for values in orientations:
            for value in values:
                self.device.server.jsonrpc.setOrientation = MagicMock()
                self.device.orientation = value
                self.device.server.jsonrpc.setOrientation.assert_called_once_with(values[1])
        with self.assertRaises(ValueError):
            self.device.orientation = "invalid orientation"
    def test_last_traversed_text(self):
        self.device.server.jsonrpc.getLastTraversedText = MagicMock()
        self.device.server.jsonrpc.getLastTraversedText.return_value = "abcdef"
        self.assertEqual(self.device.last_traversed_text, "abcdef")
        self.device.server.jsonrpc.getLastTraversedText.assert_called_once_with()
    def test_clear_traversed_text(self):
        self.device.server.jsonrpc.clearLastTraversedText = MagicMock()
        self.device.clear_traversed_text()
        self.device.server.jsonrpc.clearLastTraversedText.assert_called_once_with()
    def test_open(self):
        self.device.server.jsonrpc.openNotification = MagicMock()
        self.device.open.notification()
        self.device.server.jsonrpc.openNotification.assert_called_once_with()
        self.device.server.jsonrpc.openQuickSettings = MagicMock()
        self.device.open.quick_settings()
        self.device.server.jsonrpc.openQuickSettings.assert_called_once_with()
    def test_watchers(self):
        names = ["a", "b", "c"]
        self.device.server.jsonrpc.getWatchers = MagicMock()
        self.device.server.jsonrpc.getWatchers.return_value = names
        self.assertEqual(self.device.watchers, names)
        self.device.server.jsonrpc.getWatchers.assert_called_once_with()
        self.device.server.jsonrpc.hasAnyWatcherTriggered = MagicMock()
        self.device.server.jsonrpc.hasAnyWatcherTriggered.return_value = True
        self.assertEqual(self.device.watchers.triggered, True)
        self.device.server.jsonrpc.hasAnyWatcherTriggered.assert_called_once_with()
        self.device.server.jsonrpc.removeWatcher = MagicMock()
        self.device.watchers.remove("a")
        self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
        # remove() with no argument removes every registered watcher
        self.device.server.jsonrpc.removeWatcher = MagicMock()
        self.device.watchers.remove()
        self.assertEqual(self.device.server.jsonrpc.removeWatcher.call_args_list, [call(name) for name in names])
        self.device.server.jsonrpc.resetWatcherTriggers = MagicMock()
        self.device.watchers.reset()
        self.device.server.jsonrpc.resetWatcherTriggers.assert_called_once_with()
        self.device.server.jsonrpc.runWatchers = MagicMock()
        self.device.watchers.run()
        self.device.server.jsonrpc.runWatchers.assert_called_once_with()
    def test_watcher(self):
        self.device.server.jsonrpc.hasWatcherTriggered = MagicMock()
        self.device.server.jsonrpc.hasWatcherTriggered.return_value = False
        self.assertFalse(self.device.watcher("name").triggered)
        self.device.server.jsonrpc.hasWatcherTriggered.assert_called_once_with("name")
        self.device.server.jsonrpc.removeWatcher = MagicMock()
        self.device.watcher("a").remove()
        self.device.server.jsonrpc.removeWatcher.assert_called_once_with("a")
        self.device.server.jsonrpc.registerClickUiObjectWatcher = MagicMock()
        condition1 = {"text": "my text", "className": "android"}
        condition2 = {"description": "my desc", "clickable": True}
        target = {"className": "android.widget.Button", "text": "OK"}
        self.device.watcher("watcher").when(**condition1).when(**condition2).click(**target)
        self.device.server.jsonrpc.registerClickUiObjectWatcher.assert_called_once_with(
            "watcher",
            [Selector(**condition1), Selector(**condition2)],
            Selector(**target)
        )
        # NOTE(review): "registerPressKeyskWatcher" looks like a typo of
        # "PressKeys", but it must match whatever name the device code calls
        # on the jsonrpc proxy -- confirm against uiautomator before renaming.
        self.device.server.jsonrpc.registerPressKeyskWatcher = MagicMock()
        self.device.watcher("watcher2").when(**condition1).when(**condition2).press.back.home.power("menu")
        self.device.server.jsonrpc.registerPressKeyskWatcher.assert_called_once_with(
            "watcher2", [Selector(**condition1), Selector(**condition2)], ("back", "home", "power", "menu"))
    def test_press(self):
        key = ["home", "back", "left", "right", "up", "down", "center",
               "menu", "search", "enter", "delete", "del", "recent",
               "volume_up", "volume_down", "volume_mute", "camera", "power"]
        self.device.server.jsonrpc.pressKey = MagicMock()
        self.device.server.jsonrpc.pressKey.return_value = True
        self.assertTrue(self.device.press.home())
        self.device.server.jsonrpc.pressKey.return_value = False
        self.assertFalse(self.device.press.back())
        self.device.server.jsonrpc.pressKey.return_value = False
        for k in key:
            self.assertFalse(self.device.press(k))
        self.assertEqual(self.device.server.jsonrpc.pressKey.call_args_list, [call("home"), call("back")] + [call(k) for k in key])
        # pressKeyCode is an auto-created MagicMock attribute (never reset above)
        self.device.server.jsonrpc.pressKeyCode.return_value = True
        self.assertTrue(self.device.press(1))
        self.assertTrue(self.device.press(1, 2))
        self.assertEqual(self.device.server.jsonrpc.pressKeyCode.call_args_list, [call(1), call(1, 2)])
    def test_wakeup(self):
        self.device.server.jsonrpc.wakeUp = MagicMock()
        self.device.wakeup()
        self.device.server.jsonrpc.wakeUp.assert_called_once_with()
        self.device.server.jsonrpc.wakeUp = MagicMock()
        self.device.screen.on()
        self.device.server.jsonrpc.wakeUp.assert_called_once_with()
        self.device.server.jsonrpc.wakeUp = MagicMock()
        self.device.screen("on")
        self.device.server.jsonrpc.wakeUp.assert_called_once_with()
    def test_screen_status(self):
        self.device.server.jsonrpc.deviceInfo = MagicMock()
        self.device.server.jsonrpc.deviceInfo.return_value = {"screenOn": True}
        self.assertTrue(self.device.screen == "on")
        self.assertTrue(self.device.screen != "off")
        self.device.server.jsonrpc.deviceInfo.return_value = {"screenOn": False}
        self.assertTrue(self.device.screen == "off")
        self.assertTrue(self.device.screen != "on")
    def test_sleep(self):
        self.device.server.jsonrpc.sleep = MagicMock()
        self.device.sleep()
        self.device.server.jsonrpc.sleep.assert_called_once_with()
        self.device.server.jsonrpc.sleep = MagicMock()
        self.device.screen.off()
        self.device.server.jsonrpc.sleep.assert_called_once_with()
        self.device.server.jsonrpc.sleep = MagicMock()
        self.device.screen("off")
        self.device.server.jsonrpc.sleep.assert_called_once_with()
    def test_wait_idle(self):
        self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
        self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = True
        self.assertTrue(self.device.wait.idle(timeout=10))
        self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
        self.device.server.jsonrpc_wrap.return_value.waitForIdle = MagicMock()
        self.device.server.jsonrpc_wrap.return_value.waitForIdle.return_value = False
        self.assertFalse(self.device.wait("idle", timeout=10))
        self.device.server.jsonrpc_wrap.return_value.waitForIdle.assert_called_once_with(10)
    def test_wait_update(self):
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = True
        self.assertTrue(self.device.wait.update(timeout=10, package_name="android"))
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 10)
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate = MagicMock()
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.return_value = False
        self.assertFalse(self.device.wait("update", timeout=100, package_name="android"))
        self.device.server.jsonrpc_wrap.return_value.waitForWindowUpdate.assert_called_once_with("android", 100)
    def test_get_info_attr(self):
        # unknown attributes fall through to deviceInfo keys;
        # width/height are aliases for displayWidth/displayHeight
        info = {"test_a": 1, "test_b": "string", "displayWidth": 720, "displayHeight": 1024}
        self.device.server.jsonrpc.deviceInfo = MagicMock()
        self.device.server.jsonrpc.deviceInfo.return_value = info
        for k in info:
            self.assertEqual(getattr(self.device, k), info[k])
        self.assertEqual(self.device.width, info["displayWidth"])
        self.assertEqual(self.device.height, info["displayHeight"])
        with self.assertRaises(AttributeError):
            self.device.not_exists
    def test_device_obj(self):
        with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
            kwargs = {"text": "abc", "description": "description...", "clickable": True}
            self.device(**kwargs)
            AutomatorDeviceObject.assert_called_once_with(self.device, Selector(**kwargs))
        with patch("uiautomator.AutomatorDeviceObject") as AutomatorDeviceObject:
            AutomatorDeviceObject.return_value.exists = True
            self.assertTrue(self.device.exists(clickable=True))
            AutomatorDeviceObject.return_value.exists = False
            self.assertFalse(self.device.exists(text="..."))
class TestDeviceWithSerial(unittest.TestCase):
    """Verify that a serial passed to AutomatorDevice reaches AutomatorServer."""
    def test_serial(self):
        with patch('uiautomator.AutomatorServer') as AutomatorServer:
            AutomatorDevice("abcdefhijklmn")
            AutomatorServer.assert_called_once_with(serial="abcdefhijklmn", local_port=None, adb_server_host=None, adb_server_port=None)
|
{
"content_hash": "ce89d028c6c0d405b1c962e1f4c5e31c",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 136,
"avg_line_length": 49.13986013986014,
"alnum_prop": 0.670414116977373,
"repo_name": "sumitahuja79/testing2",
"id": "43f226bb1ffcb0c532ff1a4a462eb782faba0de6",
"size": "14101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "114550"
}
],
"symlink_target": ""
}
|
'''Adversarial autoencoder
'''
from cortex.plugins import ModelPlugin, register_model
from cortex.built_ins.models.gan import (SimpleDiscriminator, GradientPenalty,
generator_loss)
from cortex.built_ins.models.vae import ImageDecoder, ImageEncoder
class AdversarialAutoencoder(ModelPlugin):
    '''Adversarial Autoencoder
    Autoencoder with a GAN loss on the latent space.

    Composed of four sub-plugins: an image encoder/decoder pair, a
    discriminator over the latent space, and a gradient penalty applied
    to that discriminator.
    '''
    defaults = dict(
        data=dict(batch_size=dict(train=64, test=640),
                  inputs=dict(inputs='images')),
        optimizer=dict(optimizer='Adam', learning_rate=1e-4),
        train=dict(epochs=500, archive_every=10)
    )
    def __init__(self):
        super().__init__()
        # contracts wire each sub-model's latent dimension to this model's dim_z
        encoder_contract = dict(kwargs=dict(dim_out='dim_z'))
        decoder_contract = dict(kwargs=dict(dim_in='dim_z'))
        disc_contract = dict(kwargs=dict(dim_in='dim_z'))
        penalty_contract = dict(nets=dict(network='discriminator'))
        self.encoder = ImageEncoder(contract=encoder_contract)
        self.decoder = ImageDecoder(contract=decoder_contract)
        self.discriminator = SimpleDiscriminator(contract=disc_contract)
        self.penalty = GradientPenalty(contract=penalty_contract)
    def build(self, noise_type='normal', dim_z=64):
        '''
        Args:
            noise_type: Prior noise distribution.
            dim_z: Dimensionality of latent space.
        '''
        # 'Z' is the prior sample the discriminator compares encodings against
        self.add_noise('Z', dist=noise_type, size=dim_z)
        self.encoder.build()
        self.decoder.build()
        self.discriminator.build()
    def routine(self, inputs, Z, encoder_loss_type='non-saturating',
                measure=None, beta=1.0):
        '''
        Args:
            encoder_loss_type: Adversarial loss type for the encoder.
            beta: Amount of adversarial loss for the encoder.
        '''
        Z_Q = self.encoder.encode(inputs)
        # decoder.routine sets self.losses.decoder (reconstruction term),
        # which is reused below when assembling the encoder loss
        self.decoder.routine(inputs, Z_Q)
        E_pos, E_neg, P_samples, Q_samples = self.discriminator.score(
            Z, Z_Q, measure)
        adversarial_loss = generator_loss(
            Q_samples, measure, loss_type=encoder_loss_type)
        # encoder loss = reconstruction + beta * adversarial (GAN) term
        self.losses.encoder = self.losses.decoder + beta * adversarial_loss
        self.results.adversarial_loss = adversarial_loss.item()
    def train_step(self, n_discriminator_updates=1):
        '''
        Args:
            n_discriminator_updates: Number of discriminator updates per step.
        '''
        # discriminator (+ penalty) gets n updates before one encoder/decoder step
        for _ in range(n_discriminator_updates):
            self.data.next()
            inputs, Z = self.inputs('inputs', 'Z')
            Z_Q = self.encoder.encode(inputs)
            self.discriminator.routine(Z, Z_Q)
            self.optimizer_step()
            self.penalty.routine(Z)
            self.optimizer_step()
        self.routine(auto_input=True)
        self.optimizer_step()
    def eval_step(self):
        # same passes as train_step but without optimizer updates
        self.data.next()
        inputs, Z = self.inputs('inputs', 'Z')
        Z_Q = self.encoder.encode(inputs)
        self.discriminator.routine(Z, Z_Q)
        self.penalty.routine(Z)
        self.routine(auto_input=True)
    def visualize(self, inputs, Z, targets):
        self.decoder.visualize(Z)
        self.encoder.visualize(inputs, targets)
        # show ground truth next to its reconstruction through the autoencoder
        Z_Q = self.encoder.encode(inputs)
        R = self.decoder.decode(Z_Q)
        self.add_image(inputs, name='ground truth')
        self.add_image(R, name='reconstructed')
register_model(AdversarialAutoencoder)
|
{
"content_hash": "5b5201cffdaecb80dfb2acc2a6c66aa7",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 78,
"avg_line_length": 31.27027027027027,
"alnum_prop": 0.6113511956208585,
"repo_name": "rdevon/cortex",
"id": "45d9a08d4d8c9c9c4546cd7c53828c12b454867c",
"size": "3471",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cortex/built_ins/models/adversarial_autoencoder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "814"
},
{
"name": "Makefile",
"bytes": "612"
},
{
"name": "Python",
"bytes": "285169"
},
{
"name": "Shell",
"bytes": "981"
}
],
"symlink_target": ""
}
|
from ironic.common import exception as excp
from ironic.common import fsm
from ironic.tests import base
class FSMTest(base.TestCase):
    """Tests for ironic.common.fsm state-machine initialization and events.

    NOTE(review): assertRaisesRegexp is the Python 2 spelling; on Python 3
    it is a deprecated alias of assertRaisesRegex -- confirm the project's
    supported Python versions before renaming.
    """
    def setUp(self):
        super(FSMTest, self).setUp()
        # 'working' and 'play' are stable states; 'wakeup' defaults its
        # target state to 'working'
        m = fsm.FSM()
        m.add_state('working', stable=True)
        m.add_state('daydream')
        m.add_state('wakeup', target='working')
        m.add_state('play', stable=True)
        m.add_transition('wakeup', 'working', 'walk')
        self.fsm = m
    def test_target_state_stable(self):
        # Test to verify that adding a new state with a 'target' state pointing
        # to a 'stable' state does not raise an exception
        self.fsm.add_state('foo', target='working')
        self.fsm.default_start_state = 'working'
        self.fsm.initialize()
    def test__validate_target_state(self):
        # valid
        self.fsm._validate_target_state('working')
        # target doesn't exist
        self.assertRaisesRegexp(excp.InvalidState, "does not exist",
                                self.fsm._validate_target_state, 'new state')
        # target isn't a stable state
        self.assertRaisesRegexp(excp.InvalidState, "stable",
                                self.fsm._validate_target_state, 'daydream')
    def test_initialize(self):
        # no start state
        self.assertRaises(excp.InvalidState, self.fsm.initialize)
        # no target state
        self.fsm.initialize('working')
        self.assertEqual('working', self.fsm.current_state)
        self.assertIsNone(self.fsm.target_state)
        # default target state
        self.fsm.initialize('wakeup')
        self.assertEqual('wakeup', self.fsm.current_state)
        self.assertEqual('working', self.fsm.target_state)
        # specify (it overrides default) target state
        self.fsm.initialize('wakeup', 'play')
        self.assertEqual('wakeup', self.fsm.current_state)
        self.assertEqual('play', self.fsm.target_state)
        # specify an invalid target state
        self.assertRaises(excp.InvalidState, self.fsm.initialize,
                          'wakeup', 'daydream')
    def test_process_event(self):
        # default target state
        self.fsm.initialize('wakeup')
        self.fsm.process_event('walk')
        self.assertEqual('working', self.fsm.current_state)
        self.assertIsNone(self.fsm.target_state)
        # specify (it overrides default) target state
        self.fsm.initialize('wakeup')
        self.fsm.process_event('walk', 'play')
        self.assertEqual('working', self.fsm.current_state)
        self.assertEqual('play', self.fsm.target_state)
        # specify an invalid target state
        self.fsm.initialize('wakeup')
        self.assertRaises(excp.InvalidState, self.fsm.process_event,
                          'walk', 'daydream')
|
{
"content_hash": "ed586ac51f538e38632301038f51618e",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 79,
"avg_line_length": 37.28,
"alnum_prop": 0.6208869814020028,
"repo_name": "hpproliant/ironic",
"id": "2d523544a2954d46df68bb651d0035eaddadae0d",
"size": "3453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironic/tests/unit/common/test_fsm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "3716155"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
class KeyValue(models.Model):
    """How this KeyValue class works:
    The KeyValue objects have functions that correspond to different
    keys. When a key is saved an attempt is made to find a validation
    function for that key.
    >>> attr = hasattr(kv, key)
    If the attribute exists, then it is checked for callability.
    >>> attr = getattr(kv, key)
    >>> callable(attr)
    If it is callable, it is called with the value of the key.
    >>> kv.attr(kv.value)
    The validator is then free to raise exceptions if the value being
    inserted is invalid.
    When a validator for a key is not found, the KeyValue class can either
    raise an exception or not. This behavior is controlled by the
    'force_validation' attribute: if 'force_validation' is 'True', a missing
    validation function raises a ValidationError. The 'require_validation'
    param to the clean method can be used to override the behavior of
    'force_validation'.
    Subclass this class and include a Foreign Key when needed.
    Validation functions can start with '_aa_'. 'aa' stands for auxiliary
    attribute.
    """
    id = models.AutoField(primary_key=True)
    key = models.CharField(max_length=255)
    value = models.CharField(max_length=255)
    # When True, clean() raises for keys that have no validator.
    force_validation = False
    class Meta:
        abstract = True
    def __repr__(self):
        return "<{0}>".format(self)
    def __str__(self):
        return "Key: {0} Value {1}".format(self.key, self.value)
    def clean(self, require_validation=True):
        # dashes in keys are mapped to underscores to form a method name
        key_attr = self.key.replace('-', '_')
        # aa stands for auxiliary attribute.
        if (not hasattr(self, key_attr) and
                not hasattr(self, "_aa_" + key_attr)):
            # ??? Do we want this?
            if self.force_validation and require_validation:
                raise ValidationError("No validator for key %s" % self.key)
            else:
                return
        if hasattr(self, key_attr):
            validate = getattr(self, key_attr)
        else:
            validate = getattr(self, "_aa_" + key_attr)
        if not callable(validate):
            raise ValidationError("No validator for key %s not callable" %
                                  key_attr)
        try:
            # NOTE(review): the class docstring says validators are called
            # with the key's value (kv.attr(kv.value)), but this calls the
            # validator with no arguments -- confirm which is intended.
            validate()
        except TypeError, e:
            # We want to catch when the validator didn't accept the correct
            # number of arguments.
            raise ValidationError("%s" % str(e))
        self.validate_unique()
    def validate_unique(self):
        # NOTE(review): relies on subclasses defining an `obj` field (the
        # foreign key mentioned in the class docstring).
        if (self.__class__.objects.filter(
                key=self.key, value=self.value, obj=self.obj).
                filter(~Q(id=self.pk)).exists()):
            raise ValidationError("A key with this value already exists.")
|
{
"content_hash": "43c592c7bd2cb4debeae3f4754be57f9",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 35.80487804878049,
"alnum_prop": 0.6018392370572208,
"repo_name": "rtucker-mozilla/mozilla_inventory",
"id": "15439fdffb9c94533657fedbbc0cdb1fae2ac1e2",
"size": "2936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/keyvalue/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CoffeeScript",
"bytes": "9538"
},
{
"name": "JavaScript",
"bytes": "1485560"
},
{
"name": "PHP",
"bytes": "27273"
},
{
"name": "Puppet",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "1960271"
},
{
"name": "Ruby",
"bytes": "1459"
},
{
"name": "Shell",
"bytes": "8766"
}
],
"symlink_target": ""
}
|
import logging
import os
import time
from ray.util.debug import log_once
from ray.rllib.utils.framework import try_import_tf
tf1, tf, tfv = try_import_tf() # presumably (v1-compat API, tf module, version) -- confirm in rllib.utils.framework
logger = logging.getLogger(__name__) # module-level logger
class TFRunBuilder:
    """Accumulates feeds and fetches for a single deferred TF ``session.run``.

    This is particularly useful for batching ops from multiple different
    policies in the multi-agent setting: callers register their feeds and
    fetches up front, and the run is executed lazily on the first ``get``.
    """

    def __init__(self, session, debug_name):
        self.session = session
        self.debug_name = debug_name
        self.feed_dict = {}
        self.fetches = []
        self._executed = None  # holds the session.run() results once executed

    def add_feed_dict(self, feed_dict):
        """Merge ``feed_dict`` into the pending feeds; keys must be new."""
        assert not self._executed
        clashes = [key for key in feed_dict if key in self.feed_dict]
        if clashes:
            raise ValueError("Key added twice: {}".format(clashes[0]))
        self.feed_dict.update(feed_dict)

    def add_fetches(self, fetches):
        """Append ``fetches`` and return their indices for later ``get``."""
        assert not self._executed
        start = len(self.fetches)
        self.fetches.extend(fetches)
        return list(range(start, len(self.fetches)))

    def get(self, to_fetch):
        """Resolve fetch indices (int, or a nested list/tuple of ints).

        Triggers the actual session run on first use; subsequent calls reuse
        the cached results.
        """
        if self._executed is None:
            try:
                self._executed = run_timeline(
                    self.session, self.fetches, self.debug_name,
                    self.feed_dict, os.environ.get("TF_TIMELINE_DIR"))
            except Exception as e:
                logger.exception("Error fetching: {}, feed_dict={}".format(
                    self.fetches, self.feed_dict))
                raise e
        if isinstance(to_fetch, int):
            return self._executed[to_fetch]
        if isinstance(to_fetch, list):
            return [self.get(i) for i in to_fetch]
        if isinstance(to_fetch, tuple):
            return tuple(self.get(i) for i in to_fetch)
        raise ValueError("Unsupported fetch type: {}".format(to_fetch))
_count = 0  # round-robin suffix so at most 10 trace files accumulate per process


def run_timeline(sess, ops, debug_name, feed_dict=None, timeline_dir=None):
    """Run ``ops`` in ``sess``, optionally dumping a Chrome trace file.

    Args:
        sess: TF session to run in.
        ops: Fetches to pass to ``sess.run``.
        debug_name: Tag embedded in the trace file name.
        feed_dict: Optional feeds for the run. Defaults to no feeds.
            (Was a mutable ``{}`` default; now created per call.)
        timeline_dir: When set, trace the run with FULL_TRACE and write a
            ``timeline-<name>-<pid>-<n>.json`` file into this directory.

    Returns:
        The fetched values from ``sess.run``.
    """
    global _count
    if feed_dict is None:
        feed_dict = {}
    if timeline_dir:
        from tensorflow.python.client import timeline

        run_options = tf1.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf1.RunMetadata()
        start = time.time()
        fetches = sess.run(
            ops,
            options=run_options,
            run_metadata=run_metadata,
            feed_dict=feed_dict)
        trace = timeline.Timeline(step_stats=run_metadata.step_stats)
        outf = os.path.join(
            timeline_dir, "timeline-{}-{}-{}.json".format(
                debug_name, os.getpid(), _count % 10))
        _count += 1
        # Use a context manager so the file is closed (the original
        # implementation leaked the handle).
        with open(outf, "w") as trace_file:
            trace_file.write(trace.generate_chrome_trace_format())
        logger.info("Wrote tf timeline ({} s) to {}".format(
            time.time() - start, os.path.abspath(outf)))
    else:
        if log_once("tf_timeline"):
            logger.info(
                "Executing TF run without tracing. To dump TF timeline traces "
                "to disk, set the TF_TIMELINE_DIR environment variable.")
        fetches = sess.run(ops, feed_dict=feed_dict)
    return fetches
|
{
"content_hash": "98e67e493b4c9df272f45ce111cad22c",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 79,
"avg_line_length": 34.3,
"alnum_prop": 0.5866537091026887,
"repo_name": "robertnishihara/ray",
"id": "82b904bd1316487868f1fa6ad6a936274393afdc",
"size": "3087",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "rllib/utils/tf_run_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "82909"
},
{
"name": "C++",
"bytes": "3971373"
},
{
"name": "CSS",
"bytes": "8025"
},
{
"name": "Cython",
"bytes": "179979"
},
{
"name": "Dockerfile",
"bytes": "6468"
},
{
"name": "Go",
"bytes": "23139"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1248954"
},
{
"name": "JavaScript",
"bytes": "444"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "2205"
},
{
"name": "Python",
"bytes": "6567694"
},
{
"name": "Shell",
"bytes": "102477"
},
{
"name": "Starlark",
"bytes": "231513"
},
{
"name": "TypeScript",
"bytes": "147793"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from pyface.qt import QtGui
from traits.api import Any, Int, Float, Str, Bool, Either, Callable
# ============= standard library imports ========================
# ============= local library imports ==========================
from traitsui.basic_editor_factory import BasicEditorFactory
from traitsui.qt4.editor import Editor
class _DialEditor(Editor):
    """TraitsUI editor backed by a Qt QDial, with an optional value label."""
    # The QDial widget instance.
    dial = Any
    # QLabel showing the current value (only when factory.display_value).
    label = Any
    # Callable mapping the trait value to the label text.
    _format_func = None
    def init(self, parent):
        self._create_control(parent)
    def _create_control(self, parent):
        self.dial = QtGui.QDial()
        self.dial.setNotchesVisible(self.factory.notches_visible)
        self.dial.setRange(self.factory.low, self.factory.high)
        self.dial.setSingleStep(self.factory.step)
        # NOTE(review): factory.width/height are Float traits but Qt's
        # setFixedWidth/Height expect int -- confirm PyQt coerces here.
        if self.factory.width > 0:
            self.dial.setFixedWidth(self.factory.width)
        if self.factory.height > 0:
            self.dial.setFixedHeight(self.factory.height)
        hbox = QtGui.QHBoxLayout()
        hbox.addWidget(self.dial)
        if self.factory.display_value:
            self.label = QtGui.QLabel()
            hbox.addWidget(self.label)
            # value_format may be a callable or a str.format template
            if self.factory.value_format:
                if hasattr(self.factory.value_format, "__call__"):
                    func = self.factory.value_format
                else:
                    func = lambda x: self.factory.value_format.format(x)
            else:
                func = lambda x: "{}".format(x)
            self.label.setText(func(self.value))
            self._format_func = func
        self.control = self.dial
        # NOTE(review): the int emitted by valueChanged[int] is not accepted
        # by the slot signature below; presumably PyQt drops the extra
        # argument for Python slots -- verify.
        self.dial.valueChanged[int].connect(self.valueChanged)
        parent.addLayout(hbox)
    def update_editor(self):
        # dial is write-only from the UI side; external trait changes ignored
        pass
    def valueChanged(self):
        # push the dial position into the edited trait and refresh the label
        self.value = self.dial.value()
        if self.label:
            v = self._format_func(self.value)
            self.label.setText(v)
class DialEditor(BasicEditorFactory):
    """Editor factory for :class:`_DialEditor`; holds the dial configuration."""
    # Editor class to instantiate.
    klass = _DialEditor
    # Inclusive dial range.
    low = Int
    high = Int
    # Increment applied per single dial step.
    step = Int(1)
    # When True, show the current value in a label next to the dial.
    display_value = Bool(False)
    # Format for the displayed value: a str.format template or a callable.
    value_format = Either(Str, Callable)
    # Fixed widget size in pixels; 0 leaves the dimension unconstrained.
    height = Float
    width = Float
    # Whether the dial draws tick notches.
    notches_visible = Bool(False)
# ============= EOF =============================================
|
{
"content_hash": "495f0a3fb9962bacc2947c9bef411426",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 72,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.5766292134831461,
"repo_name": "NMGRL/pychron",
"id": "b4b6f02762d71baa702d2a744a3230183424be37",
"size": "3027",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "pychron/core/ui/qt/dial_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "128"
},
{
"name": "C++",
"bytes": "3706"
},
{
"name": "CSS",
"bytes": "263"
},
{
"name": "Cython",
"bytes": "1692"
},
{
"name": "Fortran",
"bytes": "455875"
},
{
"name": "HTML",
"bytes": "46796"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Processing",
"bytes": "11421"
},
{
"name": "Python",
"bytes": "10773692"
},
{
"name": "Shell",
"bytes": "1003"
}
],
"symlink_target": ""
}
|
import pyfwk
from pyfi.entity.habtm.entityindustry import EntityIndustry
from pyfi.entity.habtm.entityindices import EntityIndices
from pyfi.entity.industry.object import Industry
from pyfi.entity.sector.object import Sector
from pyfi.entity.indices.object import Indices
from pyfi.entity.indices.membership import IndicesMembership
# -----------------------------STOCK-PROFILE------------------------------#
class StockProfile(pyfwk.Object):
    """Aggregated profile for a stock entity: industry, sector and indices."""

    # class-level defaults; populated per instance in __init__
    entity = None
    industry = None
    sector = None
    indices = None

    def __init__(self, entity_id):
        """Build the profile for *entity_id* from the habtm association tables."""
        self.entity = entity_id
        # industry/sector come from the entity-industry association, if present
        industry_rec = EntityIndustry.instance().get_rec_from_entity_id(entity_id)
        if industry_rec is not None:
            self.industry = Industry(industry_rec['industry'])
            self.sector = Sector(self.industry.sector)
        # indices membership: one Indices object per association record
        membership_recs = EntityIndices.instance().get_recs_from_entity_id(entity_id)
        if membership_recs is not None:
            self.indices = IndicesMembership(
                [Indices(rec['indices']) for rec in membership_recs]
            )
# ----------------------------------MAIN----------------------------------#
def main():
    """Placeholder entry point; this module is currently import-only."""
    pass
if __name__ == '__main__':
    main()
|
{
"content_hash": "411f155c0def4674fd4157cacc32e6c7",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 34.18604651162791,
"alnum_prop": 0.5863945578231292,
"repo_name": "rlinguri/pyfi",
"id": "3f139e1a74e2b3389a6b14d812dd6c039e6571a3",
"size": "1493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyfi/entity/profile/stock.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41076"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.request import urlopen
from future.moves.urllib.parse import urlparse
import time
import logging
from datetime import timedelta, datetime
import requests
# Allow some request objects to be imported from here instead of requests
import warnings
from requests import RequestException
from flexget import __version__ as version
from flexget.utils.tools import parse_timedelta, TimedDict, timedelta_total_seconds
# If we use just 'requests' here, we'll get the logger created by requests, rather than our own
log = logging.getLogger('utils.requests')
# Don't emit info level urllib3 log messages or below
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)
# same as above, but for systems where urllib3 isn't part of the requests package (i.e., Ubuntu)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# Time to wait before trying an unresponsive site again
WAIT_TIME = timedelta(seconds=60)
# Remembers sites that have timed out; entries expire automatically after WAIT_TIME
unresponsive_hosts = TimedDict(WAIT_TIME)
def is_unresponsive(url):
    """
    Checks if host of given url has timed out within WAIT_TIME
    :param url: The url to check
    :return: True if the host has timed out within WAIT_TIME
    :rtype: bool
    """
    # `unresponsive_hosts` entries expire automatically, so membership is the check.
    return urlparse(url).hostname in unresponsive_hosts
def set_unresponsive(url):
    """
    Marks the host of a given url as unresponsive
    :param url: The url that timed out
    """
    host = urlparse(url).hostname
    # Only start the timer once; re-marking would extend the waiting window.
    if host not in unresponsive_hosts:
        unresponsive_hosts[host] = True
class DomainLimiter(object):
    """Base class for per-domain request throttlers."""

    def __init__(self, domain):
        self.domain = domain

    def __call__(self):
        """Invoked once before every request to ``self.domain``; subclasses must override."""
        raise NotImplementedError
class TokenBucketLimiter(DomainLimiter):
    """
    A token bucket rate limiter for domains.
    New instances for the same domain will restore previous values.
    """
    # This is just an in memory cache right now, it works for the daemon, and across tasks in a single execution
    # but not for multiple executions via cron. Do we need to store this to db?
    state_cache = {}
    def __init__(self, domain, tokens, rate, wait=True):
        """
        :param int tokens: Size of bucket
        :param rate: Amount of time to accrue 1 token. Either `timedelta` or interval string.
        :param bool wait: If true, will wait for a token to be available. If false, errors when token is not available.
        """
        super(TokenBucketLimiter, self).__init__(domain)
        self.max_tokens = tokens
        self.rate = parse_timedelta(rate)
        self.wait = wait
        # Restore previous state for this domain, or establish new state cache
        self.state = self.state_cache.setdefault(domain, {'tokens': self.max_tokens, 'last_update': datetime.now()})
    @property
    def tokens(self):
        # Clamp to bucket capacity; regeneration in __call__ can overshoot.
        return min(self.max_tokens, self.state['tokens'])
    @tokens.setter
    def tokens(self, value):
        self.state['tokens'] = value
    @property
    def last_update(self):
        # Timestamp of the last token regeneration, shared via state_cache.
        return self.state['last_update']
    @last_update.setter
    def last_update(self, value):
        self.state['last_update'] = value
    def __call__(self):
        # Regenerate fractional tokens for the time elapsed since the last update.
        if self.tokens < self.max_tokens:
            regen = (timedelta_total_seconds(datetime.now() - self.last_update) /
                     timedelta_total_seconds(self.rate))
            self.tokens += regen
            self.last_update = datetime.now()
        if self.tokens < 1:
            if not self.wait:
                raise RequestException('Requests to %s have exceeded their limit.' % self.domain)
            # Seconds until a full token accrues.
            wait = timedelta_total_seconds(self.rate) * (1 - self.tokens)
            # Don't spam console if wait is low
            if wait < 4:
                level = log.debug
            else:
                # NOTE(review): `log.verbose` is a FlexGet logger extension, not stdlib logging.
                level = log.verbose
            level('Waiting %.2f seconds until next request to %s', wait, self.domain)
            # Sleep until it is time for the next request
            time.sleep(wait)
        # Spend one token for this request.
        self.tokens -= 1
class TimedLimiter(TokenBucketLimiter):
    """Enforces a minimum interval between requests to a given domain."""
    def __init__(self, domain, interval):
        # A bucket of size 1 whose single token regenerates every `interval`
        # reduces to "at most one request per interval".
        super(TimedLimiter, self).__init__(domain, 1, interval)
def _wrap_urlopen(url, timeout=None):
    """
    Handles alternate schemes using urllib, wraps the response in a requests.Response

    This is not installed as an adapter in requests, since urls without network locations
    (e.g. file:///somewhere) will cause errors

    :param url: URL with a scheme requests has no adapter for (e.g. ``file://``)
    :param timeout: Optional timeout in seconds, passed through to ``urlopen``
    :raises RequestException: if ``urlopen`` fails
    """
    try:
        raw = urlopen(url, timeout=timeout)
    except IOError as e:
        msg = 'Error getting %s: %s' % (url, e)
        log.error(msg)
        raise RequestException(msg)
    resp = requests.Response()
    resp.raw = raw
    # requests passes the `decode_content` kwarg to read
    orig_read = raw.read
    # Default `size` so bare read() calls (e.g. from `Response.content`) work too;
    # the original lambda required a positional size argument.
    resp.raw.read = lambda size=-1, **kwargs: orig_read(size)
    # Not every urllib response object exposes `code` (non-HTTP handlers may not).
    resp.status_code = getattr(raw, 'code', None) or 200
    resp.headers = requests.structures.CaseInsensitiveDict(raw.headers)
    return resp
def limit_domains(url, limit_dict):
    """
    If this url matches a domain in `limit_dict`, run the limiter.

    This is separated in to its own function so that limits can be disabled during unit tests with VCR.
    """
    # Only the first matching limiter runs, mirroring a loop with `break`.
    matched = next(
        (limiter for domain, limiter in limit_dict.items() if domain in url),
        None,
    )
    if matched is not None:
        matched()
class Session(requests.Session):
    """
    Subclass of requests Session class which defines some of our own defaults, records unresponsive sites,
    and raises errors by default.
    """
    def __init__(self, timeout=30, max_retries=1, *args, **kwargs):
        """Set some defaults for our session if not explicitly defined."""
        super(Session, self).__init__(*args, **kwargs)
        # Session-wide default timeout (seconds); applied per request in `request()`
        # since requests.Session itself has no timeout attribute.
        self.timeout = timeout
        self.stream = True
        # NOTE(review): only the 'http://' adapter gets max_retries here; the
        # 'https://' adapter keeps the requests default — confirm intentional.
        self.adapters['http://'].max_retries = max_retries
        # Stores min intervals between requests for certain sites
        self.domain_limiters = {}
        self.headers.update({'User-Agent': 'FlexGet/%s (www.flexget.com)' % version})
    def add_cookiejar(self, cookiejar):
        """
        Merges cookies from `cookiejar` into cookiejar for this session.
        :param cookiejar: CookieJar instance to add to the session.
        """
        for cookie in cookiejar:
            self.cookies.set_cookie(cookie)
    def set_domain_delay(self, domain, delay):
        """
        DEPRECATED, use `add_domain_limiter`
        Registers a minimum interval between requests to `domain`
        :param domain: The domain to set the interval on
        :param delay: The amount of time between requests, can be a timedelta or string like '3 seconds'
        """
        warnings.warn('set_domain_delay is deprecated, use add_domain_limiter', DeprecationWarning, stacklevel=2)
        self.domain_limiters[domain] = TimedLimiter(domain, delay)
    def add_domain_limiter(self, limiter):
        """
        Add a limiter to throttle requests to a specific domain.
        :param DomainLimiter limiter: The `DomainLimiter` to add to the session.
        """
        self.domain_limiters[limiter.domain] = limiter
    def request(self, method, url, *args, **kwargs):
        """
        Does a request, but raises Timeout immediately if site is known to timeout, and records sites that timeout.
        Also raises errors getting the content by default.
        :param bool raise_status: If True, non-success status code responses will be raised as errors (True by default)
        """
        # Raise Timeout right away if site is known to timeout
        if is_unresponsive(url):
            raise requests.Timeout('Requests to this site (%s) have timed out recently. Waiting before trying again.' %
                                   urlparse(url).hostname)
        # Run domain limiters for this url
        limit_domains(url, self.domain_limiters)
        kwargs.setdefault('timeout', self.timeout)
        # `raise_status` is our own kwarg; pop it so requests never sees it.
        raise_status = kwargs.pop('raise_status', True)
        # If we do not have an adapter for this url, pass it off to urllib
        if not any(url.startswith(adapter) for adapter in self.adapters):
            log.debug('No adaptor, passing off to urllib')
            return _wrap_urlopen(url, timeout=kwargs['timeout'])
        try:
            log.debug('Fetching URL %s with args %s and kwargs %s', url, args, kwargs)
            result = super(Session, self).request(method, url, *args, **kwargs)
        except requests.Timeout:
            # Mark this site in known unresponsive list
            set_unresponsive(url)
            raise
        if raise_status:
            result.raise_for_status()
        return result
# Define some module level functions that use our Session, so this module can be used like main requests module
def request(method, url, **kwargs):
    """Do a request using our :class:`Session`, creating one on the fly if needed."""
    # Sentinel keeps the exact semantics of dict.pop with a default: a new
    # Session is only built when the caller did not supply one at all.
    _missing = object()
    session = kwargs.pop('session', _missing)
    if session is _missing:
        session = Session()
    return session.request(method=method, url=url, **kwargs)
def head(url, **kwargs):
    """Sends a HEAD request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: Optional arguments that ``request`` takes.
    """
    # Follow redirects unless the caller explicitly decided otherwise.
    if 'allow_redirects' not in kwargs:
        kwargs['allow_redirects'] = True
    return request('head', url, **kwargs)
def get(url, **kwargs):
    """Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param kwargs: Optional arguments that ``request`` takes.
    """
    # Defaults first, caller overrides win — equivalent to setdefault.
    merged = dict({'allow_redirects': True}, **kwargs)
    return request('get', url, **merged)
def post(url, data=None, **kwargs):
    """Sends a POST request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary or bytes to send in the body of the :class:`Request`.
    :param kwargs: Optional arguments that ``request`` takes.
    """
    # `data` can never appear in **kwargs (it is captured by the named parameter).
    kwargs['data'] = data
    return request('post', url, **kwargs)
|
{
"content_hash": "1be15164aa767c057f40343e5e573d71",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 119,
"avg_line_length": 35.92982456140351,
"alnum_prop": 0.6552734375,
"repo_name": "qk4l/Flexget",
"id": "d10b89885e4d495af612e53e6b7162774cdfc8c3",
"size": "10240",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/utils/requests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "79376"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3322934"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
from django.utils.functional import cached_property
from share import ProviderAppConfig
from .harvester import FigshareHarvester
class AppConfig(ProviderAppConfig):
    """SHARE provider app config for the figshare v2 harvester."""
    name = 'providers.com.figshare.v2'
    version = '0.0.1'
    title = 'figshare'
    long_title = 'figshare'
    harvester = FigshareHarvester
    home_page = 'https://figshare.com/'
    @cached_property
    def user(self):
        """Return this provider's robot ShareUser (cached after first lookup)."""
        # Local import defers model access until the Django app registry is ready.
        from share.models import ShareUser
        return ShareUser.objects.get(robot='providers.com.figshare')
|
{
"content_hash": "12fa450829cb5e86594b92b04b315112",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.7170923379174853,
"repo_name": "zamattiac/SHARE",
"id": "91605b22d8d44ebcf3fd24f93b94db27d70e02c0",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "providers/com/figshare/v2/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3690"
},
{
"name": "HTML",
"bytes": "1582"
},
{
"name": "Python",
"bytes": "1517988"
},
{
"name": "Shell",
"bytes": "633"
}
],
"symlink_target": ""
}
|
"""Contains the definition of the Inception Resnet V2 architecture.
As described in http://arxiv.org/abs/1602.07261.
Inception-v4, Inception-ResNet and the Impact of Residual Connections
on Learning
Christian Szegedy, Sergey Ioffe, Vincent Vanhoucke, Alex Alemi
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 35x35 resnet block."""
  with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
    with tf.variable_scope('Branch_2'):
      tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
      tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
      tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
    # Concatenate branch outputs along axis 3 (channels — assumes NHWC; TODO confirm).
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
    # Linear 1x1 projection back to the input depth (no BN/activation) so the
    # residual addition below stays linear.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
    # Residual connection: add the scaled branch output to the block input.
    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 17x17 resnet block."""
  with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
      # Factorized 7x7 convolution: 1x7 followed by 7x1.
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                  scope='Conv2d_0b_1x7')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                  scope='Conv2d_0c_7x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # Linear 1x1 projection back to the input depth for the residual sum.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
    # Residual connection: add the scaled branch output to the block input.
    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
  """Builds the 8x8 resnet block."""
  with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
    with tf.variable_scope('Branch_0'):
      tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
    with tf.variable_scope('Branch_1'):
      tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
      # Factorized 3x3 convolution: 1x3 followed by 3x1.
      tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                  scope='Conv2d_0b_1x3')
      tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                  scope='Conv2d_0c_3x1')
    mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
    # Linear 1x1 projection back to the input depth for the residual sum.
    up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                     activation_fn=None, scope='Conv2d_1x1')
    scaled_up = up * scale
    if activation_fn == tf.nn.relu6:
      # Use clip_by_value to simulate bandpass activation.
      scaled_up = tf.clip_by_value(scaled_up, -6.0, 6.0)
    # Residual connection: add the scaled branch output to the block input.
    net += scaled_up
    if activation_fn:
      net = activation_fn(net)
  return net
def inception_resnet_v2_base(inputs,
                             final_endpoint='Conv2d_7b_1x1',
                             output_stride=16,
                             align_feature_maps=False,
                             scope=None,
                             activation_fn=tf.nn.relu):
  """Inception model from http://arxiv.org/abs/1602.07261.
  Constructs an Inception Resnet v2 network from inputs to the given final
  endpoint. This method can construct the network up to the final inception
  block Conv2d_7b_1x1.
  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    final_endpoint: specifies the endpoint to construct the network up to. It
      can be one of ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
      'MaxPool_3a_3x3', 'Conv2d_3b_1x1', 'Conv2d_4a_3x3', 'MaxPool_5a_3x3',
      'Mixed_5b', 'Mixed_6a', 'PreAuxLogits', 'Mixed_7a', 'Conv2d_7b_1x1']
    output_stride: A scalar that specifies the requested ratio of input to
      output spatial resolution. Only supports 8 and 16.
    align_feature_maps: When true, changes all the VALID paddings in the network
      to SAME padding so that the feature maps are aligned.
    scope: Optional variable_scope.
    activation_fn: Activation function for block scopes.
  Returns:
    tensor_out: output tensor corresponding to the final_endpoint.
    end_points: a set of activations for external use, for example summaries or
      losses.
  Raises:
    ValueError: if final_endpoint is not set to one of the predefined values,
      or if the output_stride is not 8 or 16, or if the output_stride is 8 and
      we request an end point after 'PreAuxLogits'.
  """
  if output_stride != 8 and output_stride != 16:
    raise ValueError('output_stride must be 8 or 16.')
  padding = 'SAME' if align_feature_maps else 'VALID'
  end_points = {}
  # Records every endpoint and signals when the requested final one is reached.
  def add_and_check_final(name, net):
    end_points[name] = net
    return name == final_endpoint
  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs]):
    with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                        stride=1, padding='SAME'):
      # 149 x 149 x 32
      net = slim.conv2d(inputs, 32, 3, stride=2, padding=padding,
                        scope='Conv2d_1a_3x3')
      if add_and_check_final('Conv2d_1a_3x3', net): return net, end_points
      # 147 x 147 x 32
      net = slim.conv2d(net, 32, 3, padding=padding,
                        scope='Conv2d_2a_3x3')
      if add_and_check_final('Conv2d_2a_3x3', net): return net, end_points
      # 147 x 147 x 64
      net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
      if add_and_check_final('Conv2d_2b_3x3', net): return net, end_points
      # 73 x 73 x 64
      net = slim.max_pool2d(net, 3, stride=2, padding=padding,
                            scope='MaxPool_3a_3x3')
      if add_and_check_final('MaxPool_3a_3x3', net): return net, end_points
      # 73 x 73 x 80
      net = slim.conv2d(net, 80, 1, padding=padding,
                        scope='Conv2d_3b_1x1')
      if add_and_check_final('Conv2d_3b_1x1', net): return net, end_points
      # 71 x 71 x 192
      net = slim.conv2d(net, 192, 3, padding=padding,
                        scope='Conv2d_4a_3x3')
      if add_and_check_final('Conv2d_4a_3x3', net): return net, end_points
      # 35 x 35 x 192
      net = slim.max_pool2d(net, 3, stride=2, padding=padding,
                            scope='MaxPool_5a_3x3')
      if add_and_check_final('MaxPool_5a_3x3', net): return net, end_points
      # 35 x 35 x 320
      with tf.variable_scope('Mixed_5b'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 96, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 48, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 64, 5,
                                      scope='Conv2d_0b_5x5')
        with tf.variable_scope('Branch_2'):
          tower_conv2_0 = slim.conv2d(net, 64, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2_0, 96, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 96, 3,
                                      scope='Conv2d_0c_3x3')
        with tf.variable_scope('Branch_3'):
          tower_pool = slim.avg_pool2d(net, 3, stride=1, padding='SAME',
                                       scope='AvgPool_0a_3x3')
          tower_pool_1 = slim.conv2d(tower_pool, 64, 1,
                                     scope='Conv2d_0b_1x1')
        net = tf.concat(
            [tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1], 3)
      if add_and_check_final('Mixed_5b', net): return net, end_points
      # TODO(alemi): Register intermediate endpoints
      net = slim.repeat(net, 10, block35, scale=0.17,
                        activation_fn=activation_fn)
      # 17 x 17 x 1088 if output_stride == 8,
      # 33 x 33 x 1088 if output_stride == 16
      use_atrous = output_stride == 8
      with tf.variable_scope('Mixed_6a'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 384, 3, stride=1 if use_atrous else 2,
                                   padding=padding,
                                   scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          tower_conv1_0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1_0, 256, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv1_2 = slim.conv2d(tower_conv1_1, 384, 3,
                                      stride=1 if use_atrous else 2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          tower_pool = slim.max_pool2d(net, 3, stride=1 if use_atrous else 2,
                                       padding=padding,
                                       scope='MaxPool_1a_3x3')
        net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
      if add_and_check_final('Mixed_6a', net): return net, end_points
      # TODO(alemi): register intermediate endpoints
      # With output_stride 8, spatial resolution is kept by using rate-2
      # (atrous) convolutions in place of striding.
      with slim.arg_scope([slim.conv2d], rate=2 if use_atrous else 1):
        net = slim.repeat(net, 20, block17, scale=0.10,
                          activation_fn=activation_fn)
      if add_and_check_final('PreAuxLogits', net): return net, end_points
      if output_stride == 8:
        # TODO(gpapan): Properly support output_stride for the rest of the net.
        raise ValueError('output_stride==8 is only supported up to the '
                         'PreAuxlogits end_point for now.')
      # 8 x 8 x 2080
      with tf.variable_scope('Mixed_7a'):
        with tf.variable_scope('Branch_0'):
          tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
                                     padding=padding,
                                     scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_1'):
          tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv1_1 = slim.conv2d(tower_conv1, 288, 3, stride=2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_2'):
          tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
          tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                      scope='Conv2d_0b_3x3')
          tower_conv2_2 = slim.conv2d(tower_conv2_1, 320, 3, stride=2,
                                      padding=padding,
                                      scope='Conv2d_1a_3x3')
        with tf.variable_scope('Branch_3'):
          tower_pool = slim.max_pool2d(net, 3, stride=2,
                                       padding=padding,
                                       scope='MaxPool_1a_3x3')
        net = tf.concat(
            [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
      if add_and_check_final('Mixed_7a', net): return net, end_points
      # TODO(alemi): register intermediate endpoints
      net = slim.repeat(net, 9, block8, scale=0.20, activation_fn=activation_fn)
      # Final block8 runs without activation ahead of the closing 1x1 conv.
      net = block8(net, activation_fn=None)
      # 8 x 8 x 1536
      net = slim.conv2d(net, 1536, 1, scope='Conv2d_7b_1x1')
      if add_and_check_final('Conv2d_7b_1x1', net): return net, end_points
  raise ValueError('final_endpoint (%s) not recognized', final_endpoint)
def inception_resnet_v2(inputs, num_classes=1001, is_training=True,
                        dropout_keep_prob=0.8,
                        reuse=None,
                        scope='InceptionResnetV2',
                        create_aux_logits=True,
                        activation_fn=tf.nn.relu):
  """Creates the Inception Resnet V2 model.
  Args:
    inputs: a 4-D tensor of size [batch_size, height, width, 3].
      Dimension batch_size may be undefined. If create_aux_logits is false,
      also height and width may be undefined.
    num_classes: number of predicted classes. If 0 or None, the logits layer
      is omitted and the input features to the logits layer (before  dropout)
      are returned instead.
    is_training: whether is training or not.
    dropout_keep_prob: float, the fraction to keep before final layer.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.
    create_aux_logits: Whether to include the auxilliary logits.
    activation_fn: Activation function for conv2d.
  Returns:
    net: the output of the logits layer (if num_classes is a non-zero integer),
      or the non-dropped-out input to the logits layer (if num_classes is 0 or
      None).
    end_points: the set of end_points from the inception model.
  """
  end_points = {}
  with tf.variable_scope(scope, 'InceptionResnetV2', [inputs],
                         reuse=reuse) as scope:
    # batch_norm/dropout behave differently in train vs. eval mode.
    with slim.arg_scope([slim.batch_norm, slim.dropout],
                        is_training=is_training):
      net, end_points = inception_resnet_v2_base(inputs, scope=scope,
                                                 activation_fn=activation_fn)
      if create_aux_logits and num_classes:
        # Auxiliary classifier head off the 17x17 'PreAuxLogits' features.
        with tf.variable_scope('AuxLogits'):
          aux = end_points['PreAuxLogits']
          aux = slim.avg_pool2d(aux, 5, stride=3, padding='VALID',
                                scope='Conv2d_1a_3x3')
          aux = slim.conv2d(aux, 128, 1, scope='Conv2d_1b_1x1')
          aux = slim.conv2d(aux, 768, aux.get_shape()[1:3],
                            padding='VALID', scope='Conv2d_2a_5x5')
          aux = slim.flatten(aux)
          aux = slim.fully_connected(aux, num_classes, activation_fn=None,
                                     scope='Logits')
          end_points['AuxLogits'] = aux
      with tf.variable_scope('Logits'):
        # TODO(sguada,arnoegw): Consider adding a parameter global_pool which
        # can be set to False to disable pooling here (as in resnet_*()).
        kernel_size = net.get_shape()[1:3]
        # Static spatial dims allow a fixed avg-pool; otherwise fall back to
        # a dynamic global mean over height/width.
        if kernel_size.is_fully_defined():
          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
                                scope='AvgPool_1a_8x8')
        else:
          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
        end_points['global_pool'] = net
        if not num_classes:
          return net, end_points
        net = slim.flatten(net)
        net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
                           scope='Dropout')
        end_points['PreLogitsFlatten'] = net
        logits = slim.fully_connected(net, num_classes, activation_fn=None,
                                      scope='Logits')
        end_points['Logits'] = logits
        end_points['Predictions'] = tf.nn.softmax(logits, name='Predictions')
    return logits, end_points
inception_resnet_v2.default_image_size = 299
def inception_resnet_v2_arg_scope(weight_decay=0.00004,
                                  batch_norm_decay=0.9997,
                                  batch_norm_epsilon=0.001,
                                  activation_fn=tf.nn.relu):
  """Returns the scope with the default parameters for inception_resnet_v2.
  Args:
    weight_decay: the weight decay for weights variables.
    batch_norm_decay: decay for the moving average of batch_norm momentums.
    batch_norm_epsilon: small float added to variance to avoid dividing by zero.
    activation_fn: Activation function for conv2d.
  Returns:
    a arg_scope with the parameters needed for inception_resnet_v2.
  """
  # Set weight_decay for weights in conv2d and fully_connected layers.
  with slim.arg_scope([slim.conv2d, slim.fully_connected],
                      weights_regularizer=slim.l2_regularizer(weight_decay),
                      biases_regularizer=slim.l2_regularizer(weight_decay)):
    batch_norm_params = {
        'decay': batch_norm_decay,
        'epsilon': batch_norm_epsilon,
        'fused': None,  # Use fused batch norm if possible.
    }
    # Set activation_fn and parameters for batch_norm.
    with slim.arg_scope([slim.conv2d], activation_fn=activation_fn,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=batch_norm_params) as scope:
      return scope
|
{
"content_hash": "d1e14eda38e00f0b60f670d78a614cb5",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 80,
"avg_line_length": 45.49869451697128,
"alnum_prop": 0.5836680821760588,
"repo_name": "ildoonet/tf-openpose",
"id": "0205340e71baf62cad4f2e281a9bfa1b328e3e1c",
"size": "18111",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tf_pose/slim/nets/inception_resnet_v2.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "100377"
},
{
"name": "Shell",
"bytes": "995"
}
],
"symlink_target": ""
}
|
##
# Import Modules
#
import Common.EdkLogger as EdkLogger
## TableFile
#
# This class defined a common table
#
# @param object: Inherited from object class
#
# @param Cursor: Cursor of the database
# @param TableName: Name of the table
#
class Table(object):
    """Common wrapper for one database table accessed through a DB cursor.

    Subclasses set self.Table to the table name; self.ID tracks the highest
    record ID handed out so far.

    @param Cursor: Cursor of the database
    """
    def __init__(self, Cursor):
        self.Cur = Cursor
        self.Table = ''
        self.ID = 0
    ## Create table
    #
    # Create a table
    #
    def Create(self, SqlCommand):
        self.Cur.execute(SqlCommand)
        # A freshly (re)created table has no records, so restart ID generation.
        self.ID = 0
        EdkLogger.verbose(SqlCommand + " ... DONE!")
    ## Insert table
    #
    # Insert a record into a table
    #
    def Insert(self, SqlCommand):
        self.Exec(SqlCommand)
    ## Query table
    #
    # Query all records of the table and log them at verbose level
    #
    def Query(self):
        EdkLogger.verbose("\nQuery tabel %s started ..." % self.Table)
        SqlCommand = """select * from %s""" % self.Table
        self.Cur.execute(SqlCommand)
        for Rs in self.Cur:
            EdkLogger.verbose(str(Rs))
        TotalCount = self.GetCount()
        EdkLogger.verbose("*** Total %s records in table %s ***" % (TotalCount, self.Table) )
        EdkLogger.verbose("Query tabel %s DONE!" % self.Table)
    ## Drop a table
    #
    # Drop the table
    #
    def Drop(self):
        SqlCommand = """drop table IF EXISTS %s""" % self.Table
        self.Cur.execute(SqlCommand)
        EdkLogger.verbose("Drop tabel %s ... DONE!" % self.Table)
    ## Get count
    #
    # Get a count of all records of the table
    #
    # @retval Count: Total count of all records
    #
    def GetCount(self):
        SqlCommand = """select count(ID) from %s""" % self.Table
        self.Cur.execute(SqlCommand)
        for Item in self.Cur:
            return Item[0]
    ## Generate ID
    #
    # Generate an ID if input ID is -1
    #
    # @param ID: Input ID
    #
    # @retval ID: New generated ID, or the input ID when it was not -1
    #
    def GenerateID(self, ID):
        if ID == -1:
            self.ID = self.ID + 1
            return self.ID
        # Fix: previously this path fell through and implicitly returned None,
        # contradicting the documented contract; echo the caller-supplied ID.
        return ID
    ## Init the ID of the table
    #
    # Seed the ID counter from the current record count
    #
    def InitID(self):
        self.ID = self.GetCount()
    ## Exec
    #
    # Exec Sql Command, return result
    #
    # @param SqlCommand: The SqlCommand to be executed
    #
    # @retval RecordSet: The result after executed
    #
    def Exec(self, SqlCommand):
        EdkLogger.debug(4, "SqlCommand: %s" % SqlCommand)
        self.Cur.execute(SqlCommand)
        RecordSet = self.Cur.fetchall()
        EdkLogger.debug(4, "RecordSet: %s" % RecordSet)
        return RecordSet
|
{
"content_hash": "b68e96e177d5517293965ae9c6e3d753",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 93,
"avg_line_length": 25.01851851851852,
"alnum_prop": 0.5455218356772761,
"repo_name": "MattDevo/edk2",
"id": "e89b99320d1f697ef23d32ce46fb8b7cd5d12fe5",
"size": "3282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BaseTools/Source/Python/Table/Table.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "4545237"
},
{
"name": "Batchfile",
"bytes": "93042"
},
{
"name": "C",
"bytes": "94289702"
},
{
"name": "C++",
"bytes": "20170310"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "GAP",
"bytes": "698245"
},
{
"name": "GDB",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "472114"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "231845"
},
{
"name": "NSIS",
"bytes": "2229"
},
{
"name": "Objective-C",
"bytes": "4147834"
},
{
"name": "PHP",
"bytes": "674"
},
{
"name": "PLSQL",
"bytes": "24782"
},
{
"name": "Perl",
"bytes": "6218"
},
{
"name": "Python",
"bytes": "27130096"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Roff",
"bytes": "28192"
},
{
"name": "Shell",
"bytes": "104362"
},
{
"name": "SourcePawn",
"bytes": "29427"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
}
|
"""Provides utility functions in anyconfig.backend.base."""
import functools
import pathlib
import typing
def not_implemented(*_args, **_kwargs) -> None:
    """Placeholder callable: always raise :exc:`NotImplementedError`.

    All positional and keyword arguments are accepted and ignored.
    """
    raise NotImplementedError
def ensure_outdir_exists(filepath: typing.Union[str, pathlib.Path]) -> None:
    """Create the parent directory of 'filepath' if it does not exist yet.

    :param filepath: path of file to dump
    """
    outdir = pathlib.Path(filepath).parent
    outdir.mkdir(parents=True, exist_ok=True)
def to_method(func: typing.Callable[..., typing.Any]
              ) -> typing.Callable[..., typing.Any]:
    """Lift :func:`func` to a method.

    The returned wrapper drops its first positional argument ('self')
    and forwards everything else to *func* unchanged.

    :param func: Any callable object
    """
    @functools.wraps(func)
    def _as_method(*args, **kwargs):
        """Original function decorated."""
        return func(*args[1:], **kwargs)
    return _as_method
# vim:sw=4:ts=4:et:
|
{
"content_hash": "36820cf23544f479937f79f36c48ce28",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 76,
"avg_line_length": 26.514285714285716,
"alnum_prop": 0.6540948275862069,
"repo_name": "ssato/python-anyconfig",
"id": "536916d5b92feb9430be92ba8ef5d0d051635935",
"size": "1031",
"binary": false,
"copies": "1",
"ref": "refs/heads/next",
"path": "src/anyconfig/backend/base/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "568"
},
{
"name": "Python",
"bytes": "348779"
},
{
"name": "Shell",
"bytes": "3456"
}
],
"symlink_target": ""
}
|
import sys
import re
# Print the transition from the old x,y to the new x,y dimensions
# @param oldX {int} original x dimension
# @param oldY {int} original y dimension
# @param newX {int} scaled x dimension
# @param newY {int} scaled y dimension
def print_new_size(oldX, oldY, newX, newY):
    message = " > {}x{} -> {}x{}".format(oldX, oldY, newX, newY)
    print(message)
# Calculate the resized x dimension based on the given y dimension,
# preserving the original aspect ratio.
# @param oldX {int} original x dimension
# @param oldY {int} original y dimension
# @param newY {int} determined y dimension
def resize_x(oldX, oldY, newY):
    scaled = round(oldX * newY / oldY, 0)
    return int(scaled)
# Calculate the resized y dimension based on the given x dimension,
# preserving the original aspect ratio.
# @param oldX {int} original x dimension
# @param oldY {int} original y dimension
# @param newX {int} determined x dimension
def resize_y(oldX, oldY, newX):
    scaled = round(oldY * newX / oldX, 0)
    return int(scaled)
# Apply a percentage scale to both dimensions.
# @param oldX {int} original x dimension
# @param oldY {int} original y dimension
# @param scale {int} dimension scale as a percentage (100 == unchanged)
def resize_s(oldX, oldY, scale):
    factor = float(scale) / 100.0
    return [int(round(dim * factor, 0)) for dim in (oldX, oldY)]
# Entry point: parse sys.argv and print the scaled image dimensions.
#
# Usage: (-x|-y|-s) <oldX> <oldY> <value>
#   -x  <value> is the new x dimension; y is derived proportionally
#   -y  <value> is the new y dimension; x is derived proportionally
#   -s  <value> is a percentage scale applied to both dimensions
# Exits with status 1 on a malformed argument list.
def scale():
    print()
    arg_r = re.compile('^-[xys]$')
    # BUG FIX: was '^[0-9]*$', which accepts an empty string and then
    # crashes later in int(''); '+' rejects empty numeric arguments here.
    val_r = re.compile('^[0-9]+$')
    arguments = sys.argv[1:]
    if (not len(arguments) == 4):
        print("Invalid Argument Count")
        sys.exit(1)
    if (not re.match(arg_r, arguments[0])):
        print("Invalid Modifier: ", arguments[0])
        sys.exit(1)
    for arg in arguments[1:]:
        if (not re.match(val_r, arg)):
            print("Invalid Argument: ", arg)
            sys.exit(1)
    # Both dimensions default to the supplied value; the matching branch
    # below overwrites whichever one must be derived.
    x = int(arguments[3])
    y = int(arguments[3])
    # -y: the scaled y dimension is given, derive x
    if (re.match(r'-y', arguments[0])):
        x = resize_x(int(arguments[1]), int(arguments[2]), int(arguments[3]))
    # -x: the scaled x dimension is given, derive y
    if (re.match(r'-x', arguments[0])):
        y = resize_y(int(arguments[1]), int(arguments[2]), int(arguments[3]))
    # -s: a percentage scale applied to both dimensions
    if (re.match(r'-s', arguments[0])):
        vals = resize_s(int(arguments[1]), int(arguments[2]), int(arguments[3]))
        x = vals[0]
        y = vals[1]
    print_new_size(int(arguments[1]), int(arguments[2]), x, y)
|
{
"content_hash": "9ef8244a9868f4490b9678ae5476dcd1",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 74,
"avg_line_length": 31.875,
"alnum_prop": 0.673202614379085,
"repo_name": "ChrisoftheBoyerClan/scale.py",
"id": "893edffbbfd32170b8bfe54e01e4605df2e8761f",
"size": "2295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scalepy/scale.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2737"
}
],
"symlink_target": ""
}
|
# Driver script for the basic H2O REST API smoke tests: connects to a
# running H2O node, then runs each test_* module in sequence against it.
import sys, pprint, os
# Make sibling directories (repo root, py/, rest_tests/) importable.
sys.path.insert(1, '..')
sys.path.insert(1, '.')
sys.path.insert(1, os.path.join("..", "py"))
import h2o
import h2o_test_utils
from h2o_test_utils import ModelSpec
import os
import argparse
import time
import json
import requests
sys.path.insert(1, '../rest_tests')
#################
# Config
#################
clean_up_after = False
# Algorithms exercised by the model-building tests below.
algos = ['kmeans', 'deeplearning', 'deepwater', 'drf', 'glm', 'gbm', 'pca', 'naivebayes', 'glrm', 'svd', 'aggregator']
algo_additional_default_params = { 'grep' : { 'regex' : '.*' },
                                   'kmeans' : { 'k' : 2 }
                                 } # additional params to add to the default params
#################
# setup
#################
parser = argparse.ArgumentParser(
    description='Run basic H2O REST API tests.',
)
parser.add_argument('--verbose', '-v', help='verbose output', action='count')
parser.add_argument('--usecloud', help='ip:port to attach to', default='')
parser.add_argument('--host', help='hostname to attach to', default='localhost')
parser.add_argument('--port', help='port to attach to', type=int, default=54321)
args = parser.parse_args()
h2o_test_utils.setVerbosity(args.verbose)
h2o.H2O.verbose = h2o_test_utils.isVerboser()
# --usecloud ip:port overrides --host/--port when given.
if (len(args.usecloud) > 0):
    arr = args.usecloud.split(":")
    args.host = arr[0]
    args.port = int(arr[1])
host = args.host
port = args.port
h2o.H2O.verboseprint("host: " + str(host))
# NOTE(review): log label is missing the ": " separator ("port54321").
h2o.H2O.verboseprint("port" + str(port))
pp = pprint.PrettyPrinter(indent=4) # pretty printer for debugging
################
# The test body:
################
a_node = h2o.H2O(host, port)
h2o.H2O.verboseprint("connected to: ", str(host), ':', str(port))
# Each test module is imported lazily and run against the connected node.
import test_metadata
test_metadata.test(a_node, pp)
import test_html
test_html.test(a_node, pp)
import test_cluster_sanity
test_cluster_sanity.test(a_node, pp, algos)
# Clean up old objects from the DKV, in case the cluster has been doing other things:
if h2o_test_utils.isVerbose(): print('Cleaning up old stuff. . .')
h2o_test_utils.cleanup(a_node)
import test_and_import_frames
datasets = test_and_import_frames.load_and_test(a_node, pp)
import test_models
test_models.build_and_test(a_node, pp, datasets, algos, algo_additional_default_params)
# Metadata used to get corrupted, so test again
test_metadata.test(a_node, pp)
import test_predict_and_model_metrics
test_predict_and_model_metrics.test(a_node, pp)
import test_final_sanity
test_final_sanity.test(a_node, pp)
# TODO: use built_models
# NOTE(review): dl_airlines_model_name is not defined anywhere in this
# script; enabling clean_up_after would raise NameError — confirm the
# intended model name before turning this flag on.
if clean_up_after:
    h2o_test_utils.cleanup(models=[dl_airlines_model_name, 'deeplearning_prostate_binomial', 'kmeans_prostate'], frames=['prostate_binomial', 'airlines_binomial'])
|
{
"content_hash": "d72f0199c76e93120468498d86a8427a",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 163,
"avg_line_length": 26.762376237623762,
"alnum_prop": 0.6618571957084721,
"repo_name": "jangorecki/h2o-3",
"id": "9fb91dbcf7b1e2400eeb56c45bb61c92b405ad89",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py/testdir_multi_jvm/test_rest_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12629"
},
{
"name": "CSS",
"bytes": "248999"
},
{
"name": "CoffeeScript",
"bytes": "273112"
},
{
"name": "Emacs Lisp",
"bytes": "7110"
},
{
"name": "Groovy",
"bytes": "5090"
},
{
"name": "HTML",
"bytes": "190624"
},
{
"name": "Java",
"bytes": "7617358"
},
{
"name": "JavaScript",
"bytes": "63887"
},
{
"name": "Jupyter Notebook",
"bytes": "6237434"
},
{
"name": "Makefile",
"bytes": "42012"
},
{
"name": "Python",
"bytes": "4130120"
},
{
"name": "R",
"bytes": "2160977"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "16483"
},
{
"name": "Shell",
"bytes": "83394"
},
{
"name": "TeX",
"bytes": "584679"
}
],
"symlink_target": ""
}
|
# Hive Netius System
# Copyright (c) 2008-2020 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
# Package metadata. The bare strings following each assignment are the
# project's attribute-docstring convention; they are statements, not
# comments, and are preserved as-is.
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2020 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
# Re-export the shell-facing submodules at package level.
from . import base
from . import dkim
from . import rsa
from . import smtp
|
{
"content_hash": "5cc8e3d0e1da74f86c0fffa7ab30252d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 76,
"avg_line_length": 32.5,
"alnum_prop": 0.6939271255060728,
"repo_name": "hivesolutions/netius",
"id": "fae40397cbedb280295c2d39ba5668328e1542d9",
"size": "1279",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/netius/sh/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1400497"
}
],
"symlink_target": ""
}
|
import requests
from requests import HTTPError
from project.exception_handling import ApiError
def make_mobile_payment(amount, phone, names):
    """Submit a mobile payment request to the local payment service.

    :param amount: payment amount; coerced to int before sending
    :param phone: payer phone number
    :param names: payer names
    :return: HTTP status code of the payment request
    :raises HTTPError: when the underlying HTTP request itself fails
    """
    try:
        payload = {
            "names": names,
            "phone": phone,
            "amount": int(amount)
        }
        # SECURITY NOTE(review): credentials are hard-coded; move them to
        # configuration/environment before shipping.
        resp = requests.post('http://127.0.0.1:8000/mobilepayment/', json=payload, auth=('tst_user', '@ssSuper!135'))
        print(resp.text + " ------------------" + str(resp.status_code))
        if resp.status_code != 200:
            print(resp.status_code)
            # raise HTTPError
        else:
            # BUG FIX: only parse the body on success; the original called
            # resp.json()["id"] unconditionally, which can raise on
            # non-JSON error bodies.
            print('Created task. ID: {}'.format(resp.json()["id"]))
        return resp.status_code
    except requests.exceptions.RequestException as e:
        # Preserve the original failure in the raised exception.
        raise HTTPError(e)
|
{
"content_hash": "281b459488366b368ef71dcb6bd73a6f",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 31.416666666666668,
"alnum_prop": 0.5755968169761273,
"repo_name": "fiston/abaganga",
"id": "27985e29f3a42fffa12923a221f8447242a3838c",
"size": "754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/payment/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "306"
},
{
"name": "HTML",
"bytes": "31190"
},
{
"name": "JavaScript",
"bytes": "57"
},
{
"name": "Python",
"bytes": "45949"
}
],
"symlink_target": ""
}
|
class ModuleDocFragment(object):
    """Reusable Ansible documentation fragment for HPE 3PAR modules.

    Modules reference this fragment to inherit the common connection
    options below. DOCUMENTATION is parsed as YAML by ansible-doc, so
    its content must stay byte-exact.
    """

    # HPE 3PAR doc fragment
    DOCUMENTATION = '''
options:
    storage_system_ip:
        description:
            - The storage system IP address.
        type: str
        required: true
    storage_system_password:
        description:
            - The storage system password.
        type: str
        required: true
    storage_system_username:
        description:
            - The storage system user name.
        type: str
        required: true
requirements:
    - hpe3par_sdk >= 1.0.2. Install using 'pip install hpe3par_sdk'
    - WSAPI service should be enabled on the 3PAR storage array.
notes:
    - check_mode not supported
'''
|
{
"content_hash": "6560720bdfc5e6b633086de14c92c4dc",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 24.185185185185187,
"alnum_prop": 0.6385911179173047,
"repo_name": "SergeyCherepanov/ansible",
"id": "2f0c69eea24e26eee8c0c0de23082b5593f36ca8",
"size": "815",
"binary": false,
"copies": "38",
"ref": "refs/heads/master",
"path": "ansible/ansible/plugins/doc_fragments/hpe3par.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
}
|
import copy
from gen.resource_xsd import *
from gen.resource_common import *
from gen.resource_server import *
from pprint import pformat
class QuotaHelper(object):
    """Helpers to look up and enforce per-project resource quotas.

    A project's quota lives under proj_dict['quota'] as a mapping of
    underscored resource type -> limit; a limit <= 0 is treated as
    unlimited by check_quota_limit.
    """

    # Fallback used when neither the project nor the resource type
    # defines a limit: -1 means unlimited.
    default_quota = {
        'defaults': -1
    }

    @classmethod
    def get_project_dict(cls, proj_uuid, db_conn):
        """Read the project object for *proj_uuid*; returns (ok, dict)."""
        (ok, proj_dict) = db_conn.dbe_read('project', {'uuid': proj_uuid})
        return (ok, proj_dict)

    @classmethod
    def get_quota_limit(cls, proj_dict, obj_type):
        """Return the quota limit for *obj_type* (dashed form).

        Falls back to cls.default_quota, and finally to its 'defaults'
        entry (-1, unlimited), when no explicit limit is configured.
        """
        quota = proj_dict.get('quota') or cls.default_quota
        quota_type = obj_type.replace('-', '_')
        quota_limit = quota.get(quota_type)
        if quota_limit is None:
            quota_limit = cls.default_quota.get(quota_type)
        if quota_limit is None:
            quota_limit = cls.default_quota['defaults']
        return quota_limit

    @classmethod
    def check_quota_limit(cls, proj_dict, obj_type, quota_count):
        """Check *quota_count* against the project's limit for *obj_type*.

        :returns: (False, error message) when the limit is exceeded,
                  (True, limit) otherwise.
        """
        quota_limit = cls.get_quota_limit(proj_dict, obj_type)
        if quota_limit > 0 and quota_count >= quota_limit:
            return (False,
                    'quota limit (%d) exceeded for resource %s'
                    % (quota_limit, obj_type))
        return (True, quota_limit)

    @classmethod
    def verify_quota_for_resource(cls, db_conn, resource, obj_type,
                                  user_visibility, proj_uuid=None,
                                  fq_name=None):
        """Verify that creating one more *resource* stays within quota.

        :returns: (True, "") on success or for non-user-visible objects;
                  (False, (http_code, message)) on failure.
        """
        # BUG FIX: fq_name previously defaulted to a shared mutable list.
        if fq_name is None:
            fq_name = []
        if not proj_uuid and fq_name:
            try:
                proj_uuid = db_conn.fq_name_to_uuid('project', fq_name[0:2])
            except cfgm_common.exceptions.NoIdError:
                # BUG FIX: proj_uuid is still None in this branch; the
                # original concatenated it into the message and raised
                # TypeError. Report the fq_name that failed to resolve.
                return (False, (500, 'No Project ID error : '
                                + pformat(fq_name[0:2])))
        (ok, proj_dict) = cls.get_project_dict(proj_uuid, db_conn)
        if not ok:
            return (False, (500, 'Internal error : ' + pformat(proj_dict)))
        if not user_visibility:
            return True, ""
        quota_count = len(proj_dict.get(resource, []))
        (ok, quota_limit) = cls.check_quota_limit(proj_dict, obj_type,
                                                  quota_count)
        if not ok:
            return (False, (403, pformat(fq_name) + ' : ' + quota_limit))
        return True, ""
|
{
"content_hash": "be54f3456b0cfbc5746da011032f79c9",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 97,
"avg_line_length": 36.58620689655172,
"alnum_prop": 0.5730442978322338,
"repo_name": "srajag/contrail-controller",
"id": "884ab420c555e5ac5637a15c5432f1fa626d01fb",
"size": "2122",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/config/api-server/vnc_quota.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "80551"
},
{
"name": "C",
"bytes": "44989"
},
{
"name": "C++",
"bytes": "14871796"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Java",
"bytes": "171966"
},
{
"name": "Lua",
"bytes": "7673"
},
{
"name": "Makefile",
"bytes": "12439"
},
{
"name": "Objective-C",
"bytes": "720"
},
{
"name": "Protocol Buffer",
"bytes": "1120"
},
{
"name": "Python",
"bytes": "3008184"
},
{
"name": "Shell",
"bytes": "54611"
},
{
"name": "Thrift",
"bytes": "40763"
}
],
"symlink_target": ""
}
|
# Build one pyaf model-control configuration: Anscombe transformation,
# polynomial trend, no cycle, ARX autoregression.
import tests.model_control.test_ozone_custom_models_enabled as testmod

testmod.build_model(['Anscombe'], ['PolyTrend'], ['NoCycle'], ['ARX'])
|
{
"content_hash": "8591ddf1ccaafb8c43ce59e621ba7ab6",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 37.25,
"alnum_prop": 0.697986577181208,
"repo_name": "antoinecarme/pyaf",
"id": "251071b3f018f45d59bf4d90bee799ad78add3b3",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_PolyTrend_NoCycle_ARX.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
import os
import sys
import argparse
import json
import requests
def debug(msg):
    """Print *msg* when the --debug flag was given on the command line.

    Relies on the module-level ``args`` namespace parsed at startup.
    """
    # BUG FIX: parenthesized print keeps Python 2 behavior while also
    # being valid Python 3 syntax.
    if args.debug:
        print("DEBUG: %s" % (msg))
def process_repo_file(filename, stack={"repos": []}, depth=0):
    """Load a repo JSON file, recursively merging the files it includes.

    :param filename: path of a JSON file of shape
        {"repos": [...], "include": ["name", ...]}
    :param stack: accumulator dict whose "repos" list is extended.
        NOTE(review): this mutable default is shared across calls, and the
        bottom of this script appears to rely on that sharing to merge the
        main and user repo lists into one dict — confirm before changing
        it to a None default.
    :param depth: recursion depth; used only to indent log output
    :return: *stack* with this file's (and its includes') repos appended
    """
    print("[updateRepos]%s Processing %s" % ((' ' * depth), filename))
    try:
        repo_file = open(filename)
    except IOError as error:
        print("[updateRepos][error] Open of file \"%s\" failed: %s" % (filename, error))
        sys.exit(1)
    try:
        repo_dict = json.load(repo_file)
    except (ValueError, NameError) as error:
        print("[updateRepos][error] JSON format error in file \"%s\": %s" % (filename, error))
        sys.exit(1)
    finally:
        # BUG FIX: the original never closed the file handle.
        repo_file.close()
    if 'include' in repo_dict:
        for includerepo in repo_dict["include"]:
            debug("found include: %s" % includerepo)
            stack = process_repo_file("/tmp/snops-repo/images/%s/fs/etc/snopsrepo.d/%s.json" % (includerepo, includerepo), stack, depth + 1)
    stack["repos"].extend(repo_dict["repos"])
    return stack
# Setup and process arguments
parser = argparse.ArgumentParser(description='Script to update f5-super-netops Container Repo List')
parser.add_argument("json", help="The JSON repo file")
parser.add_argument("-D", "--debug", help="Enable debug output", action="store_true")
args = parser.parse_args()
# Merge the requested repo file (plus its includes) and the user-supplied
# extras, then write the combined list where the portal reads it.
repos = process_repo_file(args.json)
user_repos = process_repo_file('/tmp/user_repos.json')
# NOTE(review): user_repos is never explicitly merged into repos before
# writing; this only works because process_repo_file's shared default
# accumulator makes both calls extend the same dict — confirm intent.
debug(json.dumps(repos, indent=1))
with open('/home/snops/repos.json', 'w') as out:
    json.dump(repos, out, indent=1)
|
{
"content_hash": "5a23c672c3cb6f182f41b97aa6a0b6fa",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 129,
"avg_line_length": 29.2,
"alnum_prop": 0.6952054794520548,
"repo_name": "s-archer/f5-super-netops-openshift",
"id": "16f035847bb47d8e0a04411aa70d9f27111e5d46",
"size": "1479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/base/fs/snopsboot/updateRepos.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Groovy",
"bytes": "647"
},
{
"name": "Python",
"bytes": "4933"
},
{
"name": "Shell",
"bytes": "3616"
}
],
"symlink_target": ""
}
|
"""Test that pymongo resets its own locks after a fork."""
import os
import sys
import unittest
from multiprocessing import Pipe
sys.path[0:0] = [""]
from test import IntegrationTest
from test.utils import is_greenthread_patched
from bson.objectid import ObjectId
# Verifies pymongo resets its internal locks and per-process state after
# os.fork(); relies on os.register_at_fork hooks, so both decorators gate
# on fork support being real POSIX fork.
@unittest.skipIf(
    not hasattr(os, "register_at_fork"), "register_at_fork not available in this version of Python"
)
@unittest.skipIf(
    is_greenthread_patched(),
    "gevent and eventlet do not support POSIX-style forking.",
)
class TestFork(IntegrationTest):
    def test_lock_client(self):
        # Forks the client with some items locked.
        # Parent => All locks should be as before the fork.
        # Child => All locks should be reset.
        # (self.client / self.fork come from IntegrationTest; the
        # name-mangled attribute is MongoClient's private lock.)
        with self.client._MongoClient__lock:

            def target():
                # Runs in the child: would deadlock if the fork handlers
                # did not reset the client lock.
                self.client.admin.command("ping")

            with self.fork(target):
                pass
        # Parent still works after releasing its lock.
        self.client.admin.command("ping")

    def test_lock_object_id(self):
        # Forks the client with ObjectId's _inc_lock locked.
        # Parent => _inc_lock should remain locked.
        # Child => _inc_lock should be unlocked.
        with ObjectId._inc_lock:

            def target():
                self.assertFalse(ObjectId._inc_lock.locked())
                self.assertTrue(ObjectId())

            with self.fork(target):
                pass

    def test_topology_reset(self):
        # Tests that topologies are different from each other.
        # Cannot use ID because virtual memory addresses may be the same.
        # Cannot reinstantiate ObjectId in the topology settings.
        # Relies on difference in PID when opened again.
        parent_conn, child_conn = Pipe()
        init_id = self.client._topology._pid
        parent_cursor_exc = self.client._kill_cursors_executor

        def target():
            # Child: first command triggers topology re-open under the
            # child's PID; report results back over the pipe.
            self.client.admin.command("ping")
            child_conn.send(self.client._topology._pid)
            child_conn.send(
                (
                    parent_cursor_exc != self.client._kill_cursors_executor,
                    "client._kill_cursors_executor was not reinitialized",
                )
            )

        with self.fork(target):
            # Parent keeps its original topology PID.
            self.assertEqual(self.client._topology._pid, init_id)
            child_id = parent_conn.recv()
            self.assertNotEqual(child_id, init_id)
            passed, msg = parent_conn.recv()
            self.assertTrue(passed, msg)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "bade110d9ab230157e9ddd65bff25c3c",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 99,
"avg_line_length": 32.01282051282051,
"alnum_prop": 0.6059271125350421,
"repo_name": "mongodb/mongo-python-driver",
"id": "422cd89f28351ce731749dcda24c84aa29188cb6",
"size": "3079",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_fork.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "183641"
},
{
"name": "Python",
"bytes": "2983153"
},
{
"name": "Shell",
"bytes": "30026"
}
],
"symlink_target": ""
}
|
import os
def load_tests(loader, standard_tests, pattern):
    """unittest ``load_tests`` hook: discover this package's tests.

    Discovers test modules next to this file (default pattern "test*")
    and appends them to *standard_tests*, which is returned.
    """
    start_dir = os.path.dirname(__file__)
    effective_pattern = "test*" if pattern is None else pattern
    discovered = loader.discover(start_dir=start_dir, pattern=effective_pattern)
    standard_tests.addTests(discovered)
    return standard_tests
|
{
"content_hash": "00844901b24cce3ab1461776bbe20cfe",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 72,
"avg_line_length": 32.55555555555556,
"alnum_prop": 0.6928327645051194,
"repo_name": "bitdancer/pynvm",
"id": "976bb18892a8176e12a35f5052cf6f578ad36ecb",
"size": "293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "102344"
}
],
"symlink_target": ""
}
|
# Build script for the MySQL-python C extension (_mysql).
import os
import sys
# Bootstrap setuptools (distribute) before importing the setup helpers.
from distribute_setup import use_setuptools
use_setuptools()
from setuptools import setup, Extension
# NOTE(review): `Error` is not defined or imported anywhere here, so
# hitting this branch raises NameError rather than the intended
# exception — confirm and replace with a real exception type.
if not hasattr(sys, "hexversion") or sys.hexversion < 0x02040000:
    raise Error("Python 2.4 or newer is required")
# Pick the platform-specific configuration module.
if os.name == "posix":
    from setup_posix import get_config
else: # assume windows
    from setup_windows import get_config
metadata, options = get_config()
# The C extension with platform-derived compiler/linker options.
metadata['ext_modules'] = [Extension(sources=['_mysql.c'], **options)]
# Strip literal backslash-n sequences from the long description.
metadata['long_description'] = metadata['long_description'].replace(r'\n', '')
setup(**metadata)
|
{
"content_hash": "d4f05650ee7490d5f031497b95e63dc7",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 78,
"avg_line_length": 30,
"alnum_prop": 0.7298245614035088,
"repo_name": "1c7/Python-ask-answer-website-practice",
"id": "798f96f266bd8eb262ac8966b7b85378e95be240",
"size": "593",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "1-Flask/MySQL-python-1.2.4b4/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "84040"
},
{
"name": "HTML",
"bytes": "2847"
},
{
"name": "Python",
"bytes": "292966"
},
{
"name": "Shell",
"bytes": "3809"
},
{
"name": "Smarty",
"bytes": "27"
}
],
"symlink_target": ""
}
|
"""Generated message classes for servicemanagement version v1.
The service management API for Google Cloud Platform
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.protorpclite import messages as _messages
from googlecloudsdk.third_party.apitools.base.py import encoding
# API package name used when registering the generated message types.
package = 'servicemanagement'
class Api(_messages.Message):
  """Api is a light-weight descriptor for a protocol buffer service.

  (Autogenerated message descriptor — do not hand-edit field numbers.)

  Enums:
    SyntaxValueValuesEnum: The source syntax of the service.

  Fields:
    methods: The methods of this api, in unspecified order.
    mixins: Included APIs. See Mixin.
    name: The fully qualified name of this api, including package name
      followed by the api's simple name.
    options: Any metadata attached to the API.
    sourceContext: Source context for the protocol buffer service
      represented by this message.
    syntax: The source syntax of the service.
    version: A version string for this api, of the form
      `major-version.minor-version` (e.g. `1.10`; the minor version
      defaults to zero when omitted). Follows semantic versioning: the
      major version signals breaking changes and is reflected in the
      package name suffix (`v<major-version>`, omittable for 0 and 1);
      zero major versions are for experimental, non-GA apis.
  """

  class SyntaxValueValuesEnum(_messages.Enum):
    """The source syntax of the service.

    Values:
      SYNTAX_PROTO2: Syntax `proto2`.
      SYNTAX_PROTO3: Syntax `proto3`.
    """
    SYNTAX_PROTO2 = 0
    SYNTAX_PROTO3 = 1

  methods = _messages.MessageField('Method', 1, repeated=True)
  mixins = _messages.MessageField('Mixin', 2, repeated=True)
  name = _messages.StringField(3)
  options = _messages.MessageField('Option', 4, repeated=True)
  sourceContext = _messages.MessageField('SourceContext', 5)
  syntax = _messages.EnumField('SyntaxValueValuesEnum', 6)
  version = _messages.StringField(7)
class AreaUnderCurveParams(_messages.Message):
  """Groups the metrics used to generate a duration-based (Area Under
  Curve) metric from a base (snapshot) metric and a delta (change) metric.

  The generated metric is the AUC of the "duration - resource usage"
  curve, which is what billing needs since billable usage depends on both
  the resource amount and how long it was used. A service config may
  contain multiple such groupings.

  Fields:
    changeMetric: Change of resource usage at a particular timestamp. This
      should a DELTA metric.
    generatedMetric: Metric generated from snapshot_metric and
      change_metric. This is also a DELTA metric.
    snapshotMetric: Total usage of a resource at a particular timestamp.
      This should be a GAUGE metric.
  """

  changeMetric = _messages.StringField(1)
  generatedMetric = _messages.StringField(2)
  snapshotMetric = _messages.StringField(3)
class AuthProvider(_messages.Message):
  """Configuration for an authentication provider, including support for
  [JSON Web Token
  (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).

  Fields:
    id: The unique identifier of the auth provider. It will be referred to
      by `AuthRequirement.provider_id`. Example: "bookstore_auth".
    issuer: Identifies the principal that issued the JWT (usually a URL or
      an email address). Examples: https://securetoken.google.com,
      1234567-compute@developer.gserviceaccount.com
    jwksUri: URL of the provider's public key set used to validate the JWT
      signature (see OpenID Discovery ProviderMetadata). Optional if the
      key set document can be retrieved via OpenID Discovery of the
      issuer, or inferred from the issuer's email domain (e.g. a Google
      service account). Example:
      https://www.googleapis.com/oauth2/v1/certs
  """

  id = _messages.StringField(1)
  issuer = _messages.StringField(2)
  jwksUri = _messages.StringField(3)
class AuthRequirement(_messages.Message):
  """User-defined authentication requirements, including support for
  [JSON Web Token
  (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32).

  Fields:
    audiences: The list of allowed JWT audiences (RFC draft section
      4.1.3); a JWT containing any of them is accepted. When absent, only
      JWTs with audience "https://Service_name/API_name" are accepted
      (e.g. "https://library-
      example.googleapis.com/google.example.library.v1.LibraryService").
      Example: audiences: bookstore_android.apps.googleusercontent.com,
      bookstore_web.apps.googleusercontent.com
    providerId: id from authentication provider. Example: provider_id:
      bookstore_auth
  """

  audiences = _messages.StringField(1)
  providerId = _messages.StringField(2)
class Authentication(_messages.Message):
  """`Authentication` defines the authentication configuration for an API.

  Example for an API targeted for external use:

      name: calendar.googleapis.com
      authentication:
        rules:
        - selector: "*"
          oauth:
            canonical_scopes: https://www.googleapis.com/auth/calendar
        - selector: google.calendar.Delegate
          oauth:
            canonical_scopes: https://www.googleapis.com/auth/calendar.read

  Fields:
    providers: Defines a set of authentication providers that a service
      supports.
    rules: Individual rules for authentication.
  """

  providers = _messages.MessageField('AuthProvider', 1, repeated=True)
  rules = _messages.MessageField('AuthenticationRule', 2, repeated=True)
class AuthenticationRule(_messages.Message):
  """Authentication rules for the service.

  By default, if a method has any authentication requirements, every
  request must include a valid credential matching one of the
  requirements. It's an error to include more than one kind of credential
  in a single request. If a method doesn't have any auth requirements,
  request credentials will be ignored.

  Fields:
    allowWithoutCredential: Whether to allow requests without a
      credential. If quota is enabled, an API key is required for such
      request to pass the quota check.
    oauth: The requirements for OAuth credentials.
    requirements: Requirements for additional authentication providers.
    selector: Selects the methods to which this rule applies. Refer to
      selector for syntax details.
  """

  allowWithoutCredential = _messages.BooleanField(1)
  oauth = _messages.MessageField('OAuthRequirements', 2)
  requirements = _messages.MessageField('AuthRequirement', 3, repeated=True)
  selector = _messages.StringField(4)
class Backend(_messages.Message):
  """`Backend` defines the backend configuration for a service.

  Example:

      backend:
        rules:
        - selector: "*"
          address: calendar-prod-backend.gslb.googleapis.com
        - selector: google.calendar.Calendar.Delegate
          address: calendar-dogfood-backend.gslb.googleapis.com

  Here the default backend for all methods is
  `calendar-prod-backend.gslb.googleapis.com`; for the method `Delegate`
  it is overridden by `calendar-dogfood-backend.gslb.googleapis.com`.

  Fields:
    rules: A list of backend rules providing configuration for individual
      API elements.
  """

  rules = _messages.MessageField('BackendRule', 1, repeated=True)
class BackendRule(_messages.Message):
  """A backend rule provides configuration for an individual API element.

  Fields:
    address: The address of the API backend.
    deadline: The number of seconds to wait for a response from a request.
      The default depends on the deployment context.
    selector: Selects the methods to which this rule applies. Refer to
      selector for syntax details.
  """

  address = _messages.StringField(1)
  deadline = _messages.FloatField(2)
  selector = _messages.StringField(3)
class Billing(_messages.Message):
  """Billing related configuration of the service.

  Metrics to be billed are listed by name (each must be defined in the
  Service.metrics section), e.g.:

      billing:
        metrics:
        - library.googleapis.com/read_calls
        - library.googleapis.com/write_calls

  Billing status checks (performed by the `Check` method of the Service
  Control API) can be enabled and customized per operation via rules,
  e.g. allowing `current` and `delinquent` for a read method but only
  `current` for a write method. Mostly services should only allow
  `current` status when serving requests; both statuses may be allowed
  for read-only requests. If there's no matching selector for operation,
  no billing status check will be performed.

  Fields:
    areaUnderCurveParams: Per resource grouping for delta billing based
      resource configs.
    metrics: Names of the metrics to report to billing. Each name must be
      defined in Service.metrics section.
    rules: A list of billing status rules for configuring billing status
      check.
  """

  areaUnderCurveParams = _messages.MessageField('AreaUnderCurveParams', 1, repeated=True)
  metrics = _messages.StringField(2, repeated=True)
  rules = _messages.MessageField('BillingStatusRule', 3, repeated=True)
class BillingStatusRule(_messages.Message):
  """Defines the billing status requirements for operations.

  When used with the
  [Service Control API](https://cloud.google.com/service-control/), the
  following statuses are supported:

  - **current**: the associated billing account is up to date and capable
    of paying for resource usages.
  - **delinquent**: the associated billing account has a correctable
    problem, such as late payment.

  Mostly services should only allow `current` status when serving
  requests. In addition, services can choose to allow both `current` and
  `delinquent` statuses when serving read-only requests to resources. If
  the list of allowed_statuses is empty, it means no billing requirement.

  Fields:
    allowedStatuses: Allowed billing statuses. The billing status check
      passes if the actual billing status matches any of the provided
      values here.
    selector: Selects the operation names to which this rule applies.
      Refer to selector for syntax details.
  """
  allowedStatuses = _messages.StringField(1, repeated=True)
  selector = _messages.StringField(2)
class Context(_messages.Message):
  """`Context` defines which contexts an API requests.

  Example:

      context:
        rules:
        - selector: "*"
          requested:
          - google.rpc.context.ProjectContext
          - google.rpc.context.OriginContext

  The above specifies that all methods in the API request
  `google.rpc.context.ProjectContext` and
  `google.rpc.context.OriginContext`. Available context types are defined
  in package `google.rpc.context`.

  Fields:
    rules: List of rules for context, applicable to methods.
  """
  rules = _messages.MessageField('ContextRule', 1, repeated=True)
class ContextRule(_messages.Message):
  """A context rule provides information about the context for an
  individual API element.

  Fields:
    provided: A list of full type names of provided contexts.
    requested: A list of full type names of requested contexts.
    selector: Selects the methods to which this rule applies. Refer to
      selector for syntax details.
  """
  provided = _messages.StringField(1, repeated=True)
  requested = _messages.StringField(2, repeated=True)
  selector = _messages.StringField(3)
class Control(_messages.Message):
  """Selects and configures the service controller used by the service.

  The service controller handles features like abuse, quota, billing,
  logging, monitoring, etc.

  Example:

      control:
        environment: usagemanager.googleprod.com

  Supported Usage Manager environments:
  * `usagemanager.googleprod.com`
  * `staging-usagemanager.googleprod.com`

  Supported Service Control environments:
  * `servicecontrol.googleapis.com`
  * `staging-servicecontrol.sandbox.googleapis.com`
  * `testgaia-servicecontrol.sandbox.googleapis.com`

  Fields:
    environment: The service control environment to use. If empty, no
      control plane feature (like quota and billing) will be enabled.
  """
  environment = _messages.StringField(1)
class ConvertConfigRequest(_messages.Message):
  """Request message for the `ConvertConfig` method.

  Fields:
    serviceName: The service name to use for constructing the normalized
      service configuration equivalent of the provided configuration
      specification.
    swaggerSpec: The swagger specification for an API.
  """
  serviceName = _messages.StringField(1)
  swaggerSpec = _messages.MessageField('SwaggerSpec', 2)
class ConvertConfigResponse(_messages.Message):
  """Response message for the `ConvertConfig` method.

  Fields:
    diagnostics: Any errors or warnings that occurred during config
      conversion.
    serviceConfig: The service configuration. Not set if errors occurred
      during conversion.
  """
  diagnostics = _messages.MessageField('Diagnostic', 1, repeated=True)
  serviceConfig = _messages.MessageField('Service', 2)
class CustomError(_messages.Message):
  """Customize service error responses.

  For example, list any service specific protobuf types that can appear in
  error detail lists of error responses.

  Example:

      custom_error:
        types:
        - google.foo.v1.CustomError
        - google.foo.v1.AnotherError

  Fields:
    rules: The list of custom error rules to select to which messages this
      should apply.
    types: The list of custom error detail types, e.g.
      'google.foo.v1.CustomError'.
  """
  rules = _messages.MessageField('CustomErrorRule', 1, repeated=True)
  types = _messages.StringField(2, repeated=True)
class CustomErrorRule(_messages.Message):
  """A custom error rule.

  Fields:
    isErrorType: Mark this message as possible payload in error response.
      Otherwise, objects of this type will be filtered when they appear in
      error payload.
    selector: Selects messages to which this rule applies. Refer to
      selector for syntax details.
  """
  isErrorType = _messages.BooleanField(1)
  selector = _messages.StringField(2)
class CustomHttpPattern(_messages.Message):
  """A custom pattern used for defining a custom HTTP verb.

  Fields:
    kind: The name of this custom HTTP verb.
    path: The path matched by this custom verb.
  """
  kind = _messages.StringField(1)
  path = _messages.StringField(2)
class CustomerSettings(_messages.Message):
  """Settings that control how a customer (identified by a billing
  account) uses a service.

  Fields:
    customerId: ID for the customer that consumes the service. The
      supported types of customers are:
      1. domain:{domain} -- a Google Apps domain name, for example
         google.com.
      2. billingAccount:{billing_account_id} -- a Google Cloud Platform
         billing account, for example 123456-7890ab-cdef12.
    quotaSettings: Settings that control how much or how fast the service
      can be used by the consumer projects owned by the customer
      collectively.
    serviceName: The name of the service. See the `ServiceManager`
      overview for naming requirements.
  """
  customerId = _messages.StringField(1)
  quotaSettings = _messages.MessageField('QuotaSettings', 2)
  serviceName = _messages.StringField(3)
class Diagnostic(_messages.Message):
  """A collection that represents a diagnostic message (error or warning).

  Enums:
    KindValueValuesEnum: The kind of diagnostic information provided.

  Fields:
    kind: The kind of diagnostic information provided.
    location: Location of the cause or context of the diagnostic
      information.
    message: The string message of the diagnostic information.
  """

  class KindValueValuesEnum(_messages.Enum):
    """The kind of diagnostic information provided.

    Values:
      WARNING: Warnings and errors
      ERROR: Only errors
    """
    WARNING = 0
    ERROR = 1

  kind = _messages.EnumField('KindValueValuesEnum', 1)
  location = _messages.StringField(2)
  message = _messages.StringField(3)
class Documentation(_messages.Message):
  """`Documentation` provides the information for describing a service.

  Example:

      documentation:
        summary: >
          The Google Calendar API gives access to most calendar features.
        pages:
        - name: Overview
          content: (== include google/foo/overview.md ==)
        - name: Tutorial
          content: (== include google/foo/tutorial.md ==)
          subpages:
          - name: Java
            content: (== include google/foo/tutorial_java.md ==)
        rules:
        - selector: google.calendar.Calendar.Get
          description: >
            ...
        - selector: google.calendar.Calendar.Put
          description: >
            ...

  Documentation is provided in markdown syntax. In addition to standard
  markdown features, definition lists, tables and fenced code blocks are
  supported. Section headers can be provided and are interpreted relative
  to the section nesting of the context where a documentation fragment is
  embedded. Documentation from the IDL is merged with documentation
  defined via the config at normalization time, where documentation
  provided by config rules overrides IDL provided.

  A number of constructs specific to the API platform are supported in
  documentation text:
  - Reference a proto element: `[fully.qualified.proto.name][]`.
  - Override the display text of a link:
    `[display text][fully.qualified.proto.name]`.
  - Exclude text from docs: `(-- internal comment --)`.
  - Conditional comment, rendered only when the `BETA` visibility label is
    available: `(--BETA: comment for BETA users --)`.

  A few directives are available; directives must appear on a single line
  to be properly identified:
  - `(== include path/to/file ==)` includes a markdown file from an
    external source.
  - `(== resource_for v1.shelves.books ==)` marks a message to be the
    resource of a collection in REST view; if not specified, tools attempt
    to infer the resource from the operations in a collection.
  - `suppress_warning` does not directly affect documentation and is
    documented together with service config validation.

  Fields:
    documentationRootUrl: The URL to the root of documentation.
    overview: Declares a single overview page, a shortcut for a pages-style
      declaration with a single "Overview" page. Note: you cannot specify
      both the `overview` field and the `pages` field.
    pages: The top level pages for the documentation set.
    rules: Documentation rules for individual elements of the service.
    summary: A short summary of what the service does. Can only be
      provided by plain text.
  """
  documentationRootUrl = _messages.StringField(1)
  overview = _messages.StringField(2)
  pages = _messages.MessageField('Page', 3, repeated=True)
  rules = _messages.MessageField('DocumentationRule', 4, repeated=True)
  summary = _messages.StringField(5)
class DocumentationRule(_messages.Message):
  """A documentation rule provides information about individual API
  elements.

  Fields:
    deprecationDescription: Deprecation description of the selected
      element(s). It can be provided if an element is marked as
      `deprecated`.
    description: Description of the selected API(s).
    selector: The selector is a comma-separated list of patterns. Each
      pattern is a qualified name of the element which may end in "*",
      indicating a wildcard. Wildcards are only allowed at the end and
      for a whole component of the qualified name, i.e. "foo.*" is ok,
      but not "foo.b*" or "foo.*.bar". To specify a default for all
      applicable elements, the whole pattern "*" is used.
  """
  deprecationDescription = _messages.StringField(1)
  description = _messages.StringField(2)
  selector = _messages.StringField(3)
class EffectiveQuotaGroup(_messages.Message):
  """An effective quota group contains both the metadata for a quota group
  as derived from the service config, and the effective limits in that
  group as calculated from producer and consumer overrides together with
  service defaults.

  Enums:
    BillingInteractionValueValuesEnum: How this quota group interacts with
      the consumer project's billing status.

  Fields:
    baseGroup: The service configuration for this quota group, minus the
      quota limits, which are replaced by the effective limits below.
    billingInteraction: A BillingInteractionValueValuesEnum attribute.
    effectiveLimits: The effective limits for this quota group.
      DEPRECATED: new code should use |quotas.limit|.
    quotas: The usage and limit information for each limit within this
      quota group.
  """

  class BillingInteractionValueValuesEnum(_messages.Enum):
    """How this quota group interacts with the consumer project's billing
    status.

    Values:
      BILLING_INTERACTION_UNSPECIFIED: The interaction between this quota
        group and the project billing status is unspecified.
      NONBILLABLE_ONLY: This quota group is enforced only when the
        consumer project is not billable.
      BILLABLE_ONLY: This quota group is enforced only when the consumer
        project is billable.
      ANY_BILLING_STATUS: This quota group is enforced regardless of the
        consumer project's billing status.
    """
    BILLING_INTERACTION_UNSPECIFIED = 0
    NONBILLABLE_ONLY = 1
    BILLABLE_ONLY = 2
    ANY_BILLING_STATUS = 3

  baseGroup = _messages.MessageField('QuotaGroup', 1)
  billingInteraction = _messages.EnumField('BillingInteractionValueValuesEnum', 2)
  effectiveLimits = _messages.MessageField('EffectiveQuotaLimit', 3, repeated=True)
  quotas = _messages.MessageField('QuotaInfo', 4, repeated=True)
class EffectiveQuotaLimit(_messages.Message):
  """An effective quota limit contains the metadata for a quota limit as
  derived from the service config, together with fields that describe the
  effective limit value and what overrides can be applied to it.

  Fields:
    baseLimit: The service's configuration for this quota limit.
    consumerOverrideAllowed: Whether a consumer override is allowed.
    effectiveLimit: The effective limit value, based on the stored
      producer and consumer overrides and the service defaults.
    key: The key used to identify this limit when applying overrides. The
      consumer_overrides and producer_overrides maps are keyed by strings
      of the form "QuotaGroupName/QuotaLimitName".
    maxConsumerOverrideAllowed: The maximum override value that a consumer
      may specify.
  """
  baseLimit = _messages.MessageField('QuotaLimit', 1)
  consumerOverrideAllowed = _messages.BooleanField(2)
  effectiveLimit = _messages.IntegerField(3)
  key = _messages.StringField(4)
  maxConsumerOverrideAllowed = _messages.IntegerField(5)
class Enum(_messages.Message):
  """Enum type definition.

  Enums:
    SyntaxValueValuesEnum: The source syntax.

  Fields:
    enumvalue: Enum value definitions.
    name: Enum type name.
    options: Protocol buffer options.
    sourceContext: The source context.
    syntax: The source syntax.
  """

  class SyntaxValueValuesEnum(_messages.Enum):
    """The source syntax.

    Values:
      SYNTAX_PROTO2: Syntax `proto2`.
      SYNTAX_PROTO3: Syntax `proto3`.
    """
    SYNTAX_PROTO2 = 0
    SYNTAX_PROTO3 = 1

  enumvalue = _messages.MessageField('EnumValue', 1, repeated=True)
  name = _messages.StringField(2)
  options = _messages.MessageField('Option', 3, repeated=True)
  sourceContext = _messages.MessageField('SourceContext', 4)
  syntax = _messages.EnumField('SyntaxValueValuesEnum', 5)
class EnumValue(_messages.Message):
  """Enum value definition.

  Fields:
    name: Enum value name.
    number: Enum value number.
    options: Protocol buffer options.
  """
  name = _messages.StringField(1)
  number = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  options = _messages.MessageField('Option', 3, repeated=True)
class Field(_messages.Message):
  """A single field of a message type.

  Enums:
    CardinalityValueValuesEnum: The field cardinality.
    KindValueValuesEnum: The field type.

  Fields:
    cardinality: The field cardinality.
    defaultValue: The string value of the default value of this field.
      Proto2 syntax only.
    jsonName: The field JSON name.
    kind: The field type.
    name: The field name.
    number: The field number.
    oneofIndex: The index of the field type in `Type.oneofs`, for message
      or enumeration types. The first type has index 1; zero means the
      type is not in the list.
    options: The protocol buffer options.
    packed: Whether to use alternative packed wire representation.
    typeUrl: The field type URL, without the scheme, for message or
      enumeration types. Example:
      `"type.googleapis.com/google.protobuf.Timestamp"`.
  """

  class CardinalityValueValuesEnum(_messages.Enum):
    """The field cardinality.

    Values:
      CARDINALITY_UNKNOWN: For fields with unknown cardinality.
      CARDINALITY_OPTIONAL: For optional fields.
      CARDINALITY_REQUIRED: For required fields. Proto2 syntax only.
      CARDINALITY_REPEATED: For repeated fields.
    """
    CARDINALITY_UNKNOWN = 0
    CARDINALITY_OPTIONAL = 1
    CARDINALITY_REQUIRED = 2
    CARDINALITY_REPEATED = 3

  class KindValueValuesEnum(_messages.Enum):
    """The field type.

    Values:
      TYPE_UNKNOWN: Field type unknown.
      TYPE_DOUBLE: Field type double.
      TYPE_FLOAT: Field type float.
      TYPE_INT64: Field type int64.
      TYPE_UINT64: Field type uint64.
      TYPE_INT32: Field type int32.
      TYPE_FIXED64: Field type fixed64.
      TYPE_FIXED32: Field type fixed32.
      TYPE_BOOL: Field type bool.
      TYPE_STRING: Field type string.
      TYPE_GROUP: Field type group. Proto2 syntax only, and deprecated.
      TYPE_MESSAGE: Field type message.
      TYPE_BYTES: Field type bytes.
      TYPE_UINT32: Field type uint32.
      TYPE_ENUM: Field type enum.
      TYPE_SFIXED32: Field type sfixed32.
      TYPE_SFIXED64: Field type sfixed64.
      TYPE_SINT32: Field type sint32.
      TYPE_SINT64: Field type sint64.
    """
    TYPE_UNKNOWN = 0
    TYPE_DOUBLE = 1
    TYPE_FLOAT = 2
    TYPE_INT64 = 3
    TYPE_UINT64 = 4
    TYPE_INT32 = 5
    TYPE_FIXED64 = 6
    TYPE_FIXED32 = 7
    TYPE_BOOL = 8
    TYPE_STRING = 9
    TYPE_GROUP = 10
    TYPE_MESSAGE = 11
    TYPE_BYTES = 12
    TYPE_UINT32 = 13
    TYPE_ENUM = 14
    TYPE_SFIXED32 = 15
    TYPE_SFIXED64 = 16
    TYPE_SINT32 = 17
    TYPE_SINT64 = 18

  cardinality = _messages.EnumField('CardinalityValueValuesEnum', 1)
  defaultValue = _messages.StringField(2)
  jsonName = _messages.StringField(3)
  kind = _messages.EnumField('KindValueValuesEnum', 4)
  name = _messages.StringField(5)
  number = _messages.IntegerField(6, variant=_messages.Variant.INT32)
  oneofIndex = _messages.IntegerField(7, variant=_messages.Variant.INT32)
  options = _messages.MessageField('Option', 8, repeated=True)
  packed = _messages.BooleanField(9)
  typeUrl = _messages.StringField(10)
class File(_messages.Message):
  """A single swagger specification file.

  Fields:
    contents: The contents of the swagger spec file.
    path: The relative path of the swagger spec file.
  """
  contents = _messages.StringField(1)
  path = _messages.StringField(2)
class Http(_messages.Message):
  """Defines the HTTP configuration for a service.

  Contains a list of HttpRule, each specifying the mapping of an RPC
  method to one or more HTTP REST API methods.

  Fields:
    rules: A list of HTTP rules for configuring the HTTP REST API methods.
  """
  rules = _messages.MessageField('HttpRule', 1, repeated=True)
class HttpRule(_messages.Message):
  """`HttpRule` defines the mapping of an RPC method to one or more HTTP
  REST APIs.

  The mapping determines what portions of the request message are
  populated from the path, query parameters, or body of the HTTP request.
  The mapping is typically specified as a `google.api.http` annotation
  (see "google/api/annotations.proto" for details) and consists of a field
  specifying the path template and method kind. The path template can
  refer to fields in the request message, for example:

      service Messaging {
        rpc GetMessage(GetMessageRequest) returns (Message) {
          option (google.api.http).get = "/v1/messages/{message_id}";
        }
      }
      message GetMessageRequest {
        string message_id = 1;  // mapped to the URL
      }

  enables an automatic, bidirectional mapping of HTTP JSON to RPC:
  `GET /v1/messages/123456` maps to `GetMessage(message_id: "123456")`.

  In general, not only fields but also field paths can be referenced from
  a path pattern. Fields mapped to the path pattern cannot be repeated and
  must have a primitive (non-message) type. Any fields in the request
  message which are not bound by the path pattern automatically become
  (optional) HTTP query parameters; fields mapped to HTTP parameters must
  have a primitive type or a repeated primitive type (repeated values may
  be repeated in the URL, as in `...?param=A&param=B`).

  For HTTP method kinds which allow a request body, the `body` field
  specifies the mapping: either the name of a single request field to map
  to the body, or the special name `*` meaning every field not bound by
  the path template is mapped to the request body (the representation of
  the JSON in the request body is determined by protos JSON encoding).
  Note that when using `*`, no HTTP query parameters remain, as all fields
  not bound by the path end in the body; the common usage of `*` is in
  custom methods which don't use the URL at all for transferring data.

  Multiple HTTP methods may be defined for one RPC by using the
  `additional_bindings` option; nested bindings must not themselves
  contain `additional_bindings` (one level deep only).

  Rules for HTTP mapping:
  1. The `body` field specifies either `*` or a field path, or is omitted.
     If omitted, it assumes there is no HTTP body.
  2. Leaf fields (recursive expansion of nested messages in the request)
     can be classified into three types:
     (a) matched in the URL template;
     (b) covered by body (if body is `*`, everything except (a) fields;
         else everything under the body field);
     (c) all other fields.
  3. URL query parameters found in the HTTP request are mapped to (c)
     fields.
  4. Any body sent with an HTTP request can contain only (b) fields.

  The syntax of the path template is:

      Template = "/" Segments [ Verb ] ;
      Segments = Segment { "/" Segment } ;
      Segment  = "*" | "**" | LITERAL | Variable ;
      Variable = "{" FieldPath [ "=" Segments ] "}" ;
      FieldPath = IDENT { "." IDENT } ;
      Verb     = ":" LITERAL ;

  `*` matches a single path segment (RFC 6570 Section 3.2.2 Simple String
  Expansion); `**` matches zero or more path segments (RFC 6570 Section
  3.2.3 Reserved Expansion); `LITERAL` matches literal text in the URL
  path. A `Variable` matches the entire path as specified by its template;
  this nested template must not contain further variables. If a variable
  matches a single path segment, its template may be omitted, e.g. `{var}`
  is equivalent to `{var=*}`. NOTE: the field paths in variables and in
  the `body` must not refer to repeated fields or map fields.

  Use CustomHttpPattern to specify any HTTP method that is not included in
  the `pattern` field, such as HEAD, or "*" to leave the HTTP method
  unspecified for a given URL path rule. The wild-card rule is useful for
  services that provide content to Web (HTML) clients.

  Fields:
    additionalBindings: Additional HTTP bindings for the selector. Nested
      bindings must not contain an `additional_bindings` field themselves
      (that is, the nesting may only be one level deep).
    body: The name of the request field whose value is mapped to the HTTP
      body, or `*` for mapping all fields not captured by the path pattern
      to the HTTP body. NOTE: the referred field must not be a repeated
      field.
    custom: Custom pattern is used for defining custom verbs.
    delete: Used for deleting a resource.
    get: Used for listing and getting information about resources.
    mediaDownload: Do not use this. For media support, add instead
      [][google.bytestream.RestByteStream] as an API to your
      configuration.
    mediaUpload: Do not use this. For media support, add instead
      [][google.bytestream.RestByteStream] as an API to your
      configuration.
    patch: Used for updating a resource.
    post: Used for creating a resource.
    put: Used for updating a resource.
    selector: Selects methods to which this rule applies. Refer to
      selector for syntax details.
  """
  additionalBindings = _messages.MessageField('HttpRule', 1, repeated=True)
  body = _messages.StringField(2)
  custom = _messages.MessageField('CustomHttpPattern', 3)
  delete = _messages.StringField(4)
  get = _messages.StringField(5)
  mediaDownload = _messages.MessageField('MediaDownload', 6)
  mediaUpload = _messages.MessageField('MediaUpload', 7)
  patch = _messages.StringField(8)
  post = _messages.StringField(9)
  put = _messages.StringField(10)
  selector = _messages.StringField(11)
class LabelDescriptor(_messages.Message):
  """A description of a label.

  Enums:
    ValueTypeValueValuesEnum: The type of data that can be assigned to the
      label.

  Fields:
    description: A human-readable description for the label.
    key: The label key.
    valueType: The type of data that can be assigned to the label.
  """

  class ValueTypeValueValuesEnum(_messages.Enum):
    """The type of data that can be assigned to the label.

    Values:
      STRING: A variable-length string. This is the default.
      BOOL: Boolean; true or false.
      INT64: A 64-bit signed integer.
    """
    STRING = 0
    BOOL = 1
    INT64 = 2

  description = _messages.StringField(1)
  key = _messages.StringField(2)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 3)
class ListServicesResponse(_messages.Message):
  """Response message for the `ListServices` method.

  Fields:
    nextPageToken: Token that can be passed to `ListServices` to resume a
      paginated query.
    services: The results of the query.
  """
  nextPageToken = _messages.StringField(1)
  services = _messages.MessageField('ManagedService', 2, repeated=True)
class LogDescriptor(_messages.Message):
  """A description of a log type.

  Example in YAML format:

      - name: library.googleapis.com/activity_history
        description: The history of borrowing and returning library items.
        display_name: Activity
        labels:
        - key: /customer_id
          description: Identifier of a library customer

  Fields:
    description: A human-readable description of this log. This
      information appears in the documentation and can contain details.
    displayName: The human-readable name for this log. This information
      appears on the user interface and should be concise.
    labels: The set of labels that are available to describe a specific
      log entry. Runtime requests that contain labels not specified here
      are considered invalid.
    name: The name of the log. It must be less than 512 characters long
      and can include the following characters: upper- and lower-case
      alphanumeric characters [A-Za-z0-9], and punctuation characters
      including slash, underscore, hyphen, period [/_-.].
  """
  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  name = _messages.StringField(4)
class Logging(_messages.Message):
  """Logging configuration of the service.

  The following example shows how to configure logs to be sent to the
  producer and consumer projects. In the example, the
  `library.googleapis.com/activity_history` log is sent to both the
  producer and consumer projects, whereas the
  `library.googleapis.com/purchase_history` log is only sent to the
  producer project:

      monitored_resources:
      - type: library.googleapis.com/branch
        labels:
        - key: /city
          description: The city where the library branch is located in.
        - key: /name
          description: The name of the branch.
      logs:
      - name: library.googleapis.com/activity_history
        labels:
        - key: /customer_id
      - name: library.googleapis.com/purchase_history
      logging:
        producer_destinations:
        - monitored_resource: library.googleapis.com/branch
          logs:
          - library.googleapis.com/activity_history
          - library.googleapis.com/purchase_history
        consumer_destinations:
        - monitored_resource: library.googleapis.com/branch
          logs:
          - library.googleapis.com/activity_history

  Fields:
    consumerDestinations: Logging configurations for sending logs to the
      consumer project. There can be multiple consumer destinations, each
      one must have a different monitored resource type. A log can be used
      in at most one consumer destination.
    producerDestinations: Logging configurations for sending logs to the
      producer project. There can be multiple producer destinations, each
      one must have a different monitored resource type. A log can be used
      in at most one producer destination.
  """
  consumerDestinations = _messages.MessageField('LoggingDestination', 1, repeated=True)
  producerDestinations = _messages.MessageField('LoggingDestination', 2, repeated=True)
class LoggingDestination(_messages.Message):
  """Configuration of a specific logging destination (the producer project
  or the consumer project).

  Fields:
    logs: Names of the logs to be sent to this destination. Each name must
      be defined in the Service.logs section.
    monitoredResource: The monitored resource type. The type must be
      defined in Service.monitored_resources section.
  """
  logs = _messages.StringField(1, repeated=True)
  monitoredResource = _messages.StringField(2)
class ManagedService(_messages.Message):
  """The full representation of an API Service that is managed by the
  `ServiceManager` API.

  Includes both the service configuration, as well as other control plane
  deployment related information.

  Fields:
    generation: A server-assigned monotonically increasing number that
      changes whenever a mutation is made to the `ManagedService` or any
      of its components via the `ServiceManager` API.
    operations: Read-only view of pending operations affecting this
      resource, if requested.
    producerProjectId: ID of the project that produces and owns this
      service.
    projectSettings: Read-only view of settings for a particular consumer
      project, if requested.
    serviceConfig: The service's generated configuration.
    serviceName: The name of the service. See the `ServiceManager`
      overview for naming requirements. This name must match
      `google.api.Service.name` in the `service_config` field.
  """
  generation = _messages.IntegerField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
  producerProjectId = _messages.StringField(3)
  projectSettings = _messages.MessageField('ProjectSettings', 4)
  serviceConfig = _messages.MessageField('Service', 5)
  serviceName = _messages.StringField(6)
class MediaDownload(_messages.Message):
  """Do not use this. For media support, add instead
  [][google.bytestream.RestByteStream] as an API to your configuration.

  Fields:
    enabled: Whether download is enabled.
  """
  enabled = _messages.BooleanField(1)
class MediaUpload(_messages.Message):
  """Do not use this. For media support, add instead
  [][google.bytestream.RestByteStream] as an API to your configuration.

  Fields:
    enabled: Whether upload is enabled.
  """
  enabled = _messages.BooleanField(1)
class Method(_messages.Message):
  """Method represents a method of an api.

  Enums:
    SyntaxValueValuesEnum: The source syntax of this method.

  Fields:
    name: The simple name of this method.
    options: Any metadata attached to the method.
    requestStreaming: If true, the request is streamed.
    requestTypeUrl: A URL of the input message type.
    responseStreaming: If true, the response is streamed.
    responseTypeUrl: The URL of the output message type.
    syntax: The source syntax of this method.
  """

  class SyntaxValueValuesEnum(_messages.Enum):
    """The source syntax of this method.

    Values:
      SYNTAX_PROTO2: Syntax `proto2`.
      SYNTAX_PROTO3: Syntax `proto3`.
    """
    SYNTAX_PROTO2 = 0
    SYNTAX_PROTO3 = 1

  name = _messages.StringField(1)
  options = _messages.MessageField('Option', 2, repeated=True)
  requestStreaming = _messages.BooleanField(3)
  requestTypeUrl = _messages.StringField(4)
  responseStreaming = _messages.BooleanField(5)
  responseTypeUrl = _messages.StringField(6)
  syntax = _messages.EnumField('SyntaxValueValuesEnum', 7)
class MetricDescriptor(_messages.Message):
  """Defines a metric type and its schema.

  Enums:
    MetricKindValueValuesEnum: Whether the metric records instantaneous
      values, changes to a value, etc.
    ValueTypeValueValuesEnum: Whether the measurement is an integer, a
      floating-point number, etc.

  Fields:
    description: A detailed description of the metric, which can be used in
      documentation.
    displayName: A concise name for the metric, which can be displayed in user
      interfaces. Use sentence case without an ending period, for example
      "Request count".
    labels: The set of labels that can be used to describe a specific instance
      of this metric type. For example, the
      `compute.googleapis.com/instance/network/received_bytes_count` metric
      type has a label, `loadbalanced`, that specifies whether the traffic was
      received through a load balanced IP address.
    metricKind: Whether the metric records instantaneous values, changes to a
      value, etc.
    name: Resource name. The format of the name may vary between different
      implementations. For examples:
      projects/{project_id}/metricDescriptors/{type=**}
      metricDescriptors/{type=**}
    type: The metric type including a DNS name prefix, for example
      `"compute.googleapis.com/instance/cpu/utilization"`. Metric types should
      use a natural hierarchical grouping such as the following:
      compute.googleapis.com/instance/cpu/utilization
      compute.googleapis.com/instance/disk/read_ops_count
      compute.googleapis.com/instance/network/received_bytes_count Note that
      if the metric type changes, the monitoring data will be discontinued,
      and anything that depends on it will break, such as monitoring
      dashboards, alerting rules and quota limits. Therefore, once a metric
      has been published, its type should be immutable.
    unit: The unit in which the metric value is reported. It is only
      applicable if the `value_type` is `INT64`, `DOUBLE`, or `DISTRIBUTION`.
      The supported units are a subset of [The Unified Code for Units of
      Measure](http://unitsofmeasure.org/ucum.html) standard: **Basic units
      (UNIT)** * `bit` bit * `By` byte * `s` second * `min` minute
      * `h` hour * `d` day **Prefixes (PREFIX)** * `k` kilo
      (10**3) * `M` mega (10**6) * `G` giga (10**9) * `T`
      tera (10**12) * `P` peta (10**15) * `E` exa (10**18) *
      `Z` zetta (10**21) * `Y` yotta (10**24) * `m` milli
      (10**-3) * `u` micro (10**-6) * `n` nano (10**-9) * `p`
      pico (10**-12) * `f` femto (10**-15) * `a` atto
      (10**-18) * `z` zepto (10**-21) * `y` yocto (10**-24) * `Ki`
      kibi (2**10) * `Mi` mebi (2**20) * `Gi` gibi (2**30) *
      `Ti` tebi (2**40) **Grammar** The grammar includes the
      dimensionless unit `1`, such as `1/s`. The grammar also includes these
      connectors: * `/` division (as an infix operator, e.g. `1/s`). * `.`
      multiplication (as an infix operator, e.g. `GBy.d`) The grammar for a
      unit is as follows: Expression = Component { "." Component } { "/"
      Component } ; Component = [ PREFIX ] UNIT [ Annotation ]
      | Annotation | "1" ; Annotation = "{"
      NAME "}" ; Notes: * `Annotation` is just a comment if it follows a
      `UNIT` and is equivalent to `1` if it is used alone. For examples,
      `{requests}/s == 1/s`, `By{transmitted}/s == By/s`. * `NAME` is a
      sequence of non-blank printable ASCII characters not containing '{'
      or '}'.
    valueType: Whether the measurement is an integer, a floating-point number,
      etc.
  """
  class MetricKindValueValuesEnum(_messages.Enum):
    """Whether the metric records instantaneous values, changes to a value,
    etc.

    Values:
      METRIC_KIND_UNSPECIFIED: Do not use this default value.
      GAUGE: Instantaneous measurements of a varying quantity.
      DELTA: Changes over non-overlapping time intervals.
      CUMULATIVE: Cumulative value over time intervals that can overlap. The
        overlapping intervals must have the same start time.
    """
    METRIC_KIND_UNSPECIFIED = 0
    GAUGE = 1
    DELTA = 2
    CUMULATIVE = 3
  class ValueTypeValueValuesEnum(_messages.Enum):
    """Whether the measurement is an integer, a floating-point number, etc.

    Values:
      VALUE_TYPE_UNSPECIFIED: Do not use this default value.
      BOOL: The value is a boolean. This value type can be used only if the
        metric kind is `GAUGE`.
      INT64: The value is a signed 64-bit integer.
      DOUBLE: The value is a double precision floating point number.
      STRING: The value is a text string. This value type can be used only if
        the metric kind is `GAUGE`.
      DISTRIBUTION: The value is a `Distribution`.
      MONEY: The value is money.
    """
    VALUE_TYPE_UNSPECIFIED = 0
    BOOL = 1
    INT64 = 2
    DOUBLE = 3
    STRING = 4
    DISTRIBUTION = 5
    MONEY = 6
  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  metricKind = _messages.EnumField('MetricKindValueValuesEnum', 4)
  name = _messages.StringField(5)
  type = _messages.StringField(6)
  unit = _messages.StringField(7)
  valueType = _messages.EnumField('ValueTypeValueValuesEnum', 8)
class Mixin(_messages.Message):
  """Declares an API to be included in this API. The including API must
  redeclare all the methods from the included API, but documentation and
  options are inherited as follows: - If after comment and whitespace
  stripping, the documentation string of the redeclared method is empty, it
  will be inherited from the original method. - Each annotation belonging
  to the service config (http, visibility) which is not set in the
  redeclared method will be inherited. - If an http annotation is
  inherited, the path pattern will be modified as follows. Any version
  prefix will be replaced by the version of the including API plus the root
  path if specified. Example of a simple mixin: package google.acl.v1;
  service AccessControl { // Get the underlying ACL object. rpc
  GetAcl(GetAclRequest) returns (Acl) { option (google.api.http).get =
  "/v1/{resource=**}:getAcl"; } } package google.storage.v2;
  service Storage { // rpc GetAcl(GetAclRequest) returns (Acl);
  // Get a data record. rpc GetData(GetDataRequest) returns (Data) {
  option (google.api.http).get = "/v2/{resource=**}"; } } Example
  of a mixin configuration: apis: - name: google.storage.v2.Storage
  mixins: - name: google.acl.v1.AccessControl The mixin construct
  implies that all methods in `AccessControl` are also declared with same name
  and request/response types in `Storage`. A documentation generator or
  annotation processor will see the effective `Storage.GetAcl` method after
  inheriting documentation and annotations as follows: service Storage {
  // Get the underlying ACL object. rpc GetAcl(GetAclRequest) returns
  (Acl) { option (google.api.http).get = "/v2/{resource=**}:getAcl";
  } ... } Note how the version in the path pattern changed from
  `v1` to `v2`. If the `root` field in the mixin is specified, it should be a
  relative path under which inherited HTTP paths are placed. Example:
  apis: - name: google.storage.v2.Storage mixins: - name:
  google.acl.v1.AccessControl root: acls This implies the following
  inherited HTTP annotation: service Storage { // Get the
  underlying ACL object. rpc GetAcl(GetAclRequest) returns (Acl) {
  option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; }
  ... }

  Fields:
    name: The fully qualified name of the API which is included.
    root: If non-empty specifies a path under which inherited HTTP paths are
      rooted.
  """
  name = _messages.StringField(1)
  root = _messages.StringField(2)
class MonitoredResourceDescriptor(_messages.Message):
  """An object that describes the schema of a MonitoredResource object using a
  type name and a set of labels. For example, the monitored resource
  descriptor for Google Compute Engine VM instances has a type of
  `"gce_instance"` and specifies the use of the labels `"instance_id"` and
  `"zone"` to identify particular VM instances. Different APIs can support
  different monitored resource types. APIs generally provide a `list` method
  that returns the monitored resource descriptors used by the API.

  Fields:
    description: Optional. A detailed description of the monitored resource
      type that might be used in documentation.
    displayName: Optional. A concise name for the monitored resource type that
      might be displayed in user interfaces. For example, `"Google Cloud SQL
      Database"`.
    labels: Required. A set of labels used to describe instances of this
      monitored resource type. For example, an individual Google Cloud SQL
      database is identified by values for the labels `"database_id"` and
      `"zone"`.
    name: Optional. The resource name of the monitored resource descriptor:
      `"projects/<project_id>/monitoredResourceDescriptors/<type>"` where
      <type> is the value of the `type` field in this object and
      <project_id> is a project ID that provides API-specific context
      for accessing the type. APIs that do not use project information can
      use the resource name format `"monitoredResourceDescriptors/<type>"`.
    type: Required. The monitored resource type. For example, the type
      `"cloudsql_database"` represents databases in Google Cloud SQL.
  """
  description = _messages.StringField(1)
  displayName = _messages.StringField(2)
  labels = _messages.MessageField('LabelDescriptor', 3, repeated=True)
  name = _messages.StringField(4)
  type = _messages.StringField(5)
class Monitoring(_messages.Message):
  """Monitoring configuration of the service. The example below shows how to
  configure monitored resources and metrics for monitoring. In the example, a
  monitored resource and two metrics are defined. The
  `library.googleapis.com/book/returned_count` metric is sent to both producer
  and consumer projects, whereas the
  `library.googleapis.com/book/overdue_count` metric is only sent to the
  consumer project. monitored_resources: - type:
  library.googleapis.com/branch labels: - key: /city
  description: The city where the library branch is located in. - key:
  /name description: The name of the branch. metrics: - name:
  library.googleapis.com/book/returned_count metric_kind: DELTA
  value_type: INT64 labels: - key: /customer_id - name:
  library.googleapis.com/book/overdue_count metric_kind: GAUGE
  value_type: INT64 labels: - key: /customer_id monitoring:
  producer_destinations: - monitored_resource:
  library.googleapis.com/branch metrics: -
  library.googleapis.com/book/returned_count consumer_destinations:
  - monitored_resource: library.googleapis.com/branch metrics:
  - library.googleapis.com/book/returned_count -
  library.googleapis.com/book/overdue_count

  Fields:
    consumerDestinations: Monitoring configurations for sending metrics to the
      consumer project. There can be multiple consumer destinations, each one
      must have a different monitored resource type. A metric can be used in
      at most one consumer destination.
    producerDestinations: Monitoring configurations for sending metrics to the
      producer project. There can be multiple producer destinations, each one
      must have a different monitored resource type. A metric can be used in
      at most one producer destination.
  """
  consumerDestinations = _messages.MessageField('MonitoringDestination', 1, repeated=True)
  producerDestinations = _messages.MessageField('MonitoringDestination', 2, repeated=True)
class MonitoringDestination(_messages.Message):
  """Configuration of a specific monitoring destination (the producer project
  or the consumer project).

  Fields:
    metrics: Names of the metrics to report to this monitoring destination.
      Each name must be defined in Service.metrics section.
    monitoredResource: The monitored resource type. The type must be defined
      in Service.monitored_resources section.
  """
  metrics = _messages.StringField(1, repeated=True)
  monitoredResource = _messages.StringField(2)
class OAuthRequirements(_messages.Message):
  """OAuth scopes are a way to define data and permissions on data. For
  example, there are scopes defined for "Read-only access to Google Calendar"
  and "Access to Cloud Platform". Users can consent to a scope for an
  application, giving it permission to access that data on their behalf.
  OAuth scope specifications should be fairly coarse grained; a user will need
  to see and understand the text description of what your scope means. In
  most cases: use one or at most two OAuth scopes for an entire family of
  products. If your product has multiple APIs, you should probably be sharing
  the OAuth scope across all of those APIs. When you need finer grained OAuth
  consent screens: talk with your product management about how developers will
  use them in practice. Please note that even though each of the canonical
  scopes is enough for a request to be accepted and passed to the backend, a
  request can still fail due to the backend requiring additional scopes or
  permissions.

  Fields:
    canonicalScopes: The list of publicly documented OAuth scopes that are
      allowed access. An OAuth token containing any of these scopes will be
      accepted. Example: canonical_scopes:
      https://www.googleapis.com/auth/calendar,
      https://www.googleapis.com/auth/calendar.read
  """
  canonicalScopes = _messages.StringField(1)
class Operation(_messages.Message):
  """This resource represents a long-running operation that is the result of a
  network API call.

  Messages:
    MetadataValue: Service-specific metadata associated with the operation.
      It typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    ResponseValue: The normal response of the operation in case of success.
      If the original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

  Fields:
    done: If the value is `false`, it means the operation is still in
      progress. If true, the operation is completed, and either `error` or
      `response` is available.
    error: The error result of the operation in case of failure.
    metadata: Service-specific metadata associated with the operation. It
      typically contains progress information and common metadata such as
      create time. Some services might not provide such metadata. Any method
      that returns a long-running operation should document the metadata type,
      if any.
    name: The server-assigned name, which is only unique within the same
      service that originally returns it. If you use the default HTTP mapping,
      the `name` should have the format of `operations/some/unique/name`.
    response: The normal response of the operation in case of success. If the
      original method returns no data on success, such as `Delete`, the
      response is `google.protobuf.Empty`. If the original method is standard
      `Get`/`Create`/`Update`, the response should be the resource. For other
      methods, the response should have the type `XxxResponse`, where `Xxx` is
      the original method name. For example, if the original method name is
      `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    """Service-specific metadata associated with the operation. It typically
    contains progress information and common metadata such as create time.
    Some services might not provide such metadata. Any method that returns a
    long-running operation should document the metadata type, if any.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ResponseValue(_messages.Message):
    """The normal response of the operation in case of success. If the
    original method returns no data on success, such as `Delete`, the response
    is `google.protobuf.Empty`. If the original method is standard
    `Get`/`Create`/`Update`, the response should be the resource. For other
    methods, the response should have the type `XxxResponse`, where `Xxx` is
    the original method name. For example, if the original method name is
    `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`.

    Messages:
      AdditionalProperty: An additional property for a ResponseValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a ResponseValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  done = _messages.BooleanField(1)
  error = _messages.MessageField('Status', 2)
  metadata = _messages.MessageField('MetadataValue', 3)
  name = _messages.StringField(4)
  response = _messages.MessageField('ResponseValue', 5)
class OperationMetadata(_messages.Message):
  """The metadata associated with a long running operation resource.

  Fields:
    progressPercentage: Percentage of completion of this operation, ranging
      from 0 to 100.
    resourceNames: The full name of the resources that this operation is
      directly associated with.
    startTime: The start time of the operation.
    steps: Detailed status information for each step. The order is
      undetermined.
  """
  progressPercentage = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  resourceNames = _messages.StringField(2, repeated=True)
  startTime = _messages.StringField(3)
  steps = _messages.MessageField('Step', 4, repeated=True)
class Option(_messages.Message):
  """A protocol buffer option, which can be attached to a message, field,
  enumeration, etc.

  Messages:
    ValueValue: The option's value. For example, `"com.google.protobuf"`.

  Fields:
    name: The option's name. For example, `"java_package"`.
    value: The option's value. For example, `"com.google.protobuf"`.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class ValueValue(_messages.Message):
    """The option's value. For example, `"com.google.protobuf"`.

    Messages:
      AdditionalProperty: An additional property for a ValueValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a ValueValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  name = _messages.StringField(1)
  value = _messages.MessageField('ValueValue', 2)
class Page(_messages.Message):
  """Represents a documentation page. A page can contain subpages to represent
  nested documentation set structure.

  Fields:
    content: The Markdown content of the page. You can use <code>(==
      include {path} ==)</code> to include content from a Markdown file.
    name: The name of the page. It will be used as an identity of the page to
      generate URI of the page, text of the link to this page in navigation,
      etc. The full page name (start from the root page name to this page
      concatenated with `.`) can be used as reference to the page in your
      documentation. For example: <pre><code>pages: - name: Tutorial
      content: (== include tutorial.md ==) subpages: - name: Java
      content: (== include tutorial_java.md ==) </code></pre> You can
      reference `Java` page using Markdown reference link syntax: `Java`.
    subpages: Subpages of this page. The order of subpages specified here will
      be honored in the generated docset.
  """
  content = _messages.StringField(1)
  name = _messages.StringField(2)
  subpages = _messages.MessageField('Page', 3, repeated=True)
class ProjectProperties(_messages.Message):
  """A descriptor for defining project properties for a service. One service
  may have many consumer projects, and the service may want to behave
  differently depending on some properties on the project. For example, a
  project may be associated with a school, or a business, or a government
  agency, a business type property on the project may affect how a service
  responds to the client. This descriptor defines which properties are allowed
  to be set on a project. Example: project_properties: properties:
  - name: NO_WATERMARK type: BOOL description: Allows usage of
  the API without watermarks. - name: EXTENDED_TILE_CACHE_PERIOD
  type: INT64

  Fields:
    properties: List of per consumer project-specific properties.
  """
  properties = _messages.MessageField('Property', 1, repeated=True)
class ProjectSettings(_messages.Message):
  """Settings that control how a consumer project uses a service.

  Messages:
    PropertiesValue: Service-defined per-consumer properties. A key-value
      mapping a string key to a google.protobuf.ListValue proto. Values in the
      list are typed as defined in the Service configuration's
      consumer.properties field.

  Fields:
    consumerProjectId: ID for the project consuming this service.
    operations: Read-only view of pending operations affecting this resource,
      if requested.
    properties: Service-defined per-consumer properties. A key-value mapping
      a string key to a google.protobuf.ListValue proto. Values in the list
      are typed as defined in the Service configuration's consumer.properties
      field.
    quotaSettings: Settings that control how much or how fast the service can
      be used by the consumer project.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements.
    usageSettings: Settings that control whether this service is usable by the
      consumer project.
    visibilitySettings: Settings that control which features of the service
      are visible to the consumer project.
  """
  @encoding.MapUnrecognizedFields('additionalProperties')
  class PropertiesValue(_messages.Message):
    """Service-defined per-consumer properties. A key-value mapping a string
    key to a google.protobuf.ListValue proto. Values in the list are typed as
    defined in the Service configuration's consumer.properties field.

    Messages:
      AdditionalProperty: An additional property for a PropertiesValue object.

    Fields:
      additionalProperties: Additional properties of type PropertiesValue
    """
    class AdditionalProperty(_messages.Message):
      """An additional property for a PropertiesValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2, repeated=True)
    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
  consumerProjectId = _messages.StringField(1)
  operations = _messages.MessageField('Operation', 2, repeated=True)
  properties = _messages.MessageField('PropertiesValue', 3)
  quotaSettings = _messages.MessageField('QuotaSettings', 4)
  serviceName = _messages.StringField(5)
  usageSettings = _messages.MessageField('UsageSettings', 6)
  visibilitySettings = _messages.MessageField('VisibilitySettings', 7)
class Property(_messages.Message):
  """Defines project properties. API services can define properties that can
  be assigned to consumer projects so that backends can perform response
  customization without having to make additional calls or maintain additional
  storage. For example, Maps API defines properties that controls map tile
  cache period, or whether to embed a watermark in a result. These values can
  be set via API producer console. Only API providers can define and set these
  properties.

  Enums:
    TypeValueValuesEnum: The type of this property.

  Fields:
    description: The description of the property.
    name: The name of the property (a.k.a key).
    type: The type of this property.
  """
  class TypeValueValuesEnum(_messages.Enum):
    """The type of this property.

    Values:
      UNSPECIFIED: The type is unspecified, and will result in an error.
      INT64: The type is `int64`.
      BOOL: The type is `bool`.
      STRING: The type is `string`.
      DOUBLE: The type is 'double'.
    """
    UNSPECIFIED = 0
    INT64 = 1
    BOOL = 2
    STRING = 3
    DOUBLE = 4
  description = _messages.StringField(1)
  name = _messages.StringField(2)
  type = _messages.EnumField('TypeValueValuesEnum', 3)
class QueryUserAccessResponse(_messages.Message):
  """Response message for the QueryUserAccess method.

  Fields:
    accessibleVisibilityLabels: Any visibility labels on the service that are
      accessible by the user.
    canAccessService: True if the user can access the service and any
      unrestricted API surface.
  """
  accessibleVisibilityLabels = _messages.StringField(1, repeated=True)
  canAccessService = _messages.BooleanField(2)
class Quota(_messages.Message):
  """Quota configuration helps to achieve fairness and budgeting in service
  usage. - Fairness is achieved through the use of short-term quota limits
  that are usually defined over a time window of several seconds or minutes.
  When such a limit is applied, for example at the user level, it ensures
  that no single user will monopolize the service or a given customer's
  allocated portion of it. - Budgeting is achieved through the use of long-
  term quota limits that are usually defined over a time window of one or
  more days. These limits help client application developers predict the
  usage and help budgeting. Quota enforcement uses a simple token-based
  algorithm for resource sharing. The quota configuration structure is as
  follows: - `QuotaLimit` defines a single enforceable limit with a specified
  token amount that can be consumed over a specific duration and applies to
  a particular entity, like a project or an end user. If the limit applies
  to a user, each user making the request will get the specified number of
  tokens to consume. When the tokens run out, the requests from that user
  will be blocked until the duration elapses and the next duration window
  starts. - `QuotaGroup` groups a set of quota limits. - `QuotaRule` maps a
  method to a set of quota groups. This allows sharing of quota groups
  across methods as well as one method consuming tokens from more than one
  quota group. When a group contains multiple limits, requests to a method
  consuming tokens from that group must satisfy all the limits in that
  group. Example: quota: groups: - name: ReadGroup
  limits: - description: Daily Limit name: ProjectQpd
  default_limit: 10000 duration: 1d limit_by:
  CLIENT_PROJECT - description: Per-second Limit name:
  UserQps default_limit: 20000 duration: 100s
  limit_by: USER - name: WriteGroup limits: -
  description: Daily Limit name: ProjectQpd default_limit:
  1000 max_limit: 1000 duration: 1d limit_by:
  CLIENT_PROJECT - description: Per-second Limit name:
  UserQps default_limit: 2000 max_limit: 4000
  duration: 100s limit_by: USER rules: - selector: "*"
  groups: - group: ReadGroup - selector:
  google.calendar.Calendar.Update groups: - group: WriteGroup
  cost: 2 - selector: google.calendar.Calendar.Delete groups:
  - group: WriteGroup Here, the configuration defines two quota groups:
  ReadGroup and WriteGroup, each defining its own daily and per-second limits.
  Note that One Platform enforces per-second limits averaged over a duration
  of 100 seconds. The rules map ReadGroup for all methods, except for the
  Update and Delete methods. These two methods consume from WriteGroup, with
  Update method consuming at twice the rate as Delete method. Multiple quota
  groups can be specified for a method. The quota limits in all of those
  groups will be enforced. Example: quota: groups: - name:
  WriteGroup limits: - description: Daily Limit
  name: ProjectQpd default_limit: 1000 max_limit: 1000
  duration: 1d limit_by: CLIENT_PROJECT - description: Per-
  second Limit name: UserQps default_limit: 2000
  max_limit: 4000 duration: 100s limit_by: USER -
  name: StorageGroup limits: - description: Storage Quota
  name: StorageQuota default_limit: 1000 duration: 0
  limit_by: USER rules: - selector:
  google.calendar.Calendar.Create groups: - group:
  StorageGroup - group: WriteGroup - selector:
  google.calendar.Calendar.Delete groups: - group:
  StorageGroup In the above example, the Create and Delete methods manage the
  user's storage space. In addition, Create method uses WriteGroup to manage
  the requests. In this case, requests to Create method need to satisfy all
  quota limits defined in both quota groups. One can disable quota for
  selected method(s) identified by the selector by setting disable_quota to
  true. For example, rules: - selector: "*" group:
  - group ReadGroup - selector: google.calendar.Calendar.Select
  disable_quota: true

  Fields:
    groups: List of `QuotaGroup` definitions for the service.
    rules: List of `QuotaRule` definitions, each one mapping a selected method
      to one or more quota groups.
  """
  groups = _messages.MessageField('QuotaGroup', 1, repeated=True)
  rules = _messages.MessageField('QuotaRule', 2, repeated=True)
class QuotaGroup(_messages.Message):
  """`QuotaGroup` defines a set of quota limits to enforce.

  Fields:
    billable: Indicates if the quota limits defined in this quota group apply
      to consumers who have active billing. Quota limits defined in billable
      groups will be applied only to consumers who have active billing. The
      amount of tokens consumed from billable quota group will also be
      reported for billing. Quota limits defined in non-billable groups will
      be applied only to consumers who have no active billing.
    description: User-visible description of this quota group.
    limits: Quota limits to be enforced when this quota group is used. A
      request must satisfy all the limits in a group for it to be permitted.
    name: Name of this quota group. Must be unique within the service. Quota
      group name is used as part of the id for quota limits. Once the quota
      group has been put into use, the name of the quota group should be
      immutable.
  """
  billable = _messages.BooleanField(1)
  description = _messages.StringField(2)
  limits = _messages.MessageField('QuotaLimit', 3, repeated=True)
  name = _messages.StringField(4)
class QuotaGroupMapping(_messages.Message):
  """A quota group mapping.

  Fields:
    cost: Number of tokens to consume for each request. This allows different
      cost to be associated with different methods that consume from the same
      quota group. By default, each request will cost one token.
    group: The `QuotaGroup.name` of the group. Requests for the mapped methods
      will consume tokens from each of the limits defined in this group.
  """
  cost = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  group = _messages.StringField(2)
class QuotaInfo(_messages.Message):
  """Metadata about an individual quota, containing usage and limit
  information.

  Fields:
    currentUsage: The usage data for this quota as it applies to the current
      limit.
    historicalUsage: The historical usage data of this quota limit. Currently
      it is only available for daily quota limit, that is, base_limit.duration
      = "1d".
    limit: The effective limit for this quota.
  """
  currentUsage = _messages.MessageField('QuotaUsage', 1)
  historicalUsage = _messages.MessageField('QuotaUsage', 2, repeated=True)
  limit = _messages.MessageField('EffectiveQuotaLimit', 3)
class QuotaLimit(_messages.Message):
  """`QuotaLimit` defines a specific limit that applies over a specified
  duration for a limit type. There can be at most one limit for a duration
  and limit type combination defined within a `QuotaGroup`.

  Enums:
    LimitByValueValuesEnum: Limit type to use for enforcing this quota limit.
      Each unique value gets the defined number of tokens to consume from.
      For a quota limit that uses user type, each user making requests
      through the same client application project will get his/her own pool
      of tokens to consume, whereas for a limit that uses client project
      type, all users making requests through the same client application
      project share a single pool of tokens.

  Fields:
    defaultLimit: Default number of tokens that can be consumed during the
      specified duration. This is the number of tokens assigned when a
      client application developer activates the service for his/her
      project. Specifying a value of 0 will block all requests. This can be
      used if you are provisioning quota to selected consumers and blocking
      others. Similarly, a value of -1 will indicate an unlimited quota. No
      other negative values are allowed.
    description: User-visible description for this quota limit.
    displayName: The UI display name of the limit. If empty, client should
      use 'name' field instead.
    duration: Duration of this limit in textual notation. Example: "100s",
      "24h", "1d". For duration longer than a day, only multiple of days is
      supported. We support only "100s" and "1d" for now. Additional support
      will be added in the future. "0" indicates indefinite duration.
    freeTier: Free tier value displayed in the Developers Console for this
      limit. The free tier is the number of tokens that will be subtracted
      from the billed amount when billing is enabled. This field can only be
      set on a limit with duration "1d", in a billable group; it is invalid
      on any other limit. If this field is not set, it defaults to 0,
      indicating that there is no free tier for this service.
    limitBy: Limit type to use for enforcing this quota limit. Each unique
      value gets the defined number of tokens to consume from. For a quota
      limit that uses user type, each user making requests through the same
      client application project will get his/her own pool of tokens to
      consume, whereas for a limit that uses client project type, all users
      making requests through the same client application project share a
      single pool of tokens.
    maxLimit: Maximum number of tokens that can be consumed during the
      specified duration. Client application developers can override the
      default limit up to this maximum. If specified, this value cannot be
      set to a value less than the default limit. If not specified, it is
      set to the default limit. To allow clients to apply overrides with no
      upper bound, set this to -1, indicating unlimited maximum quota.
    name: Name of the quota limit. Must be unique within the quota group.
      This name is used to refer to the limit when overriding the limit on
      a per-project basis. If a name is not provided, it will be generated
      from the limit_by and duration fields. The maximum length of the
      limit name is 64 characters. The name of a limit is used as a unique
      identifier for this limit. Therefore, once a limit has been put into
      use, its name should be immutable. You can use the display_name field
      to provide a user-friendly name for the limit. The display name can
      be evolved over time without affecting the identity of the limit.
  """

  class LimitByValueValuesEnum(_messages.Enum):
    """Limit type to use for enforcing this quota limit. Each unique value
    gets the defined number of tokens to consume from. For a quota limit
    that uses user type, each user making requests through the same client
    application project will get his/her own pool of tokens to consume,
    whereas for a limit that uses client project type, all users making
    requests through the same client application project share a single
    pool of tokens.

    Values:
      CLIENT_PROJECT: ID of the project owned by the client application
        developer making the request.
      USER: ID of the end user making the request using the client
        application.
    """
    CLIENT_PROJECT = 0
    USER = 1

  defaultLimit = _messages.IntegerField(1)
  description = _messages.StringField(2)
  displayName = _messages.StringField(3)
  duration = _messages.StringField(4)
  freeTier = _messages.IntegerField(5)
  limitBy = _messages.EnumField('LimitByValueValuesEnum', 6)
  maxLimit = _messages.IntegerField(7)
  name = _messages.StringField(8)
class QuotaLimitOverride(_messages.Message):
  """Specifies a custom quota limit that is applied for this consumer
  project. This overrides the default value in google.api.QuotaLimit.

  Fields:
    limit: The new limit for this project. May be -1 (unlimited), 0 (block),
      or any positive integer.
    unlimited: Indicates the override is to provide unlimited quota. If
      true, any value set for limit will be ignored. DEPRECATED. Use a
      limit value of -1 instead.
  """
  limit = _messages.IntegerField(1)
  unlimited = _messages.BooleanField(2)
class QuotaRule(_messages.Message):
  """`QuotaRule` maps a method to a set of `QuotaGroup`s.

  Fields:
    disableQuota: Indicates if quota checking should be enforced. Quota
      will be disabled for methods without quota rules or with quota rules
      having this field set to true. When this field is set to true, no
      quota group mapping is allowed.
    groups: Quota groups to be used for this method. This supports
      associating a cost with each quota group.
    selector: Selects methods to which this rule applies. Refer to selector
      for syntax details.
  """
  disableQuota = _messages.BooleanField(1)
  groups = _messages.MessageField('QuotaGroupMapping', 2, repeated=True)
  selector = _messages.StringField(3)
class QuotaSettings(_messages.Message):
  """Per-consumer overrides for quota settings. See google/api/quota.proto
  for the corresponding service configuration which provides the default
  values.

  Messages:
    ConsumerOverridesValue: Quota overrides set by the consumer. Consumer
      overrides will only have an effect up to the max_limit specified in
      the service config, or the producer override, if one exists. The key
      for this map is '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME is the
      google.api.QuotaGroup.name field and LIMIT_NAME is the
      google.api.QuotaLimit.name field from the service config. For
      example: 'ReadGroup/ProjectDaily'.
    EffectiveQuotasValue: The effective quota limits for each group, derived
      from the service defaults together with any producer or consumer
      overrides. For each limit, the effective value is the minimum of the
      producer and consumer overrides if either is present, or else the
      service default if neither is present. DEPRECATED. Use
      effective_quota_groups instead.
    ProducerOverridesValue: Quota overrides set by the producer. Note that
      if a consumer override is also specified, then the minimum of the two
      will be used. This allows consumers to cap their usage voluntarily.
      The key for this map is '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME
      is the google.api.QuotaGroup.name field and LIMIT_NAME is the
      google.api.QuotaLimit.name field from the service config. For
      example: 'ReadGroup/ProjectDaily'.

  Fields:
    consumerOverrides: Quota overrides set by the consumer. Consumer
      overrides will only have an effect up to the max_limit specified in
      the service config, or the producer override, if one exists. The key
      for this map is '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME is the
      google.api.QuotaGroup.name field and LIMIT_NAME is the
      google.api.QuotaLimit.name field from the service config. For
      example: 'ReadGroup/ProjectDaily'.
    effectiveQuotaGroups: Use this field for quota limits defined under
      quota groups. Combines service quota configuration and
      project-specific settings, as a map from quota group name to the
      effective quota information for that group. Output-only.
    effectiveQuotas: The effective quota limits for each group, derived from
      the service defaults together with any producer or consumer
      overrides. For each limit, the effective value is the minimum of the
      producer and consumer overrides if either is present, or else the
      service default if neither is present. DEPRECATED. Use
      effective_quota_groups instead.
    producerOverrides: Quota overrides set by the producer. Note that if a
      consumer override is also specified, then the minimum of the two will
      be used. This allows consumers to cap their usage voluntarily. The
      key for this map is '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME is
      the google.api.QuotaGroup.name field and LIMIT_NAME is the
      google.api.QuotaLimit.name field from the service config. For
      example: 'ReadGroup/ProjectDaily'.
    variableTermQuotas: Quotas that are active over a specified time period.
      Only writeable by the producer.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ConsumerOverridesValue(_messages.Message):
    """Quota overrides set by the consumer. Consumer overrides will only
    have an effect up to the max_limit specified in the service config, or
    the producer override, if one exists. The key for this map is
    '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME is the
    google.api.QuotaGroup.name field and LIMIT_NAME is the
    google.api.QuotaLimit.name field from the service config. For example:
    'ReadGroup/ProjectDaily'.

    Messages:
      AdditionalProperty: An additional property for a
        ConsumerOverridesValue object.

    Fields:
      additionalProperties: Additional properties of type
        ConsumerOverridesValue
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a ConsumerOverridesValue object.

      Fields:
        key: Name of the additional property.
        value: A QuotaLimitOverride attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('QuotaLimitOverride', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class EffectiveQuotasValue(_messages.Message):
    """The effective quota limits for each group, derived from the service
    defaults together with any producer or consumer overrides. For each
    limit, the effective value is the minimum of the producer and consumer
    overrides if either is present, or else the service default if neither
    is present. DEPRECATED. Use effective_quota_groups instead.

    Messages:
      AdditionalProperty: An additional property for an
        EffectiveQuotasValue object.

    Fields:
      additionalProperties: Additional properties of type
        EffectiveQuotasValue
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for an EffectiveQuotasValue object.

      Fields:
        key: Name of the additional property.
        value: A QuotaLimitOverride attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('QuotaLimitOverride', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class ProducerOverridesValue(_messages.Message):
    """Quota overrides set by the producer. Note that if a consumer
    override is also specified, then the minimum of the two will be used.
    This allows consumers to cap their usage voluntarily. The key for this
    map is '<GROUP_NAME>/<LIMIT_NAME>' where GROUP_NAME is the
    google.api.QuotaGroup.name field and LIMIT_NAME is the
    google.api.QuotaLimit.name field from the service config. For example:
    'ReadGroup/ProjectDaily'.

    Messages:
      AdditionalProperty: An additional property for a
        ProducerOverridesValue object.

    Fields:
      additionalProperties: Additional properties of type
        ProducerOverridesValue
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a ProducerOverridesValue object.

      Fields:
        key: Name of the additional property.
        value: A QuotaLimitOverride attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('QuotaLimitOverride', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  consumerOverrides = _messages.MessageField('ConsumerOverridesValue', 1)
  effectiveQuotaGroups = _messages.MessageField('EffectiveQuotaGroup', 2, repeated=True)
  effectiveQuotas = _messages.MessageField('EffectiveQuotasValue', 3)
  producerOverrides = _messages.MessageField('ProducerOverridesValue', 4)
  variableTermQuotas = _messages.MessageField('VariableTermQuota', 5, repeated=True)
class QuotaUsage(_messages.Message):
  """Specifies the used quota amount for a quota limit at a particular
  time.

  Fields:
    endTime: The time the quota duration ended.
    queryTime: The time the quota usage data was queried.
    startTime: The time the quota duration started.
    usage: The used quota value at the "query_time".
  """
  endTime = _messages.StringField(1)
  queryTime = _messages.StringField(2)
  startTime = _messages.StringField(3)
  usage = _messages.IntegerField(4)
class Service(_messages.Message):
  """`Service` is the root object of the configuration schema. It describes
  basic information like the name of the service and the exposed API
  interfaces, and delegates other aspects to configuration sub-sections.

  Example:

      type: google.api.Service
      config_version: 1
      name: calendar.googleapis.com
      title: Google Calendar API
      apis:
      - name: google.calendar.Calendar
      visibility:
        rules:
        - selector: "*"
          restriction: TRUSTED_TESTER
      backend:
        rules:
        - selector: "*"
          address: calendar-prod-backend.gslb.googleapis.com

  Fields:
    apis: A list of API interfaces exported by this service. Only the
      `name` field of the google.protobuf.Api needs to be provided by the
      configuration author, as the remaining fields will be derived from
      the IDL during the normalization process. It is an error to specify
      an API interface here which cannot be resolved against the associated
      IDL files.
    authentication: Auth configuration.
    backend: API backend configuration.
    billing: Billing configuration of the service.
    configVersion: The version of the service configuration. The config
      version may influence interpretation of the configuration, for
      example, to determine defaults. This is documented together with
      applicable options. The current default for the config version itself
      is `3`.
    context: Context configuration.
    control: Configuration for the service control plane.
    customError: Custom error configuration.
    documentation: Additional API documentation.
    enums: A list of all enum types included in this API service. Enums
      referenced directly or indirectly by the `apis` are automatically
      included. Enums which are not referenced but shall be included should
      be listed here by name. Example: enums: - name:
      google.someapi.v1.SomeEnum
    http: HTTP configuration.
    logging: Logging configuration of the service.
    logs: Defines the logs used by this service.
    metrics: Defines the metrics used by this service.
    monitoredResources: Defines the monitored resources used by this
      service. This is required by the Service.monitoring and
      Service.logging configurations.
    monitoring: Monitoring configuration of the service.
    name: The DNS address at which this service is available, e.g.
      `calendar.googleapis.com`.
    producerProjectId: The id of the Google developer project that owns the
      service. Members of this project can manage the service
      configuration, manage consumption of the service, etc.
    projectProperties: Configuration of per-consumer project properties.
    quota: Quota configuration.
    systemParameters: Configuration for system parameters.
    systemTypes: A list of all proto message types included in this API
      service. It serves similar purpose as [google.api.Service.types],
      except that these types are not needed by user-defined APIs.
      Therefore, they will not show up in the generated discovery doc. This
      field should only be used to define system APIs in ESF.
    title: The product title associated with this service.
    types: A list of all proto message types included in this API service.
      Types referenced directly or indirectly by the `apis` are
      automatically included. Messages which are not referenced but shall
      be included, such as types used by the `google.protobuf.Any` type,
      should be listed here by name. Example: types: - name:
      google.protobuf.Int32
    usage: Configuration controlling usage of this service.
    visibility: API visibility configuration.
  """
  apis = _messages.MessageField('Api', 1, repeated=True)
  authentication = _messages.MessageField('Authentication', 2)
  backend = _messages.MessageField('Backend', 3)
  billing = _messages.MessageField('Billing', 4)
  configVersion = _messages.IntegerField(5, variant=_messages.Variant.UINT32)
  context = _messages.MessageField('Context', 6)
  control = _messages.MessageField('Control', 7)
  customError = _messages.MessageField('CustomError', 8)
  documentation = _messages.MessageField('Documentation', 9)
  enums = _messages.MessageField('Enum', 10, repeated=True)
  http = _messages.MessageField('Http', 11)
  logging = _messages.MessageField('Logging', 12)
  logs = _messages.MessageField('LogDescriptor', 13, repeated=True)
  metrics = _messages.MessageField('MetricDescriptor', 14, repeated=True)
  monitoredResources = _messages.MessageField('MonitoredResourceDescriptor', 15, repeated=True)
  monitoring = _messages.MessageField('Monitoring', 16)
  name = _messages.StringField(17)
  producerProjectId = _messages.StringField(18)
  projectProperties = _messages.MessageField('ProjectProperties', 19)
  quota = _messages.MessageField('Quota', 20)
  systemParameters = _messages.MessageField('SystemParameters', 21)
  systemTypes = _messages.MessageField('Type', 22, repeated=True)
  title = _messages.StringField(23)
  types = _messages.MessageField('Type', 24, repeated=True)
  usage = _messages.MessageField('Usage', 25)
  visibility = _messages.MessageField('Visibility', 26)
class ServiceAccessList(_messages.Message):
  """List of users and groups that are granted access to a service or
  visibility label.

  Fields:
    members: Members that are granted access.
      - "user:{$user_email}" - Grant access to an individual user
      - "group:{$group_email}" - Grant access to direct members of the group
      - "domain:{$domain}" - Grant access to all members of the domain. For
        now, domain membership check will be similar to Devconsole/TT
        check: compare domain part of the user email to configured domain
        name. When IAM integration is complete, this will be replaced with
        IAM check.
  """
  members = _messages.StringField(1, repeated=True)
class ServiceAccessPolicy(_messages.Message):
  """Policy describing who can access a service and any visibility labels
  on that service.

  Messages:
    VisibilityLabelAccessListsValue: ACLs for access to restricted parts of
      the service. The map key is the visibility label that is being
      controlled. Note that access to any label also implies access to the
      unrestricted surface.

  Fields:
    accessList: ACL for access to the unrestricted surface of the service.
    serviceName: The service protected by this policy.
    visibilityLabelAccessLists: ACLs for access to restricted parts of the
      service. The map key is the visibility label that is being
      controlled. Note that access to any label also implies access to the
      unrestricted surface.
  """

  @encoding.MapUnrecognizedFields('additionalProperties')
  class VisibilityLabelAccessListsValue(_messages.Message):
    """ACLs for access to restricted parts of the service. The map key is
    the visibility label that is being controlled. Note that access to any
    label also implies access to the unrestricted surface.

    Messages:
      AdditionalProperty: An additional property for a
        VisibilityLabelAccessListsValue object.

    Fields:
      additionalProperties: Additional properties of type
        VisibilityLabelAccessListsValue
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a VisibilityLabelAccessListsValue
      object.

      Fields:
        key: Name of the additional property.
        value: A ServiceAccessList attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('ServiceAccessList', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  accessList = _messages.MessageField('ServiceAccessList', 1)
  serviceName = _messages.StringField(2)
  visibilityLabelAccessLists = _messages.MessageField('VisibilityLabelAccessListsValue', 3)
class ServicemanagementOperationsGetRequest(_messages.Message):
  """A ServicemanagementOperationsGetRequest object.

  Fields:
    operationsId: Part of `name`. The name of the operation resource.
  """
  operationsId = _messages.StringField(1, required=True)
class ServicemanagementServicesAccessPolicyQueryRequest(_messages.Message):
  """A ServicemanagementServicesAccessPolicyQueryRequest object.

  Fields:
    serviceName: The service to query access for.
    userEmail: The user to query access for.
  """
  serviceName = _messages.StringField(1, required=True)
  userEmail = _messages.StringField(2)
class ServicemanagementServicesCustomerSettingsGetRequest(_messages.Message):
  """A ServicemanagementServicesCustomerSettingsGetRequest object.

  Enums:
    ViewValueValuesEnum: Request only fields for the specified view.

  Fields:
    customerId: ID for the customer. See the comment for
      `CustomerSettings.customer_id` field of message for its format. This
      field is required.
    expand: Fields to expand in any results.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`. This
      field is required.
    view: Request only fields for the specified view.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """Request only fields for the specified view.

    Values:
      PROJECT_SETTINGS_VIEW_UNSPECIFIED: <no description>
      CONSUMER_VIEW: <no description>
      PRODUCER_VIEW: <no description>
      ALL: <no description>
    """
    PROJECT_SETTINGS_VIEW_UNSPECIFIED = 0
    CONSUMER_VIEW = 1
    PRODUCER_VIEW = 2
    ALL = 3

  customerId = _messages.StringField(1, required=True)
  expand = _messages.StringField(2)
  serviceName = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class ServicemanagementServicesCustomerSettingsPatchRequest(_messages.Message):
  """A ServicemanagementServicesCustomerSettingsPatchRequest object.

  Fields:
    customerId: ID for the customer. See the comment for
      `CustomerSettings.customer_id` field of message for its format. This
      field is required.
    customerSettings: A CustomerSettings resource to be passed as the
      request body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`. This
      field is required.
    updateMask: The field mask specifying which fields are to be updated.
  """
  customerId = _messages.StringField(1, required=True)
  customerSettings = _messages.MessageField('CustomerSettings', 2)
  serviceName = _messages.StringField(3, required=True)
  updateMask = _messages.StringField(4)
class ServicemanagementServicesDeleteRequest(_messages.Message):
  """A ServicemanagementServicesDeleteRequest object.

  Fields:
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
  """
  serviceName = _messages.StringField(1, required=True)
class ServicemanagementServicesGetAccessPolicyRequest(_messages.Message):
  """A ServicemanagementServicesGetAccessPolicyRequest object.

  Fields:
    serviceName: The name of the service. For example:
      `example.googleapis.com`.
  """
  serviceName = _messages.StringField(1, required=True)
class ServicemanagementServicesGetConfigRequest(_messages.Message):
  """A ServicemanagementServicesGetConfigRequest object.

  Fields:
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
  """
  serviceName = _messages.StringField(1, required=True)
class ServicemanagementServicesGetRequest(_messages.Message):
  """A ServicemanagementServicesGetRequest object.

  Enums:
    ViewValueValuesEnum: If project_settings is expanded, request only
      fields for the specified view.

  Fields:
    consumerProjectId: If project_settings is expanded, return settings for
      the specified consumer project.
    expand: Fields to expand in any results. By default, the following
      fields are not present in the result: - `operations` -
      `project_settings` - `project_settings.operations` - `quota_usage`
      (It requires `project_settings`) - `historical_quota_usage` (It
      requires `project_settings`)
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    view: If project_settings is expanded, request only fields for the
      specified view.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """If project_settings is expanded, request only fields for the
    specified view.

    Values:
      PROJECT_SETTINGS_VIEW_UNSPECIFIED: <no description>
      CONSUMER_VIEW: <no description>
      PRODUCER_VIEW: <no description>
      ALL: <no description>
    """
    PROJECT_SETTINGS_VIEW_UNSPECIFIED = 0
    CONSUMER_VIEW = 1
    PRODUCER_VIEW = 2
    ALL = 3

  consumerProjectId = _messages.StringField(1)
  expand = _messages.StringField(2)
  serviceName = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class ServicemanagementServicesListRequest(_messages.Message):
  """A ServicemanagementServicesListRequest object.

  Fields:
    category: Include services only in the specified category. Supported
      categories are servicemanagement.googleapis.com/categories/google-
      services or servicemanagement.googleapis.com/categories/play-games.
    consumerProjectId: Include services consumed by the specified project.
      If project_settings is expanded, then this field controls which
      project project_settings is populated for.
    expand: Fields to expand in any results. By default, the following
      fields are not fully included in list results: - `operations` -
      `project_settings` - `project_settings.operations` - `quota_usage`
      (It requires `project_settings`)
    pageSize: Requested size of the next page of data.
    pageToken: Token identifying which result to start with; returned by a
      previous list call.
    producerProjectId: Include services produced by the specified project.
  """
  category = _messages.StringField(1)
  consumerProjectId = _messages.StringField(2)
  expand = _messages.StringField(3)
  pageSize = _messages.IntegerField(4, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(5)
  producerProjectId = _messages.StringField(6)
class ServicemanagementServicesPatchConfigRequest(_messages.Message):
  """A ServicemanagementServicesPatchConfigRequest object.

  Fields:
    service: A Service resource to be passed as the request body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    updateMask: A mask specifying which fields to update.
  """
  service = _messages.MessageField('Service', 1)
  serviceName = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class ServicemanagementServicesPatchRequest(_messages.Message):
  """A ServicemanagementServicesPatchRequest object.

  Fields:
    managedService: A ManagedService resource to be passed as the request
      body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    updateMask: A mask specifying which fields to update.
  """
  managedService = _messages.MessageField('ManagedService', 1)
  serviceName = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class ServicemanagementServicesProjectSettingsGetRequest(_messages.Message):
  """A ServicemanagementServicesProjectSettingsGetRequest object.

  Enums:
    ViewValueValuesEnum: Request only the fields for the specified view.

  Fields:
    consumerProjectId: The project ID of the consumer.
    expand: Fields to expand in any results. By default, the following
      fields are not present in the result: - `operations` - `quota_usage`
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    view: Request only the fields for the specified view.
  """

  class ViewValueValuesEnum(_messages.Enum):
    """Request only the fields for the specified view.

    Values:
      PROJECT_SETTINGS_VIEW_UNSPECIFIED: <no description>
      CONSUMER_VIEW: <no description>
      PRODUCER_VIEW: <no description>
      ALL: <no description>
    """
    PROJECT_SETTINGS_VIEW_UNSPECIFIED = 0
    CONSUMER_VIEW = 1
    PRODUCER_VIEW = 2
    ALL = 3

  consumerProjectId = _messages.StringField(1, required=True)
  expand = _messages.StringField(2)
  serviceName = _messages.StringField(3, required=True)
  view = _messages.EnumField('ViewValueValuesEnum', 4)
class ServicemanagementServicesProjectSettingsPatchRequest(_messages.Message):
  """A ServicemanagementServicesProjectSettingsPatchRequest object.

  Fields:
    consumerProjectId: The project ID of the consumer.
    projectSettings: A ProjectSettings resource to be passed as the request
      body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    updateMask: The field mask specifying which fields are to be updated.
  """
  consumerProjectId = _messages.StringField(1, required=True)
  projectSettings = _messages.MessageField('ProjectSettings', 2)
  serviceName = _messages.StringField(3, required=True)
  updateMask = _messages.StringField(4)
class ServicemanagementServicesUpdateConfigRequest(_messages.Message):
  """A ServicemanagementServicesUpdateConfigRequest object.

  Fields:
    service: A Service resource to be passed as the request body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    updateMask: A mask specifying which fields to update. Update mask has
      been deprecated on UpdateServiceConfig service method. Please use
      PatchServiceConfig method instead to do partial updates.
  """
  service = _messages.MessageField('Service', 1)
  serviceName = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class ServicemanagementServicesUpdateRequest(_messages.Message):
  """A ServicemanagementServicesUpdateRequest object.

  Fields:
    managedService: A ManagedService resource to be passed as the request
      body.
    serviceName: The name of the service. See the `ServiceManager` overview
      for naming requirements. For example: `example.googleapis.com`.
    updateMask: A mask specifying which fields to update. Update mask has
      been deprecated on UpdateService service method. Please use
      PatchService method instead to do partial updates.
  """
  managedService = _messages.MessageField('ManagedService', 1)
  serviceName = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class SourceContext(_messages.Message):
  """`SourceContext` represents information about the source of a protobuf
  element, like the file in which it is defined.

  Fields:
    fileName: The path-qualified name of the .proto file that contained the
      associated protobuf element. For example:
      `"google/protobuf/source.proto"`.
  """
  fileName = _messages.StringField(1)
class StandardQueryParameters(_messages.Message):
  """Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    bearer_token: OAuth bearer token.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial
      response.
    key: API key. Your API key identifies your project and provides you
      with API access, quota, and reports. Required unless you provide an
      OAuth 2.0 token.
    oauth_token: OAuth 2.0 token for the current user.
    pp: Pretty-print response.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but
      should not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media",
      "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    """Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    """V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  bearer_token = _messages.StringField(4)
  callback = _messages.StringField(5)
  fields = _messages.StringField(6)
  key = _messages.StringField(7)
  oauth_token = _messages.StringField(8)
  pp = _messages.BooleanField(9, default=True)
  prettyPrint = _messages.BooleanField(10, default=True)
  quotaUser = _messages.StringField(11)
  trace = _messages.StringField(12)
  uploadType = _messages.StringField(13)
  upload_protocol = _messages.StringField(14)
class Status(_messages.Message):
  """The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). The error model is designed to be:
  - Simple to use and understand for most users - Flexible enough to meet
  unexpected needs  # Overview  The `Status` message contains three pieces of
  data: error code, error message, and error details. The error code should be
  an enum value of google.rpc.Code, but it may accept additional error codes
  if needed. The error message should be a developer-facing English message
  that helps developers *understand* and *resolve* the error. If a localized
  user-facing error message is needed, put the localized message in the error
  details or localize it in the client. The optional error details may contain
  arbitrary information about the error. There is a predefined set of error
  detail types in the package `google.rpc` which can be used for common error
  conditions.  # Language mapping  The `Status` message is the logical
  representation of the error model, but it is not necessarily the actual wire
  format. When the `Status` message is exposed in different client libraries
  and different wire protocols, it can be mapped differently. For example, it
  will likely be mapped to some exceptions in Java, but more likely mapped to
  some error codes in C.  # Other uses  The error model and the `Status`
  message can be used in a variety of environments, either with or without
  APIs, to provide a consistent developer experience across different
  environments.  Example uses of this error model include:  - Partial errors.
  If a service needs to return partial errors to the client,     it may embed
  the `Status` in the normal response to indicate the partial     errors.  -
  Workflow errors. A typical workflow has multiple steps. Each step may
  have a `Status` message for error reporting purpose.  - Batch operations. If
  a client uses batch request and batch response, the     `Status` message
  should be used directly inside batch response, one for     each error sub-
  response.  - Asynchronous operations. If an API call embeds asynchronous
  operation     results in its response, the status of those operations should
  be     represented directly using the `Status` message.  - Logging. If some
  API errors are stored in logs, the message `Status` could     be used
  directly after any stripping needed for security/privacy reasons.

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There will be a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """
  # NOTE(review): looks like generated protorpc bindings; do not edit by hand.

  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    """A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type with
        type URL.
    """

    class AdditionalProperty(_messages.Message):
      """An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """
      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
class Step(_messages.Message):
  """Represents the status of one operation step.

  Enums:
    StatusValueValuesEnum: The status code.

  Fields:
    description: The short description of the step.
    status: The status code.
  """
  # NOTE(review): looks like generated protorpc bindings; do not edit by hand.

  class StatusValueValuesEnum(_messages.Enum):
    """The status code.

    Values:
      STATUS_UNSPECIFIED: Unspecifed code.
      DONE: The step has completed without errors.
      NOT_STARTED: The step has not started yet.
      IN_PROGRESS: The step is in progress.
      FAILED: The step has completed with errors.
    """
    STATUS_UNSPECIFIED = 0
    DONE = 1
    NOT_STARTED = 2
    IN_PROGRESS = 3
    FAILED = 4

  description = _messages.StringField(1)
  status = _messages.EnumField('StatusValueValuesEnum', 2)
class SwaggerSpec(_messages.Message):
  """A collection of swagger specification files.

  Fields:
    swaggerFiles: The individual files.
  """
  # References the 'File' message defined elsewhere in this module.
  swaggerFiles = _messages.MessageField('File', 1, repeated=True)
class SystemParameter(_messages.Message):
  """Define a parameter's name and location. The parameter may be passed as
  either an HTTP header or a URL query parameter, and if both are passed the
  behavior is implementation-dependent.

  Fields:
    httpHeader: Define the HTTP header name to use for the parameter. It is
      case insensitive.
    name: Define the name of the parameter, such as "api_key", "alt",
      "callback", and etc. It is case sensitive.
    urlQueryParameter: Define the URL query parameter name to use for the
      parameter. It is case sensitive.
  """
  httpHeader = _messages.StringField(1)
  name = _messages.StringField(2)
  urlQueryParameter = _messages.StringField(3)
class SystemParameterRule(_messages.Message):
  """Define a system parameter rule mapping system parameter definitions to
  methods.

  Fields:
    parameters: Define parameters. Multiple names may be defined for a
      parameter. For a given method call, only one of them should be used. If
      multiple names are used the behavior is implementation-dependent. If
      none of the specified names are present the behavior is parameter-
      dependent.
    selector: Selects the methods to which this rule applies. Use '*' to
      indicate all methods in all APIs. Refer to selector for syntax details.
  """
  parameters = _messages.MessageField('SystemParameter', 1, repeated=True)
  selector = _messages.StringField(2)
class SystemParameters(_messages.Message):
  """### System parameter configuration  A system parameter is a special kind
  of parameter defined by the API system, not by an individual API. It is
  typically mapped to an HTTP header and/or a URL query parameter. This
  configuration specifies which methods change the names of the system
  parameters.

  Fields:
    rules: Define system parameters.  The parameters defined here will
      override the default parameters implemented by the system. If this field
      is missing from the service config, default system parameters will be
      used. Default system parameters and names is implementation-dependent.
      Example: define api key and alt name for all methods  SystemParameters
      rules:     - selector: "*"       parameters:         - name: api_key
      url_query_parameter: api_key         - name: alt http_header:
      Response-Content-Type  Example: define 2 api key names for a specific
      method.  SystemParameters   rules:     - selector: "/ListShelves"
      parameters:         - name: api_key           http_header: Api-Key1
      - name: api_key           http_header: Api-Key2
  """
  rules = _messages.MessageField('SystemParameterRule', 1, repeated=True)
class Type(_messages.Message):
  """A protocol buffer message type.

  Enums:
    SyntaxValueValuesEnum: The source syntax.

  Fields:
    fields: The list of fields.
    name: The fully qualified message name.
    oneofs: The list of types appearing in `oneof` definitions in this type.
    options: The protocol buffer options.
    sourceContext: The source context.
    syntax: The source syntax.
  """
  # NOTE(review): looks like generated protorpc bindings; do not edit by hand.

  class SyntaxValueValuesEnum(_messages.Enum):
    """The source syntax.

    Values:
      SYNTAX_PROTO2: Syntax `proto2`.
      SYNTAX_PROTO3: Syntax `proto3`.
    """
    SYNTAX_PROTO2 = 0
    SYNTAX_PROTO3 = 1

  fields = _messages.MessageField('Field', 1, repeated=True)
  name = _messages.StringField(2)
  oneofs = _messages.StringField(3, repeated=True)
  options = _messages.MessageField('Option', 4, repeated=True)
  sourceContext = _messages.MessageField('SourceContext', 5)
  syntax = _messages.EnumField('SyntaxValueValuesEnum', 6)
class Usage(_messages.Message):
  """Configuration controlling usage of a service.

  Enums:
    ServiceAccessValueValuesEnum: Controls which users can see or activate the
      service.

  Fields:
    activationHooks: Services that must be contacted before a consumer can
      begin using the service. Each service will be contacted in sequence,
      and, if any activation call fails, the entire activation will fail. Each
      hook is of the form <service.name>/<hook-id>, where <hook-id> is
      optional; for example: 'robotservice.googleapis.com/default'.
    deactivationHooks: Services that must be contacted before a consumer can
      deactivate a service. Each service will be contacted in sequence, and,
      if any deactivation call fails, the entire deactivation will fail. Each
      hook is of the form <service.name>/<hook-id>, where <hook-id> is
      optional; for example: 'compute.googleapis.com/'.
    dependsOnServices: Services that must be activated in order for this
      service to be used. The set of services activated as a result of these
      relations are all activated in parallel with no guaranteed order of
      activation. Each string is a service name, e.g.
      `calendar.googleapis.com`.
    requirements: Requirements that must be satisfied before a consumer
      project can use the service. Each requirement is of the form
      <service.name>/<requirement-id>; for example
      'serviceusage.googleapis.com/billing-enabled'.
    rules: Individual rules for configuring usage on selected methods.
    serviceAccess: Controls which users can see or activate the service.
  """
  # NOTE(review): looks like generated protorpc bindings; do not edit by hand.

  class ServiceAccessValueValuesEnum(_messages.Enum):
    """Controls which users can see or activate the service.

    Values:
      RESTRICTED: The service can only be seen/used by users identified in the
        service's access control policy.  If the service has not been
        whitelisted by your domain administrator for out-of-org publishing,
        then this mode will be treated like ORG_RESTRICTED.
      PUBLIC: The service can be seen/used by anyone.  If the service has not
        been whitelisted by your domain administrator for out-of-org
        publishing, then this mode will be treated like ORG_PUBLIC.  The
        discovery document for the service will also be public and allow
        unregistered access.
      ORG_RESTRICTED: The service can be seen/used by users identified in the
        service's access control policy. Access is further constrained to the
        group controlled by the administrator of the project/org that owns the
        service.
      ORG_PUBLIC: The service can be seen/used by the group of users
        controlled by the administrator of the project/org that owns the
        service.
    """
    RESTRICTED = 0
    PUBLIC = 1
    ORG_RESTRICTED = 2
    ORG_PUBLIC = 3

  activationHooks = _messages.StringField(1, repeated=True)
  deactivationHooks = _messages.StringField(2, repeated=True)
  dependsOnServices = _messages.StringField(3, repeated=True)
  requirements = _messages.StringField(4, repeated=True)
  rules = _messages.MessageField('UsageRule', 5, repeated=True)
  serviceAccess = _messages.EnumField('ServiceAccessValueValuesEnum', 6)
class UsageRule(_messages.Message):
  """Usage configuration rules for the service.  NOTE: Under development.
  Use this rule to configure unregistered calls for the service. Unregistered
  calls are calls that do not contain consumer project identity. (Example:
  calls that do not contain an API key). By default, API methods do not allow
  unregistered calls, and each method call must be identified by a consumer
  project identity. Use this rule to allow/disallow unregistered calls.
  Example of an API that wants to allow unregistered calls for entire service.
  usage:   rules:   - selector: "*"     allow_unregistered_calls:
  true  Example of a method that wants to allow unregistered calls.
  usage:   rules:   - selector:
  "google.example.library.v1.LibraryService.CreateBook"
  allow_unregistered_calls: true

  Fields:
    allowUnregisteredCalls: True, if the method allows unregistered calls;
      false otherwise.
    selector: Selects the methods to which this rule applies. Use '*' to
      indicate all methods in all APIs. Refer to selector for syntax details.
  """
  allowUnregisteredCalls = _messages.BooleanField(1)
  selector = _messages.StringField(2)
class UsageSettings(_messages.Message):
  """Usage settings for a consumer of a service.

  Enums:
    ConsumerEnableStatusValueValuesEnum: Consumer controlled setting to
      enable/disable use of this service by the consumer project. The default
      value of this is controlled by the service configuration.

  Fields:
    consumerEnableStatus: Consumer controlled setting to enable/disable use of
      this service by the consumer project. The default value of this is
      controlled by the service configuration.
  """

  class ConsumerEnableStatusValueValuesEnum(_messages.Enum):
    """Consumer controlled setting to enable/disable use of this service by
    the consumer project. The default value of this is controlled by the
    service configuration.

    Values:
      DISABLED: The service is disabled.
      ENABLED: The service is enabled.
    """
    DISABLED = 0
    ENABLED = 1

  consumerEnableStatus = _messages.EnumField('ConsumerEnableStatusValueValuesEnum', 1)
class VariableTermQuota(_messages.Message):
  """A variable term quota is a bucket of tokens that is consumed over a
  specified (usually long) time period. When present, it overrides any "1d"
  duration per-project quota specified on the group. Variable terms run from
  midnight to midnight, start_date to end_date (inclusive) in the
  America/Los_Angeles time zone.

  Fields:
    createTime: Time when this variable term quota was created. If multiple
      quotas are simultaneously active, then the quota with the latest
      create_time is the effective one.
    displayEndDate: The displayed end of the active period for the variable
      term quota. This may be before the effective end to give the user a
      grace period. YYYYMMdd date format, e.g. 20140730.
    endDate: The effective end of the active period for the variable term
      quota (inclusive). This must be no more than 5 years after start_date.
      YYYYMMdd date format, e.g. 20140730.
    groupName: The quota group that has the variable term quota applied to it.
      This must be a google.api.QuotaGroup.name specified in the service
      configuration.
    limit: The number of tokens available during the configured term.
    quotaUsage: The usage data of this quota.
    startDate: The beginning of the active period for the variable term quota.
      YYYYMMdd date format, e.g. 20140730.
  """
  createTime = _messages.StringField(1)
  displayEndDate = _messages.StringField(2)
  endDate = _messages.StringField(3)
  groupName = _messages.StringField(4)
  limit = _messages.IntegerField(5)
  quotaUsage = _messages.MessageField('QuotaUsage', 6)
  startDate = _messages.StringField(7)
class Visibility(_messages.Message):
  """`Visibility` defines restrictions for the visibility of service elements.
  Restrictions are specified using visibility labels (e.g., TRUSTED_TESTER)
  that are elsewhere linked to users and projects.  User and projects can have
  access to more than one visibility label. The effective visibility for
  multiple labels is the union of each label's elements, plus any unrestricted
  elements.  You must list any supported label combinations in
  `label_combinations`.  If an element and its parents have no restrictions,
  visibility is unconditionally granted.  Example:      visibility:
  label_combinations:       - GOOGLE_INTERNAL, TRUSTED_TESTER       rules:
  - selector: google.calendar.Calendar.EnhancedSearch         restriction:
  TRUSTED_TESTER       - selector: google.calendar.Calendar.Delegate
  restriction: GOOGLE_INTERNAL  Here, all methods are publicly visible except
  for the restricted methods EnhancedSearch and Delegate.  In addition, since
  `label_combinations` lists both GOOGLE_INTERNAL and TRUSTED_TESTER, users
  and projects can be given access to a combined visibility with both
  EnhancedSearch and Delegate.

  Fields:
    enforceRuntimeVisibility: Controls whether visibility rules are enforced
      at runtime for requests to all APIs and methods.  If true, requests
      without method visibility will receive a NOT_FOUND error, and any non-
      visible fields will be scrubbed from the response messages. In service
      config version 0, the default is false. In later config versions, it's
      true.  Note, the `enforce_runtime_visibility` specified in a visibility
      rule overrides this setting for the APIs or methods asscoiated with the
      rule.
    labelCombinations: Lists valid label combinations for this service in
      comma-delimited form. This lets users and projects see the union of
      these labels' elements.  Removing a label combination can be a breaking
      change, as clients with access to the combination will now see non-
      restricted elements only.
    rules: A list of visibility rules providing visibility configuration for
      individual API elements.
  """
  enforceRuntimeVisibility = _messages.BooleanField(1)
  labelCombinations = _messages.StringField(2, repeated=True)
  rules = _messages.MessageField('VisibilityRule', 3, repeated=True)
class VisibilityRule(_messages.Message):
  """A visibility rule provides visibility configuration for an individual API
  element.

  Fields:
    enforceRuntimeVisibility: Controls whether visibility is enforced at
      runtime for requests to an API method. This setting has meaning only
      when the selector applies to a method or an API.  If true, requests
      without method visibility will receive a NOT_FOUND error, and any non-
      visible fields will be scrubbed from the response messages. The default
      is determined by the value of
      google.api.Visibility.enforce_runtime_visibility.
    restriction: Lists the visibility labels for this rule. Any of the listed
      labels grants visibility to the element.  If a rule has multiple labels,
      removing one of the labels but not all of them can break clients.
      Example:      visibility:       rules:       - selector:
      google.calendar.Calendar.EnhancedSearch         restriction:
      GOOGLE_INTERNAL, TRUSTED_TESTER  Removing GOOGLE_INTERNAL from this
      restriction will break clients that rely on this method and only had
      access to it through GOOGLE_INTERNAL.
    selector: Selects methods, messages, fields, enums, etc. to which this
      rule applies.  Refer to selector for syntax details.
  """
  enforceRuntimeVisibility = _messages.BooleanField(1)
  restriction = _messages.StringField(2)
  selector = _messages.StringField(3)
class VisibilitySettings(_messages.Message):
  """Settings that control which features of the service are visible to the
  consumer project.

  Fields:
    visibilityLabels: The set of visibility labels that are used to determine
      what API surface is visible to calls made by this project. The visible
      surface is a union of the surface features associated with each label
      listed here, plus the publicly visible (unrestricted) surface.  The
      service producer may add or remove labels at any time. The service
      consumer may add a label if the calling user has been granted permission
      to do so by the producer.  The service consumer may also remove any
      label at any time.
  """
  visibilityLabels = _messages.StringField(1, repeated=True)
# Register JSON-name overrides: the Python-safe identifiers used above
# (f__xgafv, _1, _2) must serialize under their real JSON names
# ('$.xgafv', '1', '2'), which are not valid Python identifiers.
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv',
    package=u'servicemanagement')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1',
    package=u'servicemanagement')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2',
    package=u'servicemanagement')
|
{
"content_hash": "6d55c6f593fe5a1db94ac9c533156920",
"timestamp": "",
"source": "github",
"line_count": 3108,
"max_line_length": 95,
"avg_line_length": 44.21621621621622,
"alnum_prop": 0.7231196879729888,
"repo_name": "flgiordano/netcash",
"id": "733514010ae1ef9e0a56851ece6e4ea7d2656035",
"size": "137424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/servicemanagement/v1/servicemanagement_v1_messages.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
}
|
import unittest
from declarativeunittest import raises
from construct.lib.hex import *
class TestHex(unittest.TestCase):
    """Round-trip tests: hexundump must invert hexdump exactly."""

    def test_dump(self):
        def roundtrip(payload, width=32):
            # Dump to hex text and parse it back with the same line width.
            return hexundump(hexdump(payload, width), width)

        # Empty input and a short literal.
        assert roundtrip(b"") == b""
        assert roundtrip(b"??????????") == b"??????????"
        # Every length from 0 to 99 bytes, covering partial final lines.
        for length in range(100):
            payload = b"?" * length
            assert roundtrip(payload) == payload
        # A large buffer to exercise many full lines.
        big = b"?" * 100000
        assert roundtrip(big) == big
|
{
"content_hash": "23f66b998bf198ea6944d1f6206b3dcf",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 71,
"avg_line_length": 28.933333333333334,
"alnum_prop": 0.5921658986175116,
"repo_name": "riggs/construct",
"id": "d1180222e7958e2c49f45d1c53c10516fb5956f1",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/lib/test_hex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "362614"
},
{
"name": "Shell",
"bytes": "239"
}
],
"symlink_target": ""
}
|
"""Policies used by various agents."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tf_agents.specs import tensor_spec
class DeterministicSoftPolicy(tf.Module):
  """Returns mode of policy distribution."""

  def __init__(self, a_network):
    """a_network: actor network whose output index 0 is the distribution mode."""
    super(DeterministicSoftPolicy, self).__init__()
    self._a_network = a_network

  @tf.function
  def __call__(self, observation, state=()):
    # Element 0 of the actor network output is the deterministic (mode) action.
    mode_action = self._a_network(observation)[0]
    return mode_action, state
class RandomSoftPolicy(tf.Module):
  """Returns sample from policy distribution."""

  def __init__(self, a_network):
    """a_network: actor network whose output index 1 is a sampled action."""
    super(RandomSoftPolicy, self).__init__()
    self._a_network = a_network

  @tf.function
  def __call__(self, observation, state=()):
    # Element 1 of the actor network output is a stochastic sample.
    sampled_action = self._a_network(observation)[1]
    return sampled_action, state
class MaxQSoftPolicy(tf.Module):
  """Samples a few actions from policy, returns the one with highest Q-value."""

  def __init__(self, a_network, q_network, n=10):
    """a_network: actor network; q_network: critic; n: samples per state."""
    super(MaxQSoftPolicy, self).__init__()
    self._a_network = a_network
    self._q_network = q_network
    self._n = n

  @tf.function
  def __call__(self, observation, state=()):
    batch_size = observation.shape[0]
    # sample_n output index 1 holds the sampled actions — presumably shaped
    # [n, batch, action_dim] given the reshapes below; TODO confirm.
    actions = self._a_network.sample_n(observation, self._n)[1]
    # Flatten to [n * batch, -1] so one Q-network call scores all pairs.
    actions_ = tf.reshape(actions, [self._n * batch_size, -1])
    # Replicate each observation n times, matching the flattened action order.
    states_ = tf.tile(observation[None], (self._n, 1, 1))
    states_ = tf.reshape(states_, [self._n * batch_size, -1])
    qvals = self._q_network(states_, actions_)
    qvals = tf.reshape(qvals, [self._n, batch_size])
    # Per batch element, index of the sample with the highest Q-value.
    a_indices = tf.argmax(qvals, axis=0)
    gather_indices = tf.stack(
        [a_indices, tf.range(batch_size, dtype=tf.int64)], axis=-1)
    # Pull the winning (sample, batch) action out of the [n, batch, ...] tensor.
    action = tf.gather_nd(actions, gather_indices)
    return action, state
class ContinuousRandomPolicy(tf.Module):
  """Samples actions uniformly at random."""

  def __init__(self, action_spec):
    """action_spec: bounded spec describing the valid action range."""
    super(ContinuousRandomPolicy, self).__init__()
    self._action_spec = action_spec

  def __call__(self, observation, state=()):
    # One uniform sample from the bounded spec per batch element.
    batch = observation.shape[0]
    sampled = tensor_spec.sample_bounded_spec(
        self._action_spec, outer_dims=[batch])
    return sampled, state
class EpsilonGreedyRandomSoftPolicy(tf.Module):
  """Switches between samples from actor network and uniformly random action."""

  def __init__(self, a_network, epsilon):
    """a_network: actor network; epsilon: probability of a uniform action."""
    super(EpsilonGreedyRandomSoftPolicy, self).__init__()
    self._a_network = a_network
    self._epsilon = epsilon

  @tf.function
  def __call__(self, observation, state=()):
    sampled = self._a_network(observation)[1]
    uniform = tensor_spec.sample_bounded_spec(
        self._a_network.action_spec, outer_dims=[observation.shape[0]])
    # Per batch element: with probability epsilon use the uniform action.
    draws = tf.random.uniform([observation.shape[0]])
    take_random = tf.less(draws, self._epsilon)
    chosen = tf.compat.v2.where(take_random, uniform, sampled)
    return chosen, state
class GaussianRandomSoftPolicy(tf.Module):
  """Adds Gaussian noise to actor's action."""

  def __init__(self, a_network, std=0.1, clip_eps=1e-3):
    """a_network: actor network; std: noise stddev; clip_eps: bound margin."""
    super(GaussianRandomSoftPolicy, self).__init__()
    self._a_network = a_network
    self._std = std
    self._clip_eps = clip_eps

  @tf.function
  def __call__(self, observation, state=()):
    sampled = self._a_network(observation)[1]
    perturbation = tf.random_normal(shape=sampled.shape, stddev=self._std)
    noisy = sampled + perturbation
    # Clip strictly inside the spec bounds by clip_eps on each side.
    spec = self._a_network.action_spec
    bounded = tf.clip_by_value(noisy, spec.minimum + self._clip_eps,
                               spec.maximum - self._clip_eps)
    return bounded, state
class GaussianEpsilonGreedySoftPolicy(tf.Module):
  """Switches between Gaussian-perturbed and uniform random action."""

  def __init__(self, a_network, std=0.1, clip_eps=1e-3, eps=0.1):
    """a_network: actor; std: noise stddev; clip_eps: bound margin;
    eps: probability of a uniform random action."""
    super(GaussianEpsilonGreedySoftPolicy, self).__init__()
    self._a_network = a_network
    self._std = std
    self._clip_eps = clip_eps
    self._eps = eps

  @tf.function
  def __call__(self, observation, state=()):
    # Gaussian-perturbed actor sample, clipped inside the spec bounds.
    sampled = self._a_network(observation)[1]
    perturbation = tf.random_normal(shape=sampled.shape, stddev=self._std)
    noisy = sampled + perturbation
    spec = self._a_network.action_spec
    bounded = tf.clip_by_value(noisy, spec.minimum + self._clip_eps,
                               spec.maximum - self._clip_eps)
    # With probability eps, replace by a uniform sample from the spec.
    uniform = tensor_spec.sample_bounded_spec(
        self._a_network.action_spec, outer_dims=[observation.shape[0]])
    draws = tf.random.uniform([observation.shape[0]])
    take_random = tf.less(draws, self._eps)
    chosen = tf.compat.v2.where(take_random, uniform, bounded)
    return chosen, state
class BCQPolicy(tf.Module):
  """Policy used by BCQ."""

  def __init__(self, a_network, q_network, b_network, n=10):
    """a_network: perturbation actor; q_network: critic;
    b_network: generative (behavior) model sampled for candidates;
    n: candidate actions per state."""
    super(BCQPolicy, self).__init__()
    self._a_network = a_network
    self._q_network = q_network
    self._b_network = b_network
    self._n = n

  @tf.function
  def __call__(self, observation, state=()):
    batch_size = observation.shape[0]
    # Duplicate each observation n times: [n * batch, obs_dim].
    s_dup = tf.tile(observation, [self._n, 1])
    # Candidate actions from the behavior model, then perturbed by the actor.
    sampled_actions = self._b_network.sample(s_dup)
    actions = self._a_network(s_dup, sampled_actions)
    qvals = self._q_network(s_dup, actions)
    qvals = tf.reshape(qvals, [self._n, batch_size])
    # Per batch element, index of the candidate with the highest Q-value.
    a_indices = tf.argmax(qvals, axis=0)
    gather_indices = tf.stack(
        [a_indices, tf.range(batch_size, dtype=tf.int64)], axis=-1)
    # Reshape back to [n, batch, action_dim] and select the winners.
    actions = tf.reshape(actions, [self._n, batch_size, -1])
    action = tf.gather_nd(actions, gather_indices)
    return action, state
class VAEPolicy(tf.Module):
  """Policy based on VAE."""

  def __init__(self, a_network):
    """a_network: generative actor exposing a sample(observation) method."""
    super(VAEPolicy, self).__init__()
    self._a_network = a_network

  @tf.function
  def __call__(self, observation, state=()):
    # Draw an action directly from the generative model.
    drawn = self._a_network.sample(observation)
    return drawn, state
|
{
"content_hash": "16df0db2385d2c7effe04e127f5f8692",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 80,
"avg_line_length": 33.19318181818182,
"alnum_prop": 0.655255049640534,
"repo_name": "google-research/google-research",
"id": "c6d9cf27238439268c537c6aa8e13c75d0ca9022",
"size": "6450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behavior_regularized_offline_rl/brac/policies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
import json
import numpy
import os
import pathlib
import pickle
import tempfile
def write_delimited(path, data, delimiter='\t'):
    """Write a list of lists (or iterable of iterables) to a delimited file.

    Parameters:
        path: path to the output file.
        data: iterable of iterables; each inner iterable becomes one row,
            with values converted via str().
        delimiter: string placed between values on a row (default: tab).
    """
    path = pathlib.Path(path)
    try:
        with path.open('w') as f:
            f.write('\n'.join(delimiter.join(map(str, row)) for row in data))
    except:
        if path.exists():
            # An error occurred during writing. Remove the half-written
            # file and re-raise the exception.
            # BUG FIX: pathlib.Path has no remove() method; use unlink()
            # (matching the cleanup in dump() below).
            path.unlink()
        raise
def read_delimited(path, header=True, coerce_float=True, empty_val=numpy.nan, delimiter='\t'):
    """Iterate over the rows in a delimited file such as a csv or tsv.

    Iterate a delimited file line by line, yielding each line's contents split
    by the delimiter and optionally converted to floating-point values if possible.

    Note: empty lines are skipped.

    If more sophisticated control is required, consider numpy.genfromtext or
    numpy.loadtxt.

    Parameters:
        path: path to input file
        header: if True (default), return the header line and an iterator over
            the remaining lines. If False, return an iterator over all lines.
        coerce_float: if True (default), attempt to convert data values to
            floating point. If that fails, return the original input string.
        empty_val: if coerce_float is True, return this value when an input
            value is empty. '' or numpy.nan (default) are the usual choices.
        delimiter: symbol on which the file is delimited.

    Returns:
        (header, iterator) if header is True, else iterator
        where header is a list of the strings on the first line, and iterator
        yields a list of values for each subsequent line.

    Example:
        header, data = read_delimited('path/to/data.csv', delimiter=',')
        name_i = header.index('name')
        lifespan_i = header.index('lifespan')
        lifespans = {}
        for row in data:
            lifespans[row[name_i]] = row[lifespan_i]
    """
    # Build the lazy row generator, then split the header row off the front
    # if the caller asked for it.
    data_iter = _iter_delimited(path, header, coerce_float, empty_val, delimiter)
    if header:
        return next(data_iter), data_iter
    else:
        return data_iter
def _iter_delimited(path, header, coerce_float, empty_val, delimiter):
with open(path) as infile:
for line in infile:
vals = line.strip('\n').split(delimiter)
if not vals:
continue # skip blank lines
if header:
yield vals
header = False # OK, we've already read the header, don't do it again
elif not coerce_float:
yield vals # don't try to convert to float, just return strings
else:
new_vals = []
for val in vals:
if val == '':
val = empty_val
else:
try:
val = float(val)
except ValueError:
pass
new_vals.append(val)
yield new_vals
def dump(path, **data_dict):
    """Dump keyword arguments into a file on disk.

    dump() and load() can be used to easily save any arbitrary python objects
    (or nested objects) to a datafile, organized by keyword. This function takes
    arbitrary keyword arguments, and pickles them to the given file.

    Example:
        dump('path/to/datafile', vals=[1,2,3], date='2017-01-01', params={1:[2,3,4], 2:'a'})
        data = load('path/to/datafile')
        print(data.vals, data.date, data.params[2])
    """
    target = pathlib.Path(path)
    try:
        with target.open('wb') as stream:
            pickle.dump(data_dict, stream)
    except:
        # Writing failed partway: remove the partial file, then re-raise.
        if target.exists():
            target.unlink()
        raise
def load(path):
    """Load arguments previously dumped to a file on disk.

    dump() and load() can be used to easily save any arbitrary python objects
    (or nested objects) to a datafile, organized by keyword.

    Returns a Data object, with the original keywords from dump() present
    as attributes. In addition, the path to the original file is stored in the
    '_path' attribute.

    Example:
        dump('path/to/datafile', vals=[1,2,3], date='2017-01-01', params={1:[2,3,4], 2:'a'})
        data = load('path/to/datafile')
        print(data.vals, data.date, data.params[2])
        print(data._path)
    """
    source = pathlib.Path(path)
    with source.open('rb') as stream:
        contents = pickle.load(stream)
    return Data(_path=source, **contents)
class Data:
    """Simple attribute bag: each keyword argument becomes an instance attribute.

    Example:
        d = Data(foo=5, bar=6)
        d.foo == 5      # True
        d.bar > d.foo   # True
        d.baz           # AttributeError
    """
    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)
class _NumpyEncoder(json.JSONEncoder):
"""JSON encoder that is smart about converting iterators and numpy arrays to
lists, and converting numpy scalars to python scalars.
"""
def default(self, o):
try:
return super().default(o)
except TypeError:
if isinstance(o, numpy.generic):
item = o.item()
if isinstance(item, numpy.generic):
raise
else:
return item
try:
return list(o)
except:
raise
_COMPACT_ENCODER = _NumpyEncoder(separators=(',', ':'))
_READABLE_ENCODER = _NumpyEncoder(indent=4, sort_keys=True)
def json_encode_compact_to_bytes(data):
    """Encode compact JSON for transfer over the network or similar."""
    text = _COMPACT_ENCODER.encode(data)
    return text.encode('utf8')
def json_encode_legible_to_str(data):
    """Return *data* as nicely-formatted (indented, key-sorted) JSON text."""
    encoded = _READABLE_ENCODER.encode(data)
    return encoded
def json_encode_legible_to_file(data, f):
    """Stream nicely-formatted JSON to an open file handle, chunk by chunk."""
    write = f.write
    for piece in _READABLE_ENCODER.iterencode(data):
        write(piece)
def json_encode_atomic_legible_to_file(data, filename):
    """Encode nicely-formatted JSON, and if there was no error, atomically write.

    Care is taken to never overwrite an existing file except in an atomic manner
    after all other steps have occured. This prevents errors from causing a
    partial overwrite of an existing file: the result of this function is all or
    none. The JSON text is fully encoded first, written to a temporary file in
    the destination directory, and only then moved over the destination with
    os.replace().

    Parameters:
        data: python objects to be JSON encoded
        filename: string or pathlib.Path object for destination file.
    """
    s = json_encode_legible_to_str(data)
    filename = pathlib.Path(filename)
    prefix = filename.name + '-temp.'
    fd, tmp_path = tempfile.mkstemp(prefix=prefix, dir=str(filename.parent))
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(s)
        os.replace(tmp_path, filename)
    except BaseException:
        # mkstemp returns a plain string path, so the existence test must use
        # os.path. (The original called tmp_path.exists(), which raised
        # AttributeError here, masking the real error and leaking the temp
        # file.) Re-raise whatever went wrong after cleaning up.
        if os.path.exists(tmp_path):
            os.remove(tmp_path)
        raise
|
{
"content_hash": "e77e59c4aba3ebbf08b6679c16554d15",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 94,
"avg_line_length": 36.46192893401015,
"alnum_prop": 0.6071279409717388,
"repo_name": "zplab/zplib",
"id": "1a18dc533773dcb60355b87453878744ebef15ea",
"size": "7183",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zplib/datafile.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17963"
},
{
"name": "Python",
"bytes": "260610"
}
],
"symlink_target": ""
}
|
from unittest import mock
from neutronclient.osc.v2.dynamic_routing import bgp_dragent
from neutronclient.tests.unit.osc.v2.dynamic_routing import fakes
class TestAddBgpSpeakerToDRAgent(fakes.TestNeutronDynamicRoutingOSCV2):
    _bgp_speaker = fakes.FakeBgpSpeaker.create_one_bgp_speaker()
    _bgp_dragent = fakes.FakeDRAgent.create_one_dragent()
    _bgp_speaker_id = _bgp_speaker['id']
    _bgp_dragent_id = _bgp_dragent['id']

    def setUp(self):
        super(TestAddBgpSpeakerToDRAgent, self).setUp()
        # Command under test.
        self.cmd = bgp_dragent.AddBgpSpeakerToDRAgent(self.app, self.namespace)

    def test_add_bgp_speaker_to_dragent(self):
        arglist = [self._bgp_dragent_id, self._bgp_speaker_id]
        verifylist = [
            ('dragent_id', self._bgp_dragent_id),
            ('bgp_speaker', self._bgp_speaker_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        with mock.patch.object(self.neutronclient,
                               "add_bgp_speaker_to_dragent",
                               return_value=None) as mocked:
            result = self.cmd.take_action(parsed_args)
        # The client call carries the speaker id in a request body dict.
        mocked.assert_called_once_with(
            self._bgp_dragent_id,
            {'bgp_speaker_id': self._bgp_speaker_id})
        self.assertIsNone(result)
class TestRemoveBgpSpeakerFromDRAgent(fakes.TestNeutronDynamicRoutingOSCV2):
    _bgp_speaker = fakes.FakeBgpSpeaker.create_one_bgp_speaker()
    _bgp_dragent = fakes.FakeDRAgent.create_one_dragent()
    _bgp_speaker_id = _bgp_speaker['id']
    _bgp_dragent_id = _bgp_dragent['id']

    def setUp(self):
        super(TestRemoveBgpSpeakerFromDRAgent, self).setUp()
        # Command under test.
        self.cmd = bgp_dragent.RemoveBgpSpeakerFromDRAgent(
            self.app, self.namespace)

    def test_remove_bgp_speaker_from_dragent(self):
        arglist = [self._bgp_dragent_id, self._bgp_speaker_id]
        verifylist = [
            ('dragent_id', self._bgp_dragent_id),
            ('bgp_speaker', self._bgp_speaker_id),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        with mock.patch.object(self.neutronclient,
                               "remove_bgp_speaker_from_dragent",
                               return_value=None) as mocked:
            result = self.cmd.take_action(parsed_args)
        # Removal passes the speaker id positionally, not as a body dict.
        mocked.assert_called_once_with(self._bgp_dragent_id,
                                       self._bgp_speaker_id)
        self.assertIsNone(result)
class TestListDRAgentsHostingBgpSpeaker(fakes.TestNeutronDynamicRoutingOSCV2):
    _bgp_speaker = fakes.FakeBgpSpeaker.create_one_bgp_speaker()
    _bgp_speaker_id = _bgp_speaker['id']
    attrs = {'bgp_speaker_id': _bgp_speaker_id}
    _bgp_dragents = fakes.FakeDRAgent.create_dragents(attrs)
    columns = ('ID', 'Host', 'State', 'Alive')
    # Expected row shape emitted by the list command for each agent.
    data = [(agent['id'],
             agent['host'],
             agent['admin_state_up'],
             ':-)' if agent['alive'] else 'XXX')
            for agent in _bgp_dragents['agents']]

    def setUp(self):
        super(TestListDRAgentsHostingBgpSpeaker, self).setUp()
        # Command under test.
        self.cmd = bgp_dragent.ListDRAgent(self.app, self.namespace)

    def test_list_dragents_hosting_bgp_speaker(self):
        arglist = ['--bgp-speaker', self._bgp_speaker_id]
        verifylist = [('bgp_speaker', self._bgp_speaker_id)]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        with mock.patch.object(self.neutronclient,
                               "list_dragents_hosting_bgp_speaker",
                               return_value=self._bgp_dragents) as mocked:
            columns, data = self.cmd.take_action(parsed_args)
        mocked.assert_called_once_with(bgp_speaker=self._bgp_speaker_id)
        self.assertEqual(self.columns, columns)
        self.assertListEqual(self.data, list(data))
|
{
"content_hash": "d8c0c86acf6dee226e7b95748475e476",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 39.153153153153156,
"alnum_prop": 0.596870685687989,
"repo_name": "openstack/python-neutronclient",
"id": "7a94b323af1da40a43fb3a5168953b567d90fc6f",
"size": "4920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutronclient/tests/unit/osc/v2/dynamic_routing/test_bgp_dragent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1756257"
},
{
"name": "Shell",
"bytes": "10126"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
#
# Based on: xcsoar
from sys import platform
from setuptools import setup
from setuptools.command.install import install
from distutils.command.build import build
from subprocess import call
from multiprocessing import cpu_count
import glob
import codecs
import os
import pycudd
import shutil
# Project layout: the CUDD C sources and the pycudd SWIG wrapper live in
# subdirectories next to this setup.py.
BASEPATH = os.path.dirname(os.path.abspath(__file__))
CUDD_PATH = os.path.join(BASEPATH, 'cudd-3.0.0')
PYCUDD_PATH = os.path.join(BASEPATH, 'pycudd')
# Configure flags for the CUDD build: shared libraries, the dddmp and C++
# object interfaces, and C99 mode.
CUDD_CONFIGURE_OPTS = ['--enable-shared', '--enable-dddmp', '--enable-obj',
                       '\'CFLAGS=-std=c99\'']
# Get the long description from the README file
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
class PyCuddBuild(build):
    """Custom build command: builds the bundled CUDD C library, stages its
    shared objects, then runs the SWIG build for the python wrapper."""

    def linux_prepare_so_files(self):
        # Copy the shared objects produced by the CUDD build out of
        # cudd/.libs into the CUDD source root, where the SWIG makefile
        # expects to find them.
        print('BuildCudd: linux_prepare_so_files')
        install_path = os.path.join(CUDD_PATH, 'cudd', '.libs')
        lib_search = os.path.join(install_path, '*.so')
        target_dir = CUDD_PATH
        for filename in glob.glob(lib_search):
            shutil.copy(filename, target_dir)
            print(filename)

    def build_swig_package(self):
        # Run the SWIG wrapper build in the pycudd directory.
        # NOTE(review): the exit status is captured but never checked, so a
        # failed wrapper build does not stop the installation — confirm
        # whether this is intentional.
        print('BuildCudd: build_swig_package')
        build_status = call(['make'], shell=True, cwd=PYCUDD_PATH)

    def sanity_test_package(self):
        # Placeholder for a post-build smoke test; not implemented yet.
        print('TODO')
        pass

    def windows_prepare_so_files(self):
        raise NotImplementedError('Windows not yet supported')

    def osx_prepare_so_files(self):
        raise NotImplementedError('OS X not yet supported')

    def run(self):
        # run original build code
        build.run(self)
        # build PyCudd: configure and make the CUDD library in-tree.
        build_path = os.path.abspath(self.build_temp)
        configure_cmd = ['./configure {}'.format(' '.join(CUDD_CONFIGURE_OPTS))]
        make_cmd = [
            'make',
            #'OUT=' + build_path,
            #'V=' + str(self.verbose),
        ]
        try:
            # Parallelize the make if we can count the CPUs.
            make_cmd.append('-j%d' % cpu_count())
        except NotImplementedError:
            print('Unable to determine number of CPUs. Using single threaded make.')
        targets = [] #'']
        make_cmd.extend(targets)
        target_files = [os.path.join(build_path, 'bin', 'cudd.so')]
        def compile():
            # NOTE(review): the return codes of configure/make are ignored
            # here; a failed CUDD build is not detected at this point.
            call(configure_cmd, cwd=CUDD_PATH, shell=True)
            call(make_cmd, cwd=CUDD_PATH)
        self.execute(compile, [], 'Compiling cudd')
        # copy resulting tool to library build folder
        self.mkpath(self.build_lib)
        if not self.dry_run:
            #for target in target_files:
            #self.copy_file(target, self.build_lib)
            # now install the libso or DLL files into the pycudd path
            if platform.startswith('win32') or platform.startswith('cygwin'):
                self.windows_prepare_so_files()
            elif platform.startswith('darwin'):
                self.osx_prepare_so_files()
            elif platform.startswith('linux'):
                self.linux_prepare_so_files()
            else:
                raise Exception('Unsupported platform: {}'.format(platform))
            # Once the files have been copied, run swig
            self.build_swig_package()
            self.sanity_test_package()
class PyCuddInstall(install):
    """Custom install command that copies the build output into the install
    location after the stock install step has run."""

    def initialize_options(self):
        install.initialize_options(self)
        # Resolved from the 'build' command in finalize_options below.
        self.build_scripts = None

    def finalize_options(self):
        install.finalize_options(self)
        # Inherit build_scripts from the 'build' command if still unset.
        self.set_undefined_options('build', ('build_scripts', 'build_scripts'))

    def run(self):
        # run original install code
        install.run(self)
        # install PyCudd executables
        self.copy_tree(self.build_lib, self.install_lib)
# Package metadata; the custom cmdclass hooks the CUDD/SWIG build and the
# corresponding install step into the standard setuptools workflow.
setup(
    name='pycudd',
    # Treat the prior releases of PyCUDD as v1.0.x. The new packaged libraries
    # will be versions 1.1.x+
    version=pycudd.__version__,
    description='PyCudd',
    long_description=long_description,
    maintainer='Kunal Arya',
    maintainer_email='na',
    # The project's main homepage.
    url='https://github.com/pycudd/pycudd',
    # Author details
    author='PyCUDD Community',
    author_email='noreply@github.com',
    # Choose your license
    license='GPLv3',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)',
    ],
    cmdclass={
        'build': PyCuddBuild,
        'install': PyCuddInstall,
    }
)
|
{
"content_hash": "3225165aed6aaa84a6ab057f6582163c",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 85,
"avg_line_length": 30.366863905325445,
"alnum_prop": 0.6110678098207326,
"repo_name": "pycudd/pycudd",
"id": "361d5ec385acf7c35ed44a5617af7076552fe90a",
"size": "5154",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2949591"
},
{
"name": "C++",
"bytes": "195040"
},
{
"name": "HTML",
"bytes": "806905"
},
{
"name": "M4",
"bytes": "7120"
},
{
"name": "Makefile",
"bytes": "265024"
},
{
"name": "Python",
"bytes": "129014"
},
{
"name": "Roff",
"bytes": "14291"
},
{
"name": "Shell",
"bytes": "379527"
}
],
"symlink_target": ""
}
|
import sys, os, fileinput, errno, datetime, commands, re, string, envvars, time
import shutil
from optparse import OptionParser
import subprocess
from subprocess import Popen, PIPE
def arg_handle():
usage = "usage: run_gen.py (options)"
parser = OptionParser(usage)
parser.add_option("-a", "--app", dest="app",
help="application name")
parser.add_option("-g", "--group", dest="group",
help="application name")
parser.add_option("-s", "--subapp", dest="sub_app",
help="application name")
parser.add_option("-e", "--env", dest="env",
help="environment name")
parser.add_option("-v", "--env_ver", dest="env_ver",
help="environment name")
parser.add_option("--cmmn_dt", dest="common_date",
help="application name")
parser.add_option("-b", "--op0", dest="config_file",
help="environment name")
parser.add_option("-c", "--op1", dest="key_store",
help="increment field")
parser.add_option("-d", "--op2", dest="step",
help="increment field min bound")
(options, args) = parser.parse_args()
if options.env == "":
print "run_gen.py -> ERROR: environment value (p/q/d/t) is missing)"
sys.exit(1)
if options.env_ver == "":
print "run_gen.py -> ERROR: env version value (01/02 ) is missing)"
sys.exit(1)
if options.config_file == "":
print "run_gen.py -> ERROR: config file with table information is missing)"
sys.exit(1)
if options.key_store == "":
options.key_store="service"
print "run_gen.py -> INFO: **** Using default key_store= service ******"
if (options.key_store != "service" ) and ( options.key_store != "user" ) and ( options.key_store != "common" ):
print "run_gen.py -> ERROR: **** 4th parameter key_store should be either service/user/common ****"
sys.exit(1)
if ( options.step == "" ):
options.step="all"
print("run_gen.py -> Input : " + str(options))
return options, sys.argv
def main():
options, args = arg_handle()
envvars.populate(options.env,options.env_ver,options.app,options.sub_app)
config_file_path = envvars.list['lfs_app_config']+"/ingest/"+options.config_file
if not os.path.isfile(config_file_path):
print "run_gen.py -> ERROR: config file "+config_file_path+" does not exists ***"
sys.exit(1)
print "**************************************************************************************************"
args = " ".join([envvars.list['lfs_global_scripts'] + "/getmetadata.py ",
"-e "+ options.env.strip(),
"-a "+options.app,
"-u "+options.sub_app,
" -v "+options.env_ver,
" -k "+options.key_store.strip(),
" -s "+options.config_file])
getmetadata_script = "python " + args
if ( options.step == "all" ) or ( options.step == "1" ):
print("run_gen.py -> STEP-1 : ************************************************************************")
print("run_gen.py -> Invoked : " + getmetadata_script)
call = subprocess.Popen(getmetadata_script.split(' '),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
while True:
line = call.stdout.readline()
if not line:
break
print line.strip()
sys.stdout.flush()
call.communicate()
rc = call.returncode
if rc != 0:
print "run_gen.py -> getting metadata using " + args + " is not successful."
sys.exit(1)
else:
print "run_gen.py -> getting metadata command was successful."
if ( options.step == '1' ):
sys.exit(0)
print "**************************************************************************************************"
args = " ".join([envvars.list['lfs_global_scripts'] + "/generate.py -s",
options.config_file,
"-m",
envvars.list['lfs_app_config']+"/ingest/"+options.config_file+".meta",
"-w",
envvars.list['lfs_app_workflows']+"/wf_db_ingest",
"-k",
options.key_store,
"-e "+ options.env.strip(),
"-a "+options.app,
"-u "+options.sub_app,
"-v "+options.env_ver])
generate_script = "python " + args
if ( options.step == "all" ) or ( options.step == "2" ):
print("run_gen.py -> STEP-2 : ************************************************************************")
print("run_gen.py -> Invoked : " + generate_script)
call = subprocess.Popen(generate_script.split(' '),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
while True:
line = call.stdout.readline()
if not line:
break
print line.strip()
sys.stdout.flush()
call.communicate()
rc = call.returncode
if rc != 0:
print "run_gen.py -> Generating create scripts and properties file is not successful."
sys.exit(1)
else:
print "run_gen.py -> generating create scripts and properties files was successful."
if ( options.step == '2' ):
sys.exit(0)
print "**************************************************************************************************"
if ( options.step == "all" ) or ( options.step == "3" ):
print("run_gen.py -> STEP-3 : ************************************************************************")
args = " ".join([envvars.list['lfs_global_scripts'] + "/run_hive_create.py",
"--app "+options.app,
"--subapp "+options.sub_app,
"--env " +options.env,
"--op0 "+envvars.list['lfs_app_config']+"/ingest/"+options.config_file+".list",
"--op1 "+envvars.list['lfs_global_config']+"/oozie_global.properties",
"--env_ver "+options.env_ver,
"--op2 "+envvars.list['lfs_app_workflows']+"/wf_db_ingest",
"--op3 "+envvars.list['lfs_app_src']+"/hive"])
hivecreate_script = "python " + args
print("run_gen.py -> Invoked : " + hivecreate_script)
call = subprocess.Popen(hivecreate_script.split(' '),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
while True:
line = call.stdout.readline()
if not line:
break
print line.strip()
sys.stdout.flush()
call.communicate()
rc = call.returncode
#os.system(hivecreate_script)
if rc != 0:
print "run_gen.py -> Creating hive tables is not successful."
print rc
sys.exit(1)
else:
print "run_gen.py -> Completed executing create table scripts."
print "run_gen.py -> Completed executing create table scripts."
print "**************************************************************************************************"
if __name__ == "__main__":
main()
|
{
"content_hash": "c989e7f96e32370dfccb97d706bf9880",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 125,
"avg_line_length": 43.793103448275865,
"alnum_prop": 0.46194225721784776,
"repo_name": "rbheemana/Sqoop-Automated",
"id": "a8c3878be8209a851d7528c5ccd392999eeb93f9",
"size": "7703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/scripts/run_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "197244"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
from transtech_directory.views import DirectoryListView
# from transtech_directory.views import HomeView
# URL routes: a static home page, the Django admin, and the directory list.
urlpatterns = [
    # Examples:
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', TemplateView.as_view(template_name='transtech_directory/index.html'), name='home'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^directory/$', DirectoryListView.as_view(), name='directory')
]
|
{
"content_hash": "307e60ef8c1e6eaaf6969499f6f0e202",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 98,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.7234848484848485,
"repo_name": "Leila20/transtech-directory",
"id": "bd52d6db8ac4e5d4a7ac68b8d1ab62a95d646d04",
"size": "528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2880"
},
{
"name": "HTML",
"bytes": "3062"
},
{
"name": "Python",
"bytes": "9805"
},
{
"name": "Ruby",
"bytes": "944"
}
],
"symlink_target": ""
}
|
""" Helper script to build libmad
Version: libmad-0.15.1b
Usage:
- Download the libmad sourcecode.
- Unzip the sourcecode
- Set the environment variable LIBMAD_FOLDER to the unzipped dir
- Run this script
"""
import sys
import os
import logging
import time
try:
from powertb import print_exc
except ImportError:
from traceback import print_exc
from ppci.api import cc, link
from ppci.lang.c import COptions
from ppci.common import CompilerError, logformat
from ppci.utils.reporting import html_reporter
def do_compile(filename, include_paths, arch, reporter):
    """Compile one C source file with ppci's cc and return the object file."""
    options = COptions()
    options.add_include_paths(include_paths)
    options.add_define("FPM_DEFAULT", "1")
    with open(filename, "r") as source:
        compiled = cc(source, arch, coptions=options, reporter=reporter)
    return compiled
def main():
    """Compile every libmad C source with ppci for x86_64, link the
    resulting objects, and write an HTML compilation report next to this
    script. Requires LIBMAD_FOLDER to point at the unpacked sources."""
    environment_variable = "LIBMAD_FOLDER"
    if environment_variable in os.environ:
        libmad_folder = os.environ[environment_variable]
    else:
        logging.error(
            "Please define %s to point to the libmad source folder",
            environment_variable,
        )
        return
    this_dir = os.path.abspath(os.path.dirname(__file__))
    report_filename = os.path.join(this_dir, "report_libmad.html")
    # Use the bundled librt libc headers plus libmad's own headers.
    libc_folder = os.path.join(this_dir, "..", "librt", "libc")
    libc_includes = os.path.join(libc_folder, "include")
    include_paths = [libc_includes, libmad_folder]
    arch = "x86_64"
    t1 = time.time()
    failed = 0
    passed = 0
    sources = [
        "layer3.c",
        "version.c",
        "fixed.c",
        "bit.c",
        "timer.c",
        "stream.c",
        "frame.c",
        "synth.c",
        "decoder.c",
        "layer12.c",
        "huffman.c",
    ]
    objs = []
    with html_reporter(report_filename) as reporter:
        for filename in sources:
            filename = os.path.join(libmad_folder, filename)
            print(" ======================")
            print(" ========================")
            print(" ==> Compiling", filename)
            try:
                obj = do_compile(filename, include_paths, arch, reporter)
                objs.append(obj)
            except CompilerError as ex:
                # Report the failure but keep compiling the other sources.
                print("Error:", ex.msg, ex.loc)
                ex.print()
                print_exc()
                failed += 1
            # except Exception as ex:
            #     print("General exception:", ex)
            #     print_exc()
            #     failed += 1
            else:
                print("Great success!")
                passed += 1
    t2 = time.time()
    elapsed = t2 - t1
    print("Passed:", passed, "failed:", failed, "in", elapsed, "seconds")
    obj = link(objs)
    print(obj)
if __name__ == "__main__":
    # -v on the command line enables debug-level logging.
    verbose = "-v" in sys.argv
    if verbose:
        level = logging.DEBUG
    else:
        level = logging.INFO
    logging.basicConfig(level=level, format=logformat)
    main()
|
{
"content_hash": "ce7ea22258e26d9f82ddcbb5ded7d831",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 73,
"avg_line_length": 26.663636363636364,
"alnum_prop": 0.5622229798840778,
"repo_name": "windelbouwman/ppci-mirror",
"id": "141c9678ef9c9a12f686b907a81b7eca5e4e53fc",
"size": "2933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/compile_libmad.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "94"
},
{
"name": "Brainfuck",
"bytes": "5867"
},
{
"name": "C",
"bytes": "229265"
},
{
"name": "C++",
"bytes": "1257"
},
{
"name": "Coq",
"bytes": "98028"
},
{
"name": "HTML",
"bytes": "363"
},
{
"name": "JavaScript",
"bytes": "2165"
},
{
"name": "LLVM",
"bytes": "11206"
},
{
"name": "Python",
"bytes": "2991165"
},
{
"name": "Shell",
"bytes": "960"
},
{
"name": "Verilog",
"bytes": "9363"
}
],
"symlink_target": ""
}
|
from django.dispatch import Signal
# Sent once a model class has been fully defined.
class_prepared = Signal(providing_args=["class"])
# Instance construction signals.
pre_init = Signal(providing_args=["instance", "args", "kwargs"], use_caching=True)
post_init = Signal(providing_args=["instance"], use_caching=True)
# Persistence signals (save and delete).
pre_save = Signal(providing_args=["instance", "raw", "using", "update_fields"],
                  use_caching=True)
post_save = Signal(providing_args=["instance", "raw", "created", "using", "update_fields"], use_caching=True)
pre_delete = Signal(providing_args=["instance", "using"], use_caching=True)
post_delete = Signal(providing_args=["instance", "using"], use_caching=True)
# Schema migration signals; the *_syncdb names are legacy aliases.
pre_migrate = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"])
pre_syncdb = pre_migrate
post_migrate = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"])
post_syncdb = post_migrate
# Sent when a ManyToManyField on an instance is changed.
m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], use_caching=True)
|
{
"content_hash": "eea0c1a08e9e3a69b79e84d73d3bdd81",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 116,
"avg_line_length": 49.5,
"alnum_prop": 0.693939393939394,
"repo_name": "ZhaoCJ/django",
"id": "6b7605839ccbbde5348a1768ef9772ef6700a669",
"size": "990",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "django/db/models/signals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from CommonClasses import * # hxl: comment out this line for submission
# Debug flag; flip to True for verbose local runs. (The previous
# `debug = True` line was dead code, immediately overwritten.)
debug = False
class Solution:
    # @param head, a ListNode
    # @param m, an integer
    # @param n, an integer
    # @return a ListNode
    def reverseBetween(self, head, m, n):
        """Reverse the nodes from position m to n (1-indexed, m <= n) of a
        singly linked list in one pass and return the list head.

        O(n) time, O(1) extra space.
        """
        if head == None or head.next == None or m == n:
            return head
        # hxl: make use of a dummy head so that the case m = 1 is easy to deal with
        dummyHead = ListNode(0)
        dummyHead.next = head
        c = 1  # hxl: node counter
        cur = dummyHead
        # hxl: point cur to the node before node m
        while c < m:
            cur = cur.next
            c += 1
        nodeBeforeM = cur
        nodeM = cur.next
        # Walk positions [m, n], reversing 'next' pointers as we go;
        # 'next' and 'nextNext' look one and two nodes ahead of 'cur'.
        cur = nodeM
        next = nodeM.next
        nextNext = next.next
        while c < n:
            next.next = cur
            cur = next
            next = nextNext
            if nextNext != None:  # hxl: watch out if the current node is the last node
                nextNext = nextNext.next
            c += 1
        # Stitch the reversed segment back in: the node before position m
        # now points at the segment's new head (old node n), and old node m
        # points at the remainder of the list after n.
        nodeBeforeM.next = cur
        nodeM.next = next
        return dummyHead.next
|
{
"content_hash": "1380f21745ce6281ba3d00052a6ccc92",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 89,
"avg_line_length": 25.93877551020408,
"alnum_prop": 0.47915027537372146,
"repo_name": "54lihaoxin/leetcode_python",
"id": "6cdbc0edcff7d38db573943b50172744eff87572",
"size": "1576",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/ReverseLinkedListII/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "715933"
}
],
"symlink_target": ""
}
|
import sys
import cv2
from ikalog.scenes.stateful_scene import StatefulScene
from ikalog.utils import *
class GameStart(StatefulScene):
    """Stateful detector for the pre-battle "game start" screen.

    Matches stage-name and rule-name mask images against each frame,
    collects per-frame votes while the screen is visible, and once it
    disappears elects the most frequent stage/rule and fires the
    on_game_start plugin event.
    """

    # Pixel coordinates of the stage/rule name regions at 720p frame size.
    mapname_width = 430
    mapname_left = 1280 - mapname_width
    mapname_top = 580
    mapname_height = 640 - mapname_top
    rulename_left = 640 - 120
    rulename_right = 640 + 120
    rulename_width = rulename_right - rulename_left
    rulename_top = 250
    rulename_bottom = 310
    rulename_height = rulename_bottom - rulename_top

    def reset(self):
        # Push the last-event time far into the past so the first
        # detection is never suppressed by the rate limit.
        super(GameStart, self).reset()
        self._last_event_msec = - 100 * 1000

    def find_best_match(self, frame, matchers_list):
        # Return the matcher with the highest foreground score for this
        # frame, or None when nothing matched.
        most_possible = (0, None)
        for matcher in matchers_list:
            matched, fg_score, bg_score = matcher.match_score(frame)
            if matched and (most_possible[0] < fg_score):
                most_possible = (fg_score, matcher)
        return most_possible[1]

    def elect(self, context, votes):
        # Discard votes older than the election period.
        election_start = context['engine']['msec'] - self.election_period
        while (len(votes) and votes[0][0] < election_start):
            del votes[0]
        # No votes left (unlikely): nothing to elect.
        if len(votes) == 0:
            return None
        # Count the votes per matcher.
        items = {}
        count = 0
        item_top = (0, None)  # (vote count, matcher) with the most votes
        for vote in votes:
            if vote[1] is None:
                continue
            item = vote[1]
            items[item] = items[item] + 1 if item in items else 1
            if item_top[0] < items[item]:
                item_top = (items[item], item)
        # TODO: require a minimum number of votes
        if item_top[1] is None:
            return None
        return item_top[1]

    def _state_default(self, context):
        # Skip detection while the in-game timer icon is visible — a
        # battle is already running.
        timer_icon = self.find_scene_object('GameTimerIcon')
        if (timer_icon is not None) and timer_icon.matched_in(context, 3000):
            return False
        frame = context['engine']['frame']
        if frame is None:
            return False
        stage = self.find_best_match(frame, self.stage_matchers)
        rule = self.find_best_match(frame, self.rule_matchers)
        if not stage is None:
            context['game']['map'] = stage
        if not rule is None:
            context['game']['rule'] = rule
        if stage or rule:
            # First sighting: start fresh vote lists and begin tracking.
            self.stage_votes = []
            self.rule_votes = []
            self.stage_votes.append((context['engine']['msec'], stage))
            self.rule_votes.append((context['engine']['msec'], rule))
            self._switch_state(self._state_tracking)
        return (stage or rule)

    def _state_tracking(self, context):
        frame = context['engine']['frame']
        if frame is None:
            return False
        stage = self.find_best_match(frame, self.stage_matchers)
        rule = self.find_best_match(frame, self.rule_matchers)
        matched = (stage or rule)
        # Screen still showing: record another vote and stay tracking.
        if matched:
            self.stage_votes.append((context['engine']['msec'], stage))
            self.rule_votes.append((context['engine']['msec'], rule))
            return True
        # A mismatch within 1000 ms is treated as chattering (flicker).
        if not matched and self.matched_in(context, 1000):
            return False
        # No match for longer than that -> we have left the scene; elect
        # the final stage/rule and fire the event (at most once per 20 s).
        if not self.matched_in(context, 20000, attr='_last_event_msec'):
            context['game']['map'] = self.elect(context, self.stage_votes)
            context['game']['rule'] = self.elect(context, self.rule_votes)
            self.dump(context)
            self._call_plugins('on_game_start')
            self._last_event_msec = context['engine']['msec']
            self._switch_state(self._state_default)
        return False

    def _analyze(self, context):
        pass

    def dump(self, context):
        # Debug helper: print the raw vote lists.
        print(self.stage_votes)
        print(self.rule_votes)

    def _init_scene(self, debug=False):
        # Votes are counted over a sliding window of this many msec.
        self.election_period = 5 * 1000  # msec
        self.map_list = [
            {'name': 'タチウオパーキング', 'file': 'masks/gachi_tachiuo.png'},
            {'name': 'モズク農園', 'file': 'masks/nawabari_mozuku.png'},
            {'name': 'ネギトロ炭鉱', 'file': 'masks/gachi_negitoro.png'},
            {'name': 'アロワナモール', 'file': 'masks/nawabari_arowana.png'},
            {'name': 'デカライン高架下', 'file': 'masks/yagura_decaline.png'},
            {'name': 'Bバスパーク', 'file': 'masks/gachi_buspark.png'},
            {'name': 'ハコフグ倉庫', 'file': 'masks/gachi_hakofugu.png'},
            {'name': 'シオノメ油田', 'file': 'masks/gachi_shionome.png'},
            {'name': 'モンガラキャンプ場', 'file': 'masks/hoko_mongara.png'},
            {'name': 'ホッケふ頭', 'file': 'masks/nawabari_hokke.png'},
            {'name': 'ヒラメが丘団地', 'file': 'masks/nawabari_hirame.png'},
            {'name': 'マサバ海峡大橋', 'file': 'masks/nawabari_masaba.png'},
            {'name': 'キンメダイ美術館', 'file': 'masks/gachi_kinmedai.png'},
            {'name': 'マヒマヒリゾート&スパ', 'file': 'masks/gachi_mahimahi.png'}
        ]
        self.rule_list = [
            {'name': 'ガチエリア', 'file': 'masks/gachi_tachiuo.png'},
            {'name': 'ガチヤグラ', 'file': 'masks/yagura_decaline.png'},
            {'name': 'ガチホコバトル', 'file': 'masks/hoko_mongara.png'},
            {'name': 'ナワバリバトル', 'file': 'masks/nawabari_mozuku.png'},
        ]
        self.stage_matchers = []
        self.rule_matchers = []
        # Build one mask matcher per stage and per rule.
        for map in self.map_list:
            map['mask'] = IkaMatcher(
                self.mapname_left, self.mapname_top, self.mapname_width, self.mapname_height,
                img_file=map['file'],
                threshold=0.95,
                orig_threshold=0.30,
                bg_method=matcher.MM_NOT_WHITE(),
                fg_method=matcher.MM_WHITE(),
                label='map:%s' % map['name'],
                debug=debug,
            )
            self.stage_matchers.append(map['mask'])
            setattr(map['mask'], 'id_', map['name'])
        for rule in self.rule_list:
            rule['mask'] = IkaMatcher(
                self.rulename_left, self.rulename_top, self.rulename_width, self.rulename_height,
                img_file=rule['file'],
                threshold=0.95,
                orig_threshold=0.30,
                bg_method=matcher.MM_NOT_WHITE(),
                fg_method=matcher.MM_WHITE(),
                label='rule:%s' % rule['name'],
                debug=debug,
            )
            setattr(rule['mask'], 'id_', rule['name'])
            self.rule_matchers.append(rule['mask'])


if __name__ == "__main__":
    GameStart.main_func()
|
{
"content_hash": "03470e72b06e7bdd9ebb3d434fe7a7f0",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 97,
"avg_line_length": 33.095,
"alnum_prop": 0.535126151986705,
"repo_name": "hrhtspr/IkaLog",
"id": "b751077804785bea1af7f10aba03e13a90c96570",
"size": "7734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ikalog/scenes/game/start.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "487165"
}
],
"symlink_target": ""
}
|
from django.template.response import TemplateResponse
def home(request):
    """Render the static home page template."""
    return TemplateResponse(request, 'home.html')
|
{
"content_hash": "b5065ab0a0f65508fe62369173cf24be",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 53,
"avg_line_length": 30.75,
"alnum_prop": 0.8048780487804879,
"repo_name": "iamsteadman/bambu-ajax",
"id": "f113a45a0551a567d6b94d6b38e7723199f441a5",
"size": "123",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testproject/testproject/myapp/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16424"
},
{
"name": "JavaScript",
"bytes": "56272"
},
{
"name": "Python",
"bytes": "15250"
}
],
"symlink_target": ""
}
|
from cloudmesh_database.dbconn import get_mongo_db, get_mongo_dbname_from_collection, DBConnFactory
from cloudmesh_base.util import HEADING
from cloudmesh_management.base_user import User
from cloudmesh_management.base_project import Project
from cloudmesh_management.project import Projects
from cloudmesh_management.mongo import Mongo
class TestAddProjectMember:
    """Integration test: add an existing user as a member of an existing
    project, then list that project."""
    # NOTE(review): yaml_dir and firstname appear unused in this class —
    # confirm whether they are leftovers from an earlier version.
    yaml_dir = "~/.cloudmesh_yaml"
    firstname = "gergor"

    def setup(self):
        # HEADING()
        # Connect mongoengine to the 'manage' database when configured.
        db_name = get_mongo_dbname_from_collection("manage")
        if db_name:
            meta = {'db_alias': db_name}
        obj = Mongo()
        obj.check_mongo()
        get_mongo_db("manage", DBConnFactory.TYPE_MONGOENGINE)
        pass

    def teardown(self):
        # HEADING()
        pass

    def _xyz(self):
        print("hallo")

    def test_addprojectmember(self):
        HEADING()
        """
        Test to add a member to an existing project
        """
        self._xyz()
        # Use the first user and project found in the database as fixtures.
        user = User.objects.first()
        project = Project.objects.first()
        user_name = user.username
        project_id = project.project_id
        project = Projects()
        project.add_user(user_name, project_id, 'member')
        project.list_projects(project_id=project_id)
|
{
"content_hash": "3741ec1353c06a1c17a9ee2f03ec97c9",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 99,
"avg_line_length": 27.5,
"alnum_prop": 0.6355731225296443,
"repo_name": "cloudmesh/management",
"id": "70e129820e9e7d045b1eb73f7454186f1bdcd64e",
"size": "1265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_04_add_project_member.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "4817"
},
{
"name": "Python",
"bytes": "157930"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from six.moves import xrange
def get_monthly_results(goal_doctype, goal_field, date_col, filter_str, aggregation = 'sum'):
	'''Get monthly aggregation values for given field of doctype

	:param goal_doctype: Doctype (table) to aggregate over.
	:param goal_field: Field of `goal_doctype` to aggregate.
	:param date_col: Date column used for month-year grouping.
	:param filter_str: Raw SQL condition (without the `where` keyword), or falsy for none.
	:param aggregation: SQL aggregate function name, e.g. 'sum', 'count', 'avg'.
	:returns: dict mapping 'MM-YYYY' strings to aggregated values.
	'''
	# TODO: move to ORM?
	# Month-year formatting differs between Postgres and MariaDB/MySQL.
	if(frappe.conf.db_type == 'postgres'):
		month_year_format_query = '''to_char("{}", 'MM-YYYY')'''.format(date_col)
	else:
		month_year_format_query = 'date_format(`{}`, "%m-%Y")'.format(date_col)
	conditions = ('where ' + filter_str) if filter_str else ''
	# NOTE(review): `filter_str`, `goal_field` and `date_col` are interpolated
	# into the SQL unescaped -- callers must not pass untrusted input here.
	results = frappe.db.sql('''SELECT {aggregation}(`{goal_field}`) AS {goal_field},
		{month_year_format_query} AS month_year
		FROM `{table_name}` {conditions}
		GROUP BY month_year'''
		.format(
			aggregation=aggregation,
			goal_field=goal_field,
			month_year_format_query=month_year_format_query,
			table_name="tab" + goal_doctype,
			conditions=conditions
		), as_dict=True)
	month_to_value_dict = {}
	for d in results:
		month_to_value_dict[d['month_year']] = d[goal_field]
	return month_to_value_dict
@frappe.whitelist()
def get_monthly_goal_graph_data(title, doctype, docname, goal_value_field, goal_total_field, goal_history_field,
		goal_doctype, goal_doctype_link, goal_field, date_field, filter_str, aggregation="sum"):
	'''
	Get month-wise graph data for a doctype based on aggregation values of a field in the goal doctype

	:param title: Graph title
	:param doctype: doctype of graph doc
	:param docname: of the doc to set the graph in
	:param goal_value_field: goal field of doctype
	:param goal_total_field: current month value field of doctype
	:param goal_history_field: cached history field
	:param goal_doctype: doctype the goal is based on
	:param goal_doctype_link: doctype link field in goal_doctype
	:param goal_field: field from which the goal is calculated
	:param date_field: date field in goal_doctype used for month grouping
	:param filter_str: where clause condition
	:param aggregation: a value like 'count', 'sum', 'avg'
	:return: dict of graph data
	'''
	from frappe.utils.formatters import format_value
	import json
	meta = frappe.get_meta(doctype)
	doc = frappe.get_doc(doctype, docname)
	goal = doc.get(goal_value_field)
	formatted_goal = format_value(goal, meta.get_field(goal_value_field), doc)
	current_month_value = doc.get(goal_total_field)
	formatted_value = format_value(current_month_value, meta.get_field(goal_total_field), doc)
	from frappe.utils import today, getdate, formatdate, add_months
	current_month_year = formatdate(today(), "MM-yyyy")
	history = doc.get(goal_history_field)
	try:
		# Cached month -> value map, stored as a JSON string on the doc.
		month_to_value_dict = json.loads(history) if history and '{' in history else None
	except ValueError:
		month_to_value_dict = None
	if month_to_value_dict is None:
		# Cache miss: aggregate from the goal doctype and persist the result.
		doc_filter = (goal_doctype_link + " = '" + docname + "'") if doctype != goal_doctype else ''
		if filter_str:
			doc_filter += ' and ' + filter_str if doc_filter else filter_str
		month_to_value_dict = get_monthly_results(goal_doctype, goal_field, date_field, doc_filter, aggregation)
		frappe.db.set_value(doctype, docname, goal_history_field, json.dumps(month_to_value_dict))
	# Always overwrite the current month with the live value.
	month_to_value_dict[current_month_year] = current_month_value
	months = []
	months_formatted = []
	values = []
	values_formatted = []
	# Walk the last 12 months; insert(0, ...) keeps them oldest-first.
	for i in range(0, 12):
		date_value = add_months(today(), -i)
		month_value = formatdate(date_value, "MM-yyyy")
		month_word = getdate(date_value).strftime('%b')
		month_year = getdate(date_value).strftime('%B') + ', ' + getdate(date_value).strftime('%Y')
		months.insert(0, month_word)
		months_formatted.insert(0, month_year)
		if month_value in month_to_value_dict:
			val = month_to_value_dict[month_value]
		else:
			val = 0
		values.insert(0, val)
		values_formatted.insert(0, format_value(val, meta.get_field(goal_total_field), doc))
	y_markers = []
	summary_values = [
		{
			'title': _("This month"),
			'color': '#ffa00a',
			'value': formatted_value
		}
	]
	# Only show goal marker / completion percentage when a positive goal is set.
	if float(goal) > 0:
		y_markers = [
			{
				'label': _("Goal"),
				'lineType': "dashed",
				'value': goal
			},
		]
		summary_values += [
			{
				'title': _("Goal"),
				'color': '#5e64ff',
				'value': formatted_goal
			},
			{
				'title': _("Completed"),
				'color': '#28a745',
				'value': str(int(round(float(current_month_value)/float(goal)*100))) + "%"
			}
		]
	data = {
		'title': title,
		# 'subtitle':
		'data': {
			'datasets': [
				{
					'values': values,
					'formatted': values_formatted
				}
			],
			'labels': months,
			'yMarkers': y_markers
		},
		'summary': summary_values,
	}
	return data
|
{
"content_hash": "82a90472dcc52c28433383a9d07d3e76",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 112,
"avg_line_length": 30.086666666666666,
"alnum_prop": 0.6740527365388876,
"repo_name": "RicardoJohann/frappe",
"id": "90940ba3047dd1f00067a6704f702b82290a5536",
"size": "4641",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/utils/goal.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "447183"
},
{
"name": "HTML",
"bytes": "199549"
},
{
"name": "JavaScript",
"bytes": "2009239"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2338007"
},
{
"name": "Shell",
"bytes": "2296"
},
{
"name": "Vue",
"bytes": "24090"
}
],
"symlink_target": ""
}
|
from tests.conftest import JiraTestCase
class CustomFieldOptionTests(JiraTestCase):
    """Tests for retrieving custom field options from the Jira API."""

    def test_custom_field_option(self):
        """Option id 10000 should resolve to the default 'To Do' value."""
        fetched = self.jira.custom_field_option("10000")
        self.assertEqual(fetched.value, "To Do")
|
{
"content_hash": "3eaed6b7b6fe55f503d1e013133e613c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 55,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.7304347826086957,
"repo_name": "pycontribs/jira",
"id": "273da848a6ca68ffde3c79df843fadec8e379bc6",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/resources/test_custom_field_option.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "523"
},
{
"name": "Python",
"bytes": "422049"
},
{
"name": "Shell",
"bytes": "2069"
}
],
"symlink_target": ""
}
|
from .compat import StringIO
from . import core
import re
from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name
from pygments.token import Comment
from pygments.formatters.terminal import TERMINAL_COLORS
import sys
# Start from Pygments' default terminal colors, but render comments in
# teal (dark background) / turquoise (light background).
scheme = TERMINAL_COLORS.copy()
scheme[Comment] = ('teal', 'turquoise')
# Default lexer: Python console sessions (prompts, code and output).
_pycon_lexer = get_lexer_by_name('pycon')
class HighlightOutput(object):
    """File-like proxy that syntax-highlights everything written through it
    (via the deck's highlighter) before sending it to stdout."""

    def __init__(self, deck, lexer):
        self.deck = deck
        self.lexer = lexer

    def write(self, text):
        highlighted = self.deck._highlight_text(text, self.lexer)
        sys.stdout.write(highlighted)

    def __getattr__(self, key):
        # Delegate everything else (flush, encoding, ...) to the real stdout.
        return getattr(sys.stdout, key)
class Deck(core.Deck):
    """Slide deck with Pygments-based syntax highlighting of REPL output."""
    # Add the `highlight` toggle to the commands the REPL exposes.
    expose = core.Deck.expose + ("highlight",)
    def __init__(self, path, **options):
        core.Deck.__init__(self, path, **options)
        self._highlight = True  # highlighting enabled by default
    def highlight_stdout(self, lexer):
        """Return a stdout proxy that highlights writes using `lexer` (a lexer name)."""
        lexer = get_lexer_by_name(lexer)
        return HighlightOutput(self, lexer)
    def highlight(self):
        """Toggle code highlighting."""
        self._highlight = not self._highlight
        print("%% Code highlighting is now %s" %
            (self._highlight and "ON" or "OFF"))
    def _highlight_text(self, text, lexer=_pycon_lexer):
        """Return `text` highlighted with `lexer`, or unchanged when
        highlighting is off or the color mode does not allow it.

        Trailing whitespace is split off before highlighting (Pygments
        strips it) and re-appended afterwards.
        """
        # NOTE(review): `self.color` comes from core.Deck -- assumed to be one
        # of 'auto'/'light'/'dark' or something else meaning "no color";
        # confirm against the core module.
        bg = self.color == 'dark' and 'dark' or 'light'
        if self._highlight and \
                self.color in ('auto', 'light', 'dark'):
            whitespace = re.match(r'(.*?)(\s+)$', text, re.S)
            if whitespace:
                text = whitespace.group(1)
                whitespace = whitespace.group(2)
            if text.strip():
                content = highlight(
                    text, lexer,
                    TerminalFormatter(bg=bg, colorscheme=scheme)).rstrip()
            else:
                # Whitespace-only input: nothing to highlight.
                content = text
            if whitespace:
                content = content + whitespace
        else:
            content = text
        return content
|
{
"content_hash": "efcbe521b0806258fac5b33a0db5cf28",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 78,
"avg_line_length": 28.782608695652176,
"alnum_prop": 0.5866062437059416,
"repo_name": "archsh/sliderepl",
"id": "f8bfed01fae453a9dd4b903c6bbbe9699d5132f3",
"size": "1986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sliderepl/hairy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "21369"
}
],
"symlink_target": ""
}
|
from django.db.models.manager import Manager
from django_roa.db.query import RemoteQuerySet
class ROAManager(Manager):
    """Model manager that fetches objects from remote (ROA) resources
    instead of the local database."""
    use_for_related_fields = True
    # Flag checked elsewhere instead of isinstance() ("ugly but useful
    # because isinstance is evil", per the original author).
    is_roa_manager = True

    def get_query_set(self):
        """Return a queryset that accesses remote resources for this model."""
        remote_qs = RemoteQuerySet(self.model)
        return remote_qs
|
{
"content_hash": "2f827892ae341260cbf2a611c1da9449",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 70,
"avg_line_length": 25.88235294117647,
"alnum_prop": 0.675,
"repo_name": "emidln/django_roa",
"id": "c3495daa0bc4d6b1f88a2f4d26c9a75ab2a3fadb",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_roa/db/managers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "92014"
},
{
"name": "Python",
"bytes": "5463974"
},
{
"name": "Shell",
"bytes": "1098"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Build the tangible object for the shared edible jar-bugs food template."""
    obj = Tangible()
    obj.template = "object/tangible/food/foraged/shared_edible_jar_bugs.iff"
    obj.attribute_template_id = 5
    obj.stfName("food_name","edible_bugs")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return obj
|
{
"content_hash": "8c3efe02aeceedaddd7f4e8f33fdb93c",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 23.615384615384617,
"alnum_prop": 0.6970684039087948,
"repo_name": "obi-two/Rebelion",
"id": "44ae02d3074dc288dd4e7a3475caf49bb21213b0",
"size": "452",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/food/foraged/shared_edible_jar_bugs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Restart the DNS server service
if it has yielded specific errors to the event log.
The script is intended to run as a service shortly after services startup
and stops after doing its job.
"""
# Event IDs in the DNS Server log that indicate the known startup failure.
errors=404,407,408
# Name of the Windows service to restart.
service="dns"
# set up logging #####################################
import sys,logging,logging.handlers,os.path
#in this particular case, argv[0] is likely pythonservice.exe deep in python's lib\
# so it makes no sense to write log there
log_file=os.path.splitext(__file__)[0]+".log"
l = logging.getLogger()
l.setLevel(logging.INFO)
f = logging.Formatter('%(asctime)s %(process)d:%(thread)d %(name)s %(levelname)-8s %(message)s')
# Mirror log records to stdout ...
h=logging.StreamHandler(sys.stdout)
h.setLevel(logging.NOTSET)
h.setFormatter(f)
l.addHandler(h)
# ... and to a rotating file next to this script (1 MiB, one backup).
h=logging.handlers.RotatingFileHandler(log_file,maxBytes=1024**2,backupCount=1)
h.setLevel(logging.NOTSET)
h.setFormatter(f)
l.addHandler(h)
del h,f
#hook to log unhandled exceptions
def excepthook(type,value,traceback):
    # NOTE(review): parameters shadow the `type` builtin and the `traceback`
    # module name; harmless here but worth knowing.
    logging.error("Unhandled exception occured",exc_info=(type,value,traceback))
    #Don't need another copy of traceback on stderr
    if old_excepthook!=sys.__excepthook__:
        old_excepthook(type,value,traceback)
# `old_excepthook` is bound after the def but before the hook can fire.
old_excepthook = sys.excepthook
sys.excepthook = excepthook
del log_file,os
# ####################################################
class XMLEvent:
    """Thin wrapper over a rendered Windows event-log XML record.

    Exposes `EventID` (int) and `Time` (parsed datetime) as lazily computed
    attributes via `__getattr__`.
    """
    # NOTE(review): class-body imports still execute at module import time;
    # they are merely namespaced as class attributes (XMLEvent.etree, ...).
    import lxml.etree as etree
    import iso8601
    # Table of virtual attributes: name -> extractor over the parsed XML.
    attr_fns = {
        'EventID':lambda self:int(\
            self.e.xpath('/_:Event/_:System/_:EventID',namespaces=self.nsmap)[0]\
            .text),
        'Time':lambda self:XMLEvent.iso8601.parse_date(\
            self.e.xpath('/_:Event/_:System/_:TimeCreated[@SystemTime]',namespaces=self.nsmap)[0]\
            .attrib['SystemTime'])}
    def __init__(self,xml):
        # Parse the rendered event XML; map the default namespace to `_`
        # because XPath cannot address a default namespace directly.
        self.e=XMLEvent.etree.fromstring(xml)
        self.nsmap={'_':self.e.nsmap[None]} #no default namespace support in lxml as of 04.2015
    def __getattr__(self,attr):
        # Fall back to the extractor table for unknown attribute names.
        try: fn=XMLEvent.attr_fns[attr]
        except KeyError: raise AttributeError(attr)
        return fn(self)
def main():
    """Restart the DNS service if its event log shows one of the known
    error codes newer than the last system startup."""
    import win32evtlog
    import win32serviceutil
    # system start event (eventlog 6009 is logged at every boot)
    hsysq=win32evtlog.EvtQuery("System",win32evtlog.EvtQueryReverseDirection,\
        "*[System[Provider[@Name='eventlog'] and (EventID=6009)]]",None)
    try: he=win32evtlog.EvtNext(hsysq,1)[0]
    except IndexError:
        l.warn("System startup event not found")
        start_time=0
        # NOTE(review): on this path `he` was never bound, so the
        # `del hsysq,he` below raises NameError -- verify.
    else:
        start_time=XMLEvent(win32evtlog.EvtRender(he,win32evtlog.EvtRenderEventXml)).Time
        l.info("Last system startup event found at time `%s'"%start_time)
    del hsysq,he
    #locate specific errors in DNS log
    evtlog_name="DNS Server"
    service_name=evtlog_name
    # NOTE(review): queries the module-level `service` ("dns"), not
    # `service_name` ("DNS Server") -- confirm which name the SCM expects.
    service_state=win32serviceutil.QueryServiceStatus(service)[1]
    # NOTE(review): `win32service` is not imported here; this relies on the
    # module-level `import win32serviceutil,win32service` further down having
    # run before main() is called.
    assert service_state==win32service.SERVICE_RUNNING,\
        "`%s' service is not running when it should; state=%s"%(service,service_state)
    del service_state
    hdnsq = win32evtlog.EvtQuery(evtlog_name,win32evtlog.EvtQueryReverseDirection,\
        "*[System["+' or '.join("EventID="+str(code) for code in errors)+"]]",None)
    try: he=win32evtlog.EvtNext(hdnsq,1)[0]
    except IndexError:
        l.info("Specified %s errors are not detected in `%s' log, no action needed"%(service_name,evtlog_name))
        return
    else:
        e = XMLEvent(win32evtlog.EvtRender(he,win32evtlog.EvtRenderEventXml))
        last_error_time=e.Time
        l.info("Found a relevant %s error with code `%d' at time `%s'"%(service_name,e.EventID,last_error_time))
        # Error predating the last boot means the current boot is healthy.
        if last_error_time<start_time:
            l.info("Last error is older that last startup, no action needed")
            return
    del hdnsq,he,evtlog_name
    l.info("Restarting `%s' service"%service)
    win32serviceutil.RestartService(service)
import win32serviceutil,win32service
class DnsFixService(win32serviceutil.ServiceFramework):
    """Windows service wrapper: runs main() once at startup, then exits."""
    _svc_name_="DnsFixService"
    _svc_display_name_="DNS server Win2008 fix"
    _svc_description_="""Workaround for DNS server in Win2008 not starting at system startup with errors 404,407,408 (can't open socket)"""
    # Start only after TCP/IP is available.
    _svc_deps_=("tcpip",)
    def SvcDoRun(self):
        #sys.excepthook doesn't seem to work in this routine -
        # apparently, everything is handled by the ServiceFramework machinery
        try:
            l.info("Starting service")
            main()
        except Exception,e:
            # Python 2 except syntax -- this file targets Python 2.
            excepthook(*sys.exc_info())
        else:
            l.info("Finished successfully")
    def SvcStop(self):
        # Stop requests are deliberately ignored; the service ends on its
        # own when SvcDoRun returns.
        l.warn("Manual stop request received, ignoring")
if __name__=='__main__':
    # Standard pywin32 entry point: handles install/remove/start/debug args.
    win32serviceutil.HandleCommandLine(DnsFixService)
|
{
"content_hash": "682253ecea25e8293893d4175fa95d64",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 139,
"avg_line_length": 39.33613445378151,
"alnum_prop": 0.6693014313180944,
"repo_name": "native-api/windns_fix",
"id": "f2568c276fa6ce8f934abed38568631fca5771ca",
"size": "4681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dns_fix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4681"
}
],
"symlink_target": ""
}
|
"""
Iterator
========
The :mod:`streaming.iterator` module is a private module. Most algorithms for streams are implemented here using basic iterators.
"""
import cytoolz
import itertools
import operator
import numpy as np
import operator
import collections
from ._cython import _interpolate_linear as interpolate_linear
from ._cython import _filter_ba, diff
try:
from scipy.signal import fftconvolve as _convolve
except ImportError:
_convolve = np.convolve
def blocks(iterable, nblock, noverlap=0):
    """Partition iterable into blocks.

    :param iterable: Iterable.
    :param nblock: Samples per block.
    :param noverlap: Amount of samples to overlap
    :returns: Blocks.
    """
    # Dispatch to the cheaper implementation when blocks do not overlap.
    if noverlap:
        return _overlapping_blocks(iterable, nblock, noverlap)
    return _blocks(iterable, nblock)
def _blocks(iterable, nblock):
    """Partition iterable into non-overlapping blocks.

    :param iterable: Iterable.
    :param nblock: Samples per block.
    :returns: Blocks (tuples of `nblock` samples).
    """
    for block in cytoolz.partition(nblock, iter(iterable)):
        yield block
def _overlapping_blocks(iterable, nblock, noverlap):
    """Partition iterable into overlapping blocks of size `nblock`.

    :param iterable: Iterable.
    :param nblock: Samples per block.
    :param noverlap: Amount of samples to overlap.
    :returns: Blocks (lists of `nblock` samples).
    """
    source = iter(iterable)
    nadvance = nblock - noverlap
    if nadvance < 1:
        raise ValueError("`noverlap` has to be smaller than `nblock-1`.")
    # Seed the window with the first `noverlap` samples, then extend it by
    # `nadvance` fresh samples for every block that is emitted.
    window = list(cytoolz.take(noverlap, source))
    for fresh in cytoolz.partition(nadvance, source):
        window = window + list(fresh)
        yield window
        window = window[-noverlap:]
def change_blocks(iterator, nblock, noverlap, nblock_new, noverlap_new):
    """Change blocksize and/or overlap of iterator.

    :param iterator: Iterator.
    :param nblock: Current blocksize.
    :param noverlap: Current overlap.
    :param nblock_new: New blocksize.
    :param noverlap_new: New overlap.
    :returns: Iterator with new blocksize and/or overlap.
    """
    same_overlap = noverlap_new == noverlap
    if same_overlap and nblock_new == nblock:
        # Nothing changes.
        return iterator
    if same_overlap and not nblock_new % nblock:
        # New block size is a multiple of the old one: concatenate
        # `factor` old blocks into each new block.
        factor = nblock_new // nblock
        return map(np.concatenate, cytoolz.partition(factor, iterator))
    if same_overlap and not nblock % nblock_new:
        # Old block size is a multiple of the new one: split every old
        # block into pieces of `nblock_new` and chain the pieces.
        splitter = lambda block: cytoolz.partition(nblock_new, block)
        return itertools.chain.from_iterable(map(splitter, iterator))
    # General case: flatten into samples and build blocks from scratch.
    return blocks(samples(iterator, nblock, noverlap), nblock_new, noverlap_new)
def samples(iterator, nblock, noverlap=0):
    """Convert iterator with (overlapped) blocks to iterator with individual samples.

    :param iterator: Iterator of blocks.
    :param nblock: Samples per block
    :param noverlap: Amount of samples to overlap
    """
    if noverlap != 0:
        # With overlap, only the first `nblock - noverlap` samples of each
        # block are new material; the rest repeats the previous block.
        nadvance = nblock - noverlap
        iterator = (block[0:nadvance] for block in iterator)
    for block in iterator:
        yield from block
# Some convenience functions
def sliding_mean(iterable, nwindow, noverlap=0):
    """Sliding mean.

    :param iterable: Iterable.
    :param nwindow: Window size in samples.
    :param noverlap: Amount of samples to overlap.
    :returns: Iterable of means, one per window.
    """
    for window in blocks(iterable, nwindow, noverlap):
        yield np.mean(window)
def sliding_std(iterable, nwindow, noverlap=0):
    """Sliding standard deviation.

    :param iterable: Iterable.
    :param nwindow: Window size in samples.
    :param noverlap: Amount of samples to overlap.
    :returns: Iterable of standard deviations, one per window.
    """
    for window in blocks(iterable, nwindow, noverlap):
        yield np.std(window)
def sliding_var(iterable, nwindow, noverlap=0):
    """Sliding variance.

    :param iterable: Iterable.
    :param nwindow: Window size in samples.
    :param noverlap: Amount of samples to overlap.
    :returns: Iterable of variances, one per window.
    """
    for window in blocks(iterable, nwindow, noverlap):
        yield np.var(window)
# Convolution
def convolve(signal, impulse_responses, nblock, ntaps=None, initial_values=None):
    """Convolve signal with impulse response.

    :param signal: Signal, not in blocks.
    :param impulse_responses: Impulse responses of length `ntaps`.
    :param nblock: Blocksize to use for the convolution.
    :param ntaps: Length of impulse responses.
    :param initial_values: Value to use before convolution kicks in.

    .. note:: This function takes samples and yields samples. It wraps :func:`convolve_overlap_add` and therefore requires a blocksize for computing the convolution.
    """
    signal = blocks(signal, nblock)
    # BUG FIX: convolve_overlap_add's block-size parameter is named `nhop`,
    # not `nblock`; the previous `nblock=nblock` keyword raised
    # TypeError("unexpected keyword argument") on every call.
    convolved = convolve_overlap_add(signal, impulse_responses, nhop=nblock,
                                     ntaps=ntaps, initial_values=initial_values)
    yield from itertools.chain.from_iterable(convolved)
def _convolve_crossfade(block, ir1, ir2, fading1):
    """Convolve block with two impulse responses and crossfade the result.

    :param block: Block.
    :param ir1: Impulse response 1 (the one being faded out).
    :param ir2: Impulse response 2 (the one being faded in).
    :param fading1: Fading window applied to the `ir1` convolution.
    :returns: Crossfaded convolutions of block and impulse responses.

    We switch from `ir1` to `ir2`.

    A more efficient method is presented in *Efficient time-varying FIR filtering using
    crossfading implemented in the DFT domain* by Frank Wefers.
    """
    # Full-length convolution with each impulse response.
    out_old = _convolve(block, ir1, mode='full')
    out_new = _convolve(block, ir2, mode='full')
    # Complementary fading window for the second response.
    fading2 = 1. - fading1
    # Mix the two convolutions according to the fades.
    return out_old * fading1 + out_new * fading2
def convolve_overlap_add_spectra(signal, spectra, nblock, nbins, initial_values=None):
    """Convolve iterable `signal` with impulse responses of `spectra`.

    This function directly uses the spectra to compute the acyclic convolution.

    :raises NotImplementedError: always -- this is an unimplemented placeholder.
    """
    # The old code returned the `NotImplemented` singleton, which is reserved
    # for binary special methods and silently propagates as a truthy value;
    # an unimplemented API should raise instead.
    raise NotImplementedError("convolve_overlap_add_spectra is not implemented")
def convolve_overlap_add(signal, impulse_responses, nhop, ntaps, initial_values=None):
    """Convolve iterable `signal` with time-variant `impulse_responses`.

    :param signal: Signal in blocks of size `nhop`.
    :param impulse_responses: Impulse responses of length `ntaps`.
    :param nhop: Impulse responses is updated every `nhop` samples. This should correspond to the blocksize of `signal`.
    :param ntaps: Length of impulse responses.
    :param initial_values: Value to use before convolution kicks in.
    :returns: Convolution.

    This function implements the overlap-add method. Time-variant `impulse_responses` is supported.
    Each item (`block`) in `signal` corresponds to an impulse response (`ir`) in `impulse_responses`.
    This implementation of overlap-add buffers only one segment. Therefore, only `nhop>=ntaps` is supported.

    .. warning:: This function cannot be used when `ntaps > nhop`.

    .. seealso:: :func:`convolve_overlap_save`
    """
    nblock = nhop
    if not nblock >= ntaps:
        raise ValueError("Amount of samples in block should be the same or more than the amount of filter taps.")
    # Tail of the previous block's convolution; added onto the head of the
    # next block's convolution (the essence of overlap-add).
    if initial_values is None:
        tail_previous_block = np.zeros(ntaps-1)
    else:
        tail_previous_block = initial_values
    for block, ir in zip(signal, impulse_responses):
        # The result of the convolution consists of a head, a body and a tail
        # - the head and tail have length `ntaps-1`
        # - the body has length `nblock-ntaps+1`
        try:
            convolved = _convolve(block, ir, mode='full')
        except ValueError:
            # NOTE(review): swallowing ValueError and raising GeneratorExit
            # ends the stream silently; consider letting the error propagate.
            raise GeneratorExit
        # The final block consists of
        # - the head and body of the current convolution
        # - and the tail of the previous convolution.
        resulting_block = convolved[:-ntaps+1]
        # Out-of-place add because the dtypes of the tail and the block
        # may differ.
        resulting_block[:ntaps-1] = resulting_block[:ntaps-1] + tail_previous_block
        # We store the tail for the next cycle
        tail_previous_block = convolved[-ntaps+1:]
        # Yield the result of this step
        yield resulting_block
def convolve_overlap_discard(signal, impulse_response, nblock_in=None, nblock_out=None):
    """Convolve signal with linear time-invariant `impulse_response` using overlap-discard method.

    :param signal: Signal. Can either consists of blocks or samples. `nblock_in` should be set to the block size of the signal.
    :param impulse_response: Linear time-invariant impulse response of filter.
    :param nblock_in: Actual input blocksize of signal. Should be set to `None` is `signal` is sample-based.
    :param nblock_out: Desired output blocksize.
    :returns: Tuple of (convolved blocks, output blocksize). Note this function
        returns a tuple, it is not itself a generator.

    Setting the input blocksize can be useful because this gives control over the delay of the process.
    Setting the output blocksize is convenient because you know on beforehand the output blocksize.
    Setting neither will result in blocksize of one, or individual samples. This will be slow.
    Setting both is not possible.

    .. note:: The *overlap-discard* method is more commonly known as *overlap-save*.
    """
    # Amount of filter taps
    ntaps = len(impulse_response)
    # Amount of overlap that is needed between consecutive windows
    noverlap = ntaps -1
    # Derive the missing blocksize; exactly one of in/out may be given.
    if nblock_in is not None and nblock_out is not None:
        raise ValueError("Set block size of either input or output.")
    # Only output blocksize is explicitly mentioned
    elif nblock_out is not None:
        nblock_in = nblock_out + ntaps -1
    # Only input blocksize is explicitly mentioned
    elif nblock_in is not None:
        if not nblock_in >= ntaps:
            raise ValueError("Amount of samples in block should be the same or more than the amount of filter taps.")
        nblock_out = nblock_in - ntaps + 1
    else:
        # Neither given: one output sample per window.
        nblock_in = ntaps
        nblock_out = nblock_in - ntaps + 1
    windows = blocks(signal, nblock_in, noverlap)
    # Convolve function to use; 'valid' keeps only the fully-overlapped part.
    _convolve_func = lambda x: _convolve(x, impulse_response, mode='valid')
    # Convolved blocks
    convolved = map(_convolve_func, windows )
    return convolved, nblock_out
def convolve_overlap_save(signal, impulse_responses, nhop, ntaps):
    """Convolve signal with linear time-variant `impulse_responses` using the overlap-discard method.

    :param signal: Signal. Consists of samples.
    :param impulse_responses: Linear time-variant impulse responses of filter.
    :param nhop: Impulse response is renewed every `nhop` samples.
    :param ntaps: Length of each impulse response.
    :returns: Convolution of `signal` with `impulse_responses`.
    :rtype: Generator consisting of arrays.

    .. note:: The *overlap-discard* method is more commonly known as *overlap-save*.
    """
    nwindow = nhop + ntaps - 1
    noverlap = ntaps - 1
    # Overlapping windows; each window produces `nhop` output samples.
    segments = blocks(signal, nwindow, noverlap)
    # Stops as soon as either the signal or the impulse responses run out.
    for segment, ir in zip(segments, impulse_responses):
        yield _convolve(segment, ir, mode='valid')
def cumsum(iterator):
    """Cumulative sum.

    .. seealso:: :func:`itertools.accumulate` and :func:`np.cumsum`
    """
    for running_total in itertools.accumulate(iterator, operator.add):
        yield running_total
def cummul(iterator):
    """Cumulative product.

    .. seealso:: :func:`itertools.accumulate` and :func:`np.cumprod`
    """
    for running_product in itertools.accumulate(iterator, operator.mul):
        yield running_product
#def diff(iterator, initial_value=0.0):
#"""Differentiate `iterator`.
#"""
#current = next(iterator)
#while True:
#old = current
#current = next(iterator)
#yield current-old
#def integrate(iterator, initial_value=0.0):
#total = 0.0
#while True:
#total += next(iterator)
#yield total
def vdl(signal, times, delay, initial_value=0.0):
    """Variable delay line which delays `signal` at 'times' with 'delay'.

    :param signal: Signal to be delayed.
    :type signal: Iterator
    :param times: Sample times corresponding to `signal`.
    :type times: Iterator
    :param delay: Delay.
    :type delay: Iterator
    :param initial_value: Sample to yield before first actual sample is yielded due to initial delay.

    .. note:: Times and delay should have the same unit, e.g. both in samples or both in seconds.
    """
    # Peek at the first delay without consuming the iterator.
    dt0, delay = cytoolz.peek(delay)
    times, _times = itertools.tee(times)
    # Yield initial value before interpolation kicks in
    # Note that this method, using tee, buffers all samples that will be discarded.
    # Therefore, room for optimization!
    n = 0
    if initial_value is not None:
        # One `initial_value` for every output time that precedes the
        # delayed start of the signal.
        while next(_times) < dt0:
            n += 1
            yield initial_value
    times1, times2 = itertools.tee(times)
    # Delayed sample positions are times+delay; resample onto `times1`.
    interpolated = interpolate_linear(map(operator.add, times2, delay), signal, times1)
    yield from cytoolz.drop(n, interpolated) # FIXME: move drop before interpolation, saves memory
# Reference implementation
def filter_ba_reference(x, b, a):
    r"""Filter signal `x` with linear time-invariant IIR filter that has numerator coefficients `b` and denominator coefficients `a`.

    :param b: Numerator coefficients.
    :param a: Denominator coefficients. The first value is assumed to be one
        and is dropped (the difference equation is normalized by it).
    :param x: Signal (iterator of samples).
    :returns: Filtered signal (generator).

    This function applies a linear time-invariant IIR filter using the difference equation

    .. math:: y[n] = -\sum_k=1^M a_k y[n-k] + \sum_k=0^(N-1) b_k x[n-k]
    """
    b = np.array(b)
    a = np.array(a[1:])  # drop a[0]; see docstring
    na = len(a)
    nb = len(b)
    # Ring buffers holding the last nb inputs and na outputs.
    xd = collections.deque([0]*nb, nb)
    yd = collections.deque([0]*na, na)
    # Invert filter coefficients order so the newest sample lines up with
    # the first coefficient.
    b = b[::-1]
    a = a[::-1]
    # BUG FIX: the original looped `while True: xd.append(next(x))`; under
    # PEP 479 (Python 3.7+) the terminating StopIteration is converted to a
    # RuntimeError inside a generator. Iterating the input directly lets the
    # generator end cleanly when the signal is exhausted.
    for sample in x:
        # Update inputs buffer with new signal value
        xd.append(sample)
        # Calculate output from difference equation
        result = -sum(a*yd) + sum(b*xd)
        # Update outputs buffer with new output value
        yd.append(result)
        # Yield current output value
        yield result
def filter_ba(x, b, a):
    """Apply IIR filter to `x`.

    :param x: Signal.
    :param b: Numerator coefficients.
    :param a: Denominator coefficients.
    :returns: Filtered signal.

    .. seealso:: :func:`filter_sos` and :func:`scipy.signal.lfilter`
    """
    a = a[1:] # Drop the first value that is a one. See difference equation.
    # Drop trailing zeros. They're not contributing and introduce a bug as well (leading zero in result).
    # NOTE(review): if all coefficients of `a` or `b` are zero these loops
    # eventually index an empty sequence and raise IndexError -- confirm
    # callers never pass degenerate coefficient sets.
    while a[-1] == 0.0:
        a = a[:-1]
    while b[-1] == 0.0:
        b = b[:-1]
    a = np.array(a)
    b = np.array(b)
    na = len(a)
    nb = len(b)
    # Input and output history buffers for the difference equation.
    xd = np.zeros(nb)
    yd = np.zeros(na)
    # Per-sample filtering is delegated to the compiled cython helper.
    yield from _filter_ba(x, b, a, xd, yd, nb, na)
def filter_sos(x, sos):
    """Apply IIR filter to `x`.

    :param x: Signal.
    :param sos: Second-order sections, shape (n_sections, 6).
    :returns: Filtered signal.

    .. seealso:: :func:`filter_ba` and :func:`scipy.signal.sosfilt`
    """
    sos = np.atleast_2d(sos)
    if sos.ndim != 2:
        raise ValueError('sos array must be 2D')
    n_sections, m = sos.shape
    if m != 6:
        raise ValueError('sos array must be shape (n_sections, 6)')
    # Cascade the biquads: feed the output of each section into the next.
    filtered = x
    for section in sos:
        filtered = filter_ba(filtered, section[:3], section[3:])
    yield from filtered
# Public API of this (private) module.
__all__ = ['blocks', 'convolve_overlap_add', 'convolve', 'convolve_overlap_discard', 'diff', 'interpolate_linear', 'filter_ba', 'filter_ba_reference', 'filter_sos', 'vdl']
|
{
"content_hash": "3b39d2f53da30a7d5b400677eccc4bf6",
"timestamp": "",
"source": "github",
"line_count": 513,
"max_line_length": 171,
"avg_line_length": 34.372319688109165,
"alnum_prop": 0.6745307094652073,
"repo_name": "FRidh/streaming",
"id": "bed141559464f98a73563a087fef720703073611",
"size": "17633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streaming/_iterator.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "275"
},
{
"name": "Nix",
"bytes": "325"
},
{
"name": "Python",
"bytes": "65688"
}
],
"symlink_target": ""
}
|
"""TensorFlow Ops to work with embeddings.
Note: categorical variables are handled via embeddings in many cases.
For example, in case of words.
"""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope as vs
def embedding_lookup(params, ids, name="embedding_lookup"):
  """Provides a N dimensional version of tf.embedding_lookup.
  Ids are flattened to a 1d tensor before being passed to embedding_lookup
  then, they are unflattened to match the original ids shape plus an extra
  leading dimension of the size of the embeddings.
  Args:
    params: List of tensors of size D0 x D1 x ... x Dn-2 x Dn-1.
    ids: N-dimensional tensor of B0 x B1 x .. x Bn-2 x Bn-1.
      Must contain indexes into params.
    name: Optional name for the op.
  Returns:
    A tensor of size B0 x B1 x .. x Bn-2 x Bn-1 x D1 x ... x Dn-2 x Dn-1
    containing the values from the params tensor(s) for indices in ids.
  Raises:
    ValueError: if some parameters are invalid.
  """
  with ops.op_scope([params, ids], name, "embedding_lookup"):
    params = ops.convert_to_tensor(params)
    ids = ops.convert_to_tensor(ids)
    shape = array_ops_.shape(ids)
    # Flatten ids to rank 1; reduce_prod(..., keep_dims=True) yields a
    # one-element shape tensor, so the reshape produces a vector.
    ids_flat = array_ops_.reshape(
        ids, math_ops.reduce_prod(shape, keep_dims=True))
    # Pass `name` by keyword: in tf.nn.embedding_lookup the third positional
    # parameter is `partition_strategy` (in all but the earliest TF releases),
    # so the previous positional call misrouted the op name into it.
    embeds_flat = nn.embedding_lookup(params, ids_flat, name=name)
    # Restore the original ids shape, with a trailing embedding dimension.
    embed_shape = array_ops_.concat(0, [shape, [-1]])
    embeds = array_ops_.reshape(embeds_flat, embed_shape)
    embeds.set_shape(ids.get_shape().concatenate(params.get_shape()[1:]))
    return embeds
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Embed a categorical variable drawn from `n_classes` distinct classes.

  Args:
    tensor_in: Tensor of class identifiers (batch or N-dimensional).
    n_classes: Total number of distinct classes.
    embedding_size: Length of the embedding vector used per class.
    name: Variable-scope name for this categorical variable.

  Returns:
    Tensor shaped like the input with one extra trailing embedding dimension.

  Example:
    categorical_variable([1, 2], 5, 10, "my_cat") returns a 2 x 10 tensor,
    one embedding row per input class id.
  """
  with vs.variable_scope(name):
    table_name = name + "_embeddings"
    embedding_table = vs.get_variable(table_name, [n_classes, embedding_size])
    return embedding_lookup(embedding_table, tensor_in)
|
{
"content_hash": "7e1ca2cb48dd8d47d6fe4a6fa579cb12",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 80,
"avg_line_length": 39.609756097560975,
"alnum_prop": 0.7158251231527094,
"repo_name": "plowman/python-mcparseface",
"id": "4cc709de76c43d665467fa106b86fc1a8feec220",
"size": "3248",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "models/syntaxnet/tensorflow/tensorflow/contrib/learn/python/learn/ops/embeddings_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1130"
},
{
"name": "C",
"bytes": "792202"
},
{
"name": "C#",
"bytes": "1883817"
},
{
"name": "C++",
"bytes": "17605262"
},
{
"name": "CMake",
"bytes": "68613"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "Emacs Lisp",
"bytes": "7809"
},
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "Go",
"bytes": "8549"
},
{
"name": "HTML",
"bytes": "764474"
},
{
"name": "Java",
"bytes": "2864887"
},
{
"name": "JavaScript",
"bytes": "404087"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "M4",
"bytes": "27350"
},
{
"name": "Makefile",
"bytes": "122687"
},
{
"name": "Objective-C",
"bytes": "2664448"
},
{
"name": "Objective-C++",
"bytes": "2897"
},
{
"name": "Protocol Buffer",
"bytes": "904354"
},
{
"name": "Python",
"bytes": "7674638"
},
{
"name": "Ruby",
"bytes": "83163"
},
{
"name": "Shell",
"bytes": "277892"
},
{
"name": "Swift",
"bytes": "20550"
},
{
"name": "TypeScript",
"bytes": "403037"
},
{
"name": "VimL",
"bytes": "3759"
}
],
"symlink_target": ""
}
|
from peewee import PostgresqlDatabase, Model, TextField, DateTimeField, ForeignKeyField, BooleanField, IntegerField
from datetime import datetime
from config import Config
import os
import urllib.parse
# Break the configured DATABASE_URL (e.g. postgres://user:pass@host/dbname)
# into the individual connection parameters peewee expects.
db_parsed_url = urllib.parse.urlparse(Config.DATABASE_URL)
username = db_parsed_url.username
password = db_parsed_url.password
# urlparse keeps the leading "/" on the path; strip it to get the db name.
database = db_parsed_url.path[1:]
hostname = db_parsed_url.hostname
# Shared peewee database handle for all models below. autorollback keeps the
# connection usable after a failed query without explicit transaction handling.
postgres_db = PostgresqlDatabase(
    database=database,
    user=username,
    password=password,
    host=hostname,
    autocommit=True,
    autorollback=True)
class User(Model):
    # Blog author/admin account. The methods and properties below implement
    # the Flask-Login user interface (get_id / is_active / is_authenticated /
    # is_anonymous) — presumably consumed by a flask_login.LoginManager;
    # confirm against the application setup.
    name = TextField(unique=True)
    admin = BooleanField(default=False)
    # NOTE(review): nothing here enforces hashing — verify that callers store
    # a password hash, not plaintext.
    password = TextField()
    active = BooleanField(default=True)
    created_at = DateTimeField(default=datetime.now)
    # Not auto-updated by peewee; callers must set this on save.
    updated_at = DateTimeField(default=datetime.now)
    def get_id(self):
        # Flask-Login requires the id as a string.
        return str(self.id)
    @property
    def is_active(self):
        return self.active
    @property
    def is_authenticated(self):
        # Any persisted User counts as authenticated.
        return True
    @property
    def is_anonymous(self):
        return False
    class Meta:
        database = postgres_db
class Post(Model):
    # A blog entry; `slug` is the URL identifier. `tags` is a single text
    # field whose format is not enforced here — presumably delimiter-separated;
    # confirm at the call sites.
    title = TextField()
    description = TextField()
    content = TextField()
    tags = TextField()
    slug = TextField()
    # Reverse accessor: user.posts
    posted_by = ForeignKeyField(User, related_name='posts')
    created_at = DateTimeField(default=datetime.now)
    # Not auto-updated by peewee; callers must set this on save.
    updated_at = DateTimeField(default=datetime.now)
    class Meta:
        database = postgres_db
class Settings(Model):
    # Site-wide configuration. Nothing here enforces a single row — presumably
    # the application reads one settings record; confirm at the call sites.
    blog_title = TextField()
    initialized = BooleanField()
    # Two configurable header/footer icons: link target plus icon CSS class.
    icon_1_link = TextField()
    icon_1_icon_type = TextField()
    icon_2_link = TextField()
    icon_2_icon_type = TextField()
    posts_per_page = IntegerField()
    number_of_recent_posts = IntegerField()
    max_synopsis_chars = IntegerField()
    class Meta:
        database = postgres_db
|
{
"content_hash": "8045c0713e574925246c169167ef39b5",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 115,
"avg_line_length": 25.933333333333334,
"alnum_prop": 0.6632390745501285,
"repo_name": "adamlamers/dokku-flask-blog",
"id": "3ff682e6b62d89d303d944efe11a5b150d60f052",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1789"
},
{
"name": "HTML",
"bytes": "32144"
},
{
"name": "Python",
"bytes": "15831"
}
],
"symlink_target": ""
}
|
import os
import shutil
import platform
import pytest
import wandb
from wandb.sdk.data_types import saved_model as SM
from wandb.apis.public import Artifact
from wandb.apis.public import _DownloadedArtifactEntry
from wandb.sdk.wandb_artifacts import ArtifactEntry
from . import saved_model_constructors
# Local aliases for the fixture constructors; each builds a small model
# instance of the corresponding framework for use in the tests below.
sklearn_model = saved_model_constructors.sklearn_model
pytorch_model = saved_model_constructors.pytorch_model
keras_model = saved_model_constructors.keras_model
def test_SavedModel_sklearn(runner, mocker):
    # End-to-end SavedModel artifact round-trip for a scikit-learn model.
    savedModel_test(runner, mocker, sklearn_model())
def test_SavedModel_pytorch(runner, mocker):
    # End-to-end round-trip for a PyTorch model; the constructors module is
    # passed as an extra Python dependency file so the model class can be
    # re-imported when the artifact is loaded.
    savedModel_test(
        runner,
        mocker,
        pytorch_model(),
        [os.path.abspath(saved_model_constructors.__file__)],
    )
@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="TODO: Windows is legitimately busted",
)
def test_SavedModel_keras(runner, mocker):
    # End-to-end round-trip for a Keras model (skipped on Windows).
    savedModel_test(runner, mocker, keras_model())
def test_SklearnSavedModel(runner):
    # The sklearn adapter must accept sklearn models and reject the others.
    subclass_test(
        runner,
        SM._SklearnSavedModel,
        [sklearn_model()],
        [
            keras_model(),
            pytorch_model(),
        ],
    )
def test_PytorchSavedModel(runner):
    # The pytorch adapter must accept pytorch models and reject the others.
    subclass_test(
        runner,
        SM._PytorchSavedModel,
        [pytorch_model()],
        [
            keras_model(),
            sklearn_model(),
        ],
    )
@pytest.mark.skipif(
    platform.system() == "Windows",
    reason="TODO: Windows is legitimately busted",
)
def test_TensorflowKerasSavedModel(runner):
    # The keras adapter must accept keras models and reject the others
    # (skipped on Windows).
    subclass_test(
        runner,
        SM._TensorflowKerasSavedModel,
        [keras_model()],
        [sklearn_model(), pytorch_model()],
    )
# These classes are used to patch the API
# so we can simulate downloading an artifact without
# actually making a network round trip (using the local filesystem)
class DownloadedArtifactEntryPatch(_DownloadedArtifactEntry):
    # Test double: replaces the network download with a local file copy.
    def download(self, root=None):
        root = root or self._parent_artifact._default_root()
        return self.copy(self.local_path, os.path.join(root, self.name))
class ArtifactEntryPatch(ArtifactEntry):
    # Test double: copies the local file to self.path instead of downloading.
    def download(self, root=None):
        # `root` is accepted for interface compatibility but ignored here.
        os.makedirs(os.path.dirname(self.path), exist_ok=True)
        shutil.copyfile(self.local_path, self.path)
        return self.path
def make_local_artifact_public(art, mocker):
    # Wrap a locally-built Artifact in a "public" Artifact whose entries
    # resolve through the local filesystem, so the download path can be
    # exercised without a network round trip.
    mocker.patch(
        "wandb.apis.public._DownloadedArtifactEntry", DownloadedArtifactEntryPatch
    )
    mocker.patch("wandb.sdk.wandb_artifacts.ArtifactEntry", ArtifactEntryPatch)
    # Minimal fake server response: only the attributes the public Artifact
    # constructor reads are populated.
    pub = Artifact(
        None,
        "FAKE_ENTITY",
        "FAKE_PROJECT",
        "FAKE_NAME",
        {
            "artifactSequence": {
                "name": "FAKE_SEQUENCE_NAME",
            },
            "aliases": [],
            "id": "FAKE_ID",
            "digest": "FAKE_DIGEST",
            "state": None,
            "size": None,
            "createdAt": None,
            "updatedAt": None,
            "artifactType": {
                "name": "FAKE_TYPE_NAME",
            },
        },
    )
    # Reuse the local manifest and force its entries onto the patched class so
    # entry.download() copies from disk.
    pub._manifest = art._manifest
    for val in pub._manifest.entries.values():
        val.__class__ = ArtifactEntryPatch
    return pub
# External SavedModel tests (user facing)
def savedModel_test(runner, mocker, model, py_deps=None):
    # Shared round-trip driver: wrap `model` in a _SavedModel, log it into an
    # artifact, then fetch it back through a simulated public artifact.
    # The abstract base may not be constructed directly.
    with pytest.raises(TypeError):
        _ = SM._SavedModel(model)
    kwargs = {}
    if py_deps:
        kwargs["dep_py_files"] = py_deps
    # init() dispatches to the framework-specific subclass.
    sm = SM._SavedModel.init(model, **kwargs)
    with runner.isolated_filesystem():
        art = wandb.Artifact("name", "type")
        art.add(sm, "model")
        assert art.manifest.entries[f"model.{sm._log_type}.json"] is not None
        pub_art = make_local_artifact_public(art, mocker)
        # Fetching back through the public artifact must reconstruct a model.
        sm2 = pub_art.get("model")
        assert sm2 is not None
# Internal adapter tests (non user facing)
def subclass_test(
    runner,
    adapter_cls,
    valid_models,
    invalid_models,
):
    """Exercise a _SavedModel adapter subclass directly.

    Checks that `adapter_cls` accepts every model in `valid_models`, rejects
    every model in `invalid_models`, and can round-trip each valid model
    through its file-level _serialize/_deserialize pair.

    Args:
        runner: test runner providing `isolated_filesystem()`.
        adapter_cls: the adapter class under test.
        valid_models: models the adapter should accept.
        invalid_models: models the adapter should reject.
    """
    # Verify valid models can be adapted
    for model in valid_models:
        assert adapter_cls._validate_obj(model)
    # Verify invalid models are denied
    for model in invalid_models:
        assert not adapter_cls._validate_obj(model)
    # Verify file-level serialization and deserialization inside a scratch
    # directory. (An unused `i = 0` counter from an earlier revision was
    # removed here.)
    with runner.isolated_filesystem():
        for model in valid_models:
            path = adapter_cls._tmp_path()
            adapter_cls._serialize(model, path)
            model2 = adapter_cls._deserialize(path)
            assert model2 is not None
|
{
"content_hash": "bab12d2ef10c355a0b6e5c970317d5ae",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 82,
"avg_line_length": 27.234939759036145,
"alnum_prop": 0.6270736562707365,
"repo_name": "wandb/client",
"id": "6289820517a64fc57313f05ca051cef449b19de1",
"size": "4521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_saved_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "Dockerfile",
"bytes": "3491"
},
{
"name": "Jupyter Notebook",
"bytes": "7751"
},
{
"name": "Makefile",
"bytes": "1863"
},
{
"name": "Objective-C",
"bytes": "80764"
},
{
"name": "Python",
"bytes": "3634228"
},
{
"name": "Shell",
"bytes": "4662"
}
],
"symlink_target": ""
}
|
"""Starter script for Nova Compute."""
import shlex
import sys
import os_vif
from oslo_log import log as logging
from oslo_privsep import priv_context
from oslo_reports import guru_meditation_report as gmr
from nova.cmd import common as cmd_common
from nova.compute import rpcapi as compute_rpcapi
from nova.conductor import rpcapi as conductor_rpcapi
import nova.conf
from nova import config
from nova import objects
from nova.objects import base as objects_base
from nova import service
from nova import utils
from nova import version
# Global nova configuration object and the module-level logger for this
# service entry point.
CONF = nova.conf.CONF
LOG = logging.getLogger('nova.compute')
def main():
    """Entry point for the nova-compute service.

    Parses configuration, prepares the process (privsep, monkey patching,
    versioned object registration), then serves the compute RPC service
    until shutdown. The initialization order below is significant.
    """
    config.parse_args(sys.argv)
    logging.setup(CONF, 'nova')
    # The privsep daemon needs the configured root helper command.
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    objects.register_all()
    # Ensure os-vif objects are registered and plugins loaded
    os_vif.initialize()
    # Enable guru-meditation error reports for this process.
    gmr.TextGuruMeditation.setup_autorun(version)
    # nova-compute must not hit the database directly; route all object
    # operations through the conductor via the indirection API.
    cmd_common.block_db_access('nova-compute')
    objects_base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()
    objects.Service.enable_min_version_cache()
    server = service.Service.create(binary='nova-compute',
                                    topic=compute_rpcapi.RPC_TOPIC)
    service.serve(server)
    service.wait()
|
{
"content_hash": "aa6ce205147beaeef448f73f16b98313",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 29.930232558139537,
"alnum_prop": 0.7404817404817405,
"repo_name": "jianghuaw/nova",
"id": "eb48853d45fb79600c315d0bcfb3482bc1d61cae",
"size": "2019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/cmd/compute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1435"
},
{
"name": "PHP",
"bytes": "32515"
},
{
"name": "Python",
"bytes": "19932348"
},
{
"name": "Shell",
"bytes": "28290"
},
{
"name": "Smarty",
"bytes": "339635"
}
],
"symlink_target": ""
}
|
from testtools import TestCase
from kmip.core.errors import ErrorStrings
from kmip.core import utils
class TestUtils(TestCase):
    """Tests for the byte-length helper in kmip.core.utils."""
    def setUp(self):
        super(TestUtils, self).setUp()
    def tearDown(self):
        super(TestUtils, self).tearDown()
    def test_count_bytes(self):
        # 65535 (0xFFFF) is the largest value encodable in two bytes.
        num = 65535
        bytes_exp = 2
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} byte(s)'.format(num, bytes_exp,
                                                       bytes_obs))
    def test_count_bytes_overflow(self):
        # 65536 (0x10000) just crosses the two-byte boundary.
        num = 65536
        bytes_exp = 3
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} bytes'.format(num, bytes_exp,
                                                     bytes_obs))
    def test_count_bytes_zero(self):
        # Zero still occupies one byte.
        num = 0
        bytes_exp = 1
        bytes_obs = utils.count_bytes(num)
        self.assertEqual(bytes_exp, bytes_obs,
                         'Value {0} requires {1} bytes to encode, '
                         'received {2} byte(s)'.format(num, bytes_exp,
                                                       bytes_obs))
class TestBytearrayStream(TestCase):
    """Tests for kmip.core.utils.BytearrayStream construction.

    Most read/peek tests are unfinished placeholders that skip themselves.
    """
    def setUp(self):
        super(TestBytearrayStream, self).setUp()
        self.stream = utils.BytearrayStream()
        # Pre-built failure-message templates: (field, expected, received).
        self.bad_type = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                         'type', '{1}', '{2}')
        self.bad_len = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                        'length', '{1}', '{2}')
        self.bad_val = ErrorStrings.BAD_EXP_RECV.format('BytearrayStream.{0}',
                                                        'value', '{1}', '{2}')
    def tearDown(self):
        super(TestBytearrayStream, self).tearDown()
    def test_init(self):
        # A one-byte initial value should be stored verbatim in the buffer.
        value = b'\x00'
        b = utils.BytearrayStream(value)
        buf_type = type(b.buffer)
        msg = self.bad_type.format('buffer', type(b''), buf_type)
        self.assertIsInstance(b.buffer, type(b''),
                              msg.format(type(b''), type(b.buffer)))
        length = len(b.buffer)
        msg = self.bad_len.format('buffer', 1, length)
        self.assertEqual(1, length, msg)
        content = b.buffer
        msg = self.bad_val.format('buffer', value, content)
        self.assertEqual(value, content, msg)
    def test_init_unset(self):
        # Default construction yields an empty bytes buffer.
        b = utils.BytearrayStream()
        buf_type = type(b.buffer)
        msg = self.bad_type.format('buffer', type(b''), buf_type)
        self.assertIsInstance(b.buffer, type(b''),
                              msg.format(type(b''), type(b.buffer)))
        length = len(b.buffer)
        msg = self.bad_len.format('buffer', 0, length)
        self.assertEqual(0, length, msg)
    def test_read(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
    def test_write(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
    def test_peek(self):
        # TODO (peter-hamilton) Finish implementation.
        # NOTE(review): the statements below are dead code (the locals cancel
        # out and nothing is asserted); unlike its siblings this test does not
        # skip, so it currently passes silently.
        value = (b'\x00\x01\x02\x03')
        expected = value
        b = expected
        expected = b
        b = utils.BytearrayStream(value)
    def test_peek_overflow(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
    def test_peek_empty(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
    def test_peek_none(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
    def test_length(self):
        # TODO (peter-hamilton) Finish implementation.
        self.skip('')
|
{
"content_hash": "3ecd73f0a2223dd59569737d4016a28f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 79,
"avg_line_length": 33.1764705882353,
"alnum_prop": 0.5245694022289767,
"repo_name": "dmend/PyKMIP",
"id": "324f318e29ad861261792f203f1ee5a2235414bc",
"size": "4594",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "kmip/tests/unit/core/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1202704"
},
{
"name": "Shell",
"bytes": "80"
}
],
"symlink_target": ""
}
|
import re, pickle, editdistance
from teafacto.util import tokenize, ticktock, isstring, argprun
from nltk.corpus import stopwords
from nltk.stem import porter
from teafacto.procutil import wordids2string
from IPython import embed
class Processor(object):
    """Normalizes raw question text into whitespace-joined token strings,
    optionally Porter-stemmed."""

    def __init__(self):
        self.stemmer = porter.PorterStemmer()

    def stemmedprocessline(self, x):
        """Strip possessive apostrophes, then tokenize and stem each token."""
        for possessive in ("'s", "' s", "'"):
            x = x.replace(possessive, "")
        stem = self.stemmer.stem
        return " ".join(stem(token) for token in tokenize(x))

    def processline(self, x):
        """Tokenize and re-join with single spaces (no stemming)."""
        return " ".join(tokenize(x))
class SubjectSearch(object):
    # Name-based search over Freebase subject labels: an exact index from the
    # processed label to entity records, a word-level reverse index for
    # edit-distance-1 fuzzy fallback, and n-gram search over whole sentences.
    stops = stopwords.words("english")
    customstops = set("the a an of on is at in by did do not does had has have for what which when where why who whom how".split())
    smallstops = set("the a an of on at by".split())
    def __init__(self, subjinfop="subjs-counts-labels-types.fb2m.tsv", revind=None):
        # subjinfop: either a path to the tsv to build from, or a prebuilt
        # {name: [entity info dicts]} index (the load() path uses the latter).
        self.indexdict = {}
        self.ignoresubgrams = True
        self.processor = Processor()
        self.revind = revind
        self.maxeditdistance = 1
        if isstring(subjinfop):
            self.build(subjinfop)
        elif isinstance(subjinfop, dict):
            self.indexdict = subjinfop
        else:
            raise Exception("unknown stuff")
    def build(self, p):
        # Build the label index from a tsv of
        # fb_id, count1, count2, label, type_id, type_label rows.
        i = 0
        tt = ticktock("builder")
        tt.tick("building")
        for line in open(p):
            sline = line[:-1].split("\t")
            fb_id = sline[0]
            # popularity proxy: sum of both triple counts
            triplecount = int(sline[1]) + int(sline[2])
            name = self.processor.processline(sline[3])
            type_id = sline[4]
            type_id = type_id if type_id != "<UNK>" else None
            type_name = " ".join(tokenize(sline[5]))
            type_name = type_name if type_name != " ".join(tokenize("<UNK>")) else None
            if name not in self.indexdict:
                self.indexdict[name] = []
            self.indexdict[name].append({"fb_id": fb_id,
                "triplecount": triplecount, "type_id": type_id,
                "type_name": type_name})
            i += 1
            if i % 1000 == 0:
                tt.live("{}k".format(i//1000))
        tt.tock("built")
    def save(self, p):
        # Text dump: "::<name>" header lines, followed by one tab-separated
        # record per entity under that name.
        with open(p, "w", 1) as f:
            for item in self.indexdict.items():
                f.write("::{}\n".format(item[0]))
                for ve in item[1]:
                    f.write("{}\t{}\t{}\t{}\n".format(
                        *[ve[x] for x in "fb_id triplecount type_id type_name".split()]
                    ))
    @staticmethod
    def load(p):
        # Inverse of save(); also builds the word-level reverse index.
        tt = ticktock("SubjectSearch")
        tt.tick("loading")
        d = {}
        l = []
        k = None
        with open(p) as f:
            for line in f:
                if line[:2] == "::":
                    if k is None:
                        assert(l == [])
                    else:
                        d[k] = l
                    l = []
                    k = line[2:-1]
                else:
                    splits = line[:-1].split("\t")
                    le = dict(zip("fb_id triplecount type_id type_name".split(),
                        [splits[0], int(splits[1])] + splits[2:]))
                    l.append(le)
            # flush the final group after the loop ends
            d[k] = l
        tt.tock("loaded")
        ret = SubjectSearch(subjinfop=d, revind=SubjectSearch.buildrevindex(d))
        return ret
    @staticmethod
    def buildrevindex(d):
        # word -> list of index names containing that word (words of length
        # < 2 are skipped); used by the fuzzy fallback in _search().
        revind = {}
        for k in d.keys():
            words = k.split()
            for word in words:
                if len(word) < 2:
                    continue
                if word not in revind:
                    revind[word] = []
                revind[word].append(k)
        return revind
    def search(self, s, top=5, edsearch=True):
        # Public entry: normalize the query string, then do the index lookup.
        ss = self.processor.processline(s)
        return self._search(ss, top=top, edsearch=edsearch)
    def _search(self, ss, top=5, edsearch=True):
        # Exact lookup ranked by triplecount; if nothing matches and fuzzy
        # search is enabled, retry with edit-distance-close index names.
        res = self.indexdict[ss] if ss in self.indexdict else []
        sres = sorted(res, key=lambda x: x["triplecount"], reverse=True)
        ret = sres[:min(top, len(sres))]
        for x in ret:
            x.update({"name": ss})
        if len(ret) == 0 and self.revind is not None and edsearch and self.maxeditdistance > 0: # no exact matches
            nonexactsearchstrings = set()
            words = ss.split()
            if len(words) >= 2:
                for word in words:
                    if len(word) < 2 or word in self.customstops:
                        continue
                    if word not in self.revind:
                        continue
                    # candidate names sharing a word with the query; cheap
                    # length filter before the edit-distance computation
                    for nonexcan in self.revind[word]:
                        if abs(len(nonexcan) - len(ss)) >= 3:
                            continue
                        nonexcanred = nonexcan.replace(" '", "")
                        #embed()
                        if editdistance.eval(nonexcanred, ss) <= self.maxeditdistance:
                            nonexactsearchstrings.add(nonexcan)
            for nonexactsearchstring in nonexactsearchstrings:
                # edsearch=False prevents recursive fuzzy expansion
                edsearchres = self._search(nonexactsearchstring, top=top, edsearch=False)
                #embed()
                ret.extend(edsearchres)
        return ret
    def searchsentence(self, sentence, top=5):
        # Search every n-gram of the sentence; edit-distance results are
        # appended after exact ones, deduplicated by fb_id.
        if sentence[-1] == "?":
            sentence = sentence[:-1]
        words = self.processor.processline(sentence).split()
        if self.ignoresubgrams:
            exact_res = self._searchngrams(words, top=top, edsearch=False)
            edit_res = self._searchngrams(words, top=1, edsearch=True)
            exact_res_ents = set([x["fb_id"] for x in exact_res])
            edit_res_plus = [x for x in edit_res if x["fb_id"] not in exact_res_ents]
            res = exact_res + edit_res_plus
        else:
            res = self._recurngramsearch(words, top=top)
        return res
    def _searchngrams(self, words, top=5, edsearch=False):
        # Try n-grams from longest to shortest; once an n-gram matches, its
        # word positions are banned so strict sub-grams are not searched again.
        ngramsize = len(words)
        bannedpos = set()
        ret = []
        while ngramsize > 0:
            for i in range(0, len(words) - ngramsize + 1):
                coveredpos = set(range(i, i + ngramsize))
                if len(coveredpos.difference(bannedpos)) == 0 \
                        and self.ignoresubgrams and not edsearch:
                    continue
                if edsearch and len(coveredpos.intersection(bannedpos)) > 0 \
                        and self.ignoresubgrams:
                    continue
                ss = words[i: i + ngramsize]
                # lone stopwords are never searched
                if len(ss) == 1 and (ss[0] in self.stops):
                    res = []
                else:
                    res = self._search(" ".join(ss), top=top, edsearch=edsearch)
                if len(res) > 0 and self.ignoresubgrams:
                    # matches starting with a small stopword only ban their
                    # first position, leaving the rest searchable
                    if not edsearch and ss[0] in self.smallstops:
                        #if False and ngramsize > 1:
                        #    coveredpos = set(range(i+2, i + ngramsize))
                        #    coveredpos.add(i)
                        #else:
                        coveredpos = set([i])
                    bannedpos.update(coveredpos)
                ret += res
            ngramsize -= 1
        return ret
    def _recurngramsearch(self, seq, top=5, right=False):
        # Exhaustive recursive n-gram search (no sub-gram pruning); `right`
        # avoids re-exploring left-trimmed prefixes twice.
        searchterm = " ".join(seq)
        res = self._search(searchterm, top=top)
        if len(seq) == 1:
            return res if seq[0] not in self.stops else []
        else:
            lres = self._recurngramsearch(seq[:-1], top=top, right=False) if not right else []
            rres = self._recurngramsearch(seq[1:], top=top, right=True)
            res = res + lres + rres
        return res
    def searchwordmat(self, wordmat, wd, top=5):
        # Batch candidate generation: one row of word ids per question,
        # returning a list of fb_id candidate lists.
        cans = []
        rwd = {v: k for k, v in wd.items()}
        tt = ticktock("wordmatsearcher")
        tt.tick("started searching")
        for i in range(wordmat.shape[0]):
            sentence = wordids2string(wordmat[i], rwd=rwd)
            #ssentence.replace(" '", "")
            res = self.searchsentence(sentence, top=top)
            cans.append([r["fb_id"] for r in res])
            tt.progress(i, wordmat.shape[0], live=True)
        tt.tock("done searching")
        return cans
def gensubjclose(cansp="traincans10c.pkl"):
traincans = pickle.load(open(cansp))
allcans = set()
for traincane in traincans:
allcans.update(traincane)
print len(allcans)
qsofcans = {k: set() for k in allcans}
for i in range(len(traincans)):
traincane = traincans[i]
for traincan in traincane:
qsofcans[traincan].add(i)
cansofcans = {k: set() for k in allcans}
for k, v in qsofcans.items():
for ve in v:
cansofcans[k].update(traincans[ve])
for k in cansofcans:
cansofcans[k].remove(k)
cansofcans[k] = set(list(cansofcans[k])[:500]) if len(cansofcans[k]) > 500 else cansofcans[k]
return cansofcans
def run(numcans=10,
        build=False,
        load=False,
        gencan=False,
        genclose=False,
        test=False,
        editdistance=False,
        data="test", # or "train" or "valid"
        ):
    # CLI driver (invoked via argprun): each flag selects one build /
    # evaluation step; embed() drops into an IPython shell for inspection.
    if False:
        # disabled debugging snippet, kept for reference
        p = Processor()
        o = p.processline("porter ' s stemmer works ' in united states")
        print o
    if build:
        # build the subject index from the tsv and persist it
        s = SubjectSearch(); s.save("subjinfo.idxdic")
        embed()
    if load:
        s = SubjectSearch.load("subjinfo.idxdic")
        # toggle fuzzy (edit-distance-1) matching
        if editdistance:
            s.maxeditdistance = 1
        else:
            s.maxeditdistance = 0
        embed()
        #s.searchsentence("what is the book e about")
    if gencan:
        # NOTE(review): relies on `s` created by the load branch above;
        # running with gencan but without load raises NameError.
        import pickle
        print "loading datamat"
        x = pickle.load(open("datamat.word.fb2m.pkl"))
        print "datamat loaded"
        testdata = x[data][0]
        testgold = x[data][1]
        wd = x["worddic"]
        ed = x["entdic"]
        del x
        print "generating cans"
        testcans = s.searchwordmat(testdata, wd, top=numcans)
        testcanids = [[ed[x] for x in testcan] for testcan in testcans]
        # recall@numcans: fraction of questions whose gold entity is among
        # the generated candidates
        acc = 0
        for i in range(testgold.shape[0]):
            if testgold[i, 0] in testcanids[i]:
                acc += 1
        print acc * 1. / testgold.shape[0]
        embed()
    if False:
        print s.searchsentence("2 meter sessies?")
    if genclose:
        subjclose = gensubjclose("traincans10c.pkl")
        embed()
# Script entry point: argprun maps CLI flags onto run()'s keyword arguments.
if __name__ == "__main__":
    argprun(run)
|
{
"content_hash": "715de3f53aee8628bb9c42e86064c109",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 131,
"avg_line_length": 36.59861591695502,
"alnum_prop": 0.512338092086603,
"repo_name": "lukovnikov/teafacto",
"id": "f00ed29ef360170c3b2d30d247dc35750a616dc8",
"size": "10577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/simplequestions/clean/subjectsearch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "56665"
},
{
"name": "Python",
"bytes": "819448"
},
{
"name": "Shell",
"bytes": "102"
}
],
"symlink_target": ""
}
|
"""A Collection of useful miscellaneous functions.
misc.py:
Collection of useful miscellaneous functions.
:Author: Hannes Breytenbach (hannes@saao.ac.za)
"""
import collections.abc
import itertools
import operator
def first_true_index(iterable, pred=None, default=None):
    """Return the index of the first item for which ``pred`` is true.

    Parameters
    ----------
    iterable : iterable
        Items to scan.
    pred : callable, optional
        Predicate applied to each item; when None, the item's own
        truthiness is used.
    default : object, optional
        Value returned when no item matches.

    Returns
    -------
    Index of the first matching item, or ``default`` if none matches.
    """
    if pred is None:
        func = operator.itemgetter(1)
    else:
        func = lambda x: pred(x[1])
    # Scan (index, item) pairs and use None as the "not found" sentinel, so a
    # non-None `default` is returned as-is. (The previous
    # `next(..., default)` + `ii[0] if ii else default` pattern indexed into
    # `default` itself whenever no item matched and default was truthy.)
    ii = next(filter(func, enumerate(iterable)), None)
    return default if ii is None else ii[0]
def first_false_index(iterable, pred=None, default=None):
    """Return the index of the first item for which ``pred`` is false.

    Delegates to `first_true_index` with a negated predicate; when ``pred``
    is None, the item's own falsiness is tested.
    """
    negated = operator.not_ if pred is None else (lambda x: not pred(x))
    return first_true_index(iterable, negated, default)
def sortmore(*args, **kw):
    """
    Sorts any number of lists according to:
    optionally given item sorting key function(s) and/or a global sorting key function.
    Parameters
    ----------
    One or more lists
    Keywords
    --------
    globalkey : None
        revert to sorting by key function
    globalkey : callable
        Sort by evaluated value for all items in the lists
        (call signature of this function needs to be such that it accepts an
        argument tuple of items from each list.
        eg.: globalkey = lambda *l: sum(l) will order all the lists by the
        sum of the items from each list
    if key: None
        sorting done by value of first input list
        (in this case the objects in the first iterable need the comparison
        methods __lt__ etc...)
    if key: callable
        sorting done by value of key(item) for items in first iterable
    if key: tuple
        sorting done by value of (key(item_0), ..., key(item_n)) for items in
        the first n iterables (where n is the length of the key tuple)
        i.e. the first callable is the primary sorting criterion, and the
        rest act as tie-breakers.
    Returns
    -------
    Sorted lists
    Examples
    --------
    Capture sorting indeces:
    l = list('CharacterS')
    In [1]: sortmore( l, range(len(l)) )
    Out[1]: (['C', 'S', 'a', 'a', 'c', 'e', 'h', 'r', 'r', 't'],
             [0, 9, 2, 4, 5, 7, 1, 3, 8, 6])
    In [2]: sortmore( l, range(len(l)), key=str.lower )
    Out[2]: (['a', 'a', 'C', 'c', 'e', 'h', 'r', 'r', 'S', 't'],
             [2, 4, 0, 5, 7, 1, 3, 8, 9, 6])
    """
    first = list(args[0])
    # nothing to sort — return the inputs untouched
    if not len(first):
        return args
    globalkey = kw.get('globalkey')
    key = kw.get('key')
    if key is None:
        if globalkey:
            # if global sort function given and no local (secondary) key given, ==> no tiebreakers
            key = lambda x: 0
        else:
            key = lambda x: x # if no global sort and no local sort keys given, sort by item values
    if globalkey is None:
        globalkey = lambda *x: 0
    if not isinstance(globalkey, collections.abc.Callable):
        raise ValueError('globalkey needs to be callable')
    # `x` below is one "row": a tuple with one item from each input list.
    if isinstance(key, collections.abc.Callable):
        k = lambda x: (globalkey(*x), key(x[0]))
    elif isinstance(key, tuple):
        # per-list keys: None entries become no-ops; the generator is consumed
        # exactly once by zip inside `k`
        key = (k if k else lambda x: 0 for k in key)
        k = lambda x: (globalkey(*x),) + tuple(f(z) for (f, z) in zip(key, x))
    else:
        raise KeyError(
            "kw arg 'key' should be None, callable, or a sequence of callables, not {}"
            .format(type(key)))
    # sort the rows in lockstep, then transpose back into separate lists
    res = sorted(list(zip(*args)), key=k)
    if 'order' in kw:
        if kw['order'].startswith(('descend', 'reverse')):
            res = reversed(res)
    return tuple(map(list, zip(*res)))
def groupmore(func=None, *its):
    """Like itertools.groupby, but over any number of parallel iterables.

    The iterables are first co-sorted by ``func`` applied to items of the
    first one (groupby requires sorted input); each yielded element is
    ``(key, zipped_groups)``.
    """
    if not func:
        func = lambda x: x
    sorted_its = sortmore(*its, key=func)
    row_key = lambda row: func(row[0])
    grouped = itertools.groupby(zip(*sorted_its), row_key)
    return ((key, zip(*rows)) for key, rows in grouped)
|
{
"content_hash": "93bab06022ff675db9bbe3a688b85e9f",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 100,
"avg_line_length": 32.76984126984127,
"alnum_prop": 0.5996609348510535,
"repo_name": "bsipocz/astropy",
"id": "3730c34be35caf84868125d81fb9c8fe1d3b113f",
"size": "4129",
"binary": false,
"copies": "3",
"ref": "refs/heads/hacking",
"path": "astropy/io/ascii/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "442627"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9395160"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Minimal package definition: only the distribution name is declared; all
# other metadata falls back to setuptools defaults.
setup(
    name='dart',
)
|
{
"content_hash": "b89aff97d03de81462414d5507ae3022",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 28,
"avg_line_length": 11.2,
"alnum_prop": 0.6785714285714286,
"repo_name": "RetailMeNotSandbox/dart",
"id": "899d141de41a2673917c56c3a1028155b198fc98",
"size": "81",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103727"
},
{
"name": "HTML",
"bytes": "67636"
},
{
"name": "JavaScript",
"bytes": "2762304"
},
{
"name": "Nginx",
"bytes": "996"
},
{
"name": "PLpgSQL",
"bytes": "1475"
},
{
"name": "Python",
"bytes": "1025954"
},
{
"name": "Ruby",
"bytes": "5523"
},
{
"name": "Shell",
"bytes": "3100"
}
],
"symlink_target": ""
}
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import getpass
from copy import copy
from resource_management.libraries.functions.version import compare_versions
from resource_management import *
def setup_users():
  """
  Creates users before cluster installation
  """
  import params
  if not params.host_sys_prepped and not params.ignore_groupsusers_create:
    for group in params.group_list:
      Group(group,
      )
    for user in params.user_list:
      User(user,
          gid = params.user_to_gid_dict[user],
          groups = params.user_to_groups_dict[user],
      )
    # move the smoke-test user to a secure (>1000) uid when requested
    if params.override_uid == "true":
      set_uid(params.smoke_user, params.smoke_user_dirs)
    else:
      Logger.info('Skipping setting uid for smoke user as host is sys prepped')
  else:
    Logger.info('Skipping creation of User and Group as host is sys prepped or ignore_groupsusers_create flag is on')
    pass
  if params.has_hbase_masters:
    # group-writable tmp dir for hbase; 0775 is a Python 2 octal literal
    Directory (params.hbase_tmp_dir,
               owner = params.hbase_user,
               mode=0775,
               create_parents= True,
               cd_access="a",
    )
    if not params.host_sys_prepped and params.override_uid == "true":
      set_uid(params.hbase_user, params.hbase_user_dirs)
    else:
      Logger.info('Skipping setting uid for hbase user as host is sys prepped')
      pass
  if not params.host_sys_prepped:
    if params.has_namenode:
      create_dfs_cluster_admins()
  else:
    Logger.info('Skipping setting dfs cluster admin and tez view acls as host is sys prepped')
def create_dfs_cluster_admins():
  """
  dfs.cluster.administrators support format <comma-delimited list of usernames><space><comma-delimited list of group names>
  """
  import params
  groups_list = create_users_and_groups(params.dfs_cluster_administrators_group)
  # make the hdfs user a member of every cluster-administrator group
  User(params.hdfs_user,
       groups = params.user_to_groups_dict[params.hdfs_user] + groups_list,
       ignore_failures = params.ignore_groupsusers_create
  )
def create_users_and_groups(user_and_groups):
  """Ensure the users and groups in a "<users> <groups>" spec exist.

  `user_and_groups` follows the dfs.cluster.administrators format: a
  comma-delimited user list, a single whitespace, then a comma-delimited
  group list (either half may be empty). Returns the parsed group list.
  """
  import params
  # pad with an empty string so the group half is optional
  fields = re.split('\s', user_and_groups) + [""]
  users_csv, groups_csv = fields[0], fields[1]
  users_list = users_csv.split(",") if users_csv else []
  groups_list = groups_csv.split(",") if groups_csv else []
  if users_list:
    User(users_list,
         ignore_failures = params.ignore_groupsusers_create
    )
  if groups_list:
    Group(copy(groups_list),
          ignore_failures = params.ignore_groupsusers_create
    )
  return groups_list
def set_uid(user, user_dirs):
  """
  Run changeToSecureUid.sh to ensure `user` has a uid greater than 1000.

  user_dirs - comma separated directories whose ownership must follow the
  uid change.
  """
  import params

  File(format("{tmp_dir}/changeUid.sh"),
       content=StaticFile("changeToSecureUid.sh"),
       mode=0o555)  # was 0555: Python-2-only octal literal; 0o works on Py2.6+ and Py3
  ignore_groupsusers_create_str = str(params.ignore_groupsusers_create).lower()
  # Skip when the uid is already > 1000, or when user/group creation is
  # being ignored ("true" is a no-op shell command, so not_if succeeds).
  Execute(format("{tmp_dir}/changeUid.sh {user} {user_dirs}"),
          not_if = format("(test $(id -u {user}) -gt 1000) || ({ignore_groupsusers_create_str})"))
def setup_hadoop_env():
  """
  Creates /etc/hadoop, renders hadoop-env.sh and creates the directory
  used for java.io.tmpdir by Hadoop processes.
  """
  import params
  stackversion = params.stack_version_unformatted
  if params.has_namenode or stackversion.find('Gluster') >= 0:
    # With security enabled the env file must be root-owned.
    if params.security_enabled:
      tc_owner = "root"
    else:
      tc_owner = params.hdfs_user

    # create /etc/hadoop
    Directory(params.hadoop_dir, mode=0o755)  # was 0755: Python-2-only octal literal

    # write out hadoop-env.sh, but only if the directory exists
    if os.path.exists(params.hadoop_conf_dir):
      File(os.path.join(params.hadoop_conf_dir, 'hadoop-env.sh'), owner=tc_owner,
           group=params.user_group,
           content=InlineTemplate(params.hadoop_env_sh_template))

    # Create tmp dir for java.io.tmpdir
    # Handle a situation when /tmp is set to noexec
    Directory(params.hadoop_java_io_tmpdir,
              owner=params.hdfs_user,
              group=params.user_group,
              mode=0o777  # was 0777: Python-2-only octal literal
    )
def setup_java():
  """
  Installs jdk using specific params, that comes from ambari-server.

  No-op when {java_home}/bin/java already exists or when no jdk_name is
  configured (meaning a pre-installed JDK is expected).
  """
  import params

  java_exec = format("{java_home}/bin/java")

  if not os.path.isfile(java_exec):
    jdk_curl_target = format("{tmp_dir}/{jdk_name}")
    java_dir = os.path.dirname(params.java_home)
    tmp_java_dir = format("{tmp_dir}/jdk")

    if not params.jdk_name:
      return

    Directory(params.artifact_dir,
              create_parents = True,
    )

    File(jdk_curl_target,
         content = DownloadSource(format("{jdk_location}/{jdk_name}")),
         not_if = format("test -f {jdk_curl_target}")
    )

    if params.jdk_name.endswith(".bin"):
      chmod_cmd = ("chmod", "+x", jdk_curl_target)
      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && echo A | {jdk_curl_target} -noregister && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
    elif params.jdk_name.endswith(".gz"):
      chmod_cmd = ("chmod", "a+x", java_dir)
      install_cmd = format("mkdir -p {tmp_java_dir} && cd {tmp_java_dir} && tar -xf {jdk_curl_target} && {sudo} cp -rp {tmp_java_dir}/* {java_dir}")
    else:
      # BUG FIX: previously an unrecognized archive type fell through and
      # crashed below with a NameError on chmod_cmd/install_cmd.
      raise Exception("Unsupported jdk archive type: %s (expected .bin or .gz)" % params.jdk_name)

    Directory(java_dir
    )

    Execute(chmod_cmd,
            sudo = True,
    )

    Execute(install_cmd,
    )

    File(format("{java_home}/bin/java"),
         mode=0o755,  # was 0755: Python-2-only octal literal
         cd_access="a",
    )

    # Make the installed JDK readable/usable by the configured user group.
    Execute(("chgrp", "-R", params.user_group, params.java_home),
            sudo = True,
    )

    Execute(("chown", "-R", getpass.getuser(), params.java_home),
            sudo = True,
    )
|
{
"content_hash": "af83d0d4f3b4ec065f8cb003051a17d4",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 161,
"avg_line_length": 30.66331658291457,
"alnum_prop": 0.6522451655195018,
"repo_name": "alexryndin/ambari",
"id": "467a5ab941b44ddf4a256e10a5982dd73b1cecf1",
"size": "6102",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-adh-1.5",
"path": "ambari-server/src/main/resources/stacks/ADH/1.0/hooks/before-ANY/scripts/shared_initialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "44884"
},
{
"name": "C",
"bytes": "331204"
},
{
"name": "C#",
"bytes": "215907"
},
{
"name": "C++",
"bytes": "257"
},
{
"name": "CSS",
"bytes": "786184"
},
{
"name": "CoffeeScript",
"bytes": "8465"
},
{
"name": "FreeMarker",
"bytes": "2654"
},
{
"name": "Groovy",
"bytes": "89958"
},
{
"name": "HTML",
"bytes": "2514774"
},
{
"name": "Java",
"bytes": "29565801"
},
{
"name": "JavaScript",
"bytes": "19033151"
},
{
"name": "Makefile",
"bytes": "11111"
},
{
"name": "PHP",
"bytes": "149648"
},
{
"name": "PLpgSQL",
"bytes": "316489"
},
{
"name": "PowerShell",
"bytes": "2090340"
},
{
"name": "Python",
"bytes": "17215686"
},
{
"name": "R",
"bytes": "3943"
},
{
"name": "Roff",
"bytes": "13935"
},
{
"name": "Ruby",
"bytes": "33764"
},
{
"name": "SQLPL",
"bytes": "4277"
},
{
"name": "Shell",
"bytes": "886011"
},
{
"name": "Vim script",
"bytes": "5813"
},
{
"name": "sed",
"bytes": "2303"
}
],
"symlink_target": ""
}
|
import sys, json, httplib, base64
#/(um.1) Assign arguments
if len(sys.argv) < 5:
print "USAGE: user_manager.py server_name:port auth_user auth_pass",
print "ACTION [PARAMS...]"
sys.exit(1)
server, port = sys.argv[1].split(":")
username = sys.argv[2]
password = sys.argv[3]
action = sys.argv[4]
if len(sys.argv) > 5:
res_params = sys.argv[5:]
else:
res_params = []
#/(um.2) Build API path
base_path = "/api/users"
if action == "list":
path = base_path
method = "GET"
if action == "create":
path = base_path + "/" + res_params[0]
method = "PUT"
if action == "delete":
path = base_path + "/" + res_params[0]
method = "DELETE"
if action == "show":
path = base_path + "/" + res_params[0]
method = "GET"
#/(um.3) Build JSON arguments
json_args = ""
if action == "create":
json_args = {"password" : res_params[1],
"administrator" : json.loads(res_params[2])}
json_args = json.dumps(json_args)
#/(um.4) Issue API request
conn = httplib.HTTPConnection(server, port)
credentials = base64.b64encode("%s:%s" % (username, password))
conn.request(method, path, json_args,
{"Content-Type" : "application/json",
"Authorization" : "Basic " + credentials})
response = conn.getresponse()
if response.status > 299:
print "Error executing API call (%d): %s" % (response.status,
response.read())
sys.exit(2)
#/(um.5) Parse and display response
resp_payload = response.read()
if action in ["list", "show"]:
resp_payload = json.loads(resp_payload)
#/(um.6) Process 'list' results
if action == "list":
print "Count: %d" % len(resp_payload)
for user in resp_payload:
print "User: %(name)s" % user
print "\tPassword: %(password_hash)s" % user
print "\tAdministrator: %(administrator)s\n" % user
#/(um.7) Process 'show' results
if action == "show":
print "User: %(name)s" % resp_payload
print "\tPassword: %(password_hash)s" % resp_payload
print "\tAdministrator: %(administrator)s\n" % resp_payload
#/(um.8) Create and delete requests have no result.
else:
print "Completed request!"
sys.exit(0)
|
{
"content_hash": "ccfb76c1371608c9b2ac483490c58812",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 72,
"avg_line_length": 29.22077922077922,
"alnum_prop": 0.5924444444444444,
"repo_name": "rabbitinaction/sourcecode",
"id": "9c5b13b9cd3b0ee05be0305ad0f56b4ec42d2a08",
"size": "2826",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/chapter-9/user_manager.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C#",
"bytes": "12159"
},
{
"name": "Erlang",
"bytes": "3481"
},
{
"name": "Java",
"bytes": "5057"
},
{
"name": "PHP",
"bytes": "9871"
},
{
"name": "Python",
"bytes": "47591"
},
{
"name": "Ruby",
"bytes": "1455"
}
],
"symlink_target": ""
}
|
import logging
from math import sin, cos, atan2, sqrt, radians
import re
import requests
from spacebot.util.utils import field
log = logging.getLogger(__name__)
# Default Google Maps zoom level; values 1 - 21+
# See: https://developers.google.com/maps/documentation/staticmaps/#Zoomlevels
DEFAULT_ZOOM = 1


# Gets the position of the ISS from https://api.wheretheiss.at/v1/satellites/25544
def get_iss_data():
    """Fetch current ISS telemetry and return the decoded JSON dict."""
    # NOTE(review): verify=False disables TLS certificate verification —
    # confirm this is a deliberate workaround and not an oversight.
    r = requests.get("https://api.wheretheiss.at/v1/satellites/25544", verify=False)
    # Fail loudly on any non-200 response (assert is fine for a chat bot,
    # but note it is stripped under `python -O`).
    assert r.status_code == 200
    response = r.json()
    log.debug("ISS location response: %s", response)
    return response
def get_iss_text_and_attachments(zoom):
    """Build the Slack message text and attachments for the ISS position.

    zoom: Google Maps static-map zoom level (1 = most zoomed out).
    Returns a (text, attachments) tuple.
    """
    data = get_iss_data()
    fields = [
        field("Altitude", "{0} km".format(data["altitude"])),
        field("Linear Velocity", "{0} km/hr".format(data["velocity"])),
        # BUG FIX: label had a stray trailing colon ("Solar Latitude:"),
        # inconsistent with every other field label in this list.
        field("Solar Latitude", "{0} degrees".format(data["solar_lat"])),
        field("Solar Longitude", "{0} degrees".format(data["solar_lon"])),
        field("Latitude", "{0} degrees".format(data["latitude"])),
        field("Longitude", "{0} degrees".format(data["longitude"])),
        field("Footprint Diameter", "{0} km".format(data["footprint"])),
        field("Visibility", data["visibility"]),
    ]

    lat = data["latitude"]
    lon = data["longitude"]
    # Static map centered on the ISS's current ground position.
    image_url = "https://maps.googleapis.com/maps/api/staticmap?markers={lat},{lon}&zoom={zoom}&size=500x400" \
        .format(lat=lat, lon=lon, zoom=zoom)

    attachments = {"fields": fields, "image_url": image_url}
    return ":star: *Current Position of the ISS* :star:", attachments
def is_iss_overhead(lat, lon):
    """Return True when the ISS footprint covers the given lat/lon.

    Computes the great circle distance between the ISS ground position and
    the given point, and compares it to half the footprint diameter.
    TODO: verify this calculation -ccampo 2015-07-10
    """
    iss_data = get_iss_data()
    phi1, lam1 = radians(lat), radians(lon)
    phi2 = radians(iss_data["latitude"])
    lam2 = radians(iss_data["longitude"])
    dlam = lam2 - lam1

    # atan2 form of the great-circle central angle (numerically stable).
    y = sqrt((cos(phi2) * sin(dlam)) ** 2 +
             (cos(phi1) * sin(phi2) - sin(phi1) * cos(phi2) * cos(dlam)) ** 2)
    x = sin(phi1) * sin(phi2) + cos(phi1) * cos(phi2) * cos(dlam)
    central_angle = atan2(y, x)

    # 6372.795 is the average great-circle radius of the Earth in km, from Wikipedia
    iss_dist = 6372.795 * central_angle
    return iss_dist <= iss_data["footprint"] / 2
def process_event(bot, event):
    """Handle a Slack event: reply with ISS info when the text mentions 'iss'.

    Accepts an optional "zoom N" in the message to control the map zoom.
    """
    command = event["text"].lower()
    if "iss" in command:
        # Raw string fixes the invalid '\d' escape in a plain literal
        # (DeprecationWarning on Python >= 3.6, SyntaxError on >= 3.12).
        search = re.search(r"zoom (\d+)", command)
        # NOTE: group(1) is a string; it is only interpolated into the
        # map URL downstream, so no int() conversion is needed.
        zoom = search.group(1) if search else DEFAULT_ZOOM
        text, attachments = get_iss_text_and_attachments(zoom)
        bot.send_message(text, attachments)
def get_help():
    """Return the help text shown for the ISS command (``{name}`` is the
    bot-name placeholder filled in by the caller)."""
    return ("*{name} ISS [zoom 1-21+]:* Displays information about the "
            "current location of the International Space Station. The "
            "`zoom` parameter specifies the Google Maps zoom, 1 being "
            "the most zoomed out (optional; defaults to 1).")
|
{
"content_hash": "0644c1562bab17b7eab877d0889367a7",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 120,
"avg_line_length": 40.54054054054054,
"alnum_prop": 0.647,
"repo_name": "ccampo133/slack-spacebot",
"id": "3127aca7c06eb791b4e8b8edf2b957895209e97d",
"size": "3000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spacebot/plugins/iss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17287"
}
],
"symlink_target": ""
}
|
import atexit
import getpass
import logging
import os
import shutil
import socket
import subprocess
import threading
import time
from nose.tools import assert_equal, assert_true
from desktop.lib.paths import get_run_root
from hadoop import pseudo_hdfs4
from hadoop.mini_cluster import write_config
from liboozie.oozie_api import get_oozie
from liboozie.conf import OOZIE_URL
_oozie_lock = threading.Lock()
LOG = logging.getLogger(__name__)
class OozieServerProvider(object):
  """
  Setup a Oozie server.

  Boots a single shared Oozie instance per test process (lazily, guarded by
  _oozie_lock) against the shared pseudo-HDFS cluster, and provides helpers
  for waiting on Oozie jobs.
  """
  OOZIE_TEST_PORT = '18001'
  OOZIE_HOME = get_run_root('ext/oozie/oozie')

  # Read by the test harness: these tests need a running Hadoop cluster.
  requires_hadoop = True
  # Process-wide flag so the server is only started by the first caller.
  is_oozie_running = False

  @classmethod
  def setup_class(cls):
    # Class-level fixture: bind the shared cluster and an Oozie API client.
    cls.cluster = pseudo_hdfs4.shared_cluster()
    cls.oozie, callback = cls._get_shared_oozie_server()
    cls.shutdown = [callback]

  @classmethod
  def wait_until_completion(cls, oozie_jobid, timeout=300.0, step=5):
    """Poll `oozie_jobid` every `step` seconds until it finishes.

    Returns the final job object; raises Exception (including the job log)
    if the job is still running after `timeout` seconds.
    """
    job = cls.oozie.get_job(oozie_jobid)
    start = time.time()

    while job.is_running() and time.time() - start < timeout:
      time.sleep(step)
      LOG.info('Checking status of %s...' % oozie_jobid)
      job = cls.oozie.get_job(oozie_jobid)
      LOG.info('[%d] Status after %d: %s' % (time.time(), time.time() - start, job))

    logs = cls.oozie.get_job_log(oozie_jobid)

    if job.is_running():
      msg = "[%d] %s took more than %d to complete: %s" % (time.time(), oozie_jobid, timeout, logs)
      LOG.info(msg)
      raise Exception(msg)
    else:
      LOG.info('[%d] Job %s took %d: %s' % (time.time(), job.id, time.time() - start, logs))

    return job

  @classmethod
  def _setup_conf_dir(cls, cluster):
    # Copy the stock Oozie conf into the cluster tmp dir, then overlay a
    # test-specific oozie-site.xml on top of it.
    original_oozie_conf_dir = '%s/conf' % OozieServerProvider.OOZIE_HOME
    shutil.copytree(original_oozie_conf_dir, cluster._tmppath('conf/oozie'))
    cls._write_oozie_site(cluster)

  @classmethod
  def _write_oozie_site(cls, cluster):
    # Embedded Derby database plus wildcard hue proxyuser so the test user
    # can impersonate anyone against the mini cluster.
    oozie_configs = {
      'oozie.service.ProxyUserService.proxyuser.hue.hosts': '*',
      'oozie.service.ProxyUserService.proxyuser.hue.groups': '*',
      'oozie.service.HadoopAccessorService.hadoop.configurations': '*=%s' % cluster._tmppath('conf'),
      'oozie.db.schema.name': 'oozie',
      'oozie.data.dir': cluster._tmppath('oozie_tmp_dir'),
      'oozie.service.JPAService.create.db.schema': 'false',
      'oozie.service.JPAService.jdbc.driver': 'org.apache.derby.jdbc.EmbeddedDriver',
      'oozie.service.JPAService.jdbc.url': 'jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true',
      'oozie.service.JPAService.jdbc.username': 'sa',
      'oozie.service.JPAService.jdbc.password': '',
      'oozie.service.SchemaService.wf.ext.schemas': '''shell-action-0.1.xsd,shell-action-0.2.xsd,shell-action-0.3.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,
hive-action-0.3.xsd,hive-action-0.4.xsd,hive-action-0.5.xsd,sqoop-action-0.2.xsd,sqoop-action-0.3.xsd,
sqoop-action-0.4.xsd,ssh-action-0.1.xsd,ssh-action-0.2.xsd,distcp-action-0.1.xsd,distcp-action-0.2.xsd,
oozie-sla-0.1.xsd,oozie-sla-0.2.xsd''',
      'oozie.service.ActionService.executor.ext.classes': '''org.apache.oozie.action.email.EmailActionExecutor,
org.apache.oozie.action.hadoop.HiveActionExecutor,
org.apache.oozie.action.hadoop.ShellActionExecutor,
org.apache.oozie.action.hadoop.SqoopActionExecutor,
org.apache.oozie.action.hadoop.DistcpActionExecutor''',
      'oozie.service.coord.normal.default.timeout': 120
    }
    write_config(oozie_configs, cluster._tmppath('conf/oozie/oozie-site.xml'))

  @classmethod
  def _start_oozie(cls, cluster):
    """
    Start oozie process.
    """
    OozieServerProvider._setup_conf_dir(cluster)

    args = [OozieServerProvider.OOZIE_HOME + '/bin/oozied.sh', 'run']
    # NOTE(review): this binds (and mutates) the real os.environ rather
    # than a copy, so these vars leak into the whole test process —
    # confirm that is intended.
    env = os.environ
    env['OOZIE_DATA'] = cluster._tmppath('oozie_tmp_dir')
    env['OOZIE_HTTP_PORT'] = OozieServerProvider.OOZIE_TEST_PORT
    conf_dir = os.path.join(cluster.log_dir, 'oozie')
    os.mkdir(conf_dir)
    env['OOZIE_LOG'] = conf_dir
    env['OOZIE_CONFIG'] = cluster._tmppath('conf/oozie')

    LOG.info("Executing %s, env %s, cwd %s" % (repr(args), repr(env), cluster._tmpdir))
    process = subprocess.Popen(args=args, env=env, cwd=cluster._tmpdir, stdin=subprocess.PIPE)
    return process

  @classmethod
  def _reset_oozie(cls, cluster):
    # Wipe any previous Derby database, then recreate the schema from scratch.
    env = os.environ
    env['OOZIE_DATA'] = cluster._tmppath('oozie_tmp_dir')
    args = ['rm', '-r', '%s/data/oozie-db' % cluster._tmppath('oozie_tmp_dir')]
    LOG.info("Executing %s, env %s" % (args, env))
    subprocess.call(args, env=env)

    args = [OozieServerProvider.OOZIE_HOME + '/bin/ooziedb.sh', 'create', '-sqlfile', 'oozie.sql', '-run']
    LOG.info("Executing %s, env %s" % (args, env))
    subprocess.call(args, env=env)

  @classmethod
  def _setup_sharelib(cls):
    # Upload the Oozie sharelib into the current user's HDFS home directory.
    LOG.info("Copying Oozie sharelib")
    user_home = cls.cluster.fs.do_as_user(getpass.getuser(), cls.cluster.fs.get_home_dir)
    oozie_share_lib = user_home + '/share'
    cls.cluster.fs.do_as_user(getpass.getuser(), cls.cluster.fs.create_home_dir)

    env = os.environ
    args = [
      OozieServerProvider.OOZIE_HOME + '/bin/oozie-setup.sh',
      'sharelib',
      'create',
      '-fs',
      cls.cluster.fs.fs_defaultfs,
      '-locallib',
      OozieServerProvider.OOZIE_HOME + '/oozie-sharelib.tar.gz'
    ]
    LOG.info("Executing %s, env %s" % (args, env))
    subprocess.call(args, env=env)
    LOG.info("Oozie sharelib copied to %s" % oozie_share_lib)

  @classmethod
  def _get_shared_oozie_server(cls):
    """Return (oozie_api, shutdown_callback), booting Oozie on first call.

    Later calls reuse the running server and return a no-op callback.
    """
    callback = lambda: None
    # NOTE(review): the lock is not released in a try/finally, so an
    # exception during startup leaves it held — confirm acceptable for
    # test code.
    _oozie_lock.acquire()

    if not OozieServerProvider.is_oozie_running:
      LOG.info('\nStarting a Mini Oozie. Requires "tools/jenkins/jenkins.sh" to be previously ran.\n')
      LOG.info('See https://issues.cloudera.org/browse/HUE-861\n')

      finish = (
        OOZIE_URL.set_for_testing("http://%s:%s/oozie" % (socket.getfqdn(), OozieServerProvider.OOZIE_TEST_PORT)),
      )

      # Setup
      cluster = pseudo_hdfs4.shared_cluster()
      cls._setup_sharelib()
      cls._reset_oozie(cluster)

      p = cls._start_oozie(cluster)

      # Make sure the spawned server dies with the test process.
      def kill():
        LOG.info("Killing Oozie server (pid %d)." % p.pid)
        os.kill(p.pid, 9)
        p.wait()
      atexit.register(kill)

      # Poll with exponential backoff until Oozie reports NORMAL system
      # mode, giving up after 30 seconds.
      start = time.time()
      started = False
      sleep = 0.01

      while not started and time.time() - start < 30.0:
        status = None
        try:
          LOG.info('Check Oozie status...')
          status = get_oozie(cluster.superuser).get_oozie_status()

          if status['systemMode'] == 'NORMAL':
            started = True
            break
          time.sleep(sleep)
          sleep *= 2
        except Exception, e:
          LOG.info('Oozie server status not NORMAL yet: %s - %s' % (status, e))
          time.sleep(sleep)
          sleep *= 2
          pass

      if not started:
        raise Exception("Oozie server took too long to come up.")

      OozieServerProvider.is_oozie_running = True

      # Shutdown restores the OOZIE_URL override and stops the cluster.
      def shutdown():
        for f in finish:
          f()
        cluster.stop()
      callback = shutdown

    _oozie_lock.release()

    cluster = pseudo_hdfs4.shared_cluster()
    return get_oozie(cluster.superuser), callback
class TestMiniOozie(OozieServerProvider):

  def test_oozie_status(self):
    """The shared server reports NORMAL mode and the sharelib is in HDFS."""
    current_user = getpass.getuser()
    status = get_oozie(current_user).get_oozie_status()
    assert_equal(status['systemMode'], 'NORMAL')
    sharelib_path = '/user/%(user)s/share/lib' % {'user': current_user}
    assert_true(self.cluster.fs.exists(sharelib_path))
|
{
"content_hash": "4a4c37c638e7f1df029f6916408358a9",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 159,
"avg_line_length": 33.914027149321264,
"alnum_prop": 0.6442961974649767,
"repo_name": "yongshengwang/builthue",
"id": "098ab2666dcb419404cc80c3aa3564f8fae59045",
"size": "8287",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "desktop/libs/liboozie/src/liboozie/oozie_api_tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10774013"
},
{
"name": "C++",
"bytes": "184593"
},
{
"name": "CSS",
"bytes": "655282"
},
{
"name": "Emacs Lisp",
"bytes": "14875"
},
{
"name": "GAP",
"bytes": "11337"
},
{
"name": "Java",
"bytes": "3080564"
},
{
"name": "JavaScript",
"bytes": "2418037"
},
{
"name": "Makefile",
"bytes": "86977"
},
{
"name": "Perl",
"bytes": "161801"
},
{
"name": "PigLatin",
"bytes": "282"
},
{
"name": "Prolog",
"bytes": "4590"
},
{
"name": "Python",
"bytes": "29990389"
},
{
"name": "Shell",
"bytes": "38643"
},
{
"name": "TeX",
"bytes": "129526"
},
{
"name": "Thrift",
"bytes": "99710"
},
{
"name": "XSLT",
"bytes": "367778"
}
],
"symlink_target": ""
}
|
import wx
from robotide import robotapi, context
from robotide.controller.settingcontrollers import (
DocumentationController, VariableController, TagsController)
from robotide.usages.UsageRunner import ResourceFileUsages
from robotide.publish import (
RideItemSettingsChanged, RideInitFileRemoved, RideFileNameChanged)
from robotide.widgets import (
ButtonWithHandler, Label, HeaderLabel, HorizontalSizer, HtmlWindow)
from .settingeditors import (
DocumentationEditor, SettingEditor, TagsEditor,
ImportSettingListEditor, VariablesListEditor, MetadataListEditor)
class WelcomePage(HtmlWindow):
    """Read-only HTML panel shown before any file editor has been opened."""

    def __init__(self, parent):
        HtmlWindow.__init__(self, parent, text=context.ABOUT_RIDE)

    def close(self):
        self.Show(False)

    def destroy(self):
        self.close()
        self.Destroy()

    # The welcome page supports none of the editing commands: every
    # editor-API entry point below silently does nothing.
    undo = cut = copy = paste = delete = comment = uncomment = save \
        = show_content_assist = tree_item_selected = lambda *args: None
class EditorPanel(wx.Panel):
    """Base class for all editor panels"""
    # TODO: Move outside default editor package, document
    name = doc = ''
    title = None

    def __init__(self, plugin, parent, controller, tree):
        wx.Panel.__init__(self, parent)
        self.plugin = plugin
        self.controller = controller
        self._tree = tree

    def tree_item_selected(self, item):
        """Hook for subclasses; the base panel ignores tree selection."""
        pass

    # Default editor commands are no-ops; subclasses override as needed.
    undo = cut = copy = paste = delete = comment = uncomment = save \
        = show_content_assist = lambda self: None
class _RobotTableEditor(EditorPanel):
    """Base editor: optional header, collapsible settings pane and tooltip
    bookkeeping for a single robot item."""
    name = 'table'
    doc = 'table editor'
    # Key used to persist the settings pane open/closed state.
    _settings_open_id = 'robot table settings open'

    def __init__(self, plugin, parent, controller, tree):
        EditorPanel.__init__(self, plugin, parent, controller, tree)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # Idle events drive tooltip auto-hiding (see OnIdle).
        self.Bind(wx.EVT_IDLE, self.OnIdle)
        self.SetSizer(self.sizer)
        if self.title:
            self.sizer.Add(self._create_header(self.title),
                           0, wx.EXPAND | wx.ALL, 5)
            self.sizer.Add((0, 10))
        self._editors = []
        self._reset_last_show_tooltip()
        self._populate()
        self.plugin.subscribe(self._settings_changed, RideItemSettingsChanged)

    def _should_settings_be_open(self):
        # Defaults to closed when the preference has never been stored.
        if self._settings_open_id not in self.plugin.global_settings:
            return False
        return self.plugin.global_settings[self._settings_open_id]

    def _store_settings_open_status(self):
        self.plugin.global_settings[self._settings_open_id] = \
            self._settings.IsExpanded()

    def _settings_changed(self, data):
        # Refresh all setting editors when our own item's settings change.
        if data.item == self.controller:
            for editor in self._editors:
                editor.update_value()

    def OnIdle(self, event):
        # Hide the last shown tooltip once the mouse has left its area.
        if self._last_shown_tooltip and self._mouse_outside_tooltip():
            self._last_shown_tooltip.hide()
            self._reset_last_show_tooltip()

    def _mouse_outside_tooltip(self):
        mx, my = wx.GetMousePosition()
        tx, ty = self._last_shown_tooltip.screen_position
        dx, dy = self._last_shown_tooltip.size
        return (mx < tx or mx > tx+dx) or (my < ty or my > ty+dy)

    def tooltip_allowed(self, tooltip):
        # Suppress tooltips while Ctrl is held, and do not re-show the
        # tooltip that is already visible.
        if wx.GetMouseState().ControlDown() or \
                self._last_shown_tooltip is tooltip:
            return False
        self._last_shown_tooltip = tooltip
        return True

    def _reset_last_show_tooltip(self):
        self._last_shown_tooltip = None

    def close(self):
        self.plugin.unsubscribe(
            self._settings_changed, RideItemSettingsChanged)
        self.Unbind(wx.EVT_MOTION)
        self.Show(False)

    def destroy(self):
        self.close()
        self.Destroy()

    def _create_header(self, text, readonly=False):
        if readonly:
            text += ' (READ ONLY)'
        self._title_display = HeaderLabel(self, text)
        return self._title_display

    def _add_settings(self):
        self._settings = self._create_settings()
        self._restore_settings_open_status()
        self._editors.append(self._settings)
        self.sizer.Add(self._settings, 0, wx.ALL | wx.EXPAND, 2)

    def _create_settings(self):
        settings = Settings(self)
        settings.Bind(
            wx.EVT_COLLAPSIBLEPANE_CHANGED, self._collabsible_changed)
        settings.build(self.controller.settings, self.plugin, self._tree)
        return settings

    def _restore_settings_open_status(self):
        if self._should_settings_be_open():
            self._settings.Expand()
            # Delayed relayout so the expanded pane gets its real size.
            wx.CallLater(200, self._collabsible_changed)
        else:
            self._settings.Collapse()

    def _collabsible_changed(self, event=None):
        self._store_settings_open_status()
        self.GetSizer().Layout()
        self.Refresh()
        if event:
            event.Skip()

    def highlight_cell(self, obj, row, column):
        '''Highlight the given object at the given row and column'''
        if isinstance(obj, robotapi.Setting):
            setting_editor = self._get_settings_editor(obj)
            if setting_editor and hasattr(setting_editor, "highlight"):
                setting_editor.highlight(column)
        elif row >= 0 and column >= 0:
            self.kweditor.select(row, column)

    def _get_settings_editor(self, setting):
        '''Return the settings editor for the given setting object'''
        for child in self.GetChildren():
            if isinstance(child, SettingEditor):
                if child._item == setting:
                    return child
        return None

    def highlight(self, text, expand=True):
        for editor in self._editors:
            editor.highlight(text, expand=expand)
class Settings(wx.CollapsiblePane):
    """Collapsible pane holding one editor per setting of the edited item."""
    BORDER = 2

    def __init__(self, parent):
        wx.CollapsiblePane.__init__(
            self, parent, wx.ID_ANY, 'Settings',
            style=wx.CP_DEFAULT_STYLE | wx.CP_NO_TLW_RESIZE)
        self._sizer = wx.BoxSizer(wx.VERTICAL)
        self._editors = []
        self.Bind(wx.EVT_SIZE, self._recalc_size)

    def Expand(self):
        wx.CollapsiblePane.Expand(self)

    def GetPane(self):
        # Expose the parent's tooltip throttling to widgets on the pane.
        pane = wx.CollapsiblePane.GetPane(self)
        pane.tooltip_allowed = self.GetParent().tooltip_allowed
        return pane

    def close(self):
        for editor in self._editors:
            editor.close()

    def update_value(self):
        for editor in self._editors:
            editor.update_value()

    def create_editor_for(self, controller, plugin, tree):
        editor_cls = self._get_editor_class(controller)
        return editor_cls(self.GetPane(), controller, plugin, tree)

    def _get_editor_class(self, controller):
        # Documentation and tags get specialized editors; everything else
        # falls back to the generic SettingEditor.
        if isinstance(controller, DocumentationController):
            return DocumentationEditor
        if isinstance(controller, TagsController):
            return TagsEditor
        return SettingEditor

    def build(self, settings, plugin, tree):
        # Create one editor per setting and stack them vertically.
        for setting in settings:
            editor = self.create_editor_for(setting, plugin, tree)
            self._sizer.Add(editor, 0, wx.ALL | wx.EXPAND, self.BORDER)
            self._editors.append(editor)
        self.GetPane().SetSizer(self._sizer)

    def _recalc_size(self, event=None):
        # Manually size the pane: sum of child editor heights plus room
        # for the expand/collapse button.
        if self.IsExpanded():
            expand_button_height = 32  # good guess...
            height = sum(editor.Size[1] + 2 * self.BORDER
                         for editor in self._editors)
            self.SetSize((-1, height + expand_button_height))
        self._sizer.Layout()
        if event:
            event.Skip()

    def highlight(self, text, expand=True):
        # Highlight matching editors and expand the pane if any matched.
        match = False
        for editor in self._editors:
            if editor.contains(text):
                editor.highlight(text)
                match = True
            else:
                editor.clear_highlight()
        if match and expand:
            self.Expand()
            self.Parent.GetSizer().Layout()
class _FileEditor(_RobotTableEditor):
    """Editor for file-level items: header, source path, settings, imports
    and the variable table."""

    def __init__(self, *args):
        _RobotTableEditor.__init__(self, *args)
        self.plugin.subscribe(
            self._update_source_and_name, RideFileNameChanged)

    def _update_source(self, message=None):
        self._source.SetValue(self.controller.data.source)

    def _update_source_and_name(self, message):
        # Keep header label and source path in sync after a rename.
        self._title_display.SetLabel(self.controller.name)
        self._update_source()

    def tree_item_selected(self, item):
        # Selecting a variable in the tree focuses it in the variable table.
        if isinstance(item, VariableController):
            self._var_editor.select(item.name)

    def _populate(self):
        datafile = self.controller.data
        header = self._create_header(
            datafile.name, not self.controller.is_modifiable())
        self.sizer.Add(header, 0, wx.EXPAND | wx.ALL, 5)
        self.sizer.Add(self._create_source_label(datafile.source),
                       0, wx.EXPAND | wx.ALL, 1)
        self.sizer.Add((0, 10))
        self._add_settings()
        self._add_import_settings()
        self._add_variable_table()

    def _create_source_label(self, source):
        # Read-only, borderless text control: looks like a label but lets
        # the user select and copy long paths.
        sizer = wx.BoxSizer(wx.HORIZONTAL)
        sizer.Add((5, 0))
        sizer.Add(Label(self, label='Source',
                        size=(context.SETTING_LABEL_WIDTH,
                              context.SETTING_ROW_HEIGTH)))
        self._source = wx.TextCtrl(self, style=wx.TE_READONLY | wx.NO_BORDER)
        self._source.SetBackgroundColour(self.BackgroundColour)
        self._source.SetValue(source)
        self._source.SetMaxSize(wx.Size(-1, context.SETTING_ROW_HEIGTH))
        sizer.Add(self._source, 1, wx.EXPAND)
        return sizer

    def _add_import_settings(self):
        import_editor = ImportSettingListEditor(
            self, self._tree, self.controller.imports)
        self.sizer.Add(import_editor, 1, wx.EXPAND)
        self._editors.append(import_editor)

    def _add_variable_table(self):
        self._var_editor = VariablesListEditor(
            self, self._tree, self.controller.variables)
        self.sizer.Add(self._var_editor, 1, wx.EXPAND)
        self._editors.append(self._var_editor)

    def close(self):
        self.plugin.unsubscribe(
            self._update_source_and_name, RideFileNameChanged)
        for editor in self._editors:
            editor.close()
        self._editors = []
        _RobotTableEditor.close(self)

    # Stubs so that ctrl+d ctrl+i don't throw exceptions
    delete_rows = insert_rows = lambda s: None
class FindUsagesHeader(HorizontalSizer):
    """Header row combining an expanding title label with a 'Find Usages'
    button."""

    def __init__(self, parent, header, usages_callback):
        HorizontalSizer.__init__(self)
        self._header = HeaderLabel(parent, header)
        self.add_expanding(self._header)
        button = ButtonWithHandler(parent, 'Find Usages', usages_callback)
        self.add(button)

    def SetLabel(self, label):
        self._header.SetLabel(label)
class ResourceFileEditor(_FileEditor):
    _settings_open_id = 'resource file settings open'

    def _create_header(self, text, readonly=False):
        """Header with a 'Find Usages' button instead of a plain label."""
        if readonly:
            text += ' (READ ONLY)'

        def usages_callback(event):
            ResourceFileUsages(self.controller, self._tree.highlight).show()

        self._title_display = FindUsagesHeader(self, text, usages_callback)
        return self._title_display
class TestCaseFileEditor(_FileEditor):
    _settings_open_id = 'test case file settings open'

    def _populate(self):
        _FileEditor._populate(self)
        self.sizer.Add((0, 10))
        self._add_metadata()

    def _add_metadata(self):
        """Append the metadata list editor below the file-level editors."""
        editor = MetadataListEditor(self, self._tree, self.controller.metadata)
        self.sizer.Add(editor, 1, wx.EXPAND)
        self._editors.append(editor)
class InitFileEditor(TestCaseFileEditor):
    _settings_open_id = 'init file settings open'

    def _populate(self):
        TestCaseFileEditor._populate(self)
        self.plugin.subscribe(self._init_file_removed, RideInitFileRemoved)

    def _init_file_removed(self, message):
        """Refresh each setting editor from its (now removed) setting."""
        for setting, editor in zip(self.controller.settings, self._editors):
            editor.refresh(setting)
|
{
"content_hash": "e618cf21c008f5da4c0812ce2ee50f52",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 78,
"avg_line_length": 34.36931818181818,
"alnum_prop": 0.6196892048272442,
"repo_name": "fingeronthebutton/RIDE",
"id": "24cc89030628c97662f48964e5fc17d3b8a948db",
"size": "12706",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/editor/editors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "21370"
},
{
"name": "HTML",
"bytes": "110675"
},
{
"name": "JavaScript",
"bytes": "41401"
},
{
"name": "Python",
"bytes": "2902622"
}
],
"symlink_target": ""
}
|
# Production settings for a Cloud Foundry style deployment.
#
# Base configuration comes from local_setting_production (star import, which
# is expected to provide ADMINS among others) and the shared settings module;
# everything below overrides it for production.  Database credentials are read
# from the VCAP_SERVICES environment variable injected by the platform, so
# this module can only be imported where that variable is set.
import json
import os

from local_setting_production import *
from settings import DEBUG

TEMPLATE_DEBUG = True
MANAGERS = ADMINS

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Credentials for the bound PostgreSQL service, supplied by the platform.
vcap_services = json.loads(os.environ['VCAP_SERVICES'])
# mysql_srv = vcap_services['mysql-5.1'][0]
mysql_srv = vcap_services['postgresql-9.1'][0]
cred = mysql_srv['credentials']
DATABASES = {
    'default': {
        # 'ENGINE': 'django.db.backends.mysql',
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': cred['name'],
        'USER': cred['user'],
        'PASSWORD': cred['password'],
        'HOST': cred['hostname'],
        'PORT': cred['port'],
    }
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE: previously ALLOWED_HOSTS was assigned twice (an empty list here that
# was silently overridden at the bottom of the file); it is now set once.
ALLOWED_HOSTS = [
    '.eu01.aws.af.cm',  # Allow domain and subdomains
]
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = 'http://disi.unitn.it/~tranquillini/static/todo/'
# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'to.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'to.wsgi.application'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), '..', '../to/do/templates').replace('\\', '/'),)
INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'do',
    'crispy_forms',
    'django.contrib.humanize',
    'social.apps.django_app.default',
    'pomodoro',
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            'format': "[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'formatter': 'standard'
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['console'],
            'level': 'WARNING',
            'propagate': True,
        },
        'do': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'pomodoro': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        }
    }
}
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.contrib.auth.context_processors.auth',
    'django.contrib.messages.context_processors.messages',
    # 'social_auth.context_processors.social_auth_by_type_backends',
    # 'general.context_processors.addProfile',
    # 'general.context_processors.addAppName',
    'django.core.context_processors.request',
    # 'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
    'social.apps.django_app.context_processors.backends',
    'social.apps.django_app.context_processors.login_redirect',
)
AUTHENTICATION_BACKENDS = (
    # 'social.backends.open_id.OpenIdAuth',
    'social.backends.google.GoogleOpenId',
    # 'social.backends.google.GoogleOAuth2',
    # 'social.backends.google.GoogleOAuth',
    # 'social.backends.twitter.TwitterOAuth',
    # 'social.backends.facebook.FacebookOAuth2',
    # 'social.backends.yahoo.YahooOpenId',
    # ...
    # 'django.contrib.auth.backends.ModelBackend',
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
|
{
"content_hash": "0f134399961e527a828a3aefe44cf3f2",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 105,
"avg_line_length": 32.76279069767442,
"alnum_prop": 0.6719193639977286,
"repo_name": "esseti/dododo-dadada",
"id": "90980ff805d9362188aa6e0ff22eebc1a30635a5",
"size": "7079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "to/settings_production.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3810"
},
{
"name": "JavaScript",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "30889"
}
],
"symlink_target": ""
}
|
import json
import mock
from oslo_config import cfg
import six
import webob.exc
import heat.api.middleware.fault as fault
import heat.api.openstack.v1.stacks as stacks
from heat.common import exception as heat_exc
from heat.common import identifier
from heat.common import policy
from heat.common import template_format
from heat.common import urlfetch
from heat.rpc import api as rpc_api
from heat.rpc import client as rpc_client
from heat.tests.api.openstack_v1 import tools
from heat.tests import common
class InstantiationDataTest(common.HeatTestCase):
    """Unit tests for stacks.InstantiationData request-body parsing.

    Each test builds a request body dict and checks how InstantiationData
    extracts the stack name, template, environment and extra args, or that
    it raises webob.exc.HTTPBadRequest for malformed input.
    """
    def test_parse_error_success(self):
        # parse_error_check is a no-op context manager when nothing raises.
        with stacks.InstantiationData.parse_error_check('Garbage'):
            pass
    def test_parse_error(self):
        # A ValueError inside the context is converted to HTTPBadRequest.
        def generate_error():
            with stacks.InstantiationData.parse_error_check('Garbage'):
                raise ValueError
        self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
    def test_parse_error_message(self):
        # make sure the parser error gets through to the caller.
        # NOTE: the template below is intentionally indented inside the
        # string so that template_format.parse() fails on it.
        bad_temp = '''
        heat_template_version: '2013-05-23'
        parameters:
          KeyName:
            type: string
            description: bla
        '''
        def generate_error():
            with stacks.InstantiationData.parse_error_check('foo'):
                template_format.parse(bad_temp)
        parse_ex = self.assertRaises(webob.exc.HTTPBadRequest, generate_error)
        self.assertIn('foo', six.text_type(parse_ex))
    def test_stack_name(self):
        body = {'stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertEqual('wibble', data.stack_name())
    def test_stack_name_missing(self):
        # A missing 'stack_name' key is a bad request.
        body = {'not the stack_name': 'wibble'}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.stack_name)
    def test_template_inline(self):
        # An already-parsed (dict) template is returned as-is.
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(template, data.template())
    def test_template_string_json(self):
        # A JSON string template is parsed into a dict.
        template = ('{"heat_template_version": "2013-05-23",'
                    '"foo": "bar", "blarg": "wibble"}')
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(json.loads(template), data.template())
    def test_template_string_yaml(self):
        # A YAML string template is parsed into a dict.
        template = '''HeatTemplateFormatVersion: 2012-12-12
foo: bar
blarg: wibble
'''
        parsed = {u'HeatTemplateFormatVersion': u'2012-12-12',
                  u'blarg': u'wibble',
                  u'foo': u'bar'}
        body = {'template': template}
        data = stacks.InstantiationData(body)
        self.assertEqual(parsed, data.template())
    def test_template_url(self):
        # With only 'template_url' present, the template is fetched via
        # urlfetch.get (stubbed out here with mox).
        template = {'heat_template_version': '2013-05-23',
                    'foo': 'bar',
                    'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template_url': url}
        data = stacks.InstantiationData(body)
        self.m.StubOutWithMock(urlfetch, 'get')
        urlfetch.get(url).AndReturn(json.dumps(template))
        self.m.ReplayAll()
        self.assertEqual(template, data.template())
        self.m.VerifyAll()
    def test_template_priority(self):
        # An inline 'template' wins over 'template_url': no urlfetch.get
        # call is recorded, so VerifyAll() proves it was never invoked.
        template = {'foo': 'bar', 'blarg': 'wibble'}
        url = 'http://example.com/template'
        body = {'template': template, 'template_url': url}
        data = stacks.InstantiationData(body)
        self.m.StubOutWithMock(urlfetch, 'get')
        self.m.ReplayAll()
        self.assertEqual(template, data.template())
        self.m.VerifyAll()
    def test_template_missing(self):
        # Neither 'template' nor 'template_url' present is a bad request.
        template = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the template': template}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.template)
    def test_parameters(self):
        # Top-level parameters are folded into the environment dict.
        params = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'parameters': params,
                'encrypted_param_names': [],
                'parameter_defaults': {},
                'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(body, data.environment())
    def test_environment_only_params(self):
        env = {'parameters': {'foo': 'bar', 'blarg': 'wibble'}}
        body = {'environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual(env, data.environment())
    def test_environment_and_parameters(self):
        # Top-level parameters are merged with environment parameters.
        body = {'parameters': {'foo': 'bar'},
                'environment': {'parameters': {'blarg': 'wibble'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())
    def test_parameters_override_environment(self):
        # This tests that the cli parameters will override
        # any parameters in the environment.
        body = {'parameters': {'foo': 'bar',
                               'tester': 'Yes'},
                'environment': {'parameters': {'blarg': 'wibble',
                                               'tester': 'fail'}}}
        expect = {'parameters': {'blarg': 'wibble',
                                 'foo': 'bar',
                                 'tester': 'Yes'},
                  'encrypted_param_names': [],
                  'parameter_defaults': {},
                  'resource_registry': {}}
        data = stacks.InstantiationData(body)
        self.assertEqual(expect, data.environment())
    def test_environment_bad_format(self):
        # An environment with unsupported top-level sections is rejected.
        env = {'somethingnotsupported': {'blarg': 'wibble'}}
        body = {'environment': json.dumps(env)}
        data = stacks.InstantiationData(body)
        self.assertRaises(webob.exc.HTTPBadRequest, data.environment)
    def test_environment_missing(self):
        # A missing environment yields an empty default environment.
        env = {'foo': 'bar', 'blarg': 'wibble'}
        body = {'not the environment': env}
        data = stacks.InstantiationData(body)
        self.assertEqual({'parameters': {}, 'encrypted_param_names': [],
                          'parameter_defaults': {}, 'resource_registry': {}},
                         data.environment())
    def test_args(self):
        # args() returns only the keys that are not consumed elsewhere
        # (name, template, environment, etc. are filtered out).
        body = {
            'parameters': {},
            'environment': {},
            'stack_name': 'foo',
            'template': {},
            'template_url': 'http://example.com/',
            'timeout_mins': 60,
        }
        data = stacks.InstantiationData(body)
        self.assertEqual({'timeout_mins': 60}, data.args())
@mock.patch.object(policy.Enforcer, 'enforce')
class StackControllerTest(tools.ControllerTest, common.HeatTestCase):
"""Tests the API class StackController.
Tests the API class which acts as the WSGI controller,
the endpoint processing API requests after they are routed
"""
def setUp(self):
super(StackControllerTest, self).setUp()
# Create WSGI controller instance
class DummyConfig(object):
bind_port = 8004
cfgopts = DummyConfig()
self.controller = stacks.StackController(options=cfgopts)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {},
u'outputs': [],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.index(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': u'wordpress',
u'stack_status': u'CREATE_COMPLETE'
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelists_pagination_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'limit': 10,
'sort_keys': 'fake sort keys',
'marker': 'fake marker',
'sort_dir': 'fake sort dir',
'balrog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertEqual(13, len(engine_args))
self.assertIn('limit', engine_args)
self.assertIn('sort_keys', engine_args)
self.assertIn('marker', engine_args)
self.assertIn('sort_dir', engine_args)
self.assertIn('filters', engine_args)
self.assertIn('tenant_safe', engine_args)
self.assertNotIn('balrog', engine_args)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_limit_not_int(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'limit': 'not-an-int'}
req = self._get('/stacks', params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req,
tenant_id=self.tenant)
self.assertEqual("Only integer is acceptable by 'limit'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_whitelist_filter_params(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {
'id': 'fake id',
'status': 'fake status',
'name': 'fake name',
'action': 'fake action',
'username': 'fake username',
'tenant': 'fake tenant',
'owner_id': 'fake owner-id',
'stack_name': 'fake stack name',
'stack_identity': 'fake identity',
'creation_time': 'create timestamp',
'updated_time': 'update timestamp',
'deletion_time': 'deletion timestamp',
'notification_topics': 'fake topic',
'description': 'fake description',
'template_description': 'fake description',
'parameters': 'fake params',
'outputs': 'fake outputs',
'stack_action': 'fake action',
'stack_status': 'fake status',
'stack_status_reason': 'fake status reason',
'capabilities': 'fake capabilities',
'disable_rollback': 'fake value',
'timeout_mins': 'fake timeout',
'stack_owner': 'fake owner',
'parent': 'fake parent',
'stack_user_project_id': 'fake project id',
'tags': 'fake tags',
'barlog': 'you shall not pass!'
}
req = self._get('/stacks', params=params)
mock_call.return_value = []
self.controller.index(req, tenant_id=self.tenant)
rpc_call_args, _ = mock_call.call_args
engine_args = rpc_call_args[1][1]
self.assertIn('filters', engine_args)
filters = engine_args['filters']
self.assertEqual(16, len(filters))
for key in ('id', 'status', 'name', 'action', 'username', 'tenant',
'owner_id', 'stack_name', 'stack_action', 'stack_status',
'stack_status_reason', 'disable_rollback', 'timeout_mins',
'stack_owner', 'parent', 'stack_user_project_id'):
self.assertIn(key, filters)
for key in ('stack_identity', 'creation_time', 'updated_time',
'deletion_time', 'notification_topics', 'description',
'template_description', 'parameters', 'outputs',
'capabilities', 'tags', 'barlog'):
self.assertNotIn(key, filters)
def test_index_returns_stack_count_if_with_count_is_true(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock(return_value=0)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
def test_index_doesnt_return_stack_count_if_with_count_is_false(
self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'false'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
engine.count_stacks = mock.Mock()
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
self.assertFalse(engine.count_stacks.called)
def test_index_with_count_is_invalid(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'invalid_value'}
req = self._get('/stacks', params=params)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index,
req, tenant_id=self.tenant)
excepted = ('Unrecognized value "invalid_value" for "with_count", '
'acceptable values are: true, false')
self.assertIn(excepted, six.text_type(exc))
@mock.patch.object(rpc_client.EngineClient, 'count_stacks')
def test_index_doesnt_break_with_old_engine(self, mock_count_stacks,
mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
params = {'with_count': 'True'}
req = self._get('/stacks', params=params)
engine = self.controller.rpc_client
engine.list_stacks = mock.Mock(return_value=[])
mock_count_stacks.side_effect = AttributeError("Should not exist")
result = self.controller.index(req, tenant_id=self.tenant)
self.assertNotIn('count', result)
def test_index_enforces_global_index_if_global_tenant(self, mock_enforce):
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
self.controller.index(req, tenant_id=self.tenant)
mock_enforce.assert_called_with(action='global_index',
scope=self.controller.REQUEST_SCOPE,
context=self.context)
def test_global_index_sets_tenant_safe_to_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'global_tenant': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=False)
def test_global_index_show_deleted_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=False)
def test_global_index_show_deleted_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_deleted': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
def test_global_index_show_nested_false(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'False'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=False)
def test_global_index_show_nested_true(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock()
params = {'show_nested': 'True'}
req = self._get('/stacks', params=params)
self.controller.index(req, tenant_id=self.tenant)
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_nested=True)
def test_index_show_deleted_True_with_count_True(self, mock_enforce):
rpc_client = self.controller.rpc_client
rpc_client.list_stacks = mock.Mock(return_value=[])
rpc_client.count_stacks = mock.Mock(return_value=0)
params = {'show_deleted': 'True',
'with_count': 'True'}
req = self._get('/stacks', params=params)
result = self.controller.index(req, tenant_id=self.tenant)
self.assertEqual(0, result['count'])
rpc_client.list_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True)
rpc_client.count_stacks.assert_called_once_with(mock.ANY,
filters=mock.ANY,
tenant_safe=True,
show_deleted=True,
show_nested=False,
show_hidden=False,
tags=None,
tags_any=None,
not_tags=None,
not_tags_any=None)
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_detail(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'detail', True)
req = self._get('/stacks/detail')
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
engine_resp = [
{
u'stack_identity': dict(identity),
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_action': u'CREATE',
u'stack_status': u'COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
mock_call.return_value = engine_resp
result = self.controller.detail(req, tenant_id=identity.tenant)
expected = {
'stacks': [
{
'links': [{"href": self._url(identity),
"rel": "self"}],
'id': '1',
u'updated_time': u'2012-07-09T09:13:11Z',
u'template_description': u'blah',
u'description': u'blah',
u'stack_status_reason': u'Stack successfully created',
u'creation_time': u'2012-07-09T09:12:45Z',
u'stack_name': identity.stack_name,
u'stack_status': u'CREATE_COMPLETE',
u'parameters': {'foo': 'bar'},
u'outputs': ['key', 'value'],
u'notification_topics': [],
u'capabilities': [],
u'disable_rollback': True,
u'timeout_mins': 60,
}
]
}
self.assertEqual(expected, result)
default_args = {'limit': None, 'sort_keys': None, 'marker': None,
'sort_dir': None, 'filters': None, 'tenant_safe': True,
'show_deleted': False, 'show_nested': False,
'show_hidden': False, 'tags': None,
'tags_any': None, 'not_tags': None,
'not_tags_any': None}
mock_call.assert_called_once_with(
req.context, ('list_stacks', default_args), version='1.8')
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_aterr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(AttributeError())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(400, resp.json['code'])
self.assertEqual('AttributeError', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_index_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', False)
req = self._get('/stacks')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
@mock.patch.object(rpc_client.EngineClient, 'call')
def test_index_rmt_interr(self, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'index', True)
req = self._get('/stacks')
mock_call.side_effect = tools.to_remote_error(Exception())
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.index,
req, tenant_id=self.tenant)
self.assertEqual(500, resp.json['code'])
self.assertEqual('Exception', resp.json['error']['type'])
mock_call.assert_called_once_with(
req.context, ('list_stacks', mock.ANY), version='1.8')
def test_create(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_create_with_tags(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'tags': 'tag1,tag2',
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_adopt(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {
"heat_template_version": "2013-05-23",
"parameters": {"app_dbx": {"type": "string"}},
"resources": {"res1": {"type": "GenericResourceType"}}}
parameters = {"app_dbx": "test"}
adopt_data = {
"status": "COMPLETE",
"name": "rtrove1",
"parameters": parameters,
"template": template,
"action": "CREATE",
"id": "8532f0d3-ea84-444e-b2bb-2543bb1496a4",
"resources": {"res1": {
"status": "COMPLETE",
"name": "database_password",
"resource_id": "yBpuUROjfGQ2gKOD",
"action": "CREATE",
"type": "GenericResourceType",
"metadata": {}}}}
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {},
'args': {'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
response = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, response)
self.m.VerifyAll()
def test_adopt_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': {},
'timeout_mins': 'not-an-int',
'adopt_stack_data': 'does not matter'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_adopt_error(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
parameters = {"app_dbx": "test"}
adopt_data = ["Test"]
body = {'template': None,
'stack_name': identity.stack_name,
'parameters': parameters,
'timeout_mins': 30,
'adopt_stack_data': str(adopt_data)}
req = self._post('/stacks', json.dumps(body))
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(400, resp.status_code)
self.assertEqual('400 Bad Request', resp.status)
self.assertIn('Invalid adopt data', resp.text)
self.m.VerifyAll()
def test_create_with_files(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': identity.stack_name,
'parameters': parameters,
'files': {'my.yaml': 'This is the file contents.'},
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
rpc_client.EngineClient.call(
req.context,
('create_stack',
{'stack_name': identity.stack_name,
'template': template,
'params': {'parameters': parameters,
'encrypted_param_names': [],
'parameter_defaults': {},
'resource_registry': {}},
'files': {'my.yaml': 'This is the file contents.'},
'args': {'timeout_mins': 30},
'owner_id': None,
'nested_depth': 0,
'user_creds_id': None,
'parent_resource_name': None,
'stack_user_project_id': None}),
version='1.8'
).AndReturn(dict(identity))
self.m.ReplayAll()
result = self.controller.create(req,
tenant_id=identity.tenant,
body=body)
expected = {'stack':
{'id': '1',
'links': [{'href': self._url(identity), 'rel': 'self'}]}}
self.assertEqual(expected, result)
self.m.VerifyAll()
    def test_create_err_rpcerr(self, mock_enforce):
        """Remote engine errors during create map to 400 responses.

        Three identical 'create_stack' RPC expectations are recorded in
        order, each raising a different remote error (AttributeError,
        UnknownUserParameter, UserParameterMissing); the three subsequent
        requests must each surface the matching error type with code 400.
        The mox replay consumes the expectations in recording order.
        """
        self._mock_enforce_setup(mock_enforce, 'create', True, 3)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        unknown_parameter = heat_exc.UnknownUserParameter(key='a')
        missing_parameter = heat_exc.UserParameterMissing(key='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndRaise(tools.to_remote_error(AttributeError()))
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndRaise(tools.to_remote_error(unknown_parameter))
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndRaise(tools.to_remote_error(missing_parameter))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('AttributeError', resp.json['error']['type'])
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('UnknownUserParameter', resp.json['error']['type'])
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('UserParameterMissing', resp.json['error']['type'])
        self.m.VerifyAll()
    def test_create_err_existing(self, mock_enforce):
        """A remote StackExists error on create maps to HTTP 409 Conflict."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        error = heat_exc.StackExists(stack_name='s')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(409, resp.json['code'])
        self.assertEqual('StackExists', resp.json['error']['type'])
        self.m.VerifyAll()
def test_create_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', True)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 'not-an-int'}
req = self._post('/stacks', json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req,
tenant_id=self.tenant, body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_create_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'create', False)
stack_name = "wordpress"
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'stack_name': stack_name,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create,
req, tenant_id=self.tenant,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_create_err_engine(self, mock_enforce):
        """A remote StackValidationFailed on create maps to HTTP 400."""
        self._mock_enforce_setup(mock_enforce, 'create', True)
        stack_name = "wordpress"
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': stack_name,
                'parameters': parameters,
                'timeout_mins': 30}
        req = self._post('/stacks', json.dumps(body))
        error = heat_exc.StackValidationFailed(message='')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('create_stack',
             {'stack_name': stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30},
              'owner_id': None,
              'nested_depth': 0,
              'user_creds_id': None,
              'parent_resource_name': None,
              'stack_user_project_id': None}),
            version='1.8'
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.create,
                                             req, tenant_id=self.tenant,
                                             body=body)
        self.assertEqual(400, resp.json['code'])
        self.assertEqual('StackValidationFailed', resp.json['error']['type'])
        self.m.VerifyAll()
def test_create_err_stack_bad_reqest(self, mock_enforce):
cfg.CONF.set_override('debug', True)
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'timeout_mins': 30}
req = self._post('/stacks', json.dumps(body))
error = heat_exc.HTTPExceptionDisguise(webob.exc.HTTPBadRequest())
self.controller.create = mock.MagicMock(side_effect=error)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.create, req, body)
# When HTTP disguised exceptions reach the fault app, they are
# converted into regular responses, just like non-HTTP exceptions
self.assertEqual(400, resp.json['code'])
self.assertEqual('HTTPBadRequest', resp.json['error']['type'])
self.assertIsNotNone(resp.json['error']['traceback'])
@mock.patch.object(rpc_client.EngineClient, 'call')
@mock.patch.object(stacks.stacks_view, 'format_stack')
def test_preview_stack(self, mock_format, mock_call, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'preview', True)
body = {'stack_name': 'foo', 'template': {}, 'parameters': {}}
req = self._post('/stacks/preview', json.dumps(body))
mock_call.return_value = {}
mock_format.return_value = 'formatted_stack'
result = self.controller.preview(req, tenant_id=self.tenant, body=body)
self.assertEqual({'stack': 'formatted_stack'}, result)
    @mock.patch.object(rpc_client.EngineClient, 'call')
    @mock.patch.object(stacks.stacks_view, 'format_stack')
    def test_preview_with_tags_timeout(self, mock_format, mock_call,
                                       mock_enforce):
        """Preview forwards tags (split on ',') and timeout_mins as args."""
        self._mock_enforce_setup(mock_enforce, 'preview', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'stack_name': identity.stack_name,
                'parameters': parameters,
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._post('/stacks/preview', json.dumps(body))
        mock_call.return_value = {}
        mock_format.return_value = 'formatted_stack_preview'
        response = self.controller.preview(req,
                                           tenant_id=identity.tenant,
                                           body=body)
        # The comma-separated tag string must reach the engine as a list.
        rpc_client.EngineClient.call.assert_called_once_with(
            req.context,
            ('preview_stack',
             {'stack_name': identity.stack_name,
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
        )
        self.assertEqual({'stack': 'formatted_stack_preview'}, response)
    def test_preview_update_stack(self, mock_enforce):
        """PUT .../preview issues preview_update_stack and returns changes."""
        self._mock_enforce_setup(mock_enforce, 'preview_update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s/preview' %
                        identity, json.dumps(body))
        resource_changes = {'updated': [],
                            'deleted': [],
                            'unchanged': [],
                            'added': [],
                            'replaced': []}
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('preview_update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30}}),
            version='1.15'
        ).AndReturn(resource_changes)
        self.m.ReplayAll()
        result = self.controller.preview_update(req, tenant_id=identity.tenant,
                                                stack_name=identity.stack_name,
                                                stack_id=identity.stack_id,
                                                body=body)
        self.assertEqual({'resource_changes': resource_changes}, result)
        self.m.VerifyAll()
    def test_preview_update_stack_patch(self, mock_enforce):
        """PATCH .../preview adds PARAM_EXISTING so engine reuses template."""
        self._mock_enforce_setup(mock_enforce, 'preview_update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': None,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s/preview' %
                          identity, json.dumps(body))
        resource_changes = {'updated': [],
                            'deleted': [],
                            'unchanged': [],
                            'added': [],
                            'replaced': []}
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('preview_update_stack',
             {'stack_identity': dict(identity),
              'template': None,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30}}),
            version='1.15'
        ).AndReturn(resource_changes)
        self.m.ReplayAll()
        result = self.controller.preview_update_patch(
            req, tenant_id=identity.tenant, stack_name=identity.stack_name,
            stack_id=identity.stack_id, body=body)
        self.assertEqual({'resource_changes': resource_changes}, result)
        self.m.VerifyAll()
    def test_lookup(self, mock_enforce):
        """Lookup by name identifies the stack and redirects (302 Found)."""
        self._mock_enforce_setup(mock_enforce, 'lookup', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        req = self._get('/stacks/%(stack_name)s' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('identify_stack', {'stack_name': identity.stack_name})
        ).AndReturn(identity)
        self.m.ReplayAll()
        # The controller signals the redirect by raising HTTPFound.
        found = self.assertRaises(
            webob.exc.HTTPFound, self.controller.lookup, req,
            tenant_id=identity.tenant, stack_name=identity.stack_name)
        self.assertEqual(self._url(identity), found.location)
        self.m.VerifyAll()
    def test_lookup_arn(self, mock_enforce):
        """Lookup by full ARN redirects directly, with no RPC needed.

        No mox expectations are recorded, so VerifyAll() confirms the
        engine was never called.
        """
        self._mock_enforce_setup(mock_enforce, 'lookup', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        req = self._get('/stacks%s' % identity.arn_url_path())
        self.m.ReplayAll()
        found = self.assertRaises(
            webob.exc.HTTPFound, self.controller.lookup,
            req, tenant_id=identity.tenant, stack_name=identity.arn())
        self.assertEqual(self._url(identity), found.location)
        self.m.VerifyAll()
    def test_lookup_nonexistent(self, mock_enforce):
        """Lookup of an unknown stack maps remote StackNotFound to 404."""
        self._mock_enforce_setup(mock_enforce, 'lookup', True)
        stack_name = 'wibble'
        req = self._get('/stacks/%(stack_name)s' % {
            'stack_name': stack_name})
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('identify_stack', {'stack_name': stack_name})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.lookup,
                                             req, tenant_id=self.tenant,
                                             stack_name=stack_name)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_lookup_err_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_lookup_resource(self, mock_enforce):
        """Lookup with a sub-path redirects to that path under the stack."""
        self._mock_enforce_setup(mock_enforce, 'lookup', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '1')
        req = self._get('/stacks/%(stack_name)s/resources' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('identify_stack', {'stack_name': identity.stack_name})
        ).AndReturn(identity)
        self.m.ReplayAll()
        found = self.assertRaises(
            webob.exc.HTTPFound, self.controller.lookup, req,
            tenant_id=identity.tenant, stack_name=identity.stack_name,
            path='resources')
        # The 'path' argument must be appended to the canonical stack URL.
        self.assertEqual(self._url(identity) + '/resources',
                         found.location)
        self.m.VerifyAll()
    def test_lookup_resource_nonexistent(self, mock_enforce):
        """Sub-path lookup of an unknown stack still maps to 404."""
        self._mock_enforce_setup(mock_enforce, 'lookup', True)
        stack_name = 'wibble'
        req = self._get('/stacks/%(stack_name)s/resources' % {
            'stack_name': stack_name})
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('identify_stack', {'stack_name': stack_name})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.lookup,
                                             req, tenant_id=self.tenant,
                                             stack_name=stack_name,
                                             path='resources')
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_lookup_resource_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'lookup', False)
stack_name = 'wibble'
req = self._get('/stacks/%(stack_name)s/resources' % {
'stack_name': stack_name})
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.lookup,
req, tenant_id=self.tenant,
stack_name=stack_name,
path='resources')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_show(self, mock_enforce):
        """Show returns the engine's stack dict reformatted for the API.

        Notably the engine's separate stack_action/stack_status
        ('CREATE'/'COMPLETE') are fused into a single 'CREATE_COMPLETE'
        status in the response, and the identity is turned into an id plus
        a self link.
        """
        self._mock_enforce_setup(mock_enforce, 'show', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        parameters = {u'DBUsername': u'admin',
                      u'LinuxDistribution': u'F17',
                      u'InstanceType': u'm1.large',
                      u'DBRootPassword': u'admin',
                      u'DBPassword': u'admin',
                      u'DBName': u'wordpress'}
        outputs = [{u'output_key': u'WebsiteURL',
                    u'description': u'URL for Wordpress wiki',
                    u'output_value': u'http://10.0.0.8/wordpress'}]
        engine_resp = [
            {
                u'stack_identity': dict(identity),
                u'updated_time': u'2012-07-09T09:13:11Z',
                u'parameters': parameters,
                u'outputs': outputs,
                u'stack_status_reason': u'Stack successfully created',
                u'creation_time': u'2012-07-09T09:12:45Z',
                u'stack_name': identity.stack_name,
                u'notification_topics': [],
                u'stack_action': u'CREATE',
                u'stack_status': u'COMPLETE',
                u'description': u'blah',
                u'disable_rollback': True,
                u'timeout_mins':60,
                u'capabilities': [],
            }
        ]
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('show_stack', {'stack_identity': dict(identity)})
        ).AndReturn(engine_resp)
        self.m.ReplayAll()
        response = self.controller.show(req,
                                        tenant_id=identity.tenant,
                                        stack_name=identity.stack_name,
                                        stack_id=identity.stack_id)
        expected = {
            'stack': {
                'links': [{"href": self._url(identity),
                           "rel": "self"}],
                'id': '6',
                u'updated_time': u'2012-07-09T09:13:11Z',
                u'parameters': parameters,
                u'outputs': outputs,
                u'description': u'blah',
                u'stack_status_reason': u'Stack successfully created',
                u'creation_time': u'2012-07-09T09:12:45Z',
                u'stack_name': identity.stack_name,
                u'stack_status': u'CREATE_COMPLETE',
                u'capabilities': [],
                u'notification_topics': [],
                u'disable_rollback': True,
                u'timeout_mins': 60,
            }
        }
        self.assertEqual(expected, response)
        self.m.VerifyAll()
    def test_show_notfound(self, mock_enforce):
        """Show of an unknown stack maps remote StackNotFound to 404."""
        self._mock_enforce_setup(mock_enforce, 'show', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('show_stack', {'stack_identity': dict(identity)})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.show,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_show_invalidtenant(self, mock_enforce):
identity = identifier.HeatIdentifier('wibble', 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
def test_show_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'show', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.show,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_get_template(self, mock_enforce):
        """The template controller passes the engine's template through."""
        self._mock_enforce_setup(mock_enforce, 'template', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        template = {u'Foo': u'bar'}
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('get_template', {'stack_identity': dict(identity)})
        ).AndReturn(template)
        self.m.ReplayAll()
        response = self.controller.template(req, tenant_id=identity.tenant,
                                            stack_name=identity.stack_name,
                                            stack_id=identity.stack_id)
        self.assertEqual(template, response)
        self.m.VerifyAll()
def test_get_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'template', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._get('/stacks/%(stack_name)s/%(stack_id)s/template'
% identity)
self.m.ReplayAll()
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.template,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
self.m.VerifyAll()
    def test_get_template_err_notfound(self, mock_enforce):
        """Template fetch of an unknown stack maps StackNotFound to 404."""
        self._mock_enforce_setup(mock_enforce, 'template', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._get('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('get_template', {'stack_identity': dict(identity)})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.template,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
    def test_update(self, mock_enforce):
        """PUT on a stack issues update_stack and answers 202 Accepted."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        # The controller signals acceptance by raising HTTPAccepted.
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_with_tags(self, mock_enforce):
        """Update forwards tags (split on ',') to the engine as a list."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30, 'tags': ['tag1', 'tag2']}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_bad_name(self, mock_enforce):
        """Update of an unknown stack maps remote StackNotFound to 404."""
        self._mock_enforce_setup(mock_enforce, 'update', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                        json.dumps(body))
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {u'parameters': parameters,
                         u'encrypted_param_names': [],
                         u'parameter_defaults': {},
                         u'resource_registry': {}},
              'files': {},
              'args': {'timeout_mins': 30}})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.update,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id,
                                             body=body)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_update_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', True)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
def test_update_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update', False)
identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 30}
req = self._put('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.update,
req, tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_update_with_existing_template(self, mock_enforce):
        """PATCH with template=None reuses the existing template.

        The engine call must carry PARAM_EXISTING: True alongside a None
        template.
        """
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        body = {'template': None,
                'parameters': {},
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': None,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_with_existing_parameters(self, mock_enforce):
        """PATCH with empty parameters sets PARAM_EXISTING and no params."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        body = {'template': template,
                'parameters': {},
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_with_existing_parameters_with_tags(self, mock_enforce):
        """PATCH with tags forwards them as a list plus PARAM_EXISTING."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        body = {'template': template,
                'parameters': {},
                'files': {},
                'tags': 'tag1,tag2',
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30,
                       'tags': ['tag1', 'tag2']}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_with_patched_existing_parameters(self, mock_enforce):
        """PATCH with new parameters merges them under PARAM_EXISTING."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        body = {'template': template,
                'parameters': parameters,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
def test_update_with_patch_timeout_not_int(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'update_patch', True)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
template = {u'Foo': u'bar'}
parameters = {u'InstanceType': u'm1.xlarge'}
body = {'template': template,
'parameters': parameters,
'files': {},
'timeout_mins': 'not-int'}
req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
json.dumps(body))
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update_patch, req,
tenant_id=identity.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id,
body=body)
self.assertEqual("Only integer is acceptable by 'timeout_mins'.",
six.text_type(ex))
self.assertFalse(mock_call.called)
    def test_update_with_existing_and_default_parameters(
            self, mock_enforce):
        """PATCH forwards clear_parameters so defaults are restored."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
        body = {'template': template,
                'parameters': {},
                'clear_parameters': clear_params,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'clear_parameters': clear_params,
                       'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_update_with_patched_and_default_parameters(
            self, mock_enforce):
        """PATCH forwards both new parameters and clear_parameters."""
        self._mock_enforce_setup(mock_enforce, 'update_patch', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        template = {u'Foo': u'bar'}
        parameters = {u'InstanceType': u'm1.xlarge'}
        clear_params = [u'DBUsername', u'DBPassword', u'LinuxDistribution']
        body = {'template': template,
                'parameters': parameters,
                'clear_parameters': clear_params,
                'files': {},
                'timeout_mins': 30}
        req = self._patch('/stacks/%(stack_name)s/%(stack_id)s' % identity,
                          json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('update_stack',
             {'stack_identity': dict(identity),
              'template': template,
              'params': {'parameters': parameters,
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'args': {rpc_api.PARAM_EXISTING: True,
                       'clear_parameters': clear_params,
                       'timeout_mins': 30}})
        ).AndReturn(dict(identity))
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPAccepted,
                          self.controller.update_patch,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id,
                          body=body)
        self.m.VerifyAll()
    def test_delete(self, mock_enforce):
        """A successful stack delete is reported as HTTPNoContent (204)."""
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns None when delete successful
        rpc_client.EngineClient.call(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        ).AndReturn(None)
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPNoContent,
                          self.controller.delete,
                          req, tenant_id=identity.tenant,
                          stack_name=identity.stack_name,
                          stack_id=identity.stack_id)
        self.m.VerifyAll()
def test_delete_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'delete', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.delete,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_abandon(self, mock_enforce):
        """abandon returns the engine's abandon data to the caller verbatim."""
        self._mock_enforce_setup(mock_enforce, 'abandon', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
        req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns json data on abandon completion
        expected = {"name": "test", "id": "123"}
        rpc_client.EngineClient.call(
            req.context,
            ('abandon_stack', {'stack_identity': dict(identity)})
        ).AndReturn(expected)
        self.m.ReplayAll()
        ret = self.controller.abandon(req,
                                      tenant_id=identity.tenant,
                                      stack_name=identity.stack_name,
                                      stack_id=identity.stack_id)
        self.assertEqual(expected, ret)
        self.m.VerifyAll()
def test_abandon_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'abandon', False)
identity = identifier.HeatIdentifier(self.tenant, 'wordpress', '6')
req = self._abandon('/stacks/%(stack_name)s/%(stack_id)s' % identity)
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.abandon,
req, tenant_id=self.tenant,
stack_name=identity.stack_name,
stack_id=identity.stack_id)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_delete_bad_name(self, mock_enforce):
        """A StackNotFound error from the engine surfaces as a 404 fault."""
        self._mock_enforce_setup(mock_enforce, 'delete', True)
        identity = identifier.HeatIdentifier(self.tenant, 'wibble', '6')
        req = self._delete('/stacks/%(stack_name)s/%(stack_id)s' % identity)
        error = heat_exc.StackNotFound(stack_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        # Engine returns None when delete successful
        rpc_client.EngineClient.call(
            req.context,
            ('delete_stack', {'stack_identity': dict(identity)})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.delete,
                                             req, tenant_id=identity.tenant,
                                             stack_name=identity.stack_name,
                                             stack_id=identity.stack_id)
        # The fault middleware maps the remote error onto the JSON body.
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('StackNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
    def test_validate_template(self, mock_enforce):
        """validate_template forwards the template and returns the engine's
        validation result unchanged."""
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        engine_response = {
            u'Description': u'blah',
            u'Parameters': [
                {
                    u'NoEcho': u'false',
                    u'ParameterKey': u'InstanceType',
                    u'Description': u'Instance type'
                }
            ]
        }
        # Record the exact RPC the controller is expected to make.
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'show_nested': False}),
            version='1.18'
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.validate_template(req,
                                                     tenant_id=self.tenant,
                                                     body=body)
        self.assertEqual(engine_response, response)
        self.m.VerifyAll()
    def test_validate_template_error(self, mock_enforce):
        """An {'Error': ...} engine response maps to HTTPBadRequest."""
        self._mock_enforce_setup(mock_enforce, 'validate_template', True)
        template = {u'Foo': u'bar'}
        body = {'template': template}
        req = self._post('/validate', json.dumps(body))
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('validate_template',
             {'template': template,
              'params': {'parameters': {},
                         'encrypted_param_names': [],
                         'parameter_defaults': {},
                         'resource_registry': {}},
              'files': {},
              'show_nested': False}),
            version='1.18'
        ).AndReturn({'Error': 'fubar'})
        self.m.ReplayAll()
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.validate_template,
                          req, tenant_id=self.tenant, body=body)
        self.m.VerifyAll()
def test_validate_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'validate_template', False)
template = {u'Foo': u'bar'}
body = {'template': template}
req = self._post('/validate', json.dumps(body))
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.validate_template,
req, tenant_id=self.tenant, body=body)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_list_resource_types(self, mock_enforce):
        """The engine's type list is wrapped in a 'resource_types' key."""
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        engine_response = ['AWS::EC2::Instance',
                           'AWS::EC2::EIP',
                           'AWS::EC2::EIPAssociation']
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None
             }),
            version="1.16"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_resource_types(req,
                                                       tenant_id=self.tenant)
        self.assertEqual({'resource_types': engine_response}, response)
        self.m.VerifyAll()
    def test_list_resource_types_error(self, mock_enforce):
        """ResourceTypeNotFound from the engine surfaces as a 404 fault."""
        self._mock_enforce_setup(mock_enforce, 'list_resource_types', True)
        req = self._get('/resource_types')
        error = heat_exc.ResourceTypeNotFound(type_name='')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('list_resource_types',
             {
                 'support_status': None,
                 'type_name': None,
                 'heat_version': None
             }),
            version="1.16"
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(
            fault.FaultWrapper,
            self.controller.list_resource_types,
            req, tenant_id=self.tenant)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_list_resource_types_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'list_resource_types', False)
req = self._get('/resource_types')
resp = tools.request_with_middleware(
fault.FaultWrapper,
self.controller.list_resource_types,
req, tenant_id=self.tenant)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_list_template_versions(self, mock_enforce):
        """The engine's versions are wrapped in a 'template_versions' key."""
        self._mock_enforce_setup(mock_enforce, 'list_template_versions', True)
        req = self._get('/template_versions')
        engine_response = [
            {'version': 'heat_template_version.2013-05-23', 'type': 'hot'},
            {'version': 'AWSTemplateFormatVersion.2010-09-09', 'type': 'cfn'}]
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context, ('list_template_versions', {}),
            version="1.11"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_template_versions(
            req, tenant_id=self.tenant)
        self.assertEqual({'template_versions': engine_response}, response)
        self.m.VerifyAll()
    def test_list_template_functions(self, mock_enforce):
        """Functions for a version are wrapped in a 'template_functions' key."""
        self._mock_enforce_setup(mock_enforce, 'list_template_functions', True)
        req = self._get('/template_versions/t1/functions')
        engine_response = [
            {'functions': 'func1', 'description': 'desc1'},
        ]
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context, (
                'list_template_functions', {'template_version': 't1'}),
            version="1.13"
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.list_template_functions(
            req, tenant_id=self.tenant, template_version='t1')
        self.assertEqual({'template_functions': engine_response}, response)
        self.m.VerifyAll()
    def test_resource_schema(self, mock_enforce):
        """resource_schema returns the engine's schema response verbatim."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/ResourceWithProps')
        type_name = 'ResourceWithProps'
        engine_response = {
            'resource_type': type_name,
            'properties': {
                'Foo': {'type': 'string', 'required': False},
            },
            'attributes': {
                'foo': {'description': 'A generic attribute'},
                'Foo': {'description': 'Another generic attribute'},
            },
            'support_status': {
                'status': 'SUPPORTED',
                'version': None,
                'message': None,
            },
        }
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('resource_schema', {'type_name': type_name})
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        response = self.controller.resource_schema(req,
                                                   tenant_id=self.tenant,
                                                   type_name=type_name)
        self.assertEqual(engine_response, response)
        self.m.VerifyAll()
    def test_resource_schema_nonexist(self, mock_enforce):
        """Requesting the schema of an unknown type yields a 404 fault."""
        self._mock_enforce_setup(mock_enforce, 'resource_schema', True)
        req = self._get('/resource_types/BogusResourceType')
        type_name = 'BogusResourceType'
        error = heat_exc.ResourceTypeNotFound(type_name='BogusResourceType')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('resource_schema', {'type_name': type_name})
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.resource_schema,
                                             req, tenant_id=self.tenant,
                                             type_name=type_name)
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_resource_schema_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'resource_schema', False)
req = self._get('/resource_types/BogusResourceType')
type_name = 'BogusResourceType'
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.resource_schema,
req, tenant_id=self.tenant,
type_name=type_name)
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
    def test_generate_template(self, mock_enforce):
        """generate_template defaults the template_type argument to 'cfn'."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/TEST_TYPE/template')
        engine_response = {'Type': 'TEST_TYPE'}
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('generate_template', {'type_name': 'TEST_TYPE',
                                   'template_type': 'cfn'}),
            version='1.9'
        ).AndReturn(engine_response)
        self.m.ReplayAll()
        self.controller.generate_template(req, tenant_id=self.tenant,
                                          type_name='TEST_TYPE')
        self.m.VerifyAll()
def test_generate_template_invalid_template_type(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', True)
params = {'template_type': 'invalid'}
mock_call = self.patchobject(rpc_client.EngineClient, 'call')
req = self._get('/resource_types/TEST_TYPE/template',
params=params)
ex = self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='TEST_TYPE')
self.assertIn('Template type is not supported: Invalid template '
'type "invalid", valid types are: cfn, hot.',
six.text_type(ex))
self.assertFalse(mock_call.called)
    def test_generate_template_not_found(self, mock_enforce):
        """Generating a template for an unknown type yields a 404 fault."""
        self._mock_enforce_setup(mock_enforce, 'generate_template', True)
        req = self._get('/resource_types/NOT_FOUND/template')
        error = heat_exc.ResourceTypeNotFound(type_name='a')
        self.m.StubOutWithMock(rpc_client.EngineClient, 'call')
        rpc_client.EngineClient.call(
            req.context,
            ('generate_template', {'type_name': 'NOT_FOUND',
                                   'template_type': 'cfn'}),
            version='1.9'
        ).AndRaise(tools.to_remote_error(error))
        self.m.ReplayAll()
        resp = tools.request_with_middleware(fault.FaultWrapper,
                                             self.controller.generate_template,
                                             req, tenant_id=self.tenant,
                                             type_name='NOT_FOUND')
        self.assertEqual(404, resp.json['code'])
        self.assertEqual('ResourceTypeNotFound', resp.json['error']['type'])
        self.m.VerifyAll()
def test_generate_template_err_denied_policy(self, mock_enforce):
self._mock_enforce_setup(mock_enforce, 'generate_template', False)
req = self._get('/resource_types/NOT_FOUND/template')
resp = tools.request_with_middleware(fault.FaultWrapper,
self.controller.generate_template,
req, tenant_id=self.tenant,
type_name='blah')
self.assertEqual(403, resp.status_int)
self.assertIn('403 Forbidden', six.text_type(resp))
class StackSerializerTest(common.HeatTestCase):
    """Tests for the serializer used for stack-create responses."""

    def setUp(self):
        super(StackSerializerTest, self).setUp()
        self.serializer = stacks.StackSerializer()

    def test_serialize_create(self):
        """A created stack serializes to 201 with a Location header."""
        stack_links = [{'href': 'location', "rel": "self"}]
        result = {'stack': {'id': '1', 'links': stack_links}}
        response = self.serializer.create(webob.Response(), result)
        self.assertEqual(201, response.status_int)
        self.assertEqual('location', response.headers['Location'])
        self.assertEqual('application/json', response.headers['Content-Type'])
|
{
"content_hash": "5947546885ea5cb3cde8435f25a13c12",
"timestamp": "",
"source": "github",
"line_count": 2393,
"max_line_length": 79,
"avg_line_length": 42.14751358127873,
"alnum_prop": 0.516295025728988,
"repo_name": "pratikmallya/heat",
"id": "c17eb940400a326b93d803417917ff069361e99b",
"size": "101434",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "heat/tests/api/openstack_v1/test_stacks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6929579"
},
{
"name": "Shell",
"bytes": "33092"
}
],
"symlink_target": ""
}
|
"""
csv2ofx.mappings.mint
~~~~~~~~~~~~~~~~~~~~~
Provides a mapping for transactions obtained via mint.com
"""
from operator import itemgetter
# Each extractor pulls the named column out of a parsed mint.com CSV row.
mapping = dict(
    is_split=False,
    has_header=True,
    split_account=itemgetter("Category"),
    account=itemgetter("Account Name"),
    date=itemgetter("Date"),
    type=itemgetter("Transaction Type"),
    amount=itemgetter("Amount"),
    desc=itemgetter("Original Description"),
    payee=itemgetter("Description"),
    notes=itemgetter("Notes"),
)
|
{
"content_hash": "8e7913f85cfe577ba8f610c5e935879b",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 57,
"avg_line_length": 26.65,
"alnum_prop": 0.6322701688555347,
"repo_name": "reubano/csv2ofx",
"id": "27c465763b0344a7556df83c23b315913a0e84f7",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "csv2ofx/mappings/mint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85764"
},
{
"name": "Shell",
"bytes": "6258"
}
],
"symlink_target": ""
}
|
from rq import Worker
from rq import Queue
from rq import Connection
from spitter import logger
from spitter.connection import redis_conn
# Run a blocking RQ worker against the shared Redis connection; Worker.work()
# consumes jobs from the "spitter_messages" queue until interrupted.
with Connection(redis_conn):
    queue = Queue("spitter_messages")
    logger.info("starting worker...")
    Worker(queue).work()
|
{
"content_hash": "5eae30b6bfd218cf46bd0a289190ae68",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 41,
"avg_line_length": 19.5,
"alnum_prop": 0.7435897435897436,
"repo_name": "tmpapageorgiou/spitter",
"id": "32759c04ec946447ce240765cdd9ef4a31c5c095",
"size": "273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spitter/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "183"
},
{
"name": "Python",
"bytes": "1841"
}
],
"symlink_target": ""
}
|
"""Support for RESTful API sensors."""
import logging
import json
import voluptuous as vol
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
from homeassistant.components.sensor import PLATFORM_SCHEMA, DEVICE_CLASSES_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_NAME,
CONF_METHOD,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_TIMEOUT,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
CONF_DEVICE_CLASS,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Fallbacks used when the corresponding option is absent from configuration.
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
DEFAULT_TIMEOUT = 10
# Config key listing top-level JSON keys to expose as state attributes.
CONF_JSON_ATTRS = "json_attributes"
# HTTP methods the platform accepts for the request.
METHODS = ["POST", "GET"]
# vol.Exclusive makes a static resource URL and a resource template mutually
# exclusive; the vol.All wrapper below still requires at least one of them.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
        vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
        vol.Optional(CONF_AUTHENTICATION): vol.In(
            [HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
        ),
        vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
        vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
        vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS),
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_PAYLOAD): cv.string,
        vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
        vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
        vol.Optional(CONF_USERNAME): cv.string,
        vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
        vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
        vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
    }
)
PLATFORM_SCHEMA = vol.All(
    cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the RESTful sensor."""
    resource = config.get(CONF_RESOURCE)
    resource_template = config.get(CONF_RESOURCE_TEMPLATE)
    value_template = config.get(CONF_VALUE_TEMPLATE)

    if value_template is not None:
        value_template.hass = hass
    if resource_template is not None:
        # A templated URL overrides the static resource for the first fetch.
        resource_template.hass = hass
        resource = resource_template.render()

    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    auth = None
    if username and password:
        if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
            auth = HTTPDigestAuth(username, password)
        else:
            auth = HTTPBasicAuth(username, password)

    rest = RestData(
        config.get(CONF_METHOD),
        resource,
        auth,
        config.get(CONF_HEADERS),
        config.get(CONF_PAYLOAD),
        config.get(CONF_VERIFY_SSL),
        config.get(CONF_TIMEOUT),
    )
    rest.update()
    if rest.data is None:
        # First fetch failed; Home Assistant will retry platform setup later.
        raise PlatformNotReady

    # Must update the sensor now (including fetching the rest resource) to
    # ensure it's updating its state.
    add_entities(
        [
            RestSensor(
                hass,
                rest,
                config.get(CONF_NAME),
                config.get(CONF_UNIT_OF_MEASUREMENT),
                config.get(CONF_DEVICE_CLASS),
                value_template,
                config.get(CONF_JSON_ATTRS),
                config.get(CONF_FORCE_UPDATE),
                resource_template,
            )
        ],
        True,
    )
class RestSensor(Entity):
    """Implementation of a REST sensor."""

    def __init__(
        self,
        hass,
        rest,
        name,
        unit_of_measurement,
        device_class,
        value_template,
        json_attrs,
        force_update,
        resource_template,
    ):
        """Initialize the REST sensor."""
        self._hass = hass
        # RestData helper that performs the actual HTTP requests.
        self.rest = rest
        self._name = name
        self._state = None
        self._unit_of_measurement = unit_of_measurement
        self._device_class = device_class
        self._value_template = value_template
        # Top-level JSON keys to copy into the state attributes, if any.
        self._json_attrs = json_attrs
        self._attributes = None
        self._force_update = force_update
        # Optional template re-rendered on every poll to refresh the URL.
        self._resource_template = resource_template

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def device_class(self):
        """Return the class of this sensor."""
        return self._device_class

    @property
    def available(self):
        """Return if the sensor data are available."""
        return self.rest.data is not None

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def force_update(self):
        """Force update."""
        return self._force_update

    def update(self):
        """Get the latest data from REST API and update the state."""
        if self._resource_template is not None:
            self.rest.set_url(self._resource_template.render())
        self.rest.update()
        value = self.rest.data
        if self._json_attrs:
            # Reset attributes on every poll so stale values never linger.
            self._attributes = {}
            if value:
                try:
                    json_dict = json.loads(value)
                    # A JSON array is accepted; its first element is used.
                    if isinstance(json_dict, list):
                        json_dict = json_dict[0]
                    if isinstance(json_dict, dict):
                        attrs = {
                            k: json_dict[k] for k in self._json_attrs if k in json_dict
                        }
                        self._attributes = attrs
                    else:
                        _LOGGER.warning(
                            "JSON result was not a dictionary"
                            " or list with 0th element a dictionary"
                        )
                except ValueError:
                    _LOGGER.warning("REST result could not be parsed as JSON")
                    _LOGGER.debug("Erroneous JSON: %s", value)
            else:
                _LOGGER.warning("Empty reply found when expecting JSON data")
        if value is not None and self._value_template is not None:
            # The template sees the raw body and may extract/convert the state.
            value = self._value_template.render_with_possible_json_value(value, None)
        self._state = value

    @property
    def device_state_attributes(self):
        """Return the state attributes."""
        return self._attributes
class RestData:
    """Class for handling the data retrieval."""

    def __init__(
        self, method, resource, auth, headers, data, verify_ssl, timeout=DEFAULT_TIMEOUT
    ):
        """Initialize the data object."""
        self._method = method
        self._resource = resource
        self._auth = auth
        self._headers = headers
        self._request_data = data
        self._verify_ssl = verify_ssl
        self._timeout = timeout
        # Last response body as text; None until the first successful fetch
        # and reset to None on request failure.
        self.data = None

    def set_url(self, url):
        """Set url."""
        self._resource = url

    def update(self):
        """Get the latest data from REST service with provided method."""
        _LOGGER.debug("Updating from %s", self._resource)
        request_kwargs = {
            "headers": self._headers,
            "auth": self._auth,
            "data": self._request_data,
            "timeout": self._timeout,
            "verify": self._verify_ssl,
        }
        try:
            # .text stays inside the try so decoding errors raised as
            # RequestException subclasses are handled the same way.
            self.data = requests.request(
                self._method, self._resource, **request_kwargs
            ).text
        except requests.exceptions.RequestException as ex:
            _LOGGER.error("Error fetching data: %s failed with %s", self._resource, ex)
            self.data = None
|
{
"content_hash": "c63c1d6ddfe8c48b1d67c6905927353b",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 88,
"avg_line_length": 32.08712121212121,
"alnum_prop": 0.5963876755991028,
"repo_name": "joopert/home-assistant",
"id": "6fdf5ce7221c648544eba605f9825e9a26ba5abb",
"size": "8471",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rest/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18670593"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
}
|
"""
SeeKeR 3B Dialogue Model.
"""
from parlai.zoo.seeker.seeker_download import download_with_model_type
def download(datapath):
    """Fetch the SeeKeR dialogue 3B model files into *datapath*."""
    model_name, version = 'seeker_dialogue_3B', 'v1.0'
    download_with_model_type(datapath, model_name, version)
|
{
"content_hash": "0ea27ad34258a04d30ebddf11abff16b",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 70,
"avg_line_length": 25,
"alnum_prop": 0.74,
"repo_name": "facebookresearch/ParlAI",
"id": "9cd1087dcf11711b1ed35751d8f13e62cc6169bd",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/zoo/seeker/seeker_dialogue_3B.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
}
|
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # these strings will be replaced by git during git-archive.
    # setup.py/versioneer.py will grep for the variable names, so they must
    # each be defined on a line of their own. _version.py will just call
    # get_keywords().
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
# Instances are populated attribute-by-attribute in get_config(); the class
# itself is only a plain namespace.
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # these strings are filled in when 'setup.py versioneer' creates
    # _version.py
    cfg = VersioneerConfig()
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "",
        "parentdir_prefix": "pydata_google_auth/_version.py",
        "versionfile_source": "pydata_google_auth/_version.py",
        "verbose": False,
    }
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
# Raised by each VCS strategy to tell the caller to fall through to the next
# method of determining the version.
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
# Registry of VCS-specific handler functions, keyed by VCS then method name.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""

    def decorate(f):
        """Store f in HANDLERS[vcs][method]."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f

    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None):
    """Call the given command(s).

    Tries each executable name in *commands* until one can be spawned and
    returns ``(stdout, returncode)``.  Returns ``(None, None)`` when no
    executable could be run, and ``(None, returncode)`` when the process
    exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen(
                [c] + args,
                cwd=cwd,
                env=env,
                stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None),
            )
            break
        except EnvironmentError:
            # sys.exc_info instead of "as e" — presumably kept for very old
            # Python compatibility in generated versioneer code.
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # Executable not found: try the next candidate name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # Loop exhausted without a successful Popen (break never hit).
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # On py3 Popen returns bytes; decode to match py2's str output.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    # Check the directory itself and up to two ancestors.
    for _ in range(3):
        dirname = os.path.basename(root)
        if dirname.startswith(parentdir_prefix):
            return {
                "version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False,
                "error": None,
                "date": None,
            }
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print(
            "Tried directories %s but none started with prefix %s"
            % (str(tried), parentdir_prefix)
        )
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames``/``git_full``/``git_date``
    assignments and returns whichever were found as a dict keyed by
    ``refnames``/``full``/``date``; missing keys are simply absent.
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    # Map each assignment prefix in the file to the result key it fills.
    variables = (
        ("git_refnames =", "refnames"),
        ("git_full =", "full"),
        ("git_date =", "date"),
    )
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if parsing raises,
        # unlike the previous explicit f.close() which was skipped on error.
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                for prefix, key in variables:
                    if line.strip().startswith(prefix):
                        mo = re.search(r'=\s*"(.*)"', line)
                        if mo:
                            keywords[key] = mo.group(1)
    except EnvironmentError:
        # Missing/unreadable file: return whatever (possibly nothing) we got.
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    Raises NotThisMethod when the keywords are absent or still unexpanded
    (i.e. this is a plain checkout, not a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG) :] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r"\d", r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix) :]
            if verbose:
                print("picking %s" % r)
            return {
                "version": r,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False,
                "error": None,
                "date": date,
            }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {
        "version": "0+unknown",
        "full-revisionid": keywords["full"].strip(),
        "dirty": False,
        "error": "no suitable tags",
        "date": None,
    }
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: "long" (full HEX), "short"
    (abbreviated HEX), "closest-tag" (tag with prefix stripped, or None),
    "distance" (commits since tag, or total commit count when untagged),
    "dirty" (bool), "date" (ISO-8601-ish string) and "error" (None on
    success).  Raises NotThisMethod when the tree is not under git control
    or git commands fail outright.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # On Windows git may be installed as a batch file or an exe;
        # run_command tries each candidate in order.
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(
        GITS,
        [
            "describe",
            "--tags",
            "--dirty",
            "--always",
            "--long",
            "--match",
            "%s*" % tag_prefix,
        ],
        cwd=root,
    )
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        # strip the suffix so the TAG-NUM-gHEX parsing below is uniform
        git_describe = git_describe[: git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = "unable to parse git-describe output: '%s'" % describe_out
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
                full_tag,
                tag_prefix,
            )
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix) :]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
        0
    ].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a "+" if the closest tag doesn't already have one, else ".".

    PEP 440 allows only one "+" (the local-version separator), so further
    segments must be joined with ".".  Robustness fix: "closest-tag" is
    explicitly stored as None by git_pieces_from_vcs when no tag exists, and
    pieces.get("closest-tag", "") returns that None (the key *is* present),
    which would make the "in" test raise TypeError.  Coalesce None to "".
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]].  Note that a tagged build that
    is then dirtied renders as TAG+0.gHEX.dirty.

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing to anchor on, use the "untagged" form
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- no "-dirty" marker in this style.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: fall back to a zero base version
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" means dirty.  Note that .dev0 sorts backwards (a dirty
    tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyway.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: synthesize a zero base version
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces)
        version += "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] -- the ".dev0" suffix marks a dirty tree.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    # Build the ".postN[.dev0]" suffix shared by both branches.
    suffix = ""
    if pieces["distance"] or pieces["dirty"]:
        suffix = ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            suffix += ".dev0"
    if pieces["closest-tag"]:
        return pieces["closest-tag"] + suffix
    # exception #1: without a tag the post segment is always emitted,
    # even at distance 0 on a clean tree.
    if not suffix:
        suffix = ".post%d" % pieces["distance"]
    return "0" + suffix
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        parts = [pieces["closest-tag"]]
        if pieces["distance"]:
            # distance is only shown when nonzero in this style
            parts.append("%d-g%s" % (pieces["distance"], pieces["short"]))
        version = "-".join(parts)
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    return version + "-dirty" if pieces["dirty"] else version
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always --long'; the
    distance/hash part is emitted unconditionally.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render(pieces, style):
    """Render the given version pieces into the requested style.

    Returns a dict with "version", "full-revisionid", "dirty", "error"
    and "date" keys.  If *pieces* carries an error, an "unknown" stanza
    is returned regardless of style.  Raises ValueError for an
    unrecognized style name.
    """
    if pieces["error"]:
        return {
            "version": "unknown",
            "full-revisionid": pieces.get("long"),
            "dirty": None,
            "error": pieces["error"],
            "date": None,
        }
    # Map each recognized style name to its renderer function.
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if not style or style == "default":
        style = "pep440"  # the default
    renderer = renderers.get(style)
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    return {
        "version": renderer(pieces),
        "full-revisionid": pieces["long"],
        "dirty": pieces["dirty"],
        "error": None,
        "date": pieces.get("date"),
    }
def get_versions():
    """Get version information or return a default if unable to do so.

    Strategies are tried in order: expanded git-archive keywords,
    'git describe' on a checked-out tree, and finally the parent
    directory name; an "unknown" stanza is returned when all fail.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we
    # have __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in
    # which case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the
        # source tree to this file; strip one directory level per path
        # segment to recover the root.
        for _ in cfg.versionfile_source.split("/"):
            root = os.path.dirname(root)
    except NameError:
        return {
            "version": "0+unknown",
            "full-revisionid": None,
            "dirty": None,
            "error": "unable to find root of source tree",
            "date": None,
        }
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose), cfg.style)
    except NotThisMethod:
        pass
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass
    return {
        "version": "0+unknown",
        "full-revisionid": None,
        "dirty": None,
        "error": "unable to compute version",
        "date": None,
    }
|
{
"content_hash": "c937584a8e8a79396d0dff998cadbfd3",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 88,
"avg_line_length": 32.96343692870201,
"alnum_prop": 0.5695191614441795,
"repo_name": "pydata/pydata-google-auth",
"id": "493d121122cc89f09483f873afb94ec108ad0974",
"size": "18505",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "pydata_google_auth/_version.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "137249"
}
],
"symlink_target": ""
}
|
"""Helpers that help with state related things."""
import asyncio
import json
import logging
from collections import defaultdict
from homeassistant.loader import bind_hass
import homeassistant.util.dt as dt_util
from homeassistant.components.media_player import (
ATTR_MEDIA_CONTENT_ID, ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_VOLUME_LEVEL, ATTR_MEDIA_VOLUME_MUTED, SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE, ATTR_INPUT_SOURCE)
from homeassistant.components.notify import (
ATTR_MESSAGE, SERVICE_NOTIFY)
from homeassistant.components.sun import (
STATE_ABOVE_HORIZON, STATE_BELOW_HORIZON)
from homeassistant.components.switch.mysensors import (
ATTR_IR_CODE, SERVICE_SEND_IR_CODE)
from homeassistant.components.climate import (
ATTR_AUX_HEAT, ATTR_AWAY_MODE, ATTR_FAN_MODE, ATTR_HOLD_MODE,
ATTR_HUMIDITY, ATTR_OPERATION_MODE, ATTR_SWING_MODE,
SERVICE_SET_AUX_HEAT, SERVICE_SET_AWAY_MODE, SERVICE_SET_HOLD_MODE,
SERVICE_SET_FAN_MODE, SERVICE_SET_HUMIDITY, SERVICE_SET_OPERATION_MODE,
SERVICE_SET_SWING_MODE, SERVICE_SET_TEMPERATURE, STATE_HEAT, STATE_COOL,
STATE_IDLE)
from homeassistant.components.climate.ecobee import (
ATTR_FAN_MIN_ON_TIME, SERVICE_SET_FAN_MIN_ON_TIME,
ATTR_RESUME_ALL, SERVICE_RESUME_PROGRAM)
from homeassistant.components.cover import (
ATTR_POSITION)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_OPTION, ATTR_TEMPERATURE, SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_HOME, SERVICE_ALARM_DISARM, SERVICE_ALARM_TRIGGER,
SERVICE_LOCK, SERVICE_MEDIA_PAUSE, SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_SEEK, SERVICE_TURN_OFF, SERVICE_TURN_ON, SERVICE_UNLOCK,
SERVICE_VOLUME_MUTE, SERVICE_VOLUME_SET, SERVICE_OPEN_COVER,
SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED,
STATE_CLOSED, STATE_HOME, STATE_LOCKED, STATE_NOT_HOME, STATE_OFF,
STATE_ON, STATE_OPEN, STATE_PAUSED, STATE_PLAYING, STATE_UNKNOWN,
STATE_UNLOCKED, SERVICE_SELECT_OPTION)
from homeassistant.core import State
from homeassistant.util.async import run_coroutine_threadsafe
# Module-level logger for this helper module.
_LOGGER = logging.getLogger(__name__)
# Group states cannot be reproduced via group services; they are routed
# through the generic `homeassistant` domain instead (see
# async_reproduce_state below).
GROUP_DOMAIN = 'group'
HASS_DOMAIN = 'homeassistant'
# Update this dict of lists when new services are added to HA.
# Each item is a service with a list of required attributes.
SERVICE_ATTRIBUTES = {
    SERVICE_PLAY_MEDIA: [ATTR_MEDIA_CONTENT_TYPE, ATTR_MEDIA_CONTENT_ID],
    SERVICE_MEDIA_SEEK: [ATTR_MEDIA_SEEK_POSITION],
    SERVICE_VOLUME_MUTE: [ATTR_MEDIA_VOLUME_MUTED],
    SERVICE_VOLUME_SET: [ATTR_MEDIA_VOLUME_LEVEL],
    SERVICE_NOTIFY: [ATTR_MESSAGE],
    SERVICE_SET_AWAY_MODE: [ATTR_AWAY_MODE],
    SERVICE_SET_FAN_MODE: [ATTR_FAN_MODE],
    SERVICE_SET_FAN_MIN_ON_TIME: [ATTR_FAN_MIN_ON_TIME],
    SERVICE_RESUME_PROGRAM: [ATTR_RESUME_ALL],
    SERVICE_SET_TEMPERATURE: [ATTR_TEMPERATURE],
    SERVICE_SET_HUMIDITY: [ATTR_HUMIDITY],
    SERVICE_SET_SWING_MODE: [ATTR_SWING_MODE],
    SERVICE_SET_HOLD_MODE: [ATTR_HOLD_MODE],
    SERVICE_SET_OPERATION_MODE: [ATTR_OPERATION_MODE],
    SERVICE_SET_AUX_HEAT: [ATTR_AUX_HEAT],
    SERVICE_SELECT_SOURCE: [ATTR_INPUT_SOURCE],
    SERVICE_SEND_IR_CODE: [ATTR_IR_CODE],
    SERVICE_SELECT_OPTION: [ATTR_OPTION],
    SERVICE_SET_COVER_POSITION: [ATTR_POSITION]
}
# Update this dict when new services are added to HA.
# Each item is a service with a corresponding state.
SERVICE_TO_STATE = {
    SERVICE_TURN_ON: STATE_ON,
    SERVICE_TURN_OFF: STATE_OFF,
    SERVICE_MEDIA_PLAY: STATE_PLAYING,
    SERVICE_MEDIA_PAUSE: STATE_PAUSED,
    SERVICE_ALARM_ARM_AWAY: STATE_ALARM_ARMED_AWAY,
    SERVICE_ALARM_ARM_HOME: STATE_ALARM_ARMED_HOME,
    SERVICE_ALARM_DISARM: STATE_ALARM_DISARMED,
    SERVICE_ALARM_TRIGGER: STATE_ALARM_TRIGGERED,
    SERVICE_LOCK: STATE_LOCKED,
    SERVICE_UNLOCK: STATE_UNLOCKED,
    SERVICE_OPEN_COVER: STATE_OPEN,
    SERVICE_CLOSE_COVER: STATE_CLOSED
}
class AsyncTrackStates(object):
    """Context manager that collects state changes made inside its block.

    On entry the current time is recorded and an (initially empty) list
    is returned; on exit every state updated since that time is appended
    to the list.  Must be run within the event loop.
    """

    def __init__(self, hass):
        """Initialize a TrackStates block."""
        self.hass = hass
        self.states = []

    # pylint: disable=attribute-defined-outside-init
    def __enter__(self):
        """Record the time from which changes are tracked."""
        self.now = dt_util.utcnow()
        return self.states

    def __exit__(self, exc_type, exc_value, traceback):
        """Append every state changed during the block to the list."""
        changed = get_changed_since(self.hass.states.async_all(), self.now)
        self.states.extend(changed)
def get_changed_since(states, utc_point_in_time):
    """Return the states last updated at or after *utc_point_in_time*."""
    return list(
        filter(lambda state: state.last_updated >= utc_point_in_time, states)
    )
@bind_hass
def reproduce_state(hass, states, blocking=False):
    """Reproduce given state.

    Synchronous wrapper around async_reproduce_state: schedules the
    coroutine on hass's event loop and blocks until it completes.
    """
    return run_coroutine_threadsafe(
        async_reproduce_state(hass, states, blocking), hass.loop).result()
@asyncio.coroutine
@bind_hass
def async_reproduce_state(hass, states, blocking=False):
    """Reproduce given state.

    Accepts a single State or an iterable of them.  Entities are grouped
    by (domain, service, attributes) so one service call can target many
    entities; calls are run sequentially within each domain while the
    domains themselves run concurrently.
    """
    if isinstance(states, State):
        states = [states]
    to_call = defaultdict(list)
    for state in states:
        # Skip entities Home Assistant doesn't currently know about.
        if hass.states.get(state.entity_id) is None:
            _LOGGER.warning("reproduce_state: Unable to find entity %s",
                            state.entity_id)
            continue
        if state.domain == GROUP_DOMAIN:
            # Groups are reproduced through the generic homeassistant
            # domain services.
            service_domain = HASS_DOMAIN
        else:
            service_domain = state.domain
        domain_services = hass.services.async_services().get(service_domain)
        if not domain_services:
            _LOGGER.warning(
                "reproduce_state: Unable to reproduce state %s (1)", state)
            continue
        service = None
        # Pick a service whose required attributes are all present, or
        # whose target state matches; an exact state match wins and ends
        # the search early.
        for _service in domain_services.keys():
            if (_service in SERVICE_ATTRIBUTES and
                    all(attr in state.attributes
                        for attr in SERVICE_ATTRIBUTES[_service]) or
                    _service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                service = _service
            if (_service in SERVICE_TO_STATE and
                    SERVICE_TO_STATE[_service] == state.state):
                break
        if not service:
            _LOGGER.warning(
                "reproduce_state: Unable to reproduce state %s (2)", state)
            continue
        # We group service calls for entities by service call
        # json used to create a hashable version of dict with maybe lists in it
        key = (service_domain, service,
               json.dumps(dict(state.attributes), sort_keys=True))
        to_call[key].append(state.entity_id)
    domain_tasks = {}
    for (service_domain, service, service_data), entity_ids in to_call.items():
        data = json.loads(service_data)
        data[ATTR_ENTITY_ID] = entity_ids
        if service_domain not in domain_tasks:
            domain_tasks[service_domain] = []
        domain_tasks[service_domain].append(
            hass.services.async_call(service_domain, service, data, blocking)
        )
    @asyncio.coroutine
    def async_handle_service_calls(coro_list):
        """Run one domain's service calls in sequence."""
        for coro in coro_list:
            yield from coro
    execute_tasks = [async_handle_service_calls(coro_list)
                     for coro_list in domain_tasks.values()]
    if execute_tasks:
        yield from asyncio.wait(execute_tasks, loop=hass.loop)
def state_as_number(state):
    """
    Try to coerce our state to a number.

    Raises ValueError if this is not possible.
    """
    one_states = (STATE_ON, STATE_LOCKED, STATE_ABOVE_HORIZON,
                  STATE_OPEN, STATE_HOME, STATE_HEAT, STATE_COOL)
    zero_states = (STATE_OFF, STATE_UNLOCKED, STATE_UNKNOWN,
                   STATE_BELOW_HORIZON, STATE_CLOSED, STATE_NOT_HOME,
                   STATE_IDLE)
    if state.state in one_states:
        return 1
    if state.state in zero_states:
        return 0
    # Anything else must already be a numeric string.
    return float(state.state)
|
{
"content_hash": "c580fe76e4e1e6ea068bb0a595cacc15",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 79,
"avg_line_length": 37.588235294117645,
"alnum_prop": 0.6643794390273263,
"repo_name": "ewandor/home-assistant",
"id": "254a48c3d0a8e83f18c5fb4b08920c7b8b71d323",
"size": "8307",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/helpers/state.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
}
|
"""Support for Fibaro switches."""
import logging
from homeassistant.components.switch import ENTITY_ID_FORMAT, SwitchDevice
from homeassistant.util import convert
from . import FIBARO_DEVICES, FibaroDevice
DEPENDENCIES = ['fibaro']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Fibaro switches."""
    if discovery_info is None:
        # Platform must be set up through discovery by the fibaro component.
        return
    devices = hass.data[FIBARO_DEVICES]['switch']
    add_entities([FibaroSwitch(dev) for dev in devices], True)
class FibaroSwitch(FibaroDevice, SwitchDevice):
    """Representation of a Fibaro Switch."""

    def __init__(self, fibaro_device):
        """Initialize the Fibaro device."""
        self._state = False
        super().__init__(fibaro_device)
        self.entity_id = ENTITY_ID_FORMAT.format(self.ha_id)

    def turn_on(self, **kwargs):
        """Turn device on."""
        self.call_turn_on()
        self._state = True

    def turn_off(self, **kwargs):
        """Turn device off."""
        self.call_turn_off()
        self._state = False

    @property
    def current_power_w(self):
        """Return the current power usage in W."""
        if 'power' not in self.fibaro_device.interfaces:
            return None
        return convert(self.fibaro_device.properties.power, float, 0.0)

    @property
    def today_energy_kwh(self):
        """Return the today total energy usage in kWh."""
        if 'energy' not in self.fibaro_device.interfaces:
            return None
        return convert(self.fibaro_device.properties.energy, float, 0.0)

    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state

    def update(self):
        """Update device state."""
        self._state = self.current_binary_state
|
{
"content_hash": "ddbb27dd60d5205c36540effc523da8c",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 76,
"avg_line_length": 28.714285714285715,
"alnum_prop": 0.6268656716417911,
"repo_name": "jamespcole/home-assistant",
"id": "024531f62c70e8eb95e8f4472b7b37968ac2e827",
"size": "1809",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/fibaro/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
from raysect.core.scenegraph import Primitive, Node, Observer
from .world import World
|
{
"content_hash": "e256c4c1f2711024f74f11196a66b737",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 61,
"avg_line_length": 43,
"alnum_prop": 0.8372093023255814,
"repo_name": "raysect/source",
"id": "c5918a6ac85796dcd4d9fdcc4abe80911842785b",
"size": "1720",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raysect/optical/scenegraph/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "2044904"
},
{
"name": "Python",
"bytes": "8301227"
},
{
"name": "Shell",
"bytes": "1685"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the two-element ``layout.grid.domain.y`` info array."""

    def __init__(self, plotly_name="y", parent_name="layout.grid.domain", **kwargs):
        # Both array slots are numbers constrained to [0, 1].
        slot = {"editType": "plot", "max": 1, "min": 0, "valType": "number"}
        default_items = [dict(slot), dict(slot)]
        super(YValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            items=kwargs.pop("items", default_items),
            **kwargs,
        )
|
{
"content_hash": "2188e7797fefdbb00047cfbbd11528f5",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 84,
"avg_line_length": 37,
"alnum_prop": 0.4894894894894895,
"repo_name": "plotly/plotly.py",
"id": "c03f5a032cb1ab8278db524e9cf2343deb085b09",
"size": "666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/grid/domain/_y.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import copy
import functools
import sys
# PyPI imports
import contracts
import pytest
# Putil imports
import putil.exh
import putil.pcontracts
from putil.test import AE, AI, GET_EXMSG
###
# Helper functions
###
def decfunc(func):
    """Decorator used to exercise _create_argument_value_pairs."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        """Build the argument dictionary and hand it to ret_func.

        For testing only: a real decorator would call the wrapped
        function with the passed arguments instead of returning them.
        """
        arg_dict = putil.pcontracts._create_argument_value_pairs(
            func, *args, **kwargs
        )
        return ret_func(arg_dict)
    return wrapper
def ret_func(par):
    """Identity helper: hand back the argument unchanged."""
    return par
def sample_func_global():
    """Global test function to test get_exdesc function behavior."""
    return putil.pcontracts.get_exdesc()
###
# Test functions
###
def test_get_replacement_token():
    """Test _get_replacement_token function behavior."""
    fobj = putil.pcontracts._get_replacement_token
    msg_with_token = 'Argument `*[argument_name]*` could not be found'
    msg_without_token = 'Argument `*file_name*` could not be found'
    # Only the `*[...]*` form is a replacement token.
    assert fobj(msg_with_token) == 'argument_name'
    assert fobj(msg_without_token) is None
def test_format_arg():
    """Test _format_arg function behavior."""
    fobj = putil.pcontracts._format_arg
    # Each accepted input shape (string, exception type, 2-tuple in
    # either order) normalizes to a {'msg', 'type'} dictionary.
    cases = [
        ('Message', {'msg':'Message', 'type':RuntimeError}),
        (
            OSError,
            {'msg':'Argument `*[argument_name]*` is not valid', 'type':OSError}
        ),
        ((ValueError, 'Description 1'), {'msg':'Description 1', 'type':ValueError}),
        (('Description 2', TypeError), {'msg':'Description 2', 'type':TypeError}),
    ]
    for arg, ref in cases:
        assert fobj(arg) == ref
def test_format_arg_exceptions():
    """Test _format_arg function exceptions."""
    fobj = putil.pcontracts._format_arg
    # Definitions with an empty message string.
    empty_msg_items = ['', [RuntimeError, ''], ['', RuntimeError], ['']]
    for item in empty_msg_items:
        AE(fobj, ValueError, 'Empty custom contract exception message', arg=item)
    # Structurally illegal definitions (wrong container, arity or types).
    bad_def_items = [
        set([RuntimeError, 'Message']),
        [],
        (RuntimeError, 'Message', 3),
        [3],
        ['a', 3],
        [3, 'a'],
        [ValueError, 3],
        [3, ValueError],
    ]
    for item in bad_def_items:
        AE(fobj, TypeError, 'Illegal custom contract exception definition', arg=item)
def test_isexception():
    """Test _isexception function behavior."""
    fobj = putil.pcontracts._isexception
    # Only exception *types* qualify; other types and instances do not.
    assert fobj(RuntimeError)
    assert not fobj(str)
    assert not fobj(3)
def test_parse_new_contract_args():
    """ Test _parse_new_contract_args function behavior """
    fobj = putil.pcontracts._parse_new_contract_args
    # Validate *args: mixing positional and keyword definitions, more
    # than one positional, or a non-string/non-exception positional are
    # all illegal
    with pytest.raises(TypeError) as excinfo:
        fobj('Desc1', file_not_found='Desc2')
    assert GET_EXMSG(excinfo) == 'Illegal custom contract exception definition'
    with pytest.raises(TypeError) as excinfo:
        fobj('Desc1', 'Desc2')
    assert GET_EXMSG(excinfo) == 'Illegal custom contract exception definition'
    with pytest.raises(TypeError) as excinfo:
        fobj(5)
    assert GET_EXMSG(excinfo) == 'Illegal custom contract exception definition'
    # Normal behavior: every accepted shape normalizes to a list of
    # {'name', 'msg', 'type'} dictionaries
    assert fobj() == [
        {
            'name':'argument_invalid',
            'msg':'Argument `*[argument_name]*` is not valid',
            'type':RuntimeError
        }
    ]
    assert fobj('Desc') == [
        {
            'name':'default',
            'msg':'Desc',
            'type':RuntimeError
        }
    ]
    assert fobj(OSError) == [
        {
            'name':'default',
            'msg':'Argument `*[argument_name]*` is not valid',
            'type':OSError
        }
    ]
    assert fobj(('a', )) == [
        {'name':'default', 'msg':'a', 'type':RuntimeError}
    ]
    assert fobj((OSError, )) == [
        {
            'name':'default',
            'msg':'Argument `*[argument_name]*` is not valid',
            'type':OSError
        }
    ]
    assert fobj([TypeError, 'bcd']) == [
        {
            'name':'default', 'msg':'bcd', 'type':TypeError
        }
    ]
    assert fobj(['xyz', ValueError]) == [
        {
            'name':'default', 'msg':'xyz', 'type':ValueError
        }
    ]
    # Keyword definitions may come back in any order, hence the
    # order-insensitive comparison
    assert putil.test.comp_list_of_dicts(
        fobj(mycontract=('xyz', ValueError), othercontract=('abc', OSError)),
        [
            {'name':'othercontract', 'msg':'abc', 'type':OSError},
            {'name':'mycontract', 'msg':'xyz', 'type':ValueError}
        ]
    )
    # Validate **kwargs
    exmsg = 'Illegal custom contract exception definition'
    AE(fobj, TypeError, exmsg, a=45)
    ref = [
        {'name':'char', 'msg':'Desc1', 'type':RuntimeError},
        {'name':'other', 'msg':'a', 'type':ValueError}
    ]
    assert putil.test.comp_list_of_dicts(
        ref, fobj(char='Desc1', other=['a', ValueError])
    )
def test_register_custom_contracts():
    """ Test _register_custom_contracts function behavior """
    # Snapshot the module-level registry so it can be restored at the
    # end; this test mutates global state
    original_custom_contracts = copy.deepcopy(
        putil.pcontracts._CUSTOM_CONTRACTS
    )
    fobj = putil.pcontracts._register_custom_contracts
    key1 = 'contract_name'
    key2 = 'contract_exceptions'
    # Test data validation
    exmsg = 'Argument `contract_name` is of the wrong type'
    AE(fobj, TypeError, exmsg, **{key1:5, key2:{}})
    exmsg = 'Argument `contract_exceptions` is of the wrong type'
    AE(fobj, TypeError, exmsg, **{key1:'test', key2:5})
    exmsg = 'Contract exception definition is of the wrong type'
    AE(fobj, TypeError, exmsg, **{key1:'test', key2:{'msg':'b', 'key':'hole'}})
    AE(fobj, TypeError, exmsg, **{key1:'test', key2:[{5:'b'}]})
    AE(fobj, TypeError, exmsg, **{key1:'test', key2:[{'a':'b'}]})
    AE(
        fobj, TypeError, exmsg,
        **{key1:'test', key2:[{'name':'a', 'msg':'b', 'x':RuntimeError}]}
    )
    AE(
        fobj, TypeError, exmsg,
        **{key1:'test', key2:[{'name':5, 'msg':'b', 'type':RuntimeError}]}
    )
    AE(
        fobj, TypeError, exmsg,
        **{key1:'test', key2:[{'name':'a', 'msg':5, 'type':RuntimeError}]}
    )
    AE(
        fobj, TypeError, exmsg,
        **{key1:'test', key2:[{'name':'a', 'msg':'b', 'type':5}]}
    )
    exmsg = 'Contract exception names are not unique'
    AE(
        fobj, ValueError, exmsg,
        **{
            key1:'test',
            key2:[{'name':'a', 'msg':'b'}, {'name':'a', 'msg':'c',}]
        }
    )
    exmsg = 'Contract exception messages are not unique'
    AE(
        fobj, ValueError, exmsg,
        **{
            key1:'test',
            key2:[
                {'name':'a', 'msg':'desc'}, {'name':'b', 'msg':'desc',}
            ]
        }
    )
    exmsg = 'Multiple replacement fields to be substituted by argument value'
    AE(
        fobj, ValueError, exmsg,
        **{
            key1:'test',
            key2:[
                {'name':'x', 'msg':'I am *[spartacus]*'},
                {'name':'y', 'msg':'A move is *[spartacus]*',}
            ]
        }
    )
    # Re-registering the same contract name is rejected
    putil.pcontracts._register_custom_contracts(
        contract_name='test1', contract_exceptions=[{'name':'a', 'msg':'desc'}]
    )
    exmsg = 'Attempt to redefine custom contract `test1`'
    AE(
        fobj, RuntimeError, exmsg,
        **{key1:'test1', key2:[{'name':'a', 'msg':'other desc'}]}
    )
    # Test homogenization of exception definitions: the registry is
    # cleared before each scenario so its final contents are exactly the
    # homogenized form of the last registration(s)
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    fobj('test_contract1', 'my description')
    assert putil.pcontracts._CUSTOM_CONTRACTS == {
        'test_contract1':{
            'default':{
                'num':0,
                'msg':'my description',
                'type':RuntimeError,
                'field':None
            }
        }
    }
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    fobj(
        'test_contract2',
        [
            {'name':'mex1', 'msg':'msg1', 'type':ValueError},
            {'name':'mex2', 'msg':'msg2 *[token_name]* hello world'}
        ]
    )
    assert putil.pcontracts._CUSTOM_CONTRACTS == {
        'test_contract2':{
            'mex1':{'num':0, 'msg':'msg1', 'type':ValueError, 'field':None},
            'mex2':{
                'num':1,
                'msg':'msg2 *[token_name]* hello world',
                'type':RuntimeError,
                'field':'token_name'}
        }
    }
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    fobj('test_contract3', [{'name':'mex1', 'msg':'msg1', 'type':ValueError}])
    fobj(
        'test_contract4',
        [{'name':'mex2', 'msg':'msg2 *[token_name]* hello world'}]
    )
    assert putil.pcontracts._CUSTOM_CONTRACTS == {
        'test_contract3': {
            'mex1':
            {'num':0, 'msg':'msg1', 'type':ValueError, 'field':None}
        },
        'test_contract4':{
            'mex2':
            {
                'num':0,
                'msg':'msg2 *[token_name]* hello world',
                'type':RuntimeError,
                'field':'token_name'
            }
        }
    }
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    fobj('test_contract5', {'name':'mex5', 'msg':'msg5', 'type':ValueError})
    assert putil.pcontracts._CUSTOM_CONTRACTS == {
        'test_contract5':{
            'mex5':{'num':0, 'msg':'msg5', 'type':ValueError, 'field':None}
        }
    }
    # Restore the registry so other tests see the original contracts
    putil.pcontracts._CUSTOM_CONTRACTS = copy.deepcopy(
        original_custom_contracts
    )
def test_contract():
    """ Test contract decorator behavior """
    # pylint: disable=R0912,R0914
    # Snapshot and clear the global contract registry; restored at the end
    original_custom_contracts = copy.deepcopy(
        putil.pcontracts._CUSTOM_CONTRACTS
    )
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    @putil.pcontracts.new_contract('Illegal number: *[number]*')
    def not_zero(number):
        exdesc = putil.pcontracts.get_exdesc()
        if number == 0:
            raise ValueError(exdesc)
        return True
    @putil.pcontracts.new_contract(
        wrong_file_name='The argument *[argument_name]* is wrong',
        file_not_found=(OSError, 'File name `*[file_name]*` not found')
    )
    def file_name_valid(name):
        exdesc = putil.pcontracts.get_exdesc()
        if name == 'a':
            raise ValueError(exdesc['wrong_file_name'])
        if name == 'b':
            raise ValueError(exdesc['file_not_found'])
        return True
    @putil.pcontracts.contract(number='int|float')
    def func1(number):
        return number
    @putil.pcontracts.contract(number='not_zero')
    def func2(number):
        if number == 1:
            raise TypeError('Unfathomable')
        return number
    @putil.pcontracts.contract(fname='str,file_name_valid', flag=bool)
    def func3(fname, fnumber, flag=False):
        return fname, fnumber
    @putil.pcontracts.new_contract('Illegal number: unity')
    def not_one(number):
        exdesc = putil.pcontracts.get_exdesc()
        if number == 1:
            raise ValueError(exdesc)
        return True
    @putil.pcontracts.contract(fname='not_a_valid_contract')
    def func6(fname):
        return fname
    @putil.pcontracts.contract(value=int)
    def func7(value):
        return value
    # Contract violations surface as the registered exception types;
    # exceptions raised inside the function body pass through untouched
    AI(func1, 'number', number='a string')
    AE(func2, RuntimeError, 'Illegal number: 0', number=0)
    AE(func2, TypeError, 'Unfathomable', number=1)
    exmsg = 'The argument fname is wrong'
    AE(func3, RuntimeError, exmsg, fname='a', fnumber=5, flag=False)
    exmsg = 'File name `b` not found'
    AE(func3, OSError, exmsg, fname='b', fnumber=5, flag=False)
    exmsg = 'Argument `flag` is not valid'
    AE(func3, RuntimeError, exmsg, fname='zzz', fnumber=5, flag=45)
    # Signature errors keep the interpreter's own message (which differs
    # between Python 2 and 3)
    with pytest.raises(TypeError) as excinfo:
        func2(2, 5, 10)
    ref = (
        'func2() takes exactly 1 argument (3 given)'
        if sys.hexversion < 0x03000000 else
        'func2() takes 1 positional argument but 3 were given'
    )
    assert GET_EXMSG(excinfo) == ref
    assert func1(5) == 5
    assert func2(10) == 10
    assert func3('hello', 'world', False) == ('hello', 'world')
    # With an exception handler installed, contract exceptions are
    # registered in it the first time each decorated function runs
    putil.exh.set_exh_obj(putil.exh.ExHandle())
    @putil.pcontracts.contract(fname='str,file_name_valid')
    def func4(fname, fnumber):
        return fname, fnumber
    @putil.pcontracts.contract(num='float|not_one', flag=bool, fudge='str|int')
    def func5(num, flag=True, fudge=5):
        return num
    # Register exceptions
    func4('x', 5)
    func5(0)
    exdict = putil.exh.get_exh_obj()._flatten_ex_dict()
    pexlist = list()
    for exkey, exitem in exdict.items():
        pexlist.append(
            {
                'name':exkey[
                    exkey.rfind(
                        putil.exh.get_exh_obj()._callables_separator
                    )+1:
                ],
                'type':exitem['type'],
                'msg':exitem['msg']
            }
        )
    ref = [
        {
            'name':'contract:tests.test_pcontracts.func5.flag_0',
            'type':RuntimeError, 'msg':'Argument `flag` is not valid'
        },
        {
            'name':'contract:tests.test_pcontracts.func5.fudge_0',
            'type':RuntimeError,
            'msg':'Argument `fudge` is not valid'
        },
        {
            'name':'contract:tests.test_pcontracts.func5.num_0',
            'type':RuntimeError,
            'msg':'Illegal number: unity'
        },
        {
            'name':'contract:tests.test_pcontracts.func4.fname_0',
            'type':OSError,
            'msg':'File name `*[file_name]*` not found'
        },
        {
            'name':'contract:tests.test_pcontracts.func4.fname_1',
            'type':RuntimeError,
            'msg':'The argument fname is wrong'
        }
    ]
    assert putil.test.comp_list_of_dicts(pexlist, ref)
    exmsg = 'The argument fname is wrong'
    AE(func4, RuntimeError, exmsg, fname='a', fnumber=5)
    AE(func4, OSError, 'File name `b` not found', fname='b', fnumber=5)
    AE(func5, RuntimeError, 'Illegal number: unity', num=1)
    AI(func5, 'flag', num=1.0, flag=45)
    AI(func5, 'fudge', num=1.0, fudge=1.0)
    putil.exh.del_exh_obj()
    # An unknown contract name is a PyContracts syntax error
    AE(func6, contracts.interface.ContractSyntaxError, '', fname=5)
    AI(func7, 'value', value='a')
    # Restore the registry so other tests see the original contracts
    putil.pcontracts._CUSTOM_CONTRACTS = copy.deepcopy(
        original_custom_contracts
    )
def test_enable_disable_contracts():
    """
    Test wrappers around disable_all, enable_all and
    all_disabled functions behavior
    """
    # A minimal contracted function: accepts only an int argument.
    @putil.pcontracts.contract(number=int)
    def func(number):
        return number
    # Contracts start out enabled, so an invalid argument must raise.
    # NOTE(review): AI appears to be an "assert argument invalid" helper
    # defined earlier in this module -- confirm there.
    assert not putil.pcontracts.all_disabled()
    AI(func, 'number', number=None)
    putil.pcontracts.disable_all()
    assert putil.pcontracts.all_disabled()
    # Contracts are disabled, no exception should be raised
    assert func(['a', 'b']) == ['a', 'b']
    # Re-enabling restores argument checking.
    putil.pcontracts.enable_all()
    assert not putil.pcontracts.all_disabled()
    AI(func, 'number', number=None)
def test_get_exdesc():
    """ Test get_exdesc function behavior """
    def sample_func_local():
        """ Local test function to test get_exdesc function behavior """
        # get_exdesc() is expected to pick up the exdesc attribute attached
        # to the calling function object (set below).
        tmp_local = putil.pcontracts.get_exdesc()
        return tmp_local
    # sample_func_global is presumably defined at module level above this
    # chunk -- TODO confirm; the attribute is attached the same way.
    sample_func_local.exdesc = 'Test local function property'
    sample_func_global.exdesc = 'Test global function property'
    assert sample_func_local() == 'Test local function property'
    assert sample_func_global() == 'Test global function property'
    # Remove the global so the subsequent lookup cannot find a function
    # object and get_exdesc() raises.
    del globals()['sample_func_global']
    exmsg = (
        'Function object could not be found for function `assert_exception`'
    )
    AE(putil.pcontracts.get_exdesc, RuntimeError, exmsg)
def test_new_contract():
    """ Tests for new_contract decorator behavior """
    # pylint: disable=R0204
    # Save and later restore the module-level contract registry so this
    # test does not leak state into other tests.
    original_custom_contracts = copy.deepcopy(
        putil.pcontracts._CUSTOM_CONTRACTS
    )
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    # Case 1: bare decorator -> default "argument invalid" exception.
    @putil.pcontracts.new_contract()
    def func1(name1):
        return name1, putil.pcontracts.get_exdesc()
    ref = (
        'a',
        '[START CONTRACT MSG: func1]Argument `*[argument_name]*` '
        'is not valid[STOP CONTRACT MSG]'
    )
    assert func1('a') == ref
    ref = {
        'func1':{
            'argument_invalid':{
                'num':0,
                'msg':'Argument `*[argument_name]*` is not valid',
                'type':RuntimeError,
                'field':'argument_name'
            }
        }
    }
    assert putil.pcontracts._CUSTOM_CONTRACTS == ref
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    # Case 2: single positional message -> registered under 'default'.
    @putil.pcontracts.new_contract('Simple message')
    def func2(name2):
        return name2, putil.pcontracts.get_exdesc()
    ref = (
        'bc',
        '[START CONTRACT MSG: func2]Simple message[STOP CONTRACT MSG]'
    )
    assert func2('bc') == ref
    ref = {
        'func2':{
            'default':{
                'num':0,
                'msg':'Simple message',
                'type':RuntimeError,
                'field':None
            }
        }
    }
    assert putil.pcontracts._CUSTOM_CONTRACTS == ref
    putil.pcontracts._CUSTOM_CONTRACTS = dict()
    # Case 3: multiple named exceptions, one with an explicit type and a
    # *[data]* replacement field.
    @putil.pcontracts.new_contract(
        ex1='Medium message',
        ex2=('Complex *[data]*', TypeError)
    )
    def func3(name3):
        return name3, putil.pcontracts.get_exdesc()
    ref = (
        'def',
        {
            'ex1':(
                '[START CONTRACT MSG: func3]'
                'Medium message'
                '[STOP CONTRACT MSG]'
            ),
            'ex2':(
                '[START CONTRACT MSG: func3]'
                'Complex *[data]*'
                '[STOP CONTRACT MSG]'
            )
        }
    )
    assert func3('def') == ref
    ref = {
        'func3':{
            'ex1':{
                'num':0,
                'msg':'Medium message',
                'type':RuntimeError,
                'field':None
            },
            'ex2':{
                'num':1,
                'msg':'Complex *[data]*',
                'type':TypeError,
                'field':'data'
            }
        }
    }
    assert putil.pcontracts._CUSTOM_CONTRACTS == ref
    # Restore the registry captured at the top of the test.
    putil.pcontracts._CUSTOM_CONTRACTS = copy.deepcopy(
        original_custom_contracts
    )
###
# Test classes
###
class TestCreateArgumentValuePairs(object):
    """ Tests for _create_argument_value_pairs function behavior """
    # pylint: disable=E1123,E1124,R0201,R0913
    # NOTE(review): decfunc is defined earlier in this module (outside this
    # chunk); it presumably wraps the function so that calling it returns
    # the argument-name -> value dictionary built by
    # _create_argument_value_pairs -- confirm at its definition.
    def test_all_positional_arguments(self):
        """
        Test that function behaves properly when all arguments
        are positional arguments
        """
        @decfunc
        def orig_func_all_positional_arguments(ppar1, ppar2, ppar3):
            pass
        ref = {'ppar1':1, 'ppar2':2, 'ppar3':3}
        assert orig_func_all_positional_arguments(1, 2, 3) == ref
    def test_all_keyword_arguments(self):
        """
        Test that function behaves properly when all arguments are
        keyword arguments
        """
        @decfunc
        def orig_func_all_keyword_arguments(kpar1, kpar2, kpar3):
            pass
        # Keywords are passed out of order; the mapping must still be right.
        assert orig_func_all_keyword_arguments(kpar3=3, kpar2=2, kpar1=1) == {
            'kpar1':1, 'kpar2':2, 'kpar3':3
        }
    def test_positional_and_keyword_arguments(self):
        """
        Test that function behaves properly when arguments are a mix of
        positional and keywords arguments
        """
        @decfunc
        def orig_func_positional_and_keyword_arguments(
            ppar1, ppar2, ppar3, kpar1=1, kpar2=2, kpar3=3
        ):
            pass
        ref = {
            'ppar1':10,
            'ppar2':20,
            'ppar3':30,
            'kpar1':[1, 2],
            'kpar2':1.5,
            'kpar3':'x'
        }
        assert orig_func_positional_and_keyword_arguments(
            10, 20, 30, kpar2=1.5, kpar3='x', kpar1=[1, 2]
        ) == ref
    def test_no_arguments(self):
        """
        Test that function behaves properly when there are no
        arguments passed
        """
        @decfunc
        def orig_func_no_arguments():
            pass
        assert orig_func_no_arguments() == {}
    def test_more_positional_arguments_passed_than_defined(self):
        """
        Test that function behaves properly when there are more arguments
        passed by position than in the function definition
        """
        @decfunc
        def orig_func(ppar1):
            pass
        # Malformed calls yield an empty dict rather than raising here.
        assert orig_func(1, 2, 3) == {}
    def test_more_keyword_arguments_passed_than_defined(self):
        """
        Test that function behaves properly when there are more arguments
        passed by keyword than in the function definition
        """
        @decfunc
        def orig_func(kpar1=0, kpar2=2):
            pass
        assert orig_func(kpar1=1, kpar2=2, kpar3=3) == {}
    def test_argument_passed_by_position_and_keyword(self):
        """
        Test that function behaves properly when there are arguments passed
        both by position and keyword
        """
        @decfunc
        def orig_func(ppar1, ppar2, kpar1=1, kpar2=2):
            pass
        # ppar1 given twice (positionally and by keyword) -> empty dict.
        assert orig_func(1, 2, ppar1=5) == {}
    def test_default_arguments(self):
        """
        Test that function behaves properly when omitting keyword arguments
        that have defaults
        """
        @decfunc
        def orig_func(ppar1, ppar2, kpar1='a', kpar2=2):
            pass
        # kpar1 keeps its default; kpar2 is overridden.
        ref = {'ppar1':1, 'ppar2':2, 'kpar1':'a', 'kpar2':20}
        assert orig_func(1, 2, kpar2=20) == ref
|
{
"content_hash": "1a63f83aa184aef11a3952c3d5b3f6ee",
"timestamp": "",
"source": "github",
"line_count": 669,
"max_line_length": 79,
"avg_line_length": 32.41554559043348,
"alnum_prop": 0.5564419441114082,
"repo_name": "pmacosta/putil",
"id": "c9b14db15945b4cd0bc4c5c47b4afef130a7bf92",
"size": "21875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pcontracts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "16611"
},
{
"name": "Makefile",
"bytes": "2425"
},
{
"name": "PowerShell",
"bytes": "7209"
},
{
"name": "Python",
"bytes": "1220525"
},
{
"name": "Shell",
"bytes": "56372"
}
],
"symlink_target": ""
}
|
import collections
import hashlib
import signal
import sys
import time
import uuid
import functools
import netaddr
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
import six
from six import moves
from neutron.agent.common import ovs_lib
from neutron.agent.common import polling
from neutron.agent.common import utils
from neutron.agent.l2.extensions import manager as ext_manager
from neutron.agent.linux import ip_lib
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.common import config
from neutron.common import constants as n_const
from neutron.common import exceptions
from neutron.common import ipv6_utils as ipv6
from neutron.common import topics
from neutron.common import utils as n_utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc
from neutron.plugins.ml2.drivers.openvswitch.agent.common \
import constants
from neutron.plugins.ml2.drivers.openvswitch.agent \
import ovs_dvr_neutron_agent
LOG = logging.getLogger(__name__)

# Register the AGENT and OVS option groups so cfg.CONF.AGENT / cfg.CONF.OVS
# are importable by this module regardless of load order.
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
                      'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
                      'common.config')

# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
# Mask used to fold a 128-bit uuid into a 64-bit agent stamp (see __init__).
UINT64_BITMASK = (1 << 64) - 1
class _mac_mydialect(netaddr.mac_unix):
    # Zero-padded, colon-separated lowercase MAC formatting (aa:bb:...),
    # used when rendering EUI addresses for the local ARP responder.
    word_fmt = '%.2x'
class DeviceListRetrievalError(exceptions.NeutronException):
    """Raised when port details cannot be fetched for a set of devices."""
    message = _("Unable to retrieve port details for devices: %(devices)s ")
class LocalVLANMapping(object):
    """Records how a single network is realized as a local VLAN."""

    def __init__(self, vlan, network_type, physical_network, segmentation_id,
                 vif_ports=None):
        """Capture the vlan/network association and its attached VIF ports.

        :param vlan: local VLAN id on this host.
        :param network_type: e.g. 'gre', 'vxlan', 'vlan', 'flat', 'local'.
        :param physical_network: physical net name (vlan/flat only).
        :param segmentation_id: VID or tunnel id of the backing network.
        :param vif_ports: optional mapping of vif ids to port objects.
        """
        # Guard against a shared mutable default: each instance gets its
        # own dict unless the caller supplied one.
        self.vif_ports = {} if vif_ports is None else vif_ports
        self.vlan = vlan
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        # set of tunnel ports on which packets should be flooded
        self.tun_ofports = set()

    def __str__(self):
        details = (self.vlan, self.network_type, self.physical_network,
                   self.segmentation_id)
        return "lv-id = %s type = %s phys-net = %s phys-id = %s" % details
class OVSPluginApi(agent_rpc.PluginApi):
    """Agent-side RPC proxy to the plugin; inherits all behavior as-is."""
    pass
def has_zero_prefixlen_address(ip_addresses):
    """Return True if any entry in *ip_addresses* has a /0 prefix length."""
    for address in ip_addresses:
        if netaddr.IPNetwork(address).prefixlen == 0:
            return True
    return False
class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
l2population_rpc.L2populationRpcCallBackTunnelMixin,
dvr_rpc.DVRAgentRpcCallbackMixin):
'''Implements OVS-based tunneling, VLANs and flat networks.
Two local bridges are created: an integration bridge (defaults to
'br-int') and a tunneling bridge (defaults to 'br-tun'). An
additional bridge is created for each physical network interface
used for VLANs and/or flat networks.
All VM VIFs are plugged into the integration bridge. VM VIFs on a
given virtual network share a common "local" VLAN (i.e. not
propagated externally). The VLAN id of this local VLAN is mapped
to the physical networking details realizing that virtual network.
For virtual networks realized as GRE tunnels, a Logical Switch
(LS) identifier is used to differentiate tenant traffic on
inter-HV tunnels. A mesh of tunnels is created to other
Hypervisors in the cloud. These tunnels originate and terminate on
the tunneling bridge of each hypervisor. Port patching is done to
connect local VLANs on the integration bridge to inter-hypervisor
tunnels on the tunnel bridge.
For each virtual network realized as a VLAN or flat network, a
veth or a pair of patch ports is used to connect the local VLAN on
the integration bridge with the physical network bridge, with flow
rules adding, modifying, or stripping VLAN tags as necessary.
'''
# history
# 1.0 Initial version
# 1.1 Support Security Group RPC
# 1.2 Support DVR (Distributed Virtual Router) RPC
# 1.3 Added param devices_to_update to security_groups_provider_updated
# 1.4 Added support for network_update
target = oslo_messaging.Target(version='1.4')
def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
bridge_mappings, polling_interval, tunnel_types=None,
veth_mtu=None, l2_population=False,
enable_distributed_routing=False,
minimize_polling=False,
ovsdb_monitor_respawn_interval=(
constants.DEFAULT_OVSDBMON_RESPAWN),
arp_responder=False,
prevent_arp_spoofing=True,
use_veth_interconnection=False,
quitting_rpc_timeout=None,
conf=None):
'''Constructor.
:param bridge_classes: a dict for bridge classes.
:param integ_br: name of the integration bridge.
:param tun_br: name of the tunnel bridge.
:param local_ip: local IP address of this hypervisor.
:param bridge_mappings: mappings from physical network name to bridge.
:param polling_interval: interval (secs) to poll DB.
:param tunnel_types: A list of tunnel types to enable support for in
the agent. If set, will automatically set enable_tunneling to
True.
:param veth_mtu: MTU size for veth interfaces.
:param l2_population: Optional, whether L2 population is turned on
:param minimize_polling: Optional, whether to minimize polling by
monitoring ovsdb for interface changes.
:param ovsdb_monitor_respawn_interval: Optional, when using polling
minimization, the number of seconds to wait before respawning
the ovsdb monitor.
:param arp_responder: Optional, enable local ARP responder if it is
supported.
:param prevent_arp_spoofing: Optional, enable suppression of any ARP
responses from ports that don't match an IP address that belongs
to the ports. Spoofing rules will not be added to ports that
have port security disabled.
:param use_veth_interconnection: use veths instead of patch ports to
interconnect the integration bridge to physical bridges.
:param quitting_rpc_timeout: timeout in seconds for rpc calls after
SIGTERM is received
:param conf: an instance of ConfigOpts
'''
super(OVSNeutronAgent, self).__init__()
self.conf = conf or cfg.CONF
# init bridge classes with configured datapath type.
self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
functools.partial(bridge_classes[b],
datapath_type=self.conf.OVS.datapath_type)
for b in ('br_int', 'br_phys', 'br_tun'))
self.use_veth_interconnection = use_veth_interconnection
self.veth_mtu = veth_mtu
self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
p_const.MAX_VLAN_TAG))
self.use_call = True
self.tunnel_types = tunnel_types or []
self.l2_pop = l2_population
# TODO(ethuleau): Change ARP responder so it's not dependent on the
# ML2 l2 population mechanism driver.
self.enable_distributed_routing = enable_distributed_routing
self.arp_responder_enabled = arp_responder and self.l2_pop
self.prevent_arp_spoofing = prevent_arp_spoofing
host = self.conf.host
self.agent_id = 'ovs-agent-%s' % host
if tunnel_types:
self.enable_tunneling = True
else:
self.enable_tunneling = False
# Validate agent configurations
self._check_agent_configurations()
# Keep track of int_br's device count for use by _report_state()
self.int_br_device_count = 0
self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK
self.int_br = self.br_int_cls(integ_br)
self.setup_integration_br()
# Stores port update notifications for processing in main rpc loop
self.updated_ports = set()
# Stores port delete notifications
self.deleted_ports = set()
self.network_ports = collections.defaultdict(set)
# keeps association between ports and ofports to detect ofport change
self.vifname_to_ofport_map = {}
self.setup_rpc()
self.init_extension_manager(self.connection)
self.bridge_mappings = bridge_mappings
self.setup_physical_bridges(self.bridge_mappings)
self.local_vlan_map = {}
self.tun_br_ofports = {p_const.TYPE_GENEVE: {},
p_const.TYPE_GRE: {},
p_const.TYPE_VXLAN: {}}
self.polling_interval = polling_interval
self.minimize_polling = minimize_polling
self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
self.local_ip = local_ip
self.tunnel_count = 0
self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
self.dont_fragment = self.conf.AGENT.dont_fragment
self.tunnel_csum = cfg.CONF.AGENT.tunnel_csum
self.tun_br = None
self.patch_int_ofport = constants.OFPORT_INVALID
self.patch_tun_ofport = constants.OFPORT_INVALID
if self.enable_tunneling:
# The patch_int_ofport and patch_tun_ofport are updated
# here inside the call to setup_tunnel_br()
self.setup_tunnel_br(tun_br)
self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
self.context,
self.dvr_plugin_rpc,
self.int_br,
self.tun_br,
self.bridge_mappings,
self.phys_brs,
self.int_ofports,
self.phys_ofports,
self.patch_int_ofport,
self.patch_tun_ofport,
host,
self.enable_tunneling,
self.enable_distributed_routing)
self.agent_state = {
'binary': 'neutron-openvswitch-agent',
'host': host,
'topic': n_const.L2_AGENT_TOPIC,
'configurations': {'bridge_mappings': bridge_mappings,
'tunnel_types': self.tunnel_types,
'tunneling_ip': local_ip,
'l2_population': self.l2_pop,
'arp_responder_enabled':
self.arp_responder_enabled,
'enable_distributed_routing':
self.enable_distributed_routing,
'log_agent_heartbeats':
self.conf.AGENT.log_agent_heartbeats,
'extensions': self.ext_manager.names()},
'agent_type': self.conf.AGENT.agent_type,
'start_flag': True}
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
if self.enable_tunneling:
self.setup_tunnel_br_flows()
self.dvr_agent.setup_dvr_flows()
# Collect additional bridges to monitor
self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)
# In order to keep existed device's local vlan unchanged,
# restore local vlan mapping at start
self._restore_local_vlan_map()
# Security group agent support
self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
self.sg_plugin_rpc, self.local_vlan_map,
defer_refresh_firewall=True)
# Initialize iteration counter
self.iter_num = 0
self.run_daemon_loop = True
self.catch_sigterm = False
self.catch_sighup = False
# The initialization is complete; we can start receiving messages
self.connection.consume_in_threads()
self.quitting_rpc_timeout = quitting_rpc_timeout
    def _report_state(self):
        """Send this agent's current state to the server via the state RPC."""
        # How many devices are likely used by a VM
        self.agent_state.get('configurations')['devices'] = (
            self.int_br_device_count)
        self.agent_state.get('configurations')['in_distributed_mode'] = (
            self.dvr_agent.in_distributed_mode())
        try:
            self.state_rpc.report_state(self.context,
                                        self.agent_state,
                                        self.use_call)
            # use_call is True only for the very first report; presumably it
            # selects a blocking RPC call over a cast -- confirm in
            # PluginReportStateAPI.
            self.use_call = False
            # 'start_flag' marks a fresh agent start; send it only once.
            self.agent_state.pop('start_flag', None)
        except Exception:
            # Reporting is best-effort; log and keep the agent running.
            LOG.exception(_LE("Failed reporting state!"))
    def _restore_local_vlan_map(self):
        """Rebuild net-uuid -> local-vlan hints from tags found in ovsdb.

        Populates self._local_vlan_hints from the existing port tags so a
        restarted agent can reuse the same local VLAN ids, and removes those
        ids from the available pool.
        """
        self._local_vlan_hints = {}
        cur_ports = self.int_br.get_vif_ports()
        port_names = [p.port_name for p in cur_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "other_config", "tag"], ports=port_names)
        by_name = {x['name']: x for x in port_info}
        for port in cur_ports:
            # if a port was deleted between get_vif_ports and
            # get_ports_attributes, we
            # will get a KeyError
            try:
                local_vlan_map = by_name[port.port_name]['other_config']
                local_vlan = by_name[port.port_name]['tag']
            except KeyError:
                continue
            # Untagged ports carry no vlan to restore.
            if not local_vlan:
                continue
            net_uuid = local_vlan_map.get('net_uuid')
            # Record each network's vlan once, skipping the dead-vlan tag.
            if (net_uuid and net_uuid not in self._local_vlan_hints
                and local_vlan != DEAD_VLAN_TAG):
                self.available_local_vlans.remove(local_vlan)
                self._local_vlan_hints[local_vlan_map['net_uuid']] = \
                    local_vlan
def _dispose_local_vlan_hints(self):
self.available_local_vlans.update(self._local_vlan_hints.values())
self._local_vlan_hints = {}
    def setup_rpc(self):
        """Create the plugin-facing RPC clients and register RPC consumers."""
        self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.PORT, topics.DELETE],
                     [constants.TUNNEL, topics.UPDATE],
                     [constants.TUNNEL, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE],
                     [topics.DVR, topics.UPDATE],
                     [topics.NETWORK, topics.UPDATE]]
        # L2 population notifications are only consumed when l2_pop is on.
        if self.l2_pop:
            consumers.append([topics.L2POPULATION, topics.UPDATE])
        # start_listening=False: consumption starts at the end of __init__
        # once initialization is complete.
        self.connection = agent_rpc.create_consumers([self],
                                                     topics.AGENT,
                                                     consumers,
                                                     start_listening=False)
def init_extension_manager(self, connection):
ext_manager.register_opts(self.conf)
self.ext_manager = (
ext_manager.AgentExtensionsManager(self.conf))
self.ext_manager.initialize(
connection, constants.EXTENSION_DRIVER_TYPE)
def get_net_uuid(self, vif_id):
for network_id, vlan_mapping in six.iteritems(self.local_vlan_map):
if vif_id in vlan_mapping.vif_ports:
return network_id
def port_update(self, context, **kwargs):
port = kwargs.get('port')
# Put the port identifier in the updated_ports set.
# Even if full port details might be provided to this call,
# they are not used since there is no guarantee the notifications
# are processed in the same order as the relevant API requests
self.updated_ports.add(port['id'])
LOG.debug("port_update message processed for port %s", port['id'])
def port_delete(self, context, **kwargs):
port_id = kwargs.get('port_id')
self.deleted_ports.add(port_id)
self.updated_ports.discard(port_id)
LOG.debug("port_delete message processed for port %s", port_id)
def network_update(self, context, **kwargs):
network_id = kwargs['network']['id']
for port_id in self.network_ports[network_id]:
# notifications could arrive out of order, if the port is deleted
# we don't want to update it anymore
if port_id not in self.deleted_ports:
self.updated_ports.add(port_id)
LOG.debug("network_update message processed for network "
"%(network_id)s, with ports: %(ports)s",
{'network_id': network_id,
'ports': self.network_ports[network_id]})
def _clean_network_ports(self, port_id):
for port_set in self.network_ports.values():
if port_id in port_set:
port_set.remove(port_id)
break
    def process_deleted_ports(self, port_info):
        """Unbind queued deleted ports and flush their firewall rules.

        :param port_info: scan result dict; its optional 'removed' set lists
            ports already gone from the bridge.
        """
        # don't try to process removed ports as deleted ports since
        # they are already gone
        if 'removed' in port_info:
            self.deleted_ports -= port_info['removed']
        # Snapshot the set before draining it so the firewall flush below
        # covers exactly the ports handled in this pass.
        deleted_ports = list(self.deleted_ports)
        while self.deleted_ports:
            port_id = self.deleted_ports.pop()
            port = self.int_br.get_vif_port_by_id(port_id)
            self._clean_network_ports(port_id)
            self.ext_manager.delete_port(self.context,
                                         {"vif_port": port,
                                          "port_id": port_id})
            # move to dead VLAN so deleted ports no
            # longer have access to the network
            if port:
                # don't log errors since there is a chance someone will be
                # removing the port from the bridge at the same time
                self.port_dead(port, log_errors=False)
            self.port_unbound(port_id)
        # Flush firewall rules after ports are put on dead VLAN to be
        # more secure
        self.sg_agent.remove_devices_filter(deleted_ports)
    def tunnel_update(self, context, **kwargs):
        """RPC callback: a peer announced a tunnel endpoint; set up a port.

        Expects 'tunnel_ip' and 'tunnel_type' in kwargs. No-op when
        tunneling is disabled, the type is unsupported, or the endpoint is
        this host itself.
        """
        LOG.debug("tunnel_update received")
        if not self.enable_tunneling:
            return
        tunnel_ip = kwargs.get('tunnel_ip')
        tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
        if not tunnel_ip_hex:
            return
        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
            return
        if tunnel_type not in self.tunnel_types:
            LOG.error(_LE("tunnel_type %s not supported by agent"),
                      tunnel_type)
            return
        # Never build a tunnel to ourselves.
        if tunnel_ip == self.local_ip:
            return
        tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
        # With l2_pop enabled, tunnel ports are created on demand from FDB
        # entries instead of here.
        if not self.l2_pop:
            self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
                                    tunnel_type)
def tunnel_delete(self, context, **kwargs):
LOG.debug("tunnel_delete received")
if not self.enable_tunneling:
return
tunnel_ip = kwargs.get('tunnel_ip')
if not tunnel_ip:
LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
return
tunnel_type = kwargs.get('tunnel_type')
if not tunnel_type:
LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
return
if tunnel_type not in self.tunnel_types:
LOG.error(_LE("tunnel_type %s not supported by agent"),
tunnel_type)
return
ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)
def _tunnel_port_lookup(self, network_type, remote_ip):
return self.tun_br_ofports[network_type].get(remote_ip)
    def fdb_add(self, context, fdb_entries):
        """L2pop RPC callback: install forwarding entries for remote ports."""
        LOG.debug("fdb_add received")
        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
                                                     self.local_vlan_map):
            # Entries for this host itself need no tunnel flows.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                if not self.enable_distributed_routing:
                    # Batch the flow mods through a deferred bridge.
                    with self.tun_br.deferred() as deferred_br:
                        self.fdb_add_tun(context, deferred_br, lvm,
                                         agent_ports, self._tunnel_port_lookup)
                else:
                    # DVR: apply directly on the tunnel bridge.
                    self.fdb_add_tun(context, self.tun_br, lvm,
                                     agent_ports, self._tunnel_port_lookup)
    def fdb_remove(self, context, fdb_entries):
        """L2pop RPC callback: remove forwarding entries for remote ports."""
        LOG.debug("fdb_remove received")
        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
                                                     self.local_vlan_map):
            # Entries for this host itself carry no tunnel flows to remove.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                if not self.enable_distributed_routing:
                    # Batch the flow mods through a deferred bridge.
                    with self.tun_br.deferred() as deferred_br:
                        self.fdb_remove_tun(context, deferred_br, lvm,
                                            agent_ports,
                                            self._tunnel_port_lookup)
                else:
                    # DVR: apply directly on the tunnel bridge.
                    self.fdb_remove_tun(context, self.tun_br, lvm,
                                        agent_ports, self._tunnel_port_lookup)
    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        """Install tunnel flows for a single FDB entry.

        A FLOODING_ENTRY extends the network's flood set; any other entry
        adds a unicast flow (and, if enabled, a local ARP responder entry).
        """
        if port_info == n_const.FLOODING_ENTRY:
            lvm.tun_ofports.add(ofport)
            br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
                                    lvm.tun_ofports)
        else:
            self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
                                           port_info.mac_address,
                                           port_info.ip_address)
            br.install_unicast_to_tun(lvm.vlan,
                                      lvm.segmentation_id,
                                      ofport,
                                      port_info.mac_address)
    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
        """Remove tunnel flows for a single FDB entry.

        A FLOODING_ENTRY shrinks (or deletes) the network's flood set; any
        other entry removes the unicast flow and its ARP responder entry.
        """
        if port_info == n_const.FLOODING_ENTRY:
            if ofport not in lvm.tun_ofports:
                LOG.debug("attempt to remove a non-existent port %s", ofport)
                return
            lvm.tun_ofports.remove(ofport)
            if len(lvm.tun_ofports) > 0:
                br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id,
                                        lvm.tun_ofports)
            else:
                # This local vlan doesn't require any more tunneling
                br.delete_flood_to_tun(lvm.vlan)
        else:
            self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
                                           port_info.mac_address,
                                           port_info.ip_address)
            br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address)
def _fdb_chg_ip(self, context, fdb_entries):
LOG.debug("update chg_ip received")
with self.tun_br.deferred() as deferred_br:
self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
self.local_ip, self.local_vlan_map)
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Set the ARP respond entry.
When the l2 population mechanism driver and OVS supports to edit ARP
fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the
tunnel bridge.
'''
if not self.arp_responder_enabled:
return
ip = netaddr.IPAddress(ip_address)
if ip.version == 6:
return
ip = str(ip)
mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect))
if action == 'add':
br.install_arp_responder(local_vid, ip, mac)
elif action == 'remove':
br.delete_arp_responder(local_vid, ip)
else:
LOG.warning(_LW('Action %s not supported'), action)
def _local_vlan_for_flat(self, lvid, physical_network):
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=None,
distributed=False)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=None)
def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id):
distributed = self.enable_distributed_routing
phys_br = self.phys_brs[physical_network]
phys_port = self.phys_ofports[physical_network]
int_br = self.int_br
int_port = self.int_ofports[physical_network]
phys_br.provision_local_vlan(port=phys_port, lvid=lvid,
segmentation_id=segmentation_id,
distributed=distributed)
int_br.provision_local_vlan(port=int_port, lvid=lvid,
segmentation_id=segmentation_id)
    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id):
        '''Provisions a local VLAN.
        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
               'local', 'geneve')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        '''
        # On a restart or crash of OVS, the network associated with this VLAN
        # will already be assigned, so check for that here before assigning a
        # new one.
        lvm = self.local_vlan_map.get(net_uuid)
        if lvm:
            lvid = lvm.vlan
        else:
            # Prefer the vlan id recovered from ovsdb at startup, if any.
            lvid = self._local_vlan_hints.pop(net_uuid, None)
            if lvid is None:
                if not self.available_local_vlans:
                    LOG.error(_LE("No local VLAN available for net-id=%s"),
                              net_uuid)
                    return
                lvid = self.available_local_vlans.pop()
            self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
                                                             network_type,
                                                             physical_network,
                                                             segmentation_id)
        LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
                     "net-id=%(net_uuid)s"),
                 {'vlan_id': lvid, 'net_uuid': net_uuid})
        # Install the flows appropriate for each network type.
        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # outbound broadcast/multicast
                ofports = list(self.tun_br_ofports[network_type].values())
                if ofports:
                    self.tun_br.install_flood_to_tun(lvid,
                                                     segmentation_id,
                                                     ofports)
                # inbound from tunnels: set lvid in the right table
                # and resubmit to Table LEARN_FROM_TUN for mac learning
                if self.enable_distributed_routing:
                    self.dvr_agent.process_tunneled_network(
                        network_type, lvid, segmentation_id)
                else:
                    self.tun_br.provision_local_vlan(
                        network_type=network_type, lvid=lvid,
                        segmentation_id=segmentation_id)
            else:
                LOG.error(_LE("Cannot provision %(network_type)s network for "
                              "net-id=%(net_uuid)s - tunneling disabled"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type == p_const.TYPE_FLAT:
            if physical_network in self.phys_brs:
                self._local_vlan_for_flat(lvid, physical_network)
            else:
                LOG.error(_LE("Cannot provision flat network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_VLAN:
            if physical_network in self.phys_brs:
                self._local_vlan_for_vlan(lvid, physical_network,
                                          segmentation_id)
            else:
                LOG.error(_LE("Cannot provision VLAN network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot provision unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': network_type,
                       'net_uuid': net_uuid})
    def reclaim_local_vlan(self, net_uuid):
        '''Reclaim a local VLAN.
        :param net_uuid: the network uuid associated with this vlan.
        '''
        lvm = self.local_vlan_map.pop(net_uuid, None)
        if lvm is None:
            LOG.debug("Network %s not used on agent.", net_uuid)
            return
        LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
                     "net-id = %(net_uuid)s"),
                 {'vlan_id': lvm.vlan, 'net_uuid': net_uuid})
        # Undo the per-network-type flows installed by provision_local_vlan.
        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                self.tun_br.reclaim_local_vlan(
                    network_type=lvm.network_type,
                    segmentation_id=lvm.segmentation_id)
                self.tun_br.delete_flood_to_tun(lvm.vlan)
                self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
                self.tun_br.delete_arp_responder(lvm.vlan, None)
                if self.l2_pop:
                    # Try to remove tunnel ports if not used by other networks
                    for ofport in lvm.tun_ofports:
                        self.cleanup_tunnel_port(self.tun_br, ofport,
                                                 lvm.network_type)
        elif lvm.network_type == p_const.TYPE_FLAT:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=None)
        elif lvm.network_type == p_const.TYPE_VLAN:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=lvm.segmentation_id)
        elif lvm.network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot reclaim unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': lvm.network_type,
                       'net_uuid': net_uuid})
        # Return the vlan id to the pool for reuse by other networks.
        self.available_local_vlans.add(lvm.vlan)
    def port_bound(self, port, net_uuid,
                   network_type, physical_network,
                   segmentation_id, fixed_ips, device_owner,
                   ovs_restarted):
        '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
        to vm.
        :param port: a ovs_lib.VifPort object.
        :param net_uuid: the net_uuid this port is to be associated with.
        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param fixed_ips: the ip addresses assigned to this port
        :param device_owner: the string indicative of owner of this port
        :param ovs_restarted: indicates if this is called for an OVS restart.
        '''
        # (Re-)provision the local vlan after an OVS restart since the
        # flows were lost.
        if net_uuid not in self.local_vlan_map or ovs_restarted:
            self.provision_local_vlan(net_uuid, network_type,
                                      physical_network, segmentation_id)
        lvm = self.local_vlan_map[net_uuid]
        lvm.vif_ports[port.vif_id] = port
        self.dvr_agent.bind_port_to_dvr(port, lvm,
                                        fixed_ips,
                                        device_owner)
        port_other_config = self.int_br.db_get_val("Port", port.port_name,
                                                   "other_config")
        # A missing other_config row means the port vanished from ovsdb;
        # report why and signal the failed binding to the caller.
        if port_other_config is None:
            if port.vif_id in self.deleted_ports:
                LOG.debug("Port %s deleted concurrently", port.vif_id)
            elif port.vif_id in self.updated_ports:
                LOG.error(_LE("Expected port %s not found"), port.vif_id)
            else:
                LOG.debug("Unable to get config for port %s", port.vif_id)
            return False
        # Persist the binding in ovsdb so _restore_local_vlan_map can
        # rebuild the mapping after an agent restart.
        vlan_mapping = {'net_uuid': net_uuid,
                        'network_type': network_type,
                        'physical_network': physical_network}
        if segmentation_id is not None:
            vlan_mapping['segmentation_id'] = segmentation_id
        port_other_config.update(vlan_mapping)
        self.int_br.set_db_attribute("Port", port.port_name, "other_config",
                                     port_other_config)
        return True
    def _bind_devices(self, need_binding_ports):
        """Apply local VLAN tags to ports and report their status upward.

        :param need_binding_ports: list of port-detail dicts (each with
            'vif_port', 'device', 'network_id', 'admin_state_up' keys).
        :raises DeviceListRetrievalError: when the plugin reports that
            some device status updates failed.
        """
        devices_up = []
        devices_down = []
        # Fetch all current tags in one OVSDB round trip instead of one
        # query per port.
        port_names = [p['vif_port'].port_name for p in need_binding_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "tag"], ports=port_names, if_exists=True)
        tags_by_name = {x['name']: x['tag'] for x in port_info}
        for port_detail in need_binding_ports:
            lvm = self.local_vlan_map.get(port_detail['network_id'])
            if not lvm:
                # network for port was deleted. skip this port since it
                # will need to be handled as a DEAD port in the next scan
                continue
            port = port_detail['vif_port']
            device = port_detail['device']
            # Do not bind a port if it's already bound
            cur_tag = tags_by_name.get(port.port_name)
            if cur_tag is None:
                LOG.debug("Port %s was deleted concurrently, skipping it",
                          port.port_name)
                continue
            if cur_tag != lvm.vlan:
                # Tag will change: drop stale flows keyed on this ofport.
                self.int_br.delete_flows(in_port=port.ofport)
            if self.prevent_arp_spoofing:
                self.setup_arp_spoofing_protection(self.int_br,
                                                   port, port_detail)
            if cur_tag != lvm.vlan:
                self.int_br.set_db_attribute(
                    "Port", port.port_name, "tag", lvm.vlan)
            # update plugin about port status
            # FIXME(salv-orlando): Failures while updating device status
            # must be handled appropriately. Otherwise this might prevent
            # neutron server from sending network-vif-* events to the nova
            # API server, thus possibly preventing instance spawn.
            if port_detail.get('admin_state_up'):
                LOG.debug("Setting status for %s to UP", device)
                devices_up.append(device)
            else:
                LOG.debug("Setting status for %s to DOWN", device)
                devices_down.append(device)
        failed_devices = []
        if devices_up or devices_down:
            devices_set = self.plugin_rpc.update_device_list(
                self.context, devices_up, devices_down, self.agent_id,
                self.conf.host)
            failed_devices = (devices_set.get('failed_devices_up') +
                              devices_set.get('failed_devices_down'))
        if failed_devices:
            LOG.error(_LE("Configuration for devices %s failed!"),
                      failed_devices)
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=failed_devices)
        LOG.info(_LI("Configuration for devices up %(up)s and devices "
                     "down %(down)s completed."),
                 {'up': devices_up, 'down': devices_down})
    @staticmethod
    def setup_arp_spoofing_protection(bridge, vif, port_details):
        """Install ARP/NA anti-spoofing flows for a single VIF.

        Restricts ARP (IPv4) and ICMPv6 neighbor advertisements (IPv6)
        leaving the port to the addresses the port legitimately owns:
        its fixed IPs, its allowed-address-pairs, and the link-local
        addresses derived from its MAC(s).

        :param bridge: the bridge object on which to install flows.
        :param vif: the VifPort to protect.
        :param port_details: dict with at least 'fixed_ips',
            'device_owner', and optionally 'port_security_enabled' and
            'allowed_address_pairs'.
        """
        # clear any previous flows related to this port in our ARP table
        bridge.delete_arp_spoofing_protection(port=vif.ofport)
        if not port_details.get('port_security_enabled', True):
            LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
                         "it has port security disabled"), vif.port_name)
            return
        if port_details['device_owner'].startswith('network:'):
            LOG.debug("Skipping ARP spoofing rules for network owned port "
                      "'%s'.", vif.port_name)
            return
        # collect all of the addresses and cidrs that belong to the port
        addresses = {f['ip_address'] for f in port_details['fixed_ips']}
        mac_addresses = {vif.vif_mac}
        if port_details.get('allowed_address_pairs'):
            addresses |= {p['ip_address']
                          for p in port_details['allowed_address_pairs']}
            mac_addresses |= {p['mac_address']
                              for p in port_details['allowed_address_pairs']
                              if p.get('mac_address')}

        ipv6_addresses = {ip for ip in addresses
                          if netaddr.IPNetwork(ip).version == 6}
        # Allow neighbor advertisements for LLA address.
        ipv6_addresses |= {str(ipv6.get_ipv6_addr_by_EUI64(
                               n_const.IPV6_LLA_PREFIX, mac))
                           for mac in mac_addresses}
        if not has_zero_prefixlen_address(ipv6_addresses):
            # Install protection only when prefix is not zero because a /0
            # prefix allows any address anyway and the nd_target can only
            # match on /1 or more.
            bridge.install_icmpv6_na_spoofing_protection(port=vif.ofport,
                ip_addresses=ipv6_addresses)

        ipv4_addresses = {ip for ip in addresses
                          if netaddr.IPNetwork(ip).version == 4}
        if not has_zero_prefixlen_address(ipv4_addresses):
            # Install protection only when prefix is not zero because a /0
            # prefix allows any address anyway and the ARP_SPA can only
            # match on /1 or more.
            bridge.install_arp_spoofing_protection(port=vif.ofport,
                                                   ip_addresses=ipv4_addresses)
def port_unbound(self, vif_id, net_uuid=None):
'''Unbind port.
Removes corresponding local vlan mapping object if this is its last
VIF.
:param vif_id: the id of the vif
:param net_uuid: the net_uuid this port is associated with.
'''
if net_uuid is None:
net_uuid = self.get_net_uuid(vif_id)
if not self.local_vlan_map.get(net_uuid):
LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'),
net_uuid)
return
lvm = self.local_vlan_map[net_uuid]
if vif_id in lvm.vif_ports:
vif_port = lvm.vif_ports[vif_id]
self.dvr_agent.unbind_port_from_dvr(vif_port, lvm)
lvm.vif_ports.pop(vif_id, None)
if not lvm.vif_ports:
self.reclaim_local_vlan(net_uuid)
def port_dead(self, port, log_errors=True):
'''Once a port has no binding, put it on the "dead vlan".
:param port: a ovs_lib.VifPort object.
'''
# Don't kill a port if it's already dead
cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag",
log_errors=log_errors)
if cur_tag != DEAD_VLAN_TAG:
self.int_br.set_db_attribute("Port", port.port_name, "tag",
DEAD_VLAN_TAG, log_errors=log_errors)
self.int_br.drop_port(in_port=port.ofport)
    def setup_integration_br(self):
        '''Setup the integration bridge.

        Creates the bridge if needed, secures it, installs the default
        flow table, and removes any stale tunnel peer patch port.
        '''
        self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
        # Ensure the integration bridge is created.
        # ovs_lib.OVSBridge.create() will run
        #   ovs-vsctl -- --may-exist add-br BRIDGE_NAME
        # which does nothing if bridge already exists.
        self.int_br.create()
        self.int_br.set_secure_mode()
        self.int_br.setup_controllers(self.conf)
        # Remove a possibly stale patch port toward the tunnel bridge; it
        # is re-created by setup_tunnel_br() when tunneling is enabled.
        self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
        if self.conf.AGENT.drop_flows_on_start:
            self.int_br.delete_flows()
        self.int_br.setup_default_table()
def setup_ancillary_bridges(self, integ_br, tun_br):
'''Setup ancillary bridges - for example br-ex.'''
ovs = ovs_lib.BaseOVS()
ovs_bridges = set(ovs.get_bridges())
# Remove all known bridges
ovs_bridges.remove(integ_br)
if self.enable_tunneling:
ovs_bridges.remove(tun_br)
br_names = [self.phys_brs[physical_network].br_name for
physical_network in self.phys_brs]
ovs_bridges.difference_update(br_names)
# Filter list of bridges to those that have external
# bridge-id's configured
br_names = []
for bridge in ovs_bridges:
bridge_id = ovs.get_bridge_external_bridge_id(bridge)
if bridge_id != bridge:
br_names.append(bridge)
ovs_bridges.difference_update(br_names)
ancillary_bridges = []
for bridge in ovs_bridges:
br = ovs_lib.OVSBridge(bridge)
LOG.info(_LI('Adding %s to list of bridges.'), bridge)
ancillary_bridges.append(br)
return ancillary_bridges
def setup_tunnel_br(self, tun_br_name=None):
'''(re)initialize the tunnel bridge.
Creates tunnel bridge, and links it to the integration bridge
using a patch port.
:param tun_br_name: the name of the tunnel bridge.
'''
if not self.tun_br:
self.tun_br = self.br_tun_cls(tun_br_name)
self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
if not self.tun_br.bridge_exists(self.tun_br.br_name):
self.tun_br.create(secure_mode=True)
self.tun_br.setup_controllers(self.conf)
if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or
self.patch_tun_ofport == ovs_lib.INVALID_OFPORT):
self.patch_tun_ofport = self.int_br.add_patch_port(
self.conf.OVS.int_peer_patch_port,
self.conf.OVS.tun_peer_patch_port)
if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or
self.patch_int_ofport == ovs_lib.INVALID_OFPORT):
self.patch_int_ofport = self.tun_br.add_patch_port(
self.conf.OVS.tun_peer_patch_port,
self.conf.OVS.int_peer_patch_port)
if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport,
self.patch_int_ofport):
LOG.error(_LE("Failed to create OVS patch port. Cannot have "
"tunneling enabled on this agent, since this "
"version of OVS does not support tunnels or patch "
"ports. Agent terminated!"))
exit(1)
if self.conf.AGENT.drop_flows_on_start:
self.tun_br.delete_flows()
    def setup_tunnel_br_flows(self):
        '''Setup the tunnel bridge.

        Add all flows to the tunnel bridge.
        '''
        self.tun_br.setup_default_table(self.patch_int_ofport,
                                        self.arp_responder_enabled)
def get_peer_name(self, prefix, name):
"""Construct a peer name based on the prefix and name.
The peer name can not exceed the maximum length allowed for a linux
device. Longer names are hashed to help ensure uniqueness.
"""
if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN:
return prefix + name
# We can't just truncate because bridges may be distinguished
# by an ident at the end. A hash over the name should be unique.
# Leave part of the bridge name on for easier identification
hashlen = 6
namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen
if isinstance(name, six.text_type):
hashed_name = hashlib.sha1(name.encode('utf-8'))
else:
hashed_name = hashlib.sha1(name)
new_name = ('%(prefix)s%(truncated)s%(hash)s' %
{'prefix': prefix, 'truncated': name[0:namelen],
'hash': hashed_name.hexdigest()[0:hashlen]})
LOG.warning(_LW("Creating an interface named %(name)s exceeds the "
"%(limit)d character limitation. It was shortened to "
"%(new_name)s to fit."),
{'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN,
'new_name': new_name})
return new_name
    def setup_physical_bridges(self, bridge_mappings):
        '''Setup the physical network bridges.

        Creates physical network bridges and links them to the
        integration bridge using veths or patch ports.

        :param bridge_mappings: map physical network names to bridge names.
        '''
        self.phys_brs = {}
        self.int_ofports = {}
        self.phys_ofports = {}
        ip_wrapper = ip_lib.IPWrapper()
        ovs = ovs_lib.BaseOVS()
        ovs_bridges = ovs.get_bridges()
        for physical_network, bridge in six.iteritems(bridge_mappings):
            LOG.info(_LI("Mapping physical network %(physical_network)s to "
                         "bridge %(bridge)s"),
                     {'physical_network': physical_network,
                      'bridge': bridge})
            # setup physical bridge
            # Physical bridges are provisioned by the operator; the agent
            # refuses to start when one is missing.
            if bridge not in ovs_bridges:
                LOG.error(_LE("Bridge %(bridge)s for physical network "
                              "%(physical_network)s does not exist. Agent "
                              "terminated!"),
                          {'physical_network': physical_network,
                           'bridge': bridge})
                sys.exit(1)
            br = self.br_phys_cls(bridge)
            br.setup_controllers(self.conf)
            br.setup_default_table()
            self.phys_brs[physical_network] = br
            # interconnect physical and integration bridges using veth/patchs
            int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
                                             bridge)
            phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
                                              bridge)
            # Interface type of port for physical and integration bridges must
            # be same, so check only one of them.
            int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
            if self.use_veth_interconnection:
                # Drop ports if the interface types doesn't match the
                # configuration value.
                if int_type == 'patch':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                device = ip_lib.IPDevice(int_if_name)
                if device.exists():
                    device.link.delete()
                    # Give udev a chance to process its rules here, to avoid
                    # race conditions between commands launched by udev rules
                    # and the subsequent call to ip_wrapper.add_veth
                    utils.execute(['udevadm', 'settle', '--timeout=10'])
                int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
                                                          phys_if_name)
                int_ofport = self.int_br.add_port(int_veth)
                phys_ofport = br.add_port(phys_veth)
            else:
                # Drop ports if the interface type doesn't match the
                # configuration value
                if int_type == 'veth':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                # Create patch ports without associating them in order to block
                # untranslated traffic before association
                int_ofport = self.int_br.add_patch_port(
                    int_if_name, constants.NONEXISTENT_PEER)
                phys_ofport = br.add_patch_port(
                    phys_if_name, constants.NONEXISTENT_PEER)
            self.int_ofports[physical_network] = int_ofport
            self.phys_ofports[physical_network] = phys_ofport
            # block all untranslated traffic between bridges
            self.int_br.drop_port(in_port=int_ofport)
            br.drop_port(in_port=phys_ofport)
            if self.use_veth_interconnection:
                # enable veth to pass traffic
                int_veth.link.set_up()
                phys_veth.link.set_up()
                if self.veth_mtu:
                    # set up mtu size for veth interfaces
                    int_veth.link.set_mtu(self.veth_mtu)
                    phys_veth.link.set_mtu(self.veth_mtu)
            else:
                # associate patch ports to pass traffic
                self.int_br.set_db_attribute('Interface', int_if_name,
                                             'options:peer', phys_if_name)
                br.set_db_attribute('Interface', phys_if_name,
                                    'options:peer', int_if_name)
def update_stale_ofport_rules(self):
# right now the ARP spoofing rules are the only thing that utilizes
# ofport-based rules, so make arp_spoofing protection a conditional
# until something else uses ofport
if not self.prevent_arp_spoofing:
return []
previous = self.vifname_to_ofport_map
current = self.int_br.get_vif_port_to_ofport_map()
# if any ofport numbers have changed, re-process the devices as
# added ports so any rules based on ofport numbers are updated.
moved_ports = self._get_ofport_moves(current, previous)
# delete any stale rules based on removed ofports
ofports_deleted = set(previous.values()) - set(current.values())
for ofport in ofports_deleted:
self.int_br.delete_arp_spoofing_protection(port=ofport)
# store map for next iteration
self.vifname_to_ofport_map = current
return moved_ports
@staticmethod
def _get_ofport_moves(current, previous):
"""Returns a list of moved ports.
Takes two port->ofport maps and returns a list ports that moved to a
different ofport. Deleted ports are not included.
"""
port_moves = []
for name, ofport in previous.items():
if name not in current:
continue
current_ofport = current[name]
if ofport != current_ofport:
port_moves.append(name)
return port_moves
def _get_port_info(self, registered_ports, cur_ports,
readd_registered_ports):
port_info = {'current': cur_ports}
# FIXME(salv-orlando): It's not really necessary to return early
# if nothing has changed.
if not readd_registered_ports and cur_ports == registered_ports:
return port_info
if readd_registered_ports:
port_info['added'] = cur_ports
else:
port_info['added'] = cur_ports - registered_ports
# Update port_info with ports not found on the integration bridge
port_info['removed'] = registered_ports - cur_ports
return port_info
    def scan_ports(self, registered_ports, sync, updated_ports=None):
        """Compute current/added/removed/updated port sets for this loop.

        :param registered_ports: ports known from the previous iteration.
        :param sync: when true, report every current port as added.
        :param updated_ports: ports flagged as updated via RPC, if any.
        :returns: port_info dict with 'current' and optionally 'added',
            'removed' and 'updated' sets.
        """
        cur_ports = self.int_br.get_vif_port_set()
        self.int_br_device_count = len(cur_ports)
        port_info = self._get_port_info(registered_ports, cur_ports, sync)
        if updated_ports is None:
            updated_ports = set()
        # Ports whose VLAN tag was lost also count as updated.
        updated_ports.update(self.check_changed_vlans())
        if updated_ports:
            # Some updated ports might have been removed in the
            # meanwhile, and therefore should not be processed.
            # In this case the updated port won't be found among
            # current ports.
            updated_ports &= cur_ports
            if updated_ports:
                port_info['updated'] = updated_ports
        return port_info
def scan_ancillary_ports(self, registered_ports, sync):
cur_ports = set()
for bridge in self.ancillary_brs:
cur_ports |= bridge.get_vif_port_set()
return self._get_port_info(registered_ports, cur_ports, sync)
def check_changed_vlans(self):
"""Return ports which have lost their vlan tag.
The returned value is a set of port ids of the ports concerned by a
vlan tag loss.
"""
port_tags = self.int_br.get_port_tag_dict()
changed_ports = set()
for lvm in self.local_vlan_map.values():
for port in lvm.vif_ports.values():
if (
port.port_name in port_tags
and port_tags[port.port_name] != lvm.vlan
):
LOG.info(
_LI("Port '%(port_name)s' has lost "
"its vlan tag '%(vlan_tag)d'!"),
{'port_name': port.port_name,
'vlan_tag': lvm.vlan}
)
changed_ports.add(port.vif_id)
return changed_ports
def treat_vif_port(self, vif_port, port_id, network_id, network_type,
physical_network, segmentation_id, admin_state_up,
fixed_ips, device_owner, ovs_restarted):
# When this function is called for a port, the port should have
# an OVS ofport configured, as only these ports were considered
# for being treated. If that does not happen, it is a potential
# error condition of which operators should be aware
port_needs_binding = True
if not vif_port.ofport:
LOG.warn(_LW("VIF port: %s has no ofport configured, "
"and might not be able to transmit"), vif_port.vif_id)
if vif_port:
if admin_state_up:
port_needs_binding = self.port_bound(
vif_port, network_id, network_type,
physical_network, segmentation_id,
fixed_ips, device_owner, ovs_restarted)
else:
self.port_dead(vif_port)
port_needs_binding = False
else:
LOG.debug("No VIF port for port %s defined on agent.", port_id)
return port_needs_binding
    def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type):
        """Create one tunnel port toward a remote agent and wire its flows.

        :param br: tunnel bridge on which to create the port.
        :param port_name: name for the new tunnel port.
        :param remote_ip: IP of the remote tunnel endpoint.
        :param tunnel_type: e.g. 'gre' or 'vxlan'.
        :returns: the new ofport, or 0 on failure.
        """
        ofport = br.add_tunnel_port(port_name,
                                    remote_ip,
                                    self.local_ip,
                                    tunnel_type,
                                    self.vxlan_udp_port,
                                    self.dont_fragment,
                                    self.tunnel_csum)
        if ofport == ovs_lib.INVALID_OFPORT:
            LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"),
                      {'type': tunnel_type, 'ip': remote_ip})
            return 0
        self.tun_br_ofports[tunnel_type][remote_ip] = ofport
        # Add flow in default table to resubmit to the right
        # tunneling table (lvid will be set in the latter)
        br.setup_tunnel_port(tunnel_type, ofport)
        ofports = self.tun_br_ofports[tunnel_type].values()
        if ofports and not self.l2_pop:
            # Update flooding flows to include the new tunnel
            for vlan_mapping in list(self.local_vlan_map.values()):
                if vlan_mapping.network_type == tunnel_type:
                    br.install_flood_to_tun(vlan_mapping.vlan,
                                            vlan_mapping.segmentation_id,
                                            ofports)
        return ofport
def setup_tunnel_port(self, br, remote_ip, network_type):
remote_ip_hex = self.get_ip_in_hex(remote_ip)
if not remote_ip_hex:
return 0
port_name = '%s-%s' % (network_type, remote_ip_hex)
ofport = self._setup_tunnel_port(br,
port_name,
remote_ip,
network_type)
return ofport
    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
        """Delete a tunnel port if no local VLAN still uses it.

        :param br: the tunnel bridge owning the port.
        :param tun_ofport: ofport of the tunnel port to consider.
        :param tunnel_type: e.g. 'gre' or 'vxlan'.
        """
        # Check if this tunnel port is still used
        for lvm in self.local_vlan_map.values():
            if tun_ofport in lvm.tun_ofports:
                break
        # If not, remove it
        else:
            # NOTE: for-else — this branch runs only when no lvm above
            # referenced the ofport (the loop finished without break).
            items = list(self.tun_br_ofports[tunnel_type].items())
            for remote_ip, ofport in items:
                if ofport == tun_ofport:
                    port_name = '%s-%s' % (tunnel_type,
                                           self.get_ip_in_hex(remote_ip))
                    br.delete_port(port_name)
                    br.cleanup_tunnel_port(ofport)
                    self.tun_br_ofports[tunnel_type].pop(remote_ip, None)
    def treat_devices_added_or_updated(self, devices, ovs_restarted):
        """Fetch details for added/updated devices and wire their VIFs.

        :param devices: set of device ids to process.
        :param ovs_restarted: whether OVS was restarted this iteration.
        :returns: tuple (skipped_devices, need_binding_devices,
            security_disabled_devices).
        :raises DeviceListRetrievalError: when the plugin could not
            return details for some devices.
        """
        skipped_devices = []
        need_binding_devices = []
        security_disabled_devices = []
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)
        devices = devices_details_list.get('devices')
        vif_by_id = self.int_br.get_vifs_by_ids(
            [vif['device'] for vif in devices])
        for details in devices:
            device = details['device']
            LOG.debug("Processing port: %s", device)
            port = vif_by_id.get(device)
            if not port:
                # The port disappeared and cannot be processed
                LOG.info(_LI("Port %s was not found on the integration bridge "
                             "and will therefore not be processed"), device)
                skipped_devices.append(device)
                continue

            # A 'port_id' key means the plugin knows this device.
            if 'port_id' in details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': details})
                details['vif_port'] = port
                need_binding = self.treat_vif_port(port, details['port_id'],
                                                   details['network_id'],
                                                   details['network_type'],
                                                   details['physical_network'],
                                                   details['segmentation_id'],
                                                   details['admin_state_up'],
                                                   details['fixed_ips'],
                                                   details['device_owner'],
                                                   ovs_restarted)
                if need_binding:
                    need_binding_devices.append(details)

                # Ports without port security or without security groups
                # are excluded from security-group filtering by the caller.
                port_security = details['port_security_enabled']
                has_sgs = 'security_groups' in details
                if not port_security or not has_sgs:
                    security_disabled_devices.append(device)
                self._update_port_network(details['port_id'],
                                          details['network_id'])
                self.ext_manager.handle_port(self.context, details)
            else:
                LOG.warn(_LW("Device %s not defined on plugin"), device)
                if (port and port.ofport != -1):
                    self.port_dead(port)
        return skipped_devices, need_binding_devices, security_disabled_devices
    def _update_port_network(self, port_id, network_id):
        # Drop any stale network membership for this port, then record it
        # under its current network.
        self._clean_network_ports(port_id)
        self.network_ports[network_id].add(port_id)
    def treat_ancillary_devices_added(self, devices):
        """Report newly seen ancillary-bridge devices as up to the plugin.

        :param devices: iterable of device ids found on ancillary bridges.
        :raises DeviceListRetrievalError: when detail retrieval or the
            status update fails for some devices.
        """
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)
        devices_added = [
            d['device'] for d in devices_details_list.get('devices')]
        LOG.info(_LI("Ancillary Ports %s added"), devices_added)

        # update plugin about port status
        devices_set_up = (
            self.plugin_rpc.update_device_list(self.context,
                                               devices_added,
                                               [],
                                               self.agent_id,
                                               self.conf.host))
        if devices_set_up.get('failed_devices_up'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError()
def treat_devices_removed(self, devices):
resync = False
self.sg_agent.remove_devices_filter(devices)
LOG.info(_LI("Ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for device in devices:
self.port_unbound(device)
return resync
def treat_ancillary_devices_removed(self, devices):
resync = False
LOG.info(_LI("Ancillary ports %s removed"), devices)
devices_down = self.plugin_rpc.update_device_list(self.context,
[],
devices,
self.agent_id,
self.conf.host)
failed_devices = devices_down.get('failed_devices_down')
if failed_devices:
LOG.debug("Port removal failed for %(devices)s ", failed_devices)
resync = True
for detail in devices_down.get('devices_down'):
if detail['exists']:
LOG.info(_LI("Port %s updated."), detail['device'])
# Nothing to do regarding local networking
else:
LOG.debug("Device %s not defined on plugin", detail['device'])
return resync
    def process_network_ports(self, port_info, ovs_restarted):
        """Process added/updated/removed ports for one rpc_loop iteration.

        :param port_info: dict with 'current' and optional 'added',
            'updated', 'removed' port sets.
        :param ovs_restarted: whether OVS was restarted this iteration.
        :returns: True when any step failed and a resync is needed.
        """
        resync_a = False
        resync_b = False
        # TODO(salv-orlando): consider a solution for ensuring notifications
        # are processed exactly in the same order in which they were
        # received. This is tricky because there are two notification
        # sources: the neutron server, and the ovs db monitor process
        # If there is an exception while processing security groups ports
        # will not be wired anyway, and a resync will be triggered
        # VIF wiring needs to be performed always for 'new' devices.
        # For updated ports, re-wiring is not needed in most cases, but needs
        # to be performed anyway when the admin state of a device is changed.
        # A device might be both in the 'added' and 'updated'
        # list at the same time; avoid processing it twice.
        devices_added_updated = (port_info.get('added', set()) |
                                 port_info.get('updated', set()))
        need_binding_devices = []
        security_disabled_ports = []
        if devices_added_updated:
            start = time.time()
            try:
                (skipped_devices, need_binding_devices,
                 security_disabled_ports) = (
                    self.treat_devices_added_or_updated(
                        devices_added_updated, ovs_restarted))
                LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                          "treat_devices_added_or_updated completed. "
                          "Skipped %(num_skipped)d devices of "
                          "%(num_current)d devices currently available. "
                          "Time elapsed: %(elapsed).3f",
                          {'iter_num': self.iter_num,
                           'num_skipped': len(skipped_devices),
                           'num_current': len(port_info['current']),
                           'elapsed': time.time() - start})
                # Update the list of current ports storing only those which
                # have been actually processed.
                port_info['current'] = (port_info['current'] -
                                        set(skipped_devices))
            except DeviceListRetrievalError:
                # Need to resync as there was an error with server
                # communication.
                LOG.exception(_LE("process_network_ports - iteration:%d - "
                                  "failure while retrieving port details "
                                  "from server"), self.iter_num)
                resync_a = True

        # Ports are bound before calling the sg_agent setup. This function
        # fulfill the information needed by the sg_agent setup.
        self._bind_devices(need_binding_devices)

        # TODO(salv-orlando): Optimize avoiding applying filters
        # unnecessarily, (eg: when there are no IP address changes)
        added_ports = port_info.get('added', set())
        if security_disabled_ports:
            added_ports -= set(security_disabled_ports)
        self.sg_agent.setup_port_filters(added_ports,
                                         port_info.get('updated', set()))
        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            resync_b = self.treat_devices_removed(port_info['removed'])
            LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                      "treat_devices_removed completed in %(elapsed).3f",
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def process_ancillary_network_ports(self, port_info):
        """Process added/removed ports on the ancillary bridges.

        :param port_info: dict with optional 'added' and 'removed' sets.
        :returns: True when any step failed and a resync is needed.
        """
        resync_a = False
        resync_b = False
        if 'added' in port_info and port_info['added']:
            start = time.time()
            try:
                self.treat_ancillary_devices_added(port_info['added'])
                LOG.debug("process_ancillary_network_ports - iteration: "
                          "%(iter_num)d - treat_ancillary_devices_added "
                          "completed in %(elapsed).3f",
                          {'iter_num': self.iter_num,
                           'elapsed': time.time() - start})
            except DeviceListRetrievalError:
                # Need to resync as there was an error with server
                # communication.
                LOG.exception(_LE("process_ancillary_network_ports - "
                                  "iteration:%d - failure while retrieving "
                                  "port details from server"), self.iter_num)
                resync_a = True
        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            resync_b = self.treat_ancillary_devices_removed(
                port_info['removed'])
            LOG.debug("process_ancillary_network_ports - iteration: "
                      "%(iter_num)d - treat_ancillary_devices_removed "
                      "completed in %(elapsed).3f",
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})

        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
def get_ip_in_hex(self, ip_address):
try:
return '%08x' % netaddr.IPAddress(ip_address, version=4)
except Exception:
LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
return
    def tunnel_sync(self):
        """Announce the local tunnel endpoint and learn remote peers.

        For each enabled tunnel type, reports this agent's local IP to
        the plugin and (unless L2 population is enabled) creates tunnel
        ports toward every other known endpoint.

        :returns: True when the sync failed and must be retried, False
            on success.
        """
        try:
            for tunnel_type in self.tunnel_types:
                details = self.plugin_rpc.tunnel_sync(self.context,
                                                      self.local_ip,
                                                      tunnel_type,
                                                      self.conf.host)
                # With l2_pop, tunnel ports are created on demand from
                # fdb entries instead of eagerly here.
                if not self.l2_pop:
                    tunnels = details['tunnels']
                    for tunnel in tunnels:
                        if self.local_ip != tunnel['ip_address']:
                            remote_ip = tunnel['ip_address']
                            remote_ip_hex = self.get_ip_in_hex(remote_ip)
                            if not remote_ip_hex:
                                continue
                            tun_name = '%s-%s' % (tunnel_type, remote_ip_hex)
                            self._setup_tunnel_port(self.tun_br,
                                                    tun_name,
                                                    tunnel['ip_address'],
                                                    tunnel_type)
        except Exception as e:
            LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s",
                      {'local_ip': self.local_ip, 'e': e})
            return True
        return False
    def _agent_has_updates(self, polling_manager):
        # Truthy when any source of work is pending: the OVSDB monitor,
        # RPC-updated or -deleted ports, or a pending firewall refresh.
        # NOTE: returns the first truthy operand, not a strict bool.
        return (polling_manager.is_polling_required or
                self.updated_ports or
                self.deleted_ports or
                self.sg_agent.firewall_refresh_needed())
    def _port_info_has_changes(self, port_info):
        # Truthy when the scan produced any added/removed/updated ports.
        # NOTE: returns the first truthy operand, not a strict bool.
        return (port_info.get('added') or
                port_info.get('removed') or
                port_info.get('updated'))
def check_ovs_status(self):
# Check for the canary flow
status = self.int_br.check_canary_table()
if status == constants.OVS_RESTARTED:
LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset "
"bridges and recover ports."))
elif status == constants.OVS_DEAD:
LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running "
"and checking OVS status periodically."))
return status
def loop_count_and_wait(self, start_time, port_stats):
# sleep till end of polling interval
elapsed = time.time() - start_time
LOG.debug("Agent rpc_loop - iteration:%(iter_num)d "
"completed. Processed ports statistics: "
"%(port_stats)s. Elapsed:%(elapsed).3f",
{'iter_num': self.iter_num,
'port_stats': port_stats,
'elapsed': elapsed})
if elapsed < self.polling_interval:
time.sleep(self.polling_interval - elapsed)
else:
LOG.debug("Loop iteration exceeded interval "
"(%(polling_interval)s vs. %(elapsed)s)!",
{'polling_interval': self.polling_interval,
'elapsed': elapsed})
self.iter_num = self.iter_num + 1
def get_port_stats(self, port_info, ancillary_port_info):
port_stats = {
'regular': {
'added': len(port_info.get('added', [])),
'updated': len(port_info.get('updated', [])),
'removed': len(port_info.get('removed', []))}}
if self.ancillary_brs:
port_stats['ancillary'] = {
'added': len(ancillary_port_info.get('added', [])),
'removed': len(ancillary_port_info.get('removed', []))}
return port_stats
def cleanup_stale_flows(self):
if self.iter_num == 0:
bridges = [self.int_br]
if self.enable_tunneling:
bridges.append(self.tun_br)
for bridge in bridges:
LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name)
bridge.cleanup_flows()
    def rpc_loop(self, polling_manager=None):
        """Main agent loop: scan ports, wire VIFs, sync with the plugin.

        Runs until a SIGTERM is caught. Each iteration checks OVS health,
        (re)synchronizes tunnels, scans for port changes and processes
        them; any failure sets ``sync`` so the next iteration performs a
        full resync.

        :param polling_manager: optional polling manager; a non-minimizing
            one is created when not supplied.
        """
        if not polling_manager:
            polling_manager = polling.get_polling_manager(
                minimize_polling=False)

        sync = True
        ports = set()
        updated_ports_copy = set()
        ancillary_ports = set()
        tunnel_sync = True
        ovs_restarted = False
        consecutive_resyncs = 0
        while self._check_and_handle_signal():
            port_info = {}
            ancillary_port_info = {}
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                polling_manager.force_polling()
                consecutive_resyncs = consecutive_resyncs + 1
                # After too many consecutive resyncs, drop the port caches
                # so the next scan starts from a clean slate.
                if consecutive_resyncs >= constants.MAX_DEVICE_RETRIES:
                    LOG.warn(_LW("Clearing cache of registered ports, retrials"
                                 " to resync were > %s"),
                             constants.MAX_DEVICE_RETRIES)
                    ports.clear()
                    ancillary_ports.clear()
                    sync = False
                    consecutive_resyncs = 0
            else:
                consecutive_resyncs = 0
            ovs_status = self.check_ovs_status()
            if ovs_status == constants.OVS_RESTARTED:
                # OVS came back: all bridges/flows are gone and must be
                # rebuilt before any port processing.
                self.setup_integration_br()
                self.setup_physical_bridges(self.bridge_mappings)
                if self.enable_tunneling:
                    self.setup_tunnel_br()
                    self.setup_tunnel_br_flows()
                    tunnel_sync = True
                if self.enable_distributed_routing:
                    self.dvr_agent.reset_ovs_parameters(self.int_br,
                                                        self.tun_br,
                                                        self.patch_int_ofport,
                                                        self.patch_tun_ofport)
                    self.dvr_agent.reset_dvr_parameters()
                    self.dvr_agent.setup_dvr_flows()
            elif ovs_status == constants.OVS_DEAD:
                # Agent doesn't apply any operations when ovs is dead, to
                # prevent unexpected failure or crash. Sleep and continue
                # loop in which ovs status will be checked periodically.
                port_stats = self.get_port_stats({}, {})
                self.loop_count_and_wait(start, port_stats)
                continue
            # Notify the plugin of tunnel IP
            if self.enable_tunneling and tunnel_sync:
                LOG.info(_LI("Agent tunnel out of sync with plugin!"))
                try:
                    tunnel_sync = self.tunnel_sync()
                except Exception:
                    LOG.exception(_LE("Error while synchronizing tunnels"))
                    tunnel_sync = True
            ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
            if self._agent_has_updates(polling_manager) or ovs_restarted:
                try:
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "starting polling. Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # Save updated ports dict to perform rollback in
                    # case resync would be needed, and then clear
                    # self.updated_ports. As the greenthread should not yield
                    # between these two statements, this will be thread-safe
                    updated_ports_copy = self.updated_ports
                    self.updated_ports = set()
                    reg_ports = (set() if ovs_restarted else ports)
                    port_info = self.scan_ports(reg_ports, sync,
                                                updated_ports_copy)
                    self.process_deleted_ports(port_info)
                    ofport_changed_ports = self.update_stale_ofport_rules()
                    if ofport_changed_ports:
                        port_info.setdefault('updated', set()).update(
                            ofport_changed_ports)
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "port information retrieved. "
                              "Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # Treat ancillary devices if they exist
                    if self.ancillary_brs:
                        ancillary_port_info = self.scan_ancillary_ports(
                            ancillary_ports, sync)
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ancillary port info retrieved. "
                                  "Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                    sync = False
                    # Secure and wire/unwire VIFs and update their status
                    # on Neutron server
                    if (self._port_info_has_changes(port_info) or
                        self.sg_agent.firewall_refresh_needed() or
                        ovs_restarted):
                        LOG.debug("Starting to process devices in:%s",
                                  port_info)
                        # If treat devices fails - must resync with plugin
                        sync = self.process_network_ports(port_info,
                                                          ovs_restarted)
                        self.cleanup_stale_flows()
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ports processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})

                    ports = port_info['current']

                    if self.ancillary_brs:
                        sync |= self.process_ancillary_network_ports(
                            ancillary_port_info)
                        LOG.debug("Agent rpc_loop - iteration: "
                                  "%(iter_num)d - ancillary ports "
                                  "processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                        ancillary_ports = ancillary_port_info['current']

                    polling_manager.polling_completed()
                    # Keep this flag in the last line of "try" block,
                    # so we can sure that no other Exception occurred.
                    if not sync:
                        ovs_restarted = False
                        self._dispose_local_vlan_hints()
                except Exception:
                    LOG.exception(_LE("Error while processing VIF ports"))
                    # Put the ports back in self.updated_port
                    self.updated_ports |= updated_ports_copy
                    sync = True
            port_stats = self.get_port_stats(port_info, ancillary_port_info)
            self.loop_count_and_wait(start, port_stats)
    def daemon_loop(self):
        """Entry point for the agent process.

        Installs SIGTERM/SIGHUP handlers, then runs the main RPC loop
        under a polling manager for the lifetime of the agent.
        """
        # Start everything.
        LOG.info(_LI("Agent initialized successfully, now running... "))
        signal.signal(signal.SIGTERM, self._handle_sigterm)
        # SIGHUP does not exist on all platforms (e.g. Windows).
        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, self._handle_sighup)
        # The polling manager drives minimize-polling / ovsdb monitoring;
        # the context manager handles its setup and teardown.
        with polling.get_polling_manager(
            self.minimize_polling,
            self.ovsdb_monitor_respawn_interval) as pm:
            self.rpc_loop(polling_manager=pm)
def _handle_sigterm(self, signum, frame):
self.catch_sigterm = True
if self.quitting_rpc_timeout:
self.set_rpc_timeout(self.quitting_rpc_timeout)
    def _handle_sighup(self, signum, frame):
        """SIGHUP handler: flag the RPC loop to reload configuration.

        The actual reload happens later in _check_and_handle_signal(),
        on the agent's own thread.
        """
        self.catch_sighup = True
    def _check_and_handle_signal(self):
        """Act on signal flags set by the (async) signal handlers.

        SIGTERM stops the daemon loop; SIGHUP reloads configuration and
        logging in place.  Returns whether the daemon loop should keep
        running.
        """
        if self.catch_sigterm:
            LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop."))
            self.run_daemon_loop = False
            self.catch_sigterm = False
        if self.catch_sighup:
            LOG.info(_LI("Agent caught SIGHUP, resetting."))
            # Re-read config files and re-init logging with the new values.
            self.conf.reload_config_files()
            config.setup_logging()
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)
            self.catch_sighup = False
        return self.run_daemon_loop
def set_rpc_timeout(self, timeout):
for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
self.dvr_plugin_rpc, self.state_rpc):
rpc_api.client.timeout = timeout
def _check_agent_configurations(self):
if (self.enable_distributed_routing and self.enable_tunneling
and not self.l2_pop):
raise ValueError(_("DVR deployments for VXLAN/GRE/Geneve "
"underlays require L2-pop to be enabled, "
"in both the Agent and Server side."))
def create_agent_config_map(config):
    """Create a map of agent config parameters.

    :param config: an instance of cfg.CONF
    :returns: a map of agent configuration parameters
    :raises ValueError: if bridge_mappings cannot be parsed, a tunnel
        type is invalid, or tunneling is enabled without a local_ip
    """
    try:
        bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
    except ValueError as e:
        raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)

    kwargs = dict(
        integ_br=config.OVS.integration_bridge,
        tun_br=config.OVS.tunnel_bridge,
        local_ip=config.OVS.local_ip,
        bridge_mappings=bridge_mappings,
        polling_interval=config.AGENT.polling_interval,
        minimize_polling=config.AGENT.minimize_polling,
        tunnel_types=config.AGENT.tunnel_types,
        veth_mtu=config.AGENT.veth_mtu,
        enable_distributed_routing=config.AGENT.enable_distributed_routing,
        l2_population=config.AGENT.l2_population,
        arp_responder=config.AGENT.arp_responder,
        prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
        use_veth_interconnection=config.OVS.use_veth_interconnection,
        quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
    )

    # Verify the tunnel_types specified are valid
    for tun in kwargs['tunnel_types']:
        if tun not in constants.TUNNEL_NETWORK_TYPES:
            # BUG FIX: the message was previously built as
            # `_('...%s'), tun`, which creates a (str, value) tuple
            # instead of interpolating the offending type into the text.
            msg = _('Invalid tunnel type specified: %s') % tun
            raise ValueError(msg)
        # Any tunnel type at all requires a usable local endpoint IP.
        if not kwargs['local_ip']:
            msg = _('Tunneling cannot be enabled without a valid local_ip.')
            raise ValueError(msg)

    return kwargs
def validate_local_ip(local_ip):
    """If tunneling is enabled, verify if the ip exists on the agent's host."""
    if not cfg.CONF.AGENT.tunnel_types:
        # No tunneling configured, so local_ip is irrelevant.
        return
    if ip_lib.IPWrapper().get_device_by_ip(local_ip):
        # The address is bound to one of this host's interfaces; all good.
        return
    LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
                  " IP couldn't be found on this host's interfaces."),
              local_ip)
    raise SystemExit(1)
def prepare_xen_compute():
    """Adjust ip_lib defaults when running as a XenServer domU compute agent."""
    if 'rootwrap-xen-dom0' not in cfg.CONF.AGENT.root_helper:
        return
    # Force ip_lib to always use the root helper to ensure that ip
    # commands target xen dom0 rather than domU.
    cfg.CONF.register_opts(ip_lib.OPTS)
    cfg.CONF.set_default('ip_lib_force_root', True)
def main(bridge_classes):
    """Build the OVS agent from configuration and run its daemon loop.

    Exits with status 1 if configuration parsing or agent construction
    fails.
    """
    try:
        config_map = create_agent_config_map(cfg.CONF)
    except ValueError:
        LOG.exception(_LE("Agent failed to create agent config map"))
        raise SystemExit(1)

    prepare_xen_compute()
    validate_local_ip(config_map['local_ip'])

    try:
        agent = OVSNeutronAgent(bridge_classes, **config_map)
    except (RuntimeError, ValueError) as e:
        LOG.error(_LE("%s Agent terminated!"), e)
        sys.exit(1)
    agent.daemon_loop()
|
{
"content_hash": "711c0026538bbb7d809c1e60bcd65409",
"timestamp": "",
"source": "github",
"line_count": 1906,
"max_line_length": 79,
"avg_line_length": 46.04616998950682,
"alnum_prop": 0.5430472631147167,
"repo_name": "jumpojoy/neutron",
"id": "230fbb20dfa3543fd11ed275c0684c6c188f3409",
"size": "88392",
"binary": false,
"copies": "2",
"ref": "refs/heads/generic_switch",
"path": "neutron/plugins/ml2/drivers/openvswitch/agent/ovs_neutron_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7609639"
},
{
"name": "Shell",
"bytes": "14832"
}
],
"symlink_target": ""
}
|
from PIL import Image, ImageTk
import math
import terralib.pixelgrid as pixelgrid
class ColorSet:
    """A bounded palette of RGB triples.

    Colors are added one at a time; whenever the palette exceeds
    `maxcolors`, the two closest colors are merged into their average
    until the bound is respected again.
    """

    def __init__(self, maxcolors):
        self.maxcolors = maxcolors
        self.colors = []

    @property
    def palette(self):
        """A copy of the colors, padded with black up to maxcolors entries."""
        pal = list(self.colors)
        while len(pal) != self.maxcolors:
            pal.append((0, 0, 0))
        return pal

    def _maintain(self):
        """Merge closest color pairs until the palette fits maxcolors."""
        while len(self.colors) > self.maxcolors:
            count = len(self.colors)
            # Pick the unordered pair with minimal squared distance;
            # ties resolve to the lexicographically first index pair.
            _, a, b = min(
                (_colordist(self.colors[i], self.colors[j]), i, j)
                for i in range(count)
                for j in range(i + 1, count)
            )
            merged = _averagecolor(self.colors[a], self.colors[b])
            # Remove the higher index first so the lower stays valid.
            self.colors.pop(b)
            self.colors.pop(a)
            self.colors.append(merged)

    def addColor(self, color):
        """Add an (R, G, B[, ...]) color, ignoring any alpha component."""
        rgb = color[0:3]
        if rgb in self.colors:
            return
        self.colors.append(rgb)
        self._maintain()

    def nearestIndex(self, color):
        """Return the palette index closest to `color` (squared distance)."""
        rgb = color[0:3]
        return min(range(len(self.colors)),
                   key=lambda i: _colordist(rgb, self.colors[i]))
def _colordist(color1, color2):
    """Squared Euclidean distance between two RGB triples."""
    dr = color1[0] - color2[0]
    dg = color1[1] - color2[1]
    db = color1[2] - color2[2]
    return dr * dr + dg * dg + db * db
def _averagecolor(color1, color2):
return ( math.floor((color1[0] + color1[0]) / 3),
math.floor((color1[1] + color1[1]) / 3),
math.floor((color1[2] + color1[2]) / 3) )
def _square(a):
return a*a
def importpixelgrid(filen, maxcolors, palette=-1):
    """Load an image file into a PixelGrid quantized to at most maxcolors.

    If `palette` is -1 a palette is derived from the image's pixels;
    otherwise the given list of colors is used as-is.  Only the top-left
    256x256 region of the image is considered.  Pixels that map to
    palette index 0 are left unset in the grid.
    """
    img = Image.open(filen).convert("RGB")
    width = min(img.width, 256)
    height = min(img.height, 256)

    colors = ColorSet(maxcolors)
    if palette == -1:
        # Build palette from the image itself.
        for x in range(width):
            for y in range(height):
                colors.addColor(img.getpixel((x, y)))
    else:
        colors.colors = palette

    grid = pixelgrid.PixelGrid(colors.palette)
    for x in range(width):
        for y in range(height):
            idx = colors.nearestIndex(img.getpixel((x, y)))
            if idx != 0:
                grid.set(x, y, idx)
    return grid
|
{
"content_hash": "a3e088570a92fa2c1930fb2e8b05a9f9",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 76,
"avg_line_length": 29.766990291262136,
"alnum_prop": 0.5107632093933464,
"repo_name": "nicolebranagan/terraformer",
"id": "0cd4e62fbc4239f569fcd8de1fc9bb4c31cdd992",
"size": "3066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terralib/importer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64061"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.utils.imdb import ImdbParser
@pytest.mark.online
class TestImdbParser(object):
    """Live tests for ImdbParser against imdb.com (requires network)."""

    def test_parsed_data(self):
        """Parse a well-known title and spot-check every exposed field."""
        parser = ImdbParser()
        parser.parse('tt0114814')  # The Usual Suspects (1995)
        assert parser.actors == {
            'nm0000592': 'Pete Postlethwaite',
            'nm0261452': 'Christine Estabrook',
            'nm0000751': 'Suzy Amis',
            'nm0000286': 'Stephen Baldwin',
            'nm0000445': 'Dan Hedaya',
            'nm0800339': 'Phillipe Simon',
            'nm0002064': 'Giancarlo Esposito',
            'nm0001590': 'Chazz Palminteri',
            'nm0000321': 'Gabriel Byrne',
            'nm0790436': 'Jack Shearer',
            'nm0000228': 'Kevin Spacey',
            'nm0001629': 'Kevin Pollak',
            'nm0107808': 'Carl Bressler',
            'nm0001125': 'Benicio Del Toro',
            'nm0000860': 'Paul Bartel'
        }, 'Actors not parsed correctly'
        assert parser.directors == {'nm0001741': 'Bryan Singer'}, 'Directors not parsed correctly'
        # Printed for easier debugging when the live genre list drifts.
        print(parser.genres)
        assert len(set(parser.genres).intersection([u'crime', u'mystery', u'thriller'])) == \
            len([u'crime', u'mystery', u'thriller']), 'Genres not parsed correctly'
        assert parser.imdb_id == 'tt0114814', 'ID not parsed correctly'
        assert len(set(parser.languages).intersection(
            ['english', 'hungarian', 'spanish', 'french'])) == 4, 'Languages not parsed correctly'
        assert parser.mpaa_rating == 'R', 'Rating not parsed correctly'
        assert parser.name == 'The Usual Suspects', 'Name not parsed correctly'
        assert parser.photo, 'Photo not parsed correctly'
        assert parser.plot_outline == (
            'Following a truck hijack in New York, five conmen are arrested and brought together for questioning. '
            'As none of them are guilty, they plan a revenge operation against the police. The operation goes well, '
            'but then the influence of a legendary mastermind criminal called Keyser S\xf6ze is felt. It becomes '
            'clear that each one of them has wronged S\xf6ze at some point and must pay back now. The payback job '
            'leaves 27 men dead in a boat explosion, but the real question arises now: Who actually is Keyser S\xf6ze?'
        ), 'Plot outline not parsed correctly'
        # Score and votes are live values; assert ranges, not exact numbers.
        assert 8.0 < parser.score < 9.0, 'Score not parsed correctly'
        assert parser.url == 'https://www.imdb.com/title/tt0114814/', 'URL not parsed correctly'
        assert 400000 < parser.votes < 1000000, 'Votes not parsed correctly'
        assert parser.year == 1995, 'Year not parsed correctly'

    def test_no_plot(self):
        # Make sure parser doesn't crash for movies with no plot
        parser = ImdbParser()
        parser.parse('tt1300562')
        assert parser.name == 'Goodbye Mothers'
        # There is no plot
        assert not parser.plot_outline

    def test_no_year(self):
        # Make sure parser doesn't crash for movies with no year
        parser = ImdbParser()
        parser.parse('tt3303790')
        assert parser.name == 'Master of None'
        # There is no year
        assert not parser.year

    def test_plot_with_links(self):
        """Make sure plot doesn't terminate at the first link. GitHub #756"""
        parser = ImdbParser()
        parser.parse('tt2503944')
        assert parser.plot_outline == ("Chef Adam Jones (Bradley Cooper) had it all - and lost it. A two-star Michelin "
                                       "rockstar with the bad habits to match, the former enfant terrible of the Paris "
                                       "restaurant scene did everything different every time out, and only ever cared "
                                       "about the thrill of creating explosions of taste. To land his own kitchen and "
                                       "that third elusive Michelin star though, he'll need the best of the best on "
                                       "his side, including the beautiful Helene (Sienna Miller).")
|
{
"content_hash": "c546f8fa8a8cf671c3e0bb1f6e2526ab",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 120,
"avg_line_length": 53.92307692307692,
"alnum_prop": 0.6150737042320494,
"repo_name": "LynxyssCZ/Flexget",
"id": "e8a1884b0a241e4278a753415eb8a88d86647726",
"size": "4206",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_imdb_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3371493"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
import os
import bdb
import types
from Tkinter import *
from idlelib.WindowList import ListedToplevel
from idlelib.ScrolledList import ScrolledList
from idlelib import macosxSupport
class Idb(bdb.Bdb):
    """Bdb subclass that forwards debugger stop events to the IDLE GUI."""

    def __init__(self, gui):
        # gui is the Debugger instance that renders interactions.
        self.gui = gui
        bdb.Bdb.__init__(self)

    def user_line(self, frame):
        """Called by bdb when the debugger stops at a source line."""
        if self.in_rpc_code(frame):
            # Don't show IDLE's own RPC plumbing; keep stepping past it.
            self.set_step()
            return
        message = self.__frame2message(frame)
        self.gui.interaction(message, frame)

    def user_exception(self, frame, info):
        """Called by bdb when an exception is raised in the debuggee."""
        if self.in_rpc_code(frame):
            self.set_step()
            return
        message = self.__frame2message(frame)
        self.gui.interaction(message, frame, info)

    def in_rpc_code(self, frame):
        """Return True if `frame` is executing inside the RPC machinery."""
        if frame.f_code.co_filename.count('rpc.py'):
            return True
        prev_frame = frame.f_back
        if prev_frame is None:
            # BUG FIX: reached the bottom of the stack without meeting
            # rpc.py or Debugger.py; the previous code dereferenced
            # None here and raised AttributeError.
            return False
        if prev_frame.f_code.co_filename.count('Debugger.py'):
            # (that test will catch both Debugger.py and RemoteDebugger.py)
            return False
        return self.in_rpc_code(prev_frame)

    def __frame2message(self, frame):
        """Build the 'file:line[: func()]' status message for a frame."""
        code = frame.f_code
        filename = code.co_filename
        lineno = frame.f_lineno
        basename = os.path.basename(filename)
        message = "%s:%s" % (basename, lineno)
        if code.co_name != "?":
            message = "%s: %s()" % (message, code.co_name)
        return message
class Debugger:
    """Tk 'Debug Control' window driving an Idb instance for IDLE.

    Note: this is Python 2 era idlelib code (Tkinter, old-style class).
    """

    # View-toggle BooleanVars are stored on the class (created lazily in
    # make_gui) so the checkbox settings persist across Debugger windows.
    vstack = vsource = vlocals = vglobals = None

    def __init__(self, pyshell, idb=None):
        if idb is None:
            idb = Idb(self)
        self.pyshell = pyshell
        self.idb = idb
        self.frame = None
        self.make_gui()
        self.interacting = 0

    def run(self, *args):
        """Run code under the debugger, flagging 'interacting' while busy."""
        try:
            self.interacting = 1
            return self.idb.run(*args)
        finally:
            self.interacting = 0

    def close(self, event=None):
        """Close the debugger window; refuse while debuggee code runs."""
        if self.interacting:
            self.top.bell()
            return
        if self.stackviewer:
            self.stackviewer.close(); self.stackviewer = None
        # Clean up pyshell if user clicked debugger control close widget.
        # (Causes a harmless extra cycle through close_debugger() if user
        # toggled debugger from pyshell Debug menu)
        self.pyshell.close_debugger()
        # Now close the debugger control window....
        self.top.destroy()

    def make_gui(self):
        """Build the Debug Control toplevel: buttons, checkboxes, panes."""
        pyshell = self.pyshell
        self.flist = pyshell.flist
        self.root = root = pyshell.root
        self.top = top = ListedToplevel(root)
        self.top.wm_title("Debug Control")
        self.top.wm_iconname("Debug")
        top.wm_protocol("WM_DELETE_WINDOW", self.close)
        self.top.bind("<Escape>", self.close)
        #
        self.bframe = bframe = Frame(top)
        self.bframe.pack(anchor="w")
        self.buttons = bl = []
        #
        self.bcont = b = Button(bframe, text="Go", command=self.cont)
        bl.append(b)
        self.bstep = b = Button(bframe, text="Step", command=self.step)
        bl.append(b)
        self.bnext = b = Button(bframe, text="Over", command=self.next)
        bl.append(b)
        self.bret = b = Button(bframe, text="Out", command=self.ret)
        bl.append(b)
        # NOTE(review): this rebinds self.bret, so the "Out" button above is
        # no longer reachable via an attribute; presumably self.bquit was
        # intended — confirm before relying on self.bret.
        self.bret = b = Button(bframe, text="Quit", command=self.quit)
        bl.append(b)
        #
        # Buttons stay disabled until the debuggee stops (see interaction).
        for b in bl:
            b.configure(state="disabled")
            b.pack(side="left")
        #
        self.cframe = cframe = Frame(bframe)
        self.cframe.pack(side="left")
        #
        if not self.vstack:
            self.__class__.vstack = BooleanVar(top)
            self.vstack.set(1)
        self.bstack = Checkbutton(cframe,
            text="Stack", command=self.show_stack, variable=self.vstack)
        self.bstack.grid(row=0, column=0)
        if not self.vsource:
            self.__class__.vsource = BooleanVar(top)
        self.bsource = Checkbutton(cframe,
            text="Source", command=self.show_source, variable=self.vsource)
        self.bsource.grid(row=0, column=1)
        if not self.vlocals:
            self.__class__.vlocals = BooleanVar(top)
            self.vlocals.set(1)
        self.blocals = Checkbutton(cframe,
            text="Locals", command=self.show_locals, variable=self.vlocals)
        self.blocals.grid(row=1, column=0)
        if not self.vglobals:
            self.__class__.vglobals = BooleanVar(top)
        self.bglobals = Checkbutton(cframe,
            text="Globals", command=self.show_globals, variable=self.vglobals)
        self.bglobals.grid(row=1, column=1)
        #
        self.status = Label(top, anchor="w")
        self.status.pack(anchor="w")
        self.error = Label(top, anchor="w")
        self.error.pack(anchor="w", fill="x")
        self.errorbg = self.error.cget("background")
        #
        self.fstack = Frame(top, height=1)
        self.fstack.pack(expand=1, fill="both")
        self.flocals = Frame(top)
        self.flocals.pack(expand=1, fill="both")
        self.fglobals = Frame(top, height=1)
        self.fglobals.pack(expand=1, fill="both")
        #
        # Show panes whose persistent toggles are on.
        if self.vstack.get():
            self.show_stack()
        if self.vlocals.get():
            self.show_locals()
        if self.vglobals.get():
            self.show_globals()

    def interaction(self, message, frame, info=None):
        """Handle a debugger stop: show state, then block in a nested
        mainloop until one of the control buttons quits it."""
        self.frame = frame
        self.status.configure(text=message)
        #
        if info:
            type, value, tb = info
            try:
                m1 = type.__name__
            except AttributeError:
                m1 = "%s" % str(type)
            if value is not None:
                try:
                    # str(value) may itself raise for exotic values.
                    m1 = "%s: %s" % (m1, str(value))
                except:
                    pass
            bg = "yellow"
        else:
            m1 = ""
            tb = None
            bg = self.errorbg
        self.error.configure(text=m1, background=bg)
        #
        sv = self.stackviewer
        if sv:
            stack, i = self.idb.get_stack(self.frame, tb)
            sv.load_stack(stack, i)
        #
        self.show_variables(1)
        #
        if self.vsource.get():
            self.sync_source_line()
        #
        for b in self.buttons:
            b.configure(state="normal")
        #
        # Nested mainloop: returns when cont/step/next/ret/quit call
        # self.root.quit().
        self.top.wakeup()
        self.root.mainloop()
        #
        for b in self.buttons:
            b.configure(state="disabled")
        self.status.configure(text="")
        self.error.configure(text="", background=self.errorbg)
        self.frame = None

    def sync_source_line(self):
        """Show the current frame's source line in an editor window."""
        frame = self.frame
        if not frame:
            return
        filename, lineno = self.__frame2fileline(frame)
        # Skip pseudo-files like "<string>" and files no longer on disk.
        if filename[:1] + filename[-1:] != "<>" and os.path.exists(filename):
            self.flist.gotofileline(filename, lineno)

    def __frame2fileline(self, frame):
        code = frame.f_code
        filename = code.co_filename
        lineno = frame.f_lineno
        return filename, lineno

    # Each button handler arms bdb with the next action, then quits the
    # nested mainloop started in interaction() so execution resumes.
    def cont(self):
        self.idb.set_continue()
        self.root.quit()

    def step(self):
        self.idb.set_step()
        self.root.quit()

    def next(self):
        self.idb.set_next(self.frame)
        self.root.quit()

    def ret(self):
        self.idb.set_return(self.frame)
        self.root.quit()

    def quit(self):
        self.idb.set_quit()
        self.root.quit()

    stackviewer = None

    def show_stack(self):
        """Create or destroy the stack pane to match the checkbox state."""
        if not self.stackviewer and self.vstack.get():
            self.stackviewer = sv = StackViewer(self.fstack, self.flist, self)
            if self.frame:
                stack, i = self.idb.get_stack(self.frame, None)
                sv.load_stack(stack, i)
        else:
            sv = self.stackviewer
            if sv and not self.vstack.get():
                self.stackviewer = None
                sv.close()
            self.fstack['height'] = 1

    def show_source(self):
        if self.vsource.get():
            self.sync_source_line()

    def show_frame(self, stackitem):
        """Select a frame from the stack viewer and refresh variables."""
        self.frame = stackitem[0]  # lineno is stackitem[1]
        self.show_variables()

    localsviewer = None
    globalsviewer = None

    def show_locals(self):
        """Create or destroy the locals pane to match the checkbox state."""
        lv = self.localsviewer
        if self.vlocals.get():
            if not lv:
                self.localsviewer = NamespaceViewer(self.flocals, "Locals")
        else:
            if lv:
                self.localsviewer = None
                lv.close()
                self.flocals['height'] = 1
        self.show_variables()

    def show_globals(self):
        """Create or destroy the globals pane to match the checkbox state."""
        gv = self.globalsviewer
        if self.vglobals.get():
            if not gv:
                self.globalsviewer = NamespaceViewer(self.fglobals, "Globals")
        else:
            if gv:
                self.globalsviewer = None
                gv.close()
                self.fglobals['height'] = 1
        self.show_variables()

    def show_variables(self, force=0):
        """Refresh the locals/globals panes from the current frame."""
        lv = self.localsviewer
        gv = self.globalsviewer
        frame = self.frame
        if not frame:
            ldict = gdict = None
        else:
            ldict = frame.f_locals
            gdict = frame.f_globals
            if lv and gv and ldict is gdict:
                # Module level: locals and globals are the same dict;
                # show it only once (in the globals pane).
                ldict = None
        if lv:
            lv.load_dict(ldict, force, self.pyshell.interp.rpcclt)
        if gv:
            gv.load_dict(gdict, force, self.pyshell.interp.rpcclt)

    def set_breakpoint_here(self, filename, lineno):
        self.idb.set_break(filename, lineno)

    def clear_breakpoint_here(self, filename, lineno):
        self.idb.clear_break(filename, lineno)

    def clear_file_breaks(self, filename):
        self.idb.clear_all_file_breaks(filename)

    def load_breakpoints(self):
        "Load PyShellEditorWindow breakpoints into subprocess debugger"
        pyshell_edit_windows = self.pyshell.flist.inversedict.keys()
        for editwin in pyshell_edit_windows:
            filename = editwin.io.filename
            try:
                for lineno in editwin.breakpoints:
                    self.set_breakpoint_here(filename, lineno)
            except AttributeError:
                # Window has no breakpoints attribute / no filename yet.
                continue
class StackViewer(ScrolledList):
    """Scrolled list widget showing the debuggee's call stack."""

    def __init__(self, master, flist, gui):
        if macosxSupport.isAquaTk():
            # At least on with the stock AquaTk version on OSX 10.4 you'll
            # get an shaking GUI that eventually kills IDLE if the width
            # argument is specified.
            ScrolledList.__init__(self, master)
        else:
            ScrolledList.__init__(self, master, width=80)
        self.flist = flist
        self.gui = gui
        self.stack = []

    def load_stack(self, stack, index=None):
        """Fill the list from (frame, lineno) pairs; mark `index` with '> '.

        Fixes: linecache/string were previously imported inside the loop
        body on every iteration, and the py2-only string.strip() helper
        is replaced by the equivalent str.strip() method.
        """
        import linecache  # deferred: only needed once a stack is shown
        self.stack = stack
        self.clear()
        for i in range(len(stack)):
            frame, lineno = stack[i]
            try:
                modname = frame.f_globals["__name__"]
            except KeyError:
                # exec'd or synthetic code may lack __name__.
                modname = "?"
            code = frame.f_code
            filename = code.co_filename
            funcname = code.co_name
            sourceline = linecache.getline(filename, lineno).strip()
            if funcname in ("?", "", None):
                item = "%s, line %d: %s" % (modname, lineno, sourceline)
            else:
                item = "%s.%s(), line %d: %s" % (modname, funcname,
                                                 lineno, sourceline)
            if i == index:
                item = "> " + item
            self.append(item)
        if index is not None:
            self.select(index)

    def popup_event(self, event):
        "override base method"
        # Only show the context menu when a stack is loaded.
        if self.stack:
            return ScrolledList.popup_event(self, event)

    def fill_menu(self):
        "override base method"
        menu = self.menu
        menu.add_command(label="Go to source line",
                         command=self.goto_source_line)
        menu.add_command(label="Show stack frame",
                         command=self.show_stack_frame)

    def on_select(self, index):
        "override base method"
        if 0 <= index < len(self.stack):
            self.gui.show_frame(self.stack[index])

    def on_double(self, index):
        "override base method"
        self.show_source(index)

    def goto_source_line(self):
        index = self.listbox.index("active")
        self.show_source(index)

    def show_stack_frame(self):
        index = self.listbox.index("active")
        if 0 <= index < len(self.stack):
            self.gui.show_frame(self.stack[index])

    def show_source(self, index):
        """Open the selected frame's file in an editor at its line."""
        if not (0 <= index < len(self.stack)):
            return
        frame, lineno = self.stack[index]
        code = frame.f_code
        filename = code.co_filename
        if os.path.isfile(filename):
            edit = self.flist.open(filename)
            if edit:
                edit.gotoline(lineno)
class NamespaceViewer:
    """Scrollable name/value grid for a frame's locals or globals.

    Python 2 era code: uses the py2 `repr` module (reprlib in py3) and
    list-returning dict.keys().
    """

    def __init__(self, master, title, dict=None):
        width = 0
        height = 40
        if dict:
            height = 20*len(dict) # XXX 20 == observed height of Entry widget
        self.master = master
        self.title = title
        # py2 `repr` module; truncates long value representations.
        import repr
        self.repr = repr.Repr()
        self.repr.maxstring = 60
        self.repr.maxother = 60
        self.frame = frame = Frame(master)
        self.frame.pack(expand=1, fill="both")
        self.label = Label(frame, text=title, borderwidth=2, relief="groove")
        self.label.pack(fill="x")
        self.vbar = vbar = Scrollbar(frame, name="vbar")
        vbar.pack(side="right", fill="y")
        self.canvas = canvas = Canvas(frame,
                                      height=min(300, max(40, height)),
                                      scrollregion=(0, 0, width, height))
        canvas.pack(side="left", fill="both", expand=1)
        vbar["command"] = canvas.yview
        canvas["yscrollcommand"] = vbar.set
        self.subframe = subframe = Frame(canvas)
        self.sfid = canvas.create_window(0, 0, window=subframe, anchor="nw")
        self.load_dict(dict)

    # Class-level sentinel: ensures the `dict is self.dict` short-circuit
    # in load_dict never matches on the very first call from __init__.
    dict = -1

    def load_dict(self, dict, force=0, rpc_client=None):
        """Rebuild the grid for `dict`; skip if unchanged unless `force`."""
        if dict is self.dict and not force:
            return
        subframe = self.subframe
        frame = self.frame
        # Throw away the previously displayed widgets.
        for c in subframe.children.values():
            c.destroy()
        self.dict = None
        if not dict:
            l = Label(subframe, text="None")
            l.grid(row=0, column=0)
        else:
            # py2: keys() returns a sortable list.
            names = dict.keys()
            names.sort()
            row = 0
            for name in names:
                value = dict[name]
                svalue = self.repr.repr(value) # repr(value)
                # Strip extra quotes caused by calling repr on the (already)
                # repr'd value sent across the RPC interface:
                if rpc_client:
                    svalue = svalue[1:-1]
                l = Label(subframe, text=name)
                l.grid(row=row, column=0, sticky="nw")
                # Read-only-looking Entry so long values can be scrolled.
                l = Entry(subframe, width=0, borderwidth=0)
                l.insert(0, svalue)
                l.grid(row=row, column=1, sticky="nw")
                row = row+1
        self.dict = dict
        # XXX Could we use a <Configure> callback for the following?
        subframe.update_idletasks() # Alas!
        width = subframe.winfo_reqwidth()
        height = subframe.winfo_reqheight()
        canvas = self.canvas
        self.canvas["scrollregion"] = (0, 0, width, height)
        # Cap the visible height at 300px; scroll beyond that.
        if height > 300:
            canvas["height"] = 300
            frame.pack(expand=1)
        else:
            canvas["height"] = height
            frame.pack(expand=0)

    def close(self):
        self.frame.destroy()
|
{
"content_hash": "655c52bdcc97737b2d5aee562b92d783",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 79,
"avg_line_length": 32.92723492723493,
"alnum_prop": 0.5400303068569264,
"repo_name": "MonicaHsu/truvaluation",
"id": "94a8cb251405afe4307d766e031b7c71fd8b7fb6",
"size": "15838",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/idlelib/Debugger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2785282"
},
{
"name": "C++",
"bytes": "472284"
},
{
"name": "CSS",
"bytes": "9019"
},
{
"name": "JavaScript",
"bytes": "107089"
},
{
"name": "Perl",
"bytes": "58637"
},
{
"name": "Python",
"bytes": "14350366"
},
{
"name": "Shell",
"bytes": "32063"
},
{
"name": "Tcl",
"bytes": "1349119"
}
],
"symlink_target": ""
}
|
'''
Test changing the passwords of a batch of VMs concurrently.

@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import threading
import test_stub
import time
vm_num = 3  # number of VMs created per credential pair
vms = []    # test_stub VM wrappers created by test(); used by error_cleanup()
ts = []     # worker threads performing password changes
invs = []   # inventories returned by successful password changes
# Credential pairs tried in order: users[i] with passwds[i].
users = ["root", "root" ]
passwds = ["password", "95_aaapcn"]
def change_vm_password_wrapper(vm_uuid, usr, passwd, skip_stopped_vm = None, session_uuid = None):
    """Thread target: change one VM's password and record the result.

    Appends the returned inventory to the global `invs` list when the
    call yields a truthy result.
    """
    global invs
    result = vm_ops.change_vm_password(vm_uuid, usr, passwd,
                                       skip_stopped_vm, session_uuid)
    if result:
        invs.append(result)
def test():
    """Create VMs and change their passwords concurrently, then verify.

    For each (user, password) pair: create vm_num VMs, fire one thread
    per VM calling change_vm_password, and check that login with the new
    credentials works.  Finally reset all passwords to root/password and
    tear the VMs down.
    """
    global vms, ts, invs
    test_util.test_dsc('create VM with setting password')
    for (usr, passwd) in zip(users, passwds):
        test_util.test_dsc("user:%s; password:%s" % (usr, passwd))
        vms = []
        for i in range(vm_num):
            vms.append(test_stub.create_vm(vm_name = 'c7-vm'+str(i), image_name = "batch_test_image"))
        # Give the guests time to boot before changing credentials.
        time.sleep(30)
        for vm in vms:
            t = threading.Thread(target=change_vm_password_wrapper, args=(vm.get_vm().uuid, usr, passwd))
            ts.append(t)
            t.start()
        for t in ts:
            t.join()
        for vm in vms:
            if not test_lib.lib_check_login_in_vm(vm.get_vm(), usr, passwd):
                # BUG FIX: the message must be %-formatted; usr and passwd
                # were previously passed as stray positional arguments.
                test_util.test_fail("create vm with user:%s password: %s failed" % (usr, passwd))
        ts = []
        invs = []
    #When vm is stopped:
    #for vm in vms:
    #    vm.stop()
    for vm in vms:
        t = threading.Thread(target=change_vm_password_wrapper, args=(vm.get_vm().uuid, "root", "password"))
        ts.append(t)
        t.start()
    for t in ts:
        t.join()
    for vm in vms:
        #vm.start()
        vm.check()
        vm.destroy()
        vm.expunge()
        vm.check()
    test_util.test_pass('Set password when VM is creating is successful.')
# Called by the test framework only if an exception happens in test().
def error_cleanup():
    """Tear down any VMs left behind when test() raises."""
    global vms
    for leftover in vms:
        if not leftover:
            continue
        leftover.destroy()
        leftover.expunge()
|
{
"content_hash": "fe07648271d0a0c2cc07bef9fba6dd20",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 112,
"avg_line_length": 25.301075268817204,
"alnum_prop": 0.5490862728431789,
"repo_name": "zstackorg/zstack-woodpecker",
"id": "6c6bde4ba52c415d6e25101798cacd2e58086813",
"size": "2353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "integrationtest/vm/vm_password/test_bath_chg_vm_passwd_c7.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "46522"
},
{
"name": "Makefile",
"bytes": "692"
},
{
"name": "Puppet",
"bytes": "875"
},
{
"name": "Python",
"bytes": "2891030"
},
{
"name": "Shell",
"bytes": "54266"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.db.models import Field, Lookup
@Field.register_lookup
class SearchLookup(Lookup):
    """Custom `__search` lookup compiling to a MySQL full-text query."""

    lookup_name = 'search'

    def as_mysql(self, compiler, connection):
        # Compile both sides, then splice them into MATCH ... AGAINST.
        lhs, lhs_params = self.process_lhs(compiler, connection)
        rhs, rhs_params = self.process_rhs(compiler, connection)
        sql = 'MATCH (%s) AGAINST (%s IN BOOLEAN MODE)' % (lhs, rhs)
        return sql, lhs_params + rhs_params
class Bag(models.Model):
    """A BagIt bag tracked by the mdstore, keyed by its name."""

    name = models.CharField(
        max_length=255,
        primary_key=True,
        help_text='Name of Bag')
    files = models.IntegerField(
        help_text='Number of files in Bag')
    size = models.BigIntegerField(
        help_text="Size of Bag's Payload (in bytes)")
    bagit_version = models.CharField(
        max_length=10,
        help_text='BagIt version number')
    last_verified_date = models.DateField(
        help_text='Date of last Bag Verification')
    last_verified_status = models.CharField(
        max_length=25,
        help_text='Status of last bag Verification')
    bagging_date = models.DateField(
        help_text='Date of Bag Creation')

    @property
    def oxum(self):
        """Payload-Oxum string '<size>.<files>' (BagIt bag-info element).

        Fix: read the fields directly instead of `vars(self)`, which
        depended on the instance __dict__ and broke for deferred fields.
        """
        return '{0}.{1}'.format(self.size, self.files)

    def __str__(self):
        return self.name
class Bag_Info(models.Model):
    """A single bag-info.txt metadata field (name/body pair) for a Bag."""

    # Owning bag; rows are removed with their bag.
    bag_name = models.ForeignKey(Bag, on_delete=models.CASCADE)
    field_name = models.CharField(
        max_length=255,
        help_text="Field Name",
        db_index=True
    )
    field_body = models.TextField(
        help_text="Field Body"
    )

    def __str__(self):
        return "%s:%s" % (self.bag_name, self.field_name)

    class Meta:
        verbose_name_plural = "Bag Info Fields"
class Node(models.Model):
    """
    This model defines a storage node for the mdstore.

    A node is a named unit of disk-backed storage with an external URL,
    an on-disk path, a capacity and a tracked current size.
    """

    STATUS_CHOICES = [
        ('0', 'Inactive'),
        ('1', 'Active'),
    ]
    node_name = models.CharField(
        max_length=255,
        help_text="The name of the node",
        unique=True,
        db_index=True
    )
    node_url = models.TextField(
        help_text="The external url for the root of the node")
    node_path = models.CharField(
        max_length=255, help_text="The path on disk to the node's root")
    node_capacity = models.BigIntegerField(
        help_text="The total amount of storage (in bytes)", blank=True)
    node_size = models.BigIntegerField(
        help_text="The current size of files on disk (in bytes)", blank=True)
    last_checked = models.DateTimeField(
        help_text="Date node size last checked", blank=True)
    # Stored as a single character; see STATUS_CHOICES.
    status = models.CharField(
        max_length=1, choices=STATUS_CHOICES,
        help_text="The current status of the node", blank=True)
class External_Identifier(models.Model):
    """An alternate external identifier string attached to a Bag."""

    value = models.CharField(max_length=250, db_index=True)
    belong_to_bag = models.ForeignKey(Bag, on_delete=models.CASCADE)

    class Meta:
        ordering = ['value']

    def __str__(self):
        return self.value
|
{
"content_hash": "443c40e5d43393332359c5727159a192",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 77,
"avg_line_length": 28.339622641509433,
"alnum_prop": 0.625832223701731,
"repo_name": "unt-libraries/coda",
"id": "20ea0ec5f16ea814a738d7f558caa1402ac374b7",
"size": "3004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coda/coda_mdstore/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1325"
},
{
"name": "Dockerfile",
"bytes": "444"
},
{
"name": "HTML",
"bytes": "83077"
},
{
"name": "JavaScript",
"bytes": "475"
},
{
"name": "Python",
"bytes": "280722"
},
{
"name": "Shell",
"bytes": "295"
}
],
"symlink_target": ""
}
|
"""Correctness tests for tf.keras CNN models using DistributionStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute import combinations
from tensorflow.python.eager import context
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import keras_correctness_test_base
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.platform import test
@testing_utils.run_all_without_tensor_float_32(
'Uses Dense layers, which call matmul. Even if Dense layers run in '
'float64, the test sometimes fails with tf32 enabled for unknown reasons')
class DistributionStrategyCnnCorrectnessTest(
keras_correctness_test_base.TestDistributionStrategyCorrectnessBase):
  def get_model(self,
                initial_weights=None,
                distribution=None,
                input_shapes=None):
    """Build and compile the small test CNN under `distribution`'s scope.

    `input_shapes` is accepted for interface compatibility and ignored.
    `self.with_batch_norm` selects no/regular/sync batch normalization.
    """
    del input_shapes
    with keras_correctness_test_base.MaybeDistributionScope(distribution):
      image = keras.layers.Input(shape=(28, 28, 3), name='image')
      c1 = keras.layers.Conv2D(
          name='conv1',
          filters=16,
          kernel_size=(3, 3),
          strides=(4, 4),
          kernel_regularizer=keras.regularizers.l2(1e-4))(
              image)
      # Optionally insert plain or cross-replica-synced batch norm.
      if self.with_batch_norm == 'regular':
        c1 = keras.layers.BatchNormalization(name='bn1')(c1)
      elif self.with_batch_norm == 'sync':
        c1 = keras.layers.SyncBatchNormalization(name='bn1')(c1)
      c1 = keras.layers.MaxPooling2D(pool_size=(2, 2))(c1)
      logits = keras.layers.Dense(
          10, activation='softmax', name='pred')(
              keras.layers.Flatten()(c1))
      model = keras.Model(inputs=[image], outputs=[logits])

      if initial_weights:
        model.set_weights(initial_weights)

      model.compile(
          optimizer=gradient_descent.SGD(learning_rate=0.1),
          loss='sparse_categorical_crossentropy',
          metrics=['sparse_categorical_accuracy'])

      return model
def _get_data(self, count, shape=(28, 28, 3), num_classes=10):
centers = np.random.randn(num_classes, *shape)
features = []
labels = []
for _ in range(count):
label = np.random.randint(0, num_classes, size=1)[0]
offset = np.random.normal(loc=0, scale=0.1, size=np.prod(shape))
offset = offset.reshape(shape)
labels.append(label)
features.append(centers[label] + offset)
x = np.asarray(features, dtype=np.float32)
y = np.asarray(labels, dtype=np.float32).reshape((count, 1))
return x, y
def get_data(self):
x_train, y_train = self._get_data(
count=keras_correctness_test_base._GLOBAL_BATCH_SIZE *
keras_correctness_test_base._EVAL_STEPS)
x_predict = x_train
return x_train, y_train, x_predict
def get_data_with_partial_last_batch_eval(self):
x_train, y_train = self._get_data(count=1280)
x_eval, y_eval = self._get_data(count=1000)
return x_train, y_train, x_eval, y_eval, x_eval
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_correctness(self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(distribution, use_numpy, use_validation_data)
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_with_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data):
self.skipTest('Flakily times out, b/134670856')
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='regular')
@combinations.generate(
keras_correctness_test_base.all_strategy_and_input_config_combinations())
def test_cnn_with_sync_batch_norm_correctness(self, distribution, use_numpy,
use_validation_data):
if not context.executing_eagerly():
self.skipTest('SyncBatchNorm is not enabled in graph mode.')
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='sync')
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies() +
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager())
def test_cnn_correctness_with_partial_last_batch_eval(self, distribution,
use_numpy,
use_validation_data):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
partial_last_batch=True,
training_epochs=1)
@combinations.generate(
keras_correctness_test_base.test_combinations_with_tpu_strategies() +
keras_correctness_test_base
.strategy_minus_tpu_and_input_config_combinations_eager())
def test_cnn_with_batch_norm_correctness_and_partial_last_batch_eval(
self, distribution, use_numpy, use_validation_data):
self.run_correctness_test(
distribution,
use_numpy,
use_validation_data,
with_batch_norm='regular',
partial_last_batch=True)
# Standard TensorFlow test entry point: run all test cases when executed
# directly.
if __name__ == '__main__':
  test.main()
|
{
"content_hash": "fc12cf346480394e1ecc2fbad4d22906",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 38.352112676056336,
"alnum_prop": 0.6569959603378627,
"repo_name": "aldian/tensorflow",
"id": "57b9b718491120ae336a6219f08c27cb76a10596",
"size": "6135",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/distribute/keras_image_model_correctness_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
import os
from configurations import values
from boto.s3.connection import OrdinaryCallingFormat
from .common import Common
# Import a URL-parsing module that works on both interpreter lines: the
# Python 2 stdlib exposes it as ``urlparse``; Python 3 moved it to
# ``urllib.parse``. Either way it is bound to the name ``urlparse``.
try:
    # Python 2.x
    import urlparse
except ImportError:
    # Python 3.x
    from urllib import parse as urlparse
class Production(Common):
    """Production settings.

    Secrets (Django secret key, database password, Aliyun SLS access keys)
    are now read from environment variables, keeping the previously
    hard-coded literals only as backward-compatible fallbacks.
    NOTE(review): those fallback values are credentials committed to source
    control — rotate them and then delete the defaults.
    """

    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    # https://devcenter.heroku.com/articles/getting-started-with-django
    # SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    INSTALLED_APPS = Common.INSTALLED_APPS

    # SECRET_KEY = values.SecretValue()
    # SECURITY FIX: prefer the environment; the literal is a fallback only.
    SECRET_KEY = os.environ.get(
        'DJANGO_SECRET_KEY',
        'a55zfl=jf^%lkzkfero*6lde0p%e4f=ed0ehwhz+pu+c29a5em')

    # django-secure
    # http://django-secure.readthedocs.org/en/v0.1.2/settings.html
    # INSTALLED_APPS += ("djangosecure", )
    #
    # SECURE_HSTS_SECONDS = 60
    # SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
    # SECURE_FRAME_DENY = values.BooleanValue(True)
    # SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
    # SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
    # SESSION_COOKIE_SECURE = values.BooleanValue(False)
    # SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
    # SECURE_SSL_REDIRECT = values.BooleanValue(True)

    # Site
    # https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
    ALLOWED_HOSTS = ["*"]

    INSTALLED_APPS += ("gunicorn",)

    # Mysql
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'simona_gsoops_db',
            'USER': 'simona_db_gsoops',
            # SECURITY FIX: password comes from the environment when set;
            # the default keeps existing deployments working until rotated.
            'PASSWORD': os.environ.get('DJANGO_DB_PASSWORD',
                                       'huidugsoopsqwepoi'),
            'HOST': 'rds9ryhk89263q16910i.mysql.rds.aliyuncs.com',
            'PORT': '',
        }
    }

    # Template
    # https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
    # TEMPLATE_LOADERS = (
    #     ('django.template.loaders.cached.Loader', (
    #         'django.template.loaders.filesystem.Loader',
    #         'django.template.loaders.app_directories.Loader',
    #     )),
    # )

    # Media files
    # http://django-storages.readthedocs.org/en/latest/index.html
    INSTALLED_APPS += ('storages',)
    DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
    AWS_ACCESS_KEY_ID = values.Value('DJANGO_AWS_ACCESS_KEY_ID')
    AWS_SECRET_ACCESS_KEY = values.Value('DJANGO_AWS_SECRET_ACCESS_KEY')
    AWS_STORAGE_BUCKET_NAME = values.Value('DJANGO_AWS_STORAGE_BUCKET_NAME')
    AWS_AUTO_CREATE_BUCKET = True
    # NOTE(review): AWS_STORAGE_BUCKET_NAME is still a values.Value wrapper
    # at class-definition time, so this URL embeds its string form — confirm
    # it resolves as intended under django-configurations.
    MEDIA_URL = 'https://s3.amazonaws.com/{}/'.format(AWS_STORAGE_BUCKET_NAME)
    AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()

    # https://developers.google.com/web/fundamentals/performance/optimizing-content-efficiency/http-caching#cache-control
    # Response can be cached by browser and any intermediary caches (i.e. it is "public") for up to 1 day
    # 86400 = (60 seconds x 60 minutes x 24 hours)
    AWS_HEADERS = {
        'Cache-Control': 'max-age=86400, s-maxage=86400, must-revalidate',
    }

    # Static files
    #STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'

    # Caching: Redis reached via the REDISTOGO_URL connection string.
    redis_url = urlparse.urlparse(os.environ.get('REDISTOGO_URL', 'redis://localhost:6379'))
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': '{}:{}'.format(redis_url.hostname, redis_url.port),
            'OPTIONS': {
                'DB': 0,
                'PASSWORD': redis_url.password,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 50,
                    'timeout': 20,
                }
            }
        }
    }

    # Django RQ production settings
    RQ_QUEUES = {
        'default': {
            'URL': os.getenv('REDISTOGO_URL', 'redis://localhost:6379'),
            'DB': 1,
            'DEFAULT_TIMEOUT': 500,
        },
    }

    # Celery broker/result configuration (Redis-backed).
    BROKER_URL = os.getenv('REDISTOGO_URL', 'redis://localhost:6379/2')
    BROKER_BACKEND = "redis"
    CELERY_RESULT_BACKEND = 'redis://localhost:6379/3'
    # CELERY_ACCEPT_CONTENT = ['json']
    # CELERY_TASK_SERIALIZER = 'json'
    # CELERY_RESULT_SERIALIZER = 'json'

    # Aliyun Simple Log Service (SLS) client configuration.
    ALI_SLS = {
        # Endpoint must match the region the SLS project was created in.
        'endpoint': 'cn-hangzhou-intranet.sls.aliyuncs.com',
        # SECURITY FIX: access keys come from the environment; the committed
        # fallbacks must be rotated and removed.
        'accessKeyId': os.environ.get('ALI_ACCESS_KEY_ID', 'ZWZ9y533sNbpCRM5'),
        'accessKey': os.environ.get('ALI_ACCESS_KEY', 'pUvwBHyEUS3VVHGr6XNAYxWGjL26Hf'),
        # Project and logstore created ahead of time in the SLS console.
        'project': 'simona',
        'logstore': 'simona-logstore'
    }
|
{
"content_hash": "640260ec36e19ccc1777dcd9b60f5bb7",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 137,
"avg_line_length": 39.16,
"alnum_prop": 0.6016343207354443,
"repo_name": "amorwilliams/gsoops",
"id": "b9ac6eef1d8fea293f8d7dce6093b6642899971d",
"size": "5035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/gsoops/gsoops/settings/production.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "660718"
},
{
"name": "HTML",
"bytes": "110733"
},
{
"name": "JavaScript",
"bytes": "2482224"
},
{
"name": "Protocol Buffer",
"bytes": "441"
},
{
"name": "Python",
"bytes": "521960"
},
{
"name": "Shell",
"bytes": "400"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``align`` property of ``violin.hoverlabel``."""

    def __init__(self, plotly_name="align", parent_name="violin.hoverlabel", **kwargs):
        # Standard defaults for this property; explicit caller-supplied
        # keyword arguments always win over these.
        option_defaults = {
            "array_ok": True,
            "edit_type": "none",
            "role": "style",
            "values": ["left", "right", "auto"],
        }
        for option, default in option_defaults.items():
            kwargs.setdefault(option, default)
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
{
"content_hash": "28960918139a06def3af181a5d419d82",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 40.642857142857146,
"alnum_prop": 0.5922671353251318,
"repo_name": "plotly/python-api",
"id": "837c21ec4926fd44df60ffdcfd3d5fdea1bb38a1",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/violin/hoverlabel/_align.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from .viterbi_decode import ViterbiDecoder, viterbi_decode
from .datasets import Conll05st # noqa: F401
from .datasets import Imdb # noqa: F401
from .datasets import Imikolov # noqa: F401
from .datasets import Movielens # noqa: F401
from .datasets import UCIHousing # noqa: F401
from .datasets import WMT14 # noqa: F401
from .datasets import WMT16 # noqa: F401
# Public API of paddle.text: dataset classes plus the Viterbi decoding
# helpers re-exported above.
__all__ = [  # noqa
    'Conll05st',
    'Imdb',
    'Imikolov',
    'Movielens',
    'UCIHousing',
    'WMT14',
    'WMT16',
    'ViterbiDecoder',
    'viterbi_decode',
]
|
{
"content_hash": "203f4ac544958e6bc5a74192d2e67e11",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 58,
"avg_line_length": 27.05,
"alnum_prop": 0.6765249537892791,
"repo_name": "PaddlePaddle/Paddle",
"id": "fbfa0c3fe2e028bb0e6de028bdb4d4c005ca752e",
"size": "1154",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/text/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36848680"
},
{
"name": "CMake",
"bytes": "902619"
},
{
"name": "Cuda",
"bytes": "5227207"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36203874"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553177"
}
],
"symlink_target": ""
}
|
from unittest.mock import patch
import moment
from .base_tests import BaseTestCase
from restriccion.crawlers.uoct import UOCT_Crawler
from restriccion.models.restriction import RestrictionReport
class TestModelsRestriction(BaseTestCase):
    """Tests for RestrictionReport persistence and retrieval in MongoDB.

    Each test mocks ``moment.utcnow`` so the stored ``actualizacion``
    timestamp is deterministic, then feeds the crawler a fixture HTML page.
    """

    @patch('restriccion.models.base_report.moment.utcnow')
    def test_models_restriction_get(self, mock_moment):
        """Parsed reports round-trip through insert_many/get intact."""
        mock_datetime = moment.utc('2015-06-21', '%Y-%m-%d')
        mock_moment.side_effect = lambda: mock_datetime
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_0.html')
        RestrictionReport.insert_many(self.mongo_db, crawler.parse()['restriction'])
        restrictions = RestrictionReport.get(self.mongo_db)
        self.assertEqual(10, len(restrictions))
        self.assertEqual(
            {
                'ciudad': 'Santiago',
                'fecha': '2015-06-21',
                'hash': 'ed55bf3ea8e18f328eb03471874be28e5779424b',
                'sin_sello_verde': ['3', '4', '5', '6', '7', '8'],
                'con_sello_verde': ['0', '9'],
                'actualizacion': mock_datetime.isoformat(),
                'fuente': 'http://www.uoct.cl/restriccion-vehicular/',
            },
            restrictions[0]
        )

    @patch('restriccion.models.base_report.moment.utcnow')
    def test_models_restriction_get_limit(self, mock_moment):
        """get() honors the ``limit`` parameter."""
        mock_moment.side_effect = lambda: moment.utc('2015-06-21', '%Y-%m-%d')
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_0.html')
        RestrictionReport.insert_many(self.mongo_db, crawler.parse()['restriction'])
        self.assertEqual(26, len(RestrictionReport.get(self.mongo_db, limit=30)))

    @patch('restriccion.models.base_report.moment.utcnow')
    def test_models_restriction_insert_many(self, mock_moment):
        """insert_many() stores every parsed report with the mocked time."""
        mock_datetime = moment.utc('2015-06-22', '%Y-%m-%d')
        mock_moment.side_effect = lambda: mock_datetime
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_0.html')
        new_restrictions = crawler.parse()['restriction']
        RestrictionReport.insert_many(self.mongo_db, new_restrictions)
        self.assertEqual(len(new_restrictions), self.mongo_db[RestrictionReport.get_mongo_collection()].count())
        rows = self.mongo_db[RestrictionReport.get_mongo_collection()].find({}, {'_id': 0})
        for i in range(len(new_restrictions)):
            new_restrictions[i]['actualizacion'] = mock_datetime.isoformat()
            self.assertEqual(new_restrictions[i], rows[i])

    @patch('restriccion.models.base_report.moment.utcnow')
    def test_models_restriction_insert_many_keep_old_data(self, mock_moment):
        """A second crawl appends new reports without altering old ones."""
        mock_moment.side_effect = lambda: moment.utc('2015-06-21', '%Y-%m-%d')
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_0.html')
        RestrictionReport.insert_many(self.mongo_db, crawler.parse()['restriction'])
        self.assertEqual(26, self.mongo_db[RestrictionReport.get_mongo_collection()].count())
        first_entries = []
        rows = self.mongo_db[RestrictionReport.get_mongo_collection()].find(
            {'$query': {}, '$orderby': {'fecha': -1}}, {'_id': 0}
        )
        for row in rows:
            first_entries.append(row)
        mock_datetime = moment.utc('2015-06-22', '%Y-%m-%d')
        mock_moment.side_effect = lambda: mock_datetime
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_1.html')
        new_restrictions = crawler.parse()['restriction']
        RestrictionReport.insert_many(self.mongo_db, new_restrictions)
        self.assertEqual(len(new_restrictions), self.mongo_db[RestrictionReport.get_mongo_collection()].count())
        second_entries = []
        rows = self.mongo_db[RestrictionReport.get_mongo_collection()].find(
            {'$query': {}, '$orderby': {'fecha': -1}}, {'_id': 0}
        )
        for row in rows:
            second_entries.append(row)
        # Keep old data
        self.assertEqual(first_entries, second_entries[1:])
        self.assertEqual(
            {
                'ciudad': 'Santiago',
                'fecha': '2015-06-22',
                'hash': '4550713861c4b74e957963c03195202980f4b831',
                'sin_sello_verde': ['0', '1', '2', '5', '6', '7', '8', '9'],
                'con_sello_verde': ['1', '2', '3', '4'],
                'actualizacion': mock_datetime.isoformat(),
                'fuente': 'http://www.uoct.cl/restriccion-vehicular/',
            },
            second_entries[0]
        )

    @patch('restriccion.models.base_report.moment.utcnow')
    def test_models_restriction_insert_many_updated_data(self, mock_moment):
        """Re-crawling modified data updates only the changed report."""
        # First data
        mock_moment.side_effect = lambda: moment.utc('2015-06-22T00:00:00', '%Y-%m-%dT%H:%M:%S')
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_1.html')
        RestrictionReport.insert_many(self.mongo_db, crawler.parse()['restriction'])
        first_entries = []
        rows = self.mongo_db[RestrictionReport.get_mongo_collection()].find({'$query': {}, '$orderby': {'fecha': -1}})
        for row in rows:
            first_entries.append(row)
        # Modified data
        mock_moment.side_effect = lambda: moment.utc('2015-06-22T01:00:00', '%Y-%m-%dT%H:%M:%S')
        crawler = UOCT_Crawler()
        crawler.url = self.get_fixture_file_path('uoct.cl_restriccion-vehicular_2.html')
        RestrictionReport.insert_many(self.mongo_db, crawler.parse()['restriction'])
        second_entries = []
        rows = self.mongo_db[RestrictionReport.get_mongo_collection()].find({'$query': {}, '$orderby': {'fecha': -1}})
        for row in rows:
            second_entries.append(row)
        # Keep old data
        self.assertEqual(first_entries[0], second_entries[0])
        self.assertEqual(first_entries[2:], second_entries[2:])
        # Check updated
        for key in ['_id', 'fecha', 'sin_sello_verde', 'fuente', 'ciudad']:
            self.assertEqual(first_entries[1][key], second_entries[1][key])
        for key in ['hash', 'con_sello_verde', 'actualizacion']:
            self.assertNotEqual(first_entries[1][key], second_entries[1][key])
|
{
"content_hash": "d216426e73e58dc2c680873d22b6e0ca",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 118,
"avg_line_length": 43.76027397260274,
"alnum_prop": 0.6143371419627485,
"repo_name": "m4droid/Restriccion-SCL-API",
"id": "cfff1906ed6f6d7181233927f2eb4daf371de3ac",
"size": "6389",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_models_restriction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "226"
},
{
"name": "Python",
"bytes": "47711"
},
{
"name": "Shell",
"bytes": "385"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change the default of Realm.allow_community_topic_editing to True."""

    dependencies = [
        ("zerver", "0161_realm_message_content_delete_limit_seconds"),
    ]

    operations = [
        migrations.AlterField(
            model_name="realm",
            name="allow_community_topic_editing",
            field=models.BooleanField(default=True),
        ),
    ]
|
{
"content_hash": "f8ea19d9cb873507680fb3a7ad61be52",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 24,
"alnum_prop": 0.609375,
"repo_name": "kou/zulip",
"id": "13e960174ce165439805aa6df514c19d86fdc60d",
"size": "435",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "zerver/migrations/0162_change_default_community_topic_editing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433376"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "635452"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3361648"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79932"
},
{
"name": "Python",
"bytes": "8142846"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "134587"
},
{
"name": "TypeScript",
"bytes": "20233"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import log as logging
from networking_cisco.plugins.ml2.drivers.cisco.ucsm import constants as const
LOG = logging.getLogger(__name__)
""" Cisco UCS Manager ML2 Mechanism driver specific configuration.
Following are user configurable options for UCS Manager ML2 Mechanism
driver. The ucsm_username, ucsm_password, and ucsm_ip are
required options. Additional configuration knobs are provided to pre-
create UCS Manager port profiles.
"""
ml2_cisco_ucsm_opts = [
cfg.ListOpt('supported_pci_devs',
default=[const.PCI_INFO_CISCO_VIC_1240,
const.PCI_INFO_INTEL_82599],
help=_('List of comma separated vendor_id:product_id of '
'SR_IOV capable devices supported by this MD. This MD '
'supports both VM-FEX and SR-IOV devices.')),
]
cfg.CONF.register_opts(ml2_cisco_ucsm_opts, "ml2_cisco_ucsm")
def parse_pci_vendor_config():
    """Return the validated list of supported ``vendor_id:product_id`` ids.

    Reads the ``supported_pci_devs`` option from the ``ml2_cisco_ucsm``
    group and checks each entry has exactly one ``:`` separator.

    :returns: list of validated vendor:product strings
    :raises cfg.Error: if an entry is not of the form vendor_id:product_id
    """
    validated = []
    for entry in cfg.CONF.ml2_cisco_ucsm.supported_pci_devs:
        if len(entry.split(':')) != 2:
            raise cfg.Error(_("UCS Mech Driver: Invalid PCI device "
                              "config: %s") % entry)
        validated.append(entry)
    return validated
class UcsmConfig(object):
    """ML2 Cisco UCSM Mechanism Driver Configuration class.

    Collects the values from every ``ml2_cisco_ucsm_ip:<ip>`` section of the
    parsed oslo config files, keyed by UCS Manager IP.
    """

    # NOTE(review): class-level dict is shared by all instances — appears
    # intentional (acts as a process-wide cache); confirm before relying on
    # per-instance isolation.
    ucsm_dict = {}

    def __init__(self):
        self._create_ucsm_dict()

    def _create_ucsm_dict(self):
        """Populate ucsm_dict from all configured oslo config files."""
        multi_parser = cfg.MultiConfigParser()
        read_ok = multi_parser.read(cfg.CONF.config_file)
        if len(read_ok) != len(cfg.CONF.config_file):
            raise cfg.Error(_('Some config files were not parsed properly'))
        for parsed_file in multi_parser.parsed:
            for section_name in parsed_file.keys():
                prefix, sep, ucsm_ip = section_name.partition(':')
                if prefix.lower() != 'ml2_cisco_ucsm_ip':
                    continue
                # Each option value arrives as a list; keep the first entry.
                self.ucsm_dict[ucsm_ip] = [
                    value[0] for value in parsed_file[section_name].values()]

    def get_credentials_for_ucsm_ip(self, ucsm_ip):
        """Return the stored value list for ucsm_ip, or None if unknown."""
        return self.ucsm_dict.get(ucsm_ip)

    def get_all_ucsm_ips(self):
        """Return all configured UCS Manager IPs."""
        return self.ucsm_dict.keys()
|
{
"content_hash": "d5a078cbc2359bad17b0f60c84a60c02",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 36.128571428571426,
"alnum_prop": 0.6156583629893239,
"repo_name": "CiscoSystems/networking-cisco",
"id": "4a74b4f96124d91a4160e02882e1ea8e1b902de5",
"size": "3164",
"binary": false,
"copies": "1",
"ref": "refs/heads/asr1k_liberty_master_wip",
"path": "networking_cisco/plugins/ml2/drivers/cisco/ucsm/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "2082062"
},
{
"name": "Shell",
"bytes": "44368"
}
],
"symlink_target": ""
}
|
import copy
from oslo.config import cfg
from oslo.serialization import jsonutils
from oslo.utils import timeutils
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova.i18n import _LE
from nova import notifications
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import log as logging
from nova import utils
# Global oslo config handle and module logger.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# List of fields that can be joined in DB layer.
_INSTANCE_OPTIONAL_JOINED_FIELDS = ['metadata', 'system_metadata',
                                    'info_cache', 'security_groups',
                                    'pci_devices', 'tags']
# These are fields that are optional but don't translate to db columns
_INSTANCE_OPTIONAL_NON_COLUMN_FIELDS = ['fault', 'flavor', 'old_flavor',
                                        'new_flavor']
# These are fields that are optional and in instance_extra
_INSTANCE_EXTRA_FIELDS = ['numa_topology', 'pci_requests', 'flavor']
# These are fields that can be specified as expected_attrs
INSTANCE_OPTIONAL_ATTRS = (_INSTANCE_OPTIONAL_JOINED_FIELDS +
                           _INSTANCE_OPTIONAL_NON_COLUMN_FIELDS +
                           _INSTANCE_EXTRA_FIELDS)
# These are fields that most query calls load by default
INSTANCE_DEFAULT_FIELDS = ['metadata', 'system_metadata',
                           'info_cache', 'security_groups']
def _expected_cols(expected_attrs):
    """Return expected_attrs that are columns needing joining.

    NB: This function may modify expected_attrs if one
    requested attribute requires another.

    :param expected_attrs: list of attribute names requested by the caller
    :returns: list of column names (plain names plus 'extra.<field>'
        entries) suitable for the DB join layer
    """
    if not expected_attrs:
        return expected_attrs
    if ('system_metadata' in expected_attrs and
            'flavor' not in expected_attrs):
        # NOTE(danms): If the client asked for sysmeta, we have to
        # pull flavor so we can potentially provide compatibility
        expected_attrs.append('flavor')
    simple_cols = [attr for attr in expected_attrs
                   if attr in _INSTANCE_OPTIONAL_JOINED_FIELDS]
    complex_cols = ['extra.%s' % field
                    for field in _INSTANCE_EXTRA_FIELDS
                    if field in expected_attrs]
    if complex_cols:
        simple_cols.append('extra')
        # BUG FIX: filter() returns a lazy iterator on Python 3, which
        # cannot be concatenated with complex_cols below; build a real
        # list instead (identical behavior on Python 2).
        simple_cols = [col for col in simple_cols
                       if col not in _INSTANCE_EXTRA_FIELDS]
    if (any([flavor in expected_attrs
             for flavor in ['flavor', 'old_flavor', 'new_flavor']]) and
            'system_metadata' not in simple_cols):
        # NOTE(danms): While we're maintaining compatibility with
        # flavor data being stored in system_metadata, we need to
        # ask for it any time flavors are requested.
        simple_cols.append('system_metadata')
        expected_attrs.append('system_metadata')
    return simple_cols + complex_cols
def compat_instance(instance):
    """Create a dict-like instance structure from an objects.Instance.

    This is basically the same as nova.objects.base.obj_to_primitive(),
    except that it includes some instance-specific details, like stashing
    flavor information in system_metadata.

    If you have a function (or RPC client) that needs to see the instance
    as a dict that has flavor information in system_metadata, use this
    to appease it (while you fix said thing).

    :param instance: a nova.objects.Instance instance
    :returns: a dict-based instance structure
    """
    if not isinstance(instance, objects.Instance):
        # Already a plain structure; pass it through untouched.
        return instance
    db_instance = copy.deepcopy(base.obj_to_primitive(instance))
    for prefix, attr in (('', 'flavor'), ('old_', 'old_flavor'),
                         ('new_', 'new_flavor')):
        if not instance.obj_attr_is_set(attr):
            continue
        flavor = getattr(instance, attr)
        if not flavor:
            # NOTE(danms): If flavor is unset or None, don't
            # copy it into the primitive's system_metadata
            continue
        db_instance['system_metadata'] = \
            flavors.save_flavor_info(
                db_instance.get('system_metadata', {}),
                flavor, prefix)
        if attr in db_instance:
            del db_instance[attr]
    return db_instance
# TODO(berrange): Remove NovaObjectDictCompat
class Instance(base.NovaPersistentObject, base.NovaObject,
               base.NovaObjectDictCompat):
    """Versioned object representing a compute instance.

    Carries both the persisted columns and a set of optional/lazy-loaded
    child objects (info_cache, security_groups, flavors, ...).  The
    version history below documents the RPC wire-format evolution.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    # Version 1.11: Update instance from database during destroy
    # Version 1.12: Added ephemeral_key_uuid
    # Version 1.13: Added delete_metadata_key()
    # Version 1.14: Added numa_topology
    # Version 1.15: PciDeviceList 1.1
    # Version 1.16: Added pci_requests
    # Version 1.17: Added tags
    # Version 1.18: Added flavor, old_flavor, new_flavor
    VERSION = '1.18'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),
        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache',
                                         nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True),
        'pci_requests': fields.ObjectField('InstancePCIRequests',
                                           nullable=True),
        'tags': fields.ObjectField('TagList'),
        'flavor': fields.ObjectField('Flavor'),
        'old_flavor': fields.ObjectField('Flavor', nullable=True),
        'new_flavor': fields.ObjectField('Flavor', nullable=True),
        }

    # 'name' is computed (see the property below), not a real field.
    obj_extra_fields = ['name']

    # Per child-object field: list of (Instance version, child version)
    # pairs used when backporting to older consumers.
    obj_relationships = {
        'fault': [('1.0', '1.0')],
        'info_cache': [('1.1', '1.0'), ('1.9', '1.4'), ('1.10', '1.5')],
        'security_groups': [('1.2', '1.0')],
        'pci_devices': [('1.6', '1.0'), ('1.15', '1.1')],
        'numa_topology': [('1.14', '1.0')],
        'pci_requests': [('1.16', '1.1')],
        'tags': [('1.17', '1.0')],
        'flavor': [('1.18', '1.1')],
        'old_flavor': [('1.18', '1.1')],
        'new_flavor': [('1.18', '1.1')],
        }
    def __init__(self, *args, **kwargs):
        """Initialize like any NovaObject, then snapshot the metadata dicts
        so in-place mutation can be detected by obj_what_changed()."""
        super(Instance, self).__init__(*args, **kwargs)
        self._reset_metadata_tracking()
    def _reset_metadata_tracking(self, fields=None):
        """Re-snapshot metadata/system_metadata for change detection.

        :param fields: optional iterable restricting which snapshots are
            refreshed; None refreshes both.
        """
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata) if
                                          'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata) if
                                   'metadata' in self else {})
    def obj_reset_changes(self, fields=None):
        """Reset change tracking, including the metadata dict snapshots."""
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)
    def obj_what_changed(self):
        """Return changed field names, adding (system_)metadata when their
        dicts were mutated in place since the last snapshot."""
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes
    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Deserialize, then re-snapshot metadata so a freshly hydrated
        object does not report spurious metadata changes."""
        self = super(Instance, cls)._obj_from_primitive(context, objver,
                                                        primitive)
        self._reset_metadata_tracking()
        return self
    def obj_make_compatible(self, primitive, target_version):
        """Downgrade the serialized form for older RPC consumers.

        Before 1.7, string fields could not carry unicode; before 1.18,
        flavor information lived in system_metadata instead of the
        dedicated flavor fields, so it is stashed back there.
        """
        super(Instance, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)
        unicode_attributes = ['user_id', 'project_id', 'image_ref',
                              'kernel_id', 'ramdisk_id', 'hostname',
                              'key_name', 'key_data', 'host', 'node',
                              'user_data', 'availability_zone',
                              'display_name', 'display_description',
                              'launched_on', 'locked_by', 'os_type',
                              'architecture', 'vm_mode', 'root_device_name',
                              'default_ephemeral_device',
                              'default_swap_device', 'config_drive',
                              'cell_name']
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we couldn't handle unicode in
            # string fields, so squash it here
            for field in [x for x in unicode_attributes if x in primitive
                          and primitive[x] is not None]:
                primitive[field] = primitive[field].encode('ascii', 'replace')
        if target_version < (1, 18):
            if 'system_metadata' in primitive:
                # Move the flavor objects back into system_metadata under
                # the '', 'old_' and 'new_' key prefixes for old consumers.
                for ftype in ('', 'old_', 'new_'):
                    attrname = '%sflavor' % ftype
                    primitive.pop(attrname, None)
                    if self[attrname] is not None:
                        flavors.save_flavor_info(
                            primitive['system_metadata'],
                            getattr(self, attrname), ftype)
    @property
    def name(self):
        """Instance name rendered from CONF.instance_name_template.

        Supports both positional templates ('instance-%08x' % id) and
        mapping templates ('uuid-%(uuid)s' % {...}); falls back to the
        uuid when the template references an unavailable key.
        """
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name
    @staticmethod
    def _migrate_flavor(instance):
        """Migrate a fractional flavor to a full one stored in extra.

        This method migrates flavor information stored in an instance's
        system_metadata to instance_extra. Since the information in the
        former is not complete, we must attempt to fetch the original
        flavor by id to merge its extra_specs with what we store.

        This is a transitional tool and can be removed in a later release
        once we can ensure that everyone has migrated their instances
        (likely the L release).
        """
        # NOTE(danms): Always use admin context and read_deleted=yes here
        # because we need to make sure we can look up our original flavor
        # and try to reconstruct extra_specs, even if it has been deleted
        ctxt = context.get_admin_context(read_deleted='yes')
        instance.flavor = flavors.extract_flavor(instance)
        flavors.delete_flavor_info(instance.system_metadata, '')
        for ftype in ('old', 'new'):
            attrname = '%s_flavor' % ftype
            prefix = '%s_' % ftype
            try:
                flavor = flavors.extract_flavor(instance, prefix)
                setattr(instance, attrname, flavor)
                flavors.delete_flavor_info(instance.system_metadata, prefix)
            except KeyError:
                # No stashed old/new flavor for this prefix.
                setattr(instance, attrname, None)
        # NOTE(danms): Merge in the extra_specs from the original flavor
        # since they weren't stored with the instance.
        for flv in (instance.flavor, instance.new_flavor, instance.old_flavor):
            if flv is not None:
                try:
                    db_flavor = objects.Flavor.get_by_flavor_id(ctxt,
                                                                flv.flavorid)
                except exception.FlavorNotFound:
                    # Original flavor gone entirely; keep what we have.
                    continue
                extra_specs = dict(db_flavor.extra_specs)
                extra_specs.update(flv.get('extra_specs', {}))
                flv.extra_specs = extra_specs
def _flavor_from_db(self, db_flavor):
"""Load instance flavor information from instance_extra."""
flavor_info = jsonutils.loads(db_flavor)
self.flavor = objects.Flavor.obj_from_primitive(flavor_info['cur'])
if flavor_info['old']:
self.old_flavor = objects.Flavor.obj_from_primitive(
flavor_info['old'])
else:
self.old_flavor = None
if flavor_info['new']:
self.new_flavor = objects.Flavor.obj_from_primitive(
flavor_info['new'])
else:
self.new_flavor = None
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
    def _maybe_migrate_flavor(self, db_inst, expected_attrs):
        """Determine the proper place and format for flavor loading.

        This method loads the flavor information into the instance. If
        the information is already migrated to instance_extra, then we
        load that. If it is in system_metadata, we migrate it to extra.
        If, however, we're loading an instance for an older client and
        the flavor has already been migrated, we need to stash it back
        into system metadata, which we do here.

        This is transitional and can be removed when we remove
        _migrate_flavor().

        :returns: True if the flavor was migrated out of system_metadata
                  during this call, else False.
        """
        version = utils.convert_version_to_tuple(self.VERSION)
        flavor_requested = any(
            [flavor in expected_attrs
             for flavor in ('flavor', 'old_flavor', 'new_flavor')])
        # Pre-1.18 Instance versions carried flavor info in sysmeta, so
        # requesting system_metadata implies flavor work on old versions.
        flavor_implied = (version < (1, 18) and
                          'system_metadata' in expected_attrs)
        # NOTE(danms): This is compatibility logic. If the flavor
        # attributes were requested, then we do this load/migrate
        # logic. However, if the instance is old, we might need to
        # do it anyway in order to satisfy our sysmeta-based contract.
        if not (flavor_requested or flavor_implied):
            return False
        migrated_flavor = False
        if flavor_implied:
            # This instance is from before flavors were migrated out of
            # system_metadata. Make sure that we honor that.
            if db_inst['extra']['flavor'] is not None:
                self._flavor_from_db(db_inst['extra']['flavor'])
                sysmeta = self.system_metadata
                flavors.save_flavor_info(sysmeta, self.flavor)
                # FIXME(danms): Unfortunately NovaObject doesn't have
                # a __del__ which means we have to peer behind the
                # facade here to get these attributes deleted. Since
                # they're stored as "_$name" we can do that here, but
                # I need to follow up with a proper handler on the
                # base class.
                del self._flavor
                if self.old_flavor:
                    flavors.save_flavor_info(sysmeta, self.old_flavor, 'old_')
                    del self._old_flavor
                if self.new_flavor:
                    flavors.save_flavor_info(sysmeta, self.new_flavor, 'new_')
                    del self._new_flavor
                self.system_metadata = sysmeta
        else:
            # Migrate the flavor from system_metadata to extra,
            # if needed
            if db_inst.get('extra', {}).get('flavor') is not None:
                self._flavor_from_db(db_inst['extra']['flavor'])
            elif 'instance_type_id' in self.system_metadata:
                self._migrate_flavor(self)
                migrated_flavor = True
        return migrated_flavor
    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object, filling only the
        optional attributes named in expected_attrs (so unfetched
        attributes remain unset and will lazy-load later).
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                # Soft-delete convention: db 'deleted' column holds the
                # row id when the row is deleted, else 0.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]
        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (
                objects.InstanceFault.get_latest_for_instance(
                    context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology(
                db_inst.get('extra').get('numa_topology'))
        if 'pci_requests' in expected_attrs:
            instance._load_pci_requests(
                db_inst.get('extra').get('pci_requests'))
        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])
        migrated_flavor = instance._maybe_migrate_flavor(db_inst,
                                                         expected_attrs)
        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches)
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(
                context, objects.PciDeviceList(context),
                objects.PciDevice, db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(
                context, objects.SecurityGroupList(context),
                objects.SecurityGroup, db_inst['security_groups'])
            instance['security_groups'] = sec_groups
        if 'tags' in expected_attrs:
            tags = base.obj_make_list(
                context, objects.TagList(context),
                objects.Tag, db_inst['tags'])
            instance['tags'] = tags
        instance.obj_reset_changes()
        if migrated_flavor:
            # NOTE(danms): If we migrated the flavor above, we need to make
            # sure we know that flavor and system_metadata have been
            # touched so that the next save will update them. We can remove
            # this when we remove _migrate_flavor().
            instance._changed_fields.add('system_metadata')
            instance._changed_fields.add('flavor')
        return instance
    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        """Fetch one instance by uuid.

        Defaults expected_attrs to info_cache and security_groups, the
        attributes most callers need, and joins the corresponding DB
        columns in a single query.
        """
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context, uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch one instance by integer database id.

        Same defaulting behavior as get_by_uuid, but without slave
        support.
        """
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context, inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst,
                                   expected_attrs)
    @base.remotable
    def create(self, context):
        """Create the database record for this instance.

        Serializes any set object-valued fields (security_groups,
        info_cache, numa_topology, pci_requests, flavor) into the form
        the DB layer expects, creates the row, and refreshes this object
        from the result.

        :raises: ObjectActionError if the instance was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [attr for attr in INSTANCE_DEFAULT_FIELDS
                          if attr in updates]
        if 'security_groups' in updates:
            # The DB layer wants bare group names, not objects.
            updates['security_groups'] = [x.name for x in
                                          updates['security_groups']]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        # Object-valued fields below are stored serialized in the
        # instance_extra table via the 'extra' key.
        updates['extra'] = {}
        numa_topology = updates.pop('numa_topology', None)
        if numa_topology:
            expected_attrs.append('numa_topology')
            updates['extra']['numa_topology'] = numa_topology._to_json()
        pci_requests = updates.pop('pci_requests', None)
        if pci_requests:
            expected_attrs.append('pci_requests')
            updates['extra']['pci_requests'] = (
                pci_requests.to_json())
        flavor = updates.pop('flavor', None)
        if flavor:
            expected_attrs.append('flavor')
            old = ((self.obj_attr_is_set('old_flavor') and
                    self.old_flavor) and
                   self.old_flavor.obj_to_primitive() or None)
            new = ((self.obj_attr_is_set('new_flavor') and
                    self.new_flavor) and
                   self.new_flavor.obj_to_primitive() or None)
            flavor_info = {
                'cur': self.flavor.obj_to_primitive(),
                'old': old,
                'new': new,
            }
            updates['extra']['flavor'] = jsonutils.dumps(flavor_info)
        db_inst = db.instance_create(context, updates)
        self._from_db_object(context, self, db_inst, expected_attrs)
    @base.remotable
    def destroy(self, context):
        """Delete the database record for this instance.

        If the instance has no host assigned, a DB constraint is used so
        the destroy only succeeds while host is still unset, avoiding a
        race with a concurrent scheduling/claim.  On success the 'id'
        attribute is unset so the object reads as destroyed.

        :raises: ObjectActionError if not yet created, if uuid is unset,
                 or if the host constraint is violated ('host changed').
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None
        try:
            db_inst = db.instance_destroy(context, self.uuid,
                                          constraint=constraint)
            self._from_db_object(context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        # Unset 'id' via its backing attribute so obj_attr_is_set('id')
        # is False after destroy.
        delattr(self, base.get_attrname('id'))
def _save_info_cache(self, context):
if self.info_cache:
self.info_cache.save(context)
def _save_security_groups(self, context):
security_groups = self.security_groups or []
for secgroup in security_groups:
secgroup.save(context)
self.security_groups.obj_reset_changes()
    def _save_fault(self, context):
        """Intentional no-op: faults are written by the fault-recording
        path, not by instance.save().
        """
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass
def _save_numa_topology(self, context):
if self.numa_topology:
self.numa_topology.instance_uuid = self.uuid
self.numa_topology._save(context)
else:
objects.InstanceNUMATopology.delete_by_instance_uuid(
context, self.uuid)
    def _save_pci_requests(self, context):
        """Intentional no-op: PCI requests are not saved through
        instance.save() at this point.
        """
        # NOTE(danms): No need for this yet.
        pass
    def _save_pci_devices(self, context):
        """Intentional no-op: PCI device state is owned elsewhere."""
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass
def _save_flavor(self, context):
# FIXME(danms): We can do this smarterly by updating this
# with all the other extra things at the same time
flavor_info = {
'cur': self.flavor.obj_to_primitive(),
'old': (self.old_flavor and
self.old_flavor.obj_to_primitive() or None),
'new': (self.new_flavor and
self.new_flavor.obj_to_primitive() or None),
}
db.instance_extra_update_by_uuid(
context, self.uuid,
{'flavor': jsonutils.dumps(flavor_info)})
self.obj_reset_changes(['flavor', 'old_flavor', 'new_flavor'])
def _save_old_flavor(self, context):
if 'old_flavor' in self.obj_what_changed():
self._save_flavor(context)
def _save_new_flavor(self, context):
if 'new_flavor' in self.obj_what_changed():
self._save_flavor(context)
    def _maybe_upgrade_flavor(self):
        """Re-migrate flavor info that regressed into system_metadata.

        If an older node stored flavor info back into sysmeta on an
        already-converted instance, extract it again, delete it from
        sysmeta, and fold it into the flavor attributes before saving.
        """
        # NOTE(danms): We may have regressed to flavors stored in sysmeta,
        # so we have to merge back in here. That could happen if we pass
        # a converted instance to an older node, which still stores the
        # flavor in sysmeta, which then calls save(). We need to not
        # store that flavor info back into sysmeta after we've already
        # converted it.
        if (not self.obj_attr_is_set('system_metadata') or
                'instance_type_id' not in self.system_metadata):
            return
        LOG.debug('Transforming legacy flavors on save', instance=self)
        for ftype in ('', 'old_', 'new_'):
            attr = '%sflavor' % ftype
            try:
                flavor = flavors.extract_flavor(self, prefix=ftype)
                flavors.delete_flavor_info(self.system_metadata, ftype)
                # NOTE(danms): This may trigger a lazy-load of the flavor
                # information, but only once and it avoids re-fetching and
                # re-migrating the original flavor.
                getattr(self, attr).update(flavor)
            except AttributeError:
                # The attribute was None (no .update); just set it.
                setattr(self, attr, flavor)
            except KeyError:
                # No info for this prefix in sysmeta: clear the attribute.
                setattr(self, attr, None)
    @base.remotable
    def save(self, context, expected_vm_state=None,
             expected_task_state=None, admin_state_reset=False):
        """Save updates to this instance.

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param:context: Security context
        :param:expected_vm_state: Optional tuple of valid vm states
                                  for the instance to be in
        :param:expected_task_state: Optional tuple of valid task states
                                    for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
                                  of task_state/vm_state
        """
        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied. When we call the save
            # methods on nested objects, we will lose any changes to
            # them. But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True. compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()
            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None
        self._maybe_upgrade_flavor()
        updates = {}
        changes = self.obj_what_changed()
        for field in self.fields:
            # NOTE(danms): For object fields, we construct and call a
            # helper method like self._save_$attrname()
            if (self.obj_attr_is_set(field) and
                    isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'), field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]
        if not updates:
            # Nothing column-wise to write; still notify the cell.
            if stale_instance:
                _handle_cell_update_from_api()
            return
        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0
        if expected_task_state is not None:
            if (self.VERSION == '1.9' and
                    expected_task_state == 'image_snapshot'):
                # NOTE(danms): Icehouse introduced a pending state which
                # Havana doesn't know about. If we're an old instance,
                # tolerate the pending state as well
                expected_task_state = [
                    expected_task_state, 'image_snapshot_pending']
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state
        expected_attrs = [attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
                          if self.obj_attr_is_set(attr)]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')
        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below. If we don't there's a KeyError
        # when it tries to extract the flavor.
        # NOTE(danms): If we have sysmeta, we need flavor since the caller
        # might be expecting flavor information as a result
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
            expected_attrs.append('flavor')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context, self.uuid, updates, update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))
        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            # Compute cells push the updated instance up to the parent.
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)
        self._from_db_object(context, self, inst_ref,
                             expected_attrs=expected_attrs)
        # NOTE(danms): We have to be super careful here not to trigger
        # any lazy-loads that will unmigrate or unbackport something. So,
        # make a copy of the instance for notifications first.
        new_ref = self.obj_clone()
        notifications.send_update(context, old_ref, new_ref)
        self.obj_reset_changes()
    @base.remotable
    def refresh(self, context, use_slave=False):
        """Re-read this instance from the database in place.

        Only attributes that are already set are refreshed, so no new
        lazy-loadable attributes are introduced by a refresh.
        """
        extra = [field for field in INSTANCE_OPTIONAL_ATTRS
                 if self.obj_attr_is_set(field)]
        current = self.__class__.get_by_uuid(context, uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None
        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    # info_cache knows how to refresh itself in place.
                    self.info_cache.refresh()
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()
    def _load_generic(self, attrname):
        """Lazy-load attrname by re-fetching this instance from the DB.

        :raises: ObjectActionError if the fetched instance also lacks the
                 attribute, which would otherwise recurse forever.
        """
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])
        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)
    def _load_fault(self):
        """Lazy-load the most recent fault for this instance (may be
        None if no fault has been recorded).
        """
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)
    def _load_numa_topology(self, db_topology=None):
        """Load numa_topology, from a provided DB blob if given,
        otherwise by querying for this instance's uuid.

        A missing topology is represented as None rather than an error.
        """
        if db_topology is not None:
            self.numa_topology = \
                objects.InstanceNUMATopology.obj_from_db_obj(self.uuid,
                                                             db_topology)
        else:
            try:
                self.numa_topology = \
                    objects.InstanceNUMATopology.get_by_instance_uuid(
                        self._context, self.uuid)
            except exception.NumaTopologyNotFound:
                self.numa_topology = None
    def _load_pci_requests(self, db_requests=None):
        """Load pci_requests, from a provided DB blob if given,
        otherwise by querying for this instance's uuid.
        """
        # FIXME: also do this if none!
        if db_requests is not None:
            self.pci_requests = objects.InstancePCIRequests.obj_from_db(
                self._context, self.uuid, db_requests)
        else:
            self.pci_requests = \
                objects.InstancePCIRequests.get_by_instance_uuid(
                    self._context, self.uuid)
    def _load_flavor(self):
        """Lazy-load flavor, old_flavor and new_flavor together.

        Re-fetches the instance with flavor and system_metadata joined;
        for deleted legacy instances falls back to a lookup by
        instance_type_id.
        """
        try:
            instance = self.__class__.get_by_uuid(
                self._context, uuid=self.uuid,
                expected_attrs=['flavor', 'system_metadata'])
        except exception.InstanceNotFound:
            # NOTE(danms): Before we had instance types in system_metadata,
            # we just looked up the instance_type_id. Since we could still
            # have an instance in the database that doesn't have either
            # newer setup, mirror the original behavior here if the instance
            # is deleted
            if not self.deleted:
                raise
            self.flavor = objects.Flavor.get_by_id(self._context,
                                                   self.instance_type_id)
            self.old_flavor = None
            self.new_flavor = None
            return
        # NOTE(danms): Orphan the instance to make sure we don't lazy-load
        # anything below
        instance._context = None
        self.flavor = instance.flavor
        self.old_flavor = instance.old_flavor
        self.new_flavor = instance.new_flavor
        # NOTE(danms): The query above may have migrated the flavor from
        # system_metadata. Since we have it anyway, go ahead and refresh
        # our system_metadata from it so that a save will be accurate.
        instance.system_metadata.update(self.get('system_metadata', {}))
        self.system_metadata = instance.system_metadata
    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute on first access.

        :raises: ObjectActionError if attrname is not lazy-loadable.
        :raises: OrphanedObjectError if a DB fetch is needed but this
                 object has no context.
        """
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        # NOTE: substring match is deliberate — it catches 'flavor',
        # 'old_flavor' and 'new_flavor' alike.
        if ('flavor' in attrname and
                self.obj_attr_is_set('system_metadata') and
                'instance_type_id' in self.system_metadata):
            # NOTE(danms): Looks like we're loading a flavor, and that
            # should be doable without a context, so do this before the
            # orphan check below.
            self._migrate_flavor(self)
            if self.obj_attr_is_set(attrname):
                return
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s",
                  {'attr': attrname,
                   'name': self.obj_name(),
                   'uuid': self.uuid,
                   })
        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'fault':
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        elif attrname == 'pci_requests':
            self._load_pci_requests()
        elif 'flavor' in attrname:
            self._load_flavor()
        else:
            # FIXME(comstud): This should be optimized to only load the attr.
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])
def get_flavor(self, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
try:
return getattr(self, attr)
except exception.FlavorNotFound:
# NOTE(danms): This only happens in the case where we don't
# have flavor information in sysmeta or extra, and doing
# this triggers a lookup based on our instance_type_id for
# (very) legacy instances. That legacy code expects a None here,
# so emulate it for this helper, even though the actual attribute
# is not nullable.
return None
def set_flavor(self, flavor, namespace=None):
prefix = ('%s_' % namespace) if namespace is not None else ''
attr = '%sflavor' % prefix
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
setattr(self, attr, flavor)
self.save()
def delete_flavor(self, namespace):
prefix = ('%s_' % namespace) if namespace else ''
attr = '%sflavor' % prefix
setattr(self, attr, None)
self.save()
    @base.remotable
    def delete_metadata_key(self, context, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(context, self.uuid, key)
        # Remember whether metadata was already dirty so we can restore
        # its change-tracking state after our own removal below.
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        # Keep the change-tracking baseline in sync with the removal.
        self._orig_metadata.pop(key, None)
        notifications.send_update(context, self, self)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
def _make_instance_list(context, inst_list, db_inst_list, expected_attrs):
    """Populate inst_list with Instance objects built from DB rows.

    When 'fault' is requested, faults for all instances are fetched in
    one batched query and attached afterwards, instead of one query per
    instance.

    NOTE(review): this mutates the caller's expected_attrs list in place
    (removes 'fault') — callers that reuse the list should be aware.
    """
    get_fault = expected_attrs and 'fault' in expected_attrs
    inst_faults = {}
    if get_fault:
        # Build an instance_uuid:latest-fault mapping
        expected_attrs.remove('fault')
        instance_uuids = [inst['uuid'] for inst in db_inst_list]
        faults = objects.InstanceFaultList.get_by_instance_uuids(
            context, instance_uuids)
        for fault in faults:
            if fault.instance_uuid not in inst_faults:
                # Faults come back newest-first; keep only the latest.
                inst_faults[fault.instance_uuid] = fault
    inst_list.objects = []
    for db_inst in db_inst_list:
        inst_obj = objects.Instance._from_db_object(
            context, objects.Instance(context), db_inst,
            expected_attrs=expected_attrs)
        if get_fault:
            inst_obj.fault = inst_faults.get(inst_obj.uuid, None)
        inst_list.objects.append(inst_obj)
    inst_list.obj_reset_changes()
    return inst_list
class InstanceList(base.ObjectListBase, base.NovaObject):
    """A versioned list of Instance objects with batched query helpers."""
    # Version 1.0: Initial version
    # Version 1.1: Added use_slave to get_by_host
    #              Instance <= version 1.9
    # Version 1.2: Instance <= version 1.11
    # Version 1.3: Added use_slave to get_by_filters
    # Version 1.4: Instance <= version 1.12
    # Version 1.5: Added method get_active_by_window_joined.
    # Version 1.6: Instance <= version 1.13
    # Version 1.7: Added use_slave to get_active_by_window_joined
    # Version 1.8: Instance <= version 1.14
    # Version 1.9: Instance <= version 1.15
    # Version 1.10: Instance <= version 1.16
    # Version 1.11: Added sort_keys and sort_dirs to get_by_filters
    # Version 1.12: Pass expected_attrs to instance_get_active_by_window_joined
    # Version 1.13: Instance <= version 1.17
    # Version 1.14: Instance <= version 1.18
    VERSION = '1.14'
    fields = {
        'objects': fields.ListOfObjectsField('Instance'),
    }
    # Map of this list's version -> maximum child Instance version.
    child_versions = {
        '1.1': '1.9',
        # NOTE(danms): Instance was at 1.9 before we added this
        '1.2': '1.11',
        '1.3': '1.11',
        '1.4': '1.12',
        '1.5': '1.12',
        '1.6': '1.13',
        '1.7': '1.13',
        '1.8': '1.14',
        '1.9': '1.15',
        '1.10': '1.16',
        '1.11': '1.16',
        '1.12': '1.16',
        '1.13': '1.17',
        '1.14': '1.18',
    }
    @base.remotable_classmethod
    def get_by_filters(cls, context, filters,
                       sort_key='created_at', sort_dir='desc', limit=None,
                       marker=None, expected_attrs=None, use_slave=False,
                       sort_keys=None, sort_dirs=None):
        """Fetch instances matching filters.

        Multi-key sorting (sort_keys/sort_dirs) takes precedence over the
        single sort_key/sort_dir pair when provided.
        """
        if sort_keys or sort_dirs:
            db_inst_list = db.instance_get_all_by_filters_sort(
                context, filters, limit=limit, marker=marker,
                columns_to_join=_expected_cols(expected_attrs),
                use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs)
        else:
            db_inst_list = db.instance_get_all_by_filters(
                context, filters, sort_key, sort_dir, limit=limit,
                marker=marker, columns_to_join=_expected_cols(expected_attrs),
                use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host(cls, context, host, expected_attrs=None, use_slave=False):
        """Fetch all instances on the given host."""
        db_inst_list = db.instance_get_all_by_host(
            context, host, columns_to_join=_expected_cols(expected_attrs),
            use_slave=use_slave)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host_and_node(cls, context, host, node, expected_attrs=None):
        """Fetch all instances on the given host and hypervisor node."""
        db_inst_list = db.instance_get_all_by_host_and_node(
            context, host, node,
            columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_by_host_and_not_type(cls, context, host, type_id=None,
                                 expected_attrs=None):
        """Fetch instances on host NOT of the given instance type.

        NOTE(review): expected_attrs is not forwarded as columns_to_join
        here, unlike the other getters — confirm whether that is
        intentional or an oversight.
        """
        db_inst_list = db.instance_get_all_by_host_and_not_type(
            context, host, type_id=type_id)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def get_hung_in_rebooting(cls, context, reboot_window,
                              expected_attrs=None):
        """Fetch instances stuck rebooting longer than reboot_window."""
        db_inst_list = db.instance_get_all_hung_in_rebooting(context,
                                                             reboot_window)
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @base.remotable_classmethod
    def _get_active_by_window_joined(cls, context, begin, end=None,
                                     project_id=None, host=None,
                                     expected_attrs=None,
                                     use_slave=False):
        # NOTE(mriedem): We need to convert the begin/end timestamp strings
        # to timezone-aware datetime objects for the DB API call.
        begin = timeutils.parse_isotime(begin)
        end = timeutils.parse_isotime(end) if end else None
        db_inst_list = db.instance_get_active_by_window_joined(
            context, begin, end, project_id, host,
            columns_to_join=_expected_cols(expected_attrs))
        return _make_instance_list(context, cls(), db_inst_list,
                                   expected_attrs)
    @classmethod
    def get_active_by_window_joined(cls, context, begin, end=None,
                                    project_id=None, host=None,
                                    expected_attrs=None,
                                    use_slave=False):
        """Get instances and joins active during a certain time window.

        :param:context: nova request context
        :param:begin: datetime for the start of the time window
        :param:end: datetime for the end of the time window
        :param:project_id: used to filter instances by project
        :param:host: used to filter instances on a given compute host
        :param:expected_attrs: list of related fields that can be joined
            in the database layer when querying for instances
        :param use_slave if True, ship this query off to a DB slave
        :returns: InstanceList
        """
        # NOTE(mriedem): We have to convert the datetime objects to string
        # primitives for the remote call.
        begin = timeutils.isotime(begin)
        end = timeutils.isotime(end) if end else None
        return cls._get_active_by_window_joined(context, begin, end,
                                                project_id, host,
                                                expected_attrs,
                                                use_slave=use_slave)
    @base.remotable_classmethod
    def get_by_security_group_id(cls, context, security_group_id):
        """Fetch all instances that belong to the given security group."""
        db_secgroup = db.security_group_get(
            context, security_group_id,
            columns_to_join=['instances.info_cache',
                             'instances.system_metadata'])
        return _make_instance_list(context, cls(), db_secgroup['instances'],
                                   ['info_cache', 'system_metadata'])
    @classmethod
    def get_by_security_group(cls, context, security_group):
        """Convenience wrapper taking a security group object."""
        return cls.get_by_security_group_id(context, security_group.id)
    def fill_faults(self):
        """Batch query the database for our instances' faults.

        :returns: A list of instance uuids for which faults were found.
        """
        uuids = [inst.uuid for inst in self]
        faults = objects.InstanceFaultList.get_by_instance_uuids(
            self._context, uuids)
        faults_by_uuid = {}
        for fault in faults:
            if fault.instance_uuid not in faults_by_uuid:
                # Keep only the first (latest) fault per instance.
                faults_by_uuid[fault.instance_uuid] = fault
        for instance in self:
            if instance.uuid in faults_by_uuid:
                instance.fault = faults_by_uuid[instance.uuid]
            else:
                # NOTE(danms): Otherwise the caller will cause a lazy-load
                # when checking it, and we know there are none
                instance.fault = None
            instance.obj_reset_changes(['fault'])
        return faults_by_uuid.keys()
|
{
"content_hash": "5c1e68de6a7369032e3c2f4e6e36be76",
"timestamp": "",
"source": "github",
"line_count": 1196,
"max_line_length": 79,
"avg_line_length": 43.30769230769231,
"alnum_prop": 0.574098385975751,
"repo_name": "shakamunyi/nova",
"id": "2064b7700018c41cb5dd2fc86f93557e2b9575ba",
"size": "52401",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/objects/instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
}
|
from setuptools import find_packages, setup
# Long description comes from the README; read as bytes and decode so
# this works under both Python 2 (str.decode exists) and Python 3.
with open('README.rst', 'rb') as stream:
    readme = stream.read()
    if hasattr(readme, 'decode'):
        readme = readme.decode('utf-8')
# Single-source the version from a data file shipped inside the package.
with open('dotenvfile/version.txt', 'r') as stream:
    version = stream.read()
    if hasattr(version, 'decode'):
        version = version.decode('utf-8')
    version = version.strip()
setup(
    name='dotenvfile',
    url='https://github.com/smartmob-project/dotenvfile',
    description='.env file parser',
    long_description=readme,
    keywords='',
    maintainer='Andre Caron',
    maintainer_email='ac@smartmob.org',
    version=version,
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Topic :: Software Development :: Libraries',
    ],
    packages=find_packages(),
    # Ship version.txt with the package so the runtime read above works
    # from an installed copy too.
    package_data={
        'dotenvfile': [
            'version.txt',
        ],
    },
)
|
{
"content_hash": "96928a34d3813e5b94dc8c0260244564",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 57,
"avg_line_length": 29.31578947368421,
"alnum_prop": 0.6023339317773788,
"repo_name": "smartmob-project/dotenvfile",
"id": "6c1d3abfdd56ef78fae7d6fd07a8cba5a075b5ab",
"size": "1139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4826"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.