repo_name | path | copies | size | content | license
|---|---|---|---|---|---|
Keeward/RatticWeb | cred/migrations/0018_fix_unicode_filename.py | 7 | 6419 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'cred.cred': {
'Meta': {'object_name': 'Cred'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
'icon': ('django.db.models.fields.related.ForeignKey', [], {'default': '58', 'to': "orm['cred.CredIcon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'child_creds'", 'default': 'None', 'to': "orm['cred.Tag']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
'cred.credaudit': {
'Meta': {'ordering': "('-time',)", 'object_name': 'CredAudit'},
'audittype': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'cred': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['cred.Cred']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credlogs'", 'to': "orm['auth.User']"})
},
'cred.credchangeq': {
'Meta': {'object_name': 'CredChangeQ'},
'cred': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cred.Cred']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'cred.credicon': {
'Meta': {'object_name': 'CredIcon'},
'filename': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'xoffset': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'yoffset': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cred.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['cred']
symmetrical = True
| gpl-2.0 |
gptech/ansible | lib/ansible/modules/cloud/azure/azure_rm_resourcegroup.py | 29 | 9119 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
# Chris Houseknecht, <house@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup
version_added: "2.1"
short_description: Manage Azure resource groups.
description:
- Create, update and delete a resource group.
options:
force:
description:
- Remove a resource group and all associated resources. Use with state 'absent' to delete a resource
group that contains resources.
default: false
required: false
location:
description:
- Azure location for the resource group. Required when creating a new resource group. Cannot
be changed once resource group is created.
required: false
default: null
name:
description:
- Name of the resource group.
required: true
state:
description:
- Assert the state of the resource group. Use 'present' to create or update and
'absent' to delete. When 'absent' a resource group containing resources will not be removed unless the
force option is used.
default: present
choices:
- absent
- present
required: false
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Create a resource group
azure_rm_resourcegroup:
name: Testing
location: westus
tags:
testing: testing
delete: never
- name: Delete a resource group
azure_rm_resourcegroup:
name: Testing
state: absent
'''
RETURN = '''
contains_resources:
description: Whether or not the resource group contains associated resources.
returned: always
type: bool
sample: True
state:
description: Current state of the resource group.
returned: always
type: dict
sample: {
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"provisioning_state": "Succeeded",
"tags": {
"delete": "on-exit",
"testing": "no"
}
}
'''
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.resource.resources.models import ResourceGroup
except ImportError:
pass
def resource_group_to_dict(rg):
return dict(
id=rg.id,
name=rg.name,
location=rg.location,
tags=rg.tags,
provisioning_state=rg.properties.provisioning_state
)
class AzureRMResourceGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str', required=True),
state=dict(type='str', default='present', choices=['present', 'absent']),
location=dict(type='str'),
force=dict(type='bool', default=False)
)
self.name = None
self.state = None
self.location = None
self.tags = None
self.force = None
self.results = dict(
changed=False,
contains_resources=False,
state=dict(),
)
super(AzureRMResourceGroup, self).__init__(self.module_arg_spec,
supports_check_mode=True,
supports_tags=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
results = dict()
changed = False
rg = None
contains_resources = False
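# Flow: fetch the existing group (a CloudError means it doesn't exist yet),
# diff it against the requested state, honor check mode, then
# create/update or delete as needed.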
try:
self.log('Fetching resource group {0}'.format(self.name))
rg = self.rm_client.resource_groups.get(self.name)
self.check_provisioning_state(rg, self.state)
contains_resources = self.resources_exist()
results = resource_group_to_dict(rg)
if self.state == 'absent':
self.log("CHANGED: resource group {0} exists but requested state is 'absent'".format(self.name))
changed = True
elif self.state == 'present':
update_tags, results['tags'] = self.update_tags(results['tags'])
self.log("update tags %s" % update_tags)
self.log("new tags: %s" % str(results['tags']))
if update_tags:
changed = True
if self.location and self.location != results['location']:
self.fail("Resource group '{0}' already exists in location '{1}' and cannot be "
"moved.".format(self.name, results['location']))
except CloudError:
self.log('Resource group {0} does not exist'.format(self.name))
if self.state == 'present':
self.log("CHANGED: resource group {0} does not exist but requested state is "
"'present'".format(self.name))
changed = True
self.results['changed'] = changed
self.results['state'] = results
self.results['contains_resources'] = contains_resources
if self.check_mode:
return self.results
if changed:
if self.state == 'present':
if not rg:
# Create resource group
self.log("Creating resource group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location is required when creating a resource group.")
if self.name_exists():
self.fail("Error: a resource group with the name {0} already exists in your subscription."
.format(self.name))
params = ResourceGroup(
location=self.location,
tags=self.tags
)
else:
# Update resource group
params = ResourceGroup(
location=results['location'],
tags=results['tags']
)
self.results['state'] = self.create_or_update_resource_group(params)
elif self.state == 'absent':
if contains_resources and not self.force:
self.fail("Error removing resource group {0}. Resources exist within the group.".format(self.name))
self.delete_resource_group()
return self.results
def create_or_update_resource_group(self, params):
try:
result = self.rm_client.resource_groups.create_or_update(self.name, params)
except Exception as exc:
self.fail("Error creating or updating resource group {0} - {1}".format(self.name, str(exc)))
return resource_group_to_dict(result)
def delete_resource_group(self):
try:
poller = self.rm_client.resource_groups.delete(self.name)
self.get_poller_result(poller)
except Exception as exc:
self.fail("Error delete resource group {0} - {1}".format(self.name, str(exc)))
# The delete operation doesn't return anything.
# If we got here, assume all is good
self.results['state']['status'] = 'Deleted'
return True
def resources_exist(self):
found = False
try:
response = self.rm_client.resource_groups.list_resources(self.name)
except Exception as exc:
self.fail("Error checking for resource existence in {0} - {1}".format(self.name, str(exc)))
for item in response:
found = True
break
return found
def name_exists(self):
try:
exists = self.rm_client.resource_groups.check_existence(self.name)
except Exception as exc:
self.fail("Error checking for existence of name {0} - {1}".format(self.name, str(exc)))
return exists
def main():
AzureRMResourceGroup()
if __name__ == '__main__':
main()
| gpl-3.0 |
jenshenrik/destiny-trader | destiny.py | 1 | 3112 | import sys
import re
TYPE_BATTLEFIELD = "Battlefield"
def print_usage():
print("""
Star Wars: Destiny trade list builder
Usage:
$>python destiny.py <target-file>
where <target-file> is the text-file to process.
This file should be generated by logging into swdestiny.com, going to 'My collection',
selecting all (Ctrl/Cmd + A), pasting into an empty file, and saving.
""")
# Opens file, and returns it as a list of lines
def open_file(path):
f = open(path, 'r+')
lines = f.readlines()
f.close()
return lines
def write_file(path, haves, wants):
output = open(path, 'w')
output.write("HAVES")
for card in haves:
qty = 0
if card.type == TYPE_BATTLEFIELD:
qty = card.qty - 1
else:
qty = card.qty - 2
output.write("\n%dx %s\t\t(%s)" % (qty, card.name, card.set_string))
output.write("\n\nWANTS")
for card in wants:
qty = 0
if card.type == TYPE_BATTLEFIELD:
qty = 1 #you always only want 1 battlefield
else:
qty = 2 - card.qty
output.write("\n%dx %s\t\t(%s)" % (qty, card.name, card.set_string))
output.close()
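# The pasted swdestiny.com page includes site chrome around the collection
# itself; these fixed offsets trim 19 header lines and 11 footer lines.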
def strip_header(lines):
return lines[19:]
def strip_footer(lines):
return lines[:-11]
class Card:
def __init__(self, line):
split = line.split("\t")
self.name = split[0].lstrip().rstrip()
self.qty = self.parse_qty(split[1])
self.type = split[6]
self.rarity = split[7]
self.set = self.parse_set(split[-1].lstrip().rstrip())
self.number = self.parse_number(split[-1])
self.set_string = split[-1].lstrip().rstrip()
# Pulls number from quantity string
def parse_qty(self, qty_string):
found = re.findall(r'\d+', qty_string)
return int(found[0])
# Parse the card's set name.
# Assumes the last word is set number
def parse_set(self, set_string):
return set_string.rsplit(" ", 1)[0]
# Parse the card's number in the set.
# Assumes the last word is set number
def parse_number(self, number_string):
return int(number_string.rsplit(" ", 1)[1])
def check_usage():
num_args = len(sys.argv)
if num_args < 2:
print_usage()
sys.exit()
def extract_filename_and_extension(filename):
split_name = filename.rsplit(".", 1)
return (split_name[0], split_name[1])
# run script
check_usage()
input_file = sys.argv[1]
file_lines = open_file(input_file)
file_lines = strip_header(file_lines)
file_lines = strip_footer(file_lines)
cards = []
for line in file_lines:
cards.append(Card(line))
haves = []
wants = []
for card in cards:
if card.type == TYPE_BATTLEFIELD:
if card.qty < 1:
wants.append(card)
elif card.qty > 1:
haves.append(card)
else:
if card.qty < 2:
wants.append(card)
elif card.qty > 2:
haves.append(card)
(filename, extension) = extract_filename_and_extension(input_file)
write_file(filename+"_trades."+extension, haves, wants)
| gpl-3.0 |
goergeBerg/drupal | sites/all/modules/proj4js/lib/proj4js/tools/toposort.py | 261 | 8594 | #
# According to <http://www.vrplumber.com/programming/> this file
# is licensed under a BSD-style license. We only use the section
# originally by Tim Peters.
#
# TODO: The use of this code needs to be okayed by someone.
#
class RecursionError( OverflowError, ValueError ):
'''Unable to calculate result because of recursive structure'''
def sort(nodes, routes, noRecursion=1):
'''Passed a list of node IDs and a list of source,dest ID routes
attempt to create a list of stages where each sub list
is one stage in a process.
'''
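# E.g. (mirroring the self-test below) nodes ['a'..'f'] with routes
# [('a','b'), ('b','c'), ('b','d'), ('e','f')] sort into stages
# [['a', 'e'], ['b', 'f'], ['c', 'd']].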
children, parents = _buildChildrenLists(routes)
# first stage is those nodes
# having no incoming routes...
stage = []
stages = [stage]
taken = []
for node in nodes:
if (not parents.get(node)):
stage.append (node)
if nodes and not stage:
# there is no element which does not depend on
# some other element!!!
stage.append( nodes[0])
taken.extend( stage )
nodes = filter ( lambda x, l=stage: x not in l, nodes )
while nodes:
previousStageChildren = []
nodelen = len(nodes)
# second stage are those nodes
# which are direct children of the first stage
for node in stage:
for child in children.get (node, []):
if child not in previousStageChildren and child not in taken:
previousStageChildren.append(child)
elif child in taken and noRecursion:
raise RecursionError( (child, node) )
# unless they are children of other direct children...
# TODO, actually do that...
stage = previousStageChildren
removes = []
for current in stage:
currentParents = parents.get( current, [] )
for parent in currentParents:
if parent in stage and parent != current:
# might wind up removing current...
if not current in parents.get(parent, []):
# is not mutually dependent...
removes.append( current )
for remove in removes:
while remove in stage:
stage.remove( remove )
stages.append( stage)
taken.extend( stage )
nodes = filter ( lambda x, l=stage: x not in l, nodes )
if nodelen == len(nodes):
if noRecursion:
raise RecursionError( nodes )
else:
stages.append( nodes[:] )
nodes = []
return stages
def _buildChildrenLists (routes):
childrenTable = {}
parentTable = {}
for sourceID,destinationID in routes:
currentChildren = childrenTable.get( sourceID, [])
currentParents = parentTable.get( destinationID, [])
if not destinationID in currentChildren:
currentChildren.append ( destinationID)
if not sourceID in currentParents:
currentParents.append ( sourceID)
childrenTable[sourceID] = currentChildren
parentTable[destinationID] = currentParents
return childrenTable, parentTable
def toposort (nodes, routes, noRecursion=1):
'''Topological sort from Tim Peters, fairly efficient
in comparison (it seems).'''
#first calculate the recursion depth
dependencies = {}
inversedependencies = {}
if not nodes:
return []
if not routes:
return [nodes]
for node in nodes:
dependencies[ node ] = (0, node)
inversedependencies[ node ] = []
for depended, depends in routes:
# is it a null rule
try:
newdependencylevel, object = dependencies.get ( depends, (0, depends))
except TypeError:
print depends
raise
dependencies[ depends ] = (newdependencylevel + 1, depends)
# "dependency (existence) of depended-on"
newdependencylevel,object = dependencies.get ( depended, (0, depended) )
dependencies[ depended ] = (newdependencylevel, depended)
# Inverse dependency set up
dependencieslist = inversedependencies.get ( depended, [])
dependencieslist.append (depends)
inversedependencies[depended] = dependencieslist
### Now we do the actual sorting
# The first task is to create the sortable
# list of dependency-levels
sortinglist = dependencies.values()
sortinglist.sort ()
output = []
while sortinglist:
deletelist = []
generation = []
output.append( generation)
while sortinglist and sortinglist[0][0] == 0:
number, object = sortinglist[0]
generation.append ( object )
deletelist.append( object )
for inverse in inversedependencies.get(object, () ):
try:
oldcount, inverse = dependencies [ inverse]
if oldcount > 0:
# will be dealt with on later pass
dependencies [ inverse] = (oldcount-1, inverse)
else:
# will be dealt with on this pass,
# so needs not to be in the sorting list next time
deletelist.append( inverse )
# just in case a loop comes through
inversedependencies[object] = []
except KeyError:
# dealing with a recursion-breaking run...
pass
del sortinglist [0]
# if no elements could be deleted, then
# there is something which depends upon itself
if not deletelist:
if noRecursion:
raise RecursionError( sortinglist )
else:
# hack so that something gets deleted...
## import pdb
## pdb.set_trace()
dependencies[sortinglist[0][1]] = (0,sortinglist[0][1])
# delete the items that were dealt with
for item in deletelist:
try:
del dependencies [ item ]
except KeyError:
pass
# need to recreate the sortinglist
sortinglist = dependencies.values()
if not generation:
output.remove( generation )
sortinglist.sort ()
return output
if __name__ == "__main__":
nodes = ['a', 'b', 'c', 'd', 'e', 'f']
route = [('a', 'b'), ('b', 'c'), ('b', 'd'), ('e','f')]
for x in toposort( nodes, route):
for a in x:
print a
raise SystemExit
import pprint, traceback
nodes= [ 0,1,2,3,4,5 ]
testingValues = [
[ (0,1),(1,2),(2,3),(3,4),(4,5)],
[ (0,1),(0,2),(1,2),(3,4),(4,5)],
[
(0,1),
(0,2),
(0,2),
(2,4),
(2,5),
(3,2),
(0,3)],
[
(0,1), # 3-element cycle test, no orphan nodes
(1,2),
(2,0),
(2,4),
(2,5),
(3,2),
(0,3)],
[
(0,1),
(1,1),
(1,1),
(1,4),
(1,5),
(1,2),
(3,1),
(2,1),
(2,0)],
[
(0,1),
(1,0),
(0,2),
(0,3),
],
[
(0,1),
(1,0),
(0,2),
(3,1),
],
]
print 'sort, no recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', sort( nodes, testingValues[index] )
except:
print 'exception raised'
print 'toposort, no recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', toposort( nodes, testingValues[index] )
except:
print 'exception raised'
print 'sort, recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', sort( nodes, testingValues[index],0 )
except:
print 'exception raised'
print 'toposort, recursion allowed'
for index in range(len(testingValues)):
## print ' %s -- %s'%( index, testingValues[index])
try:
print ' ', toposort( nodes, testingValues[index],0 )
except:
print 'exception raised'
| gpl-2.0 |
bfarr/kombine | examples/kepler/correlated_likelihood.py | 1 | 2577 | import numpy as np
import numpy.linalg as nl
import numpy.random as nr
import rv_model as rv
import scipy.linalg as sl
import scipy.stats as ss
def generate_covariance(ts, sigma, tau):
r"""Generates a covariance matrix according to an
squared-exponential autocovariance
.. math::
\left\langle x_i x_j \right\rangle = \sigma_0^2 \delta_{ij} + \sigma^2 \exp\left[ - \frac{\left| t_i - t_j\right|^2}{2 \tau^2} \right]
"""
ndim = ts.shape[0]
tis = ts[:, np.newaxis]
tjs = ts[np.newaxis, :]
return sigma*sigma*np.exp(-np.square(tis-tjs)/(2.0*tau*tau))
params_dtype = np.dtype([('mu', np.float),
('K', np.float),
('e', np.float),
('omega', np.float),
('chi', np.float),
('P', np.float),
('nu', np.float),
('sigma', np.float),
('tau', np.float)])
class Log1PPosterior(object):
"""Log of the posterior for a single planet system observed with a
single telescope. """
def __init__(self, ts, vs, dvs):
self.ts = np.sort(ts)
self.vs = vs
self.dvs = dvs
self.T = self.ts[-1] - self.ts[0]
self.dt_min = np.min(np.diff(self.ts))
def to_params(self, p):
p = np.atleast_1d(p)
return p.view(params_dtype)
def log_prior(self, p):
p = self.to_params(p)
# Bounds
if p['K'] < 0.0 or p['e'] < 0.0 or p['e'] > 1.0 or p['omega'] < 0.0 or p['omega'] > 2.0*np.pi or p['P'] < 0.0 or p['nu'] < 0.1 or p['nu'] > 10.0 or p['sigma'] < 0.0 or p['tau'] < 0.0 or p['tau'] > self.T:
return np.NINF
# Otherwise, flat prior on everything.
return 0.0
def log_likelihood(self, p):
p = self.to_params(p)
v = self.rvs(p)
res = self.vs - v - p['mu']
cov = p['nu']*p['nu']*np.diag(self.dvs*self.dvs)
cov += generate_covariance(self.ts, p['sigma'], p['tau'])
cfactor = sl.cho_factor(cov)
cc, lower = cfactor
n = self.ts.shape[0]
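# Multivariate-normal log-density via the Cholesky factor cc:
# log det(cov) = 2*sum(log(diag(cc))), which gives the middle term below.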
return -0.5*n*np.log(2.0*np.pi) - np.sum(np.log(np.diag(cc))) - 0.5*np.dot(res, sl.cho_solve(cfactor, res))
def __call__(self, p):
lp = self.log_prior(p)
if lp == np.NINF:
return np.NINF
else:
return lp + self.log_likelihood(p)
def rvs(self, p):
p = self.to_params(p)
return rv.rv_model(self.ts, p['K'], p['e'], p['omega'], p['chi'], p['P'])
| mit |
gallifrey17/eden | tests/unit_tests/modules/s3/s3gis/TrueCodePaths.py | 23 | 13607 |
import unittest
class TrueCodePaths(unittest.TestCase):
def setUp(self):
vars = request.vars
vars["lat"] = 0
vars["lon"] = 0
vars["zoom"] = 1
self.old_s3roles = list(session.s3.roles)
session.s3.roles.append(1)
def tearDown(self):
vars = request.vars
del vars["lat"]
del vars["lon"]
del vars["zoom"]
session.s3.roles = self.old_s3roles
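# Note: 'test' stands in for 'self' below; the body uses plain asserts
# rather than unittest assertion methods.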
def check(test, scripts):
expected = [
"S3.public_url = 'http://127.0.0.1:8000';",
"S3.gis.mapAdmin = true;",
"S3.gis.window = true;",
"S3.gis.windowHide = true;",
"S3.gis.west_collapsed = true;",
"S3.gis.map_height = 123;",
"S3.gis.map_width = 123;",
"S3.gis.zoom = 1;",
"S3.gis.lat, S3.gis.lon;",
"S3.gis.bottom_left = new OpenLayers.LonLat(-10.000000, -10.000000);",
"S3.gis.top_right = new OpenLayers.LonLat(10.000000, 10.000000);",
"S3.gis.projection = '900913';",
"S3.gis.units = 'm';",
"S3.gis.maxResolution = 156543.033900;",
"S3.gis.maxExtent = new OpenLayers.Bounds(-20037508, -20037508, 20037508, 20037508.34);",
"S3.gis.numZoomLevels = 22;",
"S3.gis.max_w = 30;",
"S3.gis.max_h = 35;",
"S3.gis.mouse_position = 'mgrs';",
"S3.gis.wms_browser_name = 'Test WMS browser';",
"S3.gis.wms_browser_url = 'test%3A//test_WMS_URL';",
"S3.gis.draw_feature = 'inactive';",
"S3.gis.draw_polygon = 'inactive';",
"S3.gis.marker_default = 'gis_marker.image.marker_red.png';",
"S3.gis.marker_default_height = 34;",
"S3.gis.marker_default_width = 20;",
"S3.i18n.gis_legend = 'Legend';",
"S3.i18n.gis_search = 'Search Geonames';",
"S3.i18n.gis_search_no_internet = 'Geonames.org search requires Internet connectivity!';",
"S3.i18n.gis_requires_login = 'Requires Login';",
"S3.i18n.gis_base_layers = 'Base Layers';",
"S3.i18n.gis_overlays = 'Overlays';",
"S3.i18n.gis_layers = 'Layers';",
"S3.i18n.gis_draft_layer = 'Draft Features';",
"S3.i18n.gis_cluster_multiple = 'There are multiple records at this location';",
"S3.i18n.gis_loading = 'Loading';",
"S3.i18n.gis_length_message = 'The length is';",
"S3.i18n.gis_area_message = 'The area is';",
"S3.i18n.gis_length_tooltip = 'Measure Length: Click the points along the path & end with a double-click';",
"S3.i18n.gis_area_tooltip = 'Measure Area: Click the points around the polygon & end with a double-click';",
"S3.i18n.gis_zoomfull = 'Zoom to maximum map extent';",
"S3.i18n.gis_zoomout = 'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle';",
"S3.i18n.gis_zoomin = 'Zoom In: click in the map or use the left mouse button and drag to create a rectangle';",
"S3.i18n.gis_pan = 'Pan Map: keep the left mouse button pressed and drag the map';",
"S3.i18n.gis_navPrevious = 'Previous View';",
"S3.i18n.gis_navNext = 'Next View';",
"S3.i18n.gis_geoLocate = 'Zoom to Current Location';",
"S3.i18n.gis_draw_feature = 'Add Point';",
"S3.i18n.gis_draw_polygon = 'Add Polygon';",
"S3.i18n.gis_save = 'Save: Default Lat, Lon & Zoom for the Viewport';",
"S3.i18n.gis_potlatch = 'Edit the OpenStreetMap data for this area';",
"S3.i18n.gis_current_location = 'Current Location';",
"S3.i18n.gis_add_resources = 'Add Resources';",
"""if (typeof(printCapabilities) != 'undefined') {
// info.json from script headers OK
printProvider = new GeoExt.data.PrintProvider({
//method: 'POST',
//url: 'test_print_script_url/',
method: 'GET', // 'POST' recommended for production use
capabilities: printCapabilities, // from the info.json returned from the script headers
customParams: {
mapTitle: 'Test Map Title',
subTitle: 'Printed from Sahana Eden',
creator: ''
}
});
// Our print page. Stores scale, center and rotation and gives us a page
// extent feature that we can add to a layer.
printPage = new GeoExt.data.PrintPage({
printProvider: printProvider
});
//var printExtent = new GeoExt.plugins.PrintExtent({
// printProvider: printProvider
//});
// A layer to display the print page extent
//var pageLayer = new OpenLayers.Layer.Vector('Print Extent');
//pageLayer.addFeatures(printPage.feature);
//pageLayer.setVisibility(false);
//map.addLayer(pageLayer);
//var pageControl = new OpenLayers.Control.TransformFeature();
//map.addControl(pageControl);
//map.setOptions({
// eventListeners: {
// recenter/resize page extent after pan/zoom
// 'moveend': function() {
// printPage.fit(mapPanel, true);
// }
// }
//});
// The form with fields controlling the print output
S3.gis.printFormPanel = new Ext.form.FormPanel({
title: 'Print Map',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%%'},
listeners: {
'expand': function() {
//if (null == mapPanel.map.getLayersByName('Print Extent')[0]) {
// mapPanel.map.addLayer(pageLayer);
//}
if (null == mapPanel.plugins[0]) {
//map.addLayer(pageLayer);
//pageControl.activate();
//mapPanel.plugins = [ new GeoExt.plugins.PrintExtent({
// printProvider: printProvider,
// map: map,
// layer: pageLayer,
// control: pageControl
//}) ];
//mapPanel.plugins[0].addPage();
}
},
'collapse': function() {
//mapPanel.map.removeLayer(pageLayer);
//if (null != mapPanel.plugins[0]) {
// map.removeLayer(pageLayer);
// mapPanel.plugins[0].removePage(mapPanel.plugins[0].pages[0]);
// mapPanel.plugins = [];
//}
}
},
items: [{
xtype: 'textarea',
name: 'comment',
value: '',
fieldLabel: 'Comment',
plugins: new GeoExt.plugins.PrintPageField({
printPage: printPage
})
}, {
xtype: 'combo',
store: printProvider.layouts,
displayField: 'name',
fieldLabel: 'Layout',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
})
}, {
xtype: 'combo',
store: printProvider.dpis,
displayField: 'name',
fieldLabel: 'Resolution',
tpl: '<tpl for="."><div class="x-combo-list-item">{name} dpi</div></tpl>',
typeAhead: true,
mode: 'local',
triggerAction: 'all',
plugins: new GeoExt.plugins.PrintProviderField({
printProvider: printProvider
}),
// the plugin will work even if we modify a combo value
setValue: function(v) {
v = parseInt(v) + ' dpi';
Ext.form.ComboBox.prototype.setValue.apply(this, arguments);
}
//}, {
// xtype: 'combo',
// store: printProvider.scales,
// displayField: 'name',
// fieldLabel: 'Scale',
// typeAhead: true,
// mode: 'local',
// triggerAction: 'all',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
//}, {
// xtype: 'textfield',
// name: 'rotation',
// fieldLabel: 'Rotation',
// plugins: new GeoExt.plugins.PrintPageField({
// printPage: printPage
// })
}],
buttons: [{
text: 'Create PDF',
handler: function() {
// the PrintExtent plugin is the mapPanel's 1st plugin
//mapPanel.plugins[0].print();
// convenient way to fit the print page to the visible map area
printPage.fit(mapPanel, true);
// print the page, including the legend, where available
if (null == legendPanel) {
printProvider.print(mapPanel, printPage);
} else {
printProvider.print(mapPanel, printPage, {legend: legendPanel});
}
}
}]
});
} else {
// Display error diagnostic
S3.gis.printFormPanel = new Ext.Panel ({
title: 'Print Map',
rootVisible: false,
split: true,
autoScroll: true,
collapsible: true,
collapsed: true,
collapseMode: 'mini',
lines: false,
bodyStyle: 'padding:5px',
labelAlign: 'top',
defaults: {anchor: '100%'},
html: 'Printing disabled since server not accessible: <BR />test_print_script_url/'
});
}"""
]
test_gis = s3base.GIS()
actual_output = str(
test_gis.show_map(
projection = 900913,
height = 123,
width = 123,
bbox = dict(
max_lat= 10,
min_lat= -10,
max_lon= 10,
min_lon= -10
),
legend = "Test",
add_feature = True,
add_polygon = True,
window = True,
closable = True,
mouse_position = "mgrs",
wms_browser = {
"name": "Test WMS browser",
"url": "test://test_WMS_URL"
},
print_tool = {
"url": "test_print_script_url/",
"subTitle": "Tested from TestS3GIS",
# looks like a bug: "mapTitle" vs "title"
"title": "Test print tool",
"mapTitle": "Test Map Title"
},
collapsed = True,
window_hide = True,
catalogue_toolbar = True,
toolbar = True,
search = True,
catalogue_layers = True,
zoom = 1,
)
)
for expected_line in expected:
assert expected_line in actual_output
substitutions = dict(application_name = request.application)
for script in scripts:
script_string = "<script src=\"%s\" type=\"text/javascript\"></script>" % (
script % substitutions
)
assert script_string in actual_output
def test_true_code_paths_with_debug(self):
current.session.s3.debug = True
self.check(
scripts = (
"/%(application_name)s/static/scripts/gis/usng2.js",
"/%(application_name)s/static/scripts/gis/MP.js",
"/%(application_name)s/static/test_print_script_url/info.json?var=printCapabilities",
)
)
def test_true_code_paths(self):
"Basic map with true code paths turned on"
current.session.s3.debug = False
self.check(
scripts = (
"/%(application_name)s/static/test_print_script_url/info.json?var=printCapabilities",
"/%(application_name)s/static/scripts/gis/MGRS.min.js",
)
)
| mit |
clovett/MissionPlanner | ExtLibs/Mavlink/pymavlink/generator/lib/genxmlif/xmlifODict.py | 82 | 1506 | from types import DictType
from UserDict import UserDict
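# A minimal ordered dictionary: insertion order is remembered by keeping
# a parallel list of keys (self._keys) alongside the underlying dict.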
class odict(UserDict):
def __init__(self, dictOrTuple = None):
self._keys = []
UserDict.__init__(self, dictOrTuple)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
newInstance = odict()
newInstance.update(self)
return newInstance
def items(self):
return zip(self._keys, self.values())
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
if key not in self._keys:
self._keys.append(key)
return UserDict.setdefault(self, key, failobj)
def update(self, dictOrTuple):
if isinstance(dictOrTuple, DictType):
itemList = dictOrTuple.items()
else:
itemList = dictOrTuple
for key, val in itemList:
self.__setitem__(key,val)
def values(self):
return map(self.get, self._keys) | gpl-3.0 |
nishad-jobsglobal/odoo-marriot | addons/base_report_designer/plugin/openerp_report_designer/bin/script/lib/error.py | 382 | 1726 | ##########################################################################
#
# Copyright (c) 2003-2004 Danny Brewer d29583@groovegarden.com
# Copyright (C) 2004-2010 OpenERP SA (<http://openerp.com>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# See: http://www.gnu.org/licenses/lgpl.html
#
##############################################################################
if __name__<>"package":
from gui import *
class ErrorDialog:
def __init__(self, sErrorMsg, sErrorHelpMsg="", sTitle="Error Message"):
self.win = DBModalDialog(50, 50, 150, 90, sTitle)
self.win.addFixedText("lblErrMsg", 5, 5, 190, 25, sErrorMsg)
self.win.addFixedText("lblErrHelpMsg", 5, 30, 190, 25, sErrorHelpMsg)
self.win.addButton('btnOK', 55,-5,40,15,'Ok'
,actionListenerProc = self.btnOkOrCancel_clicked )
self.win.doModalDialog("",None)
def btnOkOrCancel_clicked( self, oActionEvent ):
self.win.endExecute()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jjyycchh/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/buildbot/buildbot_mock.py | 126 | 3744 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
_log = logging.getLogger(__name__)
class MockBuild(object):
def __init__(self, build_number, revision, is_green):
self._number = build_number
self._revision = revision
self._is_green = is_green
class MockBuilder(object):
def __init__(self, name):
self._name = name
def name(self):
return self._name
def build(self, build_number):
return MockBuild(build_number=build_number, revision=1234, is_green=False)
def results_url(self):
return "http://example.com/builders/%s/results" % self.name()
def accumulated_results_url(self):
return "http://example.com/f/builders/%s/results/layout-test-results" % self.name()
def latest_layout_test_results_url(self):
return self.accumulated_results_url()
def force_build(self, username, comments):
_log.info("MOCK: force_build: name=%s, username=%s, comments=%s" % (
self._name, username, comments))
class MockFailureMap(object):
def __init__(self, buildbot):
self._buildbot = buildbot
def is_empty(self):
return False
def filter_out_old_failures(self, is_old_revision):
pass
def failing_revisions(self):
return [29837]
def builders_failing_for(self, revision):
return [self._buildbot.builder_with_name("Builder1")]
def tests_failing_for(self, revision):
return ["mock-test-1"]
def failing_tests(self):
return set(["mock-test-1"])
class MockBuildBot(object):
def __init__(self):
self._mock_builder1_status = {
"name": "Builder1",
"is_green": True,
"activity": "building",
}
self._mock_builder2_status = {
"name": "Builder2",
"is_green": True,
"activity": "idle",
}
def builder_with_name(self, name):
return MockBuilder(name)
def builder_statuses(self):
return [
self._mock_builder1_status,
self._mock_builder2_status,
]
def light_tree_on_fire(self):
self._mock_builder2_status["is_green"] = False
def failure_map(self):
return MockFailureMap(self)
| bsd-3-clause |
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/github/tests/Logging_.py | 39 | 9038 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import logging
import sys
import github
import Framework
atMostPython32 = sys.hexversion < 0x03030000
class Logging(Framework.BasicTestCase):
class MockHandler:
def __init__(self):
self.level = logging.DEBUG
self.handled = None
def handle(self, record):
self.handled = record.getMessage()
def setUp(self):
Framework.BasicTestCase.setUp(self)
logger = logging.getLogger("github")
logger.setLevel(logging.DEBUG)
self.__handler = self.MockHandler()
logger.addHandler(self.__handler)
def testLoggingWithBasicAuthentication(self):
self.assertEqual(github.Github(self.login, self.password).get_user().name, "Vincent Jacques")
# In Python 3.3, dicts are not output in the same order as in Python 2.5 -> 3.2.
# So, logging is not deterministic and we cannot test it.
if atMostPython32:
self.assertEqual(self.__handler.handled, 'GET https://api.github.com/user {\'Authorization\': \'Basic (login and password removed)\', \'User-Agent\': \'PyGithub/Python\'} null ==> 200 {\'status\': \'200 OK\', \'content-length\': \'806\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept, Authorization, Cookie\', \'x-ratelimit-remaining\': \'4993\', \'server\': \'nginx\', \'last-modified\': \'Fri, 14 Sep 2012 18:47:46 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"434dfe5d3f50558fe3cea087cb95c401"\', \'cache-control\': \'private, s-maxage=60, max-age=60\', \'date\': \'Mon, 17 Sep 2012 17:12:32 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"owned_private_repos":3,"disk_usage":18612,"following":28,"type":"User","public_repos":13,"location":"Paris, France","company":"Criteo","avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","plan":{"space":614400,"private_repos":5,"name":"micro","collaborators":1},"blog":"http://vincent-jacques.net","login":"jacquev6","public_gists":3,"html_url":"https://github.com/jacquev6","hireable":false,"created_at":"2010-07-09T06:10:06Z","private_gists":5,"followers":13,"name":"Vincent Jacques","email":"vincent@vincent-jacques.net","bio":"","total_private_repos":3,"collaborators":0,"gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithOAuthAuthentication(self):
self.assertEqual(github.Github(self.oauth_token).get_user().name, "Vincent Jacques")
if atMostPython32:
self.assertEqual(self.__handler.handled, 'GET https://api.github.com/user {\'Authorization\': \'token (oauth token removed)\', \'User-Agent\': \'PyGithub/Python\'} null ==> 200 {\'status\': \'200 OK\', \'x-ratelimit-remaining\': \'4993\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept, Authorization, Cookie\', \'content-length\': \'628\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"c23ad6b5815fc3d6ec6341c4a47afe85"\', \'cache-control\': \'private, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:36:54 GMT\', \'x-oauth-scopes\': \'\', \'content-type\': \'application/json; charset=utf-8\', \'x-accepted-oauth-scopes\': \'user\'} {"type":"User","bio":"","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","blog":"http://vincent-jacques.net","public_repos":13,"created_at":"2010-07-09T06:10:06Z","avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","email":"vincent@vincent-jacques.net","following":29,"name":"Vincent Jacques","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","hireable":false,"id":327146,"public_gists":3,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithoutAuthentication(self):
self.assertEqual(github.Github().get_user("jacquev6").name, "Vincent Jacques")
if atMostPython32:
self.assertEqual(self.__handler.handled, 'GET https://api.github.com/users/jacquev6 {\'User-Agent\': \'PyGithub/Python\'} null ==> 200 {\'status\': \'200 OK\', \'content-length\': \'628\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept\', \'x-ratelimit-remaining\': \'4989\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"9bd085221a16b6d2ea95e72634c3c1ac"\', \'cache-control\': \'public, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:38:56 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"type":"User","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","created_at":"2010-07-09T06:10:06Z","email":"vincent@vincent-jacques.net","hireable":false,"avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","public_gists":3,"bio":"","following":29,"name":"Vincent Jacques","blog":"http://vincent-jacques.net","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"public_repos":13,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
def testLoggingWithBaseUrl(self):
# ReplayData forged, not recorded
self.assertEqual(github.Github(base_url="http://my.enterprise.com/my/prefix").get_user("jacquev6").name, "Vincent Jacques")
if atMostPython32:
self.assertEqual(self.__handler.handled, 'GET http://my.enterprise.com/my/prefix/users/jacquev6 {\'User-Agent\': \'PyGithub/Python\'} null ==> 200 {\'status\': \'200 OK\', \'content-length\': \'628\', \'x-github-media-type\': \'github.beta; format=json\', \'x-content-type-options\': \'nosniff\', \'vary\': \'Accept\', \'x-ratelimit-remaining\': \'4989\', \'server\': \'nginx\', \'last-modified\': \'Tue, 25 Sep 2012 07:42:42 GMT\', \'connection\': \'keep-alive\', \'x-ratelimit-limit\': \'5000\', \'etag\': \'"9bd085221a16b6d2ea95e72634c3c1ac"\', \'cache-control\': \'public, max-age=60, s-maxage=60\', \'date\': \'Tue, 25 Sep 2012 20:38:56 GMT\', \'content-type\': \'application/json; charset=utf-8\'} {"type":"User","html_url":"https://github.com/jacquev6","login":"jacquev6","followers":14,"company":"Criteo","created_at":"2010-07-09T06:10:06Z","email":"vincent@vincent-jacques.net","hireable":false,"avatar_url":"https://secure.gravatar.com/avatar/b68de5ae38616c296fa345d2b9df2225?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png","public_gists":3,"bio":"","following":29,"name":"Vincent Jacques","blog":"http://vincent-jacques.net","gravatar_id":"b68de5ae38616c296fa345d2b9df2225","id":327146,"public_repos":13,"location":"Paris, France","url":"https://api.github.com/users/jacquev6"}')
| apache-2.0 |
kingmotley/SickRage | lib/imdb/Company.py | 143 | 7385 | """
company module (imdb package).
This module provides the company class, used to store information about
a given company.
Copyright 2008-2009 Davide Alberani <da@erlug.linux.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
from copy import deepcopy
from imdb.utils import analyze_company_name, build_company_name, \
flatten, _Container, cmpCompanies
class Company(_Container):
"""A company.
Every information about a company can be accessed as:
companyObject['information']
to get a list of the kind of information stored in a
company object, use the keys() method; some useful aliases
are defined (as "also known as" for the "akas" key);
see the keys_alias dictionary.
"""
# The default sets of information retrieved.
default_info = ('main',)
# Aliases for some not-so-intuitive keys.
keys_alias = {
'distributor': 'distributors',
'special effects company': 'special effects companies',
'other company': 'miscellaneous companies',
'miscellaneous company': 'miscellaneous companies',
'other companies': 'miscellaneous companies',
'misc companies': 'miscellaneous companies',
'misc company': 'miscellaneous companies',
'production company': 'production companies'}
keys_tomodify_list = ()
cmpFunct = cmpCompanies
def _init(self, **kwds):
"""Initialize a company object.
*companyID* -- the unique identifier for the company.
*name* -- the name of the company, if not in the data dictionary.
*myName* -- the nickname you use for this company.
*myID* -- your personal id for this company.
*data* -- a dictionary used to initialize the object.
*notes* -- notes about the given company.
*accessSystem* -- a string representing the data access system used.
*titlesRefs* -- a dictionary with references to movies.
*namesRefs* -- a dictionary with references to persons.
*charactersRefs* -- a dictionary with references to companies.
*modFunct* -- function called returning text fields.
"""
name = kwds.get('name')
if name and not self.data.has_key('name'):
self.set_name(name)
self.companyID = kwds.get('companyID', None)
self.myName = kwds.get('myName', u'')
def _reset(self):
"""Reset the company object."""
self.companyID = None
self.myName = u''
def set_name(self, name):
"""Set the name of the company."""
# XXX: convert name to unicode, if it's a plain string?
# Company diverges a bit from other classes, being able
# to directly handle its "notes". AND THAT'S PROBABLY A BAD IDEA!
oname = name = name.strip()
notes = u''
if name.endswith(')'):
fparidx = name.find('(')
if fparidx != -1:
notes = name[fparidx:]
name = name[:fparidx].rstrip()
if self.notes:
name = oname
d = analyze_company_name(name)
self.data.update(d)
if notes and not self.notes:
self.notes = notes
def _additional_keys(self):
"""Valid keys to append to the data.keys() list."""
if self.data.has_key('name'):
return ['long imdb name']
return []
def _getitem(self, key):
"""Handle special keys."""
## XXX: can a company have an imdbIndex?
if self.data.has_key('name'):
if key == 'long imdb name':
return build_company_name(self.data)
return None
def getID(self):
"""Return the companyID."""
return self.companyID
def __nonzero__(self):
"""The company is "false" if the self.data does not contain a name."""
# XXX: check the name and the companyID?
if self.data.get('name'): return 1
return 0
def __contains__(self, item):
"""Return true if this company and the given Movie are related."""
from Movie import Movie
if isinstance(item, Movie):
for m in flatten(self.data, yieldDictKeys=1, scalar=Movie):
if item.isSame(m):
return 1
return 0
def isSameName(self, other):
"""Return true if two company have the same name
and/or companyID."""
if not isinstance(other, self.__class__):
return 0
if self.data.has_key('name') and \
other.data.has_key('name') and \
build_company_name(self.data) == \
build_company_name(other.data):
return 1
if self.accessSystem == other.accessSystem and \
self.companyID is not None and \
self.companyID == other.companyID:
return 1
return 0
isSameCompany = isSameName
def __deepcopy__(self, memo):
"""Return a deep copy of a company instance."""
c = Company(name=u'', companyID=self.companyID,
myName=self.myName, myID=self.myID,
data=deepcopy(self.data, memo),
notes=self.notes, accessSystem=self.accessSystem,
titlesRefs=deepcopy(self.titlesRefs, memo),
namesRefs=deepcopy(self.namesRefs, memo),
charactersRefs=deepcopy(self.charactersRefs, memo))
c.current_info = list(self.current_info)
c.set_mod_funct(self.modFunct)
return c
def __repr__(self):
"""String representation of a Company object."""
r = '<Company id:%s[%s] name:_%s_>' % (self.companyID,
self.accessSystem,
self.get('long imdb name'))
if isinstance(r, unicode): r = r.encode('utf_8', 'replace')
return r
def __str__(self):
"""Simply print the short name."""
return self.get('name', u'').encode('utf_8', 'replace')
def __unicode__(self):
"""Simply print the short title."""
return self.get('name', u'')
def summary(self):
"""Return a string with a pretty-printed summary for the company."""
if not self: return u''
s = u'Company\n=======\nName: %s\n' % \
self.get('name', u'')
for k in ('distributor', 'production company', 'miscellaneous company',
'special effects company'):
d = self.get(k, [])[:5]
if not d: continue
s += u'Last movies from this company (%s): %s.\n' % \
(k, u'; '.join([x.get('long imdb title', u'') for x in d]))
return s
| gpl-3.0 |
0k/connector-telephony | asterisk_click2dial/controller.py | 16 | 1296 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Asterisk click2dial module for OpenERP
# Copyright (C) 2014 Alexis de Lattre (alexis@via.ecp.fr)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
class AsteriskClick2dialController(openerp.addons.web.http.Controller):
_cp_path = '/asterisk_click2dial'
@openerp.addons.web.http.jsonrequest
def get_record_from_my_channel(self, req):
res = req.session.model('asterisk.server').get_record_from_my_channel()
return res
| agpl-3.0 |
lakshayg/tensorflow | tensorflow/contrib/rnn/python/kernel_tests/benchmarking.py | 67 | 1906 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for benchmarking OpKernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
from tensorflow.python.framework import ops
def device(use_gpu=False):
"""TensorFlow device to assign ops to."""
if use_gpu:
return ops.device("/gpu:0")
return ops.device("/cpu:0")
def seconds_per_run(op, sess, num_runs=50):
"""Number of seconds taken to execute 'op' once on average."""
for _ in range(2):
sess.run(op)
start_time = time.time()
for _ in range(num_runs):
sess.run(op)
end_time = time.time()
time_taken = (end_time - start_time) / num_runs
return time_taken
def dict_product(dicts):
"""Constructs iterator over outer product of entries in a dict-of-lists.
Example:
>>> dict_products({"a": [1,2], "b": [3, 4]})
>>> [{"a": 1, "b": 3},
{"a": 1, "b": 4},
{"a": 2, "b": 3},
{"a": 2, "b": 4}]
Args:
dicts: dictionary with string keys and list values.
Yields:
Individual dicts from outer product.
"""
keys, values = zip(*dicts.items())
for config_values in itertools.product(*values):
yield dict(zip(keys, config_values))
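# A hedged usage sketch, not part of the original file: it ties the three
# helpers above together, assuming the TF 1.x session API is available. The
# op, the sizes and num_runs below are arbitrary illustrative choices.
def _example_usage():
  from tensorflow.python.client import session
  from tensorflow.python.ops import array_ops
  from tensorflow.python.ops import math_ops

  for config in dict_product({"use_gpu": [False], "size": [64, 256]}):
    with ops.Graph().as_default():
      with device(config["use_gpu"]):
        x = array_ops.ones([config["size"], config["size"]])
        y = math_ops.matmul(x, x)
      with session.Session() as sess:
        print("%s: %f sec/run" % (config, seconds_per_run(y.op, sess, 10)))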
| apache-2.0 |
ASlave2Audio/Restaurant-App | mingw/bin/lib/fileinput.py | 224 | 14143 | """Helper class to quickly write a loop over all standard input files.
Typical use is:
import fileinput
for line in fileinput.input():
process(line)
This iterates over the lines of all files listed in sys.argv[1:],
defaulting to sys.stdin if the list is empty. If a filename is '-' it
is also replaced by sys.stdin. To specify an alternative list of
filenames, pass it as the argument to input(). A single file name is
also allowed.
Functions filename(), lineno() return the filename and cumulative line
number of the line that has just been read; filelineno() returns its
line number in the current file; isfirstline() returns true iff the
line just read is the first line of its file; isstdin() returns true
iff the line was read from sys.stdin. Function nextfile() closes the
current file so that the next iteration will read the first line from
the next file (if any); lines not read from the file will not count
towards the cumulative line count; the filename is not changed until
after the first line of the next file has been read. Function close()
closes the sequence.
Before any lines have been read, filename() returns None and both line
numbers are zero; nextfile() has no effect. After all lines have been
read, filename() and the line number functions return the values
pertaining to the last line read; nextfile() has no effect.
All files are opened in text mode by default, you can override this by
setting the mode parameter to input() or FileInput.__init__().
If an I/O error occurs during opening or reading a file, the IOError
exception is raised.
If sys.stdin is used more than once, the second and further use will
return no lines, except perhaps for interactive use, or if it has been
explicitly reset (e.g. using sys.stdin.seek(0)).
Empty files are opened and immediately closed; the only time their
presence in the list of filenames is noticeable at all is when the
last file opened is empty.
It is possible that the last line of a file doesn't end in a newline
character; otherwise lines are returned including the trailing
newline.
Class FileInput is the implementation; its methods filename(),
lineno(), fileline(), isfirstline(), isstdin(), nextfile() and close()
correspond to the functions in the module. In addition it has a
readline() method which returns the next input line, and a
__getitem__() method which implements the sequence behavior. The
sequence must be accessed in strictly sequential order; sequence
access and readline() cannot be mixed.
Optional in-place filtering: if the keyword argument inplace=1 is
passed to input() or to the FileInput constructor, the file is moved
to a backup file and standard output is directed to the input file.
This makes it possible to write a filter that rewrites its input file
in place. If the keyword argument backup=".<some extension>" is also
given, it specifies the extension for the backup file, and the backup
file remains around; by default, the extension is ".bak" and it is
deleted when the output file is closed. In-place filtering is
disabled when standard input is read. XXX The current implementation
does not work for MS-DOS 8+3 filesystems.
Performance: this module is unfortunately one of the slower ways of
processing large numbers of input lines. Nevertheless, a significant
speed-up has been obtained by using readlines(bufsize) instead of
readline(). A new keyword argument, bufsize=N, is present on the
input() function and the FileInput() class to override the default
buffer size.
XXX Possible additions:
- optional getopt argument processing
- isatty()
- read(), read(size), even readlines()
"""
import sys, os
__all__ = ["input","close","nextfile","filename","lineno","filelineno",
"isfirstline","isstdin","FileInput"]
_state = None
DEFAULT_BUFSIZE = 8*1024
def input(files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
"""input([files[, inplace[, backup[, mode[, openhook]]]]])
Create an instance of the FileInput class. The instance will be used
as global state for the functions of this module, and is also returned
to use during iteration. The parameters to this function will be passed
along to the constructor of the FileInput class.
"""
global _state
if _state and _state._file:
raise RuntimeError, "input() already active"
_state = FileInput(files, inplace, backup, bufsize, mode, openhook)
return _state
def close():
"""Close the sequence."""
global _state
state = _state
_state = None
if state:
state.close()
def nextfile():
"""
Close the current file so that the next iteration will read the first
line from the next file (if any); lines not read from the file will
not count towards the cumulative line count. The filename is not
changed until after the first line of the next file has been read.
Before the first line has been read, this function has no effect;
it cannot be used to skip the first file. After the last line of the
last file has been read, this function has no effect.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.nextfile()
def filename():
"""
Return the name of the file currently being read.
Before the first line has been read, returns None.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filename()
def lineno():
"""
Return the cumulative line number of the line that has just been read.
Before the first line has been read, returns 0. After the last line
of the last file has been read, returns the line number of that line.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.lineno()
def filelineno():
"""
Return the line number in the current file. Before the first line
has been read, returns 0. After the last line of the last file has
been read, returns the line number of that line within the file.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.filelineno()
def fileno():
"""
Return the file number of the current file. When no file is currently
opened, returns -1.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.fileno()
def isfirstline():
"""
    Returns true if the line just read is the first line of its file,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isfirstline()
def isstdin():
"""
Returns true if the last line was read from sys.stdin,
otherwise returns false.
"""
if not _state:
raise RuntimeError, "no active input()"
return _state.isstdin()
class FileInput:
"""class FileInput([files[, inplace[, backup[, mode[, openhook]]]]])
Class FileInput is the implementation of the module; its methods
filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(),
nextfile() and close() correspond to the functions of the same name
in the module.
In addition it has a readline() method which returns the next
input line, and a __getitem__() method which implements the
sequence behavior. The sequence must be accessed in strictly
sequential order; random access and readline() cannot be mixed.
"""
def __init__(self, files=None, inplace=0, backup="", bufsize=0,
mode="r", openhook=None):
if isinstance(files, basestring):
files = (files,)
else:
if files is None:
files = sys.argv[1:]
if not files:
files = ('-',)
else:
files = tuple(files)
self._files = files
self._inplace = inplace
self._backup = backup
self._bufsize = bufsize or DEFAULT_BUFSIZE
self._savestdout = None
self._output = None
self._filename = None
self._lineno = 0
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = None
self._buffer = []
self._bufindex = 0
# restrict mode argument to reading modes
if mode not in ('r', 'rU', 'U', 'rb'):
raise ValueError("FileInput opening mode must be one of "
"'r', 'rU', 'U' and 'rb'")
self._mode = mode
if inplace and openhook:
raise ValueError("FileInput cannot use an opening hook in inplace mode")
elif openhook and not hasattr(openhook, '__call__'):
raise ValueError("FileInput openhook must be callable")
self._openhook = openhook
def __del__(self):
self.close()
def close(self):
self.nextfile()
self._files = ()
def __iter__(self):
return self
def next(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
line = self.readline()
if not line:
raise StopIteration
return line
def __getitem__(self, i):
if i != self._lineno:
raise RuntimeError, "accessing lines out of order"
try:
return self.next()
except StopIteration:
raise IndexError, "end of input reached"
def nextfile(self):
savestdout = self._savestdout
self._savestdout = 0
if savestdout:
sys.stdout = savestdout
output = self._output
self._output = 0
if output:
output.close()
file = self._file
self._file = 0
if file and not self._isstdin:
file.close()
backupfilename = self._backupfilename
self._backupfilename = 0
if backupfilename and not self._backup:
try: os.unlink(backupfilename)
except OSError: pass
self._isstdin = False
self._buffer = []
self._bufindex = 0
def readline(self):
try:
line = self._buffer[self._bufindex]
except IndexError:
pass
else:
self._bufindex += 1
self._lineno += 1
self._filelineno += 1
return line
if not self._file:
if not self._files:
return ""
self._filename = self._files[0]
self._files = self._files[1:]
self._filelineno = 0
self._file = None
self._isstdin = False
self._backupfilename = 0
if self._filename == '-':
self._filename = '<stdin>'
self._file = sys.stdin
self._isstdin = True
else:
if self._inplace:
self._backupfilename = (
self._filename + (self._backup or os.extsep+"bak"))
try: os.unlink(self._backupfilename)
except os.error: pass
# The next few lines may raise IOError
os.rename(self._filename, self._backupfilename)
self._file = open(self._backupfilename, self._mode)
try:
perm = os.fstat(self._file.fileno()).st_mode
except OSError:
self._output = open(self._filename, "w")
else:
fd = os.open(self._filename,
os.O_CREAT | os.O_WRONLY | os.O_TRUNC,
perm)
self._output = os.fdopen(fd, "w")
try:
if hasattr(os, 'chmod'):
os.chmod(self._filename, perm)
except OSError:
pass
self._savestdout = sys.stdout
sys.stdout = self._output
else:
# This may raise IOError
if self._openhook:
self._file = self._openhook(self._filename, self._mode)
else:
self._file = open(self._filename, self._mode)
self._buffer = self._file.readlines(self._bufsize)
self._bufindex = 0
if not self._buffer:
self.nextfile()
# Recursive call
return self.readline()
def filename(self):
return self._filename
def lineno(self):
return self._lineno
def filelineno(self):
return self._filelineno
def fileno(self):
if self._file:
try:
return self._file.fileno()
except ValueError:
return -1
else:
return -1
def isfirstline(self):
return self._filelineno == 1
def isstdin(self):
return self._isstdin
def hook_compressed(filename, mode):
ext = os.path.splitext(filename)[1]
if ext == '.gz':
import gzip
return gzip.open(filename, mode)
elif ext == '.bz2':
import bz2
return bz2.BZ2File(filename, mode)
else:
return open(filename, mode)
def hook_encoded(encoding):
import codecs
def openhook(filename, mode):
return codecs.open(filename, mode, encoding)
return openhook
def _test():
import getopt
inplace = 0
backup = 0
opts, args = getopt.getopt(sys.argv[1:], "ib:")
for o, a in opts:
if o == '-i': inplace = 1
if o == '-b': backup = a
for line in input(args, inplace=inplace, backup=backup):
if line[-1:] == '\n': line = line[:-1]
if line[-1:] == '\r': line = line[:-1]
print "%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
isfirstline() and "*" or "", line)
print "%d: %s[%d]" % (lineno(), filename(), filelineno())
if __name__ == '__main__':
_test()
| mit |
ewjoachim/django-extensions | django_extensions/management/commands/runjob.py | 28 | 1938 | from optparse import make_option
from django.core.management.base import LabelCommand
from django_extensions.management.jobs import get_job, print_jobs
from django_extensions.management.utils import signalcommand
class Command(LabelCommand):
option_list = LabelCommand.option_list + (
make_option('--list', '-l', action="store_true", dest="list_jobs",
help="List all jobs with their description"),
)
help = "Run a single maintenance job."
args = "[app_name] job_name"
label = ""
def runjob(self, app_name, job_name, options):
verbosity = int(options.get('verbosity', 1))
if verbosity > 1:
print("Executing job: %s (app: %s)" % (job_name, app_name))
try:
job = get_job(app_name, job_name)
except KeyError:
if app_name:
print("Error: Job %s for applabel %s not found" % (job_name, app_name))
else:
print("Error: Job %s not found" % job_name)
print("Use -l option to view all the available jobs")
return
try:
job().execute()
except Exception:
import traceback
print("ERROR OCCURED IN JOB: %s (APP: %s)" % (job_name, app_name))
print("START TRACEBACK:")
traceback.print_exc()
print("END TRACEBACK\n")
@signalcommand
def handle(self, *args, **options):
app_name = None
job_name = None
if len(args) == 1:
job_name = args[0]
elif len(args) == 2:
app_name, job_name = args
if options.get('list_jobs'):
print_jobs(only_scheduled=False, show_when=True, show_appname=True)
else:
if not job_name:
print("Run a single maintenance job. Please specify the name of the job.")
return
self.runjob(app_name, job_name, options)
| mit |
splunk/splunk-webframework | contrib/django/django/conf/locale/tr/formats.py | 106 | 1043 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'd F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'd F'
SHORT_DATE_FORMAT = 'd M Y'
SHORT_DATETIME_FORMAT = 'd M Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Pazartesi
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%y-%m-%d', # '06-10-25'
# '%d %B %Y', '%d %b. %Y', # '25 Ekim 2006', '25 Eki. 2006'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| apache-2.0 |
naslanidis/ansible | contrib/inventory/vmware_inventory.py | 11 | 25368 | #!/usr/bin/env python
# Requirements
# - pyvmomi >= 6.0.0.2016.4
# TODO:
# * more jq examples
# * optional folder hierarchy
"""
$ jq '._meta.hostvars[].config' data.json | head
{
"alternateguestname": "",
"instanceuuid": "5035a5cd-b8e8-d717-e133-2d383eb0d675",
"memoryhotaddenabled": false,
"guestfullname": "Red Hat Enterprise Linux 7 (64-bit)",
"changeversion": "2016-05-16T18:43:14.977925Z",
"uuid": "4235fc97-5ddb-7a17-193b-9a3ac97dc7b4",
"cpuhotremoveenabled": false,
"vpmcenabled": false,
"firmware": "bios",
"""
from __future__ import print_function
import argparse
import atexit
import datetime
import getpass
import jinja2
import os
import six
import ssl
import sys
import uuid
from collections import defaultdict
from six.moves import configparser
from time import time
HAS_PYVMOMI = False
try:
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
HAS_PYVMOMI = True
except ImportError:
pass
try:
import json
except ImportError:
import simplejson as json
hasvcr = False
try:
import vcr
hasvcr = True
except ImportError:
pass
class VMwareMissingHostException(Exception):
pass
class VMWareInventory(object):
__name__ = 'VMWareInventory'
guest_props = False
instances = []
debug = False
load_dumpfile = None
write_dumpfile = None
maxlevel = 1
lowerkeys = True
config = None
cache_max_age = None
cache_path_cache = None
cache_path_index = None
cache_dir = None
server = None
port = None
username = None
password = None
validate_certs = True
host_filters = []
skip_keys = []
groupby_patterns = []
if sys.version_info > (3, 0):
safe_types = [int, bool, str, float, None]
else:
safe_types = [int, long, bool, str, float, None]
iter_types = [dict, list]
bad_types = ['Array', 'disabledMethod', 'declaredAlarmState']
vimTableMaxDepth = {
"vim.HostSystem": 2,
"vim.VirtualMachine": 2,
}
custom_fields = {}
# translation table for attributes to fetch for known vim types
if not HAS_PYVMOMI:
vimTable = {}
else:
vimTable = {
vim.Datastore: ['_moId', 'name'],
vim.ResourcePool: ['_moId', 'name'],
vim.HostSystem: ['_moId', 'name'],
}
@staticmethod
def _empty_inventory():
return {"_meta": {"hostvars": {}}}
def __init__(self, load=True):
self.inventory = VMWareInventory._empty_inventory()
if load:
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Check the cache
cache_valid = self.is_cache_valid()
# Handle Cache
if self.args.refresh_cache or not cache_valid:
self.do_api_calls_update_cache()
else:
self.debugl('loading inventory from cache')
self.inventory = self.get_inventory_from_cache()
def debugl(self, text):
if self.args.debug:
try:
text = str(text)
except UnicodeEncodeError:
text = text.encode('ascii', 'ignore')
print('%s %s' % (datetime.datetime.now(), text))
def show(self):
# Data to print
self.debugl('dumping results')
data_to_print = None
if self.args.host:
data_to_print = self.get_host_info(self.args.host)
elif self.args.list:
# Display list of instances for inventory
data_to_print = self.inventory
return json.dumps(data_to_print, indent=2)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
valid = False
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
valid = True
return valid
def do_api_calls_update_cache(self):
''' Get instances and cache the data '''
self.inventory = self.instances_to_inventory(self.get_instances())
self.write_to_cache(self.inventory)
def write_to_cache(self, data):
''' Dump inventory to json file '''
with open(self.cache_path_cache, 'wb') as f:
f.write(json.dumps(data))
def get_inventory_from_cache(self):
''' Read in jsonified inventory '''
jdata = None
with open(self.cache_path_cache, 'rb') as f:
jdata = f.read()
return json.loads(jdata)
def read_settings(self):
''' Reads the settings from the vmware_inventory.ini file '''
scriptbasename = __file__
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {'vmware': {
'server': '',
'port': 443,
'username': '',
'password': '',
'validate_certs': True,
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename),
'cache_name': 'ansible-vmware',
'cache_path': '~/.ansible/tmp',
'cache_max_age': 3600,
'max_object_level': 1,
'skip_keys': 'declaredalarmstate,'
'disabledmethod,'
'dynamicproperty,'
'dynamictype,'
'environmentbrowser,'
'managedby,'
'parent,'
'childtype,'
'resourceconfig',
'alias_pattern': '{{ config.name + "_" + config.uuid }}',
'host_pattern': '{{ guest.ipaddress }}',
'host_filters': '{{ guest.gueststate == "running" }}',
'groupby_patterns': '{{ guest.guestid }},{{ "templates" if config.template else "guests"}}',
'lower_var_keys': True,
'custom_field_group_prefix': 'vmware_tag_',
'groupby_custom_field': False}
}
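        # A hedged example of a minimal vmware_inventory.ini; every value
        # below is illustrative only and mirrors the defaults above:
        #
        #   [vmware]
        #   server = vcenter.example.com
        #   username = administrator@vsphere.local
        #   password = secret
        #   validate_certs = False
        #   host_filters = {{ guest.gueststate == "running" }}
        #   groupby_patterns = {{ guest.guestid }}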
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
# where is the config?
vmware_ini_path = os.environ.get('VMWARE_INI_PATH', defaults['vmware']['ini_path'])
vmware_ini_path = os.path.expanduser(os.path.expandvars(vmware_ini_path))
config.read(vmware_ini_path)
# apply defaults
for k, v in defaults['vmware'].items():
if not config.has_option('vmware', k):
config.set('vmware', k, str(v))
# where is the cache?
self.cache_dir = os.path.expanduser(config.get('vmware', 'cache_path'))
if self.cache_dir and not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
# set the cache filename and max age
cache_name = config.get('vmware', 'cache_name')
self.cache_path_cache = self.cache_dir + "/%s.cache" % cache_name
self.debugl('cache path is %s' % self.cache_path_cache)
self.cache_max_age = int(config.getint('vmware', 'cache_max_age'))
# mark the connection info
self.server = os.environ.get('VMWARE_SERVER', config.get('vmware', 'server'))
self.debugl('server is %s' % self.server)
self.port = int(os.environ.get('VMWARE_PORT', config.get('vmware', 'port')))
self.username = os.environ.get('VMWARE_USERNAME', config.get('vmware', 'username'))
self.debugl('username is %s' % self.username)
self.password = os.environ.get('VMWARE_PASSWORD', config.get('vmware', 'password'))
self.validate_certs = os.environ.get('VMWARE_VALIDATE_CERTS', config.get('vmware', 'validate_certs'))
if self.validate_certs in ['no', 'false', 'False', False]:
self.validate_certs = False
self.debugl('cert validation is %s' % self.validate_certs)
# behavior control
self.maxlevel = int(config.get('vmware', 'max_object_level'))
self.debugl('max object level is %s' % self.maxlevel)
self.lowerkeys = config.get('vmware', 'lower_var_keys')
if type(self.lowerkeys) != bool:
if str(self.lowerkeys).lower() in ['yes', 'true', '1']:
self.lowerkeys = True
else:
self.lowerkeys = False
self.debugl('lower keys is %s' % self.lowerkeys)
self.skip_keys = list(config.get('vmware', 'skip_keys').split(','))
self.debugl('skip keys is %s' % self.skip_keys)
self.host_filters = list(config.get('vmware', 'host_filters').split(','))
self.debugl('host filters are %s' % self.host_filters)
self.groupby_patterns = list(config.get('vmware', 'groupby_patterns').split(','))
self.debugl('groupby patterns are %s' % self.groupby_patterns)
# Special feature to disable the brute force serialization of the
        # virtual machine objects. The key name for these properties does not
# matter because the values are just items for a larger list.
if config.has_section('properties'):
self.guest_props = []
for prop in config.items('properties'):
self.guest_props.append(prop[1])
# save the config
self.config = config
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on PyVmomi')
parser.add_argument('--debug', action='store_true', default=False,
help='show debug info')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to VSphere (default: False - use cache files)')
parser.add_argument('--max-instances', default=None, type=int,
help='maximum number of instances to retrieve')
self.args = parser.parse_args()
def get_instances(self):
''' Get a list of vm instances with pyvmomi '''
kwargs = {'host': self.server,
'user': self.username,
'pwd': self.password,
'port': int(self.port)}
if hasattr(ssl, 'SSLContext') and not self.validate_certs:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_NONE
kwargs['sslContext'] = context
return self._get_instances(kwargs)
def _get_instances(self, inkwargs):
''' Make API calls '''
instances = []
si = SmartConnect(**inkwargs)
self.debugl('retrieving all instances')
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
# Create a search container for virtualmachines
self.debugl('creating containerview for virtualmachines')
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= self.args.max_instances:
break
instances.append(child)
self.debugl("%s total instances in container view" % len(instances))
if self.args.host:
instances = [x for x in instances if x.name == self.args.host]
instance_tuples = []
for instance in sorted(instances):
if self.guest_props:
ifacts = self.facts_from_proplist(instance)
else:
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
self.debugl('facts collected for all instances')
cfm = content.customFieldsManager
if cfm is not None and cfm.field:
for f in cfm.field:
if f.managedObjectType == vim.VirtualMachine:
self.custom_fields[f.key] = f.name
            self.debugl('%d custom fields collected' % len(self.custom_fields))
return instance_tuples
def instances_to_inventory(self, instances):
''' Convert a list of vm objects into a json compliant inventory '''
self.debugl('re-indexing instances based on ini settings')
inventory = VMWareInventory._empty_inventory()
inventory['all'] = {}
inventory['all']['hosts'] = []
for idx, instance in enumerate(instances):
# make a unique id for this object to avoid vmware's
            # numerous UUIDs, which aren't all unique.
thisid = str(uuid.uuid4())
idata = instance[1]
# Put it in the inventory
inventory['all']['hosts'].append(thisid)
inventory['_meta']['hostvars'][thisid] = idata.copy()
inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid
# Make a map of the uuid to the alias the user wants
name_mapping = self.create_template_mapping(
inventory,
self.config.get('vmware', 'alias_pattern')
)
# Make a map of the uuid to the ssh hostname the user wants
host_mapping = self.create_template_mapping(
inventory,
self.config.get('vmware', 'host_pattern')
)
# Reset the inventory keys
for k, v in name_mapping.items():
            if not host_mapping or k not in host_mapping:
continue
# set ansible_host (2.x)
try:
inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
# 1.9.x backwards compliance
inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
except Exception:
continue
if k == v:
continue
# add new key
inventory['all']['hosts'].append(v)
inventory['_meta']['hostvars'][v] = inventory['_meta']['hostvars'][k]
# cleanup old key
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)
self.debugl('pre-filtered hosts:')
for i in inventory['all']['hosts']:
self.debugl(' * %s' % i)
# Apply host filters
for hf in self.host_filters:
if not hf:
continue
self.debugl('filter: %s' % hf)
filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
for k, v in filter_map.items():
if not v:
# delete this host
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)
self.debugl('post-filter hosts:')
for i in inventory['all']['hosts']:
self.debugl(' * %s' % i)
# Create groups
for gbp in self.groupby_patterns:
groupby_map = self.create_template_mapping(inventory, gbp)
for k, v in groupby_map.items():
if v not in inventory:
inventory[v] = {}
inventory[v]['hosts'] = []
if k not in inventory[v]['hosts']:
inventory[v]['hosts'].append(k)
if self.config.get('vmware', 'groupby_custom_field'):
for k, v in inventory['_meta']['hostvars'].items():
if 'customvalue' in v:
for tv in v['customvalue']:
if not isinstance(tv['value'], str) and not isinstance(tv['value'], unicode):
continue
newkey = None
field_name = self.custom_fields[tv['key']] if tv['key'] in self.custom_fields else tv['key']
values = []
keylist = map(lambda x: x.strip(), tv['value'].split(','))
for kl in keylist:
try:
newkey = self.config.get('vmware', 'custom_field_group_prefix') + field_name + '_' + kl
newkey = newkey.strip()
except Exception as e:
self.debugl(e)
values.append(newkey)
for tag in values:
if not tag:
continue
if tag not in inventory:
inventory[tag] = {}
inventory[tag]['hosts'] = []
if k not in inventory[tag]['hosts']:
inventory[tag]['hosts'].append(k)
return inventory
def create_template_mapping(self, inventory, pattern, dtype='string'):
''' Return a hash of uuid to templated string from pattern '''
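        # Hedged example with hypothetical values: rendering the pattern
        # '{{ config.name }}' against hostvars {'<uuid>': {'config':
        # {'name': 'vm01'}}} produces the mapping {'<uuid>': 'vm01'}.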
mapping = {}
for k, v in inventory['_meta']['hostvars'].items():
t = jinja2.Template(pattern)
newkey = None
try:
newkey = t.render(v)
newkey = newkey.strip()
except Exception as e:
self.debugl(e)
if not newkey:
continue
elif dtype == 'integer':
newkey = int(newkey)
elif dtype == 'boolean':
if newkey.lower() == 'false':
newkey = False
elif newkey.lower() == 'true':
newkey = True
elif dtype == 'string':
pass
mapping[k] = newkey
return mapping
def facts_from_proplist(self, vm):
'''Get specific properties instead of serializing everything'''
rdata = {}
for prop in self.guest_props:
self.debugl('getting %s property for %s' % (prop, vm.name))
key = prop
if self.lowerkeys:
key = key.lower()
if '.' not in prop:
# props without periods are direct attributes of the parent
rdata[key] = getattr(vm, prop)
else:
# props with periods are subkeys of parent attributes
parts = prop.split('.')
total = len(parts) - 1
# pointer to the current object
val = None
# pointer to the current result key
lastref = rdata
for idx, x in enumerate(parts):
# if the val wasn't set yet, get it from the parent
if not val:
val = getattr(vm, x)
else:
# in a subkey, get the subprop from the previous attrib
try:
val = getattr(val, x)
except AttributeError as e:
self.debugl(e)
# lowercase keys if requested
if self.lowerkeys:
x = x.lower()
# change the pointer or set the final value
if idx != total:
if x not in lastref:
lastref[x] = {}
lastref = lastref[x]
else:
lastref[x] = val
return rdata
def facts_from_vobj(self, vobj, level=0):
''' Traverse a VM object and return a json compliant data structure '''
# pyvmomi objects are not yet serializable, but may be one day ...
# https://github.com/vmware/pyvmomi/issues/21
# WARNING:
# Accessing an object attribute will trigger a SOAP call to the remote.
# Increasing the attributes collected or the depth of recursion greatly
# increases runtime duration and potentially memory+network utilization.
if level == 0:
try:
self.debugl("get facts for %s" % vobj.name)
except Exception as e:
self.debugl(e)
rdata = {}
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if x not in self.bad_types]
methods = [x for x in methods if not x.lower() in self.skip_keys]
methods = sorted(methods)
for method in methods:
# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue
# Skip callable methods
if callable(methodToCall):
continue
if self.lowerkeys:
method = method.lower()
rdata[method] = self._process_object_types(
methodToCall,
thisvm=vobj,
inkey=method,
)
return rdata
def _process_object_types(self, vobj, thisvm=None, inkey=None, level=0):
''' Serialize an object '''
rdata = {}
if type(vobj).__name__ in self.vimTableMaxDepth and level >= self.vimTableMaxDepth[type(vobj).__name__]:
return rdata
if vobj is None:
rdata = None
elif type(vobj) in self.vimTable:
rdata = {}
for key in self.vimTable[type(vobj)]:
rdata[key] = getattr(vobj, key)
elif issubclass(type(vobj), str) or isinstance(vobj, str):
if vobj.isalnum():
rdata = vobj
else:
rdata = vobj.decode('ascii', 'ignore')
elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
rdata = vobj
elif issubclass(type(vobj), int) or isinstance(vobj, int):
rdata = vobj
elif issubclass(type(vobj), float) or isinstance(vobj, float):
rdata = vobj
elif issubclass(type(vobj), long) or isinstance(vobj, long):
rdata = vobj
elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
rdata = []
try:
vobj = sorted(vobj)
except Exception:
pass
for idv, vii in enumerate(vobj):
if level + 1 <= self.maxlevel:
vid = self._process_object_types(
vii,
thisvm=thisvm,
inkey=inkey + '[' + str(idv) + ']',
level=(level + 1)
)
if vid:
rdata.append(vid)
elif issubclass(type(vobj), dict):
pass
elif issubclass(type(vobj), object):
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if x not in self.bad_types]
methods = [x for x in methods if not inkey + '.' + x.lower() in self.skip_keys]
methods = sorted(methods)
for method in methods:
# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue
if callable(methodToCall):
continue
if self.lowerkeys:
method = method.lower()
if level + 1 <= self.maxlevel:
rdata[method] = self._process_object_types(
methodToCall,
thisvm=thisvm,
inkey=inkey + '.' + method,
level=(level + 1)
)
else:
pass
return rdata
def get_host_info(self, host):
''' Return hostvars for a single host '''
if host in self.inventory['_meta']['hostvars']:
return self.inventory['_meta']['hostvars'][host]
elif self.args.host and self.inventory['_meta']['hostvars']:
match = None
for k, v in self.inventory['_meta']['hostvars']:
if self.inventory['_meta']['hostvars'][k]['name'] == self.args.host:
match = k
break
if match:
return self.inventory['_meta']['hostvars'][match]
else:
raise VMwareMissingHostException('%s not found' % host)
else:
raise VMwareMissingHostException('%s not found' % host)
if __name__ == "__main__":
# Run the script
print(VMWareInventory().show())
| gpl-3.0 |
xuweiliang/Codelibrary | nova/virt/vmwareapi/network_util.py | 27 | 9040 | # Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility functions for ESX Networking.
"""
from oslo_log import log as logging
from oslo_vmware import exceptions as vexc
from oslo_vmware import vim_util as vutil
from nova import exception
from nova.i18n import _
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
LOG = logging.getLogger(__name__)
def _get_network_obj(session, network_objects, network_name):
"""Gets the network object for the requested network.
The network object will be used when creating the VM configuration
spec. The network object contains the relevant network details for
the specific network type, for example, a distributed port group.
The method will search for the network_name in the list of
network_objects.
:param session: vCenter soap session
:param network_objects: group of networks
:param network_name: the requested network
:return: network object
"""
network_obj = {}
# network_objects is actually a RetrieveResult object from vSphere API call
for obj_content in network_objects:
# the propset attribute "need not be set" by returning API
if not hasattr(obj_content, 'propSet'):
continue
prop_dict = vm_util.propset_dict(obj_content.propSet)
network_refs = prop_dict.get('network')
if network_refs:
network_refs = network_refs.ManagedObjectReference
for network in network_refs:
# Get network properties
if network._type == 'DistributedVirtualPortgroup':
props = session._call_method(vutil,
"get_object_property",
network,
"config")
# NOTE(asomya): This only works on ESXi if the port binding
# is set to ephemeral
# For a VLAN the network name will be the UUID. For a VXLAN
# network this will have a VXLAN prefix and then the
# network name.
if network_name in props.name:
network_obj['type'] = 'DistributedVirtualPortgroup'
network_obj['dvpg'] = props.key
dvs_props = session._call_method(vutil,
"get_object_property",
props.distributedVirtualSwitch,
"uuid")
network_obj['dvsw'] = dvs_props
return network_obj
else:
props = session._call_method(vutil,
"get_object_property",
network,
"summary.name")
if props == network_name:
network_obj['type'] = 'Network'
network_obj['name'] = network_name
return network_obj
def get_network_with_the_name(session, network_name="vmnet0", cluster=None):
"""Gets reference to the network whose name is passed as the
argument.
"""
vm_networks = session._call_method(vim_util,
'get_object_properties',
None, cluster,
'ClusterComputeResource', ['network'])
while vm_networks:
if vm_networks.objects:
network_obj = _get_network_obj(session, vm_networks.objects,
network_name)
if network_obj:
session._call_method(vutil, 'cancel_retrieval',
vm_networks)
return network_obj
vm_networks = session._call_method(vutil, 'continue_retrieval',
vm_networks)
LOG.debug("Network %s not found on cluster!", network_name)
def get_vswitch_for_vlan_interface(session, vlan_interface, cluster=None):
"""Gets the vswitch associated with the physical network adapter
with the name supplied.
"""
# Get the list of vSwicthes on the Host System
host_mor = vm_util.get_host_ref(session, cluster)
vswitches_ret = session._call_method(vutil,
"get_object_property",
host_mor,
"config.network.vswitch")
# Meaning there are no vSwitches on the host. Shouldn't be the case,
    # but check just in case.
if not vswitches_ret:
return
vswitches = vswitches_ret.HostVirtualSwitch
# Get the vSwitch associated with the network adapter
for elem in vswitches:
try:
for nic_elem in elem.pnic:
if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
return elem.name
# Catching Attribute error as a vSwitch may not be associated with a
# physical NIC.
except AttributeError:
pass
def check_if_vlan_interface_exists(session, vlan_interface, cluster=None):
"""Checks if the vlan_interface exists on the esx host."""
host_mor = vm_util.get_host_ref(session, cluster)
physical_nics_ret = session._call_method(vutil,
"get_object_property",
host_mor,
"config.network.pnic")
# Meaning there are no physical nics on the host
if not physical_nics_ret:
return False
physical_nics = physical_nics_ret.PhysicalNic
for pnic in physical_nics:
if vlan_interface == pnic.device:
return True
return False
def get_vlanid_and_vswitch_for_portgroup(session, pg_name, cluster=None):
"""Get the vlan id and vswitch associated with the port group."""
host_mor = vm_util.get_host_ref(session, cluster)
port_grps_on_host_ret = session._call_method(vutil,
"get_object_property",
host_mor,
"config.network.portgroup")
if not port_grps_on_host_ret:
msg = _("ESX SOAP server returned an empty port group "
"for the host system in its response")
LOG.error(msg)
raise exception.NovaException(msg)
port_grps_on_host = port_grps_on_host_ret.HostPortGroup
for p_gp in port_grps_on_host:
if p_gp.spec.name == pg_name:
p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
return p_gp.spec.vlanId, p_grp_vswitch_name
return None, None
def create_port_group(session, pg_name, vswitch_name, vlan_id=0, cluster=None):
"""Creates a port group on the host system with the vlan tags
supplied. VLAN id 0 means no vlan id association.
"""
client_factory = session.vim.client.factory
add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
client_factory,
vswitch_name,
pg_name,
vlan_id)
host_mor = vm_util.get_host_ref(session, cluster)
network_system_mor = session._call_method(vutil,
"get_object_property",
host_mor,
"configManager.networkSystem")
LOG.debug("Creating Port Group with name %s on "
"the ESX host", pg_name)
try:
session._call_method(session.vim,
"AddPortGroup", network_system_mor,
portgrp=add_prt_grp_spec)
except vexc.AlreadyExistsException:
# There can be a race condition when two instances try
# adding port groups at the same time. One succeeds, then
# the other one will get an exception. Since we are
# concerned with the port group being created, which is done
# by the other call, we can ignore the exception.
LOG.debug("Port Group %s already exists.", pg_name)
LOG.debug("Created Port Group with name %s on "
"the ESX host", pg_name)
| apache-2.0 |
ClearCorp-dev/odoo-clearcorp | TODO-8.0/account_tax_translate_name/__openerp__.py | 3 | 1599 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : 'Account tax translate name',
"version" : '1.0',
"author" : 'CLEARCORP S.A',
'complexity': 'easy',
"description": """
    This module disables translation of the name field on account taxes.
""",
"category": 'Accounting & Finance',
"sequence": 4,
"website" : "http://clearcorp.co.cr",
"images" : [],
"icon" : False,
"depends" : ["account"],
"init_xml" : [],
"demo_xml" : [],
"update_xml" : [],
"test" : [],
"auto_install": False,
"application": False,
"installable": True,
}
| agpl-3.0 |
lepistone/server-tools | fetchmail_attach_from_folder/match_algorithm/email_exact.py | 54 | 2397 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from .base import base
from openerp.tools.safe_eval import safe_eval
from openerp.tools.mail import email_split
class email_exact(base):
    '''Search for exactly the mail address as noted in the email'''
name = 'Exact mailadress'
required_fields = ['model_field', 'mail_field']
def _get_mailaddresses(self, conf, mail_message):
mailaddresses = []
fields = conf.mail_field.split(',')
for field in fields:
if field in mail_message:
mailaddresses += email_split(mail_message[field])
return [addr.lower() for addr in mailaddresses]
def _get_mailaddress_search_domain(
self, conf, mail_message, operator='=', values=None):
mailaddresses = values or self._get_mailaddresses(
conf, mail_message)
if not mailaddresses:
return [(0, '=', 1)]
search_domain = ((['|'] * (len(mailaddresses) - 1)) + [
(conf.model_field, operator, addr) for addr in mailaddresses] +
safe_eval(conf.domain or '[]'))
return search_domain
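    # Hedged illustration: for two collected addresses and a model_field of
    # 'email', the prefix-notation domain built above would look like
    #   ['|', ('email', '=', 'a@example.com'), ('email', '=', 'b@example.com')]
    # i.e. len(mailaddresses) - 1 leading '|' operators OR the terms
    # together, followed by any extra domain from conf.domain.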
def search_matches(self, cr, uid, conf, mail_message, mail_message_org):
conf_model = conf.pool.get(conf.model_id.model)
search_domain = self._get_mailaddress_search_domain(conf, mail_message)
return conf_model.search(
cr, uid, search_domain, order=conf.model_order)
| agpl-3.0 |
sagar30051991/ozsmart-erp | erpnext/accounts/report/bank_reconciliation_statement/bank_reconciliation_statement.py | 8 | 4130 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt
from frappe import _
def execute(filters=None):
if not filters: filters = {}
columns = get_columns()
if not filters.get("account"): return columns, []
account_currency = frappe.db.get_value("Account", filters.account, "account_currency")
data = get_entries(filters)
from erpnext.accounts.utils import get_balance_on
balance_as_per_system = get_balance_on(filters["account"], filters["report_date"])
total_debit, total_credit = 0,0
for d in data:
total_debit += flt(d.debit)
total_credit += flt(d.credit)
amounts_not_reflected_in_system = frappe.db.sql("""
select sum(jvd.debit_in_account_currency - jvd.credit_in_account_currency)
from `tabJournal Entry Account` jvd, `tabJournal Entry` jv
where jvd.parent = jv.name and jv.docstatus=1 and jvd.account=%s
and jv.posting_date > %s and jv.clearance_date <= %s and ifnull(jv.is_opening, 'No') = 'No'
""", (filters["account"], filters["report_date"], filters["report_date"]))
amounts_not_reflected_in_system = flt(amounts_not_reflected_in_system[0][0]) \
if amounts_not_reflected_in_system else 0.0
bank_bal = flt(balance_as_per_system) - flt(total_debit) + flt(total_credit) \
+ amounts_not_reflected_in_system
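	# Hedged worked example with made-up figures: a GL balance of 1000,
	# uncleared debits of 300, uncleared credits of 100 and 50 of entries
	# cleared in the statement but posted after the report date give
	# 1000 - 300 + 100 + 50 = 850 as the calculated bank statement balance.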
data += [
get_balance_row(_("Bank Statement balance as per General Ledger"), balance_as_per_system, account_currency),
{},
{
"journal_entry": _("Outstanding Cheques and Deposits to clear"),
"debit": total_debit,
"credit": total_credit,
"account_currency": account_currency
},
get_balance_row(_("Cheques and Deposits incorrectly cleared"), amounts_not_reflected_in_system,
account_currency),
{},
get_balance_row(_("Calculated Bank Statement balance"), bank_bal, account_currency)
]
return columns, data
def get_columns():
return [
{
"fieldname": "posting_date",
"label": _("Posting Date"),
"fieldtype": "Date",
"width": 100
},
{
"fieldname": "journal_entry",
"label": _("Journal Entry"),
"fieldtype": "Link",
"options": "Journal Entry",
"width": 220
},
{
"fieldname": "debit",
"label": _("Debit"),
"fieldtype": "Currency",
"options": "account_currency",
"width": 120
},
{
"fieldname": "credit",
"label": _("Credit"),
"fieldtype": "Currency",
"options": "account_currency",
"width": 120
},
{
"fieldname": "against_account",
"label": _("Against Account"),
"fieldtype": "Link",
"options": "Account",
"width": 200
},
{
"fieldname": "reference",
"label": _("Reference"),
"fieldtype": "Data",
"width": 100
},
{
"fieldname": "ref_date",
"label": _("Ref Date"),
"fieldtype": "Date",
"width": 110
},
{
"fieldname": "clearance_date",
"label": _("Clearance Date"),
"fieldtype": "Date",
"width": 110
},
{
"fieldname": "account_currency",
"label": _("Currency"),
"fieldtype": "Link",
"options": "Currency",
"width": 100
}
]
def get_entries(filters):
entries = frappe.db.sql("""select
jv.posting_date, jv.name as journal_entry, jvd.debit_in_account_currency as debit,
jvd.credit_in_account_currency as credit, jvd.against_account,
jv.cheque_no as reference, jv.cheque_date as ref_date, jv.clearance_date, jvd.account_currency
from
`tabJournal Entry Account` jvd, `tabJournal Entry` jv
where jvd.parent = jv.name and jv.docstatus=1
and jvd.account = %(account)s and jv.posting_date <= %(report_date)s
and ifnull(jv.clearance_date, '4000-01-01') > %(report_date)s
and ifnull(jv.is_opening, 'No') = 'No'
order by jv.posting_date DESC,jv.name DESC""", filters, as_dict=1)
return entries
def get_balance_row(label, amount, account_currency):
if amount > 0:
return {
"journal_entry": label,
"debit": amount,
"credit": 0,
"account_currency": account_currency
}
else:
return {
"journal_entry": label,
"debit": 0,
"credit": abs(amount),
"account_currency": account_currency
}
| agpl-3.0 |
ZenDevelopmentSystems/pdnn | utils/network_config.py | 2 | 7314 | # Copyright 2014 Yajie Miao Carnegie Mellon University
# 2015 Yun Wang Carnegie Mellon University
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
import theano
import theano.tensor as T
from io_func.data_io import read_data_args, read_dataset
from learn_rates import LearningRateExpDecay
from utils import parse_lrate, parse_activation, parse_conv_spec, activation_to_txt, string2bool
class NetworkConfig():
def __init__(self):
self.model_type = 'DNN'
self.batch_size = 256
self.momentum = 0.5
self.lrate = LearningRateExpDecay(start_rate=0.08, scale_by = 0.5,
min_derror_decay_start = 0.05,
min_derror_stop = 0.05,
min_epoch_decay_start=15)
self.activation = T.nnet.sigmoid
self.activation_text = 'sigmoid'
self.do_maxout = False
self.pool_size = 1
self.do_dropout = False
self.dropout_factor = []
self.input_dropout_factor = 0.0
self.max_col_norm = None
self.l1_reg = None
self.l2_reg = None
# data reading
self.train_sets = None
self.train_xy = None
self.train_x = None
self.train_y = None
self.valid_sets = None
self.valid_xy = None
self.valid_x = None
self.valid_y = None
self.test_sets = None
self.test_xy = None
self.test_x = None
self.test_y = None
# specifically for DNN
self.n_ins = 0
self.hidden_layers_sizes = []
self.n_outs = 0
self.non_updated_layers = []
# specifically for DNN_SAT
self.ivec_n_ins = 0
self.ivec_hidden_layers_sizes = []
self.ivec_n_outs = 0
# specifically for convolutional networks
self.conv_layer_configs = []
self.conv_activation = T.nnet.sigmoid
self.conv_activation_text = 'sigmoid'
self.use_fast = False
# number of epochs between model saving (for later model resuming)
self.model_save_step = 1
# the path to save model into Kaldi-compatible format
self.cfg_output_file = ''
self.param_output_file = ''
self.kaldi_output_file = ''
    # initialize pfile reading. TODO: interface *directly* with Kaldi feature and alignment files
def init_data_reading(self, train_data_spec, valid_data_spec):
train_dataset, train_dataset_args = read_data_args(train_data_spec)
valid_dataset, valid_dataset_args = read_data_args(valid_data_spec)
self.train_sets, self.train_xy, self.train_x, self.train_y = read_dataset(train_dataset, train_dataset_args)
self.valid_sets, self.valid_xy, self.valid_x, self.valid_y = read_dataset(valid_dataset, valid_dataset_args)
def init_data_reading_test(self, data_spec):
dataset, dataset_args = read_data_args(data_spec)
self.test_sets, self.test_xy, self.test_x, self.test_y = read_dataset(dataset, dataset_args)
# initialize the activation function
def init_activation(self):
self.activation = parse_activation(self.activation_text)
def parse_config_common(self, arguments):
# parse batch_size, momentum, learning rate and regularization
if arguments.has_key('batch_size'):
self.batch_size = int(arguments['batch_size'])
if arguments.has_key('momentum'):
self.momentum = float(arguments['momentum'])
if arguments.has_key('lrate'):
self.lrate = parse_lrate(arguments['lrate'])
if arguments.has_key('l1_reg'):
self.l1_reg = float(arguments['l1_reg'])
if arguments.has_key('l2_reg'):
self.l2_reg = float(arguments['l2_reg'])
if arguments.has_key('max_col_norm'):
self.max_col_norm = float(arguments['max_col_norm'])
# parse activation function, including maxout
if arguments.has_key('activation'):
self.activation_text = arguments['activation']
self.activation = parse_activation(arguments['activation'])
if arguments['activation'].startswith('maxout'):
self.do_maxout = True
self.pool_size = int(arguments['activation'].replace('maxout:',''))
self.activation_text = 'maxout'
# parse dropout. note that dropout can be applied to the input features only when dropout is also
# applied to hidden-layer outputs at the same time. that is, you cannot apply dropout only to the
# input features
if arguments.has_key('dropout_factor'):
self.do_dropout = True
factors = arguments['dropout_factor'].split(',')
self.dropout_factor = [float(factor) for factor in factors]
if arguments.has_key('input_dropout_factor'):
self.input_dropout_factor = float(arguments['input_dropout_factor'])
if arguments.has_key('cfg_output_file'):
self.cfg_output_file = arguments['cfg_output_file']
if arguments.has_key('param_output_file'):
self.param_output_file = arguments['param_output_file']
if arguments.has_key('kaldi_output_file'):
self.kaldi_output_file = arguments['kaldi_output_file']
if arguments.has_key('model_save_step'):
self.model_save_step = int(arguments['model_save_step'])
if arguments.has_key('non_updated_layers'):
layers = arguments['non_updated_layers'].split(",")
self.non_updated_layers = [int(layer) for layer in layers]
def parse_config_dnn(self, arguments, nnet_spec):
self.parse_config_common(arguments)
# parse DNN network structure
nnet_layers = nnet_spec.split(':')
self.n_ins = int(nnet_layers[0])
self.hidden_layers_sizes = [int(nnet_layers[i]) for i in range(1, len(nnet_layers)-1)]
self.n_outs = int(nnet_layers[-1])
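        # Hedged example: a hypothetical nnet_spec of '250:1024:1024:1920'
        # yields n_ins=250, hidden_layers_sizes=[1024, 1024], n_outs=1920.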
def parse_config_cnn(self, arguments, nnet_spec, conv_nnet_spec):
self.parse_config_dnn(arguments, nnet_spec)
# parse convolutional layer structure
self.conv_layer_configs = parse_conv_spec(conv_nnet_spec, self.batch_size)
# parse convolutional layer activation
# parse activation function, including maxout
if arguments.has_key('conv_activation'):
self.conv_activation_text = arguments['conv_activation']
self.conv_activation = parse_activation(arguments['conv_activation'])
# maxout not supported yet
# whether we use the fast version of convolution
if arguments.has_key('use_fast'):
self.use_fast = string2bool(arguments['use_fast'])
| apache-2.0 |
zarboz/EvilZ-Kernel122 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
Jon-ICS/upm | examples/python/l298-stepper.py | 7 | 2459 | #!/usr/bin/python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_l298 as upmL298
def main():
# Instantiate a Stepper motor on a L298 Dual H-Bridge.
# This was tested with the NEMA-17 12V, 350mA, with 200 steps per rev.
myHBridge = upmL298.L298(200, 3, 4, 7, 8, 9)
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This lets you run code on exit,
# including functions from myHBridge
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
myHBridge.setSpeed(10) # 10 RPMs
myHBridge.setDirection(upmL298.L298.DIR_CW)
myHBridge.enable(True)
print("Rotating 1 full revolution at 10 RPM speed.")
# move 200 steps, a full rev
myHBridge.stepperSteps(200)
print("Sleeping for 2 seconds...")
time.sleep(2)
print("Rotating 1/2 revolution in opposite direction at 10 RPM speed.")
myHBridge.setDirection(upmL298.L298.DIR_CCW)
myHBridge.stepperSteps(100)
# release
myHBridge.enable(False)
# exitHandler is called automatically
if __name__ == '__main__':
main()
| mit |
FrancescoRizzi/AWSomesauce | articles/BAS4-pws/custauth/custauth.py | 1 | 18186 | #!/usr/bin/env python
import os
import json
import StringIO
from contextlib import closing
import re
import time
import pprint
import boto3
from boto3.session import Session
import botocore
import jwt
from cryptography.x509 import load_pem_x509_certificate
from cryptography.hazmat.backends import default_backend
# Simplest form of logging using the standard logging module:
# ============================================================
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Top-Level Handler:
# ============================================================
def lambda_handler(event, context):
logger.info("CustAuth Triggered.")
authToken = event.get('authorizationToken', '')
methodArn = event.get('methodArn', '')
authHeader = event.get('Authorization', '')
logger.info("Authorization Token : '{0!s}'.".format(authToken))
logger.info("Method ARN : '{0!s}'.".format(methodArn))
logger.info("Authorization Header: '{0!s}'.".format(authHeader))
# Check Configuration before wasting time
# ========================================================
# AUTH_APP_ID: required
auth_app_id = os.environ.get('AUTH_APP_ID', None)
if not auth_app_id:
logger.error("Missing Required 'AUTH_APP_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_APP_ID'")
logger.info("Auth App ID : '{0!s}'.".format(auth_app_id))
# AUTH_TENANT_ID: required
auth_tenant_id = os.environ.get('AUTH_TENANT_ID', None)
if not auth_tenant_id:
logger.error("Missing Required 'AUTH_TENANT_ID' Environmental Variable.")
raise ValueError("Missing/blank 'AUTH_TENANT_ID'")
logger.info("Auth Tenant ID : '{0!s}'.".format(auth_tenant_id))
# CERTS_BUCKET: required
certs_bucket = os.environ.get('CERTS_BUCKET', None)
if not certs_bucket:
logger.error("Missing Required 'CERTS_BUCKET' Environmental Variable.")
raise ValueError("Missing/blank 'CERTS_BUCKET'")
logger.info("Certificates Bucket : '{0!s}'.".format(certs_bucket))
# ========================================================
# Client credentials expected in the authorizationToken, in the form:
# 'Bearer <id_token>'
# Missing authorizationToken:
# response 401 - Unauthorized (although we don't send back a 'WWW-Authenticate' header as we should)
if not authToken:
logger.warn("Missing Authorization Token: will trigger 401-Unauthorized response.")
raise Exception('Unauthorized')
validator = TokenValidator()
validToken = validator.ValidateToken(authToken, auth_app_id, auth_tenant_id, certs_bucket)
logger.info("Is the Authorization Token valid? {0!s}".format(validToken))
# authorizationToken invalid (format or contents):
# respond with Policy DENYING access, which will trigger API Gateway to respond with
# response 403 - Forbidden
# authorizationToken valid (format and contents):
# respond with Policy ALLOWING access, which will trigger API Gateway to
# proceed with the backend integration configured on the method.
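# Hedged sketch (not from the original article) of the TOKEN-authorizer
# event shape this handler expects; the ARN values are placeholders:
#
#   {
#     "type": "TOKEN",
#     "authorizationToken": "Bearer eyJ...",
#     "methodArn": "arn:aws:execute-api:us-east-1:123456789012:abcdef123/prod/GET/resource"
#   }
#
# Splitting that methodArn on ':' yields the region at index 3, the
# account id at index 4, and 'restApiId/stage/verb/resource' at index 5,
# which is what the code below relies on.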
principalId = auth_app_id
arnParts = event['methodArn'].split(':')
apiGatewayArnTmp = arnParts[5].split('/')
awsAccountId = arnParts[4]
policy = AuthPolicy(principalId, awsAccountId)
policy.restApiId = apiGatewayArnTmp[0]
policy.region = arnParts[3]
policy.stage = apiGatewayArnTmp[1]
policyDesc = ''
if validToken:
policy.allowAllMethods()
policyDesc = 'ALLOW'
else:
policy.denyAllMethods()
policyDesc = 'DENY'
authResponse = policy.build()
# Optional: context
# The response can also include a 'context' key-value pairs mapping,
# which will be rendered available to the configured backend
# (if the policy is such that the request handling continues)
# as $context.authorizer.<key>
# This mapping is part of the cached response.
#
# context = {
# 'key': 'value', # $context.authorizer.key -> value
# 'number' : 1,
# 'bool' : True
# }
# authResponse['context'] = context
#
# INVALID formats:
# context['arr'] = ['foo']
# context['obj'] = {'foo':'bar'}
logger.info("CustAuth completed: returning policy to {0!s} access.".format(policyDesc))
return authResponse
# TokenValidator
# ============================================================
class TokenValidator(object):
PEMSTART = "-----BEGIN CERTIFICATE-----\n"
PEMEND = "\n-----END CERTIFICATE-----\n"
def __init__(self):
self._session = None
self._client = None
def ValidateToken(self, auth_header, auth_app_id, auth_tenant_id, certs_bucket):
# auth_header expected to be in the form:
# 'Bearer <id_token>'
(pre, encoded_token) = auth_header.split(' ', 1)  # maxsplit=1 so extra spaces cannot break the two-value unpack
if (not pre) or (pre.upper() != "BEARER"):
logger.warn("Authorization Token did not match expected 'Bearer <id_token>' format.")
return False
expected_issuer = 'https://sts.windows.net/{0!s}/'.format(auth_tenant_id)
unverified_headers = jwt.get_unverified_header(encoded_token)
#unverified_token = jwt.decode(encoded_token, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer, options={'verify_signature': False})
#x5t = unverified_token.get('x5t', None)
#kid = unverified_token.get('kid', None)
kid = unverified_headers.get('kid', None)
logger.info("Token 'kid': '{0!s}'.".format(kid))
if not kid:
logger.warn("Could not extract 'kid' property from token.")
return False
cert_pem = self.GetSigningCertificate(certs_bucket, kid)
if cert_pem:
logger.info("Retrieved Signing Certificate.")
#if isinstance(cert_pem, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# cert_pem = cert_pem.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
cert = load_pem_x509_certificate(cert_pem, default_backend())
logger.info("Loaded Signing Certificate.")
public_key = cert.public_key()
logger.info("Extracted Public Key from Signing Certificate.")
decoded = jwt.decode(encoded_token, public_key, algorithms=['RS256'], audience=auth_app_id, issuer=expected_issuer)
# NOTE: the JWT decode method verifies
# - general format of the encoded token
# - signature, using the given public key
# - aud claim (Audience) vs audience value
# - exp claim (Expiration) vs current datetime (UTC)
# - nbf claim (Not Before) vs current datetime (UTC)
# - iss claim (Issuer) vs issuer value
if decoded:
logger.info("Token Decoded and Validated Successfully.")
return True
else:
logger.warn("Failed to Decode Token when verifying signature.")
return False
else:
logger.warn("Could not retrieve signing certificate matching token's 'kid' property ('{0!s}').".format(kid))
return False
def GetSigningCertificate(self, certs_bucket, kid):
self.EnsureClient()
discovery_record_str = None
with closing(StringIO.StringIO()) as dest:
self._client.download_fileobj(
Bucket=certs_bucket,
Key=kid,
Fileobj=dest)
discovery_record_str = dest.getvalue()
if not discovery_record_str:
logger.warn("Could not retrieve Discovery Record from Bucket.")
return None
logger.info("Retrieved Discovery Record Payload from Bucket.")
# discovery_record_str is the payload extracted from
# the bucket, presumed to be the JSON-formatted string
# of the signing certificate discovery record. eg:
# {
# "x5t": "...",
# "use": "...",
# "e": "...",
# "kty": "...",
# "n": "...",
# "x5c": [
# "..."
# ],
# "issuer": "...",
# "kid": "..."
# }
# What we need to extract as 'certificate' is
# the first value in the "x5c" property list
discovery_record = json.loads(discovery_record_str)
logger.info("Parsed Discovery Record JSON.")
x5c = discovery_record.get('x5c', None)
if not x5c:
logger.warn("Could not find 'x5c' property from Discovery Record.")
return None
logger.info("Discovery Record x5c found.")
raw_cert = ""
if isinstance(x5c, list):
raw_cert = x5c[0]
elif isinstance(x5c, basestring):
raw_cert = x5c
else:
logger.warn("Unexpected data type for x5c value from Discovery Record (expected string or list).")
return None
logger.info("Raw Cert:|{0!s}|".format(raw_cert))
if isinstance(raw_cert, unicode):
logger.info("Raw Certificate is unicode. Attempting STRICT conversion to ASCII.")
raw_cert = raw_cert.encode('ascii', 'strict')
logger.info("Raw Certificate encoded to ASCII.")
logger.info("Formatting Raw Certificate according to PEM 64-characters lines.")
raw_cert = self.InsertNewLines(raw_cert)
logger.info("Raw Certificate lines length normalized to PEM.")
pem_cert = self.PEMSTART + raw_cert + self.PEMEND
logger.info("After wrapping Raw certificate in PEM Markers:")
logger.info(pem_cert)
#tmp = "is NOT"
#if isinstance(raw_cert, unicode):
# tmp = "is"
#logger.info("Before Wrapping in PEM delimiters, the raw_cert data type {0!s} unicode.".format(tmp))
#
#pem_cert = self.PEMSTART + raw_cert + self.PEMEND
#logger.info("PEM Cert:|{0!s}|".format(pem_cert))
#
#tmp = "is NOT"
#if isinstance(pem_cert, unicode):
# tmp = "is"
#logger.info("After Wrapping in PEM delimiters, the pem_cert data type {0!s} unicode.".format(tmp))
#
#if isinstance(pem_cert, unicode):
# logger.info("Signing Certificate is unicode. Will attempt STRICT conversion.")
# pem_cert = pem_cert.encode('ascii', 'strict')
# logger.info("Signing Certificate unicode encoded to ASCII.")
#
#logger.info("Splitting according to PEM format (64 characters per line).")
#pem_cert = self.InsertNewLines(pem_cert)
#logger.info("After splitting in 64-character long lines:")
#logger.info(pem_cert)
return pem_cert
def InsertNewLines(self, s, every=64):
lines = []
for i in xrange(0, len(s), every):
lines.append(s[i:i+every])
return '\n'.join(lines)
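# Illustrative behaviour of InsertNewLines (worked example, not in the
# source): a 70-character base64 string becomes one full 64-character
# line plus a 6-character remainder, matching PEM's 64-column body
# convention:
#
#   InsertNewLines('A' * 70) == ('A' * 64) + '\n' + ('A' * 6)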
def EnsureClient(self):
self.EnsureSession()
if not self._client:
self._client = self._session.client('s3')
def EnsureSession(self):
if not self._session:
self._session = boto3.Session()
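# Hedged usage sketch for TokenValidator (illustrative only; the app id,
# tenant id and bucket name below are placeholders, not values from the
# article):
#
#   validator = TokenValidator()
#   is_valid = validator.ValidateToken(
#       'Bearer eyJhbGciOiJSUzI1NiJ9...', 'my-app-id',
#       'my-tenant-id', 'my-certs-bucket')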
# HttpVerbs
# ============================================================
class HttpVerb:
GET = "GET"
POST = "POST"
PUT = "PUT"
PATCH = "PATCH"
HEAD = "HEAD"
DELETE = "DELETE"
OPTIONS = "OPTIONS"
ALL = "*"
# AuthPolicy
# ============================================================
class AuthPolicy(object):
awsAccountId = ""
"""The AWS account id the policy will be generated for. This is used to create the method ARNs."""
principalId = ""
"""The principal used for the policy, this should be a unique identifier for the end user."""
version = "2012-10-17"
"""The policy version used for the evaluation. This should always be '2012-10-17'"""
pathRegex = r"^[/.a-zA-Z0-9-\*]+$"
"""The regular expression used to validate resource paths for the policy"""
"""these are the internal lists of allowed and denied methods. These are lists
of objects and each object has 2 properties: A resource ARN and a nullable
conditions statement.
the build method processes these lists and generates the appropriate
statements for the final policy"""
allowMethods = []
denyMethods = []
restApiId = "*"
"""The API Gateway API id. By default this is set to '*'"""
region = "*"
"""The region where the API is deployed. By default this is set to '*'"""
stage = "*"
"""The name of the stage used in the policy. By default this is set to '*'"""
def __init__(self, principal, awsAccountId):
self.awsAccountId = awsAccountId
self.principalId = principal
self.allowMethods = []
self.denyMethods = []
def _addMethod(self, effect, verb, resource, conditions):
"""Adds a method to the internal lists of allowed or denied methods. Each object in
the internal list contains a resource ARN and a condition statement. The condition
statement can be null."""
if verb != "*" and not hasattr(HttpVerb, verb):
raise NameError("Invalid HTTP verb " + verb + ". Allowed verbs are defined in the HttpVerb class")
resourcePattern = re.compile(self.pathRegex)
if not resourcePattern.match(resource):
raise NameError("Invalid resource path: " + resource + ". Path should match " + self.pathRegex)
if resource[:1] == "/":
resource = resource[1:]
resourceArn = ("arn:aws:execute-api:" +
self.region + ":" +
self.awsAccountId + ":" +
self.restApiId + "/" +
self.stage + "/" +
verb + "/" +
resource)
if effect.lower() == "allow":
self.allowMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
elif effect.lower() == "deny":
self.denyMethods.append({
'resourceArn' : resourceArn,
'conditions' : conditions
})
def _getEmptyStatement(self, effect):
"""Returns an empty statement object prepopulated with the correct action and the
desired effect."""
statement = {
'Action': 'execute-api:Invoke',
'Effect': effect[:1].upper() + effect[1:].lower(),
'Resource': []
}
return statement
def _getStatementForEffect(self, effect, methods):
"""This function loops over an array of objects containing a resourceArn and
conditions statement and generates the array of statements for the policy."""
statements = []
if len(methods) > 0:
statement = self._getEmptyStatement(effect)
for curMethod in methods:
if curMethod['conditions'] is None or len(curMethod['conditions']) == 0:
statement['Resource'].append(curMethod['resourceArn'])
else:
conditionalStatement = self._getEmptyStatement(effect)
conditionalStatement['Resource'].append(curMethod['resourceArn'])
conditionalStatement['Condition'] = curMethod['conditions']
statements.append(conditionalStatement)
statements.append(statement)
return statements
def allowAllMethods(self):
"""Adds a '*' allow to the policy to authorize access to all methods of an API"""
self._addMethod("Allow", HttpVerb.ALL, "*", [])
def denyAllMethods(self):
"""Adds a '*' deny to the policy to deny access to all methods of an API"""
self._addMethod("Deny", HttpVerb.ALL, "*", [])
def allowMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods for the policy"""
self._addMethod("Allow", verb, resource, [])
def denyMethod(self, verb, resource):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods for the policy"""
self._addMethod("Deny", verb, resource, [])
def allowMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of allowed
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Allow", verb, resource, conditions)
def denyMethodWithConditions(self, verb, resource, conditions):
"""Adds an API Gateway method (Http verb + Resource path) to the list of denied
methods and includes a condition for the policy statement. More on AWS policy
conditions here: http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements.html#Condition"""
self._addMethod("Deny", verb, resource, conditions)
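# Hedged sketch (not part of the original class) of typical use from an
# authorizer; the account and API ids below are fake placeholders:
#
#   policy = AuthPolicy('some-principal', '123456789012')
#   policy.restApiId = 'abcdef123'
#   policy.region = 'us-east-1'
#   policy.stage = 'prod'
#   policy.allowMethod(HttpVerb.GET, '/pets')
#   response = policy.build()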
def build(self):
"""Generates the policy document based on the internal lists of allowed and denied
conditions. This will generate a policy with two main statements for the effect:
one statement for Allow and one statement for Deny.
Methods that includes conditions will have their own statement in the policy."""
if ((self.allowMethods is None or len(self.allowMethods) == 0) and
(self.denyMethods is None or len(self.denyMethods) == 0)):
raise NameError("No statements defined for the policy")
policy = {
'principalId' : self.principalId,
'policyDocument' : {
'Version' : self.version,
'Statement' : []
}
}
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Allow", self.allowMethods))
policy['policyDocument']['Statement'].extend(self._getStatementForEffect("Deny", self.denyMethods))
return policy | mit |
n0trax/ansible | lib/ansible/plugins/connection/winrm.py | 12 | 25632 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: winrm
short_description: Run tasks over Microsoft's WinRM
description:
- Run commands or put/fetch on a target via WinRM
version_added: "2.0"
options:
remote_addr:
description:
- Address of the windows machine
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_winrm_host
remote_user:
description:
- The user to log in as to the Windows machine
vars:
- name: ansible_user
- name: ansible_winrm_user
"""
import base64
import inspect
import os
import re
import shlex
import traceback
import json
import tempfile
import subprocess
HAVE_KERBEROS = False
try:
import kerberos
HAVE_KERBEROS = True
except ImportError:
pass
from ansible.errors import AnsibleError, AnsibleConnectionFailure
from ansible.errors import AnsibleFileNotFound
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves.urllib.parse import urlunsplit
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.six import binary_type
from ansible.plugins.connection import ConnectionBase
from ansible.plugins.shell.powershell import leaf_exec
from ansible.utils.hashing import secure_hash
from ansible.utils.path import makedirs_safe
try:
import winrm
from winrm import Response
from winrm.protocol import Protocol
HAS_WINRM = True
except ImportError as e:
HAS_WINRM = False
WINRM_IMPORT_ERR = to_text(e)  # captured here: 'e' is unbound outside this block on Python 3
try:
import xmltodict
HAS_XMLTODICT = True
except ImportError as e:
HAS_XMLTODICT = False
XMLTODICT_IMPORT_ERR = to_text(e)  # captured here: 'e' is unbound outside this block on Python 3
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
'''WinRM connections over HTTP/HTTPS.'''
transport = 'winrm'
module_implementation_preferences = ('.ps1', '.exe', '')
become_methods = ['runas']
allow_executable = False
def __init__(self, *args, **kwargs):
self.has_pipelining = True
self.always_pipeline_modules = True
self.has_native_async = True
self.protocol = None
self.shell_id = None
self.delegate = None
self._shell_type = 'powershell'
# FUTURE: Add runas support
super(Connection, self).__init__(*args, **kwargs)
def set_host_overrides(self, host, variables, templar):
'''
Override WinRM-specific options from host variables.
'''
if not HAS_WINRM:
return
hostvars = {}
for k in variables:
if k.startswith('ansible_winrm'):
hostvars[k] = templar.template(variables[k])
self._winrm_host = self._play_context.remote_addr
self._winrm_port = int(self._play_context.port or 5986)
self._winrm_scheme = hostvars.get('ansible_winrm_scheme', 'http' if self._winrm_port == 5985 else 'https')
self._winrm_path = hostvars.get('ansible_winrm_path', '/wsman')
self._winrm_user = self._play_context.remote_user
self._winrm_pass = self._play_context.password
self._become_method = self._play_context.become_method
self._become_user = self._play_context.become_user
self._become_pass = self._play_context.become_pass
self._kinit_cmd = hostvars.get('ansible_winrm_kinit_cmd', 'kinit')
if hasattr(winrm, 'FEATURE_SUPPORTED_AUTHTYPES'):
self._winrm_supported_authtypes = set(winrm.FEATURE_SUPPORTED_AUTHTYPES)
else:
# for legacy versions of pywinrm, use the values we know are supported
self._winrm_supported_authtypes = set(['plaintext', 'ssl', 'kerberos'])
# TODO: figure out what we want to do with auto-transport selection in the face of NTLM/Kerb/CredSSP/Cert/Basic
transport_selector = 'ssl' if self._winrm_scheme == 'https' else 'plaintext'
if HAVE_KERBEROS and ((self._winrm_user and '@' in self._winrm_user)):
self._winrm_transport = 'kerberos,%s' % transport_selector
else:
self._winrm_transport = transport_selector
self._winrm_transport = hostvars.get('ansible_winrm_transport', self._winrm_transport)
if isinstance(self._winrm_transport, string_types):
self._winrm_transport = [x.strip() for x in self._winrm_transport.split(',') if x.strip()]
unsupported_transports = set(self._winrm_transport).difference(self._winrm_supported_authtypes)
if unsupported_transports:
raise AnsibleError('The installed version of WinRM does not support transport(s) %s' % list(unsupported_transports))
# if kerberos is among our transports and there's a password specified, we're managing the tickets
kinit_mode = to_text(hostvars.get('ansible_winrm_kinit_mode', '')).strip()
if kinit_mode == "":
# HACK: ideally, remove multi-transport stuff
self._kerb_managed = "kerberos" in self._winrm_transport and self._winrm_pass
elif kinit_mode == "managed":
self._kerb_managed = True
elif kinit_mode == "manual":
self._kerb_managed = False
else:
raise AnsibleError('Unknown ansible_winrm_kinit_mode value: "%s" (must be "managed" or "manual")' % kinit_mode)
# arg names we're going passing directly
internal_kwarg_mask = set(['self', 'endpoint', 'transport', 'username', 'password', 'scheme', 'path', 'kinit_mode', 'kinit_cmd'])
self._winrm_kwargs = dict(username=self._winrm_user, password=self._winrm_pass)
argspec = inspect.getargspec(Protocol.__init__)
supported_winrm_args = set(argspec.args)
supported_winrm_args.update(internal_kwarg_mask)
passed_winrm_args = set([v.replace('ansible_winrm_', '') for v in hostvars if v.startswith('ansible_winrm_')])
unsupported_args = passed_winrm_args.difference(supported_winrm_args)
# warn for kwargs unsupported by the installed version of pywinrm
for arg in unsupported_args:
display.warning("ansible_winrm_{0} unsupported by pywinrm (is an up-to-date version of pywinrm installed?)".format(arg))
# pass through matching kwargs, excluding the list we want to treat specially
for arg in passed_winrm_args.difference(internal_kwarg_mask).intersection(supported_winrm_args):
self._winrm_kwargs[arg] = hostvars['ansible_winrm_%s' % arg]
# Until pykerberos has enough goodies to implement a rudimentary kinit/klist, simplest way is to let each connection
# auth itself with a private CCACHE.
def _kerb_auth(self, principal, password):
if password is None:
password = ""
self._kerb_ccache = tempfile.NamedTemporaryFile()
display.vvvvv("creating Kerberos CC at %s" % self._kerb_ccache.name)
krb5ccname = "FILE:%s" % self._kerb_ccache.name
krbenv = dict(KRB5CCNAME=krb5ccname)
os.environ["KRB5CCNAME"] = krb5ccname
kinit_cmdline = [self._kinit_cmd, principal]
display.vvvvv("calling kinit for principal %s" % principal)
p = subprocess.Popen(kinit_cmdline, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=krbenv)
# TODO: unicode/py3
stdout, stderr = p.communicate(password + b'\n')
if p.returncode != 0:
raise AnsibleConnectionFailure("Kerberos auth failure: %s" % stderr.strip())
display.vvvvv("kinit succeeded for principal %s" % principal)
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
display.vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" %
(self._winrm_user, self._winrm_port, self._winrm_host), host=self._winrm_host)
netloc = '%s:%d' % (self._winrm_host, self._winrm_port)
endpoint = urlunsplit((self._winrm_scheme, netloc, self._winrm_path, '', ''))
errors = []
for transport in self._winrm_transport:
if transport == 'kerberos':
if not HAVE_KERBEROS:
errors.append('kerberos: the python kerberos library is not installed')
continue
if self._kerb_managed:
self._kerb_auth(self._winrm_user, self._winrm_pass)
display.vvvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint), host=self._winrm_host)
try:
protocol = Protocol(endpoint, transport=transport, **self._winrm_kwargs)
# open the shell from connect so we know we're able to talk to the server
if not self.shell_id:
self.shell_id = protocol.open_shell(codepage=65001) # UTF-8
display.vvvvv('WINRM OPEN SHELL: %s' % self.shell_id, host=self._winrm_host)
return protocol
except Exception as e:
err_msg = to_text(e).strip()
if re.search(to_text(r'Operation\s+?timed\s+?out'), err_msg, re.I):
raise AnsibleError('the connection attempt timed out')
m = re.search(to_text(r'Code\s+?(\d{3})'), err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
err_msg = 'the specified credentials were rejected by the server'
elif code == 411:
return protocol
errors.append(u'%s: %s' % (transport, err_msg))
display.vvvvv(u'WINRM CONNECTION ERROR: %s\n%s' % (err_msg, to_text(traceback.format_exc())), host=self._winrm_host)
if errors:
raise AnsibleConnectionFailure(', '.join(map(to_native, errors)))
else:
raise AnsibleError('No transport found for WinRM connection')
def _winrm_send_input(self, protocol, shell_id, command_id, stdin, eof=False):
rq = {'env:Envelope': protocol._get_soap_header(
resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',
action='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/Send',
shell_id=shell_id)}
stream = rq['env:Envelope'].setdefault('env:Body', {}).setdefault('rsp:Send', {})\
.setdefault('rsp:Stream', {})
stream['@Name'] = 'stdin'
stream['@CommandId'] = command_id
stream['#text'] = base64.b64encode(to_bytes(stdin))
if eof:
stream['@End'] = 'true'
protocol.send_message(xmltodict.unparse(rq))
def _winrm_exec(self, command, args=(), from_exec=False, stdin_iterator=None):
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
if from_exec:
display.vvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
else:
display.vvvvvv("WINRM EXEC %r %r" % (command, args), host=self._winrm_host)
command_id = None
try:
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
# TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
# comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
self._winrm_send_input(self.protocol, self.shell_id, command_id, data, eof=is_last)
except Exception as ex:
from traceback import format_exc
display.warning("FATAL ERROR DURING FILE TRANSFER: %s" % format_exc(ex))
stdin_push_failed = True
if stdin_push_failed:
raise AnsibleError('winrm send_input failed')
# NB: this can hang if the receiver is still running (eg, network failed a Send request but the server's still happy).
# FUTURE: Consider adding pywinrm status check/abort operations to see if the target is still running after a failure.
resptuple = self.protocol.get_command_output(self.shell_id, command_id)
# ensure stdout/stderr are text for py3
# FUTURE: this should probably be done internally by pywinrm
response = Response(tuple(to_text(v) if isinstance(v, binary_type) else v for v in resptuple))
# TODO: check result from response and set stdin_push_failed if we have nonzero
if from_exec:
display.vvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
else:
display.vvvvvv('WINRM RESULT %r' % to_text(response), host=self._winrm_host)
display.vvvvvv('WINRM STDOUT %s' % to_text(response.std_out), host=self._winrm_host)
display.vvvvvv('WINRM STDERR %s' % to_text(response.std_err), host=self._winrm_host)
if stdin_push_failed:
raise AnsibleError('winrm send_input failed; \nstdout: %s\nstderr %s' % (response.std_out, response.std_err))
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def _connect(self):
if not HAS_WINRM:
raise AnsibleError("winrm or requests is not installed: %s" % WINRM_IMPORT_ERR)
elif not HAS_XMLTODICT:
raise AnsibleError("xmltodict is not installed: %s" % XMLTODICT_IMPORT_ERR)
super(Connection, self)._connect()
if not self.protocol:
self.protocol = self._winrm_connect()
self._connected = True
return self
def _reset(self): # used by win_reboot (and any other action that might need to bounce the state)
self.protocol = None
self.shell_id = None
self._connect()
def _create_raw_wrapper_payload(self, cmd, environment=None):
environment = {} if environment is None else environment
payload = {
'module_entry': to_text(base64.b64encode(to_bytes(cmd))),
'powershell_modules': {},
'actions': ['exec'],
'exec': to_text(base64.b64encode(to_bytes(leaf_exec))),
'environment': environment
}
return json.dumps(payload)
def _wrapper_payload_stream(self, payload, buffer_size=200000):
payload_bytes = to_bytes(payload)
byte_count = len(payload_bytes)
for i in range(0, byte_count, buffer_size):
yield payload_bytes[i:i + buffer_size], i + buffer_size >= byte_count
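# Illustrative chunking behaviour (worked example, not from the source):
# a 450000-byte payload with the default 200000-byte buffer yields three
# (data, is_last) pairs sized 200000/200000/50000 with is_last flags
# False/False/True, so _winrm_send_input can raise the End signal on the
# final Send request.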
def exec_command(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = self._shell._encode_script(cmd, as_list=True, strict_mode=False, preserve_rc=False)
# TODO: display something meaningful here
display.vvv("EXEC (via pipeline wrapper)")
stdin_iterator = None
if in_data:
stdin_iterator = self._wrapper_payload_stream(in_data)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True, stdin_iterator=stdin_iterator)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except Exception:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def exec_command_old(self, cmd, in_data=None, sudoable=True):
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
cmd_parts = shlex.split(to_bytes(cmd), posix=False)
cmd_parts = map(to_text, cmd_parts)
script = None
cmd_ext = cmd_parts and self._shell._unquote(cmd_parts[0]).lower()[-4:] or ''
# Support running .ps1 files (via script/raw).
if cmd_ext == '.ps1':
script = '& %s' % cmd
# Support running .bat/.cmd files; change back to the default system encoding instead of UTF-8.
elif cmd_ext in ('.bat', '.cmd'):
script = '[System.Console]::OutputEncoding = [System.Text.Encoding]::Default; & %s' % cmd
# Encode the command if not already encoded; supports running simple PowerShell commands via raw.
elif '-EncodedCommand' not in cmd_parts:
script = cmd
if script:
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = to_text(base64.b64decode(encoded_cmd).decode('utf-16-le'))
display.vvv("EXEC %s" % decoded_cmd, host=self._winrm_host)
else:
display.vvv("EXEC %s" % cmd, host=self._winrm_host)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception:
traceback.print_exc()
raise AnsibleConnectionFailure("failed to exec cmd %s" % cmd)
result.std_out = to_bytes(result.std_out)
result.std_err = to_bytes(result.std_err)
# parse just stderr from CLIXML output
if self.is_clixml(result.std_err):
try:
result.std_err = self.parse_clixml_stream(result.std_err)
except Exception:
# unsure if we're guaranteed a valid xml doc- use raw output in case of error
pass
return (result.status_code, result.std_out, result.std_err)
def is_clixml(self, value):
return value.startswith(b"#< CLIXML")
# hacky way to pull a single named stream (default 'Error', i.e. stderr) out of CLIXML- not always sure of doc framing here, so use with care
def parse_clixml_stream(self, clixml_doc, stream_name='Error'):
clear_xml = clixml_doc.replace(b'#< CLIXML\r\n', b'')
doc = xmltodict.parse(clear_xml)
lines = [l.get('#text', '').replace('_x000D__x000A_', '') for l in doc.get('Objs', {}).get('S', {}) if l.get('@S') == stream_name]
return '\r\n'.join(lines)
# FUTURE: determine buffer size at runtime via remote winrm config?
def _put_file_stdin_iterator(self, in_path, out_path, buffer_size=250000):
in_size = os.path.getsize(to_bytes(in_path, errors='surrogate_or_strict'))
offset = 0
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
for out_data in iter((lambda: in_file.read(buffer_size)), b''):
offset += len(out_data)
self._display.vvvvv('WINRM PUT "%s" to "%s" (offset=%d size=%d)' % (in_path, out_path, offset, len(out_data)), host=self._winrm_host)
# yes, we're double-encoding over the wire in this case- we want to ensure that the data shipped to the end PS pipeline is still b64-encoded
b64_data = base64.b64encode(out_data) + b'\r\n'
# cough up the data, as well as an indicator if this is the last chunk so winrm_send knows to set the End signal
yield b64_data, (in_file.tell() == in_size)
if offset == 0: # empty file, return an empty buffer + eof to close it
yield "", True
def put_file(self, in_path, out_path):
super(Connection, self).put_file(in_path, out_path)
out_path = self._shell._unquote(out_path)
display.vvv('PUT "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound('file or module does not exist: "%s"' % in_path)
script_template = u'''
begin {{
$path = '{0}'
$DebugPreference = "Continue"
$ErrorActionPreference = "Stop"
Set-StrictMode -Version 2
$fd = [System.IO.File]::Create($path)
$sha1 = [System.Security.Cryptography.SHA1CryptoServiceProvider]::Create()
$bytes = @() #initialize for empty file case
}}
process {{
$bytes = [System.Convert]::FromBase64String($input)
$sha1.TransformBlock($bytes, 0, $bytes.Length, $bytes, 0) | Out-Null
$fd.Write($bytes, 0, $bytes.Length)
}}
end {{
$sha1.TransformFinalBlock($bytes, 0, 0) | Out-Null
$hash = [System.BitConverter]::ToString($sha1.Hash).Replace("-", "").ToLowerInvariant()
$fd.Close()
Write-Output "{{""sha1"":""$hash""}}"
}}
'''
script = script_template.format(self._shell._escape(out_path))
cmd_parts = self._shell._encode_script(script, as_list=True, strict_mode=False, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], stdin_iterator=self._put_file_stdin_iterator(in_path, out_path))
# TODO: improve error handling
if result.status_code != 0:
raise AnsibleError(to_native(result.std_err))
put_output = json.loads(result.std_out)
remote_sha1 = put_output.get("sha1")
if not remote_sha1:
raise AnsibleError("Remote sha1 was not returned")
local_sha1 = secure_hash(in_path)
if not remote_sha1 == local_sha1:
raise AnsibleError("Remote sha1 hash {0} does not match local hash {1}".format(to_native(remote_sha1), to_native(local_sha1)))
def fetch_file(self, in_path, out_path):
super(Connection, self).fetch_file(in_path, out_path)
in_path = self._shell._unquote(in_path)
out_path = out_path.replace('\\', '/')
display.vvv('FETCH "%s" TO "%s"' % (in_path, out_path), host=self._winrm_host)
buffer_size = 2**19 # 0.5MB chunks
makedirs_safe(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = New-Object IO.FileStream("%(path)s", [System.IO.FileMode]::Open, [System.IO.FileAccess]::Read, [IO.FileShare]::ReadWrite);
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=self._shell._escape(in_path), offset=offset)
display.vvvvv('WINRM FETCH "%s" to "%s" (offset=%d)' % (in_path, out_path, offset), host=self._winrm_host)
cmd_parts = self._shell._encode_script(script, as_list=True, preserve_rc=False)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(to_native(result.std_err))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
makedirs_safe(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(to_bytes(out_path, errors='surrogate_or_strict')):
break
out_file = open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise AnsibleError('failed to transfer file to "%s"' % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
display.vvvvv('WINRM CLOSE SHELL: %s' % self.shell_id, host=self._winrm_host)
self.protocol.close_shell(self.shell_id)
self.shell_id = None
self.protocol = None
self._connected = False
| gpl-3.0 |
DarKnight--/owtf | framework/interface/html/filter/sanitiser.py | 2 | 3574 | #!/usr/bin/env python
'''
The sanitiser module allows the rest of the framework to sanitise input
Requires lxml, installation instructions here: http://lxml.de/installation.html
In Backtrack 5: /usr/bin/easy_install --allow-hosts=lxml.de,*.python.org lxml
Tip for Ubuntu courtesy of Mario Heiderich: Python2.7-dev is needed to compile this lib properly
Clean HTML reference: http://lxml.de/lxmlhtml.html#cleaning-up-html
Library documentation: http://lxml.de/api/lxml.html.clean.Cleaner-class.html
'''
import sys
from lxml.html.clean import Cleaner, clean_html
import lxml.html
from urlparse import urlparse
ALLOWED_TAGS = ('html', 'body', 'a', 'p', 'h1', 'h2', 'h3', 'h4', 'div', 'span', 'i', 'b', 'u',
'table', 'tbody', 'tr', 'td', 'th', 'strong', 'em', 'sup', 'sub', 'ul', 'ol', 'li')
ALLOWED_URL_SCHEMES = ['http', 'https', 'ftp', 'mailto', 'sftp', 'shttp']
class HTMLSanitiser:
def __init__(self):
self.Cleaner = Cleaner(scripts=False, javascript=False, comments=False, links=False, meta=True,
page_structure=False, processing_instructions=False, embedded=False, frames=False,
forms=False, annoying_tags=False, remove_unknown_tags=False, safe_attrs_only=True,
allow_tags=ALLOWED_TAGS)
def IsValidURL(self, URL):
ParsedURL = urlparse(URL)
return (ParsedURL.scheme in ALLOWED_URL_SCHEMES)
def CleanURLs(self, HTML):
# Largely Inspired from: http://stackoverflow.com/questions/5789127/how-to-replace-links-using-lxml-and-
# iterlinks
ParsedHTML = lxml.html.document_fromstring(HTML)
for Element, Attribute, Link, Pos in ParsedHTML.iterlinks():
if not self.IsValidURL(Link):
Element.set(Attribute, Link.replace(Link, ''))
return lxml.html.tostring(ParsedHTML)
def CleanThirdPartyHTML(self, HTML):
# 1st clean URLs, 2nd get rid of basics, 3rd apply white list
return self.Cleaner.clean_html(clean_html(self.CleanURLs(HTML)))
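# Hedged illustration of the expected effect (assumed, not asserted by
# this module): given something like
#   CleanThirdPartyHTML('<p><a href="javascript:x()">hi</a><script>evil()</script></p>')
# the <script> element should be stripped by clean_html and the
# javascript: href emptied by CleanURLs, since 'javascript' is not in
# ALLOWED_URL_SCHEMES.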
def TestPrint(self, TestInfo, TestOutput):
TestInfo += "_" * (60 - len(TestInfo)) # Make info visually easier to compare
print TestInfo + TestOutput
# For testing as a standalone script:
if 'sanitiser.py' in sys.argv[0]: # When called as a script run tests
Sanitiser = HTMLSanitiser()
Input = sys.stdin.read() # Read from stdin so that we can cat whatever | sanitiser => easier to test in bulk
Sanitiser.TestPrint("raw input=", Input)
Sanitiser.TestPrint("Filter 1 - clean_html=", clean_html(Input))
Sanitiser.TestPrint("Filter 2 - white_list=", Sanitiser.Cleaner.clean_html(Input))
Sanitiser.TestPrint("Filter 3 - clean_html(white_list) =", clean_html(Sanitiser.Cleaner.clean_html(Input)))
Sanitiser.TestPrint("Filter 4 = cleanURLs(clean_html(white_list)) =", Sanitiser.CleanURLs(clean_html(
Sanitiser.Cleaner.clean_html(Input))))
Sanitiser.TestPrint("Filter 5 = cleanURLs(white_list(clean_html)) =", Sanitiser.CleanURLs(
Sanitiser.Cleaner.clean_html(clean_html(Input))))
Sanitiser.TestPrint("Filter 6 = white_list(clean_html(cleanURLs))", Sanitiser.Cleaner.clean_html(clean_html(
Sanitiser.CleanURLs(Input))))
Sanitiser.TestPrint("Latest - Step 1 - CleanURLs =", Sanitiser.CleanURLs(Input))
Sanitiser.TestPrint("Latest - Step 2 - clean_html(CleanURLs) =", clean_html(Sanitiser.CleanURLs(Input)))
Sanitiser.TestPrint("Latest - Step 3 - white_list(clean_html(CleanURLs)) =", Sanitiser.CleanThirdPartyHTML(Input))
| bsd-3-clause |
CTSRD-SOAAP/chromium-42.0.2311.135 | native_client/buildbot/buildbot_selector.py | 1 | 18629 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import subprocess
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
python = sys.executable
bash = '/bin/bash'
echo = 'echo'
BOT_ASSIGNMENT = {
######################################################################
# Buildbots.
######################################################################
'xp-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'xp-bare-newlib-opt':
python + ' buildbot\\buildbot_standard.py opt 32 newlib --no-gyp',
'xp-bare-glibc-opt':
python + ' buildbot\\buildbot_standard.py opt 32 glibc --no-gyp',
'precise-64-validator-opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
# Clang.
'precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'mac10.7-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# PNaCl.
'oneiric_32-newlib-arm_hw-pnacl-panda-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-dbg',
'oneiric_32-newlib-arm_hw-pnacl-panda-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-opt',
'precise_64-newlib-arm_qemu-pnacl-dbg':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-dbg',
'precise_64-newlib-arm_qemu-pnacl-opt':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-opt',
'precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'mac10.8-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'win7-64-newlib-opt-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
# PNaCl Spec
'precise_64-newlib-arm_qemu-pnacl-buildonly-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-buildonly',
'oneiric_32-newlib-arm_hw-pnacl-panda-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-arm-hw',
'lucid_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8632',
'lucid_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-x8664',
# NaCl Spec
'lucid_64-newlib-x86_32-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8632',
'lucid_64-newlib-x86_64-spec':
bash + ' buildbot/buildbot_spec2k.sh nacl-x8664',
# Android bots.
'precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Valgrind bots.
'precise-64-newlib-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
'precise-64-glibc-dbg-valgrind':
echo + ' "Valgrind bots are disabled: see '
'https://code.google.com/p/nativeclient/issues/detail?id=3158"',
# Coverage.
'mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'xp-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
######################################################################
# Trybots.
######################################################################
'nacl-precise64_validator_opt':
python + ' buildbot/buildbot_standard.py opt 64 glibc --validator',
'nacl-precise64_newlib_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh newlib',
'nacl-precise64_glibc_dbg_valgrind':
bash + ' buildbot/buildbot_valgrind.sh glibc',
# Android trybots.
'nacl-precise64-newlib-dbg-android':
python + ' buildbot/buildbot_standard.py dbg arm newlib --android',
'nacl-precise64-newlib-opt-android':
python + ' buildbot/buildbot_standard.py opt arm newlib --android',
# Coverage trybots.
'nacl-mac10.6-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-precise-64-32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
'nacl-precise-64-64-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 64 newlib --coverage'),
'nacl-win32-newlib-coverage':
python + (' buildbot/buildbot_standard.py '
'coverage 32 newlib --coverage'),
# Clang trybots.
'nacl-precise_64-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 64 newlib --clang',
'nacl-mac10.6-newlib-dbg-clang':
python + ' buildbot/buildbot_standard.py dbg 32 newlib --clang',
# ASan.
'nacl-precise_64-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 64 newlib --asan',
'nacl-mac10.7-newlib-dbg-asan':
python + ' buildbot/buildbot_standard.py opt 32 newlib --asan',
# Pnacl main trybots
'nacl-precise_64-newlib-arm_qemu-pnacl':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu arm',
'nacl-precise_64-newlib-x86_32-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 32 pnacl',
'nacl-precise_64-newlib-x86_64-pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-precise_64-newlib-mips-pnacl':
echo + ' "TODO(mseaborn): add mips"',
'nacl-arm_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-try',
'nacl-arm_hw_opt_panda':
bash + ' buildbot/buildbot_pnacl.sh mode-buildbot-arm-hw-try',
'nacl-mac10.8_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
'nacl-win7_64_newlib_opt_pnacl':
python + ' buildbot/buildbot_pnacl.py opt 64 pnacl',
# Pnacl spec2k trybots
'nacl-precise_64-newlib-x86_32-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8632',
'nacl-precise_64-newlib-x86_64-pnacl-spec':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-x8664',
'nacl-arm_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-buildonly',
'nacl-arm_hw_perf_panda':
bash + ' buildbot/buildbot_spec2k.sh pnacl-trybot-arm-hw',
# Toolchain glibc.
'precise64-glibc': bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'mac-glibc': bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'win7-glibc': 'buildbot\\buildbot_windows-glibc-makefile.bat',
# Toolchain newlib x86.
'win7-toolchain_x86': 'buildbot\\buildbot_toolchain_win.bat',
'mac-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh mac',
'precise64-toolchain_x86': bash + ' buildbot/buildbot_toolchain.sh linux',
# Toolchain newlib arm.
'win7-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'mac-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build',
'precise64-toolchain_arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' --test_toolchain nacl_arm_newlib'
' toolchain_build',
# BIONIC toolchain builders.
'precise64-toolchain_bionic':
python +
' buildbot/buildbot_toolchain_build.py'
' --buildbot'
' toolchain_build_bionic',
# Pnacl toolchain builders.
'linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-32',
'linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot --tests-arch x86-64',
'mac-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
'win-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --buildbot',
# Pnacl toolchain testers
'linux-pnacl-x86_64-tests-x86_64':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-64',
'linux-pnacl-x86_64-tests-x86_32':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot x86-32',
'linux-pnacl-x86_64-tests-arm':
bash + ' buildbot/buildbot_pnacl_toolchain_tests.sh tc-test-bot arm',
# MIPS toolchain buildbot.
'linux-pnacl-x86_32-tests-mips':
bash + ' buildbot/buildbot_pnacl.sh mode-trybot-qemu mips32',
# Toolchain trybots.
'nacl-toolchain-precise64-newlib':
bash + ' buildbot/buildbot_toolchain.sh linux',
'nacl-toolchain-mac-newlib': bash + ' buildbot/buildbot_toolchain.sh mac',
'nacl-toolchain-win7-newlib': 'buildbot\\buildbot_toolchain_win.bat',
'nacl-toolchain-precise64-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' --test_toolchain nacl_arm_newlib'
' toolchain_build',
'nacl-toolchain-mac-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-win7-newlib-arm':
python +
' buildbot/buildbot_toolchain_build.py'
' --trybot'
' toolchain_build',
'nacl-toolchain-precise64-glibc':
bash + ' buildbot/buildbot_linux-glibc-makefile.sh',
'nacl-toolchain-mac-glibc':
bash + ' buildbot/buildbot_mac-glibc-makefile.sh',
'nacl-toolchain-win7-glibc':
'buildbot\\buildbot_windows-glibc-makefile.bat',
# Pnacl toolchain trybots.
'nacl-toolchain-linux-pnacl-x86_32':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-32',
'nacl-toolchain-linux-pnacl-x86_64':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64',
'nacl-toolchain-linux-pnacl-mips': echo + ' "TODO(mseaborn)"',
'nacl-toolchain-mac-pnacl-x86_32':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
'nacl-toolchain-win7-pnacl-x86_64':
python + ' buildbot/buildbot_pnacl_toolchain.py --trybot',
# Sanitizer Pnacl toolchain trybots.
'nacl-toolchain-asan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize address --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize memory not understood.
'nacl-toolchain-msan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize memory --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize thread not understood.
'nacl-toolchain-tsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize thread --skip-tests',
# TODO(kschimpf): Bot is currently broken: --sanitize undefined not understood.
'nacl-toolchain-ubsan':
python +
' buildbot/buildbot_pnacl_toolchain.py --trybot --tests-arch x86-64 '
' --sanitize undefined --skip-tests',
}
special_for_arm = [
'win7_64',
'win7-64',
'lucid-64',
'lucid64',
'precise-64',
'precise64'
]
for platform in [
'vista', 'win7', 'win8', 'win',
'mac10.6', 'mac10.7', 'mac10.8',
'lucid', 'precise'] + special_for_arm:
if platform in special_for_arm:
arch_variants = ['arm']
else:
arch_variants = ['', '32', '64', 'arm']
for arch in arch_variants:
arch_flags = ''
real_arch = arch
arch_part = '-' + arch
# Disable GYP build for win32 bots and arm cross-builders. In this case
# "win" means Windows XP, not Vista, Windows 7, etc.
#
# Building via GYP always builds all toolchains by default, but the win32
# XP pnacl builds are pathologically slow (e.g. ~38 seconds per compile on
# the nacl-win32_glibc_opt trybot). There are other builders that test
# Windows builds via gyp, so the reduced test coverage should be slight.
if arch == 'arm' or (platform == 'win' and arch == '32'):
arch_flags += ' --no-gyp'
if platform == 'win7' and arch == '32':
arch_flags += ' --no-goma'
if arch == '':
arch_part = ''
real_arch = '32'
# Test with Breakpad tools only on basic Linux builds.
if sys.platform.startswith('linux'):
arch_flags += ' --use-breakpad-tools'
for mode in ['dbg', 'opt']:
for libc in ['newlib', 'glibc']:
# Buildbots.
for bare in ['', '-bare']:
name = platform + arch_part + bare + '-' + libc + '-' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
# Trybots
for arch_sep in ['', '-', '_']:
name = 'nacl-' + platform + arch_sep + arch + '_' + libc + '_' + mode
assert name not in BOT_ASSIGNMENT, name
BOT_ASSIGNMENT[name] = (
python + ' buildbot/buildbot_standard.py ' +
mode + ' ' + real_arch + ' ' + libc + arch_flags)
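# Illustrative note (not in the source): the loops above synthesize keys
# such as 'precise-64-newlib-dbg' (buildbot) and 'nacl-precise64_newlib_opt'
# (trybot), each mapping to an invocation like
#   python buildbot/buildbot_standard.py opt 64 newlib --use-breakpad-tools
# where the trailing flags depend on the platform/arch logic above.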
def EscapeJson(data):
return '"' + json.dumps(data).replace('"', r'\"') + '"'
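# Worked example (illustrative): EscapeJson({'buildername': 'bot'}) returns
# the shell-safe string "{\"buildername\": \"bot\"}", i.e. the JSON rendering
# wrapped in literal double quotes with the inner quotes backslash-escaped.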
def HasNoPerfResults(builder):
if 'pnacl-buildonly-spec' in builder:
return True
return builder in [
'mac-toolchain_arm',
'win-pnacl-x86_32',
'linux-pnacl-x86_32-tests-mips',
'precise64-toolchain_bionic',
]
def Main():
builder = os.environ.get('BUILDBOT_BUILDERNAME')
build_number = os.environ.get('BUILDBOT_BUILDNUMBER')
build_revision = os.environ.get('BUILDBOT_GOT_REVISION',
os.environ.get('BUILDBOT_REVISION'))
slave_type = os.environ.get('BUILDBOT_SLAVE_TYPE')
cmd = BOT_ASSIGNMENT.get(builder)
if not cmd:
sys.stderr.write('ERROR - unset/invalid builder name\n')
sys.exit(1)
env = os.environ.copy()
# Don't write out .pyc files because in cases in which files move around or
# the PYTHONPATH / sys.path change, old .pyc files can be mistakenly used.
# This avoids the need for admin changes on the bots in this case.
env['PYTHONDONTWRITEBYTECODE'] = '1'
# Use .boto file from home-dir instead of buildbot supplied one.
if 'AWS_CREDENTIAL_FILE' in env:
del env['AWS_CREDENTIAL_FILE']
alt_boto = os.path.expanduser('~/.boto')
if os.path.exists(alt_boto):
env['BOTO_CONFIG'] = alt_boto
cwd_drive = os.path.splitdrive(os.getcwd())[0]
env['GSUTIL'] = cwd_drive + '/b/build/third_party/gsutil/gsutil'
# When running from cygwin, we sometimes want to use a native python.
# The native python will use the depot_tools version by invoking python.bat.
if pynacl.platform.IsWindows():
env['NATIVE_PYTHON'] = 'python.bat'
else:
env['NATIVE_PYTHON'] = 'python'
if sys.platform == 'win32':
# If the temp directory is not on the same drive as the working directory,
# there can be random failures when cleaning up temp directories, so use
# a directory on the current drive. Use __file__ here instead of os.getcwd()
# because toolchain_main picks its working directories relative to __file__
filedrive, _ = os.path.splitdrive(__file__)
tempdrive, _ = os.path.splitdrive(env['TEMP'])
if tempdrive != filedrive:
env['TEMP'] = filedrive + '\\temp'
env['TMP'] = env['TEMP']
if not os.path.exists(env['TEMP']):
os.mkdir(env['TEMP'])
# Run through runtest.py to get upload of perf data.
build_properties = {
'buildername': builder,
'mastername': 'client.nacl',
'buildnumber': str(build_number),
}
factory_properties = {
'perf_id': builder,
'show_perf_results': True,
'step_name': 'naclperf', # Seems unused, but is required.
'test_name': 'naclperf', # Really "Test Suite"
}
# Locate the buildbot build directory by relative path, as its absolute
# location varies by platform and configuration.
buildbot_build_dir = os.path.join(* [os.pardir] * 4)
runtest = os.path.join(buildbot_build_dir, 'scripts', 'slave', 'runtest.py')
# For builds with an actual build number, require that the script is present
# (i.e. that we're run from an actual buildbot).
if build_number is not None and not os.path.exists(runtest):
raise Exception('runtest.py script not found at: %s\n' % runtest)
cmd_exe = cmd.split(' ')[0]
cmd_exe_ext = os.path.splitext(cmd_exe)[1]
# Do not wrap these types of builds with runtest.py:
# - tryjobs
# - commands beginning with 'echo '
# - batch files
# - debug builders
# - builds with no perf tests
if not (slave_type == 'Trybot' or
cmd_exe == echo or
cmd_exe_ext == '.bat' or
'-dbg' in builder or
HasNoPerfResults(builder)):
# Perf dashboards are now generated by output scraping that occurs in the
# script runtest.py, which lives in the buildbot repository.
# Non-trybot builds should be run through runtest, allowing it to upload
# perf data if relevant.
cmd = ' '.join([
python, runtest,
'--revision=' + build_revision,
'--build-dir=src/out',
'--results-url=https://chromeperf.appspot.com',
'--annotate=graphing',
'--no-xvfb', # We provide our own xvfb invocation.
'--factory-properties', EscapeJson(factory_properties),
'--build-properties', EscapeJson(build_properties),
cmd,
])
print "%s runs: %s\n" % (builder, cmd)
retcode = subprocess.call(cmd, env=env, shell=True)
sys.exit(retcode)
if __name__ == '__main__':
Main()
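# --- Illustrative sketch (added; not part of the original script) ---
# For a hypothetical non-try perf builder, the wrapping above produces a
# command of roughly this shape ('EscapeJson' is defined earlier in this
# file and is assumed here to JSON-encode and shell-quote its argument):
#
#   python ../../../../scripts/slave/runtest.py \
#       --revision=<BUILDBOT_GOT_REVISION> \
#       --build-dir=src/out \
#       --results-url=https://chromeperf.appspot.com \
#       --annotate=graphing \
#       --no-xvfb \
#       --factory-properties '<escaped json>' \
#       --build-properties '<escaped json>' \
#       <original builder command>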
| bsd-3-clause |
adrianpaesani/odoo-argentina | l10n_ar_invoice_sale/__openerp__.py | 2 | 1545 | # -*- coding: utf-8 -*-
{
'name': 'Argentinian Sale Total Fields',
'version': '1.0',
'category': 'Localization/Argentina',
'sequence': 14,
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'license': 'AGPL-3',
'summary': '',
'description': """
Argentinian Sale Total Fields
=============================
Add fields to sale orders so that you can print sale orders with VAT included or not, depending on VAT responsibilities.
NOTES TO IMPROVE AND TRANSLATE:
* To use this functionality you have to go to the company, "config" tab, "sale" group, and set whether you want it and the default value it should take when there is no match (for this use case it would be "no_discriminated_default": by default taxes are not discriminated, i.e. if the partner has nothing set, taxes are not discriminated).
* Likewise, on quotations, if you want to change it, the second tab has a "vat discriminated" field, below the fiscal position, where it can be changed.
""",
'depends': [
'sale',
'l10n_ar_invoice',
],
'external_dependencies': {
},
'data': [
'security/invoice_sale_security.xml',
'sale_view.xml',
'report/invoice_report_view.xml',
'res_company_view.xml',
# 'res_config_view.xml',
'security/ir.model.access.csv',
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
omemo/python-omemo | src/omemo/liteprekeystore.py | 1 | 2577 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Tarek Galal <tare2.galal@gmail.com>
#
# This file is part of Gajim-OMEMO plugin.
#
# The Gajim-OMEMO plugin is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Gajim-OMEMO is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# the Gajim-OMEMO plugin. If not, see <http://www.gnu.org/licenses/>.
#
from axolotl.state.prekeyrecord import PreKeyRecord
from axolotl.state.prekeystore import PreKeyStore
class LitePreKeyStore(PreKeyStore):
def __init__(self, dbConn):
"""
:type dbConn: Connection
"""
self.dbConn = dbConn
dbConn.execute("CREATE TABLE IF NOT EXISTS prekeys(" +
"_id INTEGER PRIMARY KEY AUTOINCREMENT," +
"prekey_id INTEGER UNIQUE, sent_to_server BOOLEAN, " +
" record BLOB);")
def loadPreKey(self, preKeyId):
q = "SELECT record FROM prekeys WHERE prekey_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (preKeyId, ))
result = cursor.fetchone()
if not result:
raise Exception("No such prekeyRecord!")
return PreKeyRecord(serialized=result[0])
def loadPendingPreKeys(self):
q = "SELECT record FROM prekeys"
cursor = self.dbConn.cursor()
cursor.execute(q)
result = cursor.fetchall()
return [PreKeyRecord(serialized=r[0]) for r in result]
def storePreKey(self, preKeyId, preKeyRecord):
# self.removePreKey(preKeyId)
q = "INSERT INTO prekeys (prekey_id, record) VALUES(?,?)"
cursor = self.dbConn.cursor()
cursor.execute(q, (preKeyId, preKeyRecord.serialize()))
self.dbConn.commit()
def containsPreKey(self, preKeyId):
q = "SELECT record FROM prekeys WHERE prekey_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (preKeyId, ))
return cursor.fetchone() is not None
def removePreKey(self, preKeyId):
q = "DELETE FROM prekeys WHERE prekey_id = ?"
cursor = self.dbConn.cursor()
cursor.execute(q, (preKeyId, ))
self.dbConn.commit()
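# --- Usage sketch (added; not part of the original module) ---
# A minimal, hedged example of driving this store with an in-memory
# database. KeyHelper.generatePreKeys() is assumed from python-axolotl;
# check the installed axolotl version for the exact API.
#
# import sqlite3
# from axolotl.util.keyhelper import KeyHelper
#
# conn = sqlite3.connect(':memory:')
# store = LitePreKeyStore(conn)
# for record in KeyHelper.generatePreKeys(1, 10):
#     store.storePreKey(record.getId(), record)
# assert store.containsPreKey(1)
# loaded = store.loadPreKey(1)   # a PreKeyRecord
# store.removePreKey(1)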
| gpl-3.0 |
kevinmora94/proyectoDrupal | web/themes/custom/proyectofinal/node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
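# Illustrative examples (added; not part of the original module):
#   _QuoteWin32CommandLineArgs(['a b', 'plain'])  ->  ['"a b"', 'plain']
#   _QuoteWin32CommandLineArgs(['say "hi"'])      ->  ['"say ""hi"""']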
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
environment: dict of environment variable names to values. (optional)
working_directory: directory in which to run the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
| gpl-2.0 |
diegosarmentero/ninja-ide | ninja_ide/gui/dialogs/wizard_new_project.py | 6 | 1116 | # -*- coding: utf-8 -*-
#
# This file is part of NINJA-IDE (http://ninja-ide.org).
#
# NINJA-IDE is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
#
# NINJA-IDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NINJA-IDE; If not, see <http://www.gnu.org/licenses/>.
# Get project types.
# Project type is the language.
# Should have a subtype, which is pyqt, ninja plugin, pytk, etc...
# We provide the first window of the wizard; to do this everyone will inherit
# from us.
from PyQt4.QtGui import QDialog
from PyQt4.QtCore import Qt
class NewProjectTypeChooser(QDialog):
def __init__(self, parent=None):
super(NewProjectTypeChooser, self).__init__(parent, Qt.Dialog)
| gpl-3.0 |
devstructure/blueprint | blueprint/io/__init__.py | 4 | 4378 | import logging
import sys
from blueprint import Blueprint
from blueprint import cfg
from blueprint import git
import http
def pull(server, secret, name):
"""
Pull a blueprint from the secret and name on the configured server.
"""
r = http.get('/{0}/{1}'.format(secret, name), server=server)
if 200 == r.status:
b = Blueprint.load(r, name)
for filename in b.sources.itervalues():
logging.info('fetching source tarballs - this may take a while')
r = http.get('/{0}/{1}/{2}'.format(secret, name, filename),
server=server)
if 200 == r.status:
f = None
try:
f = open(filename, 'w')
f.write(r.read())
except (IOError, OSError):
logging.error('could not open {0}'.format(filename))
return None
finally:
# Guard: 'f' is unbound when open() itself fails.
if f is not None:
f.close()
elif 404 == r.status:
logging.error('{0} not found'.format(filename))
return None
elif 502 == r.status:
logging.error('upstream storage service failed')
return None
else:
logging.error('unexpected {0} fetching tarball'.
format(r.status))
return None
return b
elif 404 == r.status:
logging.error('blueprint not found')
elif 502 == r.status:
logging.error('upstream storage service failed')
else:
logging.error('unexpected {0} fetching blueprint'.format(r.status))
return None
def push(server, secret, b):
"""
Push a blueprint to the secret and its name on the configured server.
"""
r = http.put('/{0}/{1}'.format(secret, b.name),
b.dumps(),
{'Content-Type': 'application/json'},
server=server)
if 202 == r.status:
pass
elif 400 == r.status:
logging.error('malformed blueprint')
return None
elif 502 == r.status:
logging.error('upstream storage service failed')
return None
else:
logging.error('unexpected {0} storing blueprint'.format(r.status))
return None
if b._commit is None and 0 < len(b.sources):
logging.warning('blueprint came from standard input - '
'source tarballs will not be pushed')
elif b._commit is not None:
tree = git.tree(b._commit)
for dirname, filename in sorted(b.sources.iteritems()):
blob = git.blob(tree, filename)
content = git.content(blob)
logging.info('storing source tarballs - this may take a while')
r = http.put('/{0}/{1}/{2}'.format(secret, b.name, filename),
content,
{'Content-Type': 'application/x-tar'},
server=server)
if 202 == r.status:
pass
elif 400 == r.status:
logging.error('tarball content or name not expected')
return None
elif 404 == r.status:
logging.error('blueprint not found')
return None
elif 413 == r.status:
logging.error('tarballs can\'t exceed 64MB')
return None
elif 502 == r.status:
logging.error('upstream storage service failed')
return None
else:
logging.error('unexpected {0} storing tarball'.
format(r.status))
return None
return '{0}/{1}/{2}'.format(server, secret, b.name)
def secret(server):
"""
Fetch a new secret from the configured server.
"""
r = http.get('/secret', server=server)
if 201 == r.status:
secret = r.read().rstrip()
logging.warning('created secret {0}'.format(secret))
logging.warning('to set as the default secret, store it in ~/.blueprint.cfg:')
sys.stderr.write('\n[io]\nsecret = {0}\nserver = {1}\n\n'.
format(secret, cfg.get('io', 'server')))
return secret
elif 502 == r.status:
logging.error('upstream storage service failed')
return None
else:
logging.error('unexpected {0} creating secret'.format(r.status))
return None
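# --- Usage sketch (added; not part of the original module) ---
# Hedged example of a push/pull round trip; 'server' is the value
# configured under [io] in ~/.blueprint.cfg, and 'b' is an existing
# Blueprint instance (see the blueprint package for how to obtain one).
#
# s = secret(server)            # mint a new secret once
# url = push(server, s, b)      # '<server>/<secret>/<name>', or None
# b2 = pull(server, s, b.name)  # a Blueprint, or None on failure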
| bsd-2-clause |
ryfeus/lambda-packs | Keras_tensorflow/source/tensorflow/contrib/learn/python/learn/dataframe/transforms/example_parser.py | 86 | 2370 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Transform that parses serialized tensorflow.Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.ops import parsing_ops
class ExampleParser(transform.TensorFlowTransform):
"""A Transform that parses serialized `tensorflow.Example` protos."""
def __init__(self, features):
"""Initialize `ExampleParser`.
The `features` argument must be an object that can be converted to an
`OrderedDict`. The keys should be strings and will be used to name the
output. Values should be either `VarLenFeature` or `FixedLenFeature`. If
`features` is a dict, it will be sorted by key.
Args:
features: An object that can be converted to an `OrderedDict` mapping
column names to feature definitions.
"""
super(ExampleParser, self).__init__()
if isinstance(features, dict):
self._ordered_features = collections.OrderedDict(sorted(features.items(
), key=lambda f: f[0]))
else:
self._ordered_features = collections.OrderedDict(features)
@property
def name(self):
return "ExampleParser"
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return list(self._ordered_features.keys())
@transform.parameter
def feature_definitions(self):
return self._ordered_features
def _apply_transform(self, input_tensors, **kwargs):
parsed_values = parsing_ops.parse_example(input_tensors[0],
features=self._ordered_features)
# pylint: disable=not-callable
return self.return_type(**parsed_values)
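# --- Usage sketch (added; not part of the original module) ---
# Hedged example of constructing the transform; the feature-spec classes
# come from tensorflow's parsing ops, and the transform is then applied to
# a single series of serialized Example protos, per the contrib.learn
# dataframe Transform convention.
#
# from tensorflow.python.framework import dtypes
# features = {
#     'age': parsing_ops.FixedLenFeature([1], dtype=dtypes.int64),
#     'tags': parsing_ops.VarLenFeature(dtype=dtypes.string),
# }
# parser = ExampleParser(features)  # output order is sorted: 'age', 'tags'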
| mit |
nitmir/django-cas-server | cas_server/admin.py | 1 | 6813 | # This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License version 3 for
# more details.
#
# You should have received a copy of the GNU General Public License version 3
# along with this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# (c) 2015-2016 Valentin Samir
"""module for the admin interface of the app"""
from .default_settings import settings
from django.contrib import admin
from .models import ServiceTicket, ProxyTicket, ProxyGrantingTicket, User, ServicePattern
from .models import Username, ReplaceAttributName, ReplaceAttributValue, FilterAttributValue
from .models import FederatedIendityProvider, FederatedUser, UserAttributes
from .forms import TicketForm
class BaseInlines(admin.TabularInline):
"""
Bases: :class:`django.contrib.admin.TabularInline`
Base class for inlines in the admin interface.
"""
#: This controls the number of extra forms the formset will display in addition to
#: the initial forms.
extra = 0
class UserAdminInlines(BaseInlines):
"""
Bases: :class:`BaseInlines`
Base class for inlines in :class:`UserAdmin` interface
"""
#: The form :class:`TicketForm<cas_server.forms.TicketForm>` used to display tickets.
form = TicketForm
#: Fields to display on a object that are read only (not editable).
readonly_fields = (
'validate', 'service', 'service_pattern',
'creation', 'renew', 'single_log_out', 'value'
)
#: Fields to display on a object.
fields = (
'validate', 'service', 'service_pattern',
'creation', 'renew', 'single_log_out'
)
class ServiceTicketInline(UserAdminInlines):
"""
Bases: :class:`UserAdminInlines`
:class:`ServiceTicket<cas_server.models.ServiceTicket>` in admin interface
"""
#: The model which the inline is using.
model = ServiceTicket
class ProxyTicketInline(UserAdminInlines):
"""
Bases: :class:`UserAdminInlines`
:class:`ProxyTicket<cas_server.models.ProxyTicket>` in admin interface
"""
#: The model which the inline is using.
model = ProxyTicket
class ProxyGrantingInline(UserAdminInlines):
"""
Bases: :class:`UserAdminInlines`
:class:`ProxyGrantingTicket<cas_server.models.ProxyGrantingTicket>` in admin interface
"""
#: The model which the inline is using.
model = ProxyGrantingTicket
class UserAdmin(admin.ModelAdmin):
"""
Bases: :class:`django.contrib.admin.ModelAdmin`
:class:`User<cas_server.models.User>` in admin interface
"""
#: See :class:`ServiceTicketInline`, :class:`ProxyTicketInline`, :class:`ProxyGrantingInline`
#: objects below the :class:`UserAdmin` fields.
inlines = (ServiceTicketInline, ProxyTicketInline, ProxyGrantingInline)
#: Fields to display on a object that are read only (not editable).
readonly_fields = ('username', 'date', "session_key")
#: Fields to display on a object.
fields = ('username', 'date', "session_key")
#: Fields to display on the list of :class:`UserAdmin` objects.
list_display = ('username', 'date', "session_key")
class UsernamesInline(BaseInlines):
"""
Bases: :class:`BaseInlines`
:class:`Username<cas_server.models.Username>` in admin interface
"""
#: The model which the inline is using.
model = Username
class ReplaceAttributNameInline(BaseInlines):
"""
Bases: :class:`BaseInlines`
:class:`ReplaceAttributName<cas_server.models.ReplaceAttributName>` in admin interface
"""
#: The model which the inline is using.
model = ReplaceAttributName
class ReplaceAttributValueInline(BaseInlines):
"""
Bases: :class:`BaseInlines`
:class:`ReplaceAttributValue<cas_server.models.ReplaceAttributValue>` in admin interface
"""
#: The model which the inline is using.
model = ReplaceAttributValue
class FilterAttributValueInline(BaseInlines):
"""
Bases: :class:`BaseInlines`
:class:`FilterAttributValue<cas_server.models.FilterAttributValue>` in admin interface
"""
#: The model which the inline is using.
model = FilterAttributValue
class ServicePatternAdmin(admin.ModelAdmin):
"""
Bases: :class:`django.contrib.admin.ModelAdmin`
:class:`ServicePattern<cas_server.models.ServicePattern>` in admin interface
"""
#: See :class:`UsernamesInline`, :class:`ReplaceAttributNameInline`,
#: :class:`ReplaceAttributValueInline`, :class:`FilterAttributValueInline` objects below
#: the :class:`ServicePatternAdmin` fields.
inlines = (
UsernamesInline,
ReplaceAttributNameInline,
ReplaceAttributValueInline,
FilterAttributValueInline
)
#: Fields to display on the list of :class:`ServicePatternAdmin` objects.
list_display = ('pos', 'name', 'pattern', 'proxy',
'single_log_out', 'proxy_callback', 'restrict_users')
class FederatedIendityProviderAdmin(admin.ModelAdmin):
"""
Bases: :class:`django.contrib.admin.ModelAdmin`
:class:`FederatedIendityProvider<cas_server.models.FederatedIendityProvider>` in admin
interface
"""
#: Fields to display on a object.
fields = ('pos', 'suffix', 'server_url', 'cas_protocol_version', 'verbose_name', 'display')
#: Fields to display on the list of :class:`FederatedIendityProviderAdmin` objects.
list_display = ('verbose_name', 'suffix', 'display')
class FederatedUserAdmin(admin.ModelAdmin):
"""
Bases: :class:`django.contrib.admin.ModelAdmin`
:class:`FederatedUser<cas_server.models.FederatedUser>` in admin
interface
"""
#: Fields to display on a object.
fields = ('username', 'provider', 'last_update')
#: Fields to display on the list of :class:`FederatedUserAdmin` objects.
list_display = ('username', 'provider', 'last_update')
class UserAttributesAdmin(admin.ModelAdmin):
"""
Bases: :class:`django.contrib.admin.ModelAdmin`
:class:`UserAttributes<cas_server.models.UserAttributes>` in admin
interface
"""
#: Fields to display on a object.
fields = ('username', '_attributs')
admin.site.register(ServicePattern, ServicePatternAdmin)
admin.site.register(FederatedIendityProvider, FederatedIendityProviderAdmin)
if settings.DEBUG: # pragma: no branch (we always test with DEBUG True)
admin.site.register(User, UserAdmin)
admin.site.register(FederatedUser, FederatedUserAdmin)
admin.site.register(UserAttributes, UserAttributesAdmin)
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/254_test_htmlparser.py | 19 | 10454 | """Tests for HTMLParser.py."""
import HTMLParser
import pprint
import sys
import unittest
from test import test_support
class EventCollector(HTMLParser.HTMLParser):
def __init__(self):
self.events = []
self.append = self.events.append
HTMLParser.HTMLParser.__init__(self)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
def _run_check(self, source, expected_events, collector=EventCollector):
parser = collector()
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events\n"
"Expected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events, EventCollectorExtra)
def _parse_error(self, source):
def parse(source=source):
parser = HTMLParser.HTMLParser()
parser.feed(source)
parser.close()
self.assertRaises(HTMLParser.HTMLParseError, parse)
class HTMLParserTestCase(TestCaseBase):
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity;&#32;
<!--comment1a
-></foo><bar>&lt;<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
&#x201C;
<!--comment2a-- --comment2b--><!>
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_doctype_decl(self):
inside = """\
DOCTYPE html [
<!ELEMENT html - O EMPTY>
<!ATTLIST html
version CDATA #IMPLIED
profile CDATA 'DublinCore'>
<!NOTATION datatype SYSTEM 'http://xml.python.org/notations/python-module'>
<!ENTITY myEntity 'internal parsed entity'>
<!ENTITY anEntity SYSTEM 'http://xml.python.org/entities/something.xml'>
<!ENTITY % paramEntity 'name|name|name'>
%paramEntity;
<!-- comment -->
]"""
self._run_check("<!%s>" % inside, [
("decl", inside),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
# lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])
])
self._run_check("""<a b='' c="">""", [
("starttag", "a", [("b", ""), ("c", "")]),
])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>", [
("starttag", "e", [("a", "rgb(1,2,3)")]),
])
# Regression test for SF bug #921657.
self._run_check("<a href=mailto:xyz@example.com>", [
("starttag", "a", [("href", "mailto:xyz@example.com")]),
])
def test_attr_entity_replacement(self):
self._run_check("""<a b='&><"''>""", [
("starttag", "a", [("b", "&><\"'")]),
])
def test_attr_funky_names(self):
self._run_check("""<a a.b='v' c:d=v e-f=v>""", [
("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a $>")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
self._parse_error("<a foo=>")
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
s = """<script> <!-- not a comment --> ¬-an-entity-ref; </script>"""
self._run_check(s, [
("starttag", "script", []),
("data", " <!-- not a comment --> ¬-an-entity-ref; "),
("endtag", "script"),
])
s = """<script> <not a='start tag'> </script>"""
self._run_check(s, [
("starttag", "script", []),
("data", " <not a='start tag'> "),
("endtag", "script"),
])
def test_main():
test_support.run_unittest(HTMLParserTestCase)
if __name__ == "__main__":
test_main()
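# --- Illustrative sketch (added; not part of the original test file) ---
# EventCollector.get_events() merges contiguous "data" events, so feeding
# the input in fragments yields the same event list as feeding it whole:
#
# p = EventCollector()
# for chunk in ("<p>He", "llo</p>"):
#     p.feed(chunk)
# p.close()
# assert p.get_events() == [("starttag", "p", []),
#                           ("data", "Hello"),
#                           ("endtag", "p")]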
| gpl-3.0 |
sidmitra/django_nonrel_testapp | dbindexer/compiler.py | 11 | 1312 | from resolver import resolver
from django.utils.importlib import import_module
def __repr__(self):
return '<%s, %s, %s, %s>' % (self.alias, self.col, self.field.name,
self.field.model.__name__)
from django.db.models.sql.where import Constraint
Constraint.__repr__ = __repr__
# TODO: manipulate a copy of the query instead of the query itself. This has to
# be done because the query can be reused by the user afterwards, so a
# manipulated query can result in strange behavior in those cases!
# TODO: Add a watching layer which gives suggestions for indexes via query
# inspection at runtime.
class BaseCompiler(object):
def convert_filters(self):
resolver.convert_filters(self.query)
class SQLCompiler(BaseCompiler):
def execute_sql(self, *args, **kwargs):
self.convert_filters()
return super(SQLCompiler, self).execute_sql(*args, **kwargs)
def results_iter(self):
self.convert_filters()
return super(SQLCompiler, self).results_iter()
class SQLInsertCompiler(BaseCompiler):
def execute_sql(self, return_id=False):
resolver.convert_query(self.query)
return super(SQLInsertCompiler, self).execute_sql(return_id=return_id)
class SQLUpdateCompiler(BaseCompiler):
pass
class SQLDeleteCompiler(BaseCompiler):
pass
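# --- Illustrative note (added; not part of the original module) ---
# These classes are mixins, which is why they call super() for methods
# they do not define themselves: dbindexer's database wrapper is expected
# to combine them, via the MRO, with the compiler classes of the wrapped
# backend. Hypothetical wiring, for illustration only:
#
# class Compiler(SQLCompiler, wrapped_backend_compiler.SQLCompiler):
#     pass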
| bsd-3-clause |
waseem18/oh-mainline | vendor/packages/twisted/twisted/python/modules.py | 18 | 26357 | # -*- test-case-name: twisted.test.test_modules -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module aims to provide a unified, object-oriented view of Python's
runtime hierarchy.
Python is a very dynamic language with a wide variety of introspection utilities.
However, these utilities can be hard to use, because there is no consistent
API. The introspection API in python is made up of attributes (__name__,
__module__, func_name, etc) on instances, modules, classes and functions which
vary between those four types, utility modules such as 'inspect' which provide
some functionality, the 'imp' module, the "compiler" module, the semantics of
PEP 302 support, and setuptools, among other things.
At the top, you have "PythonPath", an abstract representation of sys.path which
includes methods to locate top-level modules, with or without loading them.
The top-level exposed functions in this module for accessing the system path
are "walkModules", "iterModules", and "getModule".
From most to least specific, here are the objects provided::
PythonPath # sys.path
|
v
PathEntry # one entry on sys.path: an importer
|
v
PythonModule # a module or package that can be loaded
|
v
PythonAttribute # an attribute of a module (function or class)
|
v
PythonAttribute # an attribute of a function or class
|
v
...
Here's an example of idiomatic usage: this is what you would do to list all of
the modules outside the standard library's python-files directory::
import os
stdlibdir = os.path.dirname(os.__file__)
from twisted.python.modules import iterModules
for modinfo in iterModules():
if (modinfo.pathEntry.filePath.path != stdlibdir
and not modinfo.isPackage()):
print 'unpackaged: %s: %s' % (
modinfo.name, modinfo.filePath.path)
"""
__metaclass__ = type
# let's try to keep path imports to a minimum...
from os.path import dirname, split as splitpath
import sys
import zipimport
import inspect
import warnings
from zope.interface import Interface, implements
from twisted.python.components import registerAdapter
from twisted.python.filepath import FilePath, UnlistableError
from twisted.python.zippath import ZipArchive
from twisted.python.reflect import namedAny
_nothing = object()
PYTHON_EXTENSIONS = ['.py']
OPTIMIZED_MODE = __doc__ is None
if OPTIMIZED_MODE:
PYTHON_EXTENSIONS.append('.pyo')
else:
PYTHON_EXTENSIONS.append('.pyc')
def _isPythonIdentifier(string):
"""
cheezy fake test for proper identifier-ness.
@param string: a str which might or might not be a valid python identifier.
@return: True or False
"""
return (' ' not in string and
'.' not in string and
'-' not in string)
def _isPackagePath(fpath):
# Determine if a FilePath-like object is a Python package. TODO: deal with
# __init__module.(so|dll|pyd)?
extless = fpath.splitext()[0]
basend = splitpath(extless)[1]
return basend == "__init__"
class _ModuleIteratorHelper:
"""
This mixin provides common behavior between python module and path entries,
since the mechanism for searching sys.path and __path__ attributes is
remarkably similar.
"""
def iterModules(self):
"""
Loop over the modules present below this entry or package on PYTHONPATH.
For modules which are not packages, this will yield nothing.
For packages and path entries, this will only yield modules one level
down; i.e. if there is a package a.b.c, iterModules on a will only
return a.b. If you want to descend deeply, use walkModules.
@return: a generator which yields PythonModule instances that describe
modules which can be, or have been, imported.
"""
yielded = {}
if not self.filePath.exists():
return
for placeToLook in self._packagePaths():
try:
children = placeToLook.children()
except UnlistableError:
continue
children.sort()
for potentialTopLevel in children:
ext = potentialTopLevel.splitext()[1]
potentialBasename = potentialTopLevel.basename()[:-len(ext)]
if ext in PYTHON_EXTENSIONS:
# TODO: this should be a little choosier about which path entry
# it selects first, and it should do all the .so checking and
# crud
if not _isPythonIdentifier(potentialBasename):
continue
modname = self._subModuleName(potentialBasename)
if modname.split(".")[-1] == '__init__':
# This marks the directory as a package so it can't be
# a module.
continue
if modname not in yielded:
yielded[modname] = True
pm = PythonModule(modname, potentialTopLevel, self._getEntry())
assert pm != self
yield pm
else:
if (ext or not _isPythonIdentifier(potentialBasename)
or not potentialTopLevel.isdir()):
continue
modname = self._subModuleName(potentialTopLevel.basename())
for ext in PYTHON_EXTENSIONS:
initpy = potentialTopLevel.child("__init__"+ext)
if initpy.exists():
yielded[modname] = True
pm = PythonModule(modname, initpy, self._getEntry())
assert pm != self
yield pm
break
def walkModules(self, importPackages=False):
"""
Similar to L{iterModules}, this yields self, and then every module in my
package or entry, and every submodule in each package or entry.
In other words, this is deep, and L{iterModules} is shallow.
"""
yield self
for package in self.iterModules():
for module in package.walkModules(importPackages=importPackages):
yield module
def _subModuleName(self, mn):
"""
This is a hook to provide packages with the ability to specify their names
as a prefix to submodules here.
"""
return mn
def _packagePaths(self):
"""
Implement in subclasses to specify where to look for modules.
@return: iterable of FilePath-like objects.
"""
raise NotImplementedError()
def _getEntry(self):
"""
Implement in subclasses to specify what path entry submodules will come
from.
@return: a PathEntry instance.
"""
raise NotImplementedError()
def __getitem__(self, modname):
"""
Retrieve a module from below this path or package.
@param modname: a str naming a module to be loaded. For entries, this
is a top-level, undotted package name, and for packages it is the name
of the module without the package prefix. For example, if you have a
PythonModule representing the 'twisted' package, you could use::
twistedPackageObj['python']['modules']
to retrieve this module.
@raise: KeyError if the module is not found.
@return: a PythonModule.
"""
for module in self.iterModules():
if module.name == self._subModuleName(modname):
return module
raise KeyError(modname)
def __iter__(self):
"""
Implemented to raise NotImplementedError for clarity, so that attempting to
loop over this object won't call __getitem__.
Note: in the future there might be some sensible default for iteration,
like 'walkEverything', so this is deliberately untested and undefined
behavior.
"""
raise NotImplementedError()
class PythonAttribute:
"""
I represent a function, class, or other object that is present.
@ivar name: the fully-qualified python name of this attribute.
@ivar onObject: a reference to a PythonModule or other PythonAttribute that
is this attribute's logical parent.
"""
def __init__(self, name, onObject, loaded, pythonValue):
"""
Create a PythonAttribute. This is a private constructor. Do not construct
me directly, use PythonModule.iterAttributes.
@param name: the FQPN
@param onObject: see ivar
@param loaded: always True, for now
@param pythonValue: the value of the attribute we're pointing to.
"""
self.name = name
self.onObject = onObject
self._loaded = loaded
self.pythonValue = pythonValue
def __repr__(self):
return 'PythonAttribute<%r>'%(self.name,)
def isLoaded(self):
"""
Return a boolean describing whether the attribute this describes has
actually been loaded into memory by importing its module.
Note: this currently always returns true; there is no Python parser
support in this module yet.
"""
return self._loaded
def load(self, default=_nothing):
"""
Load the value associated with this attribute.
@return: an arbitrary Python object, or 'default' if there is an error
loading it.
"""
return self.pythonValue
def iterAttributes(self):
for name, val in inspect.getmembers(self.load()):
yield PythonAttribute(self.name+'.'+name, self, True, val)
class PythonModule(_ModuleIteratorHelper):
"""
Representation of a module which could be imported from sys.path.
@ivar name: the fully qualified python name of this module.
@ivar filePath: a FilePath-like object which points to the location of this
module.
@ivar pathEntry: a L{PathEntry} instance which this module was located
from.
"""
def __init__(self, name, filePath, pathEntry):
"""
Create a PythonModule. Do not construct this directly, instead inspect a
PythonPath or other PythonModule instances.
@param name: see ivar
@param filePath: see ivar
@param pathEntry: see ivar
"""
assert not name.endswith(".__init__")
self.name = name
self.filePath = filePath
self.parentPath = filePath.parent()
self.pathEntry = pathEntry
def _getEntry(self):
return self.pathEntry
def __repr__(self):
"""
Return a string representation including the module name.
"""
return 'PythonModule<%r>' % (self.name,)
def isLoaded(self):
"""
Determine if the module is loaded into sys.modules.
@return: a boolean: true if loaded, false if not.
"""
return self.pathEntry.pythonPath.moduleDict.get(self.name) is not None
def iterAttributes(self):
"""
List all the attributes defined in this module.
Note: Future work is planned here to make it possible to list python
attributes on a module without loading the module by inspecting ASTs or
bytecode, but currently any iteration of PythonModule objects insists
they must be loaded, and will use inspect.getmodule.
@raise NotImplementedError: if this module is not loaded.
@return: a generator yielding PythonAttribute instances describing the
attributes of this module.
"""
if not self.isLoaded():
raise NotImplementedError(
"You can't load attributes from non-loaded modules yet.")
for name, val in inspect.getmembers(self.load()):
yield PythonAttribute(self.name+'.'+name, self, True, val)
def isPackage(self):
"""
Returns true if this module is also a package, and might yield something
from iterModules.
"""
return _isPackagePath(self.filePath)
def load(self, default=_nothing):
"""
Load this module.
@param default: if specified, the value to return in case of an error.
@return: a genuine python module.
@raise: any type of exception. Importing modules is a risky business;
the errors of any code run at module scope may be raised from here, as
well as ImportError if something bizarre happened to the system path
between the discovery of this PythonModule object and the attempt to
import it. If you specify a default, the error will be swallowed
entirely, and not logged.
@rtype: types.ModuleType.
"""
try:
return self.pathEntry.pythonPath.moduleLoader(self.name)
except: # this needs more thought...
if default is not _nothing:
return default
raise
def __eq__(self, other):
"""
PythonModules with the same name are equal.
"""
if not isinstance(other, PythonModule):
return False
return other.name == self.name
def __ne__(self, other):
"""
PythonModules with different names are not equal.
"""
if not isinstance(other, PythonModule):
return True
return other.name != self.name
def walkModules(self, importPackages=False):
if importPackages and self.isPackage():
self.load()
return super(PythonModule, self).walkModules(importPackages=importPackages)
def _subModuleName(self, mn):
"""
submodules of this module are prefixed with our name.
"""
return self.name + '.' + mn
def _packagePaths(self):
"""
Yield a sequence of FilePath-like objects which represent path segments.
"""
if not self.isPackage():
return
if self.isLoaded():
load = self.load()
if hasattr(load, '__path__'):
for fn in load.__path__:
if fn == self.parentPath.path:
# this should _really_ exist.
assert self.parentPath.exists()
yield self.parentPath
else:
smp = self.pathEntry.pythonPath._smartPath(fn)
if smp.exists():
yield smp
else:
yield self.parentPath
class PathEntry(_ModuleIteratorHelper):
"""
I am a proxy for a single entry on sys.path.
@ivar filePath: a FilePath-like object pointing at the filesystem location
or archive file where this path entry is stored.
@ivar pythonPath: a PythonPath instance.
"""
def __init__(self, filePath, pythonPath):
"""
Create a PathEntry. This is a private constructor.
"""
self.filePath = filePath
self.pythonPath = pythonPath
def _getEntry(self):
return self
def __repr__(self):
return 'PathEntry<%r>' % (self.filePath,)
def _packagePaths(self):
yield self.filePath
class IPathImportMapper(Interface):
"""
This is an internal interface, used to map importers to factories for
FilePath-like objects.
"""
def mapPath(self, pathLikeString):
"""
Return a FilePath-like object.
@param pathLikeString: a path-like string, like one that might be
passed to an import hook.
@return: a L{FilePath}, or something like it (currently only a
L{ZipPath}, but more might be added later).
"""
class _DefaultMapImpl:
""" Wrapper for the default importer, i.e. None. """
implements(IPathImportMapper)
def mapPath(self, fsPathString):
return FilePath(fsPathString)
_theDefaultMapper = _DefaultMapImpl()
class _ZipMapImpl:
""" IPathImportMapper implementation for zipimport.ZipImporter. """
implements(IPathImportMapper)
def __init__(self, importer):
self.importer = importer
def mapPath(self, fsPathString):
"""
Map the given FS path to a ZipPath, by looking at the ZipImporter's
"archive" attribute and using it as our ZipArchive root, then walking
down into the archive from there.
@return: a L{zippath.ZipPath} or L{zippath.ZipArchive} instance.
"""
za = ZipArchive(self.importer.archive)
myPath = FilePath(self.importer.archive)
itsPath = FilePath(fsPathString)
if myPath == itsPath:
return za
# This is NOT a general-purpose rule for sys.path or __file__:
# zipimport specifically uses regular OS path syntax in its pathnames,
# even though zip files specify that slashes are always the separator,
# regardless of platform.
segs = itsPath.segmentsFrom(myPath)
zp = za
for seg in segs:
zp = zp.child(seg)
return zp
registerAdapter(_ZipMapImpl, zipimport.zipimporter, IPathImportMapper)
def _defaultSysPathFactory():
"""
Provide the default behavior of PythonPath's sys.path factory, which is to
return the current value of sys.path.
@return: L{sys.path}
"""
return sys.path
class PythonPath:
"""
I represent the very top of the Python object-space, the module list in
sys.path and the modules list in sys.modules.
@ivar _sysPath: a sequence of strings like sys.path. This attribute is
read-only.
@ivar moduleDict: a dictionary mapping string module names to module
objects, like sys.modules.
@ivar sysPathHooks: a list of PEP-302 path hooks, like sys.path_hooks.
@ivar moduleLoader: a function that takes a fully-qualified python name and
returns a module, like twisted.python.reflect.namedAny.
"""
def __init__(self,
sysPath=None,
moduleDict=sys.modules,
sysPathHooks=sys.path_hooks,
importerCache=sys.path_importer_cache,
moduleLoader=namedAny,
sysPathFactory=None):
"""
Create a PythonPath. You almost certainly want to use
modules.theSystemPath, or its aliased methods, rather than creating a
new instance yourself, though.
All parameters are optional, and if unspecified, will use 'system'
equivalents that makes this PythonPath like the global L{theSystemPath}
instance.
@param sysPath: a sys.path-like list to use for this PythonPath, to
specify where to load modules from.
@param moduleDict: a sys.modules-like dictionary to use for keeping
track of what modules this PythonPath has loaded.
@param sysPathHooks: sys.path_hooks-like list of PEP-302 path hooks to
be used for this PythonPath, to determine which importers should be
used.
@param importerCache: a sys.path_importer_cache-like list of PEP-302
importers. This will be used in conjunction with the given
sysPathHooks.
@param moduleLoader: a module loader function which takes a string and
returns a module. That is to say, it is like L{namedAny} - *not* like
L{__import__}.
@param sysPathFactory: a 0-argument callable which returns the current
value of a sys.path-like list of strings. Specify either this, or
sysPath, not both. This alternative interface is provided because the
way the Python import mechanism works, you can re-bind the 'sys.path'
name and that is what is used for current imports, so it must be a
factory rather than a value to deal with modification by rebinding
rather than modification by mutation. Note: it is not recommended to
rebind sys.path. Although this mechanism can deal with that, it is a
subtle point that is easy for tools which interact with sys.path to
miss.
"""
if sysPath is not None:
sysPathFactory = lambda: sysPath
elif sysPathFactory is None:
sysPathFactory = _defaultSysPathFactory
self._sysPathFactory = sysPathFactory
self._sysPath = sysPath
self.moduleDict = moduleDict
self.sysPathHooks = sysPathHooks
self.importerCache = importerCache
self.moduleLoader = moduleLoader
def _getSysPath(self):
"""
Retrieve the current value of the module search path list.
"""
return self._sysPathFactory()
sysPath = property(_getSysPath)
def _findEntryPathString(self, modobj):
"""
Determine where a given Python module object came from by looking at path
entries.
"""
topPackageObj = modobj
while '.' in topPackageObj.__name__:
topPackageObj = self.moduleDict['.'.join(
topPackageObj.__name__.split('.')[:-1])]
if _isPackagePath(FilePath(topPackageObj.__file__)):
# if package 'foo' is on sys.path at /a/b/foo, package 'foo's
# __file__ will be /a/b/foo/__init__.py, and we are looking for
# /a/b here, the path-entry; so go up two steps.
rval = dirname(dirname(topPackageObj.__file__))
else:
# the module is completely top-level, not within any packages. The
# path entry it's on is just its dirname.
rval = dirname(topPackageObj.__file__)
# There are probably some awful tricks that an importer could pull
# which would break this, so let's just make sure... it's a loaded
# module after all, which means that its path MUST be in
# path_importer_cache according to PEP 302 -glyph
if rval not in self.importerCache:
warnings.warn(
"%s (for module %s) not in path importer cache "
"(PEP 302 violation - check your local configuration)." % (
rval, modobj.__name__),
stacklevel=3)
return rval
def _smartPath(self, pathName):
"""
Given a path entry from sys.path which may refer to an importer,
return the appropriate FilePath-like instance.
@param pathName: a str describing the path.
@return: a FilePath-like object.
"""
importr = self.importerCache.get(pathName, _nothing)
if importr is _nothing:
for hook in self.sysPathHooks:
try:
importr = hook(pathName)
except ImportError:
pass
if importr is _nothing: # still
importr = None
return IPathImportMapper(importr, _theDefaultMapper).mapPath(pathName)
def iterEntries(self):
"""
Iterate the entries on my sysPath.
@return: a generator yielding PathEntry objects
"""
for pathName in self.sysPath:
fp = self._smartPath(pathName)
yield PathEntry(fp, self)
def __getitem__(self, modname):
"""
Get a python module by its given fully-qualified name.
@param modname: The fully-qualified Python module name to load.
@type modname: C{str}
@return: an object representing the module identified by C{modname}
@rtype: L{PythonModule}
@raise KeyError: if the module name is not a valid module name, or no
such module can be identified as loadable.
"""
# See if the module is already somewhere in Python-land.
moduleObject = self.moduleDict.get(modname)
if moduleObject is not None:
# we need 2 paths; one of the path entry and one for the module.
pe = PathEntry(
self._smartPath(
self._findEntryPathString(moduleObject)),
self)
mp = self._smartPath(moduleObject.__file__)
return PythonModule(modname, mp, pe)
# Recurse if we're trying to get a submodule.
if '.' in modname:
pkg = self
for name in modname.split('.'):
pkg = pkg[name]
return pkg
# Finally do the slowest possible thing and iterate
for module in self.iterModules():
if module.name == modname:
return module
raise KeyError(modname)
def __repr__(self):
"""
Display my sysPath and moduleDict in a string representation.
"""
return "PythonPath(%r,%r)" % (self.sysPath, self.moduleDict)
def iterModules(self):
"""
Yield all top-level modules on my sysPath.
"""
for entry in self.iterEntries():
for module in entry.iterModules():
yield module
def walkModules(self, importPackages=False):
"""
Similar to L{iterModules}, this yields every module on the path, then every
submodule in each package or entry.
"""
for package in self.iterModules():
for module in package.walkModules(importPackages=importPackages):
yield module
theSystemPath = PythonPath()
def walkModules(importPackages=False):
"""
Deeply iterate all modules on the global python path.
@param importPackages: Import packages as they are seen.
"""
return theSystemPath.walkModules(importPackages=importPackages)
def iterModules():
"""
Iterate all modules and top-level packages on the global Python path, but
do not descend into packages.
"""
return theSystemPath.iterModules()
def getModule(moduleName):
"""
Retrieve a module from the system path.
"""
return theSystemPath[moduleName]
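# --- Usage sketch (added; not part of the original module) ---
# e.g., list the names of all top-level modules without importing them,
# then load one by its fully-qualified name:
#
# names = [module.name for module in iterModules()]
# os_module = getModule('os').load()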
| agpl-3.0 |
lesh1k/beatport-verifier | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/connection.py | 483 | 9011 | import datetime
import sys
import socket
from socket import timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection, HTTPException
except ImportError:
from httplib import HTTPConnection as _HTTPConnection, HTTPException
class DummyConnection(object):
"Used to detect a failed ConnectionCls import."
pass
try: # Compiled with SSL?
HTTPSConnection = DummyConnection
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
ConnectTimeoutError,
SystemTimeWarning,
SecurityWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
port_by_scheme = {
'http': 80,
'https': 443,
}
RECENT_DATE = datetime.date(2014, 1, 1)
class HTTPConnection(_HTTPConnection, object):
"""
Based on httplib.HTTPConnection but provides an extra constructor
backwards-compatibility layer between older and newer Pythons.
Additional keyword parameters are used to configure attributes of the connection.
Accepted parameters include:
- ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
- ``source_address``: Set the source address for the current connection.
.. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x
- ``socket_options``: Set specific options on the underlying socket. If not specified, then
defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
For example, if you wish to enable TCP Keep Alive in addition to the defaults,
you might pass::
HTTPConnection.default_socket_options + [
(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
]
Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
"""
default_port = port_by_scheme['http']
#: Disable Nagle's algorithm by default.
#: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
#: Whether this connection verifies the host's certificate.
is_verified = False
def __init__(self, *args, **kw):
if six.PY3: # Python 3
kw.pop('strict', None)
# Pre-set source_address in case we have an older Python like 2.6.
self.source_address = kw.get('source_address')
if sys.version_info < (2, 7): # Python 2.6
# _HTTPConnection on Python 2.6 will balk at this keyword arg, but
# not newer versions. We can still use it when creating a
# connection though, so we pop it *after* we have saved it as
# self.source_address.
kw.pop('source_address', None)
#: The socket options provided by the user. If no options are
#: provided, we use the default options.
self.socket_options = kw.pop('socket_options', self.default_socket_options)
# Superclass also sets self.source_address in Python 2.7+.
_HTTPConnection.__init__(self, *args, **kw)
def _new_conn(self):
""" Establish a socket connection and set nodelay settings on it.
:return: New socket connection.
"""
extra_kw = {}
if self.source_address:
extra_kw['source_address'] = self.source_address
if self.socket_options:
extra_kw['socket_options'] = self.socket_options
try:
conn = connection.create_connection(
(self.host, self.port), self.timeout, **extra_kw)
except SocketTimeout:
raise ConnectTimeoutError(
self, "Connection to %s timed out. (connect timeout=%s)" %
(self.host, self.timeout))
return conn
def _prepare_conn(self, conn):
self.sock = conn
# the _tunnel_host attribute was added in python 2.6.3 (via
# http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
# not have them.
if getattr(self, '_tunnel_host', None):
# TODO: Fix tunnel so it doesn't depend on self.sock state.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
default_port = port_by_scheme['https']
def __init__(self, host, port=None, key_file=None, cert_file=None,
strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
HTTPConnection.__init__(self, host, port, strict=strict,
timeout=timeout, **kw)
self.key_file = key_file
self.cert_file = cert_file
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
def connect(self):
conn = self._new_conn()
self._prepare_conn(conn)
self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
"""
Based on httplib.HTTPSConnection but wraps the socket with
SSL certification.
"""
cert_reqs = None
ca_certs = None
ssl_version = None
assert_fingerprint = None
def set_cert(self, key_file=None, cert_file=None,
cert_reqs=None, ca_certs=None,
assert_hostname=None, assert_fingerprint=None):
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.ca_certs = ca_certs
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
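# Illustrative call sequence (hedged; host and CA bundle path are placeholders):
#
#   conn = VerifiedHTTPSConnection('example.com', 443)
#   conn.set_cert(cert_reqs='CERT_REQUIRED',
#                 ca_certs='/etc/ssl/certs/ca-certificates.crt')
#   conn.connect()  # raises on certificate verification failure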
def connect(self):
# Add certificate verification
conn = self._new_conn()
resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
resolved_ssl_version = resolve_ssl_version(self.ssl_version)
hostname = self.host
if getattr(self, '_tunnel_host', None):
# _tunnel_host was added in Python 2.6.3
# (See: http://hg.python.org/cpython/rev/0f57b30a152f)
self.sock = conn
# Calls self._set_hostport(), so self.host is
# self._tunnel_host below.
self._tunnel()
# Mark this connection as not reusable
self.auto_open = 0
# Override the host with the one we're requesting data from.
hostname = self._tunnel_host
is_time_off = datetime.date.today() < RECENT_DATE
if is_time_off:
warnings.warn((
'System time is way off (before {0}). This will probably '
'lead to SSL verification errors').format(RECENT_DATE),
SystemTimeWarning
)
# Wrap socket using verification with the root certs in
# trusted_root_certs
self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
cert_reqs=resolved_cert_reqs,
ca_certs=self.ca_certs,
server_hostname=hostname,
ssl_version=resolved_ssl_version)
if self.assert_fingerprint:
assert_fingerprint(self.sock.getpeercert(binary_form=True),
self.assert_fingerprint)
elif resolved_cert_reqs != ssl.CERT_NONE \
and self.assert_hostname is not False:
cert = self.sock.getpeercert()
if not cert.get('subjectAltName', ()):
warnings.warn((
'Certificate has no `subjectAltName`, falling back to check for a `commonName` for now. '
'This feature is being removed by major browsers and deprecated by RFC 2818. '
'(See https://github.com/shazow/urllib3/issues/497 for details.)'),
SecurityWarning
)
match_hostname(cert, self.assert_hostname or hostname)
self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED
or self.assert_fingerprint is not None)
if ssl:
# Make a copy for testing.
UnverifiedHTTPSConnection = HTTPSConnection
HTTPSConnection = VerifiedHTTPSConnection
else:
HTTPSConnection = DummyConnection
| cc0-1.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/scipy/spatial/tests/test_spherical_voronoi.py | 29 | 4636 | from __future__ import print_function
import numpy as np
from numpy.testing import (TestCase,
assert_almost_equal,
assert_array_equal,
assert_array_almost_equal)
from scipy.spatial import SphericalVoronoi, distance
from scipy.spatial import _spherical_voronoi as spherical_voronoi
class TestCircumcenters(TestCase):
def test_circumcenters(self):
tetrahedrons = np.array([
[[1, 2, 3],
[-1.1, -2.1, -3.1],
[-1.2, 2.2, 3.2],
[-1.3, -2.3, 3.3]],
[[10, 20, 30],
[-10.1, -20.1, -30.1],
[-10.2, 20.2, 30.2],
[-10.3, -20.3, 30.3]]
])
result = spherical_voronoi.calc_circumcenters(tetrahedrons)
expected = [
[-0.5680861153262529, -0.133279590288315, 0.1843323216995444],
[-0.5965330784014926, -0.1480377040397778, 0.1981967854886021]
]
assert_array_almost_equal(result, expected)
class TestProjectToSphere(TestCase):
def test_unit_sphere(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0, 0, 0])
radius = 1
projected = spherical_voronoi.project_to_sphere(points, center, radius)
assert_array_almost_equal(points, projected)
def test_scaled_points(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([0, 0, 0])
radius = 1
scaled = points * 2
projected = spherical_voronoi.project_to_sphere(scaled, center, radius)
assert_array_almost_equal(points, projected)
def test_translated_sphere(self):
points = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
center = np.array([1, 2, 3])
translated = points + center
radius = 1
projected = spherical_voronoi.project_to_sphere(translated, center,
radius)
assert_array_almost_equal(translated, projected)
class TestSphericalVoronoi(TestCase):
def setUp(self):
self.points = np.array([
[-0.78928481, -0.16341094, 0.59188373],
[-0.66839141, 0.73309634, 0.12578818],
[0.32535778, -0.92476944, -0.19734181],
[-0.90177102, -0.03785291, -0.43055335],
[0.71781344, 0.68428936, 0.12842096],
[-0.96064876, 0.23492353, -0.14820556],
[0.73181537, -0.22025898, -0.6449281],
[0.79979205, 0.54555747, 0.25039913]]
)
def test_constructor(self):
center = np.array([1, 2, 3])
radius = 2
s1 = SphericalVoronoi(self.points)
s2 = SphericalVoronoi(self.points, radius)
s3 = SphericalVoronoi(self.points, None, center)
s4 = SphericalVoronoi(self.points, radius, center)
assert_array_equal(s1.center, np.array([0, 0, 0]))
self.assertEqual(s1.radius, 1)
assert_array_equal(s2.center, np.array([0, 0, 0]))
self.assertEqual(s2.radius, 2)
assert_array_equal(s3.center, center)
self.assertEqual(s3.radius, 1)
assert_array_equal(s4.center, center)
self.assertEqual(s4.radius, radius)
def test_vertices_regions_translation_invariance(self):
sv_origin = SphericalVoronoi(self.points)
center = np.array([1, 1, 1])
sv_translated = SphericalVoronoi(self.points + center, None, center)
assert_array_equal(sv_origin.regions, sv_translated.regions)
assert_array_almost_equal(sv_origin.vertices + center,
sv_translated.vertices)
def test_vertices_regions_scaling_invariance(self):
sv_unit = SphericalVoronoi(self.points)
sv_scaled = SphericalVoronoi(self.points * 2, 2)
assert_array_equal(sv_unit.regions, sv_scaled.regions)
assert_array_almost_equal(sv_unit.vertices * 2,
sv_scaled.vertices)
def test_sort_vertices_of_regions(self):
sv = SphericalVoronoi(self.points)
unsorted_regions = sv.regions
sv.sort_vertices_of_regions()
assert_array_equal(sorted(sv.regions), sorted(unsorted_regions))
def test_voronoi_circles(self):
sv = spherical_voronoi.SphericalVoronoi(self.points)
for vertex in sv.vertices:
distances = distance.cdist(sv.points, np.array([vertex]))
closest = np.array(sorted(distances)[0:3])
assert_almost_equal(closest[0], closest[1], 7, str(vertex))
assert_almost_equal(closest[0], closest[2], 7, str(vertex))
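if __name__ == '__main__':
    # Hedged demo, not part of the original test suite: build a spherical
    # Voronoi diagram for the six axis-aligned unit vectors and report the
    # region count (one region per generator point).
    octahedron = np.array([[1, 0, 0], [-1, 0, 0],
                           [0, 1, 0], [0, -1, 0],
                           [0, 0, 1], [0, 0, -1]], dtype=float)
    demo_sv = SphericalVoronoi(octahedron)
    demo_sv.sort_vertices_of_regions()
    print("regions:", len(demo_sv.regions))  # expected: 6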
| gpl-3.0 |
synicalsyntax/zulip | zerver/views/archive.py | 4 | 3287 | from typing import List, Optional
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import loader
from zerver.lib.avatar import get_gravatar_url
from zerver.lib.exceptions import JsonableError
from zerver.lib.response import json_success
from zerver.lib.streams import get_stream_by_id
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.topic import get_topic_history_for_web_public_stream, messages_for_topic
from zerver.models import Message, UserProfile
def archive(request: HttpRequest,
stream_id: int,
topic_name: str) -> HttpResponse:
def get_response(rendered_message_list: List[str],
is_web_public: bool,
stream_name: str) -> HttpResponse:
return render(
request,
'zerver/archive/index.html',
context={
'is_web_public': is_web_public,
'message_list': rendered_message_list,
'stream': stream_name,
'topic': topic_name,
},
)
try:
stream = get_stream_by_id(stream_id)
except JsonableError:
return get_response([], False, '')
if not stream.is_web_public:
return get_response([], False, '')
all_messages = list(
messages_for_topic(
stream_recipient_id=stream.recipient_id,
topic_name=topic_name,
).select_related('sender').order_by('date_sent'),
)
if not all_messages:
return get_response([], True, stream.name)
rendered_message_list = []
prev_sender: Optional[UserProfile] = None
for msg in all_messages:
include_sender = False
status_message = Message.is_status_message(msg.content, msg.rendered_content)
if not prev_sender or prev_sender != msg.sender or status_message:
if status_message:
prev_sender = None
else:
prev_sender = msg.sender
include_sender = True
if status_message:
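# Strip the 4+3 = 7 leading characters ('<p>' plus '/me ') and the
# trailing '</p>' from the rendered content, leaving just the status
# text (an assumption based on how /me messages are rendered).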
status_message = msg.rendered_content[4+3: -4]
context = {
'sender_full_name': msg.sender.full_name,
'timestampstr': datetime_to_timestamp(msg.last_edit_time
if msg.last_edit_time
else msg.date_sent),
'message_content': msg.rendered_content,
'avatar_url': get_gravatar_url(msg.sender.delivery_email, 1),
'include_sender': include_sender,
'status_message': status_message,
}
rendered_msg = loader.render_to_string('zerver/archive/single_message.html', context)
rendered_message_list.append(rendered_msg)
return get_response(rendered_message_list, True, stream.name)
def get_web_public_topics_backend(request: HttpRequest, stream_id: int) -> HttpResponse:
try:
stream = get_stream_by_id(stream_id)
except JsonableError:
return json_success(dict(topics=[]))
if not stream.is_web_public:
return json_success(dict(topics=[]))
result = get_topic_history_for_web_public_stream(recipient=stream.recipient)
return json_success(dict(topics=result))
| apache-2.0 |
nonsensews/mango | mango/system/tasks.py | 2 | 16555 | # -*- coding: utf-8 -*-
# This file is part of mango.
# Distributed under the terms of the last AGPL License.
# The full license is in the file LICENCE, distributed as part of this software.
__author__ = 'Team Machine'
import uuid
import logging
import ujson as json
from tornado import gen
from schematics.types import compound
from mango.schemas import tasks
from mango.schemas import BaseResult
from mango.schemas.tasks import TaskMap
from riak.datatypes import Map
from mango.tools import clean_structure, clean_results, get_search_item
# Also referenced below; assumed (not verified) to live in mango.tools as well:
from mango.tools import clean_response, clean_response_test, get_search_list, quick_search_item
from tornado import httpclient as _http_client
_http_client.AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient')
http_client = _http_client.AsyncHTTPClient()
class TasksResult(BaseResult):
'''
List result
'''
results = compound.ListType(compound.ModelType(tasks.Task))
class Tasks(object):
'''
Tasks
'''
@gen.coroutine
def quick_search(self, account, start, end, lapse, status, page_num, fields, search):
'''
Quick Search
'''
search_index = 'mango_task_index'
query = 'allfields_register:{0}'.format(search.decode('utf-8'))
page_num = int(page_num)
page_size = self.settings['page_size']
start_num = page_size * (page_num - 1)
if not fields:
fields = 'email_register,last_name_register,phone_register,lead_type_register,uuid_register'
else:
fields = '{0}'.format(fields.decode('utf-8'))
url = quick_search_item(self.solr, search_index, query, start_num, page_size, fields).replace(' ', '')
logging.warning('check this url')
logging.warning(url)
IGNORE_ME = ["_yz_id","_yz_rk","_yz_rt","_yz_rb"]
got_response = []
# clean response message
message = {
'count': 0,
'page': page_num,
'results': []
}
def handle_request(response):
'''
Request Async Handler
'''
if response.error:
logging.error(response.error)
got_response.append({'error':True, 'message': response.error})
else:
got_response.append(json.loads(response.body))
try:
http_client.fetch(
url,
callback=handle_request
)
while len(got_response) == 0:
# don't be careless with the time.
yield gen.sleep(0.0021)
stuff = got_response[0]
if stuff['response']['numFound']:
message['count'] += stuff['response']['numFound']
for doc in stuff['response']['docs']:
message['results'].append(clean_response_test(doc, IGNORE_ME))
else:
logging.error('there is probably something wrong!')
except Exception as error:
logging.warning(error)
return message
@gen.coroutine
def get_task(self, account, task_uuid):
'''
Get task
'''
search_index = 'mango_task_index'
query = 'uuid_register:{0}'.format(task_uuid)
filter_query = 'account_register:{0}'.format(account.decode('utf-8'))
# note the hack here: replace ' with %27 for the URL string!
fq_watchers = "watchers_register:*'{0}'*".format(account.decode('utf8')).replace("'",'%27')
urls = set()
urls.add(get_search_item(self.solr, search_index, query, filter_query))
urls.add(get_search_item(self.solr, search_index, query, fq_watchers))
# init got response list
got_response = []
# init crash message
message = {'message': 'not found'}
# ignore riak fields
IGNORE_ME = ["_yz_id","_yz_rk","_yz_rt","_yz_rb"]
# hopefully asynchronous handle function request
def handle_request(response):
'''
Request Async Handler
'''
if response.error:
logging.error(response.error)
got_response.append({'error':True, 'message': response.error})
else:
got_response.append(json.loads(response.body))
try:
# and now for something completely different!
for url in urls:
http_client.fetch(
url,
callback=handle_request
)
while len(got_response) <= 1:
# Yo, don't be careless with the time!
yield gen.sleep(0.0010)
# get stuff from response
stuff = got_response[0]
# get it from watchers list
watchers = got_response[1]
if stuff['response']['numFound']:
response = stuff['response']['docs'][0]
message = clean_response(response, IGNORE_ME)
elif watchers['response']['numFound']:
response = watchers['response']['docs'][0]
message = clean_response(response, IGNORE_ME)
else:
logging.error('there is probably something wrong!')
except Exception as error:
logging.warning(error)
return message
@gen.coroutine
def get_task_list(self, account, start, end, lapse, status, page_num):
'''
Get task list
'''
search_index = 'mango_task_index'
query = 'uuid_register:*'
filter_query = 'account_register:{0}'.format(account.decode('utf-8'))
# note the hack here: replace ' with %27 for the URL string!
fq_watchers = "watchers_register:*'{0}'*".format(account.decode('utf8')).replace("'",'%27')
# page number
page_num = int(page_num)
page_size = self.settings['page_size']
start_num = page_size * (page_num - 1)
# set of urls
urls = set()
urls.add(get_search_list(self.solr, search_index, query, filter_query, start_num, page_size))
urls.add(get_search_list(self.solr, search_index, query, fq_watchers, start_num, page_size))
# init got response list
got_response = []
# init crash message
message = {
'count': 0,
'page': page_num,
'results': []
}
# ignore riak fields
IGNORE_ME = ["_yz_id","_yz_rk","_yz_rt","_yz_rb"]
# hopefully asynchronous handle function request
def handle_request(response):
'''
Request Async Handler
'''
if response.error:
logging.error(response.error)
got_response.append({'error':True, 'message': response.error})
else:
got_response.append(json.loads(response.body))
try:
# and now for something completely different!
for url in urls:
http_client.fetch(
url,
callback=handle_request
)
while len(got_response) <= 1:
# Yo, don't be careless with the time!
yield gen.sleep(0.0010)
# get stuff from response
stuff = got_response[0]
# get it from watchers list
watchers = got_response[1]
if stuff['response']['numFound']:
message['count'] += stuff['response']['numFound']
for doc in stuff['response']['docs']:
message['results'].append(clean_response(doc, IGNORE_ME))
if watchers['response']['numFound']:
message['count'] += watchers['response']['numFound']
for doc in watchers['response']['docs']:
message['results'].append(clean_response(doc, IGNORE_ME))
else:
logging.error('there is probably something wrong!')
except Exception as error:
logging.warning(error)
return message
@gen.coroutine
def new_task(self, struct):
'''
New task event
'''
search_index = 'mango_task_index'
bucket_type = 'mango_task'
bucket_name = 'tasks'
try:
event = tasks.Task(struct)
event.validate()
event = clean_structure(event)
except Exception as error:
raise error
try:
structure = {
"uuid": str(event.get('uuid', str(uuid.uuid4()))),
"account": str(event.get('account', 'pebkac')),
"subject": str(event.get('subject', '')),
"description": str(event.get('description', '')),
"data": str(event.get('data', '')),
"assign": str(event.get('assign', '')),
"public": str(event.get('public', '')),
"source ": str(event.get('source', '')),
"destination": str(event.get('destination', '')),
"labels": str(event.get('labels', '')),
"start_time": str(event.get('start_time', '')),
"ack_time": str(event.get('ack_time', '')),
"stop_time": str(event.get('stop_time', '')),
"deadline": str(event.get('deadline', '')),
"duration": str(event.get('duration', '')),
"comments": str(event.get('comments', '')),
"history": str(event.get('history', '')),
"status": str(event.get('status', '')),
"checked": str(event.get('checked', '')),
"checked_by": str(event.get('checked_by', '')),
"checked_at": str(event.get('checked_at', '')),
"created_by": str(event.get('created_by', '')),
"created_at": str(event.get('created_at', '')),
"last_update_by": str(event.get('last_update_by', '')),
"last_update_at": str(event.get('last_update_at', '')),
}
result = TaskMap(
self.kvalue,
bucket_name,
bucket_type,
search_index,
structure
)
message = structure.get('uuid')
except Exception as error:
logging.error(error)
message = str(error)
return message
@gen.coroutine
def modify_task(self, account, task_uuid, struct):
'''
Modify task
'''
# riak search index
search_index = 'mango_task_index'
# riak bucket type
bucket_type = 'mango_task'
# riak bucket name
bucket_name = 'tasks'
# solr query
query = 'uuid_register:{0}'.format(task_uuid.rstrip('/'))
# filter query
filter_query = 'account_register:{0}'.format(account.decode('utf-8'))
# search query url
url = "https://{0}/search/query/{1}?wt=json&q={2}&fq={3}".format(
self.solr, search_index, query, filter_query
)
# pretty please, ignore this list of fields from database.
# if you want to include something in here, remember the _register.
# example: labels_register.
IGNORE_ME = ("_yz_id","_yz_rk","_yz_rt","_yz_rb")
# got callback response?
got_response = []
# yours truly
message = {'update_complete':False}
def handle_request(response):
'''
Request Async Handler
'''
if response.error:
logging.error(response.error)
got_response.append({'error':True, 'message': response.error})
else:
got_response.append(json.loads(response.body))
try:
http_client.fetch(
url,
callback=handle_request
)
while len(got_response) == 0:
# don't be careless with the time.
yield gen.sleep(0.0010)
response = got_response[0].get('response')['docs'][0]
riak_key = str(response['_yz_rk'])
bucket = self.kvalue.bucket_type(bucket_type).bucket('{0}'.format(bucket_name))
bucket.set_properties({'search_index': search_index})
task = Map(bucket, riak_key)
for key in struct:
if key not in IGNORE_ME:
if type(struct.get(key)) == list:
task.reload()
old_value = task.registers['{0}'.format(key)].value
if old_value:
old_list = json.loads(old_value.replace("'",'"'))
for thing in struct.get(key):
old_list.append(thing)
task.registers['{0}'.format(key)].assign(str(old_list))
else:
new_list = []
for thing in struct.get(key):
new_list.append(thing)
task.registers['{0}'.format(key)].assign(str(new_list))
else:
task.registers['{0}'.format(key)].assign(str(struct.get(key)))
task.update()
message['update_complete'] = True
except Exception as error:
logging.exception(error)
return message.get('update_complete', False)
@gen.coroutine
def modify_remove(self, account, task_uuid, struct):
'''
Modify remove
'''
# riak search index
search_index = 'mango_task_index'
# riak bucket type
bucket_type = 'mango_task'
# riak bucket name
bucket_name = 'tasks'
# solr query
query = 'uuid_register:{0}'.format(task_uuid.rstrip('/'))
# filter query
filter_query = 'account_register:{0}'.format(account.decode('utf-8'))
# search query url
url = "https://{0}/search/query/{1}?wt=json&q={2}&fq={3}".format(
self.solr, search_index, query, filter_query
)
# pretty please, ignore this list of fields from database.
# if you want to include something in here, remember the _register.
# example: labels_register.
IGNORE_ME = ("_yz_id","_yz_rk","_yz_rt","_yz_rb")
# got callback response?
got_response = []
# yours truly
message = {'update_complete':False}
def handle_request(response):
'''
Request Async Handler
'''
if response.error:
logging.error(response.error)
got_response.append({'error':True, 'message': response.error})
else:
got_response.append(json.loads(response.body))
try:
http_client.fetch(
url,
callback=handle_request
)
while len(got_response) == 0:
# Please, don't be careless with the time.
yield gen.sleep(0.0010)
response = got_response[0].get('response')['docs'][0]
riak_key = str(response['_yz_rk'])
bucket = self.kvalue.bucket_type(bucket_type).bucket('{0}'.format(bucket_name))
bucket.set_properties({'search_index': search_index})
task = Map(bucket, riak_key)
for key in struct:
if key not in IGNORE_ME:
if type(struct.get(key)) == list:
task.reload()
old_value = task.registers['{0}'.format(key)].value
if old_value:
old_list = json.loads(old_value.replace("'",'"'))
new_list = [x for x in old_list if x not in struct.get(key)]
task.registers['{0}'.format(key)].assign(str(new_list))
task.update()
message['update_complete'] = True
else:
message['update_complete'] = False
except Exception as error:
logging.exception(error)
return message.get('update_complete', False)
@gen.coroutine
def remove_task(self, account, task_uuid):
'''
Remove task
'''
# Yo, missing history ?
struct = {}
struct['status'] = 'deleted'
message = yield self.modify_task(account, task_uuid, struct)
return message
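# Hedged sketch of the callback-plus-polling pattern used throughout this
# class, in isolation (the URL and helper name below are illustrative only):
#
#   @gen.coroutine
#   def fetch_json(url):
#       got_response = []
#       def handle_request(response):
#           got_response.append(json.loads(response.body))
#       http_client.fetch(url, callback=handle_request)
#       while len(got_response) == 0:
#           yield gen.sleep(0.001)   # poll until the callback fires
#       return got_response[0]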
| agpl-3.0 |
ryfeus/lambda-packs | pytorch/source/PIL/BdfFontFile.py | 4 | 3000 | #
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from . import Image, FontFile
# --------------------------------------------------------------------
# parse X Bitmap Distribution Format (BDF)
# --------------------------------------------------------------------
bdf_slant = {
"R": "Roman",
"I": "Italic",
"O": "Oblique",
"RI": "Reverse Italic",
"RO": "Reverse Oblique",
"OT": "Other"
}
bdf_spacing = {
"P": "Proportional",
"M": "Monospaced",
"C": "Cell"
}
def bdf_char(f):
# skip to STARTCHAR
while True:
s = f.readline()
if not s:
return None
if s[:9] == b"STARTCHAR":
break
id = s[9:].strip().decode('ascii')
# load symbol properties
props = {}
while True:
s = f.readline()
if not s or s[:6] == b"BITMAP":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
# load bitmap
bitmap = []
while True:
s = f.readline()
if not s or s[:7] == b"ENDCHAR":
break
bitmap.append(s[:-1])
bitmap = b"".join(bitmap)
[x, y, l, d] = [int(p) for p in props["BBX"].split()]
[dx, dy] = [int(p) for p in props["DWIDTH"].split()]
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
try:
im = Image.frombytes("1", (x, y), bitmap, "hex", "1")
except ValueError:
# deal with zero-width characters
im = Image.new("1", (x, y))
return id, int(props["ENCODING"]), bbox, im
##
# Font file plugin for the X11 BDF format.
class BdfFontFile(FontFile.FontFile):
def __init__(self, fp):
FontFile.FontFile.__init__(self)
s = fp.readline()
if s[:13] != b"STARTFONT 2.1":
raise SyntaxError("not a valid BDF file")
props = {}
comments = []
while True:
s = fp.readline()
if not s or s[:13] == b"ENDPROPERTIES":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
if s.find(b"LogicalFontDescription") < 0:
comments.append(s[i+1:-1].decode('ascii'))
while True:
c = bdf_char(fp)
if not c:
break
id, ch, (xy, dst, src), im = c
if 0 <= ch < len(self.glyph):
self.glyph[ch] = xy, dst, src, im
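if __name__ == "__main__":
    # Hedged demo (the .bdf path is a placeholder): parse a BDF font and
    # save it in PIL's .pil/.pbm bitmap font format via FontFile.save().
    with open("courB12.bdf", "rb") as fp:
        font = BdfFontFile(fp)
    font.save("courB12")  # writes courB12.pil and courB12.pbm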
| mit |
magvugr/AT | EntVirtual/lib/python2.7/site-packages/django/db/backends/postgresql/client.py | 47 | 2120 | import os
import subprocess
from django.core.files.temp import NamedTemporaryFile
from django.db.backends.base.client import BaseDatabaseClient
from django.utils.six import print_
def _escape_pgpass(txt):
"""
Escape a fragment of a PostgreSQL .pgpass file.
"""
return txt.replace('\\', '\\\\').replace(':', '\\:')
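# For example, _escape_pgpass('pass:word') returns 'pass\\:word', so the
# colon no longer acts as a .pgpass field separator.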
class DatabaseClient(BaseDatabaseClient):
executable_name = 'psql'
@classmethod
def runshell_db(cls, conn_params):
args = [cls.executable_name]
host = conn_params.get('host', '')
port = conn_params.get('port', '')
dbname = conn_params.get('database', '')
user = conn_params.get('user', '')
passwd = conn_params.get('password', '')
if user:
args += ['-U', user]
if host:
args += ['-h', host]
if port:
args += ['-p', str(port)]
args += [dbname]
temp_pgpass = None
try:
if passwd:
# Create temporary .pgpass file.
temp_pgpass = NamedTemporaryFile(mode='w+')
try:
print_(
_escape_pgpass(host) or '*',
str(port) or '*',
_escape_pgpass(dbname) or '*',
_escape_pgpass(user) or '*',
_escape_pgpass(passwd),
file=temp_pgpass,
sep=':',
flush=True,
)
os.environ['PGPASSFILE'] = temp_pgpass.name
except UnicodeEncodeError:
# If the current locale can't encode the data, we let
# the user input the password manually.
pass
subprocess.call(args)
finally:
if temp_pgpass:
temp_pgpass.close()
if 'PGPASSFILE' in os.environ: # unit tests need cleanup
del os.environ['PGPASSFILE']
def runshell(self):
DatabaseClient.runshell_db(self.connection.get_connection_params())
| gpl-3.0 |
cloakedcode/CouchPotatoServer | couchpotato/core/providers/automation/bluray/main.py | 9 | 1053 | from couchpotato.core.helpers.rss import RSS
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.automation.base import Automation
log = CPLog(__name__)
class Bluray(Automation, RSS):
interval = 1800
rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml'
def getIMDBids(self):
movies = []
rss_movies = self.getRSSData(self.rss_url)
for movie in rss_movies:
name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip()
year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip()
if name.find('/') != -1:  # skip double movie releases (titles containing '/')
continue
if tryInt(year) < self.getMinimal('year'):
continue
imdb = self.search(name, year)
if imdb:
if self.isMinimalMovie(imdb):
movies.append(imdb['imdb'])
return movies
| gpl-3.0 |
torufuru/OFPatchPanel | ryu/tests/unit/packet/test_mpls.py | 18 | 1677 | # Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import logging
import inspect
from nose.tools import *
from nose.plugins.skip import Skip, SkipTest
from ryu.lib.packet import mpls
LOG = logging.getLogger(__name__)
class Test_mpls(unittest.TestCase):
label = 29
exp = 6
bsb = 1
ttl = 64
mp = mpls.mpls(label, exp, bsb, ttl)
def setUp(self):
pass
def tearDown(self):
pass
def test_to_string(self):
mpls_values = {'label': self.label,
'exp': self.exp,
'bsb': self.bsb,
'ttl': self.ttl}
_mpls_str = ','.join(['%s=%s' % (k, repr(mpls_values[k]))
for k, v in inspect.getmembers(self.mp)
if k in mpls_values])
mpls_str = '%s(%s)' % (mpls.mpls.__name__, _mpls_str)
eq_(str(self.mp), mpls_str)
eq_(repr(self.mp), mpls_str)
def test_json(self):
jsondict = self.mp.to_jsondict()
mp = mpls.mpls.from_jsondict(jsondict['mpls'])
eq_(str(self.mp), str(mp))
| apache-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.2/tests/modeltests/one_to_one/tests.py | 92 | 5714 | from django.test import TestCase
from django.db import transaction, IntegrityError
from models import Place, Restaurant, Waiter, ManualPrimaryKey, RelatedModel, MultiModel
class OneToOneTests(TestCase):
def setUp(self):
self.p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
self.p1.save()
self.p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
self.p2.save()
self.r = Restaurant(place=self.p1, serves_hot_dogs=True, serves_pizza=False)
self.r.save()
def test_getter(self):
# A Restaurant can access its place.
self.assertEqual(repr(self.r.place), '<Place: Demon Dogs the place>')
# A Place can access its restaurant, if available.
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
# p2 doesn't have an associated restaurant.
self.assertRaises(Restaurant.DoesNotExist, getattr, self.p2, 'restaurant')
def test_setter(self):
# Set the place using assignment notation. Because place is the primary
# key on Restaurant, the save will create a new restaurant
self.r.place = self.p2
self.r.save()
self.assertEqual(repr(self.p2.restaurant), '<Restaurant: Ace Hardware the restaurant>')
self.assertEqual(repr(self.r.place), '<Place: Ace Hardware the place>')
self.assertEqual(self.p2.pk, self.r.pk)
# Set the place back again, using assignment in the reverse direction.
self.p1.restaurant = self.r
self.assertEqual(repr(self.p1.restaurant), '<Restaurant: Demon Dogs the restaurant>')
r = Restaurant.objects.get(pk=self.p1.id)
self.assertEqual(repr(r.place), '<Place: Demon Dogs the place>')
def test_manager_all(self):
# Restaurant.objects.all() just returns the Restaurants, not the Places.
self.assertQuerysetEqual(Restaurant.objects.all(), [
'<Restaurant: Demon Dogs the restaurant>',
])
# Place.objects.all() returns all Places, regardless of whether they
# have Restaurants.
self.assertQuerysetEqual(Place.objects.order_by('name'), [
'<Place: Ace Hardware the place>',
'<Place: Demon Dogs the place>',
])
def test_manager_get(self):
def assert_get_restaurant(**params):
self.assertEqual(repr(Restaurant.objects.get(**params)),
'<Restaurant: Demon Dogs the restaurant>')
assert_get_restaurant(place__id__exact=self.p1.pk)
assert_get_restaurant(place__id=self.p1.pk)
assert_get_restaurant(place__exact=self.p1.pk)
assert_get_restaurant(place__exact=self.p1)
assert_get_restaurant(place=self.p1.pk)
assert_get_restaurant(place=self.p1)
assert_get_restaurant(pk=self.p1.pk)
assert_get_restaurant(place__pk__exact=self.p1.pk)
assert_get_restaurant(place__pk=self.p1.pk)
assert_get_restaurant(place__name__startswith="Demon")
def assert_get_place(**params):
self.assertEqual(repr(Place.objects.get(**params)),
'<Place: Demon Dogs the place>')
assert_get_place(restaurant__place__exact=self.p1.pk)
assert_get_place(restaurant__place__exact=self.p1)
assert_get_place(restaurant__place__pk=self.p1.pk)
assert_get_place(restaurant__exact=self.p1.pk)
assert_get_place(restaurant__exact=self.r)
assert_get_place(restaurant__pk=self.p1.pk)
assert_get_place(restaurant=self.p1.pk)
assert_get_place(restaurant=self.r)
assert_get_place(id__exact=self.p1.pk)
assert_get_place(pk=self.p1.pk)
def test_foreign_key(self):
# Add a Waiter to the Restaurant.
w = self.r.waiter_set.create(name='Joe')
w.save()
self.assertEqual(repr(w), '<Waiter: Joe the waiter at Demon Dogs the restaurant>')
# Query the waiters
def assert_filter_waiters(**params):
self.assertQuerysetEqual(Waiter.objects.filter(**params), [
'<Waiter: Joe the waiter at Demon Dogs the restaurant>'
])
assert_filter_waiters(restaurant__place__exact=self.p1.pk)
assert_filter_waiters(restaurant__place__exact=self.p1)
assert_filter_waiters(restaurant__place__pk=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1.pk)
assert_filter_waiters(restaurant__exact=self.p1)
assert_filter_waiters(restaurant__pk=self.p1.pk)
assert_filter_waiters(restaurant=self.p1.pk)
assert_filter_waiters(restaurant=self.r)
assert_filter_waiters(id__exact=self.p1.pk)
assert_filter_waiters(pk=self.p1.pk)
# Delete the restaurant; the waiter should also be removed
r = Restaurant.objects.get(pk=self.p1.pk)
r.delete()
self.assertEqual(Waiter.objects.count(), 0)
def test_multiple_o2o(self):
# One-to-one fields still work if you create your own primary key
o1 = ManualPrimaryKey(primary_key="abc123", name="primary")
o1.save()
o2 = RelatedModel(link=o1, name="secondary")
o2.save()
# You can have multiple one-to-one fields on a model, too.
x1 = MultiModel(link1=self.p1, link2=o1, name="x1")
x1.save()
self.assertEqual(repr(o1.multimodel), '<MultiModel: Multimodel x1>')
# This will fail because each one-to-one field must be unique (and
# link2=o1 was used for x1, above).
sid = transaction.savepoint()
mm = MultiModel(link1=self.p2, link2=o1, name="x1")
self.assertRaises(IntegrityError, mm.save)
transaction.savepoint_rollback(sid)
| mit |
classner/fertilized-devtools | binding_generator/ordered_set.py | 1 | 1936 | # See http://code.activestate.com/recipes/576694/.
import collections
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def update(self, setvalues):
for key in setvalues:
self.add(key)
def add(self, key):
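# Splice the new node in at the tail in O(1): its prev pointer is the
# old tail (end[1]) and its next pointer is the sentinel itself.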
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def pop(self, last=True):
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
if __name__ == '__main__':
s = OrderedSet('abracadaba')
t = OrderedSet('simsalabim')
print(s | t)
print(s & t)
print(s - t)
| bsd-2-clause |
timopulkkinen/BubbleFish | tools/generate_stubs/generate_stubs.py | 63 | 41074 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates windows and posix stub files for a given set of signatures.
For libraries that need to be loaded outside of the standard executable startup
path mechanism, stub files need to be generated for the wanted functions. In
windows, this is done via "def" files and the delay load mechanism. On a posix
system, a set of stub functions need to be generated that dispatch to functions
found via dlsym.
This script takes a set of files, where each file is a list of C-style
signatures (one signature per line). The output is either a windows def file,
or a header + implementation file of stubs suitable for use in a posix system.
This script also handles variadic functions, e.g.
void printf(const char* s, ...);
TODO(hclam): Fix the situation for variadic functions.
A stub for the above function will be generated, and inside the stub the
call is translated to:
void printf(const char* s, ...) {
printf_ptr(s, (void*)arg1);
}
Only one argument from the variadic arguments is used, and it is passed as
type void*.
"""
__author__ = 'ajwong@chromium.org (Albert J. Wong)'
import optparse
import os
import re
import string
import subprocess
import sys
class Error(Exception):
pass
class BadSignatureError(Error):
pass
class SubprocessError(Error):
def __init__(self, message, error_code):
Error.__init__(self)
self.message = message
self.error_code = error_code
def __str__(self):
return 'Failed with code %s: %s' % (repr(self.error_code), self.message)
# Regular expression used to parse function signatures in the input files.
# The regex is built around identifying the "identifier" for the function name.
# We consider the identifier to be the string that follows these constraints:
#
# 1) Starts with [_a-zA-Z] (C++ spec 2.10).
# 2) Continues with [_a-zA-Z0-9] (C++ spec 2.10).
# 3) Precedes an opening parenthesis by 0 or more whitespace chars.
#
# From that, all preceding characters are considered the return value.
# Trailing characters should have a substring matching the form (.*). That
# is considered the arguments.
SIGNATURE_REGEX = re.compile('(?P<return_type>.+?)'
'(?P<name>[_a-zA-Z][_a-zA-Z0-9]+)\s*'
'\((?P<params>.*?)\)')
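# For example, the line 'int Foo(void* p, size_t n)' yields
# return_type='int ', name='Foo', params='void* p, size_t n'
# (whitespace is stripped later by ParseSignatures).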
# Used for generating C++ identifiers.
INVALID_C_IDENT_CHARS = re.compile('[^_a-zA-Z0-9]')
# Constants defining the supported file type options.
FILE_TYPE_WIN_X86 = 'windows_lib'
FILE_TYPE_WIN_X64 = 'windows_lib_x64'
FILE_TYPE_POSIX_STUB = 'posix_stubs'
FILE_TYPE_WIN_DEF = 'windows_def'
# Template for generating a stub function definition. Includes a forward
# declaration marking the symbol as weak. This template takes the following
# named parameters.
# return_type: The return type.
# name: The name of the function.
# params: The parameters to the function.
# return_prefix: 'return ' if this function is not void. '' otherwise.
# arg_list: The arguments used to call the stub function.
STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(name)s(%(params)s) {
%(return_prefix)s%(name)s_ptr(%(arg_list)s);
}""")
# Template for generating a variadic stub function definition with return
# value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# return_type: The return type.
# name: The name of the function.
# params: The parameters to the function.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(return_type)s ret___ = %(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
return ret___;
}""")
# Template for generating a variadic stub function definition without
# return value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# name: The name of the function.
# params: The parameters to the function.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VOID_VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern void %(name)s(%(params)s) __attribute__((weak));
void %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
}""")
# Template for the preamble for the stub header file with the header guards,
# standard set of includes, and namespace opener. This template takes the
# following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_PREAMBLE = """// This is generated file. Do not modify directly.
#ifndef %(guard_name)s
#define %(guard_name)s
#include <map>
#include <string>
#include <vector>
#include "base/logging.h"
namespace %(namespace)s {
"""
# Template for the end of the stub header. This closes the namespace and the
# header guards. This template takes the following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_CLOSER = """} // namespace %(namespace)s
#endif // %(guard_name)s
"""
# The standard includes needed for the stub implementation file. Takes one
# string substitution with the path to the associated stub header file.
IMPLEMENTATION_PREAMBLE = """// This is generated file. Do not modify directly.
#include "%s"
#include <stdlib.h> // For NULL.
#include <dlfcn.h> // For dysym, dlopen.
#include <map>
#include <vector>
"""
# The start and end templates for the enum definitions used by the Umbrella
# initializer.
UMBRELLA_ENUM_START = """// Enum and typedef for umbrella initializer.
enum StubModules {
"""
UMBRELLA_ENUM_END = """ kNumStubModules
};
"""
# Start and end of the extern "C" section for the implementation contents.
IMPLEMENTATION_CONTENTS_C_START = """extern "C" {
"""
IMPLEMENTATION_CONTENTS_C_END = """
} // extern "C"
"""
# Templates for the start and end of a namespace. Takes one parameter, the
# namespace name.
NAMESPACE_START = """namespace %s {
"""
NAMESPACE_END = """} // namespace %s
"""
# Comment to include before the section declaring all the function pointers
# used by the stub functions.
FUNCTION_POINTER_SECTION_COMMENT = (
"""// Static pointers that will hold the location of the real function
// implementations after the module has been loaded.
""")
# Template for the module initialization check function. This template
# takes two parameteres: the function name, and the conditional used to
# verify the module's initialization.
MODULE_INITIALIZATION_CHECK_FUNCTION = (
"""// Returns true if all stubs have been properly initialized.
bool %s() {
if (%s) {
return true;
} else {
return false;
}
}
""")
# Template for the line that initialize the stub pointer. This template takes
# the following named parameters:
# name: The name of the function.
# return_type: The return type.
# params: The parameters to the function.
STUB_POINTER_INITIALIZER = """ %(name)s_ptr =
reinterpret_cast<%(return_type)s (*)(%(parameters)s)>(
dlsym(module, "%(name)s"));
VLOG_IF(1, !%(name)s_ptr) << "Couldn't load %(name)s, dlerror() says:\\n"
<< dlerror();
"""
# Template for module initializer function start and end. This template takes
# one parameter which is the initializer function name.
MODULE_INITIALIZE_START = """// Initializes the module stubs.
void %s(void* module) {
"""
MODULE_INITIALIZE_END = """}
"""
# Template for module uninitializer function start and end. This template
# takes one parameter which is the initializer function name.
MODULE_UNINITIALIZE_START = (
"""// Uninitialize the module stubs. Reset pointers to NULL.
void %s() {
""")
MODULE_UNINITIALIZE_END = """}
"""
# Open namespace and add typedef for internal data structures used by the
# umbrella initializer.
UMBRELLA_INITIALIZER_START = """namespace %s {
typedef std::map<StubModules, void*> StubHandleMap;
"""
# Function to close DSOs on error and clean up dangling references.
UMBRELLA_INITIALIZER_CLEANUP_FUNCTION = (
"""static void CloseLibraries(StubHandleMap* stub_handles) {
for (StubHandleMap::const_iterator it = stub_handles->begin();
it != stub_handles->end();
++it) {
dlclose(it->second);
}
stub_handles->clear();
}
""")
# Function to initialize each DSO for the given paths.
UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START = (
"""bool InitializeStubs(const StubPathMap& path_map) {
StubHandleMap opened_libraries;
for (int i = 0; i < kNumStubModules; ++i) {
StubModules cur_module = static_cast<StubModules>(i);
// If a module is missing, we fail.
StubPathMap::const_iterator it = path_map.find(cur_module);
if (it == path_map.end()) {
CloseLibraries(&opened_libraries);
return false;
}
// Otherwise, attempt to dlopen the library.
const std::vector<std::string>& paths = it->second;
bool module_opened = false;
for (std::vector<std::string>::const_iterator dso_path = paths.begin();
!module_opened && dso_path != paths.end();
++dso_path) {
void* handle = dlopen(dso_path->c_str(), RTLD_LAZY);
if (handle != NULL) {
module_opened = true;
opened_libraries[cur_module] = handle;
} else {
VLOG(1) << "dlopen(" << dso_path->c_str() << ") failed, "
<< "dlerror() says:\\n" << dlerror();
}
}
if (!module_opened) {
CloseLibraries(&opened_libraries);
return false;
}
}
""")
# Template to generate code to check if each module initializer correctly
# completed, and cleanup on failures. This template takes the following
# named parameters.
# conditional: The conditional expression for successful initialization.
# uninitializers: The statements needed to uninitialize the modules.
UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP = (
""" // Check that each module is initialized correctly.
// Close all previously opened libraries on failure.
if (%(conditional)s) {
%(uninitializers)s;
CloseLibraries(&opened_libraries);
return false;
}
return true;
}
""")
# Template for Initialize, Uninitialize, and IsInitialized functions for each
# module. This template takes the following named parameters:
# initialize: Name of the Initialize function.
# uninitialize: Name of the Uninitialize function.
# is_initialized: Name of the IsInitialized function.
MODULE_FUNCTION_PROTOTYPES = """bool %(is_initialized)s();
void %(initialize)s(void* module);
void %(uninitialize)s();
"""
# Template for umbrella initializer declaration and associated datatypes.
UMBRELLA_INITIALIZER_PROTOTYPE = (
"""typedef std::map<StubModules, std::vector<std::string> > StubPathMap;
// Umbrella initializer for all the modules in this stub file.
bool InitializeStubs(const StubPathMap& path_map);
""")
def ExtractModuleName(infile_path):
"""Infers the module name from the input file path.
The input filename is supposed to be in the form "ModuleName.sigs".
This function splits the filename from the extention on that basename of
the path and returns that as the module name.
Args:
infile_path: String holding the path to the input file.
Returns:
The module name as a string.
"""
basename = os.path.basename(infile_path)
# This loop continously removes suffixes of the filename separated by a "."
# character.
while 1:
new_basename = os.path.splitext(basename)[0]
if basename == new_basename:
break
else:
basename = new_basename
return basename
def ParseSignatures(infile):
"""Parses function signatures in the input file.
This function parses a file of signatures into a list of dictionaries that
represent the function signatures in the input file. Each dictionary has
the following keys:
return_type: A string with the return type.
name: A string with the name of the function.
params: A list of each function parameter declaration (type + name)
The format of the input file is one C-style function signature per line, no
trailing semicolon. Empty lines are allowed. An empty line is a line that
consists purely of whitespace. Lines that begin with a # are considered
comment lines and are ignored.
We assume that "int foo(void)" is the same as "int foo()", which is not
true in C where "int foo()" is equivalent to "int foo(...)". Our generated
code is C++, and we do not handle varargs, so this is a case that can be
ignored for now.
Args:
infile: File object holding a text file of function signatures.
Returns:
A list of dictionaries, where each dictionary represents one function
signature.
Raises:
BadSignatureError: A line could not be parsed as a signature.
"""
signatures = []
for line in infile:
line = line.strip()
if line and line[0] != '#':
m = SIGNATURE_REGEX.match(line)
if m is None:
raise BadSignatureError('Unparsable line: %s' % line)
signatures.append(
{'return_type': m.group('return_type').strip(),
'name': m.group('name').strip(),
'params': [arg.strip() for arg in m.group('params').split(',')]})
return signatures
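# For example, feeding a file containing the single line
#   int Foo(void* p, size_t n)
# produces:
#   [{'return_type': 'int', 'name': 'Foo', 'params': ['void* p', 'size_t n']}]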
def WriteWindowsDefFile(module_name, signatures, outfile):
"""Writes a windows def file to the given output file object.
The def file format is basically a list of function names. Generation is
simple. After outputting the LIBRARY and EXPORTS lines, print out each
function name, one to a line, preceded by 2 spaces.
Args:
module_name: The name of the module we are writing a stub for.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
outfile: File handle to populate with definitions.
"""
outfile.write('LIBRARY %s\n' % module_name)
outfile.write('EXPORTS\n')
for sig in signatures:
outfile.write(' %s\n' % sig['name'])
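# For a hypothetical module "avcodec" exporting Foo and Bar, the emitted
# file reads:
#   LIBRARY avcodec
#   EXPORTS
#     Foo
#     Bar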
def QuietRun(args, filter=None, write_to=sys.stdout):
"""Invoke |args| as command via subprocess.Popen, filtering lines starting
with |filter|."""
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = popen.communicate()
for line in out.splitlines():
if not filter or not line.startswith(filter):
write_to.write(line + '\n')
return popen.returncode
def CreateWindowsLib(module_name, signatures, intermediate_dir, outdir_path,
machine):
"""Creates a windows library file.
Calling this function will create a lib file in the outdir_path that exports
the signatures passed into the object. A temporary def file will be created
in the intermediate_dir.
Args:
module_name: The name of the module we are writing a stub for.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
intermediate_dir: The directory where the generated .def files should go.
outdir_path: The directory where generated .lib files should go.
machine: String holding the machine type, 'X86' or 'X64'.
Raises:
SubprocessError: If invoking the windows "lib" tool fails, this is raised
with the error code.
"""
def_file_path = os.path.join(intermediate_dir,
module_name + '.def')
lib_file_path = os.path.join(outdir_path,
module_name + '.lib')
outfile = open(def_file_path, 'w')
try:
WriteWindowsDefFile(module_name, signatures, outfile)
finally:
outfile.close()
# Invoke the "lib" program on Windows to create stub .lib files for the
# generated definitions. These .lib files can then be used during
# delayloading of the dynamic libraries.
ret = QuietRun(['lib', '/nologo',
'/machine:' + machine,
'/def:' + def_file_path,
'/out:' + lib_file_path],
filter=' Creating library')
if ret != 0:
raise SubprocessError(
'Failed creating %s for %s' % (lib_file_path, def_file_path),
ret)
class PosixStubWriter(object):
"""Creates a file of stub functions for a library that is opened via dlopen.
Windows provides a feature in its compiler known as delay loading, which
effectively generates a set of stub functions for a dynamic library that
delays loading of the dynamic library/resolution of the symbols until one of
the needed functions are accessed.
In posix, RTLD_LAZY does something similar with DSOs. This is the default
link mode for DSOs. However, even though the symbol is not resolved until
first usage, the DSO must be present at load time of the main binary.
To simulate the windows delay load procedure, we need to create a set of
stub functions that allow for correct linkage of the main binary, but
dispatch to the dynamically resolved symbol when the module is initialized.
This class takes a list of function signatures, and generates a set of stub
functions plus initialization code for them.
"""
def __init__(self, module_name, signatures):
"""Initializes PosixStubWriter for this set of signatures and module_name.
Args:
module_name: The name of the module we are writing a stub for.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
"""
self.signatures = signatures
self.module_name = module_name
@classmethod
def CStyleIdentifier(cls, identifier):
"""Generates a C style identifier.
The module_name has all invalid identifier characters removed (anything
that's not [_a-zA-Z0-9]) and is run through string.capwords to try
and approximate camel case.
Args:
identifier: The string with the module name to turn to C-style.
Returns:
A string that can be used as part of a C identifier.
"""
return string.capwords(re.sub(INVALID_C_IDENT_CHARS, '', identifier))
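# e.g. CStyleIdentifier('ffmpeg-sumo') == 'Ffmpegsumo': the hyphen is
# removed and the remaining word is capitalized.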
@classmethod
def EnumName(cls, module_name):
"""Gets the enum name for the module.
Takes the module name and creates a suitable enum name. The module_name
is munged to be a valid C identifier then prefixed with the string
"kModule" to generate a Google style enum name.
Args:
module_name: The name of the module to generate an enum name for.
Returns:
A string with the name of the enum value representing this module.
"""
return 'kModule%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def IsInitializedName(cls, module_name):
"""Gets the name of function that checks initialization of this module.
The name is in the format IsModuleInitialized. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the initialization check function.
"""
return 'Is%sInitialized' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def InitializeModuleName(cls, module_name):
"""Gets the name of the function that initializes this module.
The name is in the format InitializeModule. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the initialization function.
"""
return 'Initialize%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def UninitializeModuleName(cls, module_name):
"""Gets the name of the function that uninitializes this module.
The name is in the format UninitializeModule. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the uninitialization function.
"""
return 'Uninitialize%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def StubFunctionPointer(cls, signature):
"""Generates a function pointer declaration for the given signature.
Args:
signature: A signature hash, as produced by ParseSignatures,
representing the function signature.
Returns:
A string with the declaration of the function pointer for the signature.
"""
return 'static %s (*%s_ptr)(%s) = NULL;' % (signature['return_type'],
signature['name'],
', '.join(signature['params']))
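# e.g. for 'int Foo(int x)' this emits:
#   static int (*Foo_ptr)(int x) = NULL;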
@classmethod
def StubFunction(cls, signature):
"""Generates a stub function definition for the given signature.
The function definitions are created with __attribute__((weak)) so that
they may be overridden by a real static link or mock versions to be used
when testing.
Args:
signature: A signature hash, as produced by ParseSignatures,
representing the function signature.
Returns:
A string with the stub function definition.
"""
return_prefix = ''
if signature['return_type'] != 'void':
return_prefix = 'return '
# Generate the argument list.
arguments = [re.split('[\*& ]', arg)[-1].strip() for arg in
signature['params']]
arg_list = ', '.join(arguments)
if arg_list == 'void':
arg_list = ''
if arg_list != '' and len(arguments) > 1 and arguments[-1] == '...':
# If the last argument is ... then this is a variadic function.
if return_prefix != '':
return VARIADIC_STUB_FUNCTION_DEFINITION % {
'return_type': signature['return_type'],
'name': signature['name'],
'params': ', '.join(signature['params']),
'arg_list': ', '.join(arguments[0:-1]),
'last_named_arg': arguments[-2]}
else:
return VOID_VARIADIC_STUB_FUNCTION_DEFINITION % {
'name': signature['name'],
'params': ', '.join(signature['params']),
'arg_list': ', '.join(arguments[0:-1]),
'last_named_arg': arguments[-2]}
else:
# This is a regular function.
return STUB_FUNCTION_DEFINITION % {
'return_type': signature['return_type'],
'name': signature['name'],
'params': ', '.join(signature['params']),
'return_prefix': return_prefix,
'arg_list': arg_list}
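# e.g. for 'int Foo(int x)' the generated stub reads:
#   extern int Foo(int x) __attribute__((weak));
#   int Foo(int x) {
#     return Foo_ptr(x);
#   }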
@classmethod
def WriteImplementationPreamble(cls, header_path, outfile):
"""Write the necessary includes for the implementation file.
Args:
header_path: The path to the header file.
outfile: The file handle to populate.
"""
outfile.write(IMPLEMENTATION_PREAMBLE % header_path)
@classmethod
def WriteUmbrellaInitializer(cls, module_names, namespace, outfile):
"""Writes a single function that will open + initialize each module.
This initializer takes an STL map that lists the correct
dlopen target for each module. The map type is
std::map<StubModules, std::vector<std::string> >, matching each module
to a list of paths to try in dlopen.
This function is an all-or-nothing function. If any module fails to load,
all other modules are dlclosed, and the function returns. Though it is
not enforced, this function should only be called once.
Args:
module_names: A list with the names of the modules in this stub file.
namespace: The namespace these functions should be in.
outfile: The file handle to populate with pointer definitions.
"""
outfile.write(UMBRELLA_INITIALIZER_START % namespace)
outfile.write(UMBRELLA_INITIALIZER_CLEANUP_FUNCTION)
# Create the initialization function that calls all module initializers,
# checks if they succeeded, and backs out module loads on an error.
outfile.write(UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START)
outfile.write(
'\n // Initialize each module if we have not already failed.\n')
for module in module_names:
outfile.write(' %s(opened_libraries[%s]);\n' %
(PosixStubWriter.InitializeModuleName(module),
PosixStubWriter.EnumName(module)))
outfile.write('\n')
# Output code to check the initialization status, clean up on error.
initializer_checks = ['!%s()' % PosixStubWriter.IsInitializedName(name)
for name in module_names]
uninitializers = ['%s()' % PosixStubWriter.UninitializeModuleName(name)
for name in module_names]
outfile.write(UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP % {
'conditional': ' ||\n '.join(initializer_checks),
'uninitializers': ';\n '.join(uninitializers)})
outfile.write('\n} // namespace %s\n' % namespace)
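  # Illustrative use of the generated code (enum and paths hypothetical): the
  # caller fills the StubPathMap described above, e.g.
  #   path_map[kModuleFoo] = {"/usr/lib/libfoo.so.1", "/usr/lib/libfoo.so"};
  # and the emitted initializer tries each path with dlopen, backing out all
  # modules if any single module fails to load.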
@classmethod
def WriteHeaderContents(cls, module_names, namespace, header_guard, outfile):
"""Writes a header file for the stub file generated for module_names.
The header file exposes the following:
    1) An enum, StubModules, with an entry for each module.
2) A typedef for a StubPathMap allowing for specification of paths to
search for each module.
3) The IsInitialized/Initialize/Uninitialize functions for each module.
4) An umbrella initialize function for all modules.
Args:
module_names: A list with the names of each module in this stub file.
namespace: The namespace these functions should be in.
header_guard: The macro to use as our header guard.
outfile: The output handle to populate.
"""
outfile.write(STUB_HEADER_PREAMBLE %
{'guard_name': header_guard, 'namespace': namespace})
    # Generate the initializer prototypes for each module.
outfile.write('// Individual module initializer functions.\n')
for name in module_names:
outfile.write(MODULE_FUNCTION_PROTOTYPES % {
'is_initialized': PosixStubWriter.IsInitializedName(name),
'initialize': PosixStubWriter.InitializeModuleName(name),
'uninitialize': PosixStubWriter.UninitializeModuleName(name)})
# Generate the enum for umbrella initializer.
outfile.write(UMBRELLA_ENUM_START)
outfile.write(' %s = 0,\n' % PosixStubWriter.EnumName(module_names[0]))
for name in module_names[1:]:
outfile.write(' %s,\n' % PosixStubWriter.EnumName(name))
outfile.write(UMBRELLA_ENUM_END)
outfile.write(UMBRELLA_INITIALIZER_PROTOTYPE)
outfile.write(STUB_HEADER_CLOSER % {
'namespace': namespace, 'guard_name':
header_guard})
def WriteImplementationContents(self, namespace, outfile):
"""Given a file handle, write out the stub definitions for this module.
Args:
namespace: The namespace these functions should be in.
outfile: The file handle to populate.
"""
outfile.write(IMPLEMENTATION_CONTENTS_C_START)
self.WriteFunctionPointers(outfile)
self.WriteStubFunctions(outfile)
outfile.write(IMPLEMENTATION_CONTENTS_C_END)
outfile.write(NAMESPACE_START % namespace)
self.WriteModuleInitializeFunctions(outfile)
outfile.write(NAMESPACE_END % namespace)
def WriteFunctionPointers(self, outfile):
"""Write the function pointer declarations needed by the stubs.
We need function pointers to hold the actual location of the function
implementation returned by dlsym. This function outputs a pointer
definition for each signature in the module.
    Pointers will be named with the following pattern "FunctionName_ptr".
Args:
outfile: The file handle to populate with pointer definitions.
"""
outfile.write(FUNCTION_POINTER_SECTION_COMMENT)
for sig in self.signatures:
outfile.write('%s\n' % PosixStubWriter.StubFunctionPointer(sig))
outfile.write('\n')
def WriteStubFunctions(self, outfile):
"""Write the function stubs to handle dispatching to real implementations.
Functions that have a return type other than void will look as follows:
ReturnType FunctionName(A a) {
return FunctionName_ptr(a);
}
Functions with a return type of void will look as follows:
void FunctionName(A a) {
FunctionName_ptr(a);
}
Args:
outfile: The file handle to populate.
"""
outfile.write('// Stubs that dispatch to the real implementations.\n')
for sig in self.signatures:
outfile.write('%s\n' % PosixStubWriter.StubFunction(sig))
def WriteModuleInitializeFunctions(self, outfile):
"""Write functions to initialize/query initlialization of the module.
    This creates the IsModuleInitialized, InitializeModule, and
    UninitializeModule functions, where "Module" is replaced with the module
    name, first letter capitalized.
The InitializeModule function takes a handle that is retrieved from dlopen
and attempts to assign each function pointer above via dlsym.
    The IsModuleInitialized function returns true if none of the required
    function pointers are NULL.
Args:
outfile: The file handle to populate.
"""
ptr_names = ['%s_ptr' % sig['name'] for sig in self.signatures]
# Construct the conditional expression to check the initialization of
    # all the function pointers above. It should generate a conjunction
# with each pointer on its own line, indented by six spaces to match
# the indentation level of MODULE_INITIALIZATION_CHECK_FUNCTION.
initialization_conditional = ' &&\n '.join(ptr_names)
outfile.write(MODULE_INITIALIZATION_CHECK_FUNCTION % (
PosixStubWriter.IsInitializedName(self.module_name),
initialization_conditional))
# Create function that initializes the module.
outfile.write(MODULE_INITIALIZE_START %
PosixStubWriter.InitializeModuleName(self.module_name))
for sig in self.signatures:
outfile.write(STUB_POINTER_INITIALIZER % {
'name': sig['name'],
'return_type': sig['return_type'],
'parameters': ', '.join(sig['params'])})
outfile.write(MODULE_INITIALIZE_END)
# Create function that uninitializes the module (sets all pointers to
# NULL).
outfile.write(MODULE_UNINITIALIZE_START %
PosixStubWriter.UninitializeModuleName(self.module_name))
for sig in self.signatures:
outfile.write(' %s_ptr = NULL;\n' % sig['name'])
outfile.write(MODULE_UNINITIALIZE_END)
def CreateOptionParser():
"""Creates an OptionParser for the configuration options of script.
Returns:
    An OptionParser object.
"""
parser = optparse.OptionParser(usage='usage: %prog [options] input')
parser.add_option('-o',
'--output',
dest='out_dir',
default=None,
help='Output location.')
parser.add_option('-i',
'--intermediate_dir',
dest='intermediate_dir',
default=None,
help=('Location of intermediate files. Ignored for %s type'
% FILE_TYPE_WIN_DEF))
parser.add_option('-t',
'--type',
dest='type',
default=None,
help=('Type of file. Valid types are "%s" or "%s" or "%s" '
'or "%s"' %
(FILE_TYPE_POSIX_STUB, FILE_TYPE_WIN_X86,
FILE_TYPE_WIN_X64, FILE_TYPE_WIN_DEF)))
parser.add_option('-s',
'--stubfile_name',
dest='stubfile_name',
default=None,
help=('Name of posix_stubs output file. Only valid with '
'%s type.' % FILE_TYPE_POSIX_STUB))
parser.add_option('-p',
'--path_from_source',
dest='path_from_source',
default=None,
help=('The relative path from the project root that the '
'generated file should consider itself part of (eg. '
'third_party/ffmpeg). This is used to generate the '
'header guard and namespace for our initializer '
'functions and does NOT affect the physical output '
'location of the file like -o does. Ignored for '
'%s and %s types.' %
(FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
parser.add_option('-e',
'--extra_stub_header',
dest='extra_stub_header',
default=None,
help=('File to insert after the system includes in the '
                          'generated stub implementation file. Ignored for '
'%s and %s types.' %
(FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
parser.add_option('-m',
'--module_name',
dest='module_name',
default=None,
help=('Name of output DLL or LIB for DEF creation using '
'%s type.' % FILE_TYPE_WIN_DEF))
return parser
def ParseOptions():
"""Parses the options and terminates program if they are not sane.
Returns:
The pair (optparse.OptionValues, [string]), that is the output of
a successful call to parser.parse_args().
"""
parser = CreateOptionParser()
options, args = parser.parse_args()
if not args:
parser.error('No inputs specified')
if options.out_dir is None:
parser.error('Output location not specified')
if (options.type not in
[FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64, FILE_TYPE_POSIX_STUB,
FILE_TYPE_WIN_DEF]):
parser.error('Invalid output file type: %s' % options.type)
if options.type == FILE_TYPE_POSIX_STUB:
if options.stubfile_name is None:
parser.error('Output file name needed for %s' % FILE_TYPE_POSIX_STUB)
if options.path_from_source is None:
parser.error('Path from source needed for %s' % FILE_TYPE_POSIX_STUB)
if options.type == FILE_TYPE_WIN_DEF:
if options.module_name is None:
parser.error('Module name needed for %s' % FILE_TYPE_WIN_DEF)
return options, args
def EnsureDirExists(dir):
"""Creates a directory. Does not use the more obvious 'if not exists: create'
  to avoid a race with other invocations of the same code, which would error out
on makedirs if another invocation has succeeded in creating the directory
since the existence check."""
try:
os.makedirs(dir)
  except OSError:
if not os.path.isdir(dir):
raise
def CreateOutputDirectories(options):
"""Creates the intermediate and final output directories.
Given the parsed options, create the intermediate and final output
directories if they do not exist. Returns the paths to both directories
as a pair.
Args:
options: An OptionParser.OptionValues object with the parsed options.
Returns:
The pair (out_dir, intermediate_dir), both of which are strings.
"""
  out_dir = os.path.normpath(options.out_dir)
  # Only normalize intermediate_dir when it was supplied; it may be None.
  if options.intermediate_dir is None:
    intermediate_dir = out_dir
  else:
    intermediate_dir = os.path.normpath(options.intermediate_dir)
EnsureDirExists(out_dir)
EnsureDirExists(intermediate_dir)
return out_dir, intermediate_dir
def CreateWindowsLibForSigFiles(sig_files, out_dir, intermediate_dir, machine):
"""For each signature file, create a windows lib.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated libs go.
    intermediate_dir: String holding path to the directory where generated
      intermediate artifacts go.
machine: String holding the machine type, 'X86' or 'X64'.
"""
for input_path in sig_files:
infile = open(input_path, 'r')
try:
signatures = ParseSignatures(infile)
module_name = ExtractModuleName(os.path.basename(input_path))
CreateWindowsLib(module_name, signatures, intermediate_dir, out_dir,
machine)
finally:
infile.close()
def CreateWindowsDefForSigFiles(sig_files, out_dir, module_name):
"""For all signature files, create a single windows def file.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated def goes.
module_name: Name of the output DLL or LIB which will link in the def file.
"""
signatures = []
for input_path in sig_files:
infile = open(input_path, 'r')
try:
signatures += ParseSignatures(infile)
finally:
infile.close()
def_file_path = os.path.join(
out_dir, os.path.splitext(os.path.basename(module_name))[0] + '.def')
outfile = open(def_file_path, 'w')
try:
WriteWindowsDefFile(module_name, signatures, outfile)
finally:
outfile.close()
def CreatePosixStubsForSigFiles(sig_files, stub_name, out_dir,
intermediate_dir, path_from_source,
extra_stub_header):
"""Create a posix stub library with a module for each signature file.
Args:
sig_files: Array of strings with the paths to each signature file.
stub_name: String with the basename of the generated stub file.
out_dir: String holding path to directory for the .h files.
intermediate_dir: String holding path to directory for the .cc files.
path_from_source: String with relative path of generated files from the
project root.
extra_stub_header: String with path to file of extra lines to insert
into the generated header for the stub library.
"""
header_base_name = stub_name + '.h'
header_path = os.path.join(out_dir, header_base_name)
impl_path = os.path.join(intermediate_dir, stub_name + '.cc')
module_names = [ExtractModuleName(path) for path in sig_files]
namespace = path_from_source.replace('/', '_').lower()
header_guard = '%s_' % namespace.upper()
header_include_path = os.path.join(path_from_source, header_base_name)
# First create the implementation file.
impl_file = open(impl_path, 'w')
try:
# Open the file, and create the preamble which consists of a file
# header plus any necessary includes.
PosixStubWriter.WriteImplementationPreamble(header_include_path,
impl_file)
if extra_stub_header is not None:
extra_header_file = open(extra_stub_header, 'r')
try:
impl_file.write('\n')
for line in extra_header_file:
impl_file.write(line)
impl_file.write('\n')
finally:
extra_header_file.close()
# For each signature file, generate the stub population functions
# for that file. Each file represents one module.
for input_path in sig_files:
name = ExtractModuleName(input_path)
infile = open(input_path, 'r')
try:
signatures = ParseSignatures(infile)
finally:
infile.close()
writer = PosixStubWriter(name, signatures)
writer.WriteImplementationContents(namespace, impl_file)
# Lastly, output the umbrella function for the file.
PosixStubWriter.WriteUmbrellaInitializer(module_names, namespace,
impl_file)
finally:
impl_file.close()
# Then create the associated header file.
header_file = open(header_path, 'w')
try:
PosixStubWriter.WriteHeaderContents(module_names, namespace,
header_guard, header_file)
finally:
header_file.close()
def main():
options, args = ParseOptions()
out_dir, intermediate_dir = CreateOutputDirectories(options)
if options.type == FILE_TYPE_WIN_X86:
CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X86')
elif options.type == FILE_TYPE_WIN_X64:
CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X64')
elif options.type == FILE_TYPE_POSIX_STUB:
CreatePosixStubsForSigFiles(args, options.stubfile_name, out_dir,
intermediate_dir, options.path_from_source,
options.extra_stub_header)
elif options.type == FILE_TYPE_WIN_DEF:
CreateWindowsDefForSigFiles(args, out_dir, options.module_name)
if __name__ == '__main__':
main()
| bsd-3-clause |
The-Compiler/qutebrowser | qutebrowser/browser/webengine/certificateerror.py | 2 | 1605 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineCertificateError."""
from PyQt5.QtWebEngineWidgets import QWebEngineCertificateError
from qutebrowser.utils import usertypes, utils, debug
class CertificateErrorWrapper(usertypes.AbstractCertificateErrorWrapper):
"""A wrapper over a QWebEngineCertificateError."""
def __init__(self, error):
super().__init__(error)
self.ignore = False
def __str__(self):
return self._error.errorDescription()
def __repr__(self):
return utils.get_repr(
self, error=debug.qenum_key(QWebEngineCertificateError,
self._error.error()),
string=str(self))
def url(self):
return self._error.url()
def is_overridable(self):
return self._error.isOverridable()
| gpl-3.0 |
aglitke/vdsm | vdsm_hooks/vmdisk/before_vm_start.py | 2 | 2632 | #!/usr/bin/python
import os
import sys
import hooking
import traceback
'''
vmdisk hook:
add additional disk image for a VM (raw or qcow2)
syntax:
vmdisk=/path/to/disk.img:qcow2,/other/disk.img:raw
'''
driver_types = ('raw', 'qcow2')
def indexToDiskName(i):
s = ''
while True:
s = chr(ord('a') + i % 26) + s
i = i / 26
if i == 0:
break
return 'vd' + (s or 'a')
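# Illustrative values: indexToDiskName(0) == 'vda',
# indexToDiskName(25) == 'vdz', indexToDiskName(26) == 'vdba'.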
def createDiskElement(domxml, devpath, drivertype):
'''
<disk device="disk" type="file">
<source file="/net/myhost/myimage.img"/>
<target bus="virtio" dev="vda"/>
<driver cache="none" error_policy="stop" name="qemu" type="qcow2"/>
</disk>
'''
disk = domxml.createElement('disk')
disk.setAttribute('device', 'disk')
disk.setAttribute('type', 'file')
source = domxml.createElement('source')
source.setAttribute('file', devpath)
disk.appendChild(source)
# find a name for vdXXX
target = domxml.createElement('target')
target.setAttribute('bus', 'virtio')
xmldisks = domxml.getElementsByTagName('disk')
disks = []
for d in xmldisks:
disks.append(d.getElementsByTagName('target')[0].getAttribute('dev'))
for i in range(0, 27):
if not indexToDiskName(i) in disks:
target.setAttribute('dev', indexToDiskName(i))
break
disk.appendChild(target)
driver = domxml.createElement('driver')
driver.setAttribute('cache', 'none')
driver.setAttribute('name', 'qemu')
driver.setAttribute('type', drivertype)
disk.appendChild(driver)
return disk
if 'vmdisk' in os.environ:
try:
disks = os.environ['vmdisk']
domxml = hooking.read_domxml()
devices = domxml.getElementsByTagName('devices')[0]
for disk in disks.split(','):
try:
devpath, drivertype = disk.split(':')
except ValueError:
sys.stderr.write('vmdisk: input error, expected '
'diskpath:diskformat ie '
'/path/disk.img:qcow2\n')
sys.exit(2)
            if drivertype not in driver_types:
sys.stderr.write('vmdisk: input error, driver '
'type: raw or qcow2\n')
sys.exit(2)
diskdev = createDiskElement(domxml, devpath, drivertype)
devices.appendChild(diskdev)
hooking.write_domxml(domxml)
    # Catch Exception (not a bare except) so the sys.exit(2) calls above are
    # not swallowed and re-reported as unexpected errors.
    except Exception:
sys.stderr.write('vmdisk: [unexpected error]: %s\n' %
traceback.format_exc())
sys.exit(2)
| gpl-2.0 |
ion-storm/Unleashed-N5 | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
loc = int(sloc)
for i in kallsyms:
if (i['loc'] >= loc):
return (i['name'], i['loc']-loc)
return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
brianv0/sqlalchemy | lib/sqlalchemy/util/compat.py | 70 | 6809 | # util/compat.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handle Python version/platform incompatibilities."""
import sys
try:
import threading
except ImportError:
import dummy_threading as threading
py33 = sys.version_info >= (3, 3)
py32 = sys.version_info >= (3, 2)
py3k = sys.version_info >= (3, 0)
py2k = sys.version_info < (3, 0)
py265 = sys.version_info >= (2, 6, 5)
jython = sys.platform.startswith('java')
pypy = hasattr(sys, 'pypy_version_info')
win32 = sys.platform.startswith('win')
cpython = not pypy and not jython # TODO: something better for this ?
import collections
next = next
if py3k:
import pickle
else:
try:
import cPickle as pickle
except ImportError:
import pickle
# work around http://bugs.python.org/issue2646
if py265:
safe_kwarg = lambda arg: arg
else:
safe_kwarg = str
ArgSpec = collections.namedtuple("ArgSpec",
["args", "varargs", "keywords", "defaults"])
if py3k:
import builtins
from inspect import getfullargspec as inspect_getfullargspec
from urllib.parse import (quote_plus, unquote_plus,
parse_qsl, quote, unquote)
import configparser
from io import StringIO
from io import BytesIO as byte_buffer
def inspect_getargspec(func):
return ArgSpec(
*inspect_getfullargspec(func)[0:4]
)
string_types = str,
binary_type = bytes
text_type = str
int_types = int,
iterbytes = iter
def u(s):
return s
def ue(s):
return s
def b(s):
return s.encode("latin-1")
if py32:
callable = callable
else:
def callable(fn):
return hasattr(fn, '__call__')
def cmp(a, b):
return (a > b) - (a < b)
from functools import reduce
print_ = getattr(builtins, "print")
import_ = getattr(builtins, '__import__')
import itertools
itertools_filterfalse = itertools.filterfalse
itertools_filter = filter
itertools_imap = map
from itertools import zip_longest
import base64
def b64encode(x):
return base64.b64encode(x).decode('ascii')
def b64decode(x):
return base64.b64decode(x.encode('ascii'))
else:
from inspect import getargspec as inspect_getfullargspec
inspect_getargspec = inspect_getfullargspec
from urllib import quote_plus, unquote_plus, quote, unquote
from urlparse import parse_qsl
import ConfigParser as configparser
from StringIO import StringIO
from cStringIO import StringIO as byte_buffer
string_types = basestring,
binary_type = str
text_type = unicode
int_types = int, long
def iterbytes(buf):
return (ord(byte) for byte in buf)
def u(s):
# this differs from what six does, which doesn't support non-ASCII
# strings - we only use u() with
# literal source strings, and all our source files with non-ascii
# in them (all are tests) are utf-8 encoded.
return unicode(s, "utf-8")
def ue(s):
return unicode(s, "unicode_escape")
def b(s):
return s
def import_(*args):
if len(args) == 4:
args = args[0:3] + ([str(arg) for arg in args[3]],)
return __import__(*args)
callable = callable
cmp = cmp
reduce = reduce
import base64
b64encode = base64.b64encode
b64decode = base64.b64decode
def print_(*args, **kwargs):
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
        for arg in args:
if not isinstance(arg, basestring):
arg = str(arg)
fp.write(arg)
import itertools
itertools_filterfalse = itertools.ifilterfalse
itertools_filter = itertools.ifilter
itertools_imap = itertools.imap
from itertools import izip_longest as zip_longest
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
from collections import namedtuple
from operator import attrgetter as dottedgetter
if py3k:
def reraise(tp, value, tb=None, cause=None):
if cause is not None:
value.__cause__ = cause
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
def raise_from_cause(exception, exc_info=None):
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
reraise(type(exception), exception, tb=exc_tb, cause=exc_value)
else:
exec("def reraise(tp, value, tb=None, cause=None):\n"
" raise tp, value, tb\n")
def raise_from_cause(exception, exc_info=None):
# not as nice as that of Py3K, but at least preserves
# the code line where the issue occurred
if exc_info is None:
exc_info = sys.exc_info()
exc_type, exc_value, exc_tb = exc_info
reraise(type(exception), exception, tb=exc_tb)
if py3k:
exec_ = getattr(builtins, 'exec')
else:
def exec_(func_text, globals_, lcl=None):
if lcl is None:
exec('exec func_text in globals_')
else:
exec('exec func_text in globals_, lcl')
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass.
Drops the middle class upon creation.
Source: http://lucumr.pocoo.org/2013/5/21/porting-to-python-3-redux/
"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
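# Illustrative usage (MyMeta and Base are hypothetical): on both Python 2 and
# 3 this yields a class created by MyMeta with Base as its only base:
#   class MyClass(with_metaclass(MyMeta, Base)):
#       pass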
from contextlib import contextmanager
try:
from contextlib import nested
except ImportError:
# removed in py3k, credit to mitsuhiko for
# workaround
@contextmanager
def nested(*managers):
exits = []
vars = []
exc = (None, None, None)
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
reraise(exc[0], exc[1], exc[2])
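    # Illustrative use of this fallback (file names hypothetical):
    #   with nested(open('a.txt'), open('b.txt')) as (fa, fb):
    #       fa.read(); fb.read()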
| mit |
balloob/home-assistant | tests/components/mobile_app/test_http_api.py | 12 | 3643 | """Tests for the mobile_app HTTP API."""
import json
from unittest.mock import patch
import pytest
from homeassistant.components.mobile_app.const import CONF_SECRET, DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.setup import async_setup_component
from .const import REGISTER, REGISTER_CLEARTEXT, RENDER_TEMPLATE
from tests.common import mock_coro
async def test_registration(hass, hass_client, hass_admin_user):
"""Test that registrations happen."""
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
api_client = await hass_client()
with patch(
"homeassistant.components.person.async_add_user_device_tracker",
spec=True,
return_value=mock_coro(),
) as add_user_dev_track:
resp = await api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert len(add_user_dev_track.mock_calls) == 1
assert add_user_dev_track.mock_calls[0][1][1] == hass_admin_user.id
assert add_user_dev_track.mock_calls[0][1][2] == "device_tracker.test_1"
assert resp.status == 201
register_json = await resp.json()
assert CONF_WEBHOOK_ID in register_json
assert CONF_SECRET in register_json
entries = hass.config_entries.async_entries(DOMAIN)
assert entries[0].unique_id == "io.homeassistant.mobile_app_test-mock-device-id"
assert entries[0].data["device_id"] == REGISTER_CLEARTEXT["device_id"]
assert entries[0].data["app_data"] == REGISTER_CLEARTEXT["app_data"]
assert entries[0].data["app_id"] == REGISTER_CLEARTEXT["app_id"]
assert entries[0].data["app_name"] == REGISTER_CLEARTEXT["app_name"]
assert entries[0].data["app_version"] == REGISTER_CLEARTEXT["app_version"]
assert entries[0].data["device_name"] == REGISTER_CLEARTEXT["device_name"]
assert entries[0].data["manufacturer"] == REGISTER_CLEARTEXT["manufacturer"]
assert entries[0].data["model"] == REGISTER_CLEARTEXT["model"]
assert entries[0].data["os_name"] == REGISTER_CLEARTEXT["os_name"]
assert entries[0].data["os_version"] == REGISTER_CLEARTEXT["os_version"]
assert (
entries[0].data["supports_encryption"]
== REGISTER_CLEARTEXT["supports_encryption"]
)
async def test_registration_encryption(hass, hass_client):
"""Test that registrations happen."""
try:
from nacl.encoding import Base64Encoder
from nacl.secret import SecretBox
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
api_client = await hass_client()
resp = await api_client.post("/api/mobile_app/registrations", json=REGISTER)
assert resp.status == 201
register_json = await resp.json()
keylen = SecretBox.KEY_SIZE
key = register_json[CONF_SECRET].encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b"\0")
payload = json.dumps(RENDER_TEMPLATE["data"]).encode("utf-8")
data = SecretBox(key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
resp = await api_client.post(
"/api/webhook/{}".format(register_json[CONF_WEBHOOK_ID]), json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert "encrypted_data" in webhook_json
decrypted_data = SecretBox(key).decrypt(
webhook_json["encrypted_data"], encoder=Base64Encoder
)
decrypted_data = decrypted_data.decode("utf-8")
assert json.loads(decrypted_data) == {"one": "Hello world"}
| apache-2.0 |
googleapis/googleapis-gen | google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/types/feed_item_target_service.py | 1 | 5926 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v7.enums.types import response_content_type as gage_response_content_type
from google.ads.googleads.v7.resources.types import feed_item_target as gagr_feed_item_target
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v7.services',
marshal='google.ads.googleads.v7',
manifest={
'GetFeedItemTargetRequest',
'MutateFeedItemTargetsRequest',
'FeedItemTargetOperation',
'MutateFeedItemTargetsResponse',
'MutateFeedItemTargetResult',
},
)
class GetFeedItemTargetRequest(proto.Message):
r"""Request message for
[FeedItemTargetService.GetFeedItemTarget][google.ads.googleads.v7.services.FeedItemTargetService.GetFeedItemTarget].
Attributes:
resource_name (str):
Required. The resource name of the feed item
targets to fetch.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
class MutateFeedItemTargetsRequest(proto.Message):
r"""Request message for
[FeedItemTargetService.MutateFeedItemTargets][google.ads.googleads.v7.services.FeedItemTargetService.MutateFeedItemTargets].
Attributes:
customer_id (str):
Required. The ID of the customer whose feed
item targets are being modified.
operations (Sequence[google.ads.googleads.v7.services.types.FeedItemTargetOperation]):
Required. The list of operations to perform
on individual feed item targets.
partial_failure (bool):
If true, successful operations will be
carried out and invalid operations will return
errors. If false, all operations will be carried
out in one transaction if and only if they are
all valid. Default is false.
response_content_type (google.ads.googleads.v7.enums.types.ResponseContentTypeEnum.ResponseContentType):
The response content type setting. Determines
whether the mutable resource or just the
resource name should be returned post mutation.
validate_only (bool):
If true, the request is validated but not
executed. Only errors are returned, not results.
"""
customer_id = proto.Field(
proto.STRING,
number=1,
)
operations = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FeedItemTargetOperation',
)
partial_failure = proto.Field(
proto.BOOL,
number=4,
)
response_content_type = proto.Field(
proto.ENUM,
number=5,
enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
)
validate_only = proto.Field(
proto.BOOL,
number=3,
)
class FeedItemTargetOperation(proto.Message):
r"""A single operation (create, remove) on an feed item target.
Attributes:
create (google.ads.googleads.v7.resources.types.FeedItemTarget):
Create operation: No resource name is
expected for the new feed item target.
remove (str):
Remove operation: A resource name for the removed feed item
target is expected, in this format:
``customers/{customer_id}/feedItemTargets/{feed_id}~{feed_item_id}~{feed_item_target_type}~{feed_item_target_id}``
"""
create = proto.Field(
proto.MESSAGE,
number=1,
oneof='operation',
message=gagr_feed_item_target.FeedItemTarget,
)
remove = proto.Field(
proto.STRING,
number=2,
oneof='operation',
)
class MutateFeedItemTargetsResponse(proto.Message):
r"""Response message for an feed item target mutate.
Attributes:
partial_failure_error (google.rpc.status_pb2.Status):
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results (Sequence[google.ads.googleads.v7.services.types.MutateFeedItemTargetResult]):
All results for the mutate.
"""
partial_failure_error = proto.Field(
proto.MESSAGE,
number=3,
message=status_pb2.Status,
)
results = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='MutateFeedItemTargetResult',
)
class MutateFeedItemTargetResult(proto.Message):
r"""The result for the feed item target mutate.
Attributes:
resource_name (str):
Returned for successful operations.
feed_item_target (google.ads.googleads.v7.resources.types.FeedItemTarget):
The mutated feed item target with only mutable fields after
mutate. The field will only be returned when
response_content_type is set to "MUTABLE_RESOURCE".
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
feed_item_target = proto.Field(
proto.MESSAGE,
number=2,
message=gagr_feed_item_target.FeedItemTarget,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
thaim/ansible | lib/ansible/module_utils/network/apconos/apconos.py | 18 | 3650 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by
# Ansible still belong to the author of the module, and may assign their own
# license to the complete work.
#
# Copyright (C) 2019 APCON, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contains utility methods
# APCON Networking
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils._text import to_text
from ansible.module_utils.network.common.utils import EntityCollection
from ansible.module_utils.connection import Connection, exec_command
from ansible.module_utils.connection import ConnectionError
_DEVICE_CONFIGS = {}
_CONNECTION = None
command_spec = {
'command': dict(key=True),
}
def check_args(module, warnings):
pass
def get_connection(module):
global _CONNECTION
if _CONNECTION:
return _CONNECTION
_CONNECTION = Connection(module._socket_path)
return _CONNECTION
def get_config(module, flags=None):
flags = [] if flags is None else flags
cmd = ' '.join(flags).strip()
try:
return _DEVICE_CONFIGS[cmd]
except KeyError:
conn = get_connection(module)
out = conn.get(cmd)
cfg = to_text(out, errors='surrogate_then_replace').strip()
_DEVICE_CONFIGS[cmd] = cfg
return cfg
def run_commands(module, commands, check_rc=True):
connection = get_connection(module)
transform = EntityCollection(module, command_spec)
commands = transform(commands)
responses = list()
for cmd in commands:
out = connection.get(**cmd)
responses.append(to_text(out, errors='surrogate_then_replace'))
return responses
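# Illustrative call from within a module (the command is hypothetical):
#   responses = run_commands(module, ['display version'])
# Entries may also be dicts matching command_spec, e.g.
#   {'command': 'display version'}.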
def load_config(module, config):
try:
conn = get_connection(module)
conn.edit_config(config)
except ConnectionError as exc:
module.fail_json(msg=to_text(exc))
def get_defaults_flag(module):
rc, out, err = exec_command(module, 'display running-config ?')
out = to_text(out, errors='surrogate_then_replace')
commands = set()
for line in out.splitlines():
if line:
commands.add(line.strip().split()[0])
if 'all' in commands:
return 'all'
else:
return 'full'
| mit |
krafczyk/spack | var/spack/repos/builtin/packages/laghos/package.py | 2 | 3127 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Laghos(MakefilePackage):
"""Laghos (LAGrangian High-Order Solver) is a CEED miniapp that solves the
time-dependent Euler equations of compressible gas dynamics in a moving
Lagrangian frame using unstructured high-order finite element spatial
discretization and explicit high-order time-stepping.
"""
tags = ['proxy-app', 'ecp-proxy-app']
homepage = "https://github.com/CEED/Laghos"
url = "https://github.com/CEED/Laghos/archive/v1.0.tar.gz"
git = "https://github.com/CEED/Laghos.git"
version('develop', branch='master')
version('1.0', '4c091e115883c79bed81c557ef16baff')
variant('metis', default=True, description='Enable/disable METIS support')
depends_on('mfem@develop+mpi+metis', when='@develop+metis')
depends_on('mfem@develop+mpi~metis', when='@develop~metis')
depends_on('mfem@laghos-v1.0,3.3.2:+mpi+metis', when='@1.0+metis')
depends_on('mfem@laghos-v1.0,3.3.2:+mpi~metis', when='@1.0~metis')
@property
def build_targets(self):
targets = []
spec = self.spec
targets.append('MFEM_DIR=%s' % spec['mfem'].prefix)
targets.append('CONFIG_MK=%s' % spec['mfem'].package.config_mk)
targets.append('TEST_MK=%s' % spec['mfem'].package.test_mk)
targets.append('CXX=%s' % spec['mpi'].mpicxx)
return targets
# See lib/spack/spack/build_systems/makefile.py
def check(self):
targets = []
spec = self.spec
targets.append('MFEM_DIR=%s' % spec['mfem'].prefix)
targets.append('CONFIG_MK=%s' % spec['mfem'].package.config_mk)
targets.append('TEST_MK=%s' % spec['mfem'].package.test_mk)
with working_dir(self.build_directory):
make('test', *targets)
def install(self, spec, prefix):
mkdirp(prefix.bin)
install('laghos', prefix.bin)
| lgpl-2.1 |
dset0x/invenio | invenio/modules/groups/forms.py | 14 | 2468 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Group Forms."""
from invenio.base.i18n import _
from invenio.modules.accounts.models import Usergroup
from invenio.utils.forms import InvenioBaseForm, RemoteAutocompleteField
from wtforms import validators, widgets
from wtforms.fields import BooleanField, HiddenField
from wtforms_alchemy import model_form_factory
ModelForm = model_form_factory(InvenioBaseForm)
class UsergroupForm(ModelForm):
"""Create new Usergroup."""
class Meta:
"""Meta class model for *WTForms-Alchemy*."""
model = Usergroup
strip_string_fields = True
field_args = dict(
name=dict(
label=_('Name'),
validators=[validators.DataRequired()],
widget=widgets.TextInput(),
),
description=dict(label=_('Description')),
join_policy=dict(label=_('Join policy')),
login_method=dict(label=_('Login method'))
)
class JoinUsergroupForm(InvenioBaseForm):
"""Join existing group."""
id_usergroup = RemoteAutocompleteField(
# without label
'',
remote='',
min_length=1,
highlight='true',
data_key='id',
data_value='name'
)
class UserJoinGroupForm(InvenioBaseForm):
"""Select a user that Join an existing group."""
id_usergroup = HiddenField()
id_user = RemoteAutocompleteField(
# without label
'',
remote='',
min_length=3,
highlight='true',
data_key='id',
data_value='nickname'
)
# set as admin of the group
user_status = BooleanField(label=_('as Admin'))
# return page
redirect_url = HiddenField()
| gpl-2.0 |
ifaoe/daisi-tk | daisi_images.py | 1 | 5255 | #!/usr/bin/python3
import logging
import psycopg2
from argparse import ArgumentParser
from gdal_tif2geo import process
import multiprocessing
import subprocess
from joblib import Parallel, delayed
from math import ceil
import tempfile
import os
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# function which is called in parallel
def parallel_process(row, linco_path, linco_args, threads, overwrite, temppath, compress, opencl):
# split row from database query into single variables
[epsg, iiq_file, geo_file, ne_x, ne_y, nw_x, nw_y, sw_x, sw_y, se_x, se_y] = row
if not overwrite:
if os.path.isfile(geo_file) and os.path.exists(geo_file):
print('{file} already exists.'.format(file=geo_file))
return
print("Processing {0} -> {1}".format(iiq_file, geo_file))
# convert iiq -> tiff
# create temporary file
temp_file = tempfile.NamedTemporaryFile()
# run linco
    # With shell=True the command must be a single string; passing a sequence
    # would execute only its first element ('nice') on POSIX systems.
    linco_command = ' '.join(('nice', '-n', '19', linco_path, iiq_file,
                              temp_file.name,
                              '-cputhreads={threads}'.format(threads=threads),
                              linco_args))
    logger.debug(linco_command)
    linco_log = subprocess.run(linco_command, shell=True, check=True,
                               stdout=subprocess.PIPE).stdout.decode('utf8')
logger.debug(linco_log)
# create geotiff
process(temp_file.name, geo_file, [ne_x, ne_y], [nw_x, nw_y], [se_x, se_y], [sw_x, sw_y], threads,
0.02, compress, 95, 'lanczos', epsg, [256, 256], args.verbose, opencl, overwrite, temppath)
if __name__ == '__main__':
parser = ArgumentParser(description='Georeference DAISI images from tif.')
parser.add_argument('-v', '--verbose', action='store_true', help='Verbosity.')
parser.add_argument('-s', '--session', type=str, default='.*', help='Session pattern (default: .*).')
parser.add_argument('-t', '--transect', type=str, default='.*', help='Transect pattern (default: .*).')
parser.add_argument('-c', '--camera', type=str, default='.*', help='Camera pattern (default: .*).')
parser.add_argument('-i', '--image', type=str, default='.*', help='Image pattern (default: .*).')
parser.add_argument('-H', '--host', type=str, default='127.0.0.1', help='Database host (default: 127.0.0.1).')
parser.add_argument('-d', '--database', type=str, default='daisi', help='Database name (default: DAISI).')
parser.add_argument('-u', '--user', type=str, default='daisi', help='Database user (default: DAISI).')
parser.add_argument('-P', '--password', type=str, default='18ifaoe184', help='Database password.')
parser.add_argument('-p', '--port', type=str, default='5432', help='Database port (default: 5432).')
parser.add_argument('-l', '--location', type=str, default='rostock', help='Image data location (default: rostock)')
parser.add_argument('-o', '--overwrite', action='store_true', help='Overwrite image if it already exists.')
parser.add_argument('--linco-path', type=str, default='/usr/local/bin/linco', help='Location of linco executable.')
parser.add_argument('--linco-args', type=str, default='-bits=16 -shadowRecovery=75 -highlightRecovery=75',
help='Set linco arguments (default: -bits=16 -shadowRecovery=75 -highlightRecovery=75).')
parser.add_argument('--linco-help', action='store_true', help='Get linco help (overwrites all other arguments).')
parser.add_argument('--temp-path', type=str, help='Path for temporary files')
parser.add_argument('--compress', action='store_true', help='Enable JPEG compression (default: off).')
parser.add_argument('--opencl', action='store_true', help='Enable OpenCL (default: off, requires working OpenCL setup.).')
args = parser.parse_args()
if args.linco_help:
subprocess.run([args.linco_path, '--help'])
exit(1)
if args.verbose:
logger.setLevel(logging.DEBUG)
# connecting to database
connection = psycopg2.connect(database=args.database, host=args.host, port=args.port, user=args.user, password=args.password)
cursor = connection.cursor()
cursor.execute("SELECT epsg, iiq_path, geo_path, ne_x, ne_y, nw_x, nw_y, sw_x, sw_y, se_x, se_y FROM daisi_dev.gdal_images "
"WHERE location=%s AND session~%s AND transect~%s AND camera~%s AND image~%s",
(args.location, args.session, args.transect, args.camera, args.image))
rows = cursor.fetchall()
row_count = len(rows)
if row_count == 0:
logger.critical('No images match the query {0}'.format(cursor.query))
exit(1)
logger.debug('{0} images match the query {1}'.format(row_count, cursor.query))
connection.commit()
cpu_count = multiprocessing.cpu_count()
thread_count = min(cpu_count, ceil(cpu_count/row_count))
process_count = min(cpu_count, ceil(cpu_count/thread_count))
logger.debug('Found {0} CPUs. Using {1} processes with {2} thread(s) each.'.format(cpu_count, process_count, thread_count))
Parallel(n_jobs=process_count)(delayed(parallel_process)
(
row, args.linco_path, args.linco_args, thread_count, args.overwrite, args.temp_path, args.compress, args.opencl
) for row in rows)
| gpl-2.0 |
DanielNeugebauer/adhocracy | scripts/common.py | 6 | 2881 | """
Utility code to use in command line scripts.
You need some boilerplate code to import from this module,
because it's probably not on the Python path::
# boilerplate code. copy that to a new commandline script
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
* Parse command line arguments with a preconfigured argparser::
parser = create_parser('description of the command...', use_instances=True)
# You can add more arguments to the parser
# parser.add_argument(...)
args = parser.parse_args()
# args will contain all command line args
load_from_args(args)
instances = get_instances(args) # returns instance objects
"""
try:
from argparse import ArgumentParser
except ImportError:
print ('This script uses argparse. It is part of python 2.7/3.2\n'
'and can be installed from pypi for other versions:\n'
'http://pypi.python.org/pypi/argparse')
exit(1)
import os
from sqlalchemy import engine_from_config
from paste.deploy import appconfig
from adhocracy.config.environment import load_environment
from adhocracy.model import Instance
section = 'content'
def config_from_args(args):
filename = args.file
section = args.section
return appconfig('config:%s#%s' % (os.path.abspath(filename), section))
def load_config(config):
return load_environment(config.global_conf, config.local_conf)
def load_from_args(args):
config = config_from_args(args)
return load_config(config)
def create_parser(description, use_instance=True,
instance_help='Instances to consider'):
parser = ArgumentParser(description=description)
parser.add_argument("file", help="configuration file to use",
metavar="<config file>")
parser.add_argument("-n", default=section, dest="section",
help=('name of the "app:"-section to use. (default: '
'%s)' % section))
if use_instance:
parser.add_argument(
"-i", "--instance", metavar='INSTANCE', nargs="*",
dest="instances", help=instance_help, action="append")
return parser
def get_instances(args):
'''
Flatten out the instances parsed by a parser from create_parser
used with `use_instance=True`
'''
if args.instances:
keys = [item for sublist in args.instances for item in
sublist]
instances = []
for key in keys:
obj = Instance.find(key)
if obj is None:
raise ValueError("Instance '%s' does not exist" % key)
instances.append(obj)
return instances
return None
def get_engine(conf, echo=True):
return engine_from_config(conf.local_conf, echo=echo)
| agpl-3.0 |
def-/commandergenius | project/jni/python/src/Demo/newmetaclasses/Eiffel.py | 37 | 3713 | """Support Eiffel-style preconditions and postconditions."""
from types import FunctionType as function
class EiffelBaseMetaClass(type):
def __new__(meta, name, bases, dict):
meta.convert_methods(dict)
return super(EiffelBaseMetaClass, meta).__new__(meta, name, bases,
dict)
@classmethod
def convert_methods(cls, dict):
"""Replace functions in dict with EiffelMethod wrappers.
The dict is modified in place.
If a method ends in _pre or _post, it is removed from the dict
regardless of whether there is a corresponding method.
"""
# find methods with pre or post conditions
methods = []
for k, v in dict.iteritems():
if k.endswith('_pre') or k.endswith('_post'):
assert isinstance(v, function)
elif isinstance(v, function):
methods.append(k)
for m in methods:
pre = dict.get("%s_pre" % m)
post = dict.get("%s_post" % m)
if pre or post:
                dict[m] = cls.make_eiffel_method(dict[m], pre, post)
class EiffelMetaClass1(EiffelBaseMetaClass):
# an implementation of the "eiffel" meta class that uses nested functions
@staticmethod
def make_eiffel_method(func, pre, post):
def method(self, *args, **kwargs):
if pre:
pre(self, *args, **kwargs)
x = func(self, *args, **kwargs)
if post:
post(self, x, *args, **kwargs)
return x
if func.__doc__:
method.__doc__ = func.__doc__
return method
class EiffelMethodWrapper:
def __init__(self, inst, descr):
self._inst = inst
self._descr = descr
def __call__(self, *args, **kwargs):
return self._descr.callmethod(self._inst, args, kwargs)
class EiffelDescriptor(object):
def __init__(self, func, pre, post):
self._func = func
self._pre = pre
self._post = post
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __get__(self, obj, cls):
return EiffelMethodWrapper(obj, self)
def callmethod(self, inst, args, kwargs):
if self._pre:
self._pre(inst, *args, **kwargs)
x = self._func(inst, *args, **kwargs)
if self._post:
self._post(inst, x, *args, **kwargs)
return x
class EiffelMetaClass2(EiffelBaseMetaClass):
# an implementation of the "eiffel" meta class that uses descriptors
make_eiffel_method = EiffelDescriptor
def _test(metaclass):
class Eiffel:
__metaclass__ = metaclass
class Test(Eiffel):
def m(self, arg):
"""Make it a little larger"""
return arg + 1
def m2(self, arg):
"""Make it a little larger"""
return arg + 1
def m2_pre(self, arg):
assert arg > 0
def m2_post(self, result, arg):
assert result > arg
class Sub(Test):
def m2(self, arg):
return arg**2
def m2_post(self, Result, arg):
super(Sub, self).m2_post(Result, arg)
assert Result < 100
t = Test()
t.m(1)
t.m2(1)
try:
t.m2(0)
except AssertionError:
pass
else:
assert False
s = Sub()
try:
s.m2(1)
except AssertionError:
pass # result == arg
else:
assert False
try:
s.m2(10)
except AssertionError:
pass # result == 100
else:
assert False
s.m2(5)
if __name__ == "__main__":
_test(EiffelMetaClass1)
_test(EiffelMetaClass2)
| lgpl-2.1 |
HankFaan/fbthrift | thrift/lib/py/transport/TSSLSocketOverHttpTunnel.py | 16 | 1645 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import socket
import ssl
from .TSocketOverHttpTunnel import TSocketOverHttpTunnel
from .TTransport import TTransportException
class TSSLSocketOverHttpTunnel(TSocketOverHttpTunnel):
def __init__(self, host, port, proxy_host, proxy_port,
ssl_version=ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_NONE,
ca_certs=None,
keyfile=None,
certfile=None):
TSocketOverHttpTunnel.__init__(self, host, port, proxy_host, proxy_port)
self.ssl_version = ssl_version
self.cert_reqs = cert_reqs
self.keyfile, self.certfile, self.ca_certs = \
keyfile, certfile, ca_certs
def open(self):
TSocketOverHttpTunnel.open(self)
try:
sslh = ssl.SSLSocket(self.handle,
ssl_version=self.ssl_version,
cert_reqs=self.cert_reqs,
keyfile=self.keyfile,
certfile=self.certfile,
ca_certs=self.ca_certs)
self.handle = sslh
except ssl.SSLError as e:
self.close()
raise TTransportException(TTransportException.NOT_OPEN,
"SSL error during handshake: " + str(e))
except socket.error as e:
self.close()
raise TTransportException(TTransportException.NOT_OPEN,
"socket error during SSL handshake: " + str(e))
| apache-2.0 |
fnoorian/Free-buck-boost | drivers/json_server.py | 1 | 1329 | from werkzeug.wrappers import Request, Response
from werkzeug.serving import run_simple
# this uses the json-rpc package (not jsonrpc!)
from jsonrpc import JSONRPCResponseManager, dispatcher
from drivers.boost_driver import FCCBoostDriver
from drivers.buck_driver import FCCBuckDriver, FCCMPPTDriver
from drivers.mighty_driver import MightyWattDriver
@dispatcher.add_method
def get_version():
version = ["fcc_json_server", 1]
return version
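# Illustrative JSON-RPC 2.0 exchange (port taken from run_simple below):
#   POST http://localhost:4002/
#     {"jsonrpc": "2.0", "method": "get_version", "id": 0}
#   expected response:
#     {"jsonrpc": "2.0", "result": ["fcc_json_server", 1], "id": 0}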
@Request.application
def application(request):
dispatcher["mightywatt_readstatus"] = mightywatt.read_status
dispatcher["mightywatt_setpower"] = mightywatt.set_power
dispatcher["charger_readstatus"] = charger.read_status
dispatcher["discharger_readstatus"] = discharger.read_status
dispatcher["mppt_readstatus"] = mppt.read_status
response = JSONRPCResponseManager.handle(
request.data, dispatcher)
return Response(response.json, mimetype='application/json')
if __name__ == '__main__':
mightywatt = MightyWattDriver(u'8533434373835120D1C2')
charger = FCCBoostDriver(u'75439333635351719221')
discharger = FCCBuckDriver(u'75439333635351712071')
mppt = FCCMPPTDriver(u'75439333635351918140')
#run_simple('localhost', 4000, application)
run_simple('0.0.0.0', 4002, application)
| bsd-2-clause |
googleinterns/cabby | cabby/model/datasets.py | 1 | 4391 | # coding=utf-8
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl import logging
import os
import pandas as pd
from sklearn.utils import shuffle
from cabby.geo import regions
from cabby.geo import util as gutil
class RUNDataset:
def __init__(self, data_dir: str, s2level: int, lines: bool = False):
train_ds, valid_ds, test_ds, ds = self.load_data(data_dir, lines=lines)
# Get labels.
map_1 = regions.get_region("RUN-map1")
map_2 = regions.get_region("RUN-map2")
map_3 = regions.get_region("RUN-map3")
logging.info(map_1.polygon.wkt)
logging.info(map_2.polygon.wkt)
logging.info(map_3.polygon.wkt)
unique_cellid_map_1 = gutil.cellids_from_polygon(map_1.polygon, s2level)
unique_cellid_map_2 = gutil.cellids_from_polygon(map_2.polygon, s2level)
unique_cellid_map_3 = gutil.cellids_from_polygon(map_3.polygon, s2level)
unique_cellid = (
unique_cellid_map_1 + unique_cellid_map_2 + unique_cellid_map_3)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.ds = ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
def load_data(self, data_dir: str, lines: bool):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
ds['instructions'] = ds.groupby(
['id'])['instruction'].transform(lambda x: ' '.join(x))
ds = ds.drop_duplicates(subset='id', keep="last")
columns_keep = ds.columns.difference(
['map', 'id', 'instructions', 'end_point', 'start_point'])
ds.drop(columns_keep, 1, inplace=True)
ds = shuffle(ds)
ds.reset_index(inplace=True, drop=True)
dataset_size = ds.shape[0]
logging.info(f"Size of dataset: {ds.shape[0]}")
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
return train_ds, valid_ds, test_ds, ds
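# Illustrative usage (the data directory and S2 cell level are assumptions,
# not values taken from this repository):
#
#   run_data = RUNDataset(data_dir='/path/to/run_data', s2level=18)
#   print(len(run_data.train), len(run_data.valid), len(run_data.test))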
class RVSDataset:
def __init__(self, data_dir: str, s2level: int, region: str, lines: bool = True):
ds = pd.read_json(os.path.join(data_dir, 'dataset.json'), lines=lines)
logging.info(f"Size of dataset before removal of duplication: {ds.shape[0]}")
ds = pd.concat([ds.drop(['geo_landmarks'], axis=1), ds['geo_landmarks'].apply(pd.Series)], axis=1)
lengths = ds.end_point.apply(lambda x: x if len(x) == 3 else "").tolist()  # computed but not used below
ds['end_osmid'] = ds.end_point.apply(lambda x: x[1])
ds['start_osmid'] = ds.start_point.apply(lambda x: x[1])
ds['end_pivot'] = ds.end_point
ds['end_point'] = ds.end_point.apply(lambda x: x[3])
ds['start_point'] = ds.start_point.apply(lambda x: x[3])
ds = ds.drop_duplicates(subset=['end_osmid', 'start_osmid'], keep='last')
logging.info(f"Size of dataset after removal of duplication: {ds.shape[0]}")
dataset_size = ds.shape[0]
train_size = round(dataset_size * 80 / 100)
valid_size = round(dataset_size * 10 / 100)
train_ds = ds.iloc[:train_size]
valid_ds = ds.iloc[train_size:train_size + valid_size]
test_ds = ds.iloc[train_size + valid_size:]
# Get labels.
active_region = regions.get_region(region)
unique_cellid = gutil.cellids_from_polygon(active_region.polygon, s2level)
label_to_cellid = {idx: cellid for idx, cellid in enumerate(unique_cellid)}
cellid_to_label = {cellid: idx for idx, cellid in enumerate(unique_cellid)}
self.train = train_ds
self.valid = valid_ds
self.test = test_ds
self.unique_cellid = unique_cellid
self.label_to_cellid = label_to_cellid
self.cellid_to_label = cellid_to_label
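# Illustrative usage (the arguments below are assumptions; the region name
# must be one that `regions.get_region` knows about):
#
#   rvs_data = RVSDataset(data_dir='/path/to/rvs_data', s2level=18, region='Manhattan')
#   print(rvs_data.train.shape, rvs_data.valid.shape, rvs_data.test.shape)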
| apache-2.0 |
sarthakmeh03/django | django/db/migrations/writer.py | 58 | 11569 | from __future__ import unicode_literals
import os
import re
from importlib import import_module
from django import get_version
from django.apps import apps
from django.db import migrations
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.serializer import serializer_factory
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.inspect import get_func_args
from django.utils.module_loading import module_dir
from django.utils.timezone import now
try:
import enum
except ImportError:
# No support on Python 2 if enum34 isn't installed.
enum = None
class SettingsReference(str):
"""
Special subclass of string which actually references a current settings
value. It's treated as the value in memory, but serializes out to a
settings.NAME attribute reference.
"""
def __new__(self, value, setting_name):
return str.__new__(self, value)
def __init__(self, value, setting_name):
self.setting_name = setting_name
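# Illustrative behaviour (a sketch, not part of the original module):
#
#   ref = SettingsReference('auth.User', 'AUTH_USER_MODEL')
#   ref == 'auth.User'   # True -- compares like the plain string in memory
#   ref.setting_name     # 'AUTH_USER_MODEL' -- consulted by the serializer so
#                        # the migration file emits settings.AUTH_USER_MODEL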
class OperationWriter(object):
def __init__(self, operation, indentation=2):
self.operation = operation
self.buff = []
self.indentation = indentation
def serialize(self):
def _write(_arg_name, _arg_value):
if (_arg_name in self.operation.serialization_expand_args and
isinstance(_arg_value, (list, tuple, dict))):
if isinstance(_arg_value, dict):
self.feed('%s={' % _arg_name)
self.indent()
for key, value in _arg_value.items():
key_string, key_imports = MigrationWriter.serialize(key)
arg_string, arg_imports = MigrationWriter.serialize(value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s: %s' % (key_string, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s: %s,' % (key_string, arg_string))
imports.update(key_imports)
imports.update(arg_imports)
self.unindent()
self.feed('},')
else:
self.feed('%s=[' % _arg_name)
self.indent()
for item in _arg_value:
arg_string, arg_imports = MigrationWriter.serialize(item)
args = arg_string.splitlines()
if len(args) > 1:
for arg in args[:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s,' % arg_string)
imports.update(arg_imports)
self.unindent()
self.feed('],')
else:
arg_string, arg_imports = MigrationWriter.serialize(_arg_value)
args = arg_string.splitlines()
if len(args) > 1:
self.feed('%s=%s' % (_arg_name, args[0]))
for arg in args[1:-1]:
self.feed(arg)
self.feed('%s,' % args[-1])
else:
self.feed('%s=%s,' % (_arg_name, arg_string))
imports.update(arg_imports)
imports = set()
name, args, kwargs = self.operation.deconstruct()
operation_args = get_func_args(self.operation.__init__)
# See if this operation is in django.db.migrations. If it is,
# We can just use the fact we already have that imported,
# otherwise, we need to add an import for the operation class.
if getattr(migrations, name, None) == self.operation.__class__:
self.feed('migrations.%s(' % name)
else:
imports.add('import %s' % (self.operation.__class__.__module__))
self.feed('%s.%s(' % (self.operation.__class__.__module__, name))
self.indent()
for i, arg in enumerate(args):
arg_value = arg
arg_name = operation_args[i]
_write(arg_name, arg_value)
i = len(args)
# Only iterate over remaining arguments
for arg_name in operation_args[i:]:
if arg_name in kwargs: # Don't sort to maintain signature order
arg_value = kwargs[arg_name]
_write(arg_name, arg_value)
self.unindent()
self.feed('),')
return self.render(), imports
def indent(self):
self.indentation += 1
def unindent(self):
self.indentation -= 1
def feed(self, line):
self.buff.append(' ' * (self.indentation * 4) + line)
def render(self):
return '\n'.join(self.buff)
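# Rough sketch of what OperationWriter produces (indentation shown loosely;
# the exact whitespace comes from feed()/indent() above):
#
#   code, imports = OperationWriter(migrations.DeleteModel(name='Foo')).serialize()
#   # code    -> "migrations.DeleteModel(\n    name='Foo',\n),"
#   # imports -> set(), since DeleteModel lives in django.db.migrations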
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"replaces_str": "",
"initial_str": "",
}
imports = set()
# Deconstruct operations
operations = []
for operation in self.migration.operations:
operation_string, operation_imports = OperationWriter(operation).serialize()
imports.update(operation_imports)
operations.append(operation_string)
items["operations"] = "\n".join(operations) + "\n" if operations else ""
# Format dependencies, and write out swappable dependencies correctly
dependencies = []
for dependency in self.migration.dependencies:
if dependency[0] == "__setting__":
dependencies.append(" migrations.swappable_dependency(settings.%s)," % dependency[1])
imports.add("from django.conf import settings")
else:
# No need to output bytestrings for dependencies
dependency = tuple(force_text(s) for s in dependency)
dependencies.append(" %s," % self.serialize(dependency)[0])
items["dependencies"] = "\n".join(dependencies) + "\n" if dependencies else ""
# Format imports nicely, swapping imports of functions from migration files
# for comments
migration_imports = set()
for line in list(imports):
if re.match(r"^import (.*)\.\d+[^\s]*$", line):
migration_imports.add(line.split("import")[1].strip())
imports.remove(line)
self.needs_manual_porting = True
# django.db.migrations is always used, but models import may not be.
# If models import exists, merge it with migrations import.
if "from django.db import models" in imports:
imports.discard("from django.db import models")
imports.add("from django.db import migrations, models")
else:
imports.add("from django.db import migrations")
# Sort imports by the package / module to be imported (the part after
# "from" in "from ... import ..." or after "import" in "import ...").
sorted_imports = sorted(imports, key=lambda i: i.split()[1])
items["imports"] = "\n".join(sorted_imports) + "\n" if imports else ""
if migration_imports:
items["imports"] += (
"\n\n# Functions from the following migrations need manual "
"copying.\n# Move them and any dependencies into this file, "
"then update the\n# RunPython operations to refer to the local "
"versions:\n# %s"
) % "\n# ".join(sorted(migration_imports))
# If there's a replaces, make a string for it
if self.migration.replaces:
items['replaces_str'] = "\n replaces = %s\n" % self.serialize(self.migration.replaces)[0]
# Hinting that goes into comment
items.update(
version=get_version(),
timestamp=now().strftime("%Y-%m-%d %H:%M"),
)
if self.migration.initial:
items['initial_str'] = "\n initial = True\n"
return MIGRATION_TEMPLATE % items
@property
def basedir(self):
migrations_package_name, _ = MigrationLoader.migrations_module(self.migration.app_label)
if migrations_package_name is None:
raise ValueError(
"Django can't create migrations for app '%s' because "
"migrations have been disabled via the MIGRATION_MODULES "
"setting." % self.migration.app_label
)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_package_name)
except ImportError:
pass
else:
try:
return upath(module_dir(migrations_module))
except ValueError:
pass
# Alright, see if it's a direct submodule of the app
app_config = apps.get_app_config(self.migration.app_label)
maybe_app_name, _, migrations_package_basename = migrations_package_name.rpartition(".")
if app_config.name == maybe_app_name:
return os.path.join(app_config.path, migrations_package_basename)
# In case of using MIGRATION_MODULES setting and the custom package
# doesn't exist, create one, starting from an existing package
existing_dirs, missing_dirs = migrations_package_name.split("."), []
while existing_dirs:
missing_dirs.insert(0, existing_dirs.pop(-1))
try:
base_module = import_module(".".join(existing_dirs))
except ImportError:
continue
else:
try:
base_dir = upath(module_dir(base_module))
except ValueError:
continue
else:
break
else:
raise ValueError(
"Could not locate an appropriate location to create "
"migrations package %s. Make sure the toplevel "
"package exists and can be imported." %
migrations_package_name)
final_dir = os.path.join(base_dir, *missing_dirs)
if not os.path.isdir(final_dir):
os.makedirs(final_dir)
for missing_dir in missing_dirs:
base_dir = os.path.join(base_dir, missing_dir)
with open(os.path.join(base_dir, "__init__.py"), "w"):
pass
return final_dir
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
return os.path.join(self.basedir, self.filename)
@classmethod
def serialize(cls, value):
return serializer_factory(value).serialize()
MIGRATION_TEMPLATE = """\
# -*- coding: utf-8 -*-
# Generated by Django %(version)s on %(timestamp)s
from __future__ import unicode_literals
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
| bsd-3-clause |
udacity/ggplot | ggplot/components/alphas.py | 12 | 1784 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from .legend import get_labels
from ..utils.exceptions import GgplotError
def assign_alphas(data, aes):
"""
Assigns alpha values to the given data based on the aes
and creates an appropriate legend entry.
The mapped alpha values fall in the closed range [0.1, 1].
Parameters
----------
data : DataFrame
dataframe which should have alpha values assigned to
aes : aesthetic
mapping, including a mapping from alpha values to variable
Returns
-------
data : DataFrame
the changed dataframe
legend_entry : dict
An entry into the legend dictionary.
Documented in `components.legend`
"""
legend_entry = dict()
if 'alpha' in aes:
alpha_col = aes['alpha']
# Check that values are in the right format
try:
values = data[alpha_col].astype(np.float)
except ValueError:
raise GgplotError(
"Alpha aesthetic '%s' contains non-numerical data" % alpha_col)
labels, scale_type, indices = get_labels(data, alpha_col)
_min, _max = values.min(), values.max()
normalize = lambda v: np.interp(v, [_min, _max], [0.1, 1])
data[':::alpha_mapping:::'] = normalize(values)
if scale_type == "continuous":
quantiles = np.percentile(data[':::alpha_mapping:::'], indices)
elif scale_type == "discrete":
quantiles = normalize(np.array(labels, dtype=np.float))
legend_entry = {
'column_name': alpha_col,
'dict': dict(zip(quantiles, labels)),
'scale_type': scale_type}
return data, legend_entry
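# Minimal usage sketch (the DataFrame and column name are assumptions):
#
#   import pandas as pd
#   df = pd.DataFrame({'x': [1, 2, 3], 'weight': [0.5, 1.0, 2.0]})
#   df, legend_entry = assign_alphas(df, {'alpha': 'weight'})
#   # df now carries a ':::alpha_mapping:::' column with values in [0.1, 1]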
| bsd-2-clause |
pwong-mapr/private-hue | desktop/core/ext-py/Mako-0.8.1/examples/bench/basic.py | 58 | 6913 | # basic.py - basic benchmarks adapted from Genshi
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from cgi import escape
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import sys
import timeit
def u(stringlit):
if sys.version_info >= (3,):
return stringlit
else:
return stringlit.decode('latin1')
__all__ = ['mako', 'mako_inheritance', 'jinja2', 'jinja2_inheritance',
'cheetah', 'django', 'myghty', 'genshi', 'kid']
# Templates content and constants
TITLE = 'Just a test'
USER = 'joe'
ITEMS = ['Number %d' % num for num in range(1, 15)]
U_ITEMS = [u(item) for item in ITEMS]
def genshi(dirname, verbose=False):
from genshi.template import TemplateLoader
loader = TemplateLoader([dirname], auto_reload=False)
template = loader.load('template.html')
def render():
data = dict(title=TITLE, user=USER, items=ITEMS)
return template.generate(**data).render('xhtml')
if verbose:
print(render())
return render
def myghty(dirname, verbose=False):
from myghty import interp
interpreter = interp.Interpreter(component_root=dirname)
def render():
data = dict(title=TITLE, user=USER, items=ITEMS)
buffer = StringIO()
interpreter.execute("template.myt", request_args=data, out_buffer=buffer)
return buffer.getvalue()
if verbose:
print(render())
return render
def mako(dirname, verbose=False):
from mako.template import Template
from mako.lookup import TemplateLookup
disable_unicode = (sys.version_info < (3,))
lookup = TemplateLookup(directories=[dirname], filesystem_checks=False, disable_unicode=disable_unicode)
template = lookup.get_template('template.html')
def render():
return template.render(title=TITLE, user=USER, list_items=U_ITEMS)
if verbose:
print(template.code + " " + render())
return render
mako_inheritance = mako
def jinja2(dirname, verbose=False):
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(dirname))
template = env.get_template('template.html')
def render():
return template.render(title=TITLE, user=USER, list_items=U_ITEMS)
if verbose:
print(render())
return render
jinja2_inheritance = jinja2
def cheetah(dirname, verbose=False):
from Cheetah.Template import Template
filename = os.path.join(dirname, 'template.tmpl')
template = Template(file=filename)
def render():
template.__dict__.update({'title': TITLE, 'user': USER,
'list_items': U_ITEMS})
return template.respond()
if verbose:
print(dir(template))
print(template.generatedModuleCode())
print(render())
return render
def django(dirname, verbose=False):
from django.conf import settings
settings.configure(TEMPLATE_DIRS=[os.path.join(dirname, 'templates')])
from django import template, templatetags
from django.template import loader
templatetags.__path__.append(os.path.join(dirname, 'templatetags'))
tmpl = loader.get_template('template.html')
def render():
data = {'title': TITLE, 'user': USER, 'items': ITEMS}
return tmpl.render(template.Context(data))
if verbose:
print(render())
return render
def kid(dirname, verbose=False):
import kid
kid.path = kid.TemplatePath([dirname])
template = kid.Template(file='template.kid')
def render():
template = kid.Template(file='template.kid',
title=TITLE, user=USER, items=ITEMS)
return template.serialize(output='xhtml')
if verbose:
print(render())
return render
def run(engines, number=2000, verbose=False):
basepath = os.path.abspath(os.path.dirname(__file__))
for engine in engines:
dirname = os.path.join(basepath, engine)
if verbose:
print('%s:' % engine.capitalize())
print('--------------------------------------------------------')
else:
sys.stdout.write('%s:' % engine.capitalize())
t = timeit.Timer(setup='from __main__ import %s; render = %s(r"%s", %s)'
% (engine, engine, dirname, verbose),
stmt='render()')
time = t.timeit(number=number) / number
if verbose:
print('--------------------------------------------------------')
print('%.2f ms' % (1000 * time))
if verbose:
print('--------------------------------------------------------')
if __name__ == '__main__':
engines = [arg for arg in sys.argv[1:] if arg[0] != '-']
if not engines:
engines = __all__
verbose = '-v' in sys.argv
if '-p' in sys.argv:
try:
import hotshot, hotshot.stats
prof = hotshot.Profile("template.prof")
benchtime = prof.runcall(run, engines, number=100, verbose=verbose)
stats = hotshot.stats.load("template.prof")
except ImportError:
import cProfile, pstats
stmt = "run(%r, number=%r, verbose=%r)" % (engines, 1000, verbose)
cProfile.runctx(stmt, globals(), {}, "template.prof")
stats = pstats.Stats("template.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats()
else:
run(engines, verbose=verbose)
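# Typical invocations (illustrative):
#   python basic.py                  # benchmark every engine in __all__
#   python basic.py mako jinja2 -v   # only Mako and Jinja2, with verbose output
#   python basic.py -p               # profile the run via hotshot or cProfile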
| apache-2.0 |
m0ppers/arangodb | 3rdParty/V8/V8-5.0.71.39/build/gyp/test/win/gyptest-cl-function-level-linking.py | 332 | 1595 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure function-level linking setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('function-level-linking.gyp', chdir=CHDIR)
test.build('function-level-linking.gyp', test.ALL, chdir=CHDIR)
def CheckForSectionString(binary, search_for, should_exist):
output = test.run_dumpbin('/headers', binary)
if should_exist and search_for not in output:
print 'Did not find "%s" in %s' % (search_for, binary)
test.fail_test()
elif not should_exist and search_for in output:
print 'Found "%s" in %s (and shouldn\'t have)' % (search_for, binary)
test.fail_test()
def Object(proj, obj):
sep = '.' if test.format == 'ninja' else '\\'
return 'obj\\%s%s%s' % (proj, sep, obj)
look_for = '''COMDAT; sym= "int __cdecl comdat_function'''
# When function level linking is on, the functions should be listed as
# separate comdat entries.
CheckForSectionString(
test.built_file_path(Object('test_fll_on', 'function-level-linking.obj'),
chdir=CHDIR),
look_for,
should_exist=True)
CheckForSectionString(
test.built_file_path(Object('test_fll_off', 'function-level-linking.obj'),
chdir=CHDIR),
look_for,
should_exist=False)
test.pass_test()
| apache-2.0 |
qenter/vlc-android | toolchains/arm/lib/python2.7/test/test_httplib.py | 40 | 19698 | import httplib
import array
import StringIO
import socket
import errno
import unittest
TestCase = unittest.TestCase
from test import test_support
HOST = test_support.HOST
class FakeSocket:
def __init__(self, text, fileclass=StringIO.StringIO):
self.text = text
self.fileclass = fileclass
self.data = ''
def sendall(self, data):
self.data += ''.join(data)
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise httplib.UnimplementedFileMode()
return self.fileclass(self.text)
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise socket.error(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFStringIO(StringIO.StringIO):
"""Like StringIO, but raises AssertionError on EOF.
This is used below to test that httplib doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = StringIO.StringIO.read(self, n)
if data == '':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = StringIO.StringIO.readline(self, length)
if data == '':
raise AssertionError('caller tried to read past EOF')
return data
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(':', 1)
if len(kv) > 1 and kv[0].lower() == 'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# POST with empty body
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('POST', '/', '')
self.assertEqual(conn._buffer.content_length, '0',
'Header Content-Length not set')
# PUT request with empty body
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('PUT', '/', '')
self.assertEqual(conn._buffer.content_length, '0',
'Header Content-Length not set')
def test_putheader(self):
conn = httplib.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length',42)
self.assertTrue('Content-length: 42' in conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should wrapped by [] if
# its actual IPv6 address
expected = 'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
'Accept-Encoding: identity\r\n\r\n'
conn = httplib.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = 'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
'Accept-Encoding: identity\r\n\r\n'
conn = httplib.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), 'Text')
self.assertTrue(resp.isclosed())
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
self.assertRaises(httplib.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = httplib.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
# if we have a length, the system knows when to close itself
# same behaviour than when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertTrue(resp.isclosed())
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertEqual(resp.read(1), '')
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), 'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), 'xt')
self.assertEqual(resp.read(1), '')
self.assertTrue(resp.isclosed())
def test_host_port(self):
# Check invalid host_port
# Note that httplib does not accept user:password@ in the host-port.
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80)):
http = httplib.HTTP(hp)
c = http._conn
if h != c.host:
self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
if p != c.port:
self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE";'
' Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = httplib.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
if cookies != hdr:
self.fail("multiple headers not combined properly")
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFStringIO)
resp = httplib.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read() != "":
self.fail("Did not expect response from HEAD request")
def test_send_file(self):
expected = 'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
'Accept-Encoding: identity\r\nContent-Length:'
body = open(__file__, 'rb')
conn = httplib.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected))
def test_send(self):
expected = 'this is a test this is only a test'
conn = httplib.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = ''
conn.send(array.array('c', expected))
self.assertEqual(expected, sock.data)
sock.data = ''
conn.send(StringIO.StringIO(expected))
self.assertEqual(expected, sock.data)
def test_chunked(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), 'hello world')
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except httplib.IncompleteRead, i:
self.assertEqual(i.partial, 'hello world')
self.assertEqual(repr(i),'IncompleteRead(11 bytes read)')
self.assertEqual(str(i),'IncompleteRead(11 bytes read)')
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + '0\r\n')
resp = httplib.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), '')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
def test_negative_content_length(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\n'
'Content-Length: -1\r\n\r\nHello\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), 'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = httplib.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except httplib.IncompleteRead as i:
self.assertEqual(i.partial, 'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = httplib.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(socket.error,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
def test_filenoattr(self):
# Just test the fileno attribute in the HTTPResponse Object.
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
self.assertTrue(hasattr(resp,'fileno'),
'HTTPResponse should expose a fileno attribute')
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
self.skipTest("disabled for HTTP 0.9 support")
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = httplib.HTTPResponse(FakeSocket(body))
self.assertRaises((httplib.LineTooLong, httplib.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = httplib.HTTPResponse(FakeSocket(body))
self.assertRaises(httplib.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
)
resp = httplib.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(httplib.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = httplib.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), '')
self.assertTrue(resp.isclosed())
class OfflineTest(TestCase):
def test_responses(self):
self.assertEqual(httplib.responses[httplib.NOT_FOUND], "Not Found")
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = test_support.bind_port(self.serv)
self.source_port = test_support.find_unused_port()
self.serv.listen(5)
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = httplib.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(httplib, 'HTTPSConnection'),
'httplib.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = httplib.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = test_support.bind_port(self.serv)
self.serv.listen(5)
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
'''This will prove that the timeout gets through
HTTPConnection and into the socket.
'''
# default -- use global socket timeout
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = httplib.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class HTTPSTimeoutTest(TestCase):
# XXX Here should be tests for HTTPS, there isn't any right now!
def test_attributes(self):
# simple test to check it's storing it
if hasattr(httplib, 'HTTPSConnection'):
h = httplib.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
@unittest.skipIf(not hasattr(httplib, 'HTTPS'), 'httplib.HTTPS not available')
def test_host_port(self):
# Check invalid host_port
# Note that httplib does not accept user:password@ in the host-port.
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(httplib.InvalidURL, httplib.HTTP, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000", "fe80::207:e9ff:fe9b",
8000),
("pypi.python.org:443", "pypi.python.org", 443),
("pypi.python.org", "pypi.python.org", 443),
("pypi.python.org:", "pypi.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443)):
http = httplib.HTTPS(hp)
c = http._conn
if h != c.host:
self.fail("Host incorrectly parsed: %s != %s" % (h, c.host))
if p != c.port:
self.fail("Port incorrectly parsed: %s != %s" % (p, c.host))
def test_main(verbose=None):
test_support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
HTTPSTimeoutTest, SourceAddressTest)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
jsha/letsencrypt | certbot/tests/ocsp_test.py | 2 | 6447 | """Tests for ocsp.py"""
# pylint: disable=protected-access
import unittest
import mock
from certbot import errors
out = """Missing = in header key=value
ocsp: Use -help for summary.
"""
class OCSPTest(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
from certbot import ocsp
with mock.patch('certbot.ocsp.Popen') as mock_popen:
with mock.patch('certbot.util.exe_exists') as mock_exists:
mock_communicate = mock.MagicMock()
mock_communicate.communicate.return_value = (None, out)
mock_popen.return_value = mock_communicate
mock_exists.return_value = True
self.checker = ocsp.RevocationChecker()
def tearDown(self):
pass
@mock.patch('certbot.ocsp.logger.info')
@mock.patch('certbot.ocsp.Popen')
@mock.patch('certbot.util.exe_exists')
def test_init(self, mock_exists, mock_popen, mock_log):
mock_communicate = mock.MagicMock()
mock_communicate.communicate.return_value = (None, out)
mock_popen.return_value = mock_communicate
mock_exists.return_value = True
from certbot import ocsp
checker = ocsp.RevocationChecker()
self.assertEqual(mock_popen.call_count, 1)
self.assertEqual(checker.host_args("x"), ["Host=x"])
mock_communicate.communicate.return_value = (None, out.partition("\n")[2])
checker = ocsp.RevocationChecker()
self.assertEqual(checker.host_args("x"), ["Host", "x"])
self.assertEqual(checker.broken, False)
mock_exists.return_value = False
mock_popen.call_count = 0
checker = ocsp.RevocationChecker()
self.assertEqual(mock_popen.call_count, 0)
self.assertEqual(mock_log.call_count, 1)
self.assertEqual(checker.broken, True)
@mock.patch('certbot.ocsp.RevocationChecker.determine_ocsp_server')
@mock.patch('certbot.util.run_script')
def test_ocsp_revoked(self, mock_run, mock_determine):
self.checker.broken = True
mock_determine.return_value = ("", "")
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.checker.broken = False
mock_run.return_value = tuple(openssl_happy[1:])
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.assertEqual(mock_run.call_count, 0)
mock_determine.return_value = ("http://x.co", "x.co")
self.assertEqual(self.checker.ocsp_revoked("blah.pem", "chain.pem"), False)
mock_run.side_effect = errors.SubprocessError("Unable to load certificate launcher")
self.assertEqual(self.checker.ocsp_revoked("x", "y"), False)
self.assertEqual(mock_run.call_count, 2)
@mock.patch('certbot.ocsp.logger.info')
@mock.patch('certbot.util.run_script')
def test_determine_ocsp_server(self, mock_run, mock_info):
uri = "http://ocsp.stg-int-x1.letsencrypt.org/"
host = "ocsp.stg-int-x1.letsencrypt.org"
mock_run.return_value = uri, ""
self.assertEqual(self.checker.determine_ocsp_server("beep"), (uri, host))
mock_run.return_value = "ftp:/" + host + "/", ""
self.assertEqual(self.checker.determine_ocsp_server("beep"), (None, None))
self.assertEqual(mock_info.call_count, 1)
c = "confusion"
mock_run.side_effect = errors.SubprocessError(c)
self.assertEqual(self.checker.determine_ocsp_server("beep"), (None, None))
@mock.patch('certbot.ocsp.logger')
@mock.patch('certbot.util.run_script')
def test_translate_ocsp(self, mock_run, mock_log):
# pylint: disable=protected-access,star-args
mock_run.return_value = openssl_confused
from certbot import ocsp
self.assertEqual(ocsp._translate_ocsp_query(*openssl_happy), False)
self.assertEqual(ocsp._translate_ocsp_query(*openssl_confused), False)
self.assertEqual(mock_log.debug.call_count, 1)
self.assertEqual(mock_log.warn.call_count, 0)
mock_log.debug.call_count = 0
self.assertEqual(ocsp._translate_ocsp_query(*openssl_unknown), False)
self.assertEqual(mock_log.debug.call_count, 1)
self.assertEqual(mock_log.warn.call_count, 0)
self.assertEqual(ocsp._translate_ocsp_query(*openssl_expired_ocsp), False)
self.assertEqual(mock_log.debug.call_count, 2)
self.assertEqual(ocsp._translate_ocsp_query(*openssl_broken), False)
self.assertEqual(mock_log.warn.call_count, 1)
mock_log.info.call_count = 0
self.assertEqual(ocsp._translate_ocsp_query(*openssl_revoked), True)
self.assertEqual(mock_log.info.call_count, 0)
self.assertEqual(ocsp._translate_ocsp_query(*openssl_expired_ocsp_revoked), True)
self.assertEqual(mock_log.info.call_count, 1)
# pylint: disable=line-too-long
openssl_confused = ("", """
/etc/letsencrypt/live/example.org/cert.pem: good
This Update: Dec 17 00:00:00 2016 GMT
Next Update: Dec 24 00:00:00 2016 GMT
""",
"""
Response Verify Failure
139903674214048:error:27069065:OCSP routines:OCSP_basic_verify:certificate verify error:ocsp_vfy.c:138:Verify error:unable to get local issuer certificate
""")
openssl_happy = ("blah.pem", """
blah.pem: good
This Update: Dec 20 18:00:00 2016 GMT
Next Update: Dec 27 18:00:00 2016 GMT
""",
"Response verify OK")
openssl_revoked = ("blah.pem", """
blah.pem: revoked
This Update: Dec 20 01:00:00 2016 GMT
Next Update: Dec 27 01:00:00 2016 GMT
Revocation Time: Dec 20 01:46:34 2016 GMT
""",
"""Response verify OK""")
openssl_unknown = ("blah.pem", """
blah.pem: unknown
This Update: Dec 20 18:00:00 2016 GMT
Next Update: Dec 27 18:00:00 2016 GMT
""",
"Response verify OK")
openssl_broken = ("", "tentacles", "Response verify OK")
openssl_expired_ocsp = ("blah.pem", """
blah.pem: WARNING: Status times invalid.
140659132298912:error:2707307D:OCSP routines:OCSP_check_validity:status expired:ocsp_cl.c:372:
good
This Update: Apr 6 00:00:00 2016 GMT
Next Update: Apr 13 00:00:00 2016 GMT
""",
"""Response verify OK""")
openssl_expired_ocsp_revoked = ("blah.pem", """
blah.pem: WARNING: Status times invalid.
140659132298912:error:2707307D:OCSP routines:OCSP_check_validity:status expired:ocsp_cl.c:372:
revoked
This Update: Apr 6 00:00:00 2016 GMT
Next Update: Apr 13 00:00:00 2016 GMT
""",
"""Response verify OK""")
if __name__ == '__main__':
unittest.main() # pragma: no cover
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/254_test_ioctl.py | 12 | 2469 | import unittest
from test.test_support import TestSkipped, run_unittest
import os, struct
try:
import fcntl, termios
except ImportError:
raise TestSkipped("No fcntl or termios module")
if not hasattr(termios,'TIOCGPGRP'):
raise TestSkipped("termios module doesn't have TIOCGPGRP")
try:
tty = open("/dev/tty", "r")
tty.close()
except IOError:
raise TestSkipped("Unable to open /dev/tty")
try:
import pty
except ImportError:
pty = None
class IoctlTests(unittest.TestCase):
def test_ioctl(self):
# If this process has been put into the background, TIOCGPGRP returns
# the session ID instead of the process group id.
ids = (os.getpgrp(), os.getsid(0))
tty = open("/dev/tty", "r")
r = fcntl.ioctl(tty, termios.TIOCGPGRP, " ")
rpgrp = struct.unpack("i", r)[0]
self.assert_(rpgrp in ids, "%s not in %s" % (rpgrp, ids))
def test_ioctl_mutate(self):
import array
buf = array.array('i', [0])
ids = (os.getpgrp(), os.getsid(0))
tty = open("/dev/tty", "r")
r = fcntl.ioctl(tty, termios.TIOCGPGRP, buf, 1)
rpgrp = buf[0]
self.assertEquals(r, 0)
self.assert_(rpgrp in ids, "%s not in %s" % (rpgrp, ids))
def test_ioctl_signed_unsigned_code_param(self):
if not pty:
raise TestSkipped('pty module required')
mfd, sfd = pty.openpty()
try:
if termios.TIOCSWINSZ < 0:
set_winsz_opcode_maybe_neg = termios.TIOCSWINSZ
set_winsz_opcode_pos = termios.TIOCSWINSZ & 0xffffffffL
else:
set_winsz_opcode_pos = termios.TIOCSWINSZ
set_winsz_opcode_maybe_neg, = struct.unpack("i",
struct.pack("I", termios.TIOCSWINSZ))
# We're just testing that these calls do not raise exceptions.
saved_winsz = fcntl.ioctl(mfd, termios.TIOCGWINSZ, "\0"*8)
our_winsz = struct.pack("HHHH",80,25,0,0)
# test both with a positive and potentially negative ioctl code
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_pos, our_winsz)
new_winsz = fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, our_winsz)
fcntl.ioctl(mfd, set_winsz_opcode_maybe_neg, saved_winsz)
finally:
os.close(mfd)
os.close(sfd)
def test_main():
run_unittest(IoctlTests)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
physycom/QGIS | python/plugins/processing/gui/matrixmodelerwidget.py | 14 | 5165 | # -*- coding: utf-8 -*-
"""
***************************************************************************
MatrixModelerWidget.py
---------------------
Date : May 2018
Copyright : (C) 2018 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'May 2018'
__copyright__ = '(C) 2018, Alexander Bruy'
import os
import warnings
from qgis.PyQt import uic
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtGui import QStandardItemModel, QStandardItem
from qgis.PyQt.QtWidgets import QInputDialog, QMessageBox
from qgis.core import QgsApplication
pluginPath = os.path.split(os.path.dirname(__file__))[0]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'matrixmodelerwidgetbase.ui'))
class MatrixModelerWidget(BASE, WIDGET):
def __init__(self, parent=None):
super(MatrixModelerWidget, self).__init__(parent)
self.setupUi(self)
self.btnAddColumn.setIcon(QgsApplication.getThemeIcon('/mActionNewAttribute.svg'))
self.btnRemoveColumn.setIcon(QgsApplication.getThemeIcon('/mActionDeleteAttribute.svg'))
self.btnAddRow.setIcon(QgsApplication.getThemeIcon('/symbologyAdd.svg'))
self.btnRemoveRow.setIcon(QgsApplication.getThemeIcon('/symbologyRemove.svg'))
self.btnClear.setIcon(QgsApplication.getThemeIcon('console/iconClearConsole.svg'))
self.btnAddColumn.clicked.connect(self.addColumn)
self.btnRemoveColumn.clicked.connect(self.removeColumns)
self.btnAddRow.clicked.connect(self.addRow)
self.btnRemoveRow.clicked.connect(self.removeRows)
self.btnClear.clicked.connect(self.clearTable)
items = [QStandardItem('0')]
model = QStandardItemModel()
model.appendColumn(items)
self.tblView.setModel(model)
self.tblView.horizontalHeader().sectionDoubleClicked.connect(self.changeHeader)
def addColumn(self):
model = self.tblView.model()
items = [QStandardItem('0') for i in range(model.rowCount())]
model.appendColumn(items)
def removeColumns(self):
indexes = sorted(self.tblView.selectionModel().selectedColumns())
self.tblView.setUpdatesEnabled(False)
for i in reversed(indexes):
self.tblView.model().removeColumns(i.column(), 1)
self.tblView.setUpdatesEnabled(True)
def addRow(self):
model = self.tblView.model()
items = [QStandardItem('0') for i in range(model.columnCount())]
model.appendRow(items)
def removeRows(self):
indexes = sorted(self.tblView.selectionModel().selectedRows())
self.tblView.setUpdatesEnabled(False)
for i in reversed(indexes):
self.tblView.model().removeRows(i.row(), 1)
self.tblView.setUpdatesEnabled(True)
def clearTable(self, removeAll=False):
res = QMessageBox.question(self, self.tr('Clear?'), self.tr('Are you sure you want to clear the table?'))
if res == QMessageBox.Yes:
self.tblView.model().clear()
def changeHeader(self, index):
txt, ok = QInputDialog.getText(self, self.tr("Enter column name"), self.tr("Column name"))
if ok:
self.tblView.model().setHeaderData(index, Qt.Horizontal, txt)
def value(self):
cols = self.tblView.model().columnCount()
rows = self.tblView.model().rowCount()
items = []
for row in range(rows):
for col in range(cols):
items.append(str(self.tblView.model().item(row, col).text()))
return items
def setValue(self, headers, table):
cols = len(headers)
rows = len(table) // cols
model = QStandardItemModel(rows, cols)
# Set the labels on the new model; the old model is about to be replaced,
# so setting them there would have no effect.
model.setHorizontalHeaderLabels(headers)
for row in range(rows):
for col in range(cols):
item = QStandardItem(str(table[row * cols + col]))
model.setItem(row, col, item)
self.tblView.setModel(model)
def headers(self):
headers = []
model = self.tblView.model()
for i in range(model.columnCount()):
headers.append(str(model.headerData(i, Qt.Horizontal)))
return headers
def fixedRows(self):
return self.chkFixedRows.isChecked()
def setFixedRows(self, fixedRows):
self.chkFixedRows.setChecked(fixedRows)
| gpl-2.0 |
jnewland/home-assistant | homeassistant/scripts/keyring.py | 11 | 2187 | """Script to get, set and delete secrets stored in the keyring."""
import argparse
import getpass
import os
from homeassistant.util.yaml import _SECRET_NAMESPACE
REQUIREMENTS = ['keyring==17.1.1', 'keyrings.alt==3.1.1']
def run(args):
"""Handle keyring script."""
parser = argparse.ArgumentParser(
description=("Modify Home Assistant secrets in the default keyring. "
"Use the secrets in configuration files with: "
"!secret <name>"))
parser.add_argument(
'--script', choices=['keyring'])
parser.add_argument(
'action', choices=['get', 'set', 'del', 'info'],
help="Get, set or delete a secret")
parser.add_argument(
'name', help="Name of the secret", nargs='?', default=None)
import keyring
from keyring.util import platform_ as platform
args = parser.parse_args(args)
if args.action == 'info':
keyr = keyring.get_keyring()
print('Keyring version {}\n'.format(REQUIREMENTS[0].split('==')[1]))
print('Active keyring : {}'.format(keyr.__module__))
config_name = os.path.join(platform.config_root(), 'keyringrc.cfg')
print('Config location : {}'.format(config_name))
print('Data location : {}\n'.format(platform.data_root()))
elif args.name is None:
parser.print_help()
return 1
if args.action == 'set':
the_secret = getpass.getpass(
'Please enter the secret for {}: '.format(args.name))
keyring.set_password(_SECRET_NAMESPACE, args.name, the_secret)
print('Secret {} set successfully'.format(args.name))
elif args.action == 'get':
the_secret = keyring.get_password(_SECRET_NAMESPACE, args.name)
if the_secret is None:
print('Secret {} not found'.format(args.name))
else:
print('Secret {}={}'.format(args.name, the_secret))
elif args.action == 'del':
try:
keyring.delete_password(_SECRET_NAMESPACE, args.name)
print('Deleted secret {}'.format(args.name))
except keyring.errors.PasswordDeleteError:
print('Secret {} not found'.format(args.name))
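# Typical invocations (illustrative; the script is dispatched through the
# Home Assistant command line):
#   hass --script keyring set http_password
#   hass --script keyring get http_password
#   hass --script keyring del http_password
# A stored secret can then be referenced from configuration.yaml as
#   api_password: !secret http_password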
| apache-2.0 |
subfusc/anki | thirdparty/send2trash/plat_win.py | 20 | 1695 | # Copyright 2010 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
from ctypes import windll, Structure, byref, c_uint
from ctypes.wintypes import HWND, UINT, LPCWSTR, BOOL
import os
import os.path as op
shell32 = windll.shell32
SHFileOperationW = shell32.SHFileOperationW
class SHFILEOPSTRUCTW(Structure):
_fields_ = [
(u"hwnd", HWND),
(u"wFunc", UINT),
(u"pFrom", LPCWSTR),
(u"pTo", LPCWSTR),
(u"fFlags", c_uint),
(u"fAnyOperationsAborted", BOOL),
(u"hNameMappings", c_uint),
(u"lpszProgressTitle", LPCWSTR),
]
FO_MOVE = 1
FO_COPY = 2
FO_DELETE = 3
FO_RENAME = 4
FOF_MULTIDESTFILES = 1
FOF_SILENT = 4
FOF_NOCONFIRMATION = 16
FOF_ALLOWUNDO = 64
FOF_NOERRORUI = 1024
def send2trash(path):
opath = path
if not isinstance(path, unicode):
path = unicode(path, u'mbcs')
if not op.isabs(path):
path = op.abspath(path)
fileop = SHFILEOPSTRUCTW()
fileop.hwnd = 0
fileop.wFunc = FO_DELETE
fileop.pFrom = LPCWSTR(path + u'\0')
fileop.pTo = None
fileop.fFlags = FOF_ALLOWUNDO | FOF_NOCONFIRMATION | FOF_NOERRORUI | FOF_SILENT
fileop.fAnyOperationsAborted = 0
fileop.hNameMappings = 0
fileop.lpszProgressTitle = None
result = SHFileOperationW(byref(fileop))
if result:
# user's system is broken, just delete
os.unlink(opath)
#msg = u"Couldn't perform operation. Error code: %d" % result
#raise OSError(msg)
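# Example usage (sketch; the path is an assumption):
#   send2trash(u'C:\\Users\\me\\Desktop\\old_report.txt')
# Because fFlags includes FOF_ALLOWUNDO, the file lands in the Recycle Bin
# and the deletion can be undone from Explorer.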
| agpl-3.0 |
majidaldo/ansible-modules-core | database/mysql/mysql_variables.py | 84 | 8413 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage mysql variables
(c) 2013, Balazs Pocze <banyek@gawker.com>
Certain parts are taken from Mark Theunissen's mysqldb module
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: mysql_variables
short_description: Manage MySQL global variables
description:
- Query / Set MySQL variables
version_added: 1.3
author: "Balazs Pocze (@banyek)"
options:
variable:
description:
- Variable name to operate
required: True
value:
description:
- If set, then sets variable value to this
required: False
login_user:
description:
- username to connect mysql host, if defined login_password also needed.
required: False
login_password:
description:
- password to connect mysql host, if defined login_user also needed.
required: False
login_host:
description:
- mysql host to connect
required: False
login_port:
version_added: "2.0"
description:
- mysql port to connect
required: False
login_unix_socket:
description:
- unix socket to connect mysql server
'''
EXAMPLES = '''
# Check for sync_binlog setting
- mysql_variables: variable=sync_binlog
# Set read_only variable to 1
- mysql_variables: variable=read_only value=1
'''
import ConfigParser
import os
import warnings
from re import match
try:
import MySQLdb
except ImportError:
mysqldb_found = False
else:
mysqldb_found = True
def typedvalue(value):
"""
Convert value to number whenever possible, return same value
otherwise.
>>> typedvalue('3')
3
>>> typedvalue('3.0')
3.0
>>> typedvalue('foobar')
'foobar'
"""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES WHERE Variable_name = %s", (mysqlvar,))
mysqlvar_val = cursor.fetchall()
if len(mysqlvar_val) == 1:
return mysqlvar_val[0][1]
else:
return None
def setvariable(cursor, mysqlvar, value):
""" Set a global mysql variable to a given value
The DB driver will handle quoting of the given value based on its
type, thus numeric strings like '3.0' or '8' are illegal, they
should be passed as numeric literals.
"""
query = "SET GLOBAL %s = " % mysql_quote_identifier(mysqlvar, 'vars')
try:
cursor.execute(query + "%s", (value,))
cursor.fetchall()
result = True
except Exception, e:
result = str(e)
return result
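# Example of the statement this builds (sketch): setvariable(cursor, 'read_only', 1)
# executes roughly
#   SET GLOBAL `read_only` = 1
# with the value bound as a driver parameter -- which is why callers convert
# numeric strings via typedvalue() first; passing '1' would set a string value.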
def strip_quotes(s):
""" Remove surrounding single or double quotes
>>> print strip_quotes('hello')
hello
>>> print strip_quotes('"hello"')
hello
>>> print strip_quotes("'hello'")
hello
>>> print strip_quotes("'hello")
'hello
"""
single_quote = "'"
double_quote = '"'
if s.startswith(single_quote) and s.endswith(single_quote):
s = s.strip(single_quote)
elif s.startswith(double_quote) and s.endswith(double_quote):
s = s.strip(double_quote)
return s
def config_get(config, section, option):
""" Calls ConfigParser.get and strips quotes
See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
"""
return strip_quotes(config.get(section, option))
def load_mycnf():
config = ConfigParser.RawConfigParser()
mycnf = os.path.expanduser('~/.my.cnf')
if not os.path.exists(mycnf):
return False
try:
config.readfp(open(mycnf))
except (IOError):
return False
# We support two forms of passwords in .my.cnf, both pass= and password=,
# as these are both supported by MySQL.
try:
passwd = config_get(config, 'client', 'password')
except (ConfigParser.NoOptionError):
try:
passwd = config_get(config, 'client', 'pass')
except (ConfigParser.NoOptionError):
return False
# If .my.cnf doesn't specify a user, default to user login name
try:
user = config_get(config, 'client', 'user')
except (ConfigParser.NoOptionError):
user = getpass.getuser()
creds = dict(user=user, passwd=passwd)
return creds
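# Example ~/.my.cnf this loader understands (sketch; values are placeholders):
#   [client]
#   user = "ansible"
#   password = "s3cret"
# Both `password =` and `pass =` are accepted, and surrounding quotes are
# stripped by config_get().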
def main():
module = AnsibleModule(
argument_spec = dict(
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="127.0.0.1"),
login_port=dict(default="3306", type='int'),
login_unix_socket=dict(default=None),
variable=dict(default=None),
value=dict(default=None)
)
)
user = module.params["login_user"]
password = module.params["login_password"]
host = module.params["login_host"]
port = module.params["login_port"]
mysqlvar = module.params["variable"]
value = module.params["value"]
if mysqlvar is None:
module.fail_json(msg="Cannot run without variable to operate with")
if match('^[0-9a-z_]+$', mysqlvar) is None:
module.fail_json(msg="invalid variable name \"%s\"" % mysqlvar)
if not mysqldb_found:
module.fail_json(msg="the python mysqldb module is required")
else:
warnings.filterwarnings('error', category=MySQLdb.Warning)
# Either the caller passes both a username and password with which to connect to
# mysql, or they pass neither and allow this module to read the credentials from
# ~/.my.cnf.
login_password = module.params["login_password"]
login_user = module.params["login_user"]
if login_user is None and login_password is None:
mycnf_creds = load_mycnf()
if mycnf_creds is False:
login_user = "root"
login_password = ""
else:
login_user = mycnf_creds["user"]
login_password = mycnf_creds["passwd"]
elif login_password is None or login_user is None:
module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
try:
if module.params["login_unix_socket"]:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
cursor = db_connection.cursor()
except Exception, e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. Exception message: %s" % e)
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)
if value is None:
module.exit_json(msg=mysqlvar_val)
else:
# Type values before using them
value_wanted = typedvalue(value)
value_actual = typedvalue(mysqlvar_val)
if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
try:
result = setvariable(cursor, mysqlvar, value_wanted)
except SQLParseError, e:
result = str(e)
if result is True:
module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
module.fail_json(msg=result, changed=False)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
main()
| gpl-3.0 |
yencarnacion/jaikuengine | .google_appengine/lib/django-1.5/tests/regressiontests/csrf_tests/tests.py | 41 | 13716 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core.context_processors import csrf
from django.http import HttpRequest, HttpResponse
from django.middleware.csrf import CsrfViewMiddleware, CSRF_KEY_LENGTH
from django.template import RequestContext, Template
from django.test import TestCase
from django.test.utils import override_settings
from django.views.decorators.csrf import csrf_exempt, requires_csrf_token, ensure_csrf_cookie
# Response/views used for CsrfResponseMiddleware and CsrfViewMiddleware tests
def post_form_response():
resp = HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text" /></form></body></html>
""", mimetype="text/html")
return resp
def post_form_view(request):
"""A view that returns a POST form (without a token)"""
return post_form_response()
# Response/views used for template tag tests
def token_view(request):
"""A view that uses {% csrf_token %}"""
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""
A view that doesn't use the token, but does use the csrf view processor.
"""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
class TestingHttpRequest(HttpRequest):
"""
A version of HttpRequest that allows us to change some things
more easily
"""
def is_secure(self):
return getattr(self, '_is_secure_override', False)
class CsrfViewMiddlewareTest(TestCase):
# The csrf token is potentially from an untrusted source, so could have
# characters that need dealing with.
_csrf_id_cookie = b"<1>\xc2\xa1"
_csrf_id = "1"
def _get_GET_no_csrf_cookie_request(self):
return TestingHttpRequest()
def _get_GET_csrf_cookie_request(self):
req = TestingHttpRequest()
req.COOKIES[settings.CSRF_COOKIE_NAME] = self._csrf_id_cookie
return req
def _get_POST_csrf_cookie_request(self):
req = self._get_GET_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_no_csrf_cookie_request(self):
req = self._get_GET_no_csrf_cookie_request()
req.method = "POST"
return req
def _get_POST_request_with_token(self):
req = self._get_POST_csrf_cookie_request()
req.POST['csrfmiddlewaretoken'] = self._csrf_id
return req
def _check_token_present(self, response, csrf_id=None):
self.assertContains(response, "name='csrfmiddlewaretoken' value='%s'" % (csrf_id or self._csrf_id))
def test_process_view_token_too_long(self):
"""
Check that if the token is longer than expected, it is ignored and
a new token is created.
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = 'x' * 10000000
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(len(csrf_cookie.value), CSRF_KEY_LENGTH)
def test_process_response_get_token_used(self):
"""
When get_token is used, check that the cookie is created and headers
patched.
"""
req = self._get_GET_no_csrf_cookie_request()
# Put tests for CSRF_COOKIE_* settings here
with self.settings(CSRF_COOKIE_NAME='myname',
CSRF_COOKIE_DOMAIN='.example.com',
CSRF_COOKIE_PATH='/test/',
CSRF_COOKIE_SECURE=True):
# token_view calls get_token() indirectly
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get('myname', False)
self.assertNotEqual(csrf_cookie, False)
self.assertEqual(csrf_cookie['domain'], '.example.com')
self.assertEqual(csrf_cookie['secure'], True)
self.assertEqual(csrf_cookie['path'], '/test/')
self.assertTrue('Cookie' in resp2.get('Vary',''))
def test_process_response_get_token_not_used(self):
"""
Check that if get_token() is not called, the view middleware does not
add a cookie.
"""
# This is important to make pages cacheable. Pages which do call
# get_token(), assuming they use the token, are not cacheable because
# the token is specific to the user
req = self._get_GET_no_csrf_cookie_request()
# non_token_view_using_request_processor does not call get_token(), but
# does use the csrf request processor. By using this, we are testing
# that the view processor is properly lazy and doesn't call get_token()
# until needed.
CsrfViewMiddleware().process_view(req, non_token_view_using_request_processor, (), {})
resp = non_token_view_using_request_processor(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies.get(settings.CSRF_COOKIE_NAME, False)
self.assertEqual(csrf_cookie, False)
# Check the request processing
def test_process_request_no_csrf_cookie(self):
"""
Check that if no CSRF cookie is present, the middleware rejects the
incoming request. This will stop login CSRF.
"""
req = self._get_POST_no_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_no_token(self):
"""
Check that if a CSRF cookie is present but no token, the middleware
rejects the incoming request.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_process_request_csrf_cookie_and_token(self):
"""
Check that if both a cookie and a token are present, the middleware lets it through.
"""
req = self._get_POST_request_with_token()
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_process_request_csrf_cookie_no_token_exempt_view(self):
"""
Check that if a CSRF cookie is present but no token, and the csrf_exempt
decorator has been applied to the view, the middleware lets it through.
"""
req = self._get_POST_csrf_cookie_request()
req2 = CsrfViewMiddleware().process_view(req, csrf_exempt(post_form_view), (), {})
self.assertEqual(None, req2)
def test_csrf_token_in_header(self):
"""
Check that we can pass in the token in a header instead of in the form
"""
req = self._get_POST_csrf_cookie_request()
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_put_and_delete_rejected(self):
"""
Tests that HTTP PUT and DELETE methods have protection
"""
req = TestingHttpRequest()
req.method = 'PUT'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
req = TestingHttpRequest()
req.method = 'DELETE'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(403, req2.status_code)
def test_put_and_delete_allowed(self):
"""
Tests that HTTP PUT and DELETE methods can get through with
X-CSRFToken and a cookie
"""
req = self._get_GET_csrf_cookie_request()
req.method = 'PUT'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
req = self._get_GET_csrf_cookie_request()
req.method = 'DELETE'
req.META['HTTP_X_CSRFTOKEN'] = self._csrf_id
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
# Tests for the template tag method
def test_token_node_no_csrf_cookie(self):
"""
Check that CsrfTokenNode works when no CSRF cookie is set
"""
req = self._get_GET_no_csrf_cookie_request()
resp = token_view(req)
self.assertEqual(resp.content, b'')
def test_token_node_empty_csrf_cookie(self):
"""
Check that we get a new token if the csrf_cookie is the empty string
"""
req = self._get_GET_no_csrf_cookie_request()
req.COOKIES[settings.CSRF_COOKIE_NAME] = b""
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self.assertNotEqual("", resp.content)
def test_token_node_with_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is set
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_exempt_view(self):
"""
Check that get_token still works for a view decorated with 'csrf_exempt'.
"""
req = self._get_GET_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, csrf_exempt(token_view), (), {})
resp = token_view(req)
self._check_token_present(resp)
def test_get_token_for_requires_csrf_token_view(self):
"""
Check that get_token works for a view decorated solely with requires_csrf_token
"""
req = self._get_GET_csrf_cookie_request()
resp = requires_csrf_token(token_view)(req)
self._check_token_present(resp)
def test_token_node_with_new_csrf_cookie(self):
"""
Check that CsrfTokenNode works when a CSRF cookie is created by
the middleware (when one was not already present)
"""
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, token_view, (), {})
resp = token_view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
csrf_cookie = resp2.cookies[settings.CSRF_COOKIE_NAME]
self._check_token_present(resp, csrf_id=csrf_cookie.value)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_bad_referer(self):
"""
Test that a POST HTTPS request with a bad referer is rejected
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.evil.org/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertNotEqual(None, req2)
self.assertEqual(403, req2.status_code)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer(self):
"""
Test that a POST HTTPS request with a good referer is accepted
"""
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com/somepage'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
@override_settings(ALLOWED_HOSTS=['www.example.com'])
def test_https_good_referer_2(self):
"""
Test that a POST HTTPS request with a good referer is accepted
where the referer contains no trailing slash
"""
# See ticket #15617
req = self._get_POST_request_with_token()
req._is_secure_override = True
req.META['HTTP_HOST'] = 'www.example.com'
req.META['HTTP_REFERER'] = 'https://www.example.com'
req2 = CsrfViewMiddleware().process_view(req, post_form_view, (), {})
self.assertEqual(None, req2)
def test_ensures_csrf_cookie_no_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with no middleware
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
resp = view(req)
self.assertTrue(resp.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp.get('Vary',''))
def test_ensures_csrf_cookie_with_middleware(self):
"""
Tests that ensures_csrf_cookie decorator fulfils its promise
with the middleware enabled.
"""
@ensure_csrf_cookie
def view(request):
# Doesn't insert a token or anything
return HttpResponse(content="")
req = self._get_GET_no_csrf_cookie_request()
CsrfViewMiddleware().process_view(req, view, (), {})
resp = view(req)
resp2 = CsrfViewMiddleware().process_response(req, resp)
self.assertTrue(resp2.cookies.get(settings.CSRF_COOKIE_NAME, False))
self.assertTrue('Cookie' in resp2.get('Vary',''))
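# Condensed sketch (an addition) of the cycle the tests above exercise,
# assuming Django settings are configured as for this suite:
#
# req = TestingHttpRequest()
# CsrfViewMiddleware().process_view(req, token_view, (), {})
# resp2 = CsrfViewMiddleware().process_response(req, token_view(req))
# assert settings.CSRF_COOKIE_NAME in resp2.cookies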
| apache-2.0 |
0sw4l/villas-de-san-pablo | apps/utils/views.py | 1 | 1899 | from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import CreateView
from django.views.generic import ListView
from django.views.generic import TemplateView
from django.views.generic import UpdateView, DetailView
from apps.utils.shortcuts import get_object_or_none
class BaseListView(LoginRequiredMixin, ListView):
pass
class BaseCreateView(LoginRequiredMixin, CreateView):
template_name = 'apps/base/base_form.html'
def get_context_data(self, **kwargs):
context = super(BaseCreateView, self).get_context_data(**kwargs)
context['action'] = 'Crear'
return context
class BaseListViewDinamicHeader(LoginRequiredMixin, ListView):
context_object_name = "list"
query_fields = ()
# subclasses must define HEADER as a tuple of column names; default to an
# empty tuple so appending 'Acciones' in __init__ cannot fail on None
HEADER = ()
def __init__(self):
super(BaseListViewDinamicHeader, self).__init__()
self.HEADER += ('Acciones',)
def get_queryset(self):
return self.model.objects.all()
def get_context_data(self, **kwargs):
context = super(BaseListViewDinamicHeader, self).get_context_data(**kwargs)
context['header_table'] = self.get_header_table()
return context
def get_header_table(self):
return self.HEADER
class DirectDeleteMixin(object):
def get(self, request, *args, **kwargs):
return self.post(request, *args, **kwargs)
class BaseUpdateView(LoginRequiredMixin, UpdateView):
template_name = 'apps/base/base_form.html'
def get_context_data(self, **kwargs):
context = super(BaseUpdateView, self).get_context_data(**kwargs)
context['action'] = 'Modificar'
return context
def get_object(self, queryset=None):
obj = self.model.objects.get(id=self.kwargs['pk'])
return obj
class BaseTemplateView(LoginRequiredMixin, TemplateView):
pass
class BaseDetailView(LoginRequiredMixin, DetailView):
pass
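# Usage sketch (an addition): a hypothetical concrete list view built on the
# mixins above. 'Persona' is a made-up model; subclasses must define HEADER
# as a tuple, to which 'Acciones' is appended automatically in __init__.
#
# class PersonaListView(BaseListViewDinamicHeader):
# model = Persona
# HEADER = ('Nombre', 'Apellido')
# # rendered header: ('Nombre', 'Apellido', 'Acciones')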
| mit |
tekton/DocuCanvas | accounts/migrations/0007_auto__add_recordpermission__add_unique_recordpermission_contentType_us.py | 1 | 8125 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'RecordPermission'
db.create_table(u'accounts_recordpermission', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('contentType', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('recordID', self.gf('django.db.models.fields.IntegerField')()),
('canView', self.gf('django.db.models.fields.BooleanField')(default=False)),
('canUpdate', self.gf('django.db.models.fields.BooleanField')(default=False)),
('canDelete', self.gf('django.db.models.fields.BooleanField')(default=False)),
('viewableFields', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
('updatableFields', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True)),
))
db.send_create_signal(u'accounts', ['RecordPermission'])
# Adding unique constraint on 'RecordPermission', fields ['contentType', 'user', 'recordID']
db.create_unique(u'accounts_recordpermission', ['contentType_id', 'user_id', 'recordID'])
def backwards(self, orm):
# Removing unique constraint on 'RecordPermission', fields ['contentType', 'user', 'recordID']
db.delete_unique(u'accounts_recordpermission', ['contentType_id', 'user_id', 'recordID'])
# Deleting model 'RecordPermission'
db.delete_table(u'accounts_recordpermission')
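# Note (an addition): backwards() mirrors forwards() in reverse order -- the
# unique constraint is dropped before the table itself, since the constraint
# depends on the table's columns.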
models = {
u'accounts.account': {
'Meta': {'object_name': 'Account'},
'avatar': ('django.db.models.fields.CharField', [], {'default': "'/static/img/pony.png'", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'git_account': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'github_account': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'google_plus': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'accounts.googleaccount': {
'Meta': {'object_name': 'GoogleAccount'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Account']", 'null': 'True'}),
'account_label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'credentials': ('oauth2client.django_orm.CredentialsField', [], {'null': 'True'}),
'google_account_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'accounts.recordpermission': {
'Meta': {'unique_together': "(('contentType', 'user', 'recordID'),)", 'object_name': 'RecordPermission'},
'canDelete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canUpdate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'canView': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'contentType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'recordID': ('django.db.models.fields.IntegerField', [], {}),
'updatableFields': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'viewableFields': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['accounts'] | gpl-3.0 |
Ivoz/pip | pip/_vendor/requests/packages/chardet/chardetect.py | 743 | 1141 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from io import open
from sys import argv, stdin
from chardet.universaldetector import UniversalDetector
def description_of(file, name='stdin'):
"""Return a string describing the probable encoding of a file."""
u = UniversalDetector()
for line in file:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '%s: %s with confidence %s' % (name,
result['encoding'],
result['confidence'])
else:
return '%s: no result' % name
def main():
if len(argv) <= 1:
print(description_of(stdin))
else:
for path in argv[1:]:
with open(path, 'rb') as f:
print(description_of(f, path))
if __name__ == '__main__':
main()
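# Usage sketch (an addition): description_of() also accepts any iterable of
# byte lines, e.g. an in-memory buffer, assuming chardet is installed.
#
# from io import BytesIO
# print(description_of(BytesIO(b'hello world\n'), name='buffer'))
# # -> "buffer: ascii with confidence 1.0"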
| mit |
usakey/vnpy | vn.lts/pyscript/generate_md_functions.py | 73 | 6980 | # encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from lts_struct import structDict
def processCallBack(line):
originalLine = line
line = line.replace('\tvirtual void ', '') # strip the boilerplate at the start of the line
line = line.replace('{};\n', '') # strip the boilerplate at the end of the line
content = line.split('(')
cbName = content[0] # callback function name
cbArgs = content[1] # callback function arguments
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
cbArgsList = cbArgs.split(', ') # split the arguments into a list
cbArgsTypeList = []
cbArgsValueList = []
for arg in cbArgsList: # process each argument
content = arg.split(' ')
if len(content) > 1:
cbArgsTypeList.append(content[0]) # list of argument types
cbArgsValueList.append(content[1]) # list of argument names
createTask(cbName, cbArgsTypeList, cbArgsValueList, originalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
def createTask(cbName, cbArgsTypeList, cbArgsValueList, originalLine):
# build a Task object from the callback and push it onto the queue
funcline = originalLine.replace('\tvirtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
# generated #define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
# generated switch-case code
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'RspInfoField' in type_:
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSecurityFtdcRspInfoField empty_error = CSecurityFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
# pop the task from the queue and convert it into a python dict
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'RspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
onArgs = join(onArgsList, ', ')
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
line = line.replace('\tvirtual int ', '') # strip the boilerplate at the start of the line
line = line.replace(') = 0;\n', '') # strip the boilerplate at the end of the line
content = line.split('(')
fcName = content[0] # function name
fcArgs = content[1] # function arguments
fcArgs = fcArgs.replace(')', '')
fcArgsList = fcArgs.split(', ') # split the arguments into a list
fcArgsTypeList = []
fcArgsValueList = []
for arg in fcArgsList: # process each argument
content = arg.split(' ')
if len(content) > 1:
fcArgsTypeList.append(content[0]) # list of argument types
fcArgsValueList.append(content[1]) # list of argument names
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write(fcName + '\n')
ffunction.write('{\n')
ffunction.write('\t' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetChar(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'int':
line = '\tgetInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = '\tgetDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write('\tint i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write('\treturn i;\n')
ffunction.write('};\n')
ffunction.write('\n')
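# Illustrative sketch (an addition): what processFunction() extracts from a
# typical header line. The method and struct names are hypothetical.
def _example_parse():
line = '\tvirtual int ReqUserLogin(CSecurityFtdcUserLoginField *pField, int nRequestID) = 0;\n'
line = line.replace('\tvirtual int ', '').replace(') = 0;\n', '')
fcName, fcArgs = line.split('(')
return fcName, fcArgs.split(', ') # ('ReqUserLogin', ['CSecurityFtdcUserLoginField *pField', 'int nRequestID'])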
#########################################################
apiName = 'MdApi'
fcpp = open('SecurityFtdcMdApi.h', 'r')
ftask = open('lts_md_task.cpp', 'w')
fprocess = open('lts_md_process.cpp', 'w')
ffunction = open('lts_md_function.cpp', 'w')
fdefine = open('lts_md_define.cpp', 'w')
fswitch = open('lts_md_switch.cpp', 'w')
define_count = 1
for line in fcpp:
if "\tvirtual void On" in line:
processCallBack(line)
elif "\tvirtual int" in line:
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close() | mit |
Jumpscale/jumpscale_portal8 | apps/portalbase/AYS81/.macros/wiki/aysservice/3_aysservice.py | 1 | 2580 | from collections import OrderedDict
def main(j, args, params, tags, tasklet):
try:
role = args.getTag('aysrole')
name = args.getTag('aysname')
ayspath = args.getTag('ayspath') or ''
repo = j.atyourservice.repoGet(ayspath)
service = repo.serviceGet(role, name, die=False)
if service:
prods = {}
for prod_role, producers in service.producers.items():
prods.setdefault(prod_role, [])
for producer in producers:
prods[prod_role].append('[{name}|/ays81/Service?aysrole={role}&aysname={name}&ayspath={path}]'.format(
role=prod_role, path=ayspath, name=producer.model.dbobj.name))
parent = {}
if service.parent is not None:
parent['role'] = service.parent.model.role
parent['link'] = '[{name}|/ays81/Service?aysrole={role}&aysname={name}&ayspath={path}]'.format(
role=service.parent.model.role, path=ayspath, name=service.parent.model.dbobj.name)
link_to_template = ('[%s|/ays81/ActorTemplate?ayspath=%s&aysname=%s]' % (role,
ayspath, role))
# we prepend service path with '$codedir' to make it work in the explorer.
# because of this line :
# https://github.com/Jumpscale/jumpscale_portal8/blob/master/apps/portalbase/macros/page/explorer/1_main.py#L25
hidden = ['key.priv', 'password', 'passwd', 'pwd', 'oauth.jwt_key', 'keyPriv']
data = j.data.serializer.json.loads(service.model.dataJSON)
data_revised = dict()
for k, v in data.items():
if k.strip() in hidden:
continue
else:
data_revised[k] = v.replace('\\n', '') if isinstance(v, str) else v
args.doc.applyTemplate({
'service': service,
'type': link_to_template,
'data': data_revised,
'name': name,
'role': role,
'producers': OrderedDict(sorted(prods.items())),
'parent': parent,
'actions': service.model.actions,
'reponame': service.aysrepo.name,
})
else:
args.doc.applyTemplate({'error': 'service not found'})
except Exception as e:
args.doc.applyTemplate({'error': e.__str__()})
params.result = (args.doc, args.doc)
return params
| apache-2.0 |
GinnyN/Team-Fortress-RPG-Generators | django/contrib/gis/geos/linestring.py | 411 | 5568 | from django.contrib.gis.geos.base import numpy
from django.contrib.gis.geos.coordseq import GEOSCoordSeq
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos import prototypes as capi
class LineString(GEOSGeometry):
_init_func = capi.create_linestring
_minlength = 2
#### Python 'magic' routines ####
def __init__(self, *args, **kwargs):
"""
Initializes on the given sequence -- may take lists, tuples, NumPy arrays
of X,Y pairs, or Point objects. If Point objects are used, ownership is
_not_ transferred to the LineString object.
Examples:
ls = LineString((1, 1), (2, 2))
ls = LineString([(1, 1), (2, 2)])
ls = LineString(array([(1, 1), (2, 2)]))
ls = LineString(Point(1, 1), Point(2, 2))
"""
# If only one argument provided, set the coords array appropriately
if len(args) == 1: coords = args[0]
else: coords = args
if isinstance(coords, (tuple, list)):
# Getting the number of coords and the number of dimensions -- which
# must stay the same, e.g., no LineString((1, 2), (1, 2, 3)).
ncoords = len(coords)
if coords: ndim = len(coords[0])
else: raise TypeError('Cannot initialize on empty sequence.')
self._checkdim(ndim)
# Incrementing through each of the coordinates and verifying
for i in xrange(1, ncoords):
if not isinstance(coords[i], (tuple, list, Point)):
raise TypeError('each coordinate should be a sequence (list or tuple)')
if len(coords[i]) != ndim: raise TypeError('Dimension mismatch.')
numpy_coords = False
elif numpy and isinstance(coords, numpy.ndarray):
shape = coords.shape # Using numpy's shape.
if len(shape) != 2: raise TypeError('Too many dimensions.')
self._checkdim(shape[1])
ncoords = shape[0]
ndim = shape[1]
numpy_coords = True
else:
raise TypeError('Invalid initialization input for LineStrings.')
# Creating a coordinate sequence object because it is easier to
# set the points using GEOSCoordSeq.__setitem__().
cs = GEOSCoordSeq(capi.create_cs(ncoords, ndim), z=bool(ndim==3))
for i in xrange(ncoords):
if numpy_coords: cs[i] = coords[i,:]
elif isinstance(coords[i], Point): cs[i] = coords[i].tuple
else: cs[i] = coords[i]
# If SRID was passed in with the keyword arguments
srid = kwargs.get('srid', None)
# Calling the base geometry initialization with the returned pointer
# from the function.
super(LineString, self).__init__(self._init_func(cs.ptr), srid=srid)
def __iter__(self):
"Allows iteration over this LineString."
for i in xrange(len(self)):
yield self[i]
def __len__(self):
"Returns the number of points in this LineString."
return len(self._cs)
def _get_single_external(self, index):
return self._cs[index]
_get_single_internal = _get_single_external
def _set_list(self, length, items):
ndim = self._cs.dims #
hasz = self._cs.hasz # I don't understand why these are different
# create a new coordinate sequence and populate accordingly
cs = GEOSCoordSeq(capi.create_cs(length, ndim), z=hasz)
for i, c in enumerate(items):
cs[i] = c
ptr = self._init_func(cs.ptr)
if ptr:
capi.destroy_geom(self.ptr)
self.ptr = ptr
self._post_init(self.srid)
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._checkindex(index)
self._cs[index] = value
def _checkdim(self, dim):
if dim not in (2, 3): raise TypeError('Dimension mismatch.')
#### Sequence Properties ####
@property
def tuple(self):
"Returns a tuple version of the geometry from the coordinate sequence."
return self._cs.tuple
coords = tuple
def _listarr(self, func):
"""
Internal routine that returns a sequence (list) corresponding with
the given function. Will return a numpy array if possible.
"""
lst = [func(i) for i in xrange(len(self))]
if numpy: return numpy.array(lst) # ARRRR!
else: return lst
@property
def array(self):
"Returns a numpy array for the LineString."
return self._listarr(self._cs.__getitem__)
@property
def merged(self):
"Returns the line merge of this LineString."
return self._topology(capi.geos_linemerge(self.ptr))
@property
def x(self):
"Returns a list or numpy array of the X variable."
return self._listarr(self._cs.getX)
@property
def y(self):
"Returns a list or numpy array of the Y variable."
return self._listarr(self._cs.getY)
@property
def z(self):
"Returns a list or numpy array of the Z variable."
if not self.hasz: return None
else: return self._listarr(self._cs.getZ)
# LinearRings are LineStrings used within Polygons.
class LinearRing(LineString):
_minlength = 4 # was misspelled '_minLength', which never overrode LineString._minlength
_init_func = capi.create_linearring
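# Usage sketch (an addition, assuming GEOS is available at runtime):
#
# ls = LineString((0, 0), (1, 1), (2, 2))
# ls.tuple # ((0.0, 0.0), (1.0, 1.0), (2.0, 2.0))
# ls.x # [0.0, 1.0, 2.0], or a numpy array if numpy is installed
# ring = LinearRing((0, 0), (0, 1), (1, 1), (0, 0)) # first point == last point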
| bsd-3-clause |
rec/echomesh | code/python/external/platform/darwin/numpy/distutils/tests/gen_ext/setup.py | 49 | 1099 | #!/usr/bin/env python
fib3_f = '''
C FILE: FIB3.F
SUBROUTINE FIB(A,N)
C
C CALCULATE FIRST N FIBONACCI NUMBERS
C
INTEGER N
REAL*8 A(N)
Cf2py intent(in) n
Cf2py intent(out) a
Cf2py depend(n) a
DO I=1,N
IF (I.EQ.1) THEN
A(I) = 0.0D0
ELSEIF (I.EQ.2) THEN
A(I) = 1.0D0
ELSE
A(I) = A(I-1) + A(I-2)
ENDIF
ENDDO
END
C END FILE FIB3.F
'''
def source_func(ext, build_dir):
import os
from distutils.dep_util import newer
target = os.path.join(build_dir,'fib3.f')
if newer(__file__, target):
f = open(target,'w')
f.write(fib3_f)
f.close()
return [target]
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('gen_ext',parent_package,top_path)
config.add_extension('fib3',
[source_func]
)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(configuration=configuration)
| mit |
varuntiwari27/rally | todo-api/flask/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/markers.py | 228 | 8248 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import operator
import os
import platform
import sys
from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
from pkg_resources.extern.pyparsing import Literal as L # noqa
from ._compat import string_types
from .specifiers import Specifier, InvalidSpecifier
__all__ = [
"InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName",
"Marker", "default_environment",
]
class InvalidMarker(ValueError):
"""
An invalid marker was found, users should refer to PEP 508.
"""
class UndefinedComparison(ValueError):
"""
An invalid operation was attempted on a value that doesn't support it.
"""
class UndefinedEnvironmentName(ValueError):
"""
A name was attempted to be used that does not exist inside of the
environment.
"""
class Node(object):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __repr__(self):
return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
def serialize(self):
raise NotImplementedError
class Variable(Node):
def serialize(self):
return str(self)
class Value(Node):
def serialize(self):
return '"{0}"'.format(self)
class Op(Node):
def serialize(self):
return str(self)
VARIABLE = (
L("implementation_version") |
L("platform_python_implementation") |
L("implementation_name") |
L("python_full_version") |
L("platform_release") |
L("platform_version") |
L("platform_machine") |
L("platform_system") |
L("python_version") |
L("sys_platform") |
L("os_name") |
L("os.name") | # PEP-345
L("sys.platform") | # PEP-345
L("platform.version") | # PEP-345
L("platform.machine") | # PEP-345
L("platform.python_implementation") | # PEP-345
L("python_implementation") | # undocumented setuptools legacy
L("extra")
)
ALIASES = {
'os.name': 'os_name',
'sys.platform': 'sys_platform',
'platform.version': 'platform_version',
'platform.machine': 'platform_machine',
'platform.python_implementation': 'platform_python_implementation',
'python_implementation': 'platform_python_implementation'
}
VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
VERSION_CMP = (
L("===") |
L("==") |
L(">=") |
L("<=") |
L("!=") |
L("~=") |
L(">") |
L("<")
)
MARKER_OP = VERSION_CMP | L("not in") | L("in")
MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
MARKER_VALUE = QuotedString("'") | QuotedString('"')
MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
BOOLOP = L("and") | L("or")
MARKER_VAR = VARIABLE | MARKER_VALUE
MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
MARKER_EXPR = Forward()
MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
MARKER = stringStart + MARKER_EXPR + stringEnd
def _coerce_parse_result(results):
if isinstance(results, ParseResults):
return [_coerce_parse_result(i) for i in results]
else:
return results
def _format_marker(marker, first=True):
assert isinstance(marker, (list, tuple, string_types))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (isinstance(marker, list) and len(marker) == 1 and
isinstance(marker[0], (list, tuple))):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs, op, rhs):
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs)
oper = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(
"Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
)
return oper(lhs, rhs)
_undefined = object()
def _get_env(environment, name):
value = environment.get(name, _undefined)
if value is _undefined:
raise UndefinedEnvironmentName(
"{0!r} does not exist in evaluation environment.".format(name)
)
return value
def _evaluate_markers(markers, environment):
groups = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, string_types))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
lhs_value = _get_env(environment, lhs.value)
rhs_value = rhs.value
else:
lhs_value = lhs.value
rhs_value = _get_env(environment, rhs.value)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
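# Clarifying example (an addition): the groups list implements a disjunctive
# normal form evaluation -- "or" starts a new group, items within a group are
# "and"-ed, and the result is any(all(group)). For "(a and b) or c" with
# a=True, b=False, c=True the groups are [[True, False], [True]], and
# any(all(g) for g in [[True, False], [True]]) evaluates to True.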
def format_full_version(info):
version = '{0.major}.{0.minor}.{0.micro}'.format(info)
kind = info.releaselevel
if kind != 'final':
version += kind[0] + str(info.serial)
return version
def default_environment():
if hasattr(sys, 'implementation'):
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
else:
iver = '0'
implementation_name = ''
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": platform.python_version()[:3],
"sys_platform": sys.platform,
}
class Marker(object):
def __init__(self, marker):
try:
self._markers = _coerce_parse_result(MARKER.parseString(marker))
except ParseException as e:
err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
marker, marker[e.loc:e.loc + 8])
raise InvalidMarker(err_str)
def __str__(self):
return _format_marker(self._markers)
def __repr__(self):
return "<Marker({0!r})>".format(str(self))
def evaluate(self, environment=None):
"""Evaluate a marker.
Return the boolean from evaluating the given marker against the
environment. environment is an optional argument to override all or
part of the determined environment.
The environment is determined from the current Python process.
"""
current_environment = default_environment()
if environment is not None:
current_environment.update(environment)
return _evaluate_markers(self._markers, current_environment)
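# Usage sketch (an addition): evaluating a PEP 508 marker against the running
# interpreter, or against a partially overridden environment.
#
# m = Marker('python_version >= "2.7" and os_name == "posix"')
# m.evaluate() # True on a POSIX CPython >= 2.7
# m.evaluate({'os_name': 'nt'}) # False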
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/externals/funcsigs.py | 1 | 29802 | # Copyright 2001-2013 Python Software Foundation; All Rights Reserved
"""Function signature objects for callables
Back port of Python 3.3's function signature tools from the inspect module,
modified to be compatible with Python 2.6, 2.7 and 3.2+.
"""
from __future__ import absolute_import, division, print_function
import functools
import itertools
import re
import types
try:
from collections import OrderedDict
except ImportError:
from .odict import OrderedDict
__version__ = "0.4"
__all__ = ['BoundArguments', 'Parameter', 'Signature', 'signature']
_WrapperDescriptor = type(type.__call__)
_MethodWrapper = type(all.__call__)
_NonUserDefinedCallables = (_WrapperDescriptor,
_MethodWrapper,
types.BuiltinFunctionType)
def formatannotation(annotation, base_module=None):
if isinstance(annotation, type):
if annotation.__module__ in ('builtins', '__builtin__', base_module):
return annotation.__name__
return annotation.__module__ + '.' + annotation.__name__
return repr(annotation)
def _get_user_defined_method(cls, method_name, *nested):
try:
if cls is type:
return
meth = getattr(cls, method_name)
for name in nested:
meth = getattr(meth, name, meth)
except AttributeError:
return
else:
if not isinstance(meth, _NonUserDefinedCallables):
# Once '__signature__' will be added to 'C'-level
# callables, this check won't be necessary
return meth
def signature(obj):
'''Get a signature object for the passed callable.'''
if not callable(obj):
raise TypeError('{0!r} is not a callable object'.format(obj))
if isinstance(obj, types.MethodType):
sig = signature(obj.__func__)
if obj.__self__ is None:
# Unbound method: the first parameter becomes positional-only
if sig.parameters:
first = tuple(sig.parameters.values())[0].replace(
kind=_POSITIONAL_ONLY)
return sig.replace(
parameters=(first,) + tuple(sig.parameters.values())[1:])
else:
return sig
else:
# In this case we skip the first parameter of the underlying
# function (usually `self` or `cls`).
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
try:
sig = obj.__signature__
except AttributeError:
pass
else:
if sig is not None:
return sig
try:
# Was this function wrapped by a decorator?
wrapped = obj.__wrapped__
except AttributeError:
pass
else:
return signature(wrapped)
if isinstance(obj, types.FunctionType):
return Signature.from_function(obj)
if isinstance(obj, functools.partial):
sig = signature(obj.func)
new_params = OrderedDict(sig.parameters.items())
partial_args = obj.args or ()
partial_keywords = obj.keywords or {}
try:
ba = sig.bind_partial(*partial_args, **partial_keywords)
except TypeError as ex:
msg = 'partial object {0!r} has incorrect arguments'.format(obj)
raise ValueError(msg)
for arg_name, arg_value in ba.arguments.items():
param = new_params[arg_name]
if arg_name in partial_keywords:
# We set a new default value, because the following code
# is correct:
#
# >>> def foo(a): print(a)
# >>> print(partial(partial(foo, a=10), a=20)())
# 20
# >>> print(partial(partial(foo, a=10), a=20)(a=30))
# 30
#
# So, with 'partial' objects, passing a keyword argument is
# like setting a new default value for the corresponding
# parameter
#
# We also mark this parameter with '_partial_kwarg'
# flag. Later, in '_bind', the 'default' value of this
# parameter will be added to 'kwargs', to simulate
# the 'functools.partial' real call.
new_params[arg_name] = param.replace(default=arg_value,
_partial_kwarg=True)
elif (param.kind not in (_VAR_KEYWORD, _VAR_POSITIONAL) and
not param._partial_kwarg):
new_params.pop(arg_name)
return sig.replace(parameters=new_params.values())
sig = None
if isinstance(obj, type):
# obj is a class or a metaclass
# First, let's see if it has an overloaded __call__ defined
# in its metaclass
call = _get_user_defined_method(type(obj), '__call__')
if call is not None:
sig = signature(call)
else:
# Now we check if the 'obj' class has a '__new__' method
new = _get_user_defined_method(obj, '__new__')
if new is not None:
sig = signature(new)
else:
# Finally, we should have at least __init__ implemented
init = _get_user_defined_method(obj, '__init__')
if init is not None:
sig = signature(init)
elif not isinstance(obj, _NonUserDefinedCallables):
# An object with __call__
# We also check that the 'obj' is not an instance of
# _WrapperDescriptor or _MethodWrapper to avoid
# infinite recursion (and even potential segfault)
call = _get_user_defined_method(type(obj), '__call__', 'im_func')
if call is not None:
sig = signature(call)
if sig is not None:
# For classes and objects we skip the first parameter of their
# __call__, __new__, or __init__ methods
return sig.replace(parameters=tuple(sig.parameters.values())[1:])
if isinstance(obj, types.BuiltinFunctionType):
# Raise a nicer error message for builtins
msg = 'no signature found for builtin function {0!r}'.format(obj)
raise ValueError(msg)
raise ValueError('callable {0!r} is not supported by signature'.format(obj))
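# Usage sketch (an addition): inspecting a plain function with the backported
# signature() defined above.
#
# def greet(name, punctuation='!'):
# return name + punctuation
#
# sig = signature(greet)
# str(sig) # "(name, punctuation='!')"
# sig.parameters['punctuation'].default # '!'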
class _void(object):
'''A private marker - used in Parameter & Signature'''
class _empty(object):
pass
class _ParameterKind(int):
def __new__(self, *args, **kwargs):
obj = int.__new__(self, *args)
obj._name = kwargs['name']
return obj
def __str__(self):
return self._name
def __repr__(self):
return '<_ParameterKind: {0!r}>'.format(self._name)
_POSITIONAL_ONLY = _ParameterKind(0, name='POSITIONAL_ONLY')
_POSITIONAL_OR_KEYWORD = _ParameterKind(1, name='POSITIONAL_OR_KEYWORD')
_VAR_POSITIONAL = _ParameterKind(2, name='VAR_POSITIONAL')
_KEYWORD_ONLY = _ParameterKind(3, name='KEYWORD_ONLY')
_VAR_KEYWORD = _ParameterKind(4, name='VAR_KEYWORD')
class Parameter(object):
'''Represents a parameter in a function signature.
Has the following public attributes:
* name : str
The name of the parameter as a string.
* default : object
The default value for the parameter if specified. If the
parameter has no default value, this attribute is not set.
* annotation
The annotation for the parameter if specified. If the
parameter has no annotation, this attribute is not set.
* kind : str
Describes how argument values are bound to the parameter.
Possible values: `Parameter.POSITIONAL_ONLY`,
`Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
`Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.
'''
__slots__ = ('_name', '_kind', '_default', '_annotation', '_partial_kwarg')
POSITIONAL_ONLY = _POSITIONAL_ONLY
POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
VAR_POSITIONAL = _VAR_POSITIONAL
KEYWORD_ONLY = _KEYWORD_ONLY
VAR_KEYWORD = _VAR_KEYWORD
empty = _empty
def __init__(self, name, kind, default=_empty, annotation=_empty,
_partial_kwarg=False):
if kind not in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD,
_VAR_POSITIONAL, _KEYWORD_ONLY, _VAR_KEYWORD):
raise ValueError("invalid value for 'Parameter.kind' attribute")
self._kind = kind
if default is not _empty:
if kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
msg = '{0} parameters cannot have default values'.format(kind)
raise ValueError(msg)
self._default = default
self._annotation = annotation
if name is None:
if kind != _POSITIONAL_ONLY:
raise ValueError("None is not a valid name for a "
"non-positional-only parameter")
self._name = name
else:
name = str(name)
if kind != _POSITIONAL_ONLY and not re.match(r'[a-z_]\w*$', name, re.I):
msg = '{0!r} is not a valid parameter name'.format(name)
raise ValueError(msg)
self._name = name
self._partial_kwarg = _partial_kwarg
@property
def name(self):
return self._name
@property
def default(self):
return self._default
@property
def annotation(self):
return self._annotation
@property
def kind(self):
return self._kind
def replace(self, name=_void, kind=_void, annotation=_void,
default=_void, _partial_kwarg=_void):
'''Creates a customized copy of the Parameter.'''
if name is _void:
name = self._name
if kind is _void:
kind = self._kind
if annotation is _void:
annotation = self._annotation
if default is _void:
default = self._default
if _partial_kwarg is _void:
_partial_kwarg = self._partial_kwarg
return type(self)(name, kind, default=default, annotation=annotation,
_partial_kwarg=_partial_kwarg)
def __str__(self):
kind = self.kind
formatted = self._name
if kind == _POSITIONAL_ONLY:
if formatted is None:
formatted = ''
formatted = '<{0}>'.format(formatted)
# Add annotation and default value
if self._annotation is not _empty:
formatted = '{0}:{1}'.format(formatted,
formatannotation(self._annotation))
if self._default is not _empty:
formatted = '{0}={1}'.format(formatted, repr(self._default))
if kind == _VAR_POSITIONAL:
formatted = '*' + formatted
elif kind == _VAR_KEYWORD:
formatted = '**' + formatted
return formatted
def __repr__(self):
return '<{0} at {1:#x} {2!r}>'.format(self.__class__.__name__,
id(self), self.name)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, Parameter) and
self._name == other._name and
self._kind == other._kind and
self._default == other._default and
self._annotation == other._annotation)
def __ne__(self, other):
return not self.__eq__(other)
class BoundArguments(object):
'''Result of `Signature.bind` call. Holds the mapping of arguments
to the function's parameters.
Has the following public attributes:
* arguments : OrderedDict
An ordered mutable mapping of parameters' names to arguments' values.
Does not contain arguments' default values.
* signature : Signature
The Signature object that created this instance.
* args : tuple
Tuple of positional arguments values.
* kwargs : dict
Dict of keyword arguments values.
'''
def __init__(self, signature, arguments):
self.arguments = arguments
self._signature = signature
@property
def signature(self):
return self._signature
@property
def args(self):
args = []
for param_name, param in self._signature.parameters.items():
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
# Keyword arguments mapped by 'functools.partial'
# (Parameter._partial_kwarg is True) are mapped
# in 'BoundArguments.kwargs', along with VAR_KEYWORD &
# KEYWORD_ONLY
break
try:
arg = self.arguments[param_name]
except KeyError:
# We're done here. Other arguments
# will be mapped in 'BoundArguments.kwargs'
break
else:
if param.kind == _VAR_POSITIONAL:
# *args
args.extend(arg)
else:
# plain argument
args.append(arg)
return tuple(args)
@property
def kwargs(self):
kwargs = {}
kwargs_started = False
for param_name, param in self._signature.parameters.items():
if not kwargs_started:
if (param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY) or
param._partial_kwarg):
kwargs_started = True
else:
if param_name not in self.arguments:
kwargs_started = True
continue
if not kwargs_started:
continue
try:
arg = self.arguments[param_name]
except KeyError:
pass
else:
if param.kind == _VAR_KEYWORD:
# **kwargs
kwargs.update(arg)
else:
# plain keyword argument
kwargs[param_name] = arg
return kwargs
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
return (issubclass(other.__class__, BoundArguments) and
self.signature == other.signature and
self.arguments == other.arguments)
def __ne__(self, other):
return not self.__eq__(other)
class Signature(object):
'''A Signature object represents the overall signature of a function.
It stores a Parameter object for each parameter accepted by the
function, as well as information specific to the function itself.
A Signature object has the following public attributes and methods:
* parameters : OrderedDict
An ordered mapping of parameters' names to the corresponding
Parameter objects (keyword-only arguments are in the same order
as listed in `code.co_varnames`).
* return_annotation : object
The annotation for the return type of the function if specified.
If the function has no annotation for its return type, this
attribute is not set.
* bind(*args, **kwargs) -> BoundArguments
Creates a mapping from positional and keyword arguments to
parameters.
* bind_partial(*args, **kwargs) -> BoundArguments
Creates a partial mapping from positional and keyword arguments
to parameters (simulating 'functools.partial' behavior.)
'''
__slots__ = ('_return_annotation', '_parameters')
_parameter_cls = Parameter
_bound_arguments_cls = BoundArguments
empty = _empty
def __init__(self, parameters=None, return_annotation=_empty,
__validate_parameters__=True):
'''Constructs Signature from the given list of Parameter
objects and 'return_annotation'. All arguments are optional.
'''
if parameters is None:
params = OrderedDict()
else:
if __validate_parameters__:
params = OrderedDict()
top_kind = _POSITIONAL_ONLY
for idx, param in enumerate(parameters):
kind = param.kind
if kind < top_kind:
msg = 'wrong parameter order: {0} before {1}'
msg = msg.format(top_kind, param.kind)
raise ValueError(msg)
else:
top_kind = kind
name = param.name
if name is None:
name = str(idx)
param = param.replace(name=name)
if name in params:
msg = 'duplicate parameter name: {0!r}'.format(name)
raise ValueError(msg)
params[name] = param
else:
params = OrderedDict(((param.name, param)
for param in parameters))
self._parameters = params
self._return_annotation = return_annotation
@classmethod
def from_function(cls, func):
'''Constructs Signature for the given python function'''
if not isinstance(func, types.FunctionType):
raise TypeError('{0!r} is not a Python function'.format(func))
Parameter = cls._parameter_cls
# Parameter information.
func_code = func.__code__
pos_count = func_code.co_argcount
arg_names = func_code.co_varnames
positional = tuple(arg_names[:pos_count])
keyword_only_count = getattr(func_code, 'co_kwonlyargcount', 0)
keyword_only = arg_names[pos_count:(pos_count + keyword_only_count)]
annotations = getattr(func, '__annotations__', {})
defaults = func.__defaults__
kwdefaults = getattr(func, '__kwdefaults__', None)
if defaults:
pos_default_count = len(defaults)
else:
pos_default_count = 0
parameters = []
# Non-keyword-only parameters w/o defaults.
non_default_count = pos_count - pos_default_count
for name in positional[:non_default_count]:
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD))
# ... w/ defaults.
for offset, name in enumerate(positional[non_default_count:]):
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_POSITIONAL_OR_KEYWORD,
default=defaults[offset]))
# *args
if func_code.co_flags & 0x04:
name = arg_names[pos_count + keyword_only_count]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_POSITIONAL))
# Keyword-only parameters.
for name in keyword_only:
default = _empty
if kwdefaults is not None:
default = kwdefaults.get(name, _empty)
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_KEYWORD_ONLY,
default=default))
# **kwargs
if func_code.co_flags & 0x08:
index = pos_count + keyword_only_count
if func_code.co_flags & 0x04:
index += 1
name = arg_names[index]
annotation = annotations.get(name, _empty)
parameters.append(Parameter(name, annotation=annotation,
kind=_VAR_KEYWORD))
return cls(parameters,
return_annotation=annotations.get('return', _empty),
__validate_parameters__=False)
@property
def parameters(self):
try:
return types.MappingProxyType(self._parameters)
except AttributeError:
return OrderedDict(self._parameters.items())
@property
def return_annotation(self):
return self._return_annotation
def replace(self, parameters=_void, return_annotation=_void):
'''Creates a customized copy of the Signature.
Pass 'parameters' and/or 'return_annotation' arguments
to override them in the new copy.
'''
if parameters is _void:
parameters = self.parameters.values()
if return_annotation is _void:
return_annotation = self._return_annotation
return type(self)(parameters,
return_annotation=return_annotation)
def __hash__(self):
msg = "unhashable type: '{0}'".format(self.__class__.__name__)
raise TypeError(msg)
def __eq__(self, other):
if (not issubclass(type(other), Signature) or
self.return_annotation != other.return_annotation or
len(self.parameters) != len(other.parameters)):
return False
other_positions = dict((param, idx)
for idx, param in enumerate(other.parameters.keys()))
for idx, (param_name, param) in enumerate(self.parameters.items()):
if param.kind == _KEYWORD_ONLY:
try:
other_param = other.parameters[param_name]
except KeyError:
return False
else:
if param != other_param:
return False
else:
try:
other_idx = other_positions[param_name]
except KeyError:
return False
else:
if (idx != other_idx or
param != other.parameters[param_name]):
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _bind(self, args, kwargs, partial=False):
'''Private method. Don't use directly.'''
arguments = OrderedDict()
parameters = iter(self.parameters.values())
parameters_ex = ()
arg_vals = iter(args)
if partial:
# Support for binding arguments to 'functools.partial' objects.
# See 'functools.partial' case in 'signature()' implementation
# for details.
for param_name, param in self.parameters.items():
if (param._partial_kwarg and param_name not in kwargs):
# Simulating 'functools.partial' behavior
kwargs[param_name] = param.default
while True:
# Let's iterate through the positional arguments and corresponding
# parameters
try:
arg_val = next(arg_vals)
except StopIteration:
# No more positional arguments
try:
param = next(parameters)
except StopIteration:
# No more parameters. That's it. Just need to check that
# we have no `kwargs` after this while loop
break
else:
if param.kind == _VAR_POSITIONAL:
# That's OK, just empty *args. Let's start parsing
# kwargs
break
elif param.name in kwargs:
if param.kind == _POSITIONAL_ONLY:
msg = '{arg!r} parameter is positional only, ' \
'but was passed as a keyword'
msg = msg.format(arg=param.name)
raise TypeError(msg)
parameters_ex = (param,)
break
elif (param.kind == _VAR_KEYWORD or
param.default is not _empty):
# That's fine too - we have a default value for this
# parameter. So, lets start parsing `kwargs`, starting
# with the current parameter
parameters_ex = (param,)
break
else:
if partial:
parameters_ex = (param,)
break
else:
msg = '{arg!r} parameter lacking default value'
msg = msg.format(arg=param.name)
raise TypeError(msg)
else:
# We have a positional argument to process
try:
param = next(parameters)
except StopIteration:
raise TypeError('too many positional arguments')
else:
if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
# Looks like we have no parameter for this positional
# argument
raise TypeError('too many positional arguments')
if param.kind == _VAR_POSITIONAL:
# We have an '*args'-like argument, let's fill it with
# all positional arguments we have left and move on to
# the next phase
values = [arg_val]
values.extend(arg_vals)
arguments[param.name] = tuple(values)
break
if param.name in kwargs:
raise TypeError('multiple values for argument '
'{arg!r}'.format(arg=param.name))
arguments[param.name] = arg_val
# Now, we iterate through the remaining parameters to process
# keyword arguments
kwargs_param = None
for param in itertools.chain(parameters_ex, parameters):
if param.kind == _POSITIONAL_ONLY:
# This should never happen in case of a properly built
# Signature object (but let's have this check here
# to ensure correct behaviour just in case)
raise TypeError('{arg!r} parameter is positional only, '
'but was passed as a keyword'. \
format(arg=param.name))
if param.kind == _VAR_KEYWORD:
# Memorize that we have a '**kwargs'-like parameter
kwargs_param = param
continue
param_name = param.name
try:
arg_val = kwargs.pop(param_name)
except KeyError:
# We have no value for this parameter. It's fine though,
# if it has a default value, or it is an '*args'-like
# parameter, left alone by the processing of positional
# arguments.
if (not partial and param.kind != _VAR_POSITIONAL and
param.default is _empty):
raise TypeError('{arg!r} parameter lacking default value'. \
format(arg=param_name))
else:
arguments[param_name] = arg_val
if kwargs:
if kwargs_param is not None:
# Process our '**kwargs'-like parameter
arguments[kwargs_param.name] = kwargs
else:
raise TypeError('too many keyword arguments')
return self._bound_arguments_cls(self, arguments)
def bind(self, *args, **kwargs):
'''Get a BoundArguments object that maps the passed `args`
and `kwargs` to the function's signature. Raises `TypeError`
if the passed arguments cannot be bound.
'''
return self._bind(args, kwargs)
def bind_partial(self, *args, **kwargs):
'''Get a BoundArguments object that partially maps the
passed `args` and `kwargs` to the function's signature.
Raises `TypeError` if the passed arguments cannot be bound.
'''
return self._bind(args, kwargs, partial=True)
def __str__(self):
result = []
render_kw_only_separator = True
for idx, param in enumerate(self.parameters.values()):
formatted = str(param)
kind = param.kind
if kind == _VAR_POSITIONAL:
# OK, we have an '*args'-like parameter, so we won't need
# a '*' to separate keyword-only arguments
render_kw_only_separator = False
elif kind == _KEYWORD_ONLY and render_kw_only_separator:
# We have a keyword-only parameter to render and we haven't
# rendered an '*args'-like parameter before, so add a '*'
# separator to the parameters list ("foo(arg1, *, arg2)" case)
result.append('*')
# This condition should be only triggered once, so
# reset the flag
render_kw_only_separator = False
result.append(formatted)
rendered = '({0})'.format(', '.join(result))
if self.return_annotation is not _empty:
anno = formatannotation(self.return_annotation)
rendered += ' -> {0}'.format(anno)
return rendered
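# A minimal usage sketch for the Signature backport above (the sample
# function is illustrative; on Python 3 the stdlib 'inspect' module exposes
# the same API):
if __name__ == '__main__':
    def greet(name, punctuation='!'):
        return name + punctuation

    sig = Signature.from_function(greet)
    print(str(sig))                            # -> (name, punctuation='!')
    bound = sig.bind('world')
    bound.arguments['punctuation'] = '?'
    print(greet(*bound.args, **bound.kwargs))  # -> world?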
| mit |
panda73111/aiohttp | demos/polls/tests/conftest.py | 5 | 1110 | import pathlib
import subprocess
import pytest
import aiohttp
@pytest.yield_fixture
def create_app(event_loop, unused_tcp_port):
app = handler = srv = client_session = None
async def create():
nonlocal app, handler, srv, client_session
import aiohttpdemo_polls.main
app, host, port = await aiohttpdemo_polls.main.init(event_loop)
handler = app.make_handler(debug=True, keep_alive_on=False)
srv = await event_loop.create_server(handler, '127.0.0.1', port)
url = "http://127.0.0.1:{}".format(port)
client_session = aiohttp.ClientSession()
return app, url, client_session
yield create
async def finish():
await handler.finish_connections()
await app.finish()
await client_session.close()
srv.close()
await srv.wait_closed()
event_loop.run_until_complete(finish())
BASE_DIR = pathlib.Path(__file__).parent.parent
@pytest.fixture
def app_db():
subprocess.call(
[(BASE_DIR / 'sql' / 'install.sh').as_posix()],
shell=True,
cwd=BASE_DIR.as_posix()
)
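# A usage sketch of how a test coroutine would consume the create_app
# factory above; the '/' route is an assumption about the demo app, and a
# plugin such as pytest-asyncio is assumed to drive the coroutine.
async def _example_index_roundtrip(create_app):
    app, url, client_session = await create_app()
    async with client_session.get(url + '/') as resp:
        assert resp.status == 200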
| apache-2.0 |
credativ/pulp | server/pulp/server/managers/repo/group/cud.py | 2 | 12615 | import logging
import sys
from celery import task
from pymongo.errors import DuplicateKeyError
from pulp.common.plugins import distributor_constants
from pulp.server import exceptions as pulp_exceptions
from pulp.server.async.tasks import Task
from pulp.server.db.model.repo_group import RepoGroup
from pulp.server.db.model.repository import Repo
from pulp.server.managers import factory as manager_factory
from pulp.server.managers.repo.group.distributor import RepoGroupDistributorManager
_logger = logging.getLogger(__name__)
class RepoGroupManager(object):
@staticmethod
def create_repo_group(group_id, display_name=None, description=None, repo_ids=None, notes=None):
"""
Create a new repo group.
:param group_id: unique id of the repo group
:type group_id: str
:param display_name: display name of the repo group
:type display_name: str or None
:param description: description of the repo group
:type description: str or None
:param repo_ids: list of ids for repos initially belonging to the repo group
:type repo_ids: list or None
:param notes: notes for the repo group
:type notes: dict or None
:return: SON representation of the repo group
:rtype: bson.SON
"""
if repo_ids:
# Check if ids in repo_ids belong to existing repositories
repo_query_manager = manager_factory.repo_query_manager()
for repo_id in repo_ids:
repo_query_manager.get_repository(repo_id)
# Create repo group
collection = RepoGroup.get_collection()
repo_group = RepoGroup(group_id, display_name, description, repo_ids, notes)
try:
collection.insert(repo_group, safe=True)
except DuplicateKeyError:
raise pulp_exceptions.DuplicateResource(group_id), None, sys.exc_info()[2]
group = collection.find_one({'id': group_id})
return group
@staticmethod
def create_and_configure_repo_group(group_id, display_name=None, description=None,
repo_ids=None, notes=None, distributor_list=None):
"""
Create a new repository group and add distributors in a single call. This is equivalent to
calling RepoGroupManager.create_repo_group and then
RepoGroupDistributorManager.add_distributor for each distributor in the distributor list.
:param group_id: unique id of the repository group
:type group_id: str
:param display_name: user-friendly name of the repository id
:type display_name: str or None
:param description: description of the repository group
:type description: str or None
:param repo_ids: the list of repository ids in this repository group
:type repo_ids: list of str or None
:param notes: A collection of key=value pairs
:type notes: dict or None
:param distributor_list: A list of dictionaries used to add distributors. Each dict is
expected to contain the keys DISTRIBUTOR_TYPE_ID_KEY,
DISTRIBUTOR_CONFIG_KEY, and DISTRIBUTOR_ID_KEY (from
pulp.common.plugins.distributor_constants), holding a str, a
dict, and a str or None, respectively.
:type distributor_list: list of dict
:return: SON representation of the repo group
:rtype: bson.SON
"""
if distributor_list is None:
distributor_list = ()
# Validate the distributor list before creating a repo group
if not isinstance(distributor_list, (list, tuple)) or not \
all(isinstance(dist, dict) for dist in distributor_list):
raise pulp_exceptions.InvalidValue(['distributor_list'])
# Create the repo group using the vanilla group create method
repo_group = RepoGroupManager.create_repo_group(group_id, display_name, description,
repo_ids, notes)
for distributor in distributor_list:
try:
# Attempt to add the distributor to the group.
type_id = distributor.get(distributor_constants.DISTRIBUTOR_TYPE_ID_KEY)
plugin_config = distributor.get(distributor_constants.DISTRIBUTOR_CONFIG_KEY)
distributor_id = distributor.get(distributor_constants.DISTRIBUTOR_ID_KEY)
RepoGroupDistributorManager.add_distributor(group_id, type_id, plugin_config,
distributor_id)
except Exception:
# If an exception occurs, pass it on after cleaning up the repository group
_logger.exception('Exception adding distributor to repo group [%s]; the group will'
' be deleted' % group_id)
RepoGroupManager.delete_repo_group(group_id)
raise
return repo_group
@staticmethod
def update_repo_group(group_id, **updates):
"""
Update an existing repo group.
Valid keyword arguments are:
* display_name
* description
* notes
For notes, provide a dict with key:value pairs for changes only. It is
not necessary to provide the entire field value. If a value is empty or
otherwise evaluates to False, that key will be unset.
@param group_id: unique id of the repo group to update
@type group_id: str
@param updates: keyword arguments of attributes to update
@return: SON representation of the updated repo group
@rtype: L{bson.SON}
"""
collection = validate_existing_repo_group(group_id)
keywords = updates.keys()
# validate keywords
valid_keywords = set(('display_name', 'description', 'notes'))
invalid_keywords = set(keywords) - valid_keywords
if invalid_keywords:
raise pulp_exceptions.InvalidValue(list(invalid_keywords))
# handle notes as a delta against the existing notes attribute
notes = updates.pop('notes', None)
if notes:
unset_dict = {}
for key, value in notes.iteritems():
newkey = 'notes.%s' % key
if value:
updates[newkey] = value
else:
unset_dict[newkey] = value
if unset_dict:
collection.update({'id': group_id}, {'$unset': unset_dict}, safe=True)
if updates:
collection.update({'id': group_id}, {'$set': updates}, safe=True)
group = collection.find_one({'id': group_id})
return group
@staticmethod
def delete_repo_group(group_id):
"""
Delete a repo group.
@param group_id: unique id of the repo group to delete
@type group_id: str
"""
collection = validate_existing_repo_group(group_id)
# Delete all distributors on the group
distributors = RepoGroupDistributorManager.find_distributors(group_id)
for distributor in distributors:
RepoGroupDistributorManager.remove_distributor(group_id, distributor['id'])
# Delete from the database
collection.remove({'id': group_id}, safe=True)
def remove_repo_from_groups(self, repo_id, group_ids=None):
"""
Remove a repo from the list of repo groups provided.
If no repo groups are specified, remove the repo from all repo groups
it's currently in.
(idempotent: useful when deleting repositories)
@param repo_id: unique id of the repo to remove from repo groups
@type repo_id: str
@param group_ids: list of repo group ids to remove the repo from
@type group_ids: list or None
"""
spec = {}
if group_ids is not None:
spec = {'id': {'$in': group_ids}}
collection = RepoGroup.get_collection()
collection.update(spec, {'$pull': {'repo_ids': repo_id}}, multi=True, safe=True)
@staticmethod
def associate(group_id, criteria):
"""
Associate a set of repos, that match the passed in criteria, to a repo group.
@param group_id: unique id of the group to associate repos to
@type group_id: str
@param criteria: Criteria instance representing the set of repos to associate
@type criteria: L{pulp.server.db.model.criteria.Criteria}
"""
group_collection = validate_existing_repo_group(group_id)
repo_collection = Repo.get_collection()
cursor = repo_collection.query(criteria)
repo_ids = [r['id'] for r in cursor]
if not repo_ids:
return
group_collection.update({'id': group_id},
{'$addToSet': {'repo_ids': {'$each': repo_ids}}},
safe=True)
@staticmethod
def unassociate(group_id, criteria):
"""
Unassociate a set of repos, that match the passed in criteria, from a repo group.
@param group_id: unique id of the group to unassociate repos from
@type group_id: str
@param criteria: Criteria instance representing the set of repos to unassociate
@type criteria: L{pulp.server.db.model.criteria.Criteria}
"""
group_collection = validate_existing_repo_group(group_id)
repo_collection = Repo.get_collection()
cursor = repo_collection.query(criteria)
repo_ids = [r['id'] for r in cursor]
if not repo_ids:
return
group_collection.update({'id': group_id},
{'$pullAll': {'repo_ids': repo_ids}},
safe=True)
def add_notes(self, group_id, notes):
"""
Add a set of notes to a repo group.
@param group_id: unique id of the group to add notes to
@type group_id: str
@param notes: notes to add to the repo group
@type notes: dict
"""
group_collection = validate_existing_repo_group(group_id)
set_doc = dict(('notes.' + k, v) for k, v in notes.items())
if set_doc:
group_collection.update({'id': group_id}, {'$set': set_doc}, safe=True)
def remove_notes(self, group_id, keys):
"""
Remove a set of notes from a repo group.
@param group_id: unique id of the group to remove notes from
@type group_id: str
@param keys: list of note keys to remove
@type keys: list
"""
group_collection = validate_existing_repo_group(group_id)
unset_doc = dict(('notes.' + k, 1) for k in keys)
group_collection.update({'id': group_id}, {'$unset': unset_doc}, safe=True)
def set_note(self, group_id, key, value):
"""
Set a single key and value pair in a repo group's notes.
@param group_id: unique id of the repo group to set a note on
@type group_id: str
@param key: note key
@type key: immutable
@param value: note value
"""
self.add_notes(group_id, {key: value})
def unset_note(self, group_id, key):
"""
Unset a single key and value pair in a repo group's notes.
@param group_id: unique id of the repo group to unset a note on
@type group_id: str
@param key: note key
@type key: immutable
"""
self.remove_notes(group_id, [key])
associate = task(RepoGroupManager.associate, base=Task, ignore_result=True)
create_and_configure_repo_group = task(RepoGroupManager.create_and_configure_repo_group, base=Task)
delete_repo_group = task(RepoGroupManager.delete_repo_group, base=Task, ignore_result=True)
unassociate = task(RepoGroupManager.unassociate, base=Task, ignore_result=True)
update_repo_group = task(RepoGroupManager.update_repo_group, base=Task)
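# Usage sketch: the task wrappers above let callers dispatch the same
# operations through celery, e.g. (the group id and the standard celery
# apply_async API below are illustrative):
#
#   delete_repo_group.apply_async(args=['demo-group'])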
def validate_existing_repo_group(group_id):
"""
Validate the existence of a repo group, given its id.
Returns the repo group db collection upon successful validation,
raises an exception upon failure
@param group_id: unique id of the repo group to validate
@type group_id: str
@return: repo group db collection
@rtype: L{pulp.server.db.connection.PulpCollection}
@raise: L{pulp.server.exceptions.MissingResource}
"""
collection = RepoGroup.get_collection()
repo_group = collection.find_one({'id': group_id})
if repo_group is not None:
return collection
raise pulp_exceptions.MissingResource(repo_group=group_id)
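def _example_group_lifecycle():
    """Usage sketch: create, annotate and delete a repo group. The group id
    and note values are illustrative, and a configured Pulp database
    connection is assumed."""
    RepoGroupManager.create_repo_group('demo-group', display_name='Demo')
    RepoGroupManager.update_repo_group('demo-group', notes={'env': 'staging'})
    RepoGroupManager.delete_repo_group('demo-group')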
| gpl-2.0 |
codekaki/odoo | addons/account_bank_statement_extensions/__openerp__.py | 55 | 2308 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Bank Statement Extensions to Support e-banking',
'version': '0.3',
'license': 'AGPL-3',
'author': 'Noviat',
'category': 'Generic Modules/Accounting',
'description': '''
Module that extends the standard account_bank_statement_line object for improved e-banking support.
===================================================================================================
This module adds:
-----------------
- valuta date
- batch payments
- traceability of changes to bank statement lines
- bank statement line views
- bank statements balances report
- performance improvements for digital import of bank statement (via
'ebanking_import' context flag)
- name_search on res.partner.bank enhanced to allow search on bank
and iban account numbers
''',
'depends': ['account'],
'demo': [],
'data' : [
'security/ir.model.access.csv',
'account_bank_statement_view.xml',
'account_bank_statement_report.xml',
'wizard/confirm_statement_line_wizard.xml',
'wizard/cancel_statement_line_wizard.xml',
'data/account_bank_statement_extensions_data.xml',
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
noif/locust | docs/conf.py | 34 | 2953 | # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import os
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
# autoclass options
#autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
#templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Locust'
#copyright = ''
# Intersphinx config
intersphinx_mapping = {
'requests': ('http://requests.readthedocs.org/en/latest/', None),
}
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
from locust import version
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# Sphinx will recurse into subversion configuration folders and try to read
# any document file within. These should be ignored.
# Note: exclude_dirnames is new in Sphinx 0.5
exclude_dirnames = []
# Options for HTML output
# -----------------------
html_show_sourcelink = False
html_file_suffix = ".html"
# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# HTML theme
#html_theme = "haiku"
#html_theme = "default"
#html_theme_options = {
# "rightsidebar": "true",
# "codebgcolor": "#fafcfa",
# "bodyfont": "Arial",
#}
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
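# Usage sketch: with this conf.py in place, the HTML docs are typically
# built from the docs directory with (the output path is an assumption):
#
#   sphinx-build -b html . _build/html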
| mit |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/encodings/mac_latin2.py | 219 | 14118 | """ Python Character Mapping Codec mac_latin2 generated from 'MAPPINGS/VENDORS/MICSFT/MAC/LATIN2.TXT' with gencodec.py.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-latin2',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
'\u2020' # 0xA0 -> DAGGER
'\xb0' # 0xA1 -> DEGREE SIGN
'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
'\xa3' # 0xA3 -> POUND SIGN
'\xa7' # 0xA4 -> SECTION SIGN
'\u2022' # 0xA5 -> BULLET
'\xb6' # 0xA6 -> PILCROW SIGN
'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
'\xae' # 0xA8 -> REGISTERED SIGN
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u2122' # 0xAA -> TRADE MARK SIGN
'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
'\xa8' # 0xAC -> DIAERESIS
'\u2260' # 0xAD -> NOT EQUAL TO
'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
'\u2211' # 0xB7 -> N-ARY SUMMATION
'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xac' # 0xC2 -> NOT SIGN
'\u221a' # 0xC3 -> SQUARE ROOT
'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
'\u2206' # 0xC6 -> INCREMENT
'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
'\xa0' # 0xCA -> NO-BREAK SPACE
'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
'\u2013' # 0xD0 -> EN DASH
'\u2014' # 0xD1 -> EM DASH
'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
'\xf7' # 0xD6 -> DIVISION SIGN
'\u25ca' # 0xD7 -> LOZENGE
'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
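# A round-trip sketch against the tables above (the sample string is
# illustrative; the registered codec name 'mac-latin2' is equivalent to
# calling Codec() directly):
if __name__ == '__main__':
    sample = '\u0141\xf3d\u017a'        # 'Łódź'
    raw, _ = Codec().encode(sample)     # -> b'\xfc\x97d\x90'
    text, _ = Codec().decode(raw)
    assert text == sample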
| mit |
Pablo126/SSBW | Tarea1y2/flask_1/flask1/lib/python3.5/site-packages/flask/json.py | 121 | 9183 | # -*- coding: utf-8 -*-
"""
flask.jsonimpl
~~~~~~~~~~~~~~
Implementation helpers for the JSON support in Flask.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import io
import uuid
from datetime import date
from .globals import current_app, request
from ._compat import text_type, PY2
from werkzeug.http import http_date
from jinja2 import Markup
# Use the same json implementation as itsdangerous on which we
# depend anyways.
from itsdangerous import json as _json
# Figure out if simplejson escapes slashes. This behavior was changed
# from one version to another without reason.
_slash_escape = '\\/' not in _json.dumps('/')
__all__ = ['dump', 'dumps', 'load', 'loads', 'htmlsafe_dump',
'htmlsafe_dumps', 'JSONDecoder', 'JSONEncoder',
'jsonify']
def _wrap_reader_for_text(fp, encoding):
if isinstance(fp.read(0), bytes):
fp = io.TextIOWrapper(io.BufferedReader(fp), encoding)
return fp
def _wrap_writer_for_text(fp, encoding):
try:
fp.write('')
except TypeError:
fp = io.TextIOWrapper(fp, encoding)
return fp
class JSONEncoder(_json.JSONEncoder):
"""The default Flask JSON encoder. This one extends the default simplejson
encoder by also supporting ``datetime`` objects, ``UUID`` as well as
``Markup`` objects which are serialized as RFC 822 datetime strings (same
as the HTTP date format). In order to support more data types override the
:meth:`default` method.
"""
def default(self, o):
"""Implement this method in a subclass such that it returns a
serializable object for ``o``, or calls the base implementation (to
raise a :exc:`TypeError`).
For example, to support arbitrary iterators, you could implement
default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
if isinstance(o, date):
return http_date(o.timetuple())
if isinstance(o, uuid.UUID):
return str(o)
if hasattr(o, '__html__'):
return text_type(o.__html__())
return _json.JSONEncoder.default(self, o)
class JSONDecoder(_json.JSONDecoder):
"""The default JSON decoder. This one does not change the behavior from
the default simplejson decoder. Consult the :mod:`json` documentation
for more information. This decoder is not only used for the load
functions of this module but also :attr:`~flask.Request`.
"""
def _dump_arg_defaults(kwargs):
"""Inject default arguments for dump functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_encoder)
if not current_app.config['JSON_AS_ASCII']:
kwargs.setdefault('ensure_ascii', False)
kwargs.setdefault('sort_keys', current_app.config['JSON_SORT_KEYS'])
else:
kwargs.setdefault('sort_keys', True)
kwargs.setdefault('cls', JSONEncoder)
def _load_arg_defaults(kwargs):
"""Inject default arguments for load functions."""
if current_app:
kwargs.setdefault('cls', current_app.json_decoder)
else:
kwargs.setdefault('cls', JSONDecoder)
def dumps(obj, **kwargs):
"""Serialize ``obj`` to a JSON formatted ``str`` by using the application's
configured encoder (:attr:`~flask.Flask.json_encoder`) if there is an
application on the stack.
By default this function can return ``unicode`` strings or ascii-only
bytestrings, which coerce into unicode strings automatically. That behavior
is controlled by the ``JSON_AS_ASCII`` configuration variable and can be
overridden by the simplejson ``ensure_ascii`` parameter.
"""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
rv = _json.dumps(obj, **kwargs)
if encoding is not None and isinstance(rv, text_type):
rv = rv.encode(encoding)
return rv
def dump(obj, fp, **kwargs):
"""Like :func:`dumps` but writes into a file object."""
_dump_arg_defaults(kwargs)
encoding = kwargs.pop('encoding', None)
if encoding is not None:
fp = _wrap_writer_for_text(fp, encoding)
_json.dump(obj, fp, **kwargs)
def loads(s, **kwargs):
"""Unserialize a JSON object from a string ``s`` by using the application's
configured decoder (:attr:`~flask.Flask.json_decoder`) if there is an
application on the stack.
"""
_load_arg_defaults(kwargs)
if isinstance(s, bytes):
s = s.decode(kwargs.pop('encoding', None) or 'utf-8')
return _json.loads(s, **kwargs)
def load(fp, **kwargs):
"""Like :func:`loads` but reads from a file object.
"""
_load_arg_defaults(kwargs)
if not PY2:
fp = _wrap_reader_for_text(fp, kwargs.pop('encoding', None) or 'utf-8')
return _json.load(fp, **kwargs)
def htmlsafe_dumps(obj, **kwargs):
"""Works exactly like :func:`dumps` but is safe for use in ``<script>``
tags. It accepts the same arguments and returns a JSON string. Note that
this is available in templates through the ``|tojson`` filter which will
also mark the result as safe. Due to how this function escapes certain
characters this is safe even if used outside of ``<script>`` tags.
The following characters are escaped in strings:
- ``<``
- ``>``
- ``&``
- ``'``
This makes it safe to embed such strings in any place in HTML with the
notable exception of double quoted attributes. In that case single
quote your attributes or HTML escape it in addition.
.. versionchanged:: 0.10
This function's return value is now always safe for HTML usage, even
if outside of script tags or if used in XHTML. This rule does not
hold true when using this function in HTML attributes that are double
quoted. Always single quote attributes if you use the ``|tojson``
filter. Alternatively use ``|tojson|forceescape``.
"""
rv = dumps(obj, **kwargs) \
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
if not _slash_escape:
rv = rv.replace('\\/', '/')
return rv
def htmlsafe_dump(obj, fp, **kwargs):
"""Like :func:`htmlsafe_dumps` but writes into a file object."""
fp.write(text_type(htmlsafe_dumps(obj, **kwargs)))
def jsonify(*args, **kwargs):
"""This function wraps :func:`dumps` to add a few enhancements that make
life easier. It turns the JSON output into a :class:`~flask.Response`
object with the :mimetype:`application/json` mimetype. For convenience, it
also converts multiple arguments into an array or multiple keyword arguments
into a dict. This means that both ``jsonify(1,2,3)`` and
``jsonify([1,2,3])`` serialize to ``[1,2,3]``.
For clarity, the JSON serialization behavior has the following differences
from :func:`dumps`:
1. Single argument: Passed straight through to :func:`dumps`.
2. Multiple arguments: Converted to an array before being passed to
:func:`dumps`.
3. Multiple keyword arguments: Converted to a dict before being passed to
:func:`dumps`.
4. Both args and kwargs: Behavior undefined and will throw an exception.
Example usage::
from flask import jsonify
@app.route('/_get_current_user')
def get_current_user():
return jsonify(username=g.user.username,
email=g.user.email,
id=g.user.id)
This will send a JSON response like this to the browser::
{
"username": "admin",
"email": "admin@localhost",
"id": 42
}
.. versionchanged:: 0.11
Added support for serializing top-level arrays. This introduces a
security risk in ancient browsers. See :ref:`json-security` for details.
This function's response will be pretty printed if it was not requested
with ``X-Requested-With: XMLHttpRequest`` to simplify debugging unless
the ``JSONIFY_PRETTYPRINT_REGULAR`` config parameter is set to false.
Compressed (not pretty) formatting currently means no indents and no
spaces after separators.
.. versionadded:: 0.2
"""
indent = None
separators = (',', ':')
if current_app.config['JSONIFY_PRETTYPRINT_REGULAR'] and not request.is_xhr:
indent = 2
separators = (', ', ': ')
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
return current_app.response_class(
(dumps(data, indent=indent, separators=separators), '\n'),
mimetype=current_app.config['JSONIFY_MIMETYPE']
)
def tojson_filter(obj, **kwargs):
return Markup(htmlsafe_dumps(obj, **kwargs))
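# Usage sketch: outside of an application context, dumps()/loads() fall back
# to the module-level encoder/decoder, so dates are serialized through
# JSONEncoder.default above (the sample values are illustrative):
if __name__ == '__main__':
    import datetime
    payload = dumps({'when': datetime.date(2015, 1, 2)})
    print(payload)  # -> {"when": "Fri, 02 Jan 2015 00:00:00 GMT"}
    assert loads(payload)['when'].startswith('Fri')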
| gpl-3.0 |
enen92/script.tvlogo.downloader | resources/lib/tvlogodownloader.py | 5 | 13160 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 enen92
#
# This program is free software; you can redistribute it and/or modify it under the terms
# of the GNU General Public License as published by the Free Software Foundation;
# either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with this program;
# if not, see <http://www.gnu.org/licenses/>.
from addoncommon.common_variables import *
from addoncommon.tvldutils import *
import thelogodb
import urllib
import json
import sys
import logowindow
import downloader
import automaticd
import postprocessing
import context
def main_menu(select=False,choose=''):
print "[Tvlogo Downloader] Main menu"
options = ["Automatic (All user channels)","Automatic (only user channels without logos)","Manual (Specific user channels)","Entire Packages (database)","Specific channels (database)"]
optionsvar = ["autoall","automissing","manual","package","channel"]
if not select: choose = xbmcgui.Dialog().select('TVLogo Downloader',options)
if choose > -1 or select == True:
if optionsvar[choose] == "autoall":
automaticd.automatic_downloader('all')
elif optionsvar[choose] == "automissing":
automaticd.automatic_downloader('missing')
elif optionsvar[choose] == "manual":
#get tv groups
json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "PVR.GetChannelGroups", "params": {"channeltype" : "tv"}, "id": 1 }')
decoded_data = json.loads(json_response)
groupids = []
grouplabels = []
try: groups = decoded_data['result']['channelgroups']
except:
mensagemok('TVLogo Downloader','Live TV is not enabled in Kodi or no channels are available.')
sys.exit(0)
for x in range(0, len(decoded_data['result']['channelgroups'])):
#check if group has channels
has_channels = False
if groups[x]["channelgroupid"] > -1:
json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "PVR.GetChannels", "params": {"channelgroupid" : '+str(groups[x]["channelgroupid"])+',"properties":["channel","channeltype","thumbnail"]}, "id": 1 }')
channel_number = bool("channels" in json.loads(json_response)["result"])
if channel_number: has_channels = True
if has_channels:
groupids.append(groups[x]["channelgroupid"])
grouplabels.append('TV: ' + groups[x]["label"])
#get radio groups
json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "PVR.GetChannelGroups", "params": {"channeltype" : "radio"}, "id": 1 }')
decoded_data = json.loads(json_response)
groups = decoded_data['result']['channelgroups']
for x in range(0, len(decoded_data['result']['channelgroups'])):
#check if group has channels
has_channels = False
if groups[x]["channelgroupid"] > -1:
json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "PVR.GetChannels", "params": {"channelgroupid" : '+str(groups[x]["channelgroupid"])+',"properties":["channel","channeltype","thumbnail"]}, "id": 1 }')
channel_number = bool("channels" in json.loads(json_response)["result"])
if channel_number: has_channels = True
if has_channels:
groupids.append(groups[x]["channelgroupid"])
grouplabels.append('Radio: ' + groups[x]["label"])
choose = xbmcgui.Dialog().select('TVLogo Downloader - Channel Groups',grouplabels)
if choose > -1:
groupid = groupids[choose]
json_response = xbmc.executeJSONRPC('{"jsonrpc": "2.0", "method": "PVR.GetChannels", "params": {"channelgroupid" : '+str(groupid)+',"properties":["channel","channeltype","thumbnail"]}, "id": 1 }')
decoded_data = json.loads(json_response)
channel_list = []
channel_labels = []
if "channels" in decoded_data["result"].keys():
for channel in decoded_data["result"]["channels"]:
channel_list.append(channel["channelid"])
channel_labels.append(channel["label"])
if channel_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader - Channel List',channel_labels)
if choose > -1:
context.run(channel_labels[choose])
else:
main_menu()
else:
mensagemok('TVLogo Downloader','No channels available!')
main_menu(select=True,choose=2)
else:
mensagemok('TVLogo Downloader',"Group doesn't have any channels.")
main_menu(select=True,choose=2)
else:
main_menu()
elif optionsvar[choose] == "package":
entire_packages()
elif optionsvar[choose] == "channel":
specific_channels()
def specific_channels():
options = ["By Country","By Country Package","By Package","Search"]
optionsvar = ["country","country_package","package","search"]
choose = xbmcgui.Dialog().select('TVLogo Downloader',options)
if choose > -1:
if optionsvar[choose] == 'country':
countries = thelogodb.Channels().get_countries()
if countries:
country_list = []
for country in countries:
if country["strCountry"] and country != 'None': country_list.append(country["strCountry"])
choose = xbmcgui.Dialog().select('TVLogo Downloader',country_list)
if choose > -1:
channels = thelogodb.Channels().by_country(urllib.quote(country_list[choose]))
if channels:
logowindow.start(channels,"True","True")
specific_channels()
else:
mensagemok('TVLogo Downloader','No channels with logos in thelogodb!')
specific_channels()
else:
specific_channels()
else:
specific_channels()
elif optionsvar[choose] == 'country_package':
country_list = []
packages = thelogodb.Packages().get_all()
if packages:
for package in packages:
country = package["strCountry"]
if country and country != 'None' and country not in country_list: country_list.append(country)
if country_list:
country_list = sorted(country_list)
choose = xbmcgui.Dialog().select('TVLogo Downloader',country_list)
if choose > -1:
country = country_list[choose]
packages = thelogodb.Packages().get_all()
package_list = []
package_id_list = []
if packages:
for package in packages:
if package["strCountry"] == country:
package_label = '['+ str(package['strCountry']) +'] '+ package['strPackage'] + ' ('+str(package['strType'])+')'
package_id = package['idPackage']
package_list.append(package_label)
package_id_list.append(package_id)
if package_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader',package_list)
if choose > -1:
channels = thelogodb.Channels().by_package(package_id_list[choose])
channels_have_logos = False
for channel in channels:
if channel["strLogoWide"]: channels_have_logos = True
if channels and channels_have_logos:
logowindow.start(channels,"True","True")
specific_channels()
else:
mensagemok('TVLogo Downloader','No logos available for this package!')
specific_channels()
else:
entire_packages()
else:
mensagemok('TVLogo Downloader','No packages available!')
entire_packages()
else:
mensagemok('TVLogo Downloader','Error getting packages!')
entire_packages()
else:
mensagemok('TVLogo Downloader','No packages available!')
entire_packages()
elif optionsvar[choose] == 'package':
packages = thelogodb.Packages().get_all()
package_list = []
package_id_list = []
if packages:
for package in packages:
package_label = '['+ str(package['strCountry']) +'] '+ package['strPackage'] + ' ('+str(package['strType'])+')'
package_id = package['idPackage']
package_list.append(package_label)
package_id_list.append(package_id)
if package_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader',package_list)
if choose > -1:
channels = thelogodb.Channels().by_package(package_id_list[choose])
if channels:
logowindow.start(channels,"True","True")
specific_channels()
else:
mensagemok('TVLogo Downloader','No logos available for this package!')
specific_channels()
else:
entire_packages()
else:
mensagemok('TVLogo Downloader','Error getting packages!')
entire_packages()
elif optionsvar[choose] == 'search':
keyb = xbmc.Keyboard('', 'Enter channel')
keyb.doModal()
if (keyb.isConfirmed()):
search_parameter = urllib.quote_plus(keyb.getText())
if search_parameter:
channels = thelogodb.Channels().by_keyword(search_parameter)
if channels:
logowindow.start(channels,"True","True")
else:
mensagemok('TVLogo Downloader','No packages available!')
specific_channels()
else:
specific_channels()
else:
main_menu()
def entire_packages():
options = ["All Packages","Packages by Region","Packages by Country"]
optionsvar = ["all","region","country"]
choose = xbmcgui.Dialog().select('TVLogo Downloader',options)
if choose > -1:
if optionsvar[choose] == 'all':
packages = thelogodb.Packages().get_all()
package_list = []
package_id_list = []
if packages:
for package in packages:
package_label = '['+ str(package['strCountry']) +'] '+ package['strPackage'] + ' ('+str(package['strType'])+')'
package_id = package['idPackage']
package_list.append(package_label)
package_id_list.append(package_id)
if package_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader',package_list)
if choose > -1:
downloader.download_entire_package(str(package_id_list[choose]))
main_menu()
else:
entire_packages()
else:
mensagemok('TVLogo Downloader','Error getting packages!')
entire_packages()
elif optionsvar[choose] == 'region':
options = ["Europe","America","Africa","Asia","Oceania"]
optionsvar = ["Europe","America","Africa","Asia","Oceania"]
choose = xbmcgui.Dialog().select('TVLogo Downloader',options)
if choose > -1:
packages = thelogodb.Packages().by_country(urllib.quote_plus(optionsvar[choose]))
package_list = []
package_id_list = []
if packages:
for package in packages:
package_label = '['+ str(package['strCountry']) +'] '+ package['strPackage'] + ' ('+str(package['strType'])+')'
package_id = package['idPackage']
package_list.append(package_label)
package_id_list.append(package_id)
if package_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader',package_list)
if choose > -1:
downloader.download_entire_package(str(package_id_list[choose]))
main_menu()
else:
entire_packages()
else:
mensagemok('TVLogo Downloader','No packages available!')
entire_packages()
else:
entire_packages()
elif optionsvar[choose] == 'country':
country_list = []
packages = thelogodb.Packages().get_all()
if packages:
for package in packages:
country = package["strCountry"]
if country and country != 'None' and country not in country_list: country_list.append(country)
if country_list:
country_list = sorted(country_list)
choose = xbmcgui.Dialog().select('TVLogo Downloader',country_list)
if choose > -1:
country = country_list[choose]
packages = thelogodb.Packages().get_all()
package_list = []
package_id_list = []
if packages:
for package in packages:
if package["strCountry"] == country:
package_label = '['+ str(package['strCountry']) +'] '+ package['strPackage'] + ' ('+str(package['strType'])+')'
package_id = package['idPackage']
package_list.append(package_label)
package_id_list.append(package_id)
if package_list:
choose = xbmcgui.Dialog().select('TVLogo Downloader',package_list)
if choose > -1:
downloader.download_entire_package(str(package_id_list[choose]))
main_menu()
else:
entire_packages()
else:
mensagemok('TVLogo Downloader','No packages available!')
entire_packages()
else:
mensagemok('TVLogo Downloader','Error getting packages!')
entire_packages()
else:
mensagemok('TVLogo Downloader','No packages available!')
entire_packages()
else:
main_menu()
def get_nonhd_match(channel):
# Fall back to the non-HD variant of the channel name when the HD lookup finds nothing
if settings.getSetting('search_nonhd') == 'true' and ' hd' in urllib.unquote_plus(channel).lower():
temp = urllib.unquote_plus(channel.lower())
if ' hd ' in temp: newchannel = urllib.quote_plus(urllib.unquote_plus(channel.lower().replace(' hd','')))
elif ' hd ' not in temp and ' hd' in temp: newchannel = urllib.quote_plus(urllib.unquote_plus(channel.lower()).replace(' hd',''))
else: newchannel = urllib.quote_plus(urllib.unquote_plus(channel.lower()).replace('hd',''))
match = thelogodb.Channels().by_keyword(newchannel)
return match
else:
return []
| gpl-2.0 |
brinbois/Sick-Beard | lib/hachoir_metadata/metadata_item.py | 90 | 4916 | from lib.hachoir_core.tools import makeUnicode, normalizeNewline
from lib.hachoir_core.error import HACHOIR_ERRORS
from lib.hachoir_metadata import config
from lib.hachoir_metadata.setter import normalizeString
MIN_PRIORITY = 100
MAX_PRIORITY = 999
QUALITY_FASTEST = 0.0
QUALITY_FAST = 0.25
QUALITY_NORMAL = 0.5
QUALITY_GOOD = 0.75
QUALITY_BEST = 1.0
class DataValue:
def __init__(self, value, text):
self.value = value
self.text = text
class Data:
def __init__(self, key, priority, description,
text_handler=None, type=None, filter=None, conversion=None):
"""
handler is only used if value is not string nor unicode, prototype:
def handler(value) -> str/unicode
"""
assert MIN_PRIORITY <= priority <= MAX_PRIORITY
assert isinstance(description, unicode)
self.metadata = None
self.key = key
self.description = description
self.values = []
if type and not isinstance(type, (tuple, list)):
type = (type,)
self.type = type
self.text_handler = text_handler
self.filter = filter
self.priority = priority
self.conversion = conversion
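    # A minimal usage sketch (hypothetical key and handler, not taken from the
    # real hachoir extractors):
    #   duration = Data('duration', 101, u"Duration",
    #                   text_handler=lambda v: u"%u sec" % v, type=int)
    #   duration.add(42)    # stored as DataValue(42, u"42 sec")
    #   duration.add(42)    # duplicate value, silently skipped by add()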
def _createItem(self, value, text=None):
if text is None:
if isinstance(value, unicode):
text = value
elif self.text_handler:
text = self.text_handler(value)
assert isinstance(text, unicode)
else:
text = makeUnicode(value)
return DataValue(value, text)
def add(self, value):
if isinstance(value, tuple):
if len(value) != 2:
raise ValueError("Data.add() only accept tuple of 2 elements: (value,text)")
value, text = value
else:
text = None
# Skip value 'None'
if value is None:
return
if isinstance(value, (str, unicode)):
value = normalizeString(value)
if not value:
return
# Convert string to Unicode string using charset ISO-8859-1
if self.conversion:
try:
new_value = self.conversion(self.metadata, self.key, value)
except HACHOIR_ERRORS, err:
self.metadata.warning("Error during conversion of %r value: %s" % (
self.key, err))
return
if new_value is None:
dest_types = " or ".join(str(item.__name__) for item in self.type)
self.metadata.warning("Unable to convert %s=%r (%s) to %s" % (
self.key, value, type(value).__name__, dest_types))
return
if isinstance(new_value, tuple):
if text:
value = new_value[0]
else:
value, text = new_value
else:
value = new_value
elif isinstance(value, str):
value = unicode(value, "ISO-8859-1")
if self.type and not isinstance(value, self.type):
dest_types = " or ".join(str(item.__name__) for item in self.type)
self.metadata.warning("Key %r: value %r type (%s) is not %s" % (
self.key, value, type(value).__name__, dest_types))
return
# Skip empty strings
if isinstance(value, unicode):
value = normalizeNewline(value)
if config.MAX_STR_LENGTH \
and config.MAX_STR_LENGTH < len(value):
value = value[:config.MAX_STR_LENGTH] + "(...)"
# Skip duplicates
if value in self:
return
# Use filter
if self.filter and not self.filter(value):
self.metadata.warning("Skip value %s=%r (filter)" % (self.key, value))
return
        # For strings, if you have "verylongtext" and "verylo",
        # keep the longer value
if isinstance(value, unicode):
for index, item in enumerate(self.values):
item = item.value
if not isinstance(item, unicode):
continue
if value.startswith(item):
# Find longer value, replace the old one
self.values[index] = self._createItem(value, text)
return
if item.startswith(value):
# Find truncated value, skip it
return
# Add new value
self.values.append(self._createItem(value, text))
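    # add() also accepts a (value, text) pair when the caller already has a
    # human-readable form, e.g. (assumed example, not from the extractors):
    #   duration.add((5400, u"1 hour 30 min")) keeps 5400 as the raw value
    #   and u"1 hour 30 min" as its display text.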
def __len__(self):
return len(self.values)
def __getitem__(self, index):
return self.values[index]
def __contains__(self, value):
for item in self.values:
if value == item.value:
return True
return False
def __cmp__(self, other):
return cmp(self.priority, other.priority)
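    # Note: __cmp__ (Python 2 comparison protocol) orders Data objects by
    # ascending priority, so sorted() over a list of entries puts the
    # lowest-numbered priorities first.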
| gpl-3.0 |
cockroachdb/examples-orms | python/django/cockroach_example/settings.py | 1 | 3362 | """
Django settings for cockroach_example project.
Generated by 'django-admin startproject' using Django 2.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
from urllib.parse import urlparse
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0pld^66i)iv4df8km5vc%1^sskuqjf16jk&z=c^rk--oh6i0i^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cockroach_example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cockroach_example.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cockroach_example.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
port = 26257
addr = os.getenv('ADDR')
if addr is not None:
url = urlparse(addr)
port = url.port
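# Example (assumed value): with ADDR='postgresql://root@localhost:26258',
# urlparse() reports port 26258, which overrides the 26257 default above.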
DATABASES = {
'default': {
'ENGINE' : 'django_cockroachdb',
'NAME' : 'company_django',
'USER' : 'root',
'PASSWORD': '',
'HOST' : 'localhost',
'PORT' : port,
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 |
jbzdak/data-base-checker | bazydanych2/settingsdev.py | 1 | 1181 |
from bazydanych2.settingsshared import *
DEBUG=True
TEMPLATE_DEBUG=True
STATIC_ROOT = '/tmp/staticfiles'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
"level": 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler'
}
},
'root':{
'handlers' : ['console']
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/app-messages'
INSTALLED_APPS += ('celery_test_app', )
ALLOW_OFFILNE_GRADING = False
SCHEMA_CHECKER_HOST = '192.168.56.30' | gpl-3.0 |
ted-gould/nova | nova/tests/unit/compute/test_compute_mgr.py | 1 | 197801 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for ComputeManager()."""
import contextlib
import time
import uuid
from cinderclient import exceptions as cinder_exception
from eventlet import event as eventlet_event
import mock
from mox3 import mox
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import nova
from nova.compute import build_results
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_server_actions
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova import utils
from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
class ComputeManagerUnitTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
fake_server_actions.stub_out_action_events(self.stubs)
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(manager.ComputeManager, '_sync_instance_power_state')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def _test_handle_lifecycle_event(self, mock_get, mock_sync,
mock_get_power_state, transition,
event_pwr_state, current_pwr_state):
event = mock.Mock()
event.get_instance_uuid.return_value = mock.sentinel.uuid
event.get_transition.return_value = transition
mock_get_power_state.return_value = current_pwr_state
self.compute.handle_lifecycle_event(event)
mock_get.assert_called_with(mock.ANY, mock.sentinel.uuid,
expected_attrs=[])
if event_pwr_state == current_pwr_state:
mock_sync.assert_called_with(mock.ANY, mock_get.return_value,
event_pwr_state)
else:
self.assertFalse(mock_sync.called)
def test_handle_lifecycle_event(self):
event_map = {virtevent.EVENT_LIFECYCLE_STOPPED: power_state.SHUTDOWN,
virtevent.EVENT_LIFECYCLE_STARTED: power_state.RUNNING,
virtevent.EVENT_LIFECYCLE_PAUSED: power_state.PAUSED,
virtevent.EVENT_LIFECYCLE_RESUMED: power_state.RUNNING,
virtevent.EVENT_LIFECYCLE_SUSPENDED:
power_state.SUSPENDED,
}
for transition, pwr_state in six.iteritems(event_map):
self._test_handle_lifecycle_event(transition=transition,
event_pwr_state=pwr_state,
current_pwr_state=pwr_state)
def test_handle_lifecycle_event_state_mismatch(self):
self._test_handle_lifecycle_event(
transition=virtevent.EVENT_LIFECYCLE_STOPPED,
event_pwr_state=power_state.SHUTDOWN,
current_pwr_state=power_state.RUNNING)
def test_delete_instance_info_cache_delete_ordering(self):
call_tracker = mock.Mock()
call_tracker.clear_events_for_instance.return_value = None
mgr_class = self.compute.__class__
orig_delete = mgr_class._delete_instance
specd_compute = mock.create_autospec(mgr_class)
# spec out everything except for the method we really want
# to test, then use call_tracker to verify call sequence
specd_compute._delete_instance = orig_delete
mock_inst = mock.Mock()
mock_inst.uuid = 'inst-1'
mock_inst.save = mock.Mock()
mock_inst.destroy = mock.Mock()
mock_inst.system_metadata = mock.Mock()
def _mark_notify(*args, **kwargs):
call_tracker._notify_about_instance_usage(*args, **kwargs)
def _mark_shutdown(*args, **kwargs):
call_tracker._shutdown_instance(*args, **kwargs)
specd_compute.instance_events = call_tracker
specd_compute._notify_about_instance_usage = _mark_notify
specd_compute._shutdown_instance = _mark_shutdown
mock_inst.info_cache = call_tracker
specd_compute._delete_instance(specd_compute,
self.context,
mock_inst,
mock.Mock(),
mock.Mock())
methods_called = [n for n, a, k in call_tracker.mock_calls]
self.assertEqual(['clear_events_for_instance',
'_notify_about_instance_usage',
'_shutdown_instance', 'delete'],
methods_called)
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
@mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db')
def test_update_available_resource(self, get_db_nodes, get_avail_nodes,
get_rt):
info = {'cn_id': 1}
def _make_compute_node(hyp_hostname):
cn = mock.Mock(spec_set=['hypervisor_hostname', 'id',
'destroy'])
cn.id = info['cn_id']
info['cn_id'] += 1
cn.hypervisor_hostname = hyp_hostname
return cn
def _make_rt(node):
n = mock.Mock(spec_set=['update_available_resource',
'nodename'])
n.nodename = node
return n
ctxt = mock.Mock()
db_nodes = [_make_compute_node('node1'),
_make_compute_node('node2'),
_make_compute_node('node3'),
_make_compute_node('node4')]
avail_nodes = set(['node2', 'node3', 'node4', 'node5'])
avail_nodes_l = list(avail_nodes)
rts = [_make_rt(node) for node in avail_nodes_l]
# Make the 2nd and 3rd ones raise
exc = exception.ComputeHostNotFound(host='fake')
rts[1].update_available_resource.side_effect = exc
exc = test.TestingException()
rts[2].update_available_resource.side_effect = exc
rts_iter = iter(rts)
def _get_rt_side_effect(*args, **kwargs):
return next(rts_iter)
expected_rt_dict = {avail_nodes_l[0]: rts[0],
avail_nodes_l[2]: rts[2],
avail_nodes_l[3]: rts[3]}
get_db_nodes.return_value = db_nodes
get_avail_nodes.return_value = avail_nodes
get_rt.side_effect = _get_rt_side_effect
self.compute.update_available_resource(ctxt)
get_db_nodes.assert_called_once_with(ctxt, use_slave=True)
self.assertEqual([mock.call(node) for node in avail_nodes],
get_rt.call_args_list)
for rt in rts:
rt.update_available_resource.assert_called_once_with(ctxt)
self.assertEqual(expected_rt_dict,
self.compute._resource_tracker_dict)
# First node in set should have been removed from DB
for db_node in db_nodes:
if db_node.hypervisor_hostname == 'node1':
db_node.destroy.assert_called_once_with()
else:
self.assertFalse(db_node.destroy.called)
def test_delete_instance_without_info_cache(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ERROR,
host=self.compute.host,
expected_attrs=['system_metadata'])
quotas = mock.create_autospec(objects.Quotas, spec_set=True)
with contextlib.nested(
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute, '_shutdown_instance'),
mock.patch.object(instance, 'obj_load_attr'),
mock.patch.object(instance, 'save'),
mock.patch.object(instance, 'destroy')
) as (
            compute_notify_about_instance_usage, compute_shutdown_instance,
instance_obj_load_attr, instance_save, instance_destroy
):
instance.info_cache = None
self.compute._delete_instance(self.context, instance, [], quotas)
@mock.patch.object(network_api.API, 'allocate_for_instance')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(time, 'sleep')
def test_allocate_network_succeeds_after_retries(
self, mock_sleep, mock_save, mock_allocate_for_instance):
self.flags(network_allocate_retries=8)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
final_result = 'meow'
dhcp_options = None
mock_allocate_for_instance.side_effect = [
test.TestingException()] * 7 + [final_result]
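        # 7 failing attempts should produce 7 sleeps, doubling from 1s and
        # capped at 30s (assumed backoff behaviour of
        # _allocate_network_async); the successful 8th attempt does not sleep.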
        expected_sleep_times = [1, 2, 4, 8, 16, 30, 30]
res = self.compute._allocate_network_async(self.context, instance,
req_networks,
macs,
sec_groups,
is_vpn,
dhcp_options)
        mock_sleep.assert_has_calls(
            [mock.call(t) for t in expected_sleep_times])
self.assertEqual(final_result, res)
        # Ensure save is not called while allocating networks; the instance
        # is saved after the allocation.
self.assertFalse(mock_save.called)
self.assertEqual('True', instance.system_metadata['network_allocated'])
def test_allocate_network_fails(self):
self.flags(network_allocate_retries=0)
nwapi = self.compute.network_api
self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
instance = {}
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
nwapi.allocate_for_instance(
self.context, instance, vpn=is_vpn,
requested_networks=req_networks, macs=macs,
security_groups=sec_groups,
dhcp_options=dhcp_options).AndRaise(test.TestingException())
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute._allocate_network_async,
self.context, instance, req_networks, macs,
sec_groups, is_vpn, dhcp_options)
def test_allocate_network_neg_conf_value_treated_as_zero(self):
self.flags(network_allocate_retries=-1)
nwapi = self.compute.network_api
self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
instance = {}
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
# Only attempted once.
nwapi.allocate_for_instance(
self.context, instance, vpn=is_vpn,
requested_networks=req_networks, macs=macs,
security_groups=sec_groups,
dhcp_options=dhcp_options).AndRaise(test.TestingException())
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute._allocate_network_async,
self.context, instance, req_networks, macs,
sec_groups, is_vpn, dhcp_options)
@mock.patch.object(network_api.API, 'allocate_for_instance')
@mock.patch.object(manager.ComputeManager, '_instance_update')
@mock.patch.object(time, 'sleep')
def test_allocate_network_with_conf_value_is_one(
self, sleep, _instance_update, allocate_for_instance):
self.flags(network_allocate_retries=1)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
final_result = 'zhangtralon'
allocate_for_instance.side_effect = [test.TestingException(),
final_result]
res = self.compute._allocate_network_async(self.context, instance,
req_networks,
macs,
sec_groups,
is_vpn,
dhcp_options)
self.assertEqual(final_result, res)
self.assertEqual(1, sleep.call_count)
@mock.patch('nova.utils.spawn_n')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_build_and_run_instance')
def _test_max_concurrent_builds(self, mock_dbari, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
with mock.patch.object(self.compute,
'_build_semaphore') as mock_sem:
instance = objects.Instance(uuid=str(uuid.uuid4()))
for i in (1, 2, 3):
self.compute.build_and_run_instance(self.context, instance,
mock.sentinel.image,
mock.sentinel.request_spec,
{})
self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_builds_limited(self):
self.flags(max_concurrent_builds=2)
self._test_max_concurrent_builds()
def test_max_concurrent_builds_unlimited(self):
self.flags(max_concurrent_builds=0)
self._test_max_concurrent_builds()
def test_max_concurrent_builds_semaphore_limited(self):
self.flags(max_concurrent_builds=123)
self.assertEqual(123,
manager.ComputeManager()._build_semaphore.balance)
def test_max_concurrent_builds_semaphore_unlimited(self):
self.flags(max_concurrent_builds=0)
compute = manager.ComputeManager()
self.assertEqual(0, compute._build_semaphore.balance)
self.assertIsInstance(compute._build_semaphore,
compute_utils.UnlimitedSemaphore)
def test_nil_out_inst_obj_host_and_node_sets_nil(self):
instance = fake_instance.fake_instance_obj(self.context,
uuid='foo-uuid',
host='foo-host',
node='foo-node')
self.assertIsNotNone(instance.host)
self.assertIsNotNone(instance.node)
self.compute._nil_out_instance_obj_host_and_node(instance)
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
def test_init_host(self):
our_host = self.compute.host
inst = fake_instance.fake_db_instance(
vm_state=vm_states.ACTIVE,
info_cache=dict(test_instance_info_cache.fake_info_cache,
network_info=None),
security_groups=None)
startup_instances = [inst, inst, inst]
def _do_mock_calls(defer_iptables_apply):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(
self.context, our_host, columns_to_join=['info_cache'],
use_slave=False
).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
self.compute._destroy_evacuated_instances(self.context)
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_off()
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_on')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_off')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute,
'_destroy_evacuated_instances')
self.mox.StubOutWithMock(self.compute,
'_init_instance')
# Test with defer_iptables_apply
self.flags(defer_iptables_apply=True)
_do_mock_calls(True)
self.mox.ReplayAll()
self.compute.init_host()
self.mox.VerifyAll()
# Test without defer_iptables_apply
self.mox.ResetAll()
self.flags(defer_iptables_apply=False)
_do_mock_calls(False)
self.mox.ReplayAll()
self.compute.init_host()
# tearDown() uses context.get_admin_context(), so we have
# to do the verification here and unstub it.
self.mox.VerifyAll()
self.mox.UnsetStubs()
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
# just testing whether the cleanup_host method
# when fired will invoke the underlying driver's
# equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
self.compute.init_host()
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
# register_event_listener is called on startup (init_host) and
# in cleanup_host
mock_driver.register_event_listener.assert_has_calls([
mock.call(self.compute.handle_events), mock.call(None)])
mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')
def test_init_virt_events_disabled(self):
self.flags(handle_virt_lifecycle_events=False, group='workarounds')
with mock.patch.object(self.compute.driver,
'register_event_listener') as mock_register:
self.compute.init_virt_events()
self.assertFalse(mock_register.called)
@mock.patch('nova.objects.MigrationList.get_by_filters')
@mock.patch('nova.objects.Migration.save')
def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get):
our_host = self.compute.host
not_our_host = 'not-' + our_host
deleted_instance = fake_instance.fake_instance_obj(
self.context, host=not_our_host, uuid='fake-uuid')
migration = objects.Migration(instance_uuid=deleted_instance.uuid)
mock_mig_get.return_value = [migration]
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute, 'init_virt_events')
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute, '_init_instance')
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(self.context, our_host,
columns_to_join=['info_cache'],
use_slave=False
).AndReturn([])
self.compute.init_virt_events()
# simulate failed instance
self.compute._get_instances_on_driver(
self.context, {'deleted': False}).AndReturn([deleted_instance])
self.compute.network_api.get_instance_nw_info(
self.context, deleted_instance).AndRaise(
exception.InstanceNotFound(instance_id=deleted_instance['uuid']))
# ensure driver.destroy is called so that driver may
# clean up any dangling files
self.compute.driver.destroy(self.context, deleted_instance,
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute.init_host()
# tearDown() uses context.get_admin_context(), so we have
# to do the verification here and unstub it.
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_init_instance_with_binding_failed_vif_type(self):
# this instance will plug a 'binding_failed' vif
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake-uuid',
info_cache=None,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
task_state=None,
host=self.compute.host,
expected_attrs=['info_cache'])
with contextlib.nested(
mock.patch.object(context, 'get_admin_context',
return_value=self.context),
mock.patch.object(compute_utils, 'get_nw_info_for_instance',
return_value=network_model.NetworkInfo()),
mock.patch.object(self.compute.driver, 'plug_vifs',
side_effect=exception.VirtualInterfacePlugException(
"Unexpected vif_type=binding_failed")),
mock.patch.object(self.compute, '_set_instance_obj_error_state')
) as (get_admin_context, get_nw_info, plug_vifs, set_error_state):
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(self.context, instance)
def test__get_power_state_InstanceNotFound(self):
instance = fake_instance.fake_instance_obj(
self.context,
power_state=power_state.RUNNING)
with mock.patch.object(self.compute.driver,
'get_info',
side_effect=exception.InstanceNotFound(instance_id=1)):
self.assertEqual(self.compute._get_power_state(self.context,
instance),
power_state.NOSTATE)
def test__get_power_state_NotFound(self):
instance = fake_instance.fake_instance_obj(
self.context,
power_state=power_state.RUNNING)
with mock.patch.object(self.compute.driver,
'get_info',
side_effect=exception.NotFound()):
self.assertRaises(exception.NotFound,
self.compute._get_power_state,
self.context, instance)
def test_init_instance_failed_resume_sets_error(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake-uuid',
info_cache=None,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
task_state=None,
host=self.compute.host,
expected_attrs=['info_cache'])
self.flags(resume_guests_state_on_host_boot=True)
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
self.mox.StubOutWithMock(self.compute.driver,
'resume_state_on_host_boot')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute,
'_set_instance_obj_error_state')
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
self.compute._get_instance_block_device_info(mox.IgnoreArg(),
instance).AndReturn('fake-bdm')
self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
instance, mox.IgnoreArg(),
'fake-bdm').AndRaise(test.TestingException)
self.compute._set_instance_obj_error_state(mox.IgnoreArg(), instance)
self.mox.ReplayAll()
self.compute._init_instance('fake-context', instance)
def test_init_instance_stuck_in_deleting(self):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.DELETING)
self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.mox.StubOutWithMock(instance, 'obj_load_attr')
self.mox.StubOutWithMock(self.compute, '_create_reservations')
bdms = []
quotas = objects.quotas.Quotas(self.context)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid).AndReturn(bdms)
self.compute._create_reservations(self.context, instance,
instance.project_id,
instance.user_id).AndReturn(quotas)
self.compute._delete_instance(self.context, instance, bdms,
mox.IgnoreArg())
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_init_instance_stuck_in_deleting_raises_exception(
self, mock_get_by_instance_uuid, mock_get_by_uuid):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
metadata={},
system_metadata={},
host=self.compute.host,
vm_state=vm_states.ACTIVE,
task_state=task_states.DELETING,
expected_attrs=['metadata', 'system_metadata'])
bdms = []
reservations = ['fake-resv']
def _create_patch(name, attr):
patcher = mock.patch.object(name, attr)
mocked_obj = patcher.start()
self.addCleanup(patcher.stop)
return mocked_obj
mock_delete_instance = _create_patch(self.compute, '_delete_instance')
mock_set_instance_error_state = _create_patch(
self.compute, '_set_instance_obj_error_state')
mock_create_reservations = _create_patch(self.compute,
'_create_reservations')
mock_create_reservations.return_value = reservations
mock_get_by_instance_uuid.return_value = bdms
mock_get_by_uuid.return_value = instance
mock_delete_instance.side_effect = test.TestingException('test')
self.compute._init_instance(self.context, instance)
mock_set_instance_error_state.assert_called_once_with(
self.context, instance)
def _test_init_instance_reverts_crashed_migrations(self,
old_vm_state=None):
        power_on = (not old_vm_state or
                    old_vm_state == vm_states.ACTIVE)
sys_meta = {
'old_vm_state': old_vm_state
}
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_states.ERROR,
task_state=task_states.RESIZE_MIGRATING,
power_state=power_state.SHUTDOWN,
system_metadata=sys_meta,
host=self.compute.host,
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
self.mox.StubOutWithMock(self.compute.driver,
'finish_revert_migration')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute.driver, 'get_info')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute, '_retry_reboot')
self.compute._retry_reboot(self.context, instance).AndReturn(
(False, None))
compute_utils.get_nw_info_for_instance(instance).AndReturn(
network_model.NetworkInfo())
self.compute.driver.plug_vifs(instance, [])
self.compute._get_instance_block_device_info(
self.context, instance).AndReturn([])
self.compute.driver.finish_revert_migration(self.context, instance,
[], [], power_on)
instance.save()
self.compute.driver.get_info(instance).AndReturn(
hardware.InstanceInfo(state=power_state.SHUTDOWN))
self.compute.driver.get_info(instance).AndReturn(
hardware.InstanceInfo(state=power_state.SHUTDOWN))
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.assertIsNone(instance.task_state)
def test_init_instance_reverts_crashed_migration_from_active(self):
self._test_init_instance_reverts_crashed_migrations(
old_vm_state=vm_states.ACTIVE)
def test_init_instance_reverts_crashed_migration_from_stopped(self):
self._test_init_instance_reverts_crashed_migrations(
old_vm_state=vm_states.STOPPED)
def test_init_instance_reverts_crashed_migration_no_old_state(self):
self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
def test_init_instance_resets_crashed_live_migration(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.MIGRATING)
with contextlib.nested(
mock.patch.object(instance, 'save'),
mock.patch('nova.compute.utils.get_nw_info_for_instance',
return_value=network_model.NetworkInfo())
) as (save, get_nw_info):
self.compute._init_instance(self.context, instance)
save.assert_called_once_with(expected_task_state=['migrating'])
get_nw_info.assert_called_once_with(instance)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def _test_init_instance_sets_building_error(self, vm_state,
task_state=None):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_state,
host=self.compute.host,
task_state=task_state)
with mock.patch.object(instance, 'save') as save:
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_init_instance_sets_building_error(self):
self._test_init_instance_sets_building_error(vm_states.BUILDING)
def test_init_instance_sets_rebuilding_errors(self):
tasks = [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]
vms = [vm_states.ACTIVE, vm_states.STOPPED]
for vm_state in vms:
for task_state in tasks:
self._test_init_instance_sets_building_error(
vm_state, task_state)
def _test_init_instance_sets_building_tasks_error(self, instance):
instance.host = self.compute.host
with mock.patch.object(instance, 'save') as save:
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_init_instance_sets_building_tasks_error_scheduling(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=None,
task_state=task_states.SCHEDULING)
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_block_device(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_networking(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.NETWORKING
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_spawning(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.SPAWNING
self._test_init_instance_sets_building_tasks_error(instance)
def _test_init_instance_cleans_image_states(self, instance):
with mock.patch.object(instance, 'save') as save:
self.compute._get_power_state = mock.Mock()
self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.host = self.compute.host
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.compute.driver.post_interrupted_snapshot_cleanup.\
assert_called_once_with(self.context, instance)
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def _test_init_instance_cleans_task_states(self, powerstate, state,
mock_get_uuid, mock_get_power_state):
instance = objects.Instance(self.context)
instance.uuid = 'fake-uuid'
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.vm_state = vm_states.ACTIVE
instance.task_state = state
instance.host = self.compute.host
mock_get_power_state.return_value = powerstate
self.compute._init_instance(self.context, instance)
return instance
def test_init_instance_cleans_image_state_pending_upload(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_PENDING_UPLOAD
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_uploading(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_UPLOADING
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
self._test_init_instance_cleans_image_states(instance)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_pausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.PAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_unpausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.UNPAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager.unpause_instance')
def test_init_instance_cleans_paused_unpausing(self, mock_unpause):
def fake_unpause(context, instance):
instance.task_state = None
mock_unpause.side_effect = fake_unpause
instance = self._test_init_instance_cleans_task_states(
power_state.PAUSED, task_states.UNPAUSING)
mock_unpause.assert_called_once_with(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_init_instance_errors_when_not_migrating(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ERROR
instance.task_state = task_states.IMAGE_UPLOADING
instance.host = self.compute.host
self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.mox.VerifyAll()
def test_init_instance_deletes_error_deleting_instance(self):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
vm_state=vm_states.ERROR,
host=self.compute.host,
task_state=task_states.DELETING)
self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.mox.StubOutWithMock(instance, 'obj_load_attr')
self.mox.StubOutWithMock(objects.quotas, 'ids_from_instance')
self.mox.StubOutWithMock(self.compute, '_create_reservations')
bdms = []
quotas = objects.quotas.Quotas(self.context)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid).AndReturn(bdms)
objects.quotas.ids_from_instance(self.context, instance).AndReturn(
(instance.project_id, instance.user_id))
self.compute._create_reservations(self.context, instance,
instance.project_id,
instance.user_id).AndReturn(quotas)
self.compute._delete_instance(self.context, instance, bdms,
mox.IgnoreArg())
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.mox.VerifyAll()
def test_init_instance_resize_prep(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.RESIZE_PREP,
power_state=power_state.RUNNING)
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(compute_utils, 'get_nw_info_for_instance'),
mock.patch.object(instance, 'save', autospec=True)
) as (mock_get_power_state, mock_nw_info, mock_instance_save):
self.compute._init_instance(self.context, instance)
mock_instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.compute.utils.get_nw_info_for_instance')
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
@mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
def _test_shutdown_instance_exception(self, exc, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
mock_connector.side_effect = exc
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ERROR,
task_state=task_states.DELETING)
bdms = [mock.Mock(id=1, is_volume=True)]
self.compute._shutdown_instance(self.context, instance, bdms,
notify=False, try_deallocate_networks=False)
def test_shutdown_instance_endpoint_not_found(self):
exc = cinder_exception.EndpointNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_client_exception(self):
exc = cinder_exception.ClientException
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_volume_not_found(self):
exc = exception.VolumeNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_disk_not_found(self):
exc = exception.DiskNotFound
self._test_shutdown_instance_exception(exc)
def _test_init_instance_retries_reboot(self, instance, reboot_type,
return_power_state):
instance.host = self.compute.host
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=return_power_state),
mock.patch.object(self.compute, 'reboot_instance'),
mock.patch.object(compute_utils, 'get_nw_info_for_instance')
) as (
_get_power_state,
reboot_instance,
get_nw_info_for_instance
):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, block_device_info=None,
reboot_type=reboot_type)
reboot_instance.assert_has_calls([call])
def test_init_instance_retries_reboot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_SOFT_REBOOT:
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'SOFT',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING_HARD
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_soft_became_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.SHUTDOWN)
self.assertEqual(task_states.REBOOT_PENDING_HARD,
instance.task_state)
def test_init_instance_retries_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def test_init_instance_retries_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def _test_init_instance_cleans_reboot_state(self, instance):
instance.host = self.compute.host
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save', autospec=True),
mock.patch.object(compute_utils, 'get_nw_info_for_instance')
) as (
_get_power_state,
instance_save,
get_nw_info_for_instance
):
self.compute._init_instance(self.context, instance)
instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def test_init_instance_cleans_image_state_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_cleans_image_state_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_retries_power_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
with mock.patch.object(self.compute, 'stop_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
def test_init_instance_retries_power_on(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
with mock.patch.object(self.compute, 'start_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
def test_init_instance_retries_power_on_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
with mock.patch.object(self.compute, 'start_instance',
return_value=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_init_instance_retries_power_off_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
with mock.patch.object(self.compute, 'stop_instance',
return_value=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_get_instances_on_driver(self):
driver_instances = []
for x in range(10):
driver_instances.append(fake_instance.fake_db_instance())
self.mox.StubOutWithMock(self.compute.driver,
'list_instance_uuids')
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndReturn(
[inst['uuid'] for inst in driver_instances])
db.instance_get_all_by_filters(
self.context,
{'uuid': [inst['uuid'] for
inst in driver_instances]},
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(
driver_instances)
self.mox.ReplayAll()
result = self.compute._get_instances_on_driver(self.context)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
@mock.patch('nova.virt.driver.ComputeDriver.list_instance_uuids')
@mock.patch('nova.db.api.instance_get_all_by_filters')
def test_get_instances_on_driver_empty(self, mock_list, mock_db):
mock_list.return_value = []
result = self.compute._get_instances_on_driver(self.context)
# instance_get_all_by_filters should not be called
self.assertEqual(0, mock_db.call_count)
self.assertEqual([],
[x['uuid'] for x in result])
def test_get_instances_on_driver_fallback(self):
# Test getting instances when driver doesn't support
# 'list_instance_uuids'
self.compute.host = 'host'
filters = {'host': self.compute.host}
self.flags(instance_name_template='inst-%i')
all_instances = []
driver_instances = []
for x in range(10):
instance = fake_instance.fake_db_instance(name='inst-%i' % x,
id=x)
if x % 2:
driver_instances.append(instance)
all_instances.append(instance)
self.mox.StubOutWithMock(self.compute.driver,
'list_instance_uuids')
self.mox.StubOutWithMock(self.compute.driver,
'list_instances')
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndRaise(
NotImplementedError())
self.compute.driver.list_instances().AndReturn(
[inst['name'] for inst in driver_instances])
db.instance_get_all_by_filters(
self.context, filters,
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(all_instances)
self.mox.ReplayAll()
result = self.compute._get_instances_on_driver(self.context, filters)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
def test_instance_usage_audit(self):
instances = [objects.Instance(uuid='foo')]
@classmethod
def fake_task_log(*a, **k):
pass
@classmethod
def fake_get(*a, **k):
return instances
self.flags(instance_usage_audit=True)
self.stubs.Set(objects.TaskLog, 'get', fake_task_log)
self.stubs.Set(objects.InstanceList,
'get_active_by_window_joined', fake_get)
self.stubs.Set(objects.TaskLog, 'begin_task', fake_task_log)
self.stubs.Set(objects.TaskLog, 'end_task', fake_task_log)
self.mox.StubOutWithMock(compute_utils, 'notify_usage_exists')
compute_utils.notify_usage_exists(self.compute.notifier,
self.context, instances[0], ignore_missing_network_data=False)
self.mox.ReplayAll()
self.compute._instance_usage_audit(self.context)
@mock.patch.object(objects.InstanceList, 'get_by_host')
def test_sync_power_states(self, mock_get):
instance = mock.Mock()
mock_get.return_value = [instance]
with mock.patch.object(self.compute._sync_power_pool,
'spawn_n') as mock_spawn:
self.compute._sync_power_states(mock.sentinel.context)
mock_get.assert_called_with(mock.sentinel.context,
self.compute.host, expected_attrs=[],
use_slave=True)
mock_spawn.assert_called_once_with(mock.ANY, instance)
def _get_sync_instance(self, power_state, vm_state, task_state=None,
shutdown_terminate=False):
instance = objects.Instance()
instance.uuid = 'fake-uuid'
instance.power_state = power_state
instance.vm_state = vm_state
instance.host = self.compute.host
instance.task_state = task_state
instance.shutdown_terminate = shutdown_terminate
self.mox.StubOutWithMock(instance, 'refresh')
self.mox.StubOutWithMock(instance, 'save')
return instance
def test_sync_instance_power_state_match(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.RUNNING)
def test_sync_instance_power_state_running_stopped(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
instance.save()
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.SHUTDOWN)
self.assertEqual(instance.power_state, power_state.SHUTDOWN)
def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
stop=True, force=False, shutdown_terminate=False):
instance = self._get_sync_instance(
power_state, vm_state, shutdown_terminate=shutdown_terminate)
instance.refresh(use_slave=False)
instance.save()
self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
if shutdown_terminate:
self.compute.compute_api.delete(self.context, instance)
elif stop:
if force:
self.compute.compute_api.force_stop(self.context, instance)
else:
self.compute.compute_api.stop(self.context, instance)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
driver_power_state)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_sync_instance_power_state_to_stop(self):
for ps in (power_state.SHUTDOWN, power_state.CRASHED,
power_state.SUSPENDED):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
for ps in (power_state.SHUTDOWN, power_state.CRASHED):
self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
force=True)
self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
power_state.RUNNING, force=True)
def test_sync_instance_power_state_to_terminate(self):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
power_state.SHUTDOWN,
force=False, shutdown_terminate=True)
def test_sync_instance_power_state_to_no_stop(self):
for ps in (power_state.PAUSED, power_state.NOSTATE):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
stop=False)
for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
self._test_sync_to_stop(power_state.RUNNING, vs, ps,
stop=False)
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_pending_task(
self, mock_sync_power_state):
with mock.patch.object(self.compute.driver,
'get_info') as mock_get_info:
db_instance = objects.Instance(uuid='fake-uuid',
task_state=task_states.POWERING_OFF)
self.compute._query_driver_power_state_and_sync(self.context,
db_instance)
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
self, mock_sync_power_state):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
db_instance = objects.Instance(uuid='fake-uuid', task_state=None)
self.compute._query_driver_power_state_and_sync(self.context,
db_instance)
mock_get_info.assert_called_once_with(db_instance)
mock_sync_power_state.assert_called_once_with(self.context,
db_instance,
power_state.NOSTATE,
use_slave=True)
def test_run_pending_deletes(self):
self.flags(instance_delete_interval=10)
class FakeInstance(object):
def __init__(self, uuid, name, smd):
self.uuid = uuid
self.name = name
self.system_metadata = smd
self.cleaned = False
def __getitem__(self, name):
return getattr(self, name)
def save(self):
pass
a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
c = FakeInstance('789', 'banana', {})
self.mox.StubOutWithMock(objects.InstanceList,
'get_by_filters')
objects.InstanceList.get_by_filters(
{'read_deleted': 'yes'},
{'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
'cleaned': False},
expected_attrs=['info_cache', 'security_groups',
'system_metadata'],
use_slave=True).AndReturn([a, b, c])
self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(True)
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
self.compute._run_pending_deletes({})
self.assertFalse(a.cleaned)
self.assertEqual('100', a.system_metadata['clean_attempts'])
self.assertTrue(b.cleaned)
self.assertEqual('4', b.system_metadata['clean_attempts'])
self.assertFalse(c.cleaned)
self.assertEqual('1', c.system_metadata['clean_attempts'])
@mock.patch.object(objects.Migration, 'obj_as_admin')
@mock.patch.object(objects.Migration, 'save')
@mock.patch.object(objects.MigrationList, 'get_by_filters')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def _test_cleanup_incomplete_migrations(self, inst_host,
mock_inst_get_by_filters,
mock_migration_get_by_filters,
mock_save, mock_obj_as_admin):
def fake_inst(context, uuid, host):
inst = objects.Instance(context)
inst.uuid = uuid
inst.host = host
return inst
def fake_migration(uuid, status, inst_uuid, src_host, dest_host):
migration = objects.Migration()
migration.uuid = uuid
migration.status = status
migration.instance_uuid = inst_uuid
migration.source_compute = src_host
migration.dest_compute = dest_host
return migration
fake_instances = [fake_inst(self.context, '111', inst_host),
fake_inst(self.context, '222', inst_host)]
fake_migrations = [fake_migration('123', 'error', '111',
'fake-host', 'fake-mini'),
fake_migration('456', 'error', '222',
'fake-host', 'fake-mini')]
mock_migration_get_by_filters.return_value = fake_migrations
mock_inst_get_by_filters.return_value = fake_instances
with mock.patch.object(self.compute.driver, 'delete_instance_files'):
self.compute._cleanup_incomplete_migrations(self.context)
# Ensure that the migration status is set to 'failed' after the
# instance files have been deleted for those instances whose
# instance.host does not match the compute host running this
# periodic task.
for inst in fake_instances:
if inst.host != CONF.host:
for mig in fake_migrations:
if inst.uuid == mig.instance_uuid:
self.assertEqual('failed', mig.status)
def test_cleanup_incomplete_migrations_dest_node(self):
"""Test to ensure instance files are deleted from destination node.
If instance gets deleted during resizing/revert-resizing operation,
in that case instance files gets deleted from instance.host (source
host here), but there is possibility that instance files could be
present on destination node.
This test ensures that `_cleanup_incomplete_migration` periodic
task deletes orphaned instance files from destination compute node.
"""
self.flags(host='fake-mini')
self._test_cleanup_incomplete_migrations('fake-host')
def test_cleanup_incomplete_migrations_source_node(self):
"""Test to ensure instance files are deleted from source node.
If instance gets deleted during resizing/revert-resizing operation,
in that case instance files gets deleted from instance.host (dest
host here), but there is possibility that instance files could be
present on source node.
This test ensures that `_cleanup_incomplete_migration` periodic
task deletes orphaned instance files from source compute node.
"""
self.flags(host='fake-host')
self._test_cleanup_incomplete_migrations('fake-mini')
def test_attach_interface_failure(self):
# Test that the fault methods are invoked when an attach fails
db_instance = fake_instance.fake_db_instance()
f_instance = objects.Instance._from_db_object(self.context,
objects.Instance(),
db_instance)
e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(self.compute.network_api,
'allocate_port_for_instance',
side_effect=e)
@mock.patch.object(self.compute, '_instance_update',
side_effect=lambda *a, **k: {})
def do_test(update, meth, add_fault):
self.assertRaises(exception.InterfaceAttachFailed,
self.compute.attach_interface,
self.context, f_instance, 'net_id', 'port_id',
None)
add_fault.assert_has_calls([
mock.call(self.context, f_instance, e,
mock.ANY)])
do_test()
def test_detach_interface_failure(self):
# Test that the fault methods are invoked when a detach fails
# Build test data that will cause a PortNotFound exception
f_instance = mock.MagicMock()
f_instance.info_cache = mock.MagicMock()
f_instance.info_cache.network_info = []
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(self.compute, '_set_instance_obj_error_state')
def do_test(meth, add_fault):
self.assertRaises(exception.PortNotFound,
self.compute.detach_interface,
self.context, f_instance, 'port_id')
add_fault.assert_has_calls(
[mock.call(self.context, f_instance, mock.ANY, mock.ANY)])
do_test()
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed through to the
# volume_api and that the resulting volume state transitions are correct
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'status': 'detaching',
'size': 1}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'status': 'available',
'size': 2}
def fake_vol_api_roll_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'detaching':
volumes[volume_id]['status'] = 'in-use'
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vdb', 'source_type': 'volume',
'destination_type': 'volume', 'instance_uuid': 'fake',
'connection_info': '{"foo": "bar"}'})
def fake_vol_api_func(context, volume, *args):
self.assertTrue(uuidutils.is_uuid_like(volume))
return {}
def fake_vol_get(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
return volumes[volume_id]
def fake_vol_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_vol_migrate_volume_completion(context, old_volume_id,
new_volume_id, error=False):
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
volumes[old_volume_id]['status'] = 'in-use'
return {'save_volume_id': new_volume_id}
def fake_func_exc(*args, **kwargs):
raise AttributeError # Random exception
def fake_swap_volume(old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
self.assertEqual(resize_to, 2)
def fake_block_device_mapping_update(ctxt, id, updates, legacy):
self.assertEqual(2, updates['volume_size'])
return fake_bdm
self.stubs.Set(self.compute.volume_api, 'roll_detaching',
fake_vol_api_roll_detaching)
self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_vol_api_func)
self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
fake_vol_unreserve)
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
fake_vol_api_func)
self.stubs.Set(db, 'block_device_mapping_get_by_volume_id',
lambda x, y, z: fake_bdm)
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda x: {})
self.stubs.Set(self.compute.driver, 'swap_volume',
fake_swap_volume)
self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
fake_vol_migrate_volume_completion)
self.stubs.Set(db, 'block_device_mapping_update',
fake_block_device_mapping_update)
self.stubs.Set(db,
'instance_fault_create',
lambda x, y:
test_instance_fault.fake_faults['fake-uuid'][0])
self.stubs.Set(self.compute, '_instance_update',
lambda c, u, **k: {})
# Good path
self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
# Error paths
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
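# State transitions implied by the fakes above: on the good path,
# migrate_volume_completion leaves the old volume 'in-use'; on both
# error paths, roll_detaching restores the old volume to 'in-use' and
# unreserve_volume returns the new volume to 'available', so a failed
# swap leaves the volumes where they started.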
@mock.patch.object(compute_utils, 'EventReporter')
def test_check_can_live_migrate_source(self, event_mock):
is_volume_backed = 'volume_backed'
dest_check_data = dict(foo='bar')
db_instance = fake_instance.fake_db_instance()
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
expected_dest_check_data = dict(dest_check_data,
is_volume_backed=is_volume_backed)
self.mox.StubOutWithMock(self.compute.compute_api,
'is_volume_backed_instance')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_source')
self.compute.compute_api.is_volume_backed_instance(
self.context, instance).AndReturn(is_volume_backed)
self.compute._get_instance_block_device_info(
self.context, instance, refresh_conn_info=True
).AndReturn({'block_device_mapping': 'fake'})
self.compute.driver.check_can_live_migrate_source(
self.context, instance, expected_dest_check_data,
{'block_device_mapping': 'fake'})
self.mox.ReplayAll()
self.compute.check_can_live_migrate_source(
self.context, instance=instance,
dest_check_data=dest_check_data)
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_source',
instance.uuid)
@mock.patch.object(compute_utils, 'EventReporter')
def _test_check_can_live_migrate_destination(self, event_mock,
do_raise=False,
has_mig_data=False):
db_instance = fake_instance.fake_db_instance(host='fake-host')
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
instance.host = 'fake-host'
block_migration = 'block_migration'
disk_over_commit = 'disk_over_commit'
src_info = 'src_info'
dest_info = 'dest_info'
dest_check_data = dict(foo='bar')
mig_data = dict(cow='moo')
expected_result = dict(mig_data)
if has_mig_data:
dest_check_data['migrate_data'] = dict(cat='meow')
expected_result.update(cat='meow')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_can_live_migrate_source')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination_cleanup')
self.compute._get_compute_info(self.context,
'fake-host').AndReturn(src_info)
self.compute._get_compute_info(self.context,
CONF.host).AndReturn(dest_info)
self.compute.driver.check_can_live_migrate_destination(
self.context, instance, src_info, dest_info,
block_migration, disk_over_commit).AndReturn(dest_check_data)
mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
self.context, instance, dest_check_data)
if do_raise:
mock_meth.AndRaise(test.TestingException())
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(
self.context, mox.IgnoreArg()).AndReturn(
test_instance_fault.fake_faults['fake-uuid'][0])
else:
mock_meth.AndReturn(mig_data)
self.compute.driver.check_can_live_migrate_destination_cleanup(
self.context, dest_check_data)
self.mox.ReplayAll()
result = self.compute.check_can_live_migrate_destination(
self.context, instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
self.assertEqual(expected_result, result)
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_destination',
instance.uuid)
def test_check_can_live_migrate_destination_success(self):
self._test_check_can_live_migrate_destination()
def test_check_can_live_migrate_destination_success_w_mig_data(self):
self._test_check_can_live_migrate_destination(has_mig_data=True)
def test_check_can_live_migrate_destination_fail(self):
self.assertRaises(
test.TestingException,
self._test_check_can_live_migrate_destination,
do_raise=True)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_prepare_for_instance_event(self, lock_name_mock):
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
self.assertIn('foo', self.compute.instance_events._events)
self.assertIn('test-event',
self.compute.instance_events._events['foo'])
self.assertEqual(
result,
self.compute.instance_events._events['foo']['test-event'])
self.assertTrue(hasattr(result, 'send'))
lock_name_mock.assert_called_once_with(inst_obj)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_pop_instance_event(self, lock_name_mock):
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'network-vif-plugged': event,
}
}
inst_obj = objects.Instance(uuid='foo')
event_obj = objects.InstanceExternalEvent(name='network-vif-plugged',
tag=None)
result = self.compute.instance_events.pop_instance_event(inst_obj,
event_obj)
self.assertEqual(result, event)
lock_name_mock.assert_called_once_with(inst_obj)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_clear_events_for_instance(self, lock_name_mock):
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'test-event': event,
}
}
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events.clear_events_for_instance(
inst_obj)
self.assertEqual(result, {'test-event': event})
lock_name_mock.assert_called_once_with(inst_obj)
def test_instance_events_lock_name(self):
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events._lock_name(inst_obj)
self.assertEqual(result, 'foo-events')
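# The per-instance lock name is simply '<uuid>-events', presumably so
# that event bookkeeping for different instances never contends on a
# single lock.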
def test_prepare_for_instance_event_again(self):
inst_obj = objects.Instance(uuid='foo')
self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
# A second attempt will avoid creating a new list; make sure we
# get the current list
result = self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
self.assertIn('foo', self.compute.instance_events._events)
self.assertIn('test-event',
self.compute.instance_events._events['foo'])
self.assertEqual(
result,
self.compute.instance_events._events['foo']['test-event'])
self.assertTrue(hasattr(result, 'send'))
def test_process_instance_event(self):
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'network-vif-plugged': event,
}
}
inst_obj = objects.Instance(uuid='foo')
event_obj = objects.InstanceExternalEvent(name='network-vif-plugged',
tag=None)
self.compute._process_instance_event(inst_obj, event_obj)
self.assertTrue(event.ready())
self.assertEqual(event_obj, event.wait())
self.assertEqual({}, self.compute.instance_events._events)
def test_process_instance_vif_deleted_event(self):
vif1 = fake_network_cache_model.new_vif()
vif1['id'] = '1'
vif2 = fake_network_cache_model.new_vif()
vif2['id'] = '2'
nw_info = network_model.NetworkInfo([vif1, vif2])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid='uuid')
inst_obj = objects.Instance(id=3, uuid='uuid', info_cache=info_cache)
@mock.patch.object(manager.base_net_api,
'update_instance_cache_with_nw_info')
@mock.patch.object(self.compute.driver, 'detach_interface')
def do_test(detach_interface, update_instance_cache_with_nw_info):
self.compute._process_instance_vif_deleted_event(self.context,
inst_obj,
vif2['id'])
update_instance_cache_with_nw_info.assert_called_once_with(
self.compute.network_api,
self.context,
inst_obj,
nw_info=[vif1])
detach_interface.assert_called_once_with(inst_obj, vif2)
do_test()
def test_external_instance_event(self):
instances = [
objects.Instance(id=1, uuid='uuid1'),
objects.Instance(id=2, uuid='uuid2'),
objects.Instance(id=3, uuid='uuid3')]
events = [
objects.InstanceExternalEvent(name='network-changed',
tag='tag1',
instance_uuid='uuid1'),
objects.InstanceExternalEvent(name='network-vif-plugged',
instance_uuid='uuid2',
tag='tag2'),
objects.InstanceExternalEvent(name='network-vif-deleted',
instance_uuid='uuid3',
tag='tag3')]
@mock.patch.object(self.compute, '_process_instance_vif_deleted_event')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_process_instance_event')
def do_test(_process_instance_event, get_instance_nw_info,
_process_instance_vif_deleted_event):
self.compute.external_instance_event(self.context,
instances, events)
get_instance_nw_info.assert_called_once_with(self.context,
instances[0])
_process_instance_event.assert_called_once_with(instances[1],
events[1])
_process_instance_vif_deleted_event.assert_called_once_with(
self.context, instances[2], events[2].tag)
do_test()
def test_external_instance_event_with_exception(self):
vif1 = fake_network_cache_model.new_vif()
vif1['id'] = '1'
vif2 = fake_network_cache_model.new_vif()
vif2['id'] = '2'
nw_info = network_model.NetworkInfo([vif1, vif2])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid='uuid2')
instances = [
objects.Instance(id=1, uuid='uuid1'),
objects.Instance(id=2, uuid='uuid2', info_cache=info_cache),
objects.Instance(id=3, uuid='uuid3')]
events = [
objects.InstanceExternalEvent(name='network-changed',
tag='tag1',
instance_uuid='uuid1'),
objects.InstanceExternalEvent(name='network-vif-deleted',
instance_uuid='uuid2',
tag='2'),
objects.InstanceExternalEvent(name='network-vif-plugged',
instance_uuid='uuid3',
tag='tag3')]
# Make sure all three events are handled despite the exceptions raised
# while processing the first two
@mock.patch.object(manager.base_net_api,
'update_instance_cache_with_nw_info')
@mock.patch.object(self.compute.driver, 'detach_interface',
side_effect=exception.NovaException)
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
side_effect=exception.InstanceInfoCacheNotFound(
instance_uuid='uuid1'))
@mock.patch.object(self.compute, '_process_instance_event')
def do_test(_process_instance_event, get_instance_nw_info,
detach_interface, update_instance_cache_with_nw_info):
self.compute.external_instance_event(self.context,
instances, events)
get_instance_nw_info.assert_called_once_with(self.context,
instances[0])
update_instance_cache_with_nw_info.assert_called_once_with(
self.compute.network_api,
self.context,
instances[1],
nw_info=[vif1])
detach_interface.assert_called_once_with(instances[1], vif2)
_process_instance_event.assert_called_once_with(instances[2],
events[2])
do_test()
def test_cancel_all_events(self):
inst = objects.Instance(uuid='uuid')
fake_eventlet_event = mock.MagicMock()
self.compute.instance_events._events = {
inst.uuid: {
'network-vif-plugged-bar': fake_eventlet_event,
}
}
self.compute.instance_events.cancel_all_events()
self.assertTrue(fake_eventlet_event.send.called)
event = fake_eventlet_event.send.call_args_list[0][0][0]
self.assertEqual('network-vif-plugged', event.name)
self.assertEqual('bar', event.tag)
self.assertEqual('failed', event.status)
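# Note the key format: 'network-vif-plugged-bar' is split back into an
# event name ('network-vif-plugged') and a tag ('bar'), and each waiter
# is released with a synthetic event whose status is 'failed'.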
def test_cleanup_cancels_all_events(self):
with mock.patch.object(self.compute, 'instance_events') as mock_ev:
self.compute.cleanup_host()
mock_ev.cancel_all_events.assert_called_once_with()
def test_cleanup_blocks_new_events(self):
instance = objects.Instance(uuid='uuid')
self.compute.instance_events.cancel_all_events()
callback = mock.MagicMock()
body = mock.MagicMock()
with self.compute.virtapi.wait_for_instance_event(
instance, ['network-vif-plugged-bar'],
error_callback=callback):
body()
self.assertTrue(body.called)
callback.assert_called_once_with('network-vif-plugged-bar', instance)
def test_pop_events_fails_gracefully(self):
inst = objects.Instance(uuid='uuid')
event = mock.MagicMock()
self.compute.instance_events._events = None
self.assertIsNone(
self.compute.instance_events.pop_instance_event(inst, event))
def test_clear_events_fails_gracefully(self):
inst = objects.Instance(uuid='uuid')
self.compute.instance_events._events = None
self.assertEqual(
self.compute.instance_events.clear_events_for_instance(inst), {})
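# pop_instance_event and clear_events_for_instance are expected to
# degrade gracefully once _events has been set to None (i.e. after
# cancel_all_events has run), returning None and {} respectively rather
# than raising.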
def test_retry_reboot_pending_soft(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
instance.vm_state = vm_states.ACTIVE
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'SOFT')
def test_retry_reboot_pending_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING_HARD
instance.vm_state = vm_states.ACTIVE
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_soft_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.NOSTATE):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_hard_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED_HARD
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.NOSTATE):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_hard_on(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED_HARD
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_no_reboot(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = 'bar'
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
self.context, instance)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
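# Decision matrix exercised by the _retry_reboot tests above:
#   REBOOT_PENDING      + running     -> retry, SOFT
#   REBOOT_PENDING_HARD + running     -> retry, HARD
#   REBOOT_STARTED      + powered off -> retry, HARD
#   REBOOT_STARTED_HARD + powered off -> retry, HARD
#   REBOOT_STARTED_HARD + running     -> no retry
#   any other task_state + running    -> no retry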
@mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
@mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
@mock.patch('nova.objects.Instance._from_db_object')
def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
bdm = mock.sentinel.bdm
inst_obj = mock.sentinel.inst_obj
bdm_get.return_value = bdm
inst_from_db.return_value = inst_obj
with mock.patch.object(self.compute, 'volume_api'):
self.compute.remove_volume_connection(self.context, 'vol',
inst_obj)
detach.assert_called_once_with(self.context, inst_obj, bdm)
def test_detach_volume(self):
self._test_detach_volume()
def test_detach_volume_not_destroy_bdm(self):
self._test_detach_volume(destroy_bdm=False)
@mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
@mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
def _test_detach_volume(self, notify_inst_usage, detach,
bdm_get, destroy_bdm=True):
volume_id = '123'
inst_obj = mock.sentinel.inst_obj
bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
bdm.device_name = 'vdb'
bdm_get.return_value = bdm
with mock.patch.object(self.compute, 'volume_api') as volume_api:
with mock.patch.object(self.compute, 'driver') as driver:
connector_sentinel = mock.sentinel.connector
driver.get_volume_connector.return_value = connector_sentinel
self.compute._detach_volume(self.context, volume_id,
inst_obj,
destroy_bdm=destroy_bdm)
detach.assert_called_once_with(self.context, inst_obj, bdm)
driver.get_volume_connector.assert_called_once_with(inst_obj)
volume_api.terminate_connection.assert_called_once_with(
self.context, volume_id, connector_sentinel)
volume_api.detach.assert_called_once_with(mock.ANY, volume_id)
notify_inst_usage.assert_called_once_with(
self.context, inst_obj, "volume.detach",
extra_usage_info={'volume_id': volume_id}
)
if destroy_bdm:
bdm.destroy.assert_called_once_with()
else:
self.assertFalse(bdm.destroy.called)
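# Calls verified above (each exactly once, order not asserted): the
# driver-level detach, terminate_connection with the connector returned
# by get_volume_connector, volume_api.detach, a 'volume.detach' usage
# notification, and bdm.destroy() only when destroy_bdm is True.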
def _test_rescue(self, clean_shutdown=True):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE)
fake_nw_info = network_model.NetworkInfo()
rescue_image_meta = {'id': 'fake', 'name': 'fake'}
with contextlib.nested(
mock.patch.object(self.context, 'elevated',
return_value=self.context),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=fake_nw_info),
mock.patch.object(self.compute, '_get_rescue_image',
return_value=rescue_image_meta),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute, '_power_off_instance'),
mock.patch.object(self.compute.driver, 'rescue'),
mock.patch.object(compute_utils, 'notify_usage_exists'),
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save')
) as (
elevated_context, get_nw_info,
get_rescue_image, notify_instance_usage, power_off_instance,
driver_rescue, notify_usage_exists, get_power_state, instance_save
):
self.compute.rescue_instance(
self.context, instance, rescue_password='verybadpass',
rescue_image_ref=None, clean_shutdown=clean_shutdown)
# assert the field values on the instance object
self.assertEqual(vm_states.RESCUED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertEqual(power_state.RUNNING, instance.power_state)
self.assertIsNotNone(instance.launched_at)
# assert our mock calls
get_nw_info.assert_called_once_with(self.context, instance)
get_rescue_image.assert_called_once_with(
self.context, instance, None)
extra_usage_info = {'rescue_image_name': 'fake'}
notify_calls = [
mock.call(self.context, instance, "rescue.start",
extra_usage_info=extra_usage_info,
network_info=fake_nw_info),
mock.call(self.context, instance, "rescue.end",
extra_usage_info=extra_usage_info,
network_info=fake_nw_info)
]
notify_instance_usage.assert_has_calls(notify_calls)
power_off_instance.assert_called_once_with(self.context, instance,
clean_shutdown)
driver_rescue.assert_called_once_with(
self.context, instance, fake_nw_info, rescue_image_meta,
'verybadpass')
notify_usage_exists.assert_called_once_with(self.compute.notifier,
self.context, instance, current_period=True)
instance_save.assert_called_once_with(
expected_task_state=task_states.RESCUING)
def test_rescue(self):
self._test_rescue()
def test_rescue_forced_shutdown(self):
self._test_rescue(clean_shutdown=False)
def test_unrescue(self):
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.RESCUED)
fake_nw_info = network_model.NetworkInfo()
with contextlib.nested(
mock.patch.object(self.context, 'elevated',
return_value=self.context),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=fake_nw_info),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute.driver, 'unrescue'),
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save')
) as (
elevated_context, get_nw_info,
notify_instance_usage, driver_unrescue, get_power_state,
instance_save
):
self.compute.unrescue_instance(self.context, instance)
# assert the field values on the instance object
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertEqual(power_state.RUNNING, instance.power_state)
# assert our mock calls
get_nw_info.assert_called_once_with(self.context, instance)
notify_calls = [
mock.call(self.context, instance, "unrescue.start",
network_info=fake_nw_info),
mock.call(self.context, instance, "unrescue.end",
network_info=fake_nw_info)
]
notify_instance_usage.assert_has_calls(notify_calls)
driver_unrescue.assert_called_once_with(instance, fake_nw_info)
instance_save.assert_called_once_with(
expected_task_state=task_states.UNRESCUING)
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch.object(objects.Instance, 'save')
@mock.patch('nova.utils.generate_password', return_value='fake-pass')
def test_set_admin_password(self, gen_password_mock,
instance_save_mock, power_state_mock):
# Ensure instance can have its admin password set.
instance = fake_instance.fake_instance_obj(
self.context,
vm_state=vm_states.ACTIVE,
task_state=task_states.UPDATING_PASSWORD)
@mock.patch.object(self.context, 'elevated', return_value=self.context)
@mock.patch.object(self.compute.driver, 'set_admin_password')
def do_test(driver_mock, elevated_mock):
# call the manager method
self.compute.set_admin_password(self.context, instance, None)
# make our assertions
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
power_state_mock.assert_called_once_with(self.context, instance)
driver_mock.assert_called_once_with(instance, 'fake-pass')
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
do_test()
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.NOSTATE)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_set_admin_password_bad_state(self, add_fault_mock,
instance_save_mock,
update_mock,
power_state_mock):
# Test setting password while instance is rebuilding.
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self.context, 'elevated',
return_value=self.context):
# call the manager method
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context, instance, None)
# make our assertions
power_state_mock.assert_called_once_with(self.context, instance)
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
add_fault_mock.assert_called_once_with(
self.context, instance, mock.ANY, mock.ANY)
@mock.patch('nova.utils.generate_password', return_value='fake-pass')
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def _do_test_set_admin_password_driver_error(self, exc,
expected_vm_state,
expected_task_state,
expected_exception,
add_fault_mock,
instance_save_mock,
update_mock,
power_state_mock,
gen_password_mock):
# Ensure expected exception is raised if set_admin_password fails.
instance = fake_instance.fake_instance_obj(
self.context,
vm_state=vm_states.ACTIVE,
task_state=task_states.UPDATING_PASSWORD)
@mock.patch.object(self.context, 'elevated', return_value=self.context)
@mock.patch.object(self.compute.driver, 'set_admin_password',
side_effect=exc)
def do_test(driver_mock, elevated_mock):
# an error raised from the driver should not reveal internal
# information, so a new error is raised instead
self.assertRaises(expected_exception,
self.compute.set_admin_password,
self.context,
instance=instance,
new_pass=None)
if expected_exception == NotImplementedError:
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
else:
# setting the instance to error state
instance_save_mock.assert_called_once_with()
self.assertEqual(expected_vm_state, instance.vm_state)
# check revert_task_state decorator
update_mock.assert_called_once_with(
self.context, instance, task_state=expected_task_state)
# check wrap_instance_fault decorator
add_fault_mock.assert_called_once_with(
self.context, instance, mock.ANY, mock.ANY)
do_test()
def test_set_admin_password_driver_not_authorized(self):
# Ensure expected exception is raised if set_admin_password not
# authorized.
exc = exception.Forbidden('Internal error')
expected_exception = exception.InstancePasswordSetFailed
self._do_test_set_admin_password_driver_error(
exc, vm_states.ERROR, None, expected_exception)
def test_set_admin_password_driver_not_implemented(self):
# Ensure expected exception is raised if set_admin_password not
# implemented by driver.
exc = NotImplementedError()
expected_exception = NotImplementedError
self._do_test_set_admin_password_driver_error(
exc, vm_states.ACTIVE, None, expected_exception)
def test_destroy_evacuated_instances(self):
our_host = self.compute.host
instance_1 = objects.Instance(self.context)
instance_1.uuid = 'foo'
instance_1.task_state = None
instance_1.vm_state = vm_states.ACTIVE
instance_1.host = 'not-' + our_host
instance_2 = objects.Instance(self.context)
instance_2.uuid = 'bar'
instance_2.task_state = None
instance_2.vm_state = vm_states.ACTIVE
instance_2.host = 'not-' + our_host
# Only instance 2 has a migration record
migration = objects.Migration(instance_uuid=instance_2.uuid)
with contextlib.nested(
mock.patch.object(self.compute, '_get_instances_on_driver',
return_value=[instance_1,
instance_2]),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=None),
mock.patch.object(self.compute, '_get_instance_block_device_info',
return_value={}),
mock.patch.object(self.compute, '_is_instance_storage_shared',
return_value=False),
mock.patch.object(self.compute.driver, 'destroy'),
mock.patch('nova.objects.MigrationList.get_by_filters'),
mock.patch('nova.objects.Migration.save')
) as (_get_instances_on_driver, get_instance_nw_info,
_get_instance_block_device_info, _is_instance_storage_shared,
destroy, migration_list, migration_save):
migration_list.return_value = [migration]
self.compute._destroy_evacuated_instances(self.context)
# Only instance 2 should be deleted. Instance 1 is still running
# here, but no migration from our host exists, so ignore it
destroy.assert_called_once_with(self.context, instance_2, None,
{}, True)
@mock.patch('nova.compute.manager.ComputeManager.'
'_destroy_evacuated_instances')
@mock.patch('nova.compute.manager.LOG')
def test_init_host_foreign_instance(self, mock_log, mock_destroy):
inst = mock.MagicMock()
inst.host = self.compute.host + '-alt'
self.compute._init_instance(mock.sentinel.context, inst)
self.assertFalse(inst.save.called)
self.assertTrue(mock_log.warning.called)
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
inst_update_mock):
instance = fake_instance.fake_instance_obj(self.context)
def do_test():
with self.compute._error_out_instance_on_exception(
self.context, instance, instance_state=vm_states.STOPPED):
raise NotImplementedError('test')
self.assertRaises(NotImplementedError, do_test)
inst_update_mock.assert_called_once_with(
self.context, instance,
vm_state=vm_states.STOPPED, task_state=None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_inst_fault_rollback(self,
inst_update_mock):
instance = fake_instance.fake_instance_obj(self.context)
def do_test():
with self.compute._error_out_instance_on_exception(self.context,
instance):
raise exception.InstanceFaultRollback(
inner_exception=test.TestingException('test'))
self.assertRaises(test.TestingException, do_test)
inst_update_mock.assert_called_once_with(
self.context, instance,
vm_state=vm_states.ACTIVE, task_state=None)
@mock.patch('nova.compute.manager.ComputeManager.'
'_set_instance_obj_error_state')
def test_error_out_instance_on_exception_unknown_with_quotas(self,
set_error):
instance = fake_instance.fake_instance_obj(self.context)
quotas = mock.create_autospec(objects.Quotas, spec_set=True)
def do_test():
with self.compute._error_out_instance_on_exception(
self.context, instance, quotas):
raise test.TestingException('test')
self.assertRaises(test.TestingException, do_test)
self.assertEqual(1, len(quotas.method_calls))
self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
set_error.assert_called_once_with(self.context, instance)
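# With an unknown exception and quotas supplied, the context manager is
# expected to roll back the reservation exactly once (rollback() is the
# only recorded method call) and to push the instance into ERROR state.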
def test_cleanup_volumes(self):
instance = fake_instance.fake_instance_obj(self.context)
bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': False})
bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_do_not_delete_dict, bdm_delete_dict])
with mock.patch.object(self.compute.volume_api,
'delete') as volume_delete:
self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
volume_delete.assert_called_once_with(self.context,
bdms[1].volume_id)
def test_cleanup_volumes_exception_do_not_raise(self):
instance = fake_instance.fake_instance_obj(self.context)
bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': True})
bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_dict1, bdm_dict2])
with mock.patch.object(self.compute.volume_api,
'delete',
side_effect=[test.TestingException(), None]) as volume_delete:
self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
raise_exc=False)
calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
self.assertEqual(calls, volume_delete.call_args_list)
def test_cleanup_volumes_exception_raise(self):
instance = fake_instance.fake_instance_obj(self.context)
bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': True})
bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_dict1, bdm_dict2])
with mock.patch.object(self.compute.volume_api,
'delete',
side_effect=[test.TestingException(), None]) as volume_delete:
self.assertRaises(test.TestingException,
self.compute._cleanup_volumes, self.context, instance.uuid,
bdms)
calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
self.assertEqual(calls, volume_delete.call_args_list)
def test_stop_instance_task_state_none_power_state_shutdown(self):
# Tests that stop_instance doesn't puke when the instance power_state
# is shutdown and the task_state is None.
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE,
task_state=None, power_state=power_state.SHUTDOWN)
@mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.SHUTDOWN)
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, '_power_off_instance')
@mock.patch.object(instance, 'save')
def do_test(save_mock, power_off_mock, notify_mock, get_state_mock):
# run the code
self.compute.stop_instance(self.context, instance, True)
# assert the calls
self.assertEqual(2, get_state_mock.call_count)
notify_mock.assert_has_calls([
mock.call(self.context, instance, 'power_off.start'),
mock.call(self.context, instance, 'power_off.end')
])
power_off_mock.assert_called_once_with(
self.context, instance, True)
save_mock.assert_called_once_with(
expected_task_state=[task_states.POWERING_OFF, None])
self.assertEqual(power_state.SHUTDOWN, instance.power_state)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.STOPPED, instance.vm_state)
do_test()
def test_reset_network_driver_not_implemented(self):
instance = fake_instance.fake_instance_obj(self.context)
@mock.patch.object(self.compute.driver, 'reset_network',
side_effect=NotImplementedError())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def do_test(mock_add_fault, mock_reset):
self.assertRaises(messaging.ExpectedException,
self.compute.reset_network,
self.context,
instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.reset_network,
self.context,
instance)
do_test()
def test_rebuild_default_impl(self):
def _detach(context, bdms):
# NOTE(rpodolyaka): check that the instance has been powered off by
# the time we detach block devices; the exact call arguments are
# checked below
self.assertTrue(mock_power_off.called)
self.assertFalse(mock_destroy.called)
def _attach(context, instance, bdms, do_check_attach=True):
return {'block_device_mapping': 'shared_block_storage'}
def _spawn(context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self.assertEqual(block_device_info['block_device_mapping'],
'shared_block_storage')
with contextlib.nested(
mock.patch.object(self.compute.driver, 'destroy',
return_value=None),
mock.patch.object(self.compute.driver, 'spawn',
side_effect=_spawn),
mock.patch.object(objects.Instance, 'save',
return_value=None),
mock.patch.object(self.compute, '_power_off_instance',
return_value=None)
) as (
mock_destroy,
mock_spawn,
mock_save,
mock_power_off
):
instance = fake_instance.fake_instance_obj(self.context)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
self.compute._rebuild_default_impl(self.context,
instance,
None,
[],
admin_password='new_pass',
bdms=[],
detach_block_devices=_detach,
attach_block_devices=_attach,
network_info=None,
recreate=False,
block_device_info=None,
preserve_ephemeral=False)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
network_info=None, block_device_info=None)
mock_power_off.assert_called_once_with(
self.context, instance, clean_shutdown=True)
@mock.patch.object(utils, 'last_completed_audit_period',
return_value=(0, 0))
@mock.patch.object(time, 'time', side_effect=[10, 20, 21])
@mock.patch.object(objects.InstanceList, 'get_by_host', return_value=[])
@mock.patch.object(objects.BandwidthUsage, 'get_by_instance_uuid_and_mac')
@mock.patch.object(db, 'bw_usage_update')
def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac,
get_by_host, time, last_completed_audit):
bw_counters = [{'uuid': 'fake-uuid', 'mac_address': 'fake-mac',
'bw_in': 1, 'bw_out': 2}]
usage = objects.BandwidthUsage()
usage.bw_in = 3
usage.bw_out = 4
usage.last_ctr_in = 0
usage.last_ctr_out = 0
self.flags(bandwidth_poll_interval=1)
get_by_uuid_mac.return_value = usage
_time = timeutils.utcnow()
bw_usage_update.return_value = {'uuid': '', 'mac': '',
'start_period': _time, 'last_refreshed': _time, 'bw_in': 0,
'bw_out': 0, 'last_ctr_in': 0, 'last_ctr_out': 0, 'deleted': 0,
'created_at': _time, 'updated_at': _time, 'deleted_at': _time}
with mock.patch.object(self.compute.driver,
'get_all_bw_counters', return_value=bw_counters):
self.compute._poll_bandwidth_usage(self.context)
get_by_uuid_mac.assert_called_once_with(self.context, 'fake-uuid',
'fake-mac', start_period=0, use_slave=True)
# NOTE(sdague): bw_usage_update happens at some time in
# the future, so the exact value of last_refreshed is irrelevant.
bw_usage_update.assert_called_once_with(self.context, 'fake-uuid',
'fake-mac', 0, 4, 6, 1, 2,
last_refreshed=mock.ANY,
update_cells=False)
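# Worked numbers behind the assertion: the cached usage (bw_in=3,
# bw_out=4) plus the counter deltas (1 - 0 and 2 - 0) yield the 4 and 6
# passed to bw_usage_update, while 1 and 2 become the new last counters.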
def test_reverts_task_state_instance_not_found(self):
# Tests that the reverts_task_state decorator in the compute manager
# will not trace when an InstanceNotFound is raised.
instance = objects.Instance(uuid='fake')
instance_update_mock = mock.Mock(
side_effect=exception.InstanceNotFound(instance_id=instance.uuid))
self.compute._instance_update = instance_update_mock
log_mock = mock.Mock()
manager.LOG = log_mock
@manager.reverts_task_state
def fake_function(self, context, instance):
raise test.TestingException()
self.assertRaises(test.TestingException, fake_function,
self, self.context, instance)
self.assertFalse(log_mock.called)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'update_instance_info')
def test_update_scheduler_instance_info(self, mock_update):
instance = objects.Instance(uuid='fake')
self.compute._update_scheduler_instance_info(self.context, instance)
self.assertEqual(mock_update.call_count, 1)
args = mock_update.call_args[0]
self.assertNotEqual(args[0], self.context)
self.assertIsInstance(args[0], self.context.__class__)
self.assertEqual(args[1], self.compute.host)
# Send a single instance; check that the method converts it to an
# InstanceList
self.assertIsInstance(args[2], objects.InstanceList)
self.assertEqual(args[2].objects[0], instance)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'delete_instance_info')
def test_delete_scheduler_instance_info(self, mock_delete):
self.compute._delete_scheduler_instance_info(self.context,
mock.sentinel.inst_uuid)
self.assertEqual(mock_delete.call_count, 1)
args = mock_delete.call_args[0]
self.assertNotEqual(args[0], self.context)
self.assertIsInstance(args[0], self.context.__class__)
self.assertEqual(args[1], self.compute.host)
self.assertEqual(args[2], mock.sentinel.inst_uuid)
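# Both the update and delete paths hand the scheduler client a different
# context object of the same class as the request context (presumably an
# elevated copy), never the caller's context itself.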
@mock.patch.object(nova.context.RequestContext, 'elevated')
@mock.patch.object(nova.objects.InstanceList, 'get_by_host')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'sync_instance_info')
def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host,
mock_elevated):
inst1 = objects.Instance(uuid='fake1')
inst2 = objects.Instance(uuid='fake2')
inst3 = objects.Instance(uuid='fake3')
exp_uuids = [inst.uuid for inst in [inst1, inst2, inst3]]
mock_get_by_host.return_value = objects.InstanceList(
objects=[inst1, inst2, inst3])
fake_elevated = context.get_admin_context()
mock_elevated.return_value = fake_elevated
self.compute._sync_scheduler_instance_info(self.context)
mock_get_by_host.assert_called_once_with(
fake_elevated, self.compute.host, expected_attrs=[],
use_slave=True)
mock_sync.assert_called_once_with(fake_elevated, self.compute.host,
exp_uuids)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'sync_instance_info')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'delete_instance_info')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'update_instance_info')
def test_scheduler_info_updates_off(self, mock_update, mock_delete,
mock_sync):
mgr = self.compute
mgr.send_instance_updates = False
mgr._update_scheduler_instance_info(self.context,
mock.sentinel.instance)
mgr._delete_scheduler_instance_info(self.context,
mock.sentinel.instance_uuid)
mgr._sync_scheduler_instance_info(self.context)
# None of the calls should have been made
self.assertFalse(mock_update.called)
self.assertFalse(mock_delete.called)
self.assertFalse(mock_sync.called)
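# send_instance_updates acts as a single kill switch: when it is False,
# none of the update/delete/sync paths reach the scheduler client.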
def test_refresh_instance_security_rules_takes_non_object(self):
inst = fake_instance.fake_db_instance()
with mock.patch.object(self.compute.driver,
'refresh_instance_security_rules') as mock_r:
self.compute.refresh_instance_security_rules(self.context, inst)
self.assertIsInstance(mock_r.call_args_list[0][0][0],
objects.Instance)
def test_set_instance_obj_error_state_with_clean_task_state(self):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance,
clean_task_state=True)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_set_instance_obj_error_state_by_default(self):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertEqual(task_states.SPAWNING, instance.task_state)
@mock.patch.object(objects.Instance, 'save')
def test_instance_update(self, mock_save):
instance = objects.Instance(task_state=task_states.SCHEDULING,
vm_state=vm_states.BUILDING)
updates = {'task_state': None, 'vm_state': vm_states.ERROR}
with mock.patch.object(self.compute,
'_update_resource_tracker') as mock_rt:
self.compute._instance_update(self.context, instance, **updates)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
mock_save.assert_called_once_with()
mock_rt.assert_called_once_with(self.context, instance)
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerBuildInstanceTestCase, self).setUp()
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.admin_pass = 'pass'
self.injected_files = []
self.image = {}
self.node = 'fake-node'
self.limits = {}
self.requested_networks = []
self.security_groups = []
self.block_device_mapping = []
self.filter_properties = {'retry': {'num_attempts': 1,
'hosts': [[self.compute.host,
'fake-node']]}}
def fake_network_info():
return network_model.NetworkInfo([{'address': '1.2.3.4'}])
self.network_info = network_model.NetworkInfoAsyncWrapper(
fake_network_info)
self.block_device_info = self.compute._prep_block_device(context,
self.instance, self.block_device_mapping)
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, self.node)
self.compute._resource_tracker_dict[self.node] = fake_rt
def _do_build_instance_update(self, reschedule_update=False):
self.mox.StubOutWithMock(self.instance, 'save')
self.instance.save(
expected_task_state=(task_states.SCHEDULING, None)).AndReturn(
self.instance)
if reschedule_update:
self.instance.save().AndReturn(self.instance)
def _build_and_run_instance_update(self):
self.mox.StubOutWithMock(self.instance, 'save')
self._build_resources_instance_update(stub=False)
self.instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance)
def _build_resources_instance_update(self, stub=True):
if stub:
self.mox.StubOutWithMock(self.instance, 'save')
self.instance.save().AndReturn(self.instance)
def _notify_about_instance_usage(self, event, stub=True, **kwargs):
if stub:
self.mox.StubOutWithMock(self.compute,
'_notify_about_instance_usage')
self.compute._notify_about_instance_usage(self.context, self.instance,
event, **kwargs)
def _instance_action_events(self):
self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start')
self.mox.StubOutWithMock(objects.InstanceActionEvent,
'event_finish_with_failure')
objects.InstanceActionEvent.event_start(
self.context, self.instance.uuid, mox.IgnoreArg(),
want_result=False)
objects.InstanceActionEvent.event_finish_with_failure(
self.context, self.instance.uuid, mox.IgnoreArg(),
exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
want_result=False)
@staticmethod
def _assert_build_instance_hook_called(mock_hooks, result):
# NOTE(coreywright): we want to test the return value of
# _do_build_and_run_instance, but it doesn't bubble all the way up, so
# mock the hooking, which allows us to test that too, though a little
# too intimately
mock_hooks.setdefault().run_post.assert_called_once_with(
'build_instance', result, mock.ANY, mock.ANY, f=None)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_build_and_run_instance_called_with_proper_args(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.ACTIVE)
# This test ensures that, when an icehouse-compatible RPC call is sent
# to a juno compute node, the NetworkRequest object can be loaded from
# a three-item tuple.
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
@mock.patch('nova.utils.spawn_n')
def test_build_and_run_instance_with_icehouse_requested_network(
self, mock_spawn, mock_build_and_run, mock_save):
fake_server_actions.stub_out_action_events(self.stubs)
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
mock_save.return_value = self.instance
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=[objects.NetworkRequest(
network_id='fake_network_id',
address='10.0.0.1',
port_id='fake_port_id')],
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
requested_network = mock_build_and_run.call_args[0][5][0]
self.assertEqual('fake_network_id', requested_network.network_id)
self.assertEqual('10.0.0.1', str(requested_network.address))
self.assertEqual('fake_port_id', requested_network.port_id)
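# The requested network is recovered from the sixth positional argument
# of _build_and_run_instance (call_args[0][5][0]) and must round-trip
# its network_id, address, and port_id fields intact.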
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_build_abort_exception(self, mock_spawn, mock_hooks):
def fake_spawn(f, *args, **kwargs):
# NOTE(danms): Simulate the detached nature of spawn so that
# we confirm that the inner task has the fault logic
try:
return f(*args, **kwargs)
except Exception:
pass
mock_spawn.side_effect = fake_spawn
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.BuildAbortException(reason='',
instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute._cleanup_volumes(self.context, self.instance.uuid,
self.block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(self.context,
self.instance, mox.IgnoreArg(), mox.IgnoreArg())
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute._set_instance_obj_error_state(self.context, self.instance,
clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception(self, mock_spawn, mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.network_api.cleanup_instance_network_on_host(self.context,
self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
def test_rescheduled_exception_with_non_ascii_exception(self):
exc = exception.NovaException(u's\xe9quence')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.RescheduledException,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
@mock.patch.object(manager.ComputeManager, '_build_and_run_instance')
@mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances')
@mock.patch.object(network_api.API, 'cleanup_instance_network_on_host')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceActionEvent, 'event_start')
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
@mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance')
def test_rescheduled_exception_with_network_allocated(self,
mock_macs_for_instance, mock_event_finish,
mock_event_start, mock_ins_save, mock_cleanup_network,
mock_build_ins, mock_build_and_run):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
system_metadata={'network_allocated': 'True'},
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
mock_ins_save.return_value = instance
mock_macs_for_instance.return_value = []
mock_build_and_run.side_effect = exception.RescheduledException(
reason='', instance_uuid=self.instance.uuid)
self.compute._do_build_and_run_instance(self.context, instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
mock_build_and_run.assert_called_once_with(self.context,
instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
mock_cleanup_network.assert_called_once_with(
self.context, instance, self.compute.host)
mock_build_ins.assert_called_once_with(self.context,
[instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_without_retry(self, mock_spawn, mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
{}).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
mox.IgnoreArg(), mox.IgnoreArg())
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute._set_instance_obj_error_state(self.context, self.instance,
clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties={},
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_do_not_deallocate_network(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute.driver,
'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.driver.deallocate_networks_on_reschedule(
self.instance).AndReturn(False)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_deallocate_network(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute.driver,
'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.driver.deallocate_networks_on_reschedule(
self.instance).AndReturn(True)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
def _test_build_and_run_exceptions(self, exc, set_error=False,
cleanup_volumes=False, nil_out_host_and_node=False):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(exc)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
if cleanup_volumes:
self.compute._cleanup_volumes(self.context, self.instance.uuid,
self.block_device_mapping, raise_exc=False)
if nil_out_host_and_node:
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.compute._nil_out_instance_obj_host_and_node(self.instance)
if set_error:
self.mox.StubOutWithMock(self.compute,
'_set_instance_obj_error_state')
self.mox.StubOutWithMock(compute_utils,
'add_instance_fault_from_exc')
compute_utils.add_instance_fault_from_exc(self.context,
self.instance, mox.IgnoreArg(), mox.IgnoreArg())
self.compute._set_instance_obj_error_state(self.context,
self.instance, clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
with contextlib.nested(
mock.patch('nova.utils.spawn_n'),
mock.patch('nova.hooks._HOOKS')
) as (
mock_spawn,
mock_hooks
):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
def test_build_and_run_notfound_exception(self):
self._test_build_and_run_exceptions(exception.InstanceNotFound(
instance_id=''))
def test_build_and_run_unexpecteddeleting_exception(self):
self._test_build_and_run_exceptions(
exception.UnexpectedDeletingTaskStateError(
instance_uuid='fake_uuid', expected={}, actual={}))
def test_build_and_run_buildabort_exception(self):
self._test_build_and_run_exceptions(
exception.BuildAbortException(instance_uuid='', reason=''),
set_error=True, cleanup_volumes=True, nil_out_host_and_node=True)
def test_build_and_run_unhandled_exception(self):
self._test_build_and_run_exceptions(test.TestingException(),
set_error=True, cleanup_volumes=True,
nil_out_host_and_node=True)
def test_instance_not_found(self):
exc = exception.InstanceNotFound(instance_id=1)
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.end',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.InstanceNotFound,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
def test_reschedule_on_exception(self):
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
exc = test.TestingException()
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.RescheduledException,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
def test_spawn_network_alloc_failure(self):
# Because network allocation is asynchronous, failures may not present
# themselves until the virt spawn method is called.
self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks())
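    # Illustrative sketch (added for clarity; not part of the original test
    # file): the comment above relies on nova returning an asynchronous
    # network-info wrapper, so an allocation failure surfaces only when the
    # result is first consumed, typically inside driver.spawn().  A minimal
    # object with that deferred-failure contract looks like this:
    class _DeferredAllocSketch(object):
        """Toy stand-in: runs allocate() eagerly but raises lazily."""
        def __init__(self, allocate):
            try:
                self._result, self._error = allocate(), None
            except Exception as error:   # capture the failure now ...
                self._result, self._error = None, error
        def wait(self):
            if self._error is not None:  # ... and raise it on first access
                raise self._error
            return self._result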
def test_build_and_run_no_more_fixedips_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.NoMoreFixedIps("error messge"))
def test_build_and_run_flavor_disk_smaller_image_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorDiskSmallerThanImage(
flavor_size=0, image_size=1))
def test_build_and_run_flavor_disk_smaller_min_disk(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorDiskSmallerThanMinDisk(
flavor_size=0, image_min_disk=1))
def test_build_and_run_flavor_memory_too_small_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorMemoryTooSmall())
def test_build_and_run_image_not_active_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.ImageNotActive(image_id=self.image.get('id')))
def test_build_and_run_image_unacceptable_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.ImageUnacceptable(image_id=self.image.get('id'),
reason=""))
def _test_build_and_run_spawn_exceptions(self, exc):
with contextlib.nested(
mock.patch.object(self.compute.driver, 'spawn',
side_effect=exc),
mock.patch.object(self.instance, 'save',
side_effect=[self.instance, self.instance, self.instance]),
mock.patch.object(self.compute,
'_build_networks_for_instance',
return_value=network_model.NetworkInfo()),
mock.patch.object(self.compute,
'_notify_about_instance_usage'),
mock.patch.object(self.compute,
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
_validate_instance_group_policy.assert_called_once_with(
self.context, self.instance, self.filter_properties)
_build_networks_for_instance.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
_notify_about_instance_usage.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
mock.call(self.context, self.instance, 'create.error',
fault=exc)])
save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
spawn.assert_has_calls([mock.call(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info)])
_shutdown_instance.assert_called_once_with(self.context,
self.instance, self.block_device_mapping,
self.requested_networks, try_deallocate_networks=True)
@mock.patch('nova.utils.spawn_n')
def test_reschedule_on_resources_unavailable(self, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
reason = 'resource unavailable'
exc = exception.ComputeResourcesUnavailable(reason=reason)
class FakeResourceTracker(object):
def instance_claim(self, context, instance, limits):
raise exc
self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.compute._get_resource_tracker(self.node).AndReturn(
FakeResourceTracker())
self._do_build_instance_update(reschedule_update=True)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
def test_build_resources_buildabort_reraise(self):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
self.mox.StubOutWithMock(self.compute, '_build_resources')
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups, self.image,
self.block_device_mapping).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance,
self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping,
self.node, self.limits, self.filter_properties)
mock_save.assert_called_once_with()
def test_build_resources_reraises_on_failed_bdm_prep(self):
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self._build_resources_instance_update()
self.compute._prep_block_device(self.context, self.instance,
self.block_device_mapping).AndRaise(test.TestingException())
self.mox.ReplayAll()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_failed_bdm_prep_from_delete_raises_unexpected(self):
with contextlib.nested(
mock.patch.object(self.compute,
'_build_networks_for_instance',
return_value=self.network_info),
mock.patch.object(self.instance, 'save',
side_effect=exception.UnexpectedDeletingTaskStateError(
instance_uuid='fake_uuid',
actual={'task_state': task_states.DELETING},
expected={'task_state': None})),
) as (_build_networks_for_instance, save):
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e,
exception.UnexpectedDeletingTaskStateError)
_build_networks_for_instance.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
save.assert_has_calls([mock.call()])
def test_build_resources_aborts_on_failed_network_alloc(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndRaise(
test.TestingException())
self.mox.ReplayAll()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups, self.image,
self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_failed_network_alloc_from_delete_raises_unexpected(self):
with mock.patch.object(self.compute,
'_build_networks_for_instance') as _build_networks:
exc = exception.UnexpectedDeletingTaskStateError
_build_networks.side_effect = exc(
instance_uuid='fake_uuid',
actual={'task_state': task_states.DELETING},
expected={'task_state': None})
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exc)
_build_networks.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
def test_build_resources_with_network_info_obj_on_spawn_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
network_model.NetworkInfo([{'address': '1.2.3.4'}]))
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
self.mox.ReplayAll()
test_exception = test.TestingException()
def fake_spawn():
raise test_exception
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertEqual(test_exception, e)
def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
self.mox.ReplayAll()
test_exception = test.TestingException()
def fake_spawn():
raise test_exception
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertEqual(test_exception, e)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_instance_not_found_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
expected_exc = exception.InstanceNotFound(
instance_id=self.instance.uuid)
mock_save.side_effect = expected_exc
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except Exception as e:
self.assertEqual(expected_exc, e)
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_unexpected_task_error_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
mock_save.side_effect = exception.UnexpectedTaskStateError(
instance_uuid='fake_uuid', expected={}, actual={})
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except exception.BuildAbortException:
pass
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_exception_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
mock_save.side_effect = Exception()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except exception.BuildAbortException:
pass
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
def test_build_resources_aborts_on_cleanup_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False).AndRaise(
test.TestingException())
self._build_resources_instance_update()
self.mox.ReplayAll()
def fake_spawn():
raise test.TestingException()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_build_networks_if_not_allocated(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata={},
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.compute._allocate_network(self.context, instance,
self.requested_networks, None, self.security_groups, None)
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_build_networks_if_allocated_false(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata=dict(network_allocated='False'),
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.compute._allocate_network(self.context, instance,
self.requested_networks, None, self.security_groups, None)
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_return_networks_if_found(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata=dict(network_allocated='True'),
expected_attrs=['system_metadata'])
def fake_network_info():
return network_model.NetworkInfo([{'address': '123.123.123.123'}])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, instance.host)
self.compute.network_api.get_instance_nw_info(
self.context, instance).AndReturn(
network_model.NetworkInfoAsyncWrapper(fake_network_info))
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_cleanup_allocated_networks_instance_not_found(self):
with contextlib.nested(
mock.patch.object(self.compute, '_deallocate_network'),
mock.patch.object(self.instance, 'save',
side_effect=exception.InstanceNotFound(instance_id=''))
) as (_deallocate_network, save):
            # Testing that this doesn't raise an exception
self.compute._cleanup_allocated_networks(self.context,
self.instance, self.requested_networks)
save.assert_called_once_with()
self.assertEqual('False',
self.instance.system_metadata['network_allocated'])
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
mock_instance_update):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
# Check that launched_at is set on the instance
self.assertIsNotNone(args[1].launched_at)
with contextlib.nested(
mock.patch.object(self.compute,
'_update_scheduler_instance_info'),
mock.patch.object(self.compute.driver, 'spawn'),
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_upd, mock_spawn, mock_networks, mock_save, mock_notify):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
expected_call = mock.call(self.context, self.instance,
'create.end', extra_usage_info={'message': u'Success'},
network_info=[])
create_end_call = mock_notify.call_args_list[
mock_notify.call_count - 1]
self.assertEqual(expected_call, create_end_call)
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_create_end_on_instance_delete(self, mock_instance_update):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
# Check that launched_at is set on the instance
self.assertIsNotNone(args[1].launched_at)
exc = exception.InstanceNotFound(instance_id='')
with contextlib.nested(
mock.patch.object(self.compute.driver, 'spawn'),
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
side_effect=[None, None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
self.assertRaises(exception.InstanceNotFound,
self.compute._build_and_run_instance, self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
expected_call = mock.call(self.context, self.instance,
'create.end', fault=exc)
create_end_call = mock_notify.call_args_list[
mock_notify.call_count - 1]
self.assertEqual(expected_call, create_end_call)
class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerMigrationTestCase, self).setUp()
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
self.image = {}
self.instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.migration = objects.Migration(context=self.context.elevated(),
new_instance_type_id=7)
self.migration.status = 'migrating'
fake_server_actions.stub_out_action_events(self.stubs)
@mock.patch.object(objects.Migration, 'save')
@mock.patch.object(objects.Migration, 'obj_as_admin')
    def test_errors_out_migration_decorator(self, mock_obj_as_admin,
                                            mock_save):
        # Tests that the errors_out_migration decorator in the compute
        # manager sets the migration status to 'error' when an exception
        # is raised from the decorated method
instance = fake_instance.fake_instance_obj(self.context)
migration = objects.Migration()
migration.instance_uuid = instance.uuid
migration.status = 'migrating'
migration.id = 0
@manager.errors_out_migration
def fake_function(self, context, instance, migration):
raise test.TestingException()
mock_obj_as_admin.return_value = mock.MagicMock()
self.assertRaises(test.TestingException, fake_function,
self, self.context, instance, migration)
self.assertEqual('error', migration.status)
mock_save.assert_called_once_with()
mock_obj_as_admin.assert_called_once_with()
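    # Illustrative sketch (added; nova's real errors_out_migration lives in
    # nova.compute.manager and may differ in detail): the behaviour the test
    # above exercises amounts to catching any exception from the wrapped
    # method, flipping the migration to 'error' under an admin context,
    # saving it, and re-raising.
    @staticmethod
    def _errors_out_migration_sketch(function):
        def decorated(self, context, instance, migration, *args, **kwargs):
            try:
                return function(self, context, instance, migration,
                                *args, **kwargs)
            except Exception:
                migration.status = 'error'
                with migration.obj_as_admin():
                    migration.save()
                raise  # re-raise the original exception for the caller
        return decorated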
def test_finish_resize_failure(self):
with contextlib.nested(
mock.patch.object(self.compute, '_finish_resize',
side_effect=exception.ResizeError(reason='')),
mock.patch.object(db, 'instance_fault_create'),
mock.patch.object(self.compute, '_instance_update'),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.migration, 'save'),
mock.patch.object(self.migration, 'obj_as_admin',
return_value=mock.MagicMock())
) as (meth, fault_create, instance_update, instance_save,
migration_save, migration_obj_as_admin):
fault_create.return_value = (
test_instance_fault.fake_faults['fake-uuid'][0])
self.assertRaises(
exception.ResizeError, self.compute.finish_resize,
context=self.context, disk_info=[], image=self.image,
instance=self.instance, reservations=[],
migration=self.migration
)
self.assertEqual("error", self.migration.status)
migration_save.assert_called_once_with()
migration_obj_as_admin.assert_called_once_with()
def test_resize_instance_failure(self):
self.migration.dest_host = None
with contextlib.nested(
mock.patch.object(self.compute.driver,
'migrate_disk_and_power_off',
side_effect=exception.ResizeError(reason='')),
mock.patch.object(db, 'instance_fault_create'),
mock.patch.object(self.compute, '_instance_update'),
mock.patch.object(self.migration, 'save'),
mock.patch.object(self.migration, 'obj_as_admin',
return_value=mock.MagicMock()),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=None),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute,
'_get_instance_block_device_info',
return_value=None),
mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid',
return_value=None),
mock.patch.object(objects.Flavor,
'get_by_id',
return_value=None)
) as (meth, fault_create, instance_update,
migration_save, migration_obj_as_admin, nw_info, save_inst,
notify, vol_block_info, bdm, flavor):
fault_create.return_value = (
test_instance_fault.fake_faults['fake-uuid'][0])
self.assertRaises(
exception.ResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
reservations=[], migration=self.migration,
instance_type='type', clean_shutdown=True)
self.assertEqual("error", self.migration.status)
self.assertEqual([mock.call(), mock.call()],
migration_save.mock_calls)
self.assertEqual([mock.call(), mock.call()],
migration_obj_as_admin.mock_calls)
def _test_revert_resize_instance_destroy_disks(self, is_shared=False):
        # This test asserts that _is_instance_storage_shared() is called from
        # revert_resize() and that its return value is passed to
        # driver.destroy(), so this behaviour cannot silently regress.
@mock.patch.object(self.instance, 'revert_migration_context')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_is_instance_storage_shared')
@mock.patch.object(self.compute, 'finish_revert_resize')
@mock.patch.object(self.compute, '_instance_update')
@mock.patch.object(self.compute, '_get_resource_tracker')
@mock.patch.object(self.compute.driver, 'destroy')
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
destroy,
_get_resource_tracker,
_instance_update,
finish_revert_resize,
_is_instance_storage_shared,
get_instance_nw_info,
revert_migration_context):
self.migration.source_compute = self.instance['host']
# Inform compute that instance uses non-shared or shared storage
_is_instance_storage_shared.return_value = is_shared
self.compute.revert_resize(context=self.context,
migration=self.migration,
instance=self.instance,
reservations=None)
_is_instance_storage_shared.assert_called_once_with(
self.context, self.instance,
host=self.migration.source_compute)
            # If the instance storage is shared, the driver's destroy method
            # should not destroy the disks; otherwise it should.
destroy.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY, not is_shared)
do_test()
def test_revert_resize_instance_destroy_disks_shared_storage(self):
self._test_revert_resize_instance_destroy_disks(is_shared=True)
def test_revert_resize_instance_destroy_disks_non_shared_storage(self):
self._test_revert_resize_instance_destroy_disks(is_shared=False)
def test_consoles_enabled(self):
self.flags(enabled=False, group='vnc')
self.flags(enabled=False, group='spice')
self.flags(enabled=False, group='rdp')
self.flags(enabled=False, group='serial_console')
self.assertFalse(self.compute._consoles_enabled())
self.flags(enabled=True, group='vnc')
self.assertTrue(self.compute._consoles_enabled())
self.flags(enabled=False, group='vnc')
for console in ['spice', 'rdp', 'serial_console']:
self.flags(enabled=True, group=console)
self.assertTrue(self.compute._consoles_enabled())
self.flags(enabled=False, group=console)
@mock.patch('nova.utils.spawn_n')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
def _test_max_concurrent_live(self, mock_lm, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
instance = objects.Instance(uuid=str(uuid.uuid4()))
migration = objects.Migration()
self.compute.live_migration(self.context,
mock.sentinel.dest,
instance,
mock.sentinel.block_migration,
migration,
mock.sentinel.migrate_data)
self.assertEqual('queued', migration.status)
migration.save.assert_called_once_with()
with mock.patch.object(self.compute,
'_live_migration_semaphore') as mock_sem:
for i in (1, 2, 3):
_do_it()
self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_live_limited(self):
self.flags(max_concurrent_live_migrations=2)
self._test_max_concurrent_live()
def test_max_concurrent_live_unlimited(self):
self.flags(max_concurrent_live_migrations=0)
self._test_max_concurrent_live()
def test_max_concurrent_live_semaphore_limited(self):
self.flags(max_concurrent_live_migrations=123)
self.assertEqual(
123,
manager.ComputeManager()._live_migration_semaphore.balance)
def test_max_concurrent_live_semaphore_unlimited(self):
self.flags(max_concurrent_live_migrations=0)
compute = manager.ComputeManager()
self.assertEqual(0, compute._live_migration_semaphore.balance)
self.assertIsInstance(compute._live_migration_semaphore,
compute_utils.UnlimitedSemaphore)
def test_max_concurrent_live_semaphore_negative(self):
self.flags(max_concurrent_live_migrations=-2)
compute = manager.ComputeManager()
self.assertEqual(0, compute._live_migration_semaphore.balance)
self.assertIsInstance(compute._live_migration_semaphore,
compute_utils.UnlimitedSemaphore)
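    # Illustrative sketch (added): the two tests above rely only on
    # compute_utils.UnlimitedSemaphore exposing a balance of 0 and acting
    # as a no-op context manager.  A minimal equivalent would be:
    class _UnlimitedSemaphoreSketch(object):
        balance = 0
        def __enter__(self):
            return self   # never blocks, however many holders there are
        def __exit__(self, exc_type, exc_value, traceback):
            return False  # never swallow exceptions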
| apache-2.0 |
jn7163/django | tests/null_fk_ordering/tests.py | 381 | 2012 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Article, Author, Comment, Forum, Post, SystemInfo
class NullFkOrderingTests(TestCase):
def test_ordering_across_null_fk(self):
"""
        Regression test for #7512: ordering across nullable Foreign Keys
        shouldn't exclude results.
"""
author_1 = Author.objects.create(name='Tom Jones')
author_2 = Author.objects.create(name='Bob Smith')
Article.objects.create(title='No author on this article')
Article.objects.create(author=author_1, title='This article written by Tom Jones')
Article.objects.create(author=author_2, title='This article written by Bob Smith')
# We can't compare results directly (since different databases sort NULLs to
# different ends of the ordering), but we can check that all results are
# returned.
self.assertEqual(len(list(Article.objects.all())), 3)
s = SystemInfo.objects.create(system_name='System Info')
f = Forum.objects.create(system_info=s, forum_name='First forum')
p = Post.objects.create(forum=f, title='First Post')
Comment.objects.create(post=p, comment_text='My first comment')
Comment.objects.create(comment_text='My second comment')
s2 = SystemInfo.objects.create(system_name='More System Info')
f2 = Forum.objects.create(system_info=s2, forum_name='Second forum')
p2 = Post.objects.create(forum=f2, title='Second Post')
Comment.objects.create(comment_text='Another first comment')
Comment.objects.create(post=p2, comment_text='Another second comment')
# We have to test this carefully. Some databases sort NULL values before
# everything else, some sort them afterwards. So we extract the ordered list
# and check the length. Before the fix, this list was too short (some values
# were omitted).
self.assertEqual(len(list(Comment.objects.all())), 4)
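# Illustrative sketch (added; the field names are assumptions, see
# tests/null_fk_ordering/models.py for the real definitions): the models
# imported above pair each model with a nullable foreign key and order
# across it, roughly:
#
#     class Author(models.Model):
#         name = models.CharField(max_length=150)
#
#     class Article(models.Model):
#         title = models.CharField(max_length=150)
#         author = models.ForeignKey(Author, null=True)
#
#         class Meta:
#             ordering = ['author__name']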
| bsd-3-clause |
cmelange/ansible | lib/ansible/modules/storage/netapp/netapp_e_storage_system.py | 6 | 11504 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_storage_system
version_added: "2.2"
short_description: Add/remove arrays from the Web Services Proxy
description:
- Manage the arrays accessible via a NetApp Web Services Proxy for NetApp E-series storage arrays.
options:
api_username:
required: true
description:
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_password:
required: true
description:
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
api_url:
required: true
description:
- The url to the SANtricity WebServices Proxy or embedded REST API.
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
required: true
description:
- The ID of the array to manage. This value must be unique for each array.
state:
required: true
description:
- Whether the specified array should be configured on the Web Services Proxy or not.
choices: ['present', 'absent']
controller_addresses:
required: true
description:
      - The list of addresses for the out-of-band management adapter or the agent host. Mutually exclusive with the array_wwn parameter.
array_wwn:
required: false
description:
      - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive with the controller_addresses parameter.
array_password:
required: false
description:
- The management password of the array to manage, if set.
enable_trace:
required: false
default: false
description:
- Enable trace logging for SYMbol calls to the storage system.
meta_tags:
required: false
default: None
description:
      - Optional meta tags to associate with this storage system
author: Kevin Hulquest (@hulquest)
'''
EXAMPLES = '''
---
- name: Presence of storage system
netapp_e_storage_system:
ssid: "{{ item.key }}"
state: present
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
controller_addresses:
- "{{ item.value.address1 }}"
- "{{ item.value.address2 }}"
with_dict: "{{ storage_systems }}"
when: check_storage_system
'''
RETURN = '''
msg:
    description: Success message, for example "Storage system added." or "Storage system removed."
    returned: success
    type: str
    sample: Storage system removed.
'''
import json
from datetime import datetime as dt, timedelta
from time import sleep
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
            data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
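# Example usage (illustrative; the URL and credentials are made up): the
# helper above returns an (http_code, parsed_json) tuple and, with
# ignore_errors=True, does so even for 4xx/5xx responses instead of raising:
#
#     rc, data = request('https://proxy/devmgr/v2/storage-systems/1',
#                        headers=dict(Accept="application/json"),
#                        url_username='admin', url_password='admin',
#                        ignore_errors=True)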
def do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_body, timeout):
(rc, resp) = request(api_url + "/storage-systems", data=request_body, headers=post_headers,
method='POST', url_username=api_usr, url_password=api_pwd,
validate_certs=validate_certs)
status = None
return_resp = resp
if 'status' in resp:
status = resp['status']
if rc == 201:
status = 'neverContacted'
fail_after_time = dt.utcnow() + timedelta(seconds=timeout)
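        # Poll the proxy until the freshly added array reports a real
        # status, failing once the caller-supplied timeout elapses.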
while status == 'neverContacted':
if dt.utcnow() > fail_after_time:
raise Exception("web proxy timed out waiting for array status")
sleep(1)
(rc, system_resp) = request(api_url + "/storage-systems/%s" % ssid,
headers=dict(Accept="application/json"), url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
status = system_resp['status']
return_resp = system_resp
return status, return_resp
def main():
argument_spec = basic_auth_argument_spec()
argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
ssid=dict(required=True, type='str'),
controller_addresses=dict(type='list'),
array_wwn=dict(required=False, type='str'),
array_password=dict(required=False, type='str', no_log=True),
array_status_timeout_sec=dict(default=60, type='int'),
enable_trace=dict(default=False, type='bool'),
meta_tags=dict(type='list')
))
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[['controller_addresses', 'array_wwn']],
required_if=[('state', 'present', ['controller_addresses'])]
)
p = module.params
state = p['state']
ssid = p['ssid']
controller_addresses = p['controller_addresses']
array_wwn = p['array_wwn']
array_password = p['array_password']
array_status_timeout_sec = p['array_status_timeout_sec']
validate_certs = p['validate_certs']
meta_tags = p['meta_tags']
enable_trace = p['enable_trace']
api_usr = p['api_username']
api_pwd = p['api_password']
api_url = p['api_url']
changed = False
array_exists = False
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, headers=dict(Accept="application/json"),
url_username=api_usr, url_password=api_pwd, validate_certs=validate_certs,
ignore_errors=True)
except:
err = get_exception()
module.fail_json(msg="Error accessing storage-system with id [%s]. Error [%s]" % (ssid, str(err)))
array_exists = True
array_detail = resp
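    # A 200 means the proxy already manages this array; a 404 means it does
    # not; any other code is treated as the array not being present.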
if rc == 200:
if state == 'absent':
changed = True
array_exists = False
elif state == 'present':
current_addresses = frozenset(i for i in (array_detail['ip1'], array_detail['ip2']) if i)
if set(controller_addresses) != current_addresses:
changed = True
if array_detail['wwn'] != array_wwn and array_wwn is not None:
module.fail_json(
msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
elif rc == 404:
if state == 'present':
changed = True
array_exists = False
else:
changed = False
module.exit_json(changed=changed, msg="Storage system was not present.")
if changed and not module.check_mode:
if state == 'present':
if not array_exists:
# add the array
array_add_req = dict(
id=ssid,
controllerAddresses=controller_addresses,
metaTags=meta_tags,
enableTrace=enable_trace
)
if array_wwn:
array_add_req['wwn'] = array_wwn
if array_password:
array_add_req['password'] = array_password
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
request_data = json.dumps(array_add_req)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, request_data,
array_status_timeout_sec)
except:
err = get_exception()
module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, request_data, str(err)))
else: # array exists, modify...
post_headers = dict(Accept="application/json")
post_headers['Content-Type'] = 'application/json'
post_body = dict(
controllerAddresses=controller_addresses,
removeAllTags=True,
enableTrace=enable_trace,
metaTags=meta_tags
)
try:
(rc, resp) = do_post(ssid, api_url, post_headers, api_usr, api_pwd, validate_certs, post_body,
array_status_timeout_sec)
except:
err = get_exception()
module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
(ssid, post_body, str(err)))
elif state == 'absent':
# delete the array
try:
(rc, resp) = request(api_url + "/storage-systems/%s" % ssid, method='DELETE',
url_username=api_usr,
url_password=api_pwd, validate_certs=validate_certs)
except:
err = get_exception()
module.fail_json(msg="Failed to remove storage array. Id[%s]. Error[%s]." % (ssid, str(err)))
if rc == 422:
module.exit_json(changed=changed, msg="Storage system was not presnt.")
if rc == 204:
module.exit_json(changed=changed, msg="Storage system removed.")
module.exit_json(changed=changed, **resp)
if __name__ == '__main__':
main()
| gpl-3.0 |
ezequielpereira/Time-Line | libs/wx/tools/Editra/src/eclib/filterdlg.py | 2 | 5746 | ###############################################################################
# Name: filterdlg.py #
# Purpose: Filter dialog #
# Author: Cody Precord <cprecord@editra.org> #
# Copyright: (c) 2009 Cody Precord <staff@editra.org> #
# License: wxWindows License #
###############################################################################
"""
Editra Control Library: FilterDialog
"""
__author__ = "Cody Precord <cprecord@editra.org>"
__svnid__ = "$Id: filterdlg.py 63825 2010-04-02 01:20:36Z CJP $"
__revision__ = "$Revision: 63825 $"
__all__ = ["FilterDialog",]
#-----------------------------------------------------------------------------#
# Imports
import wx
# Eclib Imports
import ecbasewin
#-----------------------------------------------------------------------------#
# Globals
#_ = wx.GetTranslation
from gettext import gettext as _
#-----------------------------------------------------------------------------#
class FilterDialog(ecbasewin.ECBaseDlg):
"""Dialog that allows adding and removing items from a filter list"""
def __init__(self, parent, id=wx.ID_ANY, title=u"",
pos=wx.DefaultPosition, size=wx.DefaultSize,
style=wx.DEFAULT_DIALOG_STYLE, name=u"FilterDialog"):
ecbasewin.ECBaseDlg.__init__(self, parent, id, title,
pos, size, style, name)
# Attributes
self.SetPanel(FilterPanel(self))
# Event Handlers
#-----------------------------------------------------------------------------#
class FilterPanel(wx.Panel):
"""Filter dialog panel"""
def __init__(self, parent):
wx.Panel.__init__(self, parent)
# Attributes
self._left = wx.ListBox(self, style=wx.LB_EXTENDED|wx.LB_SORT)
self._right = wx.ListBox(self, style=wx.LB_EXTENDED)
self.__DoLayout()
# Event Handlers
self.Bind(wx.EVT_BUTTON, self.OnButton)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateButton, id=wx.ID_ADD)
self.Bind(wx.EVT_UPDATE_UI, self.OnUpdateButton, id=wx.ID_REMOVE)
def __DoLayout(self):
"""Layout the panel"""
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(self._left, 1, wx.EXPAND|wx.ALL, 10)
# Add buttons
bvsizer = wx.BoxSizer(wx.VERTICAL)
addb = wx.Button(self, wx.ID_ADD, label=_("Add >>"))
removeb = wx.Button(self, wx.ID_REMOVE, label=_("<< Remove"))
bvsizer.AddStretchSpacer()
bvsizer.AddMany([(addb, 0, wx.EXPAND),
((10, 15), 0),
(removeb, 0, wx.EXPAND)])
bvsizer.AddStretchSpacer()
hsizer.Add(bvsizer, 0, wx.ALIGN_CENTER)
hsizer.Add(self._right, 1, wx.EXPAND|wx.ALL, 10)
vsizer.Add(hsizer, 1, wx.EXPAND)
# Add main dialog buttons
bsizer = wx.StdDialogButtonSizer()
bsizer.AddButton(wx.Button(self, wx.ID_OK))
btn = wx.Button(self, wx.ID_CANCEL)
bsizer.AddButton(btn)
btn.SetDefault()
bsizer.Realize()
vsizer.Add(bsizer, 0, wx.ALIGN_RIGHT)
vsizer.AddSpacer(8)
self.SetSizer(vsizer)
self.SetAutoLayout(True)
@ecbasewin.expose(FilterDialog)
def GetIncludes(self):
"""Get the items from the includes list
@return: list of strings
"""
return self._right.GetItems()
@ecbasewin.expose(FilterDialog)
def SetIncludes(self, items):
"""Set the items in the includes list
@param items: list of strings
"""
return self._right.SetItems(items)
@ecbasewin.expose(FilterDialog)
def GetExcludes(self):
"""Get the items from the excludes list
@return: list of strings
"""
return self._left.GetItems()
@ecbasewin.expose(FilterDialog)
def SetExcludes(self, items):
"""set the items in the excludes list
@param items: list of strings
"""
return self._left.SetItems(items)
@ecbasewin.expose(FilterDialog)
def SetListValues(self, valuemap):
"""Set the values of the filter lists
@param valuemap: dict(item=bool)
"""
includes = list()
excludes = list()
for item, include in valuemap.iteritems():
if include:
includes.append(item)
else:
excludes.append(item)
includes.sort()
excludes.sort()
self.SetIncludes(includes)
self.SetExcludes(excludes)
def OnButton(self, evt):
"""Move the selected items between the exclude (left) and include (right) lists"""
e_id = evt.GetId()
if e_id in (wx.ID_ADD, wx.ID_REMOVE):
cmap = { wx.ID_ADD : (self._left, self._right),
wx.ID_REMOVE : (self._right, self._left) }
idxs = list()
for sel in cmap[e_id][0].GetSelections():
selstr = cmap[e_id][0].GetString(sel)
cmap[e_id][1].Append(selstr)
idxs.append(sel)
idxs.sort()
idxs.reverse()
for idx in idxs:
cmap[e_id][0].Delete(idx)
else:
evt.Skip()
def OnUpdateButton(self, evt):
"""Enable/Disable the Add/Remove buttons based on
selections in the list.
"""
e_id = evt.GetId()
if e_id == wx.ID_ADD:
evt.Enable(len(self._left.GetSelections()))
elif e_id == wx.ID_REMOVE:
evt.Enable(len(self._right.GetSelections()))
else:
evt.Skip()
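#-----------------------------------------------------------------------------#
# Illustrative usage sketch (an editorial addition, not part of the original
# Editra source): FilterDialog, SetListValues, GetIncludes and GetExcludes are
# the real API defined above; the app scaffolding and sample values below are
# assumed for demonstration only.
if __name__ == '__main__':
    app = wx.App(False)
    dlg = FilterDialog(None, title=u"Filter Test")
    # True maps an item to the includes (right) list, False to excludes (left)
    dlg.SetListValues({u"*.py": True, u"*.pyc": False, u"*.txt": True})
    if dlg.ShowModal() == wx.ID_OK:
        print("Includes: %s" % dlg.GetIncludes())
        print("Excludes: %s" % dlg.GetExcludes())
    dlg.Destroy()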
| gpl-3.0 |
Lujeni/ansible | lib/ansible/modules/cloud/google/gcp_iam_service_account_key.py | 16 | 9842 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_iam_service_account_key
description:
- A service account in the Identity and Access Management API.
short_description: Creates a GCP ServiceAccountKey
version_added: '2.8'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
private_key_type:
description:
- Output format for the service account key.
- 'Some valid choices include: "TYPE_UNSPECIFIED", "TYPE_PKCS12_FILE", "TYPE_GOOGLE_CREDENTIALS_FILE"'
required: false
type: str
key_algorithm:
description:
- Specifies the algorithm for the key.
- 'Some valid choices include: "KEY_ALG_UNSPECIFIED", "KEY_ALG_RSA_1024", "KEY_ALG_RSA_2048"'
required: false
type: str
service_account:
description:
- The name of the serviceAccount.
- 'This field represents a link to a ServiceAccount resource in GCP. It can be
specified in two ways. First, you can place a dictionary with key ''name'' and
value of your resource''s name Alternatively, you can add `register: name-of-resource`
to a gcp_iam_service_account task and then set this service_account field to
"{{ name-of-resource }}"'
required: false
type: dict
path:
description:
- The full name of the file that will hold the service account private key. The
file is written when state is present and removed when state is absent.
- File path must be absolute.
required: false
type: path
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
'''
EXAMPLES = '''
- name: create a service account
gcp_iam_service_account:
name: test-ansible@graphite-playground.google.com.iam.gserviceaccount.com
display_name: My Ansible test key
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: serviceaccount
- name: create a service account key
gcp_iam_service_account_key:
service_account: "{{ serviceaccount }}"
private_key_type: TYPE_GOOGLE_CREDENTIALS_FILE
path: "~/test_account.json"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
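# Editorial, hedged addition (not part of the generated module docs): removing
# the key created above; the path parameter locates the key file to revoke.
- name: delete a service account key
  gcp_iam_service_account_key:
    path: "~/test_account.json"
    project: test_project
    auth_kind: serviceaccount
    service_account_file: "/tmp/auth.pem"
    state: absent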
'''
RETURN = '''
name:
description:
- The name of the key.
returned: success
type: str
privateKeyType:
description:
- Output format for the service account key.
returned: success
type: str
keyAlgorithm:
description:
- Specifies the algorithm for the key.
returned: success
type: str
privateKeyData:
description:
- Private key data. Base-64 encoded.
returned: success
type: str
publicKeyData:
description:
- Public key data. Base-64 encoded.
returned: success
type: str
validAfterTime:
description:
- Key can only be used after this time.
returned: success
type: str
validBeforeTime:
description:
- Key can only be used before this time.
returned: success
type: str
serviceAccount:
description:
- The name of the serviceAccount.
returned: success
type: dict
path:
description:
- The full name of the file that will hold the service account private key. The
file is written when state is present and removed when state is absent.
- File path must be absolute.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
from ansible.module_utils._text import to_native
import json
import os
import mimetypes
import hashlib
import base64
################################################################################
# Main
################################################################################
def main():
"""Main function"""
module = GcpModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
private_key_type=dict(type='str'),
key_algorithm=dict(type='str'),
service_account=dict(type='dict'),
path=dict(type='path'),
)
)
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/iam']
state = module.params['state']
# If file exists, we're doing a no-op or deleting the key.
changed = False
if os.path.isfile(module.params['path']):
fetch = fetch_resource(module)
# If file exists and we should delete the file, delete it.
if fetch and module.params['state'] == 'absent':
delete(module)
changed = True
# Create the file if present state and no current file.
elif module.params['state'] == 'present':
create(module)
changed = True
# Not returning any information about the key because that information could
# otherwise end up in logs.
module.exit_json(**{'changed': changed, 'file_path': module.params['path']})
def create(module):
auth = GcpSession(module, 'iam')
json_content = return_if_object(module, auth.post(self_link(module), resource_to_request(module)))
with open(module.params['path'], 'w') as f:
private_key_contents = to_native(base64.b64decode(json_content['privateKeyData']))
f.write(private_key_contents)
def delete(module):
auth = GcpSession(module, 'iam')
return return_if_object(module, auth.delete(self_link_from_file(module)))
def resource_to_request(module):
request = {u'privateKeyType': module.params.get('private_key_type'), u'keyAlgorithm': module.params.get('key_algorithm')}
return_vals = {}
for k, v in request.items():
if v:
return_vals[k] = v
return return_vals
def fetch_resource(module):
auth = GcpSession(module, 'iam')
return return_if_object(module, auth.get(self_link_from_file(module)))
def key_name_from_file(filename, module):
with open(filename, 'r') as f:
try:
json_data = json.loads(f.read())
return "projects/{project_id}/serviceAccounts/{client_email}/keys/{private_key_id}".format(**json_data)
except ValueError as inst:
module.fail_json(msg="File is not a valid GCP JSON service account key: %s" % to_native(inst))
def self_link_from_file(module):
key_name = key_name_from_file(module.params['path'], module)
return "https://iam.googleapis.com/v1/{key_name}".format(key_name=key_name)
def self_link(module):
results = {'project': module.params['project'], 'service_account': replace_resource_dict(module.params['service_account'], 'name')}
return "https://iam.googleapis.com/v1/projects/{project}/serviceAccounts/{service_account}/keys".format(**results)
def return_if_object(module, response):
# If not found, return nothing.
# return_if_object not used in any context where 404 means error.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == '__main__':
main()
| gpl-3.0 |