repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
Vibek/Human_intention | src/skeleton_markers/msg/_recognitionActionActionFeedback.py | 1 | 11462 | """autogenerated by genpy from skeleton_markers/recognitionActionActionFeedback.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import skeleton_markers.msg
import genpy
import actionlib_msgs.msg
import std_msgs.msg
class recognitionActionActionFeedback(genpy.Message):
  # Autogenerated ROS action-feedback container: std_msgs/Header + actionlib
  # GoalStatus + the (field-less) recognitionActionFeedback payload.
  # NOTE(review): this file targets Python 2 (it references `unicode`); the
  # `python3 or ...` short-circuits below keep it importable on Python 3.
  _md5sum = "aae20e09065c3809e8a8e87c4c8953fd"
  _type = "skeleton_markers/recognitionActionActionFeedback"
  _has_header = True #flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
recognitionActionFeedback feedback
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: skeleton_markers/recognitionActionFeedback
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
# Define a feedback message
"""
  # Slot order matches the .msg field order used by (de)serialization.
  __slots__ = ['header','status','feedback']
  _slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','skeleton_markers/recognitionActionFeedback']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       header,status,feedback

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(recognitionActionActionFeedback, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = skeleton_markers.msg.recognitionActionFeedback()
    else:
      # No explicit values given: default-construct every field.
      self.header = std_msgs.msg.Header()
      self.status = actionlib_msgs.msg.GoalStatus()
      self.feedback = skeleton_markers.msg.recognitionActionFeedback()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``

    Note: ``self.feedback`` contributes no bytes because the
    recognitionActionFeedback message defines no fields (see _full_text).
    """
    try:
      _x = self
      # Header: seq, stamp.secs, stamp.nsecs as three little-endian uint32.
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      # Strings are wire-encoded as <uint32 length><utf-8 bytes>.
      # On Python 3 `unicode` is never evaluated (short-circuit on python3).
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      # GoalID stamp: secs, nsecs.
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      # GoalStatus.status: single uint8.
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    Note: the parameter name shadows the builtin ``str`` (generated code).
    """
    try:
      # Ensure sub-messages exist before filling them in.
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = skeleton_markers.msg.recognitionActionFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      # Header: three little-endian uint32.
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      # Length-prefixed string: frame_id.
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      # Length-prefixed string: goal_id.id.
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      # Length-prefixed string: status.text.
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module

    Identical to :meth:`serialize` here — this message has no array
    fields, so numpy is never used.
    """
    try:
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module

    Identical to :meth:`deserialize` here — no array fields, numpy unused.
    """
    try:
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.feedback is None:
        self.feedback = skeleton_markers.msg.recognitionActionFeedback()
      end = 0
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled struct packers (little-endian) shared by all
# (de)serialization methods above; compiled once at import time.
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_B = struct.Struct("<B")
_struct_2I = struct.Struct("<2I")
| mit |
camptocamp/odoo | addons/account/account_financial_report.py | 40 | 7646 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
# ---------------------------------------------------------
# Account Financial Report
# ---------------------------------------------------------
class account_financial_report(osv.osv):
    """Hierarchical financial-report line (OpenERP 7 osv model).

    Each record is a node in a report tree; its balance/debit/credit are
    computed from linked accounts, account types, another report, or the
    sum of its children, depending on ``type``.
    """
    _name = "account.financial.report"
    _description = "Account Report"

    def _get_level(self, cr, uid, ids, field_name, arg, context=None):
        '''Returns a dictionary with key=the ID of a record and value = the level of this
        record in the tree structure (0 for roots, parent's level + 1 otherwise).'''
        res = {}
        for report in self.browse(cr, uid, ids, context=context):
            level = 0
            if report.parent_id:
                # Relies on the parent's stored/computed `level` field.
                level = report.parent_id.level + 1
            res[report.id] = level
        return res

    def _get_children_by_order(self, cr, uid, ids, context=None):
        '''Returns a flat LIST of ids: each id in ``ids`` followed by all of
        its descendants, computed recursively, children sorted by sequence.
        Ready for the printing.
        (NOTE(review): the original docstring claimed a dictionary was
        returned; the implementation builds and returns a list.)'''
        res = []
        for id in ids:
            res.append(id)
            ids2 = self.search(cr, uid, [('parent_id', '=', id)], order='sequence ASC', context=context)
            res += self._get_children_by_order(cr, uid, ids2, context=context)
        return res

    def _get_balance(self, cr, uid, ids, field_names, args, context=None):
        '''returns a dictionary with key=the ID of a record and value=the balance amount
        computed for this record. If the record is of type :
            'accounts' : it's the sum of the linked accounts
            'account_type' : it's the sum of leaf accoutns with such an account_type
            'account_report' : it's the amount of the related report
            'sum' : it's the sum of the children of this record (aka a 'view' record)'''
        account_obj = self.pool.get('account.account')
        res = {}
        for report in self.browse(cr, uid, ids, context=context):
            if report.id in res:
                # Already computed (e.g. via recursion below); skip.
                continue
            res[report.id] = dict((fn, 0.0) for fn in field_names)
            if report.type == 'accounts':
                # it's the sum of the linked accounts
                for a in report.account_ids:
                    for field in field_names:
                        res[report.id][field] += getattr(a, field)
            elif report.type == 'account_type':
                # it's the sum the leaf accounts with such an account type
                report_types = [x.id for x in report.account_type_ids]
                account_ids = account_obj.search(cr, uid, [('user_type','in', report_types), ('type','!=','view')], context=context)
                for a in account_obj.browse(cr, uid, account_ids, context=context):
                    for field in field_names:
                        res[report.id][field] += getattr(a, field)
            elif report.type == 'account_report' and report.account_report_id:
                # it's the amount of the linked report (recursive call)
                res2 = self._get_balance(cr, uid, [report.account_report_id.id], field_names, False, context=context)
                for key, value in res2.items():
                    for field in field_names:
                        res[report.id][field] += value[field]
            elif report.type == 'sum':
                # it's the sum of the children of this account.report
                res2 = self._get_balance(cr, uid, [rec.id for rec in report.children_ids], field_names, False, context=context)
                for key, value in res2.items():
                    for field in field_names:
                        res[report.id][field] += value[field]
        return res

    _columns = {
        'name': fields.char('Report Name', size=128, required=True, translate=True),
        'parent_id': fields.many2one('account.financial.report', 'Parent'),
        'children_ids': fields.one2many('account.financial.report', 'parent_id', 'Account Report'),
        'sequence': fields.integer('Sequence'),
        # balance/debit/credit share one multi-computation via _get_balance.
        'balance': fields.function(_get_balance, 'Balance', multi='balance'),
        'debit': fields.function(_get_balance, 'Debit', multi='balance'),
        'credit': fields.function(_get_balance, 'Credit', multi="balance"),
        'level': fields.function(_get_level, string='Level', store=True, type='integer'),
        # `type` drives which branch of _get_balance is used.
        'type': fields.selection([
            ('sum','View'),
            ('accounts','Accounts'),
            ('account_type','Account Type'),
            ('account_report','Report Value'),
            ],'Type'),
        'account_ids': fields.many2many('account.account', 'account_account_financial_report', 'report_line_id', 'account_id', 'Accounts'),
        'account_report_id': fields.many2one('account.financial.report', 'Report Value'),
        'account_type_ids': fields.many2many('account.account.type', 'account_account_financial_report_type', 'report_id', 'account_type_id', 'Account Types'),
        'sign': fields.selection([(-1, 'Reverse balance sign'), (1, 'Preserve balance sign')], 'Sign on Reports', required=True, help='For accounts that are typically more debited than credited and that you would like to print as negative amounts in your reports, you should reverse the sign of the balance; e.g.: Expense account. The same applies for accounts that are typically more credited than debited and that you would like to print as positive amounts in your reports; e.g.: Income account.'),
        'display_detail': fields.selection([
            ('no_detail','No detail'),
            ('detail_flat','Display children flat'),
            ('detail_with_hierarchy','Display children with hierarchy')
            ], 'Display details'),
        'style_overwrite': fields.selection([
            (0, 'Automatic formatting'),
            (1,'Main Title 1 (bold, underlined)'),
            (2,'Title 2 (bold)'),
            (3,'Title 3 (bold, smaller)'),
            (4,'Normal Text'),
            (5,'Italic Text (smaller)'),
            (6,'Smallest Text'),
            ],'Financial Report Style', help="You can set up here the format you want this record to be displayed. If you leave the automatic formatting, it will be computed based on the financial reports hierarchy (auto-computed field 'level')."),
    }
    _defaults = {
        'type': 'sum',
        'display_detail': 'detail_flat',
        'sign': 1,
        'style_overwrite': 0,
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
UNC-Major-Lab/Fragment-Isotope-Distribution-Paper | scripts/theoretical/mergeStats.py | 1 | 2010 | #!/usr/bin/env python
import sys
import os
import re
from collections import defaultdict
from math import floor
from math import isnan
# Merge per-file summary-statistics tables (Python 2 script).
#
# Usage: mergeStats.py <root_dir> <prefix> <do_iso>
#   root_dir : directory containing "*.out" files to merge
#   prefix   : only files whose name starts with this prefix are read
#   do_iso   : "F" for rows without an isotope column; anything else
#              expects an extra `iso` column before method/stat
#
# Each input row is tab-separated:
#   count mean min q1 median q3 max [iso] method stat
# Counts and means are merged exactly (weighted by count); min/max are
# merged exactly; q1/median/q3 are approximated by averaging the
# per-file quartiles over the number of files read.
root_dir = sys.argv[1]
prefix = sys.argv[2]
do_iso = sys.argv[3]

# label -> [total_count, sum(count*mean), min, sum_q1, sum_median, sum_q3, max]
method2stats = defaultdict(list)
fileCount = 0;
for f in os.listdir(root_dir):
    fp = root_dir+"/"+f
    if os.path.isfile(fp) and ".out" in f and f.startswith(prefix):
        infile = open(fp)
        fileCount+=1
        for line in infile:
            if do_iso == "F":
                [count, mean, min_v, q1, median, q3, max_v, method, stat] = line.strip().split("\t")
                label = method + "\t" + stat
            else:
                # Extra isotope column becomes part of the grouping label.
                [count, mean, min_v, q1, median, q3, max_v, iso, method, stat] = line.strip().split("\t")
                label = iso + "\t" + method + "\t" + stat
            count = float(count)
            mean = float(mean)
            min_v = float(min_v)
            q1 = float(q1)
            median = float(median)
            q3 = float(q3)
            max_v = float(max_v)
            if not method2stats.has_key(label):
                # First time this label is seen: store mean as count*mean
                # so it can be re-normalized at the end.
                method2stats[label] = [count, count * mean, min_v, q1, median, q3, max_v]
            else:
                method2stats[label][0] += count
                method2stats[label][1] += count * mean
                method2stats[label][2] = min(min_v, method2stats[label][2])
                method2stats[label][3] += q1
                method2stats[label][4] += median
                method2stats[label][5] += q3
                method2stats[label][6] = max(max_v, method2stats[label][6])
        infile.close()

# Emit merged rows: weighted mean, exact min/max, averaged quartiles.
for method in method2stats:
    mean = method2stats[method][1] / method2stats[method][0]
    min_v = method2stats[method][2]
    q1 = method2stats[method][3] / fileCount
    median = method2stats[method][4] / fileCount
    q3 = method2stats[method][5] / fileCount
    max_v = method2stats[method][6]
    print "\t".join([str(mean), str(min_v), str(q1), str(median), str(q3), str(max_v), method])
| mit |
codrut3/tensorflow | tensorflow/python/training/queue_runner.py | 139 | 1240 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Create threads to run multiple enqueue ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.training.queue_runner_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# Public names (re-exported from queue_runner_impl via the wildcard import
# above) to keep; everything else is stripped from this module's namespace.
_allowed_symbols = [
    # Documented in training.py:
    "QueueRunner",
    "add_queue_runner",
    "start_queue_runners",
]

remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
alesdotio/Spirit | spirit/user/auth/tests/tests.py | 1 | 23255 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth import get_user_model
from django.core import mail
from django.utils import timezone
from django.utils.translation import ugettext as _
from django.test.utils import override_settings
from django.core.urlresolvers import NoReverseMatch
from ....core.tests import utils
from ..forms import RegistrationForm, ResendActivationForm, LoginForm
from ..backends import EmailAuthBackend
from ...utils.tokens import UserActivationTokenGenerator
from ...models import UserProfile
from .urls import CustomRegisterForm
User = get_user_model()
class UserViewTest(TestCase):
    """View tests for Spirit's auth flows: login, register, activation,
    resend-activation, password reset, logout and suspension handling.

    NOTE(review): `assertEquals` (used in several tests below) is a
    deprecated alias of `assertEqual` in unittest.
    """

    def setUp(self):
        # Fixtures: two users, a category and two topics (project utils).
        utils.cache_clear()
        self.user = utils.create_user()
        self.user2 = utils.create_user()
        self.category = utils.create_category()
        self.topic = utils.create_topic(self.category, user=self.user2)
        self.topic2 = utils.create_topic(self.category)

    def test_login_email(self):
        """
        try to login by email
        """
        # get
        response = self.client.get(reverse('spirit:user:auth:login'))
        self.assertEqual(response.status_code, 200)

        # post
        form_data = {'username': self.user.email, 'password': "bar"}
        response = self.client.post(reverse('spirit:user:auth:login'),
                                    form_data)
        expected_url = reverse('spirit:user:update')
        self.assertRedirects(response, expected_url, status_code=302)

    def test_login_redirect(self):
        """
        try to login with a logged in user
        """
        utils.login(self)
        response = self.client.get(reverse('spirit:user:auth:login'))
        expected_url = self.user.st.get_absolute_url()
        self.assertRedirects(response, expected_url, status_code=302)

        # next
        response = self.client.get(reverse('spirit:user:auth:login') + '?next=/fakepath/')
        self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)

    def test_register(self):
        """
        register
        """
        # get
        response = self.client.get(reverse('spirit:user:auth:register'))
        self.assertEqual(response.status_code, 200)

        # post
        form_data = {'username': 'uniquefoo', 'email': 'some@some.com', 'password1': 'pass123', 'password2': 'pass123'}
        response = self.client.post(reverse('spirit:user:auth:register'),
                                    form_data)
        expected_url = reverse('spirit:user:auth:login')
        self.assertRedirects(response, expected_url, status_code=302)

        # redirect logged in user
        utils.login(self)
        response = self.client.get(reverse('spirit:user:auth:register'))
        self.assertRedirects(response, reverse('spirit:user:update'), status_code=302)

    def test_register_email_sent(self):
        """
        register and send activation email
        """
        form_data = {'username': 'uniquefoo', 'email': 'some@some.com', 'password1': 'pass123', 'password2': 'pass123'}
        response = self.client.post(reverse('spirit:user:auth:register'), form_data)
        self.assertEqual(response.status_code, 302)
        self.assertEquals(len(mail.outbox), 1)
        self.assertEquals(mail.outbox[0].subject, _("User activation"))

    def test_register_next_logged_in(self):
        """
        redirect next on register
        """
        # redirect logged in user
        utils.login(self)
        response = self.client.get(reverse('spirit:user:auth:register') + "?next=/fakepath/")
        self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)

    @override_settings(ROOT_URLCONF='spirit.user.auth.tests.urls')
    def test_register_custom_form(self):
        """
        Should allow a custom form
        """
        response = self.client.get(reverse('spirit:user:auth:register'))
        self.assertIsInstance(response.context['form'], CustomRegisterForm)

        response = self.client.post(reverse('spirit:user:auth:register'), {})
        self.assertIsInstance(response.context['form'], CustomRegisterForm)

    @override_settings(ST_TESTS_RATELIMIT_NEVER_EXPIRE=True)
    def test_login_rate_limit(self):
        """
        test rate limit 5/5m
        """
        # First 5 bad attempts just re-render the login form...
        form_data = {'username': self.user.email, 'password': "badpassword"}
        for attempt in range(5):
            url = reverse('spirit:user:auth:login')
            response = self.client.post(url, form_data)
            self.assertTemplateUsed(response, 'spirit/user/auth/login.html')

        # ...the 6th is rate-limited and redirected back.
        url = reverse('spirit:user:auth:login') + "?next=/path/"
        response = self.client.post(url, form_data)
        self.assertRedirects(response, url, status_code=302)

    @override_settings(ST_TESTS_RATELIMIT_NEVER_EXPIRE=True)
    def test_custom_reset_password(self):
        """
        test rate limit 5/5m
        """
        form_data = {'email': "bademail@bad.com", }
        for attempt in range(5):
            response = self.client.post(reverse('spirit:user:auth:password-reset'), form_data)
            expected_url = reverse("spirit:user:auth:password-reset-done")
            self.assertRedirects(response, expected_url, status_code=302)

        # 6th attempt: rate-limited, bounced back to the reset form.
        response = self.client.post(reverse('spirit:user:auth:password-reset'), form_data)
        expected_url = reverse("spirit:user:auth:password-reset")
        self.assertRedirects(response, expected_url, status_code=302)

    def test_password_reset_confirm(self):
        """
        test access
        """
        # Invalid uid/token should still render the page (status 200).
        response = self.client.get(
            reverse(
                'spirit:user:auth:password-reset-confirm',
                kwargs={'uidb64': 'f-a-k-e', 'token': 'f-a-k-e'}
            )
        )
        self.assertEqual(response.status_code, 200)

    def test_admin_login(self):
        """
        Redirect to regular user login (optional)
        make sure you added:
        admin.site.login = login_required(admin.site.login)
        to urls.py (the one in your project's root)
        """
        # TODO: document that devs should be doing this.
        try:
            url = reverse('admin:login')
        except NoReverseMatch:
            # Admin not installed in this project; nothing to test.
            return
        response = self.client.get(url)
        expected_url = reverse("spirit:user:auth:login") + "?next=" + reverse('admin:login')
        self.assertRedirects(response, expected_url, status_code=302)

    def test_registration_activation(self):
        """
        registration activation
        """
        self.user.st.is_verified = False
        self.user.is_active = False
        self.user.save()
        token = UserActivationTokenGenerator().generate(self.user)
        response = self.client.get(
            reverse(
                'spirit:user:auth:registration-activation',
                kwargs={'pk': self.user.pk, 'token': token}
            )
        )
        expected_url = reverse("spirit:user:auth:login")
        self.assertRedirects(response, expected_url, status_code=302)
        # Visiting the activation link activates the account.
        self.assertTrue(User.objects.get(pk=self.user.pk).is_active)

    def test_registration_activation_invalid(self):
        """
        Activation token should not work if user is verified
        ActiveUserMiddleware required
        """
        self.user.st.is_verified = False
        token = UserActivationTokenGenerator().generate(self.user)
        utils.login(self)
        # Token was generated while unverified; mark verified afterwards.
        User.objects.filter(pk=self.user.pk).update(is_active=False)
        UserProfile.objects.filter(user__pk=self.user.pk).update(is_verified=True)
        response = self.client.get(
            reverse(
                'spirit:user:auth:registration-activation',
                kwargs={'pk': self.user.pk, 'token': token}
            )
        )
        expected_url = reverse("spirit:user:auth:login")
        self.assertRedirects(response, expected_url, status_code=302)
        # Account must remain inactive: stale token is rejected.
        self.assertFalse(User.objects.get(pk=self.user.pk).is_active)

    def test_resend_activation_email(self):
        """
        resend_activation_email
        """
        user = utils.create_user(password="foo")

        form_data = {'email': user.email,
                     'password': "foo"}
        response = self.client.post(reverse('spirit:user:auth:resend-activation'),
                                    form_data)
        expected_url = reverse("spirit:user:auth:login")
        self.assertRedirects(response, expected_url, status_code=302)
        self.assertEquals(len(mail.outbox), 1)
        self.assertEquals(mail.outbox[0].subject, _("User activation"))

        # get
        response = self.client.get(reverse('spirit:user:auth:resend-activation'))
        self.assertEquals(response.status_code, 200)

    def test_resend_activation_email_invalid_previously_logged_in(self):
        """
        resend_activation_email invalid if is_verified was set
        """
        user = utils.create_user(password="foo")
        user.st.is_verified = True
        user.st.save()

        form_data = {'email': user.email,
                     'password': "foo"}
        response = self.client.post(reverse('spirit:user:auth:resend-activation'),
                                    form_data)
        self.assertEquals(response.status_code, 302)
        # No email sent for an already-verified account.
        self.assertEquals(len(mail.outbox), 0)

    def test_resend_activation_email_invalid_email(self):
        """
        resend_activation_email invalid password
        """
        utils.create_user(password="foo")

        form_data = {'email': "bad@foo.com", }
        response = self.client.post(reverse('spirit:user:auth:resend-activation'),
                                    form_data)
        self.assertEquals(response.status_code, 302)
        # Unknown email: redirect without sending anything.
        self.assertEquals(len(mail.outbox), 0)

    def test_resend_activation_email_redirect_logged(self):
        """
        resend_activation_email redirect to profile if user is logged in
        """
        utils.login(self)
        response = self.client.get(reverse('spirit:user:auth:resend-activation'))
        expected_url = reverse("spirit:user:update")
        self.assertRedirects(response, expected_url, status_code=302)

    def test_logout(self):
        """
        should log out on POST only
        """
        utils.login(self)

        # get should display confirmation message
        response = self.client.get(reverse('spirit:user:auth:logout'))
        self.assertEqual(response.status_code, 200)
        self.assertTrue(self.client.session.items())

        # post should log out the user (clear the session)
        response = self.client.post(reverse('spirit:user:auth:logout'))
        expected_url = "/"
        self.assertRedirects(response, expected_url, status_code=302)
        self.assertFalse(self.client.session.items())

        # next
        utils.login(self)
        self.assertTrue(self.client.session.items())
        response = self.client.post(reverse('spirit:user:auth:logout') + '?next=/fakepath/')
        self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)
        self.assertFalse(self.client.session.items())

    def test_logout_anonymous_redirect(self):
        """
        should log out on POST only
        """
        # redirect to login if user is anonymous
        response = self.client.get(reverse('spirit:user:auth:logout'))
        expected_url = reverse("spirit:user:auth:login")
        self.assertRedirects(response, expected_url, status_code=302)

        # next if user is anonymous
        response = self.client.get(reverse('spirit:user:auth:logout') + '?next=/fakepath/')
        self.assertRedirects(response, '/fakepath/', status_code=302, target_status_code=404)

    def test_is_suspended_until(self):
        """Suspended users can POST login but get logged out immediately."""
        self.user.st.is_suspended_until = timezone.now() + datetime.timedelta(days=1)
        self.user.st.save()

        # try to login
        form_data = {'username': self.user.email, 'password': "bar"}
        response = self.client.post(reverse('spirit:user:auth:login'), form_data)
        self.assertEqual(response.status_code, 302)

        # we were actually logged out immediately after
        response = self.client.get(reverse('spirit:user:update'))
        self.assertRedirects(response, '%s?next=%s' % (reverse('spirit:user:auth:login'), reverse('spirit:user:update')), status_code=302)

        # not suspended any more
        self.user.st.is_suspended_until = timezone.now()
        self.user.st.save()
        response = self.client.post(reverse('spirit:user:auth:login'), form_data)
        self.assertRedirects(response, reverse("spirit:user:update"), status_code=302)
        response = self.client.get(reverse('spirit:user:update'))
        self.assertEqual(response.status_code, 200)
class UserFormTest(TestCase):
    """Form-level tests for RegistrationForm: validation, honeypot,
    email uniqueness/confirmation and password rules.

    (The class continues past this view; further methods follow.)
    """

    def setUp(self):
        utils.cache_clear()
        self.user = utils.create_user()

    def test_registration(self):
        """
        register
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), True)

    def test_registration_login(self):
        """
        Register and login
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), True)
        user = form.save()
        # New accounts start inactive until activation.
        self.assertFalse(user.is_active)
        user.is_active = True
        user.save()
        utils.login(self, user=user, password='pass123')  # Asserts if can't login

    def test_registration_email_required(self):
        """
        Registration should require the email field
        """
        form_data = {'username': 'foo',
                     'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)
        self.assertIn('email', form.errors)

    def test_registration_invalid(self):
        """
        invalid email and user
        """
        # Username/email already taken by an existing user.
        User.objects.create_user(username="foo", password="bar", email="foo@foo.com")
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)
        self.assertNotIn('username', form.cleaned_data)
        self.assertNotIn('foo@foo.com', form.cleaned_data)

    def test_registration_password_invalid(self):
        """
        invalid password length
        """
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'password1': 'pass', 'password2': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)

    def test_registration_honeypot(self):
        """
        registration honeypot
        """
        # Filling the hidden honeypot field marks the submitter as a bot.
        form_data = {'username': 'foo', 'email': 'foo@foo.com',
                     'password1': 'pass123', 'password2': 'pass123',
                     'honeypot': 'im a robot'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)
        self.assertNotIn('honeypot', form.cleaned_data)

    def test_registration_email_duplication(self):
        """
        register, don't allow email duplication
        """
        utils.create_user(email='duplicated@bar.com')
        form_data = {'username': 'foo', 'email': 'duplicated@bar.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)
        self.assertNotIn('email', form.cleaned_data)

    @override_settings(ST_UNIQUE_EMAILS=False)
    def test_registration_email_duplication_allowed(self):
        """
        Duplicated email allowed
        """
        utils.create_user(email='duplicated@bar.com')
        form_data = {'username': 'foo', 'email': 'duplicated@bar.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), True)

    def test_registration_email_confirmation(self):
        """
        Confirmation email should match email
        """
        form_data = {'username': 'foo', 'email': 'foo@bar.com',
                     'email2': 'foofoo@bar.com', 'password': 'pass'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), False)
        self.assertNotIn('email2', form.cleaned_data)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_registration_email_confirmation_case_insensitive(self):
        """
        Confirmation email should match email
        """
        form_data = {'username': 'foo', 'email': 'FOO@bar.com',
                     'password1': 'pass123', 'password2': 'pass123'}
        form = RegistrationForm(data=form_data)
        self.assertEqual(form.is_valid(), True)
@override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
def test_registration_email_confirmation_case_sensitive(self):
"""
Confirmation email should match email
"""
form_data = {'username': 'foo', 'email': 'FOO@bar.com',
'email2': 'FOO@BAR.COM', 'password': 'pass'}
form = RegistrationForm(data=form_data)
self.assertEqual(form.is_valid(), False)
self.assertNotIn('email2', form.cleaned_data)
def test_resend_activation_email(self):
"""
resend activation
"""
user = utils.create_user(email="newfoo@bar.com")
form_data = {'email': 'newfoo@bar.com', }
form = ResendActivationForm(form_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.get_user(), user)
def test_resend_activation_email_invalid_email(self):
"""
resend activation invalid
"""
form_data = {'email': 'bad@bar.com', }
form = ResendActivationForm(form_data)
self.assertFalse(form.is_valid())
def test_resend_activation_email_duplication(self):
"""
Send email to the first *not verified* user found
"""
utils.create_user(email="duplicated@bar.com")
user2 = utils.create_user(email="duplicated@bar.com")
user3 = utils.create_user(email="duplicated@bar.com")
form_data = {'email': 'duplicated@bar.com', }
form = ResendActivationForm(form_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.get_user(), user3)
user3.st.is_verified = True
user3.st.save()
form = ResendActivationForm(form_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.get_user(), user2)
@override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
def test_resend_activation_email_case_insensitive(self):
"""
Should lower the email before checking it
"""
user = utils.create_user(email="newfoo@bar.com")
form_data = {'email': 'NeWfOO@bAr.COM', }
form = ResendActivationForm(form_data)
self.assertTrue(form.is_valid())
self.assertEqual(form.get_user(), user)
@override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
def test_resend_activation_email_case_sensitive(self):
"""
Should NOT lower the email before checking it
"""
utils.create_user(email="newfoo@bar.com")
form_data = {'email': 'NeWfOO@bAr.COM', }
form = ResendActivationForm(form_data)
self.assertFalse(form.is_valid())
self.assertRaises(AttributeError, form.get_user)
def test_login(self):
"""
Should login the user
"""
utils.create_user(username="foobar", password="foo")
form_data = {'username': "foobar", 'password': "foo"}
form = LoginForm(data=form_data)
self.assertTrue(form.is_valid())
def test_login_email(self):
"""
Should login the user by email
"""
utils.create_user(email="foobar@bar.com", password="foo")
form_data = {'username': "foobar@bar.com", 'password': "foo"}
form = LoginForm(data=form_data)
self.assertTrue(form.is_valid())
@override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
def test_login_email_case_sensitive(self):
"""
Should login the user by email
"""
utils.create_user(email="foobar@bar.com", password="foo")
form_data = {'username': "FOOBAR@bar.com", 'password': "foo"}
form = LoginForm(data=form_data)
self.assertFalse(form.is_valid())
@override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
def test_login_email_case_sensitive(self):
"""
Should login the user by email
"""
utils.create_user(email="foobar@bar.com", password="foo")
form_data = {'username': "FOOBAR@bar.com", 'password': "foo"}
form = LoginForm(data=form_data)
self.assertTrue(form.is_valid())
def test_login_invalid(self):
"""
Should not login invalid user
"""
form = LoginForm(data={})
self.assertFalse(form.is_valid())
def test_login_password_invalid(self):
"""
Should not login invalid user
"""
utils.create_user(username="foobar", password="foo")
form_data = {'username': "foobar", 'password': "bad"}
form = LoginForm(data=form_data)
self.assertFalse(form.is_valid())
def test_login_username_invalid(self):
"""
Should not login invalid user
"""
utils.create_user(username="foobar", password="foo")
form_data = {'username': "bad", 'password': "foo"}
form = LoginForm(data=form_data)
self.assertFalse(form.is_valid())
class UserBackendTest(TestCase):
    """Tests for ``EmailAuthBackend``: authenticating with an email address."""

    def setUp(self):
        utils.cache_clear()
        self.user = utils.create_user(email="foobar@bar.com", password="bar")

    def test_email_auth_backend(self):
        # The email doubles as the username credential.
        authenticated = EmailAuthBackend().authenticate(
            username="foobar@bar.com", password="bar")
        self.assertEqual(authenticated, self.user)

    def test_email_auth_backend_email_duplication(self):
        """
        it should NOT authenticate when the email is not unique (current behaviour, sorry)
        """
        utils.create_user(email="duplicated@bar.com", password="foo")
        utils.create_user(email="duplicated@bar.com", password="foo2")
        authenticated = EmailAuthBackend().authenticate(
            username="duplicated@bar.com", password="foo")
        self.assertIsNone(authenticated)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=True)
    def test_email_auth_backend_case_insensitive(self):
        authenticated = EmailAuthBackend().authenticate(
            username="FooBar@bAr.COM", password="bar")
        self.assertEqual(authenticated, self.user)

    @override_settings(ST_CASE_INSENSITIVE_EMAILS=False)
    def test_email_auth_backend_case_sensitive(self):
        authenticated = EmailAuthBackend().authenticate(
            username="FooBar@bAr.COM", password="bar")
        self.assertIsNone(authenticated)
| mit |
amuehlem/misp-modules | misp_modules/modules/expansion/passivetotal.py | 2 | 11426 | import json
import logging
import sys
from passivetotal.common.utilities import is_ip
# Module-level logger mirroring all DEBUG output to stdout.
log = logging.getLogger('passivetotal')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
# Default error payload returned to MISP; handlers overwrite 'error' in place.
misperrors = {'error': 'Error'}
# MISP attribute types this module accepts as input and may emit as output.
mispattributes = {
    'input': ['hostname', 'domain', 'ip-src', 'ip-dst',
              'x509-fingerprint-sha1', 'email-src', 'email-dst',
              'target-email', 'whois-registrant-email',
              'whois-registrant-phone', 'text', 'whois-registrant-name',
              'whois-registrar', 'whois-creation-date'],
    'output': ['hostname', 'domain', 'ip-src', 'ip-dst',
               'x509-fingerprint-sha1', 'email-src', 'email-dst',
               'target-email', 'whois-registrant-email',
               'whois-registrant-phone', 'text', 'whois-registrant-name',
               'whois-registrar', 'whois-creation-date', 'md5', 'sha1',
               'sha256', 'link']
}
# Metadata surfaced to the MISP module framework.
moduleinfo = {
    'version': '1.0',
    'author': 'Brandon Dixon',
    'description': 'The PassiveTotal MISP expansion module brings the datasets derived from Internet scanning directly into your MISP instance. This module supports passive DNS, historic SSL, WHOIS, and host attributes. In order to use the module, you must have a valid PassiveTotal account username and API key. Registration is free and can be done by visiting https://www.passivetotal.org/register',
    'module-type': ['expansion', 'hover']
}
# Required user-supplied configuration keys (PassiveTotal credentials).
moduleconfig = ['username', 'api_key']
# Dispatch table: which PassiveTotal services run for which attribute types.
query_playbook = [
    {'inputs': ['ip-src', 'ip-dst', 'hostname', 'domain'],
     'services': ['whois', 'ssl', 'dns', 'enrichment'],
     'name': 'generic'},
    {'inputs': ['whois-registrant-email', 'whois-registrant-phone',
                'whois-registrant-name', 'email-src', 'email-dst',
                'target-email'],
     'services': ['whois'],
     'name': 'reverse-whois'},
    {'inputs': ['x509-fingerprint-sha1'],
     'services': ['ssl'],
     'name': 'ssl-history'},
]
def query_finder(request):
    """Find the query value in the client request.

    Scans the supported input attribute types (in ``mispattributes['input']``
    order) and, for the first one present in the request, returns a dict with
    the attribute ``type``, its ``value`` and the matching ``playbook`` entry
    (``None`` when no playbook covers the type).

    Returns an empty dict when the request carries none of the supported
    attributes, so callers doing ``profile.update(...)`` do not crash on the
    implicit ``None`` the previous version produced.
    """
    for item in mispattributes['input']:
        if not request.get(item, None):
            continue
        playbook = None
        for candidate in query_playbook:
            if item in candidate['inputs']:
                playbook = candidate
                break
        return {'type': item, 'value': request.get(item), 'playbook': playbook}
    # No supported attribute found in the request.
    return {}
def build_profile(request):
    """Check the incoming request for a valid configuration.

    On failure, sets ``misperrors['error']`` and returns ``{'success': False}``;
    on success, returns the profile dict merged with the query details from
    :func:`query_finder`.
    """
    failure = {'success': False}
    config = request.get('config', None)
    if not config:
        misperrors['error'] = "Configuration is missing from the request."
        return failure
    # Every required credential key must be present and non-empty.
    missing = [key for key in moduleconfig if not config.get(key, None)]
    if missing:
        misperrors['error'] = "PassiveTotal authentication is missing."
        return failure
    profile = {'success': True, 'config': config}
    profile.update(query_finder(request))
    return profile
def _generate_request_instance(conf, request_type):
    """Automatically generate a request instance to use.

    In the end, this saves us from having to load each request class in a
    explicit way. Loading via a string is helpful to reduce the code per
    call.

    :param request_type: Type of client to load
    :return: Loaded PassiveTotal client
    """
    class_lookup = {'dns': 'DnsRequest', 'whois': 'WhoisRequest',
                    'ssl': 'SslRequest', 'enrichment': 'EnrichmentRequest',
                    'attributes': 'AttributeRequest'}
    klass_name = class_lookup[request_type]
    module = __import__('passivetotal.libs.%s' % request_type,
                        fromlist=[klass_name])
    klass = getattr(module, klass_name)
    # Tag all requests so PassiveTotal can attribute traffic to MISP.
    return klass(conf.get('username'), conf.get('api_key'),
                 headers={'PT-INTEGRATION': 'MISP'})
def _has_error(results):
    """Check to see if there's an error in place and log it."""
    if 'error' not in results:
        return False
    details = results['error']
    # Record the API failure so the handler can surface it to MISP.
    misperrors['error'] = "%s - %s" % (details['message'],
                                       details['developer_message'])
    return True
def process_ssl_details(instance, query):
    """Process details for a specific certificate."""
    log.debug("SSL Details: starting")
    response = instance.get_ssl_certificate_details(query=query)
    if _has_error(response):
        raise Exception("We hit an error, time to bail!")
    # Keep every non-empty certificate field, de-duplicated.
    collected = [field for field in response.values() if field]
    txt = [{'types': ['ssl-cert-attributes'], 'values': list(set(collected))}]
    log.debug("SSL Details: ending")
    return txt
def process_ssl_history(instance, query):
    """Process the history for an SSL certificate.

    Collects every IP address, SHA-1 fingerprint and domain seen across the
    certificate history and maps each bucket onto the matching MISP types.
    """
    log.debug("SSL History: starting")
    # Maps each result bucket onto the MISP attribute types it may represent.
    type_map = {
        'ip': ['ip-src', 'ip-dst'],
        'domain': ['domain', 'hostname'],
        'sha1': ['x509-fingerprint-sha1']
    }
    hits = {'ip': list(), 'sha1': list(), 'domain': list()}
    results = instance.get_ssl_certificate_history(query=query)
    if _has_error(results):
        raise Exception("We hit an error, time to bail!")
    for item in results.get('results', []):
        hits['ip'] += item.get('ipAddresses', [])
        hits['sha1'].append(item['sha1'])
        hits['domain'] += item.get('domains', [])
    tmp = list()
    for key, value in hits.items():
        # De-duplicate before emitting.
        tmp.append({'types': type_map[key], 'values': list(set(value))})
    # Fixed: previously logged "SSL Details: ending" (copy/paste slip).
    log.debug("SSL History: ending")
    return tmp
def process_whois_details(instance, query):
    """Process the detail from the WHOIS record.

    Emits registrant email/phone/name, registrar and creation date as MISP
    attributes when present in the record.
    """
    log.debug("WHOIS Details: starting")
    tmp = list()
    results = instance.get_whois_details(query=query, compact_record=True)
    if _has_error(results):
        raise Exception("We hit an error, time to bail!")
    if results.get('contactEmail', None):
        tmp.append({'types': ['whois-registrant-email'],
                    'values': [results.get('contactEmail')]})
    # Guard the nested 'compact' lookups: records lacking a phone or name
    # previously raised KeyError and aborted the whole enrichment.
    compact = results.get('compact', {})
    phones = compact.get('telephone', {}).get('raw', [])
    tmp.append({'types': ['whois-registrant-phone'], 'values': phones})
    names = compact.get('name', {}).get('raw', [])
    tmp.append({'types': ['whois-registrant-name'], 'values': names})
    if results.get('registrar', None):
        tmp.append({'types': ['whois-registrar'],
                    'values': [results.get('registrar')]})
    if results.get('registered', None):
        tmp.append({'types': ['whois-creation-date'],
                    'values': [results.get('registered')]})
    log.debug("WHOIS Details: ending")
    return tmp
def process_whois_search(instance, query, qtype):
    """Process a WHOIS search for a specific field value.

    Maps the MISP attribute type onto the PassiveTotal WHOIS search field and
    returns the de-duplicated set of domains found.
    """
    log.debug("WHOIS Search: starting")
    # MISP attribute type -> PassiveTotal WHOIS search field.
    field_map = {
        'whois-registrant-email': 'email',
        'email-src': 'email',
        'email-dst': 'email',
        'target-email': 'email',
        'whois-registrant-phone': 'phone',
        'whois-registrant-name': 'name',
    }
    field_type = field_map.get(qtype)
    if field_type is None:
        # Previously an unexpected qtype fell through to an opaque NameError.
        raise ValueError("Unsupported WHOIS search type: %s" % qtype)
    domains = list()
    results = instance.search_whois_by_field(field=field_type, query=query)
    if _has_error(results):
        raise Exception("We hit an error, time to bail!")
    for item in results.get('results', []):
        domain = item.get('domain', None)
        if domain:
            domains.append(domain)
    tmp = [{'types': ['hostname', 'domain'], 'values': list(set(domains))}]
    log.debug("WHOIS Search: ending")
    return tmp
def process_passive_dns(instance, query):
    """Process passive DNS data."""
    log.debug("Passive DNS: starting")
    response = instance.get_unique_resolutions(query=query)
    if _has_error(response):
        raise Exception("We hit an error, time to bail!")
    resolutions = response.get('results', [])
    # An IP query resolves to hostnames; anything else resolves to IPs.
    if is_ip(query):
        attribute_types = ['domain', 'hostname']
    else:
        attribute_types = ['ip-src', 'ip-dst']
    tmp = [{'types': attribute_types, 'values': resolutions}]
    log.debug("Passive DNS: ending")
    return tmp
def process_osint(instance, query):
    """Process OSINT links."""
    log.debug("OSINT: starting")
    response = instance.get_osint(query=query)
    if _has_error(response):
        raise Exception("We hit an error, time to bail!")
    # Each OSINT record contributes its source article URL.
    urls = [entry['sourceUrl'] for entry in response.get('results', [])]
    tmp = [{'types': ['link'], 'values': urls}]
    log.debug("OSINT: ending")
    return tmp
def process_malware(instance, query):
    """Process malware samples.

    Emits the sample source URLs as ``link`` attributes and classifies each
    sample hash by hex-digest length into md5 / sha1 / sha256 buckets.
    """
    log.debug("Malware: starting")
    content = {'hashes': list(), 'urls': list()}
    results = instance.get_malware(query=query)
    if _has_error(results):
        raise Exception("We hit an error, time to bail!")
    for item in results.get('results', []):
        content['hashes'].append(item['sample'])
        content['urls'].append(item['sourceUrl'])
    tmp = [{'types': ['link'], 'values': content['urls']}]
    hashes = {'md5': list(), 'sha1': list(), 'sha256': list()}
    for h in content['hashes']:
        if len(h) == 32:
            hashes['md5'].append(h)
        elif len(h) == 40:
            # Fixed: SHA-1 hex digests are 40 characters; the previous
            # ``len(h) == 41`` check could never match, so sha1 samples were
            # silently dropped.
            hashes['sha1'].append(h)
        elif len(h) == 64:
            hashes['sha256'].append(h)
    tmp += [{'types': ['md5'], 'values': hashes['md5']}]
    tmp += [{'types': ['sha1'], 'values': hashes['sha1']}]
    tmp += [{'types': ['sha256'], 'values': hashes['sha256']}]
    log.debug("Malware: ending")
    return tmp
def handler(q=False):
    """MISP module entry point.

    Parses the request, validates configuration/credentials, builds the
    service clients required by the matched playbook and runs the matching
    enrichment chain. Returns ``{'results': [...]}`` on success or
    ``misperrors`` on failure.
    """
    if not q:
        return q
    request = json.loads(q)
    profile = build_profile(request)
    if not profile['success']:
        log.error(misperrors['error'])
        return misperrors
    if not profile.get('playbook'):
        # The attribute type matched no playbook entry; bail out cleanly
        # instead of raising TypeError on ``profile['playbook']['services']``.
        misperrors['error'] = "Unsupported attribute type."
        log.error(misperrors['error'])
        return misperrors
    output = {'results': list()}
    instances = dict()
    for service in profile['playbook']['services']:
        instances[service] = _generate_request_instance(
            profile['config'], service)
    play_type = profile['playbook']['name']
    query = profile['value']
    qtype = profile['type']
    try:
        if play_type == 'generic':
            output['results'] += process_passive_dns(instances['dns'], query)
            output['results'] += process_whois_details(instances['whois'], query)
            output['results'] += process_ssl_history(instances['ssl'], query)
            output['results'] += process_osint(instances['enrichment'], query)
            output['results'] += process_malware(instances['enrichment'], query)
        elif play_type == 'reverse-whois':
            output['results'] += process_whois_search(
                instances['whois'], query, qtype)
        elif play_type == 'ssl-history':
            output['results'] += process_ssl_details(instances['ssl'], query)
            output['results'] += process_ssl_history(instances['ssl'], query)
        else:
            log.error("Unsupported query pattern issued.")
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # propagate, and the failure is logged instead of vanishing.
        log.exception("PassiveTotal enrichment failed")
        return misperrors
    return output
def introspection():
    """Return the MISP attribute types this module consumes and produces."""
    return mispattributes
def version():
    """Return module metadata with the user-facing config keys attached."""
    moduleinfo['config'] = moduleconfig
    return moduleinfo
| agpl-3.0 |
drmrd/ansible | lib/ansible/modules/remote_management/oneview/oneview_ethernet_network_facts.py | 125 | 4863 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network_facts
short_description: Retrieve the facts about one or more of the OneView Ethernet Networks
description:
- Retrieve the facts about one or more of the Ethernet Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Ethernet Network name.
options:
description:
- "List with options to gather additional facts about an Ethernet Network and related resources.
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather paginated and filtered facts about Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 1
count: 3
sort: 'name:descending'
filter: 'purpose=General'
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: Ethernet network name
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name with options
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: eth1
options:
- associatedProfiles
- associatedUplinkGroups
delegate_to: localhost
- debug: var=enet_associated_profiles
- debug: var=enet_associated_uplink_groups
'''
RETURN = '''
ethernet_networks:
description: Has all the OneView facts about the Ethernet Networks.
returned: Always, but can be null.
type: dict
enet_associated_profiles:
description: Has all the OneView facts about the profiles which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
enet_associated_uplink_groups:
description: Has all the OneView facts about the uplink sets which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EthernetNetworkFactsModule(OneViewModuleBase):
    """Fact-gathering module for OneView Ethernet Networks."""

    argument_spec = dict(
        name=dict(type='str'),
        options=dict(type='list'),
        params=dict(type='dict')
    )

    def __init__(self):
        super(EthernetNetworkFactsModule, self).__init__(
            additional_arg_spec=self.argument_spec)
        self.resource_client = self.oneview_client.ethernet_networks

    def execute_module(self):
        facts = {}
        name = self.module.params['name']
        if name:
            # Name given: look up that single network (list of matches).
            networks = self.resource_client.get_by('name', name)
            if networks and self.module.params.get('options'):
                facts = self.__gather_optional_facts(networks[0])
        else:
            # No name: return every network, honouring pagination params.
            networks = self.resource_client.get_all(**self.facts_params)
        facts['ethernet_networks'] = networks
        return dict(changed=False, ansible_facts=facts)

    def __gather_optional_facts(self, ethernet_network):
        facts = {}
        if self.options.get('associatedProfiles'):
            facts['enet_associated_profiles'] = \
                self.__get_associated_profiles(ethernet_network)
        if self.options.get('associatedUplinkGroups'):
            facts['enet_associated_uplink_groups'] = \
                self.__get_associated_uplink_groups(ethernet_network)
        return facts

    def __get_associated_profiles(self, ethernet_network):
        uris = self.resource_client.get_associated_profiles(
            ethernet_network['uri'])
        return [self.oneview_client.server_profiles.get(uri) for uri in uris]

    def __get_associated_uplink_groups(self, ethernet_network):
        uris = self.resource_client.get_associated_uplink_groups(
            ethernet_network['uri'])
        return [self.oneview_client.uplink_sets.get(uri) for uri in uris]
def main():
    """Ansible module entry point."""
    EthernetNetworkFactsModule().run()
if __name__ == '__main__':
    main()
| gpl-3.0 |
callowayproject/django-filebrowser | filebrowser/templatetags/fb_tags.py | 1 | 2733 | # coding: utf-8
from django import template
from django.utils.encoding import smart_unicode
from django.utils.safestring import mark_safe
# Shared registry for this module's template tags.
register = template.Library()
@register.inclusion_tag('filebrowser/include/_response.html', takes_context=True)
def query_string(context, add=None, remove=None):
    """
    Allows the addition and removal of query string parameters.
    _response.html is just {{ response }}
    Usage:
    http://www.url.com/{% query_string "param_to_add=value, param_to_add=value" "param_to_remove, params_to_remove" %}
    http://www.url.com/{% query_string "" "filter" %}filter={{new_filter}}
    http://www.url.com/{% query_string "sort=value" "sort" %}
    """
    # Written as an inclusion tag to simplify getting the context.
    additions = string_to_dict(add)
    removals = string_to_list(remove)
    params = context['query'].copy()
    return {'response': get_query_string(params, additions, removals)}
def query_helper(query, add=None, remove=None):
    """
    Helper Function for use within views.
    """
    additions = string_to_dict(add)
    removals = string_to_list(remove)
    # Work on a copy so the caller's query dict is left untouched.
    return get_query_string(query.copy(), additions, removals)
def get_query_string(p, new_params=None, remove=None):
    """
    Add and remove query parameters. From `django.contrib.admin`.

    ``remove`` entries are treated as prefixes: every existing key starting
    with one of them is dropped. ``new_params`` entries with a ``None`` value
    delete the matching key; all others are set/overwritten. Returns a
    mark_safe'd '?key=value&...' string.
    """
    if new_params is None:
        new_params = {}
    if remove is None:
        remove = []
    for r in remove:
        # Iterate over a snapshot of the keys: deleting while iterating the
        # live keys view raises RuntimeError on Python 3.
        for k in list(p.keys()):
            if k.startswith(r):
                del p[k]
    for k, v in new_params.items():
        if k in p and v is None:
            del p[k]
        elif v is not None:
            p[k] = v
    return mark_safe('?' + '&'.join([u'%s=%s' % (k, v) for k, v in p.items()]).replace(' ', '%20'))
def string_to_dict(string):
    """
    Parse a comma separated ``key=value`` list into a dict.

    Usage::
        {{ url|thumbnail:"width=10,height=20" }}
        {{ url|thumbnail:"width=10" }}
        {{ url|thumbnail:"height=20" }}
    """
    kwargs = {}
    if not string:
        return kwargs
    for chunk in str(string).split(','):
        chunk = chunk.strip()
        if not chunk:
            continue
        # Only the first '=' separates key from value.
        key, value = chunk.split('=', 1)
        kwargs[key] = value
    return kwargs
def string_to_list(string):
    """
    Split a comma separated string into a list of stripped, non-empty items.

    Usage::
        {{ url|thumbnail:"width,height" }}
    """
    if not string:
        return []
    return [chunk.strip() for chunk in str(string).split(',') if chunk.strip()]
| bsd-3-clause |
guaix-ucm/numina | numina/drps/tests/test_pipelines.py | 3 | 2246 |
import pkg_resources
import pkgutil
from numina.drps.drpsystem import DrpSystem
from numina.core.pipeline import InstrumentDRP, Pipeline
def assert_valid_instrument(instrument):
    """Assert the DRP is well-formed: a 'default' pipeline must exist and
    every pipeline must be registered under its own name."""
    assert isinstance(instrument, InstrumentDRP)
    pipelines = instrument.pipelines
    assert 'default' in pipelines
    for name, pipeline in pipelines.items():
        assert name == pipeline.name
        assert isinstance(pipeline, Pipeline)
def test_fake_pipeline(monkeypatch):
    """DrpSystem must discover DRPs exposed through pkg_resources entry points."""
    def mockreturn(group=None):
        # Build a fake entry point whose loader returns a minimal DRP.
        def fake_loader():
            confs = None
            modes = None
            pipelines = {'default': Pipeline('default', {}, 1)}
            fake = InstrumentDRP('FAKE', confs, modes, pipelines)
            return fake
        ep = pkg_resources.EntryPoint('fake', 'fake.loader')
        # Bypass the real import machinery: module 'fake.loader' does not exist.
        monkeypatch.setattr(ep, 'load', lambda: fake_loader)
        return [ep]
    monkeypatch.setattr(pkg_resources, 'iter_entry_points', mockreturn)
    alldrps = DrpSystem().load().query_all()
    for k, v in alldrps.items():
        assert_valid_instrument(v)
def test_fake_pipeline_alt(drpmocker):
    """A DRP loaded from YAML test data is registered and well-formed."""
    yaml_payload = pkgutil.get_data('numina.drps.tests', 'drptest1.yaml')
    drpmocker.add_drp('TEST1', yaml_payload)
    loaded = DrpSystem().load().query_by_name('TEST1')
    assert loaded is not None
    assert_valid_instrument(loaded)
def test_fake_pipeline_alt2(drpmocker):
    """An observation block resolves to the right recipe via its pipeline."""
    drpdata1 = pkgutil.get_data('numina.drps.tests', 'drptest1.yaml')
    # Minimal observation block referencing the mocked TEST1 instrument.
    ob_to_test = """
id: 4
mode: bias
instrument: TEST1
images:
- ThAr_LR-U.fits
"""
    drpmocker.add_drp('TEST1', drpdata1)
    import yaml
    from numina.core.oresult import obsres_from_dict
    from numina.tests.recipes import BiasRecipe
    oblock = obsres_from_dict(yaml.safe_load(ob_to_test))
    drp = DrpSystem().load().query_by_name(oblock.instrument)
    assert drp is not None
    assert_valid_instrument(drp)
    # The mode name maps to the fully-qualified recipe class path...
    this_pipeline = drp.pipelines[oblock.pipeline]
    expected = 'numina.tests.recipes.BiasRecipe'
    assert this_pipeline.get_recipe(oblock.mode) == expected
    # ...and instantiating it yields a configured BiasRecipe.
    recipe = this_pipeline.get_recipe_object(oblock.mode)
    assert isinstance(recipe, BiasRecipe)
    assert recipe.instrument == "TEST1"
    assert recipe.mode == "bias"
    assert recipe.simulate_error == True
| gpl-3.0 |
kdebrab/pandas | pandas/tests/io/json/test_pandas.py | 2 | 50233 | # -*- coding: utf-8 -*-
# pylint: disable-msg=W0612,E1101
import pytest
from pandas.compat import (range, lrange, StringIO,
OrderedDict, is_platform_32bit)
import os
import numpy as np
from pandas import (Series, DataFrame, DatetimeIndex, Timestamp,
read_json, compat)
from datetime import timedelta
import pandas as pd
import json
from pandas.util.testing import (assert_almost_equal, assert_frame_equal,
assert_series_equal, network,
ensure_clean, assert_index_equal)
import pandas.util.testing as tm
# Shared module-level fixture data; individual tests work on copies.
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])
_intframe = DataFrame(dict((k, v.astype(np.int64))
                           for k, v in compat.iteritems(_seriesd)))
_tsframe = DataFrame(_tsd)
# Frame with a CategoricalIndex: 'bah'/'bar'/'baz' blocks, then 'foo' padding.
_cat_frame = _frame.copy()
cat = ['bah'] * 5 + ['bar'] * 5 + ['baz'] * \
    5 + ['foo'] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name='E')
_cat_frame['E'] = list(reversed(cat))
_cat_frame['sort'] = np.arange(len(_cat_frame), dtype='int64')
_mixed_frame = _frame.copy()
class TestPandasContainer(object):
    @pytest.fixture(scope="function", autouse=True)
    def setup(self, datapath):
        # Build fresh fixture copies for every test so mutations cannot leak
        # between tests sharing the module-level frames.
        self.dirpath = datapath("io", "json", "data")
        self.ts = tm.makeTimeSeries()
        self.ts.name = 'ts'
        self.series = tm.makeStringSeries()
        self.series.name = 'series'
        self.objSeries = tm.makeObjectSeries()
        self.objSeries.name = 'objects'
        self.empty_series = Series([], index=[])
        self.empty_frame = DataFrame({})
        self.frame = _frame.copy()
        self.frame2 = _frame2.copy()
        self.intframe = _intframe.copy()
        self.tsframe = _tsframe.copy()
        self.mixed_frame = _mixed_frame.copy()
        self.categorical = _cat_frame.copy()
        # Run the test, then drop the references so memory is reclaimed.
        yield
        del self.dirpath
        del self.ts
        del self.series
        del self.objSeries
        del self.empty_series
        del self.empty_frame
        del self.frame
        del self.frame2
        del self.intframe
        del self.tsframe
        del self.mixed_frame
def test_frame_double_encoded_labels(self):
df = DataFrame([['a', 'b'], ['c', 'd']],
index=['index " 1', 'index / 2'],
columns=['a \\ b', 'y / z'])
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
assert_frame_equal(df, read_json(df.to_json(orient='columns'),
orient='columns'))
assert_frame_equal(df, read_json(df.to_json(orient='index'),
orient='index'))
df_unser = read_json(df.to_json(orient='records'), orient='records')
assert_index_equal(df.columns, df_unser.columns)
tm.assert_numpy_array_equal(df.values, df_unser.values)
def test_frame_non_unique_index(self):
df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 1],
columns=['x', 'y'])
pytest.raises(ValueError, df.to_json, orient='index')
pytest.raises(ValueError, df.to_json, orient='columns')
assert_frame_equal(df, read_json(df.to_json(orient='split'),
orient='split'))
unser = read_json(df.to_json(orient='records'), orient='records')
tm.assert_index_equal(df.columns, unser.columns)
tm.assert_almost_equal(df.values, unser.values)
unser = read_json(df.to_json(orient='values'), orient='values')
tm.assert_numpy_array_equal(df.values, unser.values)
    def test_frame_non_unique_columns(self):
        # Duplicate column labels: only 'split' and 'values' can round-trip.
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[1, 2],
                       columns=['x', 'x'])
        pytest.raises(ValueError, df.to_json, orient='index')
        pytest.raises(ValueError, df.to_json, orient='columns')
        pytest.raises(ValueError, df.to_json, orient='records')
        assert_frame_equal(df, read_json(df.to_json(orient='split'),
                                         orient='split', dtype=False))
        unser = read_json(df.to_json(orient='values'), orient='values')
        tm.assert_numpy_array_equal(df.values, unser.values)
        # GH4377; duplicate columns not processing correctly
        df = DataFrame([['a', 'b'], ['c', 'd']], index=[
            1, 2], columns=['x', 'y'])
        result = read_json(df.to_json(orient='split'), orient='split')
        assert_frame_equal(result, df)
        def _check(df):
            # Round-trip via 'split' with date conversion on column 'x'.
            result = read_json(df.to_json(orient='split'), orient='split',
                               convert_dates=['x'])
            assert_frame_equal(result, df)
        # Exercise string, float, mixed and datetime payloads under
        # duplicated column labels (GH4377 regression matrix).
        for o in [[['a', 'b'], ['c', 'd']],
                  [[1.5, 2.5], [3.5, 4.5]],
                  [[1, 2.5], [3, 4.5]],
                  [[Timestamp('20130101'), 3.5],
                   [Timestamp('20130102'), 4.5]]]:
            _check(DataFrame(o, index=[1, 2], columns=['x', 'x']))
def test_frame_from_json_to_json(self):
def _check_orient(df, orient, dtype=None, numpy=False,
convert_axes=True, check_dtype=True, raise_ok=None,
sort=None, check_index_type=True,
check_column_type=True, check_numpy_dtype=False):
if sort is not None:
df = df.sort_values(sort)
else:
df = df.sort_index()
# if we are not unique, then check that we are raising ValueError
# for the appropriate orients
if not df.index.is_unique and orient in ['index', 'columns']:
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
if (not df.columns.is_unique and
orient in ['index', 'columns', 'records']):
pytest.raises(
ValueError, lambda: df.to_json(orient=orient))
return
dfjson = df.to_json(orient=orient)
try:
unser = read_json(dfjson, orient=orient, dtype=dtype,
numpy=numpy, convert_axes=convert_axes)
except Exception as detail:
if raise_ok is not None:
if isinstance(detail, raise_ok):
return
raise
if sort is not None and sort in unser.columns:
unser = unser.sort_values(sort)
else:
unser = unser.sort_index()
if dtype is False:
check_dtype = False
if not convert_axes and df.index.dtype.type == np.datetime64:
unser.index = DatetimeIndex(
unser.index.values.astype('i8') * 1e6)
if orient == "records":
# index is not captured in this orientation
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
tm.assert_index_equal(df.columns, unser.columns,
exact=check_column_type)
elif orient == "values":
# index and cols are not captured in this orientation
if numpy is True and df.shape == (0, 0):
assert unser.shape[0] == 0
else:
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
elif orient == "split":
# index and col labels might not be strings
unser.index = [str(i) for i in unser.index]
unser.columns = [str(i) for i in unser.columns]
if sort is None:
unser = unser.sort_index()
tm.assert_almost_equal(df.values, unser.values,
check_dtype=check_numpy_dtype)
else:
if convert_axes:
tm.assert_frame_equal(df, unser, check_dtype=check_dtype,
check_index_type=check_index_type,
check_column_type=check_column_type)
else:
tm.assert_frame_equal(df, unser, check_less_precise=False,
check_dtype=check_dtype)
        def _check_all_orients(df, dtype=None, convert_axes=True,
                               raise_ok=None, sort=None, check_index_type=True,
                               check_column_type=True):
            """Round-trip ``df`` through every orient via ``_check_orient``.

            Covers the full matrix: numpy=False and numpy=True, each with and
            without axis conversion.  When axes are converted, index/column
            type checks are relaxed because JSON keys come back as strings.
            ``raise_ok`` is forwarded so numpy-accelerated paths may fail with
            a known exception type without failing the test.
            """
            # numpy=False
            if convert_axes:
                _check_orient(df, "columns", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "records", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "split", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "index", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "values", dtype=dtype, sort=sort,
                              check_index_type=False, check_column_type=False)
            _check_orient(df, "columns", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "records", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "split", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "index", dtype=dtype,
                          convert_axes=False, sort=sort)
            _check_orient(df, "values", dtype=dtype,
                          convert_axes=False, sort=sort)
            # numpy=True and raise_ok might be not None, so ignore the error
            if convert_axes:
                _check_orient(df, "columns", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "records", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "split", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "index", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
                _check_orient(df, "values", dtype=dtype, numpy=True,
                              raise_ok=raise_ok, sort=sort,
                              check_index_type=False, check_column_type=False)
            _check_orient(df, "columns", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "records", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "split", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "index", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
            _check_orient(df, "values", dtype=dtype, numpy=True,
                          convert_axes=False, raise_ok=raise_ok, sort=sort)
# basic
_check_all_orients(self.frame)
assert self.frame.to_json() == self.frame.to_json(orient="columns")
_check_all_orients(self.intframe, dtype=self.intframe.values.dtype)
_check_all_orients(self.intframe, dtype=False)
# big one
# index and columns are strings as all unserialised JSON object keys
# are assumed to be strings
biggie = DataFrame(np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)])
_check_all_orients(biggie, dtype=False, convert_axes=False)
# dtypes
_check_all_orients(DataFrame(biggie, dtype=np.float64),
dtype=np.float64, convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype=np.int), dtype=np.int,
convert_axes=False)
_check_all_orients(DataFrame(biggie, dtype='U3'), dtype='U3',
convert_axes=False, raise_ok=ValueError)
# categorical
_check_all_orients(self.categorical, sort='sort', raise_ok=ValueError)
# empty
_check_all_orients(self.empty_frame, check_index_type=False,
check_column_type=False)
# time series data
_check_all_orients(self.tsframe)
# mixed data
index = pd.Index(['a', 'b', 'c', 'd', 'e'])
data = {'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': [True, False, True, False, True]}
df = DataFrame(data=data, index=index)
_check_orient(df, "split", check_dtype=False)
_check_orient(df, "records", check_dtype=False)
_check_orient(df, "values", check_dtype=False)
_check_orient(df, "columns", check_dtype=False)
# index oriented is problematic as it is read back in in a transposed
# state, so the columns are interpreted as having mixed data and
# given object dtypes.
# force everything to have object dtype beforehand
_check_orient(df.transpose().transpose(), "index", dtype=False)
    def test_frame_from_json_bad_data(self):
        """read_json must raise on malformed JSON and on split-orient payloads
        whose index/columns/data parts are inconsistent or mis-keyed."""
        pytest.raises(ValueError, read_json, StringIO('{"key":b:a:d}'))
        # too few indices
        json = StringIO('{"columns":["A","B"],'
                        '"index":["2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        pytest.raises(ValueError, read_json, json,
                      orient="split")
        # too many columns
        json = StringIO('{"columns":["A","B","C"],'
                        '"index":["1","2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        pytest.raises(AssertionError, read_json, json,
                      orient="split")
        # bad key
        json = StringIO('{"badkey":["A","B"],'
                        '"index":["2","3"],'
                        '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}')
        with tm.assert_raises_regex(ValueError,
                                    r"unexpected key\(s\): badkey"):
            read_json(json, orient="split")
def test_frame_from_json_nones(self):
df = DataFrame([[1, 2], [4, 5, 6]])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
df = DataFrame([['1', '2'], ['4', '5', '6']])
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), convert_axes=False, dtype=False)
assert unser['2']['0'] is None
unser = read_json(df.to_json(), numpy=False)
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), numpy=False, dtype=False)
assert unser[2][0] is None
unser = read_json(df.to_json(), numpy=False,
convert_axes=False, dtype=False)
assert unser['2']['0'] is None
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = np.inf
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
df.loc[0, 2] = np.NINF
unser = read_json(df.to_json())
assert np.isnan(unser[2][0])
unser = read_json(df.to_json(), dtype=False)
assert np.isnan(unser[2][0])
    @pytest.mark.skipif(is_platform_32bit(),
                        reason="not compliant on 32-bit, xref #15865")
    def test_frame_to_json_float_precision(self):
        """double_precision must round floats to the requested number of
        decimal digits when serialising."""
        df = pd.DataFrame([dict(a_float=0.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=1.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":2.0}}'
        df = pd.DataFrame([dict(a_float=-1.95)])
        encoded = df.to_json(double_precision=1)
        assert encoded == '{"a_float":{"0":-2.0}}'
        df = pd.DataFrame([dict(a_float=0.995)])
        encoded = df.to_json(double_precision=2)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=0.9995)])
        encoded = df.to_json(double_precision=3)
        assert encoded == '{"a_float":{"0":1.0}}'
        df = pd.DataFrame([dict(a_float=0.99999999999999944)])
        encoded = df.to_json(double_precision=15)
        assert encoded == '{"a_float":{"0":1.0}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
pytest.raises(ValueError, df.to_json, orient="garbage")
    def test_frame_empty(self):
        """An all-empty frame must round-trip, and an empty column must
        serialise as an empty JSON object (GH 7445)."""
        df = DataFrame(columns=['jim', 'joe'])
        assert not df._is_mixed_type
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
        # GH 7445
        result = pd.DataFrame({'test': []}, index=[]).to_json(orient='columns')
        expected = '{"test":{}}'
        assert result == expected
    def test_frame_empty_mixedtype(self):
        """An empty frame that is mixed-type (object + int64 columns) must
        round-trip when dtypes are supplied to read_json."""
        # mixed type
        df = DataFrame(columns=['jim', 'joe'])
        df['joe'] = df['joe'].astype('i8')
        assert df._is_mixed_type
        assert_frame_equal(read_json(df.to_json(), dtype=dict(df.dtypes)), df,
                           check_index_type=False)
    def test_frame_mixedtype_orient(self):  # GH10289
        """A mixed-dtype frame must round-trip under every orient; orients
        that drop the index/columns compare against renumbered axes."""
        vals = [[10, 1, 'foo', .1, .01],
                [20, 2, 'bar', .2, .02],
                [30, 3, 'baz', .3, .03],
                [40, 4, 'qux', .4, .04]]
        df = DataFrame(vals, index=list('abcd'),
                       columns=['1st', '2nd', '3rd', '4th', '5th'])
        assert df._is_mixed_type
        right = df.copy()
        for orient in ['split', 'index', 'columns']:
            inp = df.to_json(orient=orient)
            left = read_json(inp, orient=orient, convert_axes=False)
            assert_frame_equal(left, right)
        # 'records' loses the index -> expect a positional index
        right.index = np.arange(len(df))
        inp = df.to_json(orient='records')
        left = read_json(inp, orient='records', convert_axes=False)
        assert_frame_equal(left, right)
        # 'values' loses columns too -> expect positional columns
        right.columns = np.arange(df.shape[1])
        inp = df.to_json(orient='values')
        left = read_json(inp, orient='values', convert_axes=False)
        assert_frame_equal(left, right)
    def test_v12_compat(self):
        """JSON files written by pandas 0.12 (fixtures on disk) must still be
        readable and equal to the reconstructed frame."""
        df = DataFrame(
            [[1.56808523, 0.65727391, 1.81021139, -0.17251653],
             [-0.2550111, -0.08072427, -0.03202878, -0.17581665],
             [1.51493992, 0.11805825, 1.629455, -1.31506612],
             [-0.02765498, 0.44679743, 0.33192641, -0.27885413],
             [0.05951614, -2.69652057, 1.28163262, 0.34703478]],
            columns=['A', 'B', 'C', 'D'],
            index=pd.date_range('2000-01-03', '2000-01-07'))
        df['date'] = pd.Timestamp('19920106 18:21:32.12')
        df.iloc[3, df.columns.get_loc('date')] = pd.Timestamp('20130101')
        df['modified'] = df['date']
        df.iloc[1, df.columns.get_loc('modified')] = pd.NaT
        v12_json = os.path.join(self.dirpath, 'tsframe_v012.json')
        df_unser = pd.read_json(v12_json)
        assert_frame_equal(df, df_unser)
        # same frame without the NaT column, stored as ISO dates
        df_iso = df.drop(['modified'], axis=1)
        v12_iso_json = os.path.join(self.dirpath, 'tsframe_iso_v012.json')
        df_unser_iso = pd.read_json(v12_iso_json)
        assert_frame_equal(df_iso, df_unser_iso)
    def test_blocks_compat_GH9037(self):
        """GH9037: a frame with interleaved float/int/str blocks must survive
        a split-orient round trip with exact values and block layout."""
        index = pd.date_range('20000101', periods=10, freq='H')
        df_mixed = DataFrame(OrderedDict(
            float_1=[-0.92077639, 0.77434435, 1.25234727, 0.61485564,
                     -0.60316077, 0.24653374, 0.28668979, -2.51969012,
                     0.95748401, -1.02970536],
            int_1=[19680418, 75337055, 99973684, 65103179, 79373900,
                   40314334, 21290235, 4991321, 41903419, 16008365],
            str_1=['78c608f1', '64a99743', '13d2ff52', 'ca7f4af2', '97236474',
                   'bde7e214', '1a6bde47', 'b1190be5', '7a669144', '8d64d068'],
            float_2=[-0.0428278, -1.80872357, 3.36042349, -0.7573685,
                     -0.48217572, 0.86229683, 1.08935819, 0.93898739,
                     -0.03030452, 1.43366348],
            str_2=['14f04af9', 'd085da90', '4bcfac83', '81504caf', '2ffef4a9',
                   '08e2f5c4', '07e1af03', 'addbd4a7', '1f6a09ba', '4bfc4d87'],
            int_2=[86967717, 98098830, 51927505, 20372254, 12601730, 20884027,
                   34193846, 10561746, 24867120, 76131025]
        ), index=index)
        # JSON deserialisation always creates unicode strings
        df_mixed.columns = df_mixed.columns.astype('unicode')
        df_roundtrip = pd.read_json(df_mixed.to_json(orient='split'),
                                    orient='split')
        assert_frame_equal(df_mixed, df_roundtrip,
                           check_index_type=True,
                           check_column_type=True,
                           check_frame_type=True,
                           by_blocks=True,
                           check_exact=True)
    def test_frame_nonprintable_bytes(self):
        """GH14256: non-printable binary content must raise OverflowError
        (not segfault), and default_handler=str must rescue serialisation."""
        # GH14256: failing column caused segfaults, if it is not the last one
        class BinaryThing(object):
            # holds both the printable hex form and the raw bytes
            def __init__(self, hexed):
                self.hexed = hexed
                if compat.PY2:
                    self.binary = hexed.decode('hex')
                else:
                    self.binary = bytes.fromhex(hexed)

            def __str__(self):
                return self.hexed
        hexed = '574b4454ba8c5eb4f98a8f45'
        binthing = BinaryThing(hexed)
        # verify the proper conversion of printable content
        df_printable = DataFrame({'A': [binthing.hexed]})
        assert df_printable.to_json() == \
            '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
        # check if non-printable content throws appropriate Exception
        df_nonprintable = DataFrame({'A': [binthing]})
        with pytest.raises(OverflowError):
            df_nonprintable.to_json()
        # the same with multiple columns threw segfaults
        df_mixed = DataFrame({'A': [binthing], 'B': [1]},
                             columns=['A', 'B'])
        with pytest.raises(OverflowError):
            df_mixed.to_json()
        # default_handler should resolve exceptions for non-string types
        assert df_nonprintable.to_json(default_handler=str) == \
            '{{"A":{{"0":"{hex}"}}}}'.format(hex=hexed)
        assert df_mixed.to_json(default_handler=str) == \
            '{{"A":{{"0":"{hex}"}},"B":{{"0":1}}}}'.format(hex=hexed)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
df = pd.DataFrame({'bar' * 100000: [1], 'foo': [1337]})
assert df.to_json() == \
'{{"{bar}":{{"0":1}},"foo":{{"0":1337}}}}'.format(
bar=('bar' * 100000))
    def test_series_non_unique_index(self):
        """A Series with duplicate index labels must reject orient='index'
        but still round-trip via 'split' and 'records'."""
        s = Series(['a', 'b'], index=[1, 1])
        pytest.raises(ValueError, s.to_json, orient='index')
        assert_series_equal(s, read_json(s.to_json(orient='split'),
                                         orient='split', typ='series'))
        # 'records' drops the index entirely, so only values can be compared
        unser = read_json(s.to_json(orient='records'),
                          orient='records', typ='series')
        tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_from_json_to_json(self):
def _check_orient(series, orient, dtype=None, numpy=False,
check_index_type=True):
series = series.sort_index()
unser = read_json(series.to_json(orient=orient),
typ='series', orient=orient, numpy=numpy,
dtype=dtype)
unser = unser.sort_index()
if orient == "records" or orient == "values":
assert_almost_equal(series.values, unser.values)
else:
if orient == "split":
assert_series_equal(series, unser,
check_index_type=check_index_type)
else:
assert_series_equal(series, unser, check_names=False,
check_index_type=check_index_type)
def _check_all_orients(series, dtype=None, check_index_type=True):
_check_orient(series, "columns", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype)
_check_orient(series, "columns", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "records", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "split", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "index", dtype=dtype, numpy=True,
check_index_type=check_index_type)
_check_orient(series, "values", dtype=dtype, numpy=True,
check_index_type=check_index_type)
# basic
_check_all_orients(self.series)
assert self.series.to_json() == self.series.to_json(orient="index")
objSeries = Series([str(d) for d in self.objSeries],
index=self.objSeries.index,
name=self.objSeries.name)
_check_all_orients(objSeries, dtype=False)
# empty_series has empty index with object dtype
# which cannot be revert
assert self.empty_series.index.dtype == np.object_
_check_all_orients(self.empty_series, check_index_type=False)
_check_all_orients(self.ts)
# dtype
s = Series(lrange(6), index=['a', 'b', 'c', 'd', 'e', 'f'])
_check_all_orients(Series(s, dtype=np.float64), dtype=np.float64)
_check_all_orients(Series(s, dtype=np.int), dtype=np.int)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
pytest.raises(ValueError, s.to_json, orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ='series', precise_float=True)
assert_series_equal(result, s, check_index_type=False)
    def test_frame_from_json_precise_float(self):
        """precise_float=True must reproduce the original float values when
        parsing a DataFrame back from JSON."""
        df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
        result = read_json(df.to_json(), precise_float=True)
        assert_frame_equal(result, df, check_index_type=False,
                           check_column_type=False)
    def test_typ(self):
        """typ=None must fall back to inferring a Series from Series JSON."""
        s = Series(lrange(6), index=['a', 'b', 'c',
                                     'd', 'e', 'f'], dtype='int64')
        result = read_json(s.to_json(), typ=None)
        assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
assert_frame_equal(result, df)
df = DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, index=['A', 'B', 'C'])
result = read_json(df.to_json())
assert_frame_equal(result, df)
    def test_path(self):
        """to_json/read_json must accept a filesystem path for every fixture
        frame; only absence of errors is checked here."""
        with ensure_clean('test.json') as path:
            for df in [self.frame, self.frame2, self.intframe, self.tsframe,
                       self.mixed_frame]:
                df.to_json(path)
                read_json(path)
    def test_axis_dates(self):
        """Datetime axes must round-trip for both frames and series; the
        series name is not preserved by JSON."""
        # frame
        json = self.tsframe.to_json()
        result = read_json(json)
        assert_frame_equal(result, self.tsframe)
        # series
        json = self.ts.to_json()
        result = read_json(json, typ='series')
        assert_series_equal(result, self.ts, check_names=False)
        assert result.name is None
    def test_convert_dates(self):
        """Date columns must be converted back by default, and stay as raw
        epoch integers when convert_dates=False."""
        # frame
        df = self.tsframe.copy()
        df['date'] = Timestamp('20130101')
        json = df.to_json()
        result = read_json(json)
        assert_frame_equal(result, df)
        df['foo'] = 1.
        json = df.to_json(date_unit='ns')
        result = read_json(json, convert_dates=False)
        expected = df.copy()
        # without conversion dates come back as i8 nanosecond epochs,
        # and the float column is read back as int64
        expected['date'] = expected['date'].values.view('i8')
        expected['foo'] = expected['foo'].astype('int64')
        assert_frame_equal(result, expected)
        # series
        ts = Series(Timestamp('20130101'), index=self.ts.index)
        json = ts.to_json()
        result = read_json(json, typ='series')
        assert_series_equal(result, ts)
    def test_convert_dates_infer(self):
        """GH10747: columns whose names look date-like must be inferred as
        dates, with missing entries becoming NaT."""
        from pandas.io.json import dumps
        infer_words = ['trade_time', 'date', 'datetime', 'sold_at',
                       'modified', 'timestamp', 'timestamps']
        for infer_word in infer_words:
            data = [{'id': 1, infer_word: 1036713600000}, {'id': 2}]
            expected = DataFrame([[1, Timestamp('2002-11-08')], [2, pd.NaT]],
                                 columns=['id', infer_word])
            result = read_json(dumps(data))[['id', infer_word]]
            assert_frame_equal(result, expected)
    def test_date_format_frame(self):
        """ISO date formatting must round-trip a frame (NaT included) at
        every supported date_unit, and reject an invalid unit."""
        df = self.tsframe.copy()

        def test_w_date(date, date_unit=None):
            # inject the date column with two NaT holes, then round-trip
            df['date'] = Timestamp(date)
            df.iloc[1, df.columns.get_loc('date')] = pd.NaT
            df.iloc[5, df.columns.get_loc('date')] = pd.NaT
            if date_unit:
                json = df.to_json(date_format='iso', date_unit=date_unit)
            else:
                json = df.to_json(date_format='iso')
            result = read_json(json)
            assert_frame_equal(result, df)

        test_w_date('20130101 20:43:42.123')
        test_w_date('20130101 20:43:42', date_unit='s')
        test_w_date('20130101 20:43:42.123', date_unit='ms')
        test_w_date('20130101 20:43:42.123456', date_unit='us')
        test_w_date('20130101 20:43:42.123456789', date_unit='ns')
        pytest.raises(ValueError, df.to_json, date_format='iso',
                      date_unit='foo')
    def test_date_format_series(self):
        """ISO date formatting must round-trip a series (NaT included) at
        every supported date_unit, and reject an invalid unit."""
        def test_w_date(date, date_unit=None):
            # timestamp series with two NaT holes, round-tripped as ISO
            ts = Series(Timestamp(date), index=self.ts.index)
            ts.iloc[1] = pd.NaT
            ts.iloc[5] = pd.NaT
            if date_unit:
                json = ts.to_json(date_format='iso', date_unit=date_unit)
            else:
                json = ts.to_json(date_format='iso')
            result = read_json(json, typ='series')
            assert_series_equal(result, ts)

        test_w_date('20130101 20:43:42.123')
        test_w_date('20130101 20:43:42', date_unit='s')
        test_w_date('20130101 20:43:42.123', date_unit='ms')
        test_w_date('20130101 20:43:42.123456', date_unit='us')
        test_w_date('20130101 20:43:42.123456789', date_unit='ns')
        ts = Series(Timestamp('20130101 20:43:42.123'), index=self.ts.index)
        pytest.raises(ValueError, ts.to_json, date_format='iso',
                      date_unit='foo')
    def test_date_unit(self):
        """Epoch-formatted dates must round-trip whether the unit is forced
        on read or auto-detected (date_unit=None)."""
        df = self.tsframe.copy()
        df['date'] = Timestamp('20130101 20:43:42')
        dl = df.columns.get_loc('date')
        # include pre-epoch, far-future and NaT values
        df.iloc[1, dl] = Timestamp('19710101 20:43:42')
        df.iloc[2, dl] = Timestamp('21460101 20:43:42')
        df.iloc[4, dl] = pd.NaT

        for unit in ('s', 'ms', 'us', 'ns'):
            json = df.to_json(date_format='epoch', date_unit=unit)

            # force date unit
            result = read_json(json, date_unit=unit)
            assert_frame_equal(result, df)

            # detect date unit
            result = read_json(json, date_unit=None)
            assert_frame_equal(result, df)
    def test_weird_nested_json(self):
        """Deeply nested JSON must parse without crashing; only absence of a
        parser failure is checked."""
        # this used to core dump the parser
        s = r'''{
        "status": "success",
        "data": {
        "posts": [
            {
            "id": 1,
            "title": "A blog post",
            "body": "Some useful content"
            },
            {
            "id": 2,
            "title": "Another blog post",
            "body": "More content"
            }
           ]
          }
        }'''
        read_json(s)
    def test_doc_example(self):
        """Round-trips the mixed-dtype example frame from the docs with an
        explicit dtype mapping."""
        dfj2 = DataFrame(np.random.randn(5, 2), columns=list('AB'))
        dfj2['date'] = Timestamp('20130101')
        dfj2['ints'] = lrange(5)
        dfj2['bools'] = True
        dfj2.index = pd.date_range('20130101', periods=5)

        json = dfj2.to_json()
        result = read_json(json, dtype={'ints': np.int64, 'bools': np.bool_})
        # NOTE(review): this compares result against itself, which can never
        # fail -- presumably `assert_frame_equal(result, dfj2)` was intended;
        # confirm before changing, a real comparison may expose dtype drift.
        assert_frame_equal(result, result)
    def test_misc_example(self):
        """numpy=True cannot reorder unordered record keys (index mismatch is
        expected); the default path parses the same input correctly."""
        # parsing unordered input fails
        result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]', numpy=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])

        error_msg = """DataFrame\\.index are different

DataFrame\\.index values are different \\(100\\.0 %\\)
\\[left\\]:  Index\\(\\[u?'a', u?'b'\\], dtype='object'\\)
\\[right\\]: RangeIndex\\(start=0, stop=2, step=1\\)"""
        with tm.assert_raises_regex(AssertionError, error_msg):
            assert_frame_equal(result, expected, check_index_type=False)

        result = read_json('[{"a": 1, "b": 2}, {"b":2, "a" :1}]')
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    @network
    def test_round_trip_exception_(self):
        """GH 3867: a real-world CSV-derived frame must round-trip through
        JSON (requires network access for the fixture)."""
        # GH 3867
        csv = 'https://raw.github.com/hayd/lahman2012/master/csvs/Teams.csv'
        df = pd.read_csv(csv)
        s = df.to_json()
        result = pd.read_json(s)
        # reindex because JSON does not guarantee row/column order
        assert_frame_equal(result.reindex(
            index=df.index, columns=df.columns), df)
    @network
    def test_url(self):
        """read_json must fetch from a URL and convert date-like columns to
        datetime64[ns] (requires network access)."""
        url = 'https://api.github.com/repos/pandas-dev/pandas/issues?per_page=5'  # noqa
        result = read_json(url, convert_dates=True)
        for c in ['created_at', 'closed_at', 'updated_at']:
            assert result[c].dtype == 'datetime64[ns]'
    def test_timedelta(self):
        """Timedeltas serialise as millisecond epochs; converting them back
        with to_timedelta must reproduce the original data."""
        converter = lambda x: pd.to_timedelta(x, unit='ms')

        s = Series([timedelta(23), timedelta(seconds=5)])
        assert s.dtype == 'timedelta64[ns]'

        result = pd.read_json(s.to_json(), typ='series').apply(converter)
        assert_series_equal(result, s)

        s = Series([timedelta(23), timedelta(seconds=5)],
                   index=pd.Index([0, 1]))
        assert s.dtype == 'timedelta64[ns]'
        result = pd.read_json(s.to_json(), typ='series').apply(converter)
        assert_series_equal(result, s)

        frame = DataFrame([timedelta(23), timedelta(seconds=5)])
        assert frame[0].dtype == 'timedelta64[ns]'
        assert_frame_equal(frame, pd.read_json(frame.to_json())
                           .apply(converter))

        # mixed frame: only the timedelta/datetime columns need conversion
        frame = DataFrame({'a': [timedelta(days=23), timedelta(seconds=5)],
                           'b': [1, 2],
                           'c': pd.date_range(start='20130101', periods=2)})

        result = pd.read_json(frame.to_json(date_unit='ns'))
        result['a'] = pd.to_timedelta(result.a, unit='ns')
        result['c'] = pd.to_datetime(result.c)
        assert_frame_equal(frame, result)
    def test_mixed_timedelta_datetime(self):
        """A column mixing timedelta and datetime objects serialises to their
        nanosecond integer values when read back as int64."""
        frame = DataFrame({'a': [timedelta(23), pd.Timestamp('20130101')]},
                          dtype=object)

        expected = DataFrame({'a': [pd.Timedelta(frame.a[0]).value,
                                    pd.Timestamp(frame.a[1]).value]})
        result = pd.read_json(frame.to_json(date_unit='ns'),
                              dtype={'a': 'int64'})
        assert_frame_equal(result, expected, check_index_type=False)
def test_default_handler(self):
value = object()
frame = DataFrame({'a': [7, value]})
expected = DataFrame({'a': [7, str(value)]})
result = pd.read_json(frame.to_json(default_handler=str))
assert_frame_equal(expected, result, check_index_type=False)
    def test_default_handler_indirect(self):
        """default_handler may return nested structures (not just strings);
        they must be serialised recursively."""
        from pandas.io.json import dumps

        def default(obj):
            # encode complex numbers as a tagged list-of-pairs structure
            if isinstance(obj, complex):
                return [('mathjs', 'Complex'),
                        ('re', obj.real),
                        ('im', obj.imag)]
            return str(obj)
        df_list = [9, DataFrame({'a': [1, 'STR', complex(4, -5)],
                                 'b': [float('nan'), None, 'N/A']},
                                columns=['a', 'b'])]
        expected = ('[9,[[1,null],["STR",null],[[["mathjs","Complex"],'
                    '["re",4.0],["im",-5.0]],"N\\/A"]]]')
        assert dumps(df_list, default_handler=default,
                     orient="values") == expected
    def test_default_handler_numpy_unsupported_dtype(self):
        """GH12554: complex dtypes (unsupported by ujson) must fall back to
        default_handler instead of raising 'Unhandled numpy dtype'."""
        # GH12554 to_json raises 'Unhandled numpy dtype 15'
        df = DataFrame({'a': [1, 2.3, complex(4, -5)],
                        'b': [float('nan'), None, complex(1.2, 0)]},
                       columns=['a', 'b'])
        expected = ('[["(1+0j)","(nan+0j)"],'
                    '["(2.3+0j)","(nan+0j)"],'
                    '["(4-5j)","(1.2+0j)"]]')
        assert df.to_json(default_handler=str, orient="values") == expected
    def test_default_handler_raises(self):
        """An exception raised inside default_handler must propagate out of
        to_json unchanged."""
        def my_handler_raises(obj):
            raise TypeError("raisin")
        pytest.raises(TypeError,
                      DataFrame({'a': [1, 2, object()]}).to_json,
                      default_handler=my_handler_raises)
        pytest.raises(TypeError,
                      DataFrame({'a': [1, 2, complex(4, -5)]}).to_json,
                      default_handler=my_handler_raises)
def test_categorical(self):
# GH4377 df.to_json segfaults with non-ndarray blocks
df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})
df["B"] = df["A"]
expected = df.to_json()
df["B"] = df["A"].astype('category')
assert expected == df.to_json()
s = df["A"]
sc = df["B"]
assert s.to_json() == sc.to_json()
    def test_datetime_tz(self):
        """GH4377: tz-aware datetimes must serialise identically to their
        UTC-converted naive equivalents (and must not segfault)."""
        # GH4377 df.to_json segfaults with non-ndarray blocks
        tz_range = pd.date_range('20130101', periods=3, tz='US/Eastern')
        tz_naive = tz_range.tz_convert('utc').tz_localize(None)

        df = DataFrame({
            'A': tz_range,
            'B': pd.date_range('20130101', periods=3)})

        df_naive = df.copy()
        df_naive['A'] = tz_naive
        expected = df_naive.to_json()
        assert expected == df.to_json()

        stz = Series(tz_range)
        s_naive = Series(tz_naive)
        assert stz.to_json() == s_naive.to_json()
    def test_sparse(self):
        """GH4377: sparse containers must serialise identically to their
        dense equivalents (and must not segfault).

        NOTE(review): ``to_sparse`` was deprecated and later removed from
        pandas; this test only runs against the pandas version vendored here.
        """
        # GH4377 df.to_json segfaults with non-ndarray blocks
        df = pd.DataFrame(np.random.randn(10, 4))
        df.loc[:8] = np.nan
        sdf = df.to_sparse()

        expected = df.to_json()
        assert expected == sdf.to_json()

        s = pd.Series(np.random.randn(10))
        s.loc[:8] = np.nan
        ss = s.to_sparse()

        expected = s.to_json()
        assert expected == ss.to_json()
def test_tz_is_utc(self):
from pandas.io.json import dumps
exp = '"2013-01-10T05:00:00.000Z"'
ts = Timestamp('2013-01-10 05:00:00Z')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00', tz='US/Eastern')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
ts = Timestamp('2013-01-10 00:00:00-0500')
assert dumps(ts, iso_dates=True) == exp
dt = ts.to_pydatetime()
assert dumps(dt, iso_dates=True) == exp
def test_tz_range_is_utc(self):
from pandas.io.json import dumps
exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'
dfexp = ('{"DT":{'
'"0":"2013-01-01T05:00:00.000Z",'
'"1":"2013-01-02T05:00:00.000Z"}}')
tz_range = pd.date_range('2013-01-01 05:00:00Z', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00', periods=2,
tz='US/Eastern')
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
tz_range = pd.date_range('2013-01-01 00:00:00-0500', periods=2)
assert dumps(tz_range, iso_dates=True) == exp
dti = pd.DatetimeIndex(tz_range)
assert dumps(dti, iso_dates=True) == exp
df = DataFrame({'DT': dti})
assert dumps(df, iso_dates=True) == dfexp
def test_read_inline_jsonl(self):
# GH9180
result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)
expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
assert_frame_equal(result, expected)
    def test_read_s3_jsonl(self, s3_resource):
        """GH17200: lines=True must work for s3n:// URLs (uses the
        s3_resource fixture's mocked bucket)."""
        # GH17200
        result = read_json('s3n://pandas-test/items.jsonl', lines=True)
        expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_read_local_jsonl(self):
        """GH17200: lines=True must work when reading from a local file."""
        # GH17200
        with ensure_clean('tmp_items.json') as path:
            with open(path, 'w') as infile:
                infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')
            result = read_json(path, lines=True)
            expected = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
            assert_frame_equal(result, expected)
    def test_read_jsonl_unicode_chars(self):
        """GH15132: non-ascii unicode characters must survive lines=True
        parsing from both a file handle and a plain string."""
        # GH15132: non-ascii unicode characters
        # \u201d == RIGHT DOUBLE QUOTATION MARK

        # simulate file handle
        json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
        json = StringIO(json)
        result = read_json(json, lines=True)
        expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
                             columns=['a', 'b'])
        assert_frame_equal(result, expected)

        # simulate string
        json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'
        result = read_json(json, lines=True)
        expected = DataFrame([[u"foo\u201d", "bar"], ["foo", "bar"]],
                             columns=['a', 'b'])
        assert_frame_equal(result, expected)
    def test_read_json_large_numbers(self):
        """GH18842: integers too large for int64 must parse as floats rather
        than raising or overflowing."""
        # GH18842
        json = '{"articleId": "1404366058080022500245"}'
        json = StringIO(json)
        result = read_json(json, typ="series")
        expected = Series(1.404366e+21, index=['articleId'])
        assert_series_equal(result, expected)

        json = '{"0": {"articleId": "1404366058080022500245"}}'
        json = StringIO(json)
        result = read_json(json)
        expected = DataFrame(1.404366e+21, index=['articleId'], columns=[0])
        assert_frame_equal(result, expected)
    def test_to_jsonl(self):
        """GH9180/GH15096: orient='records', lines=True must emit one record
        per line with correct escaping of quotes and backslashes."""
        # GH9180
        df = DataFrame([[1, 2], [1, 2]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":1,"b":2}\n{"a":1,"b":2}'
        assert result == expected

        df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=['a', 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}'
        assert result == expected
        assert_frame_equal(pd.read_json(result, lines=True), df)

        # GH15096: escaped characters in columns and data
        df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]],
                       columns=["a\\", 'b'])
        result = df.to_json(orient="records", lines=True)
        expected = ('{"a\\\\":"foo\\\\","b":"bar"}\n'
                    '{"a\\\\":"foo\\"","b":"bar"}')
        assert result == expected
        assert_frame_equal(pd.read_json(result, lines=True), df)
    def test_latin_encoding(self):
        """GH 13774: latin-1 encoded data should round-trip through to_json;
        currently skipped because to_json has no encoding parameter."""
        if compat.PY2:
            tm.assert_raises_regex(
                TypeError, r'\[unicode\] is not implemented as a table column')
            return

        # GH 13774
        pytest.skip("encoding not implemented in .to_json(), "
                    "xref #13774")

        values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'a', b'b', b'c'],
                  [b'EE, 17', b'', b'a', b'b', b'c'],
                  [b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'', b'a', b'b', b'c'],
                  [b'\xf8\xfc', b'a', b'b', b'c'],
                  [b'A\xf8\xfc', b'', b'a', b'b', b'c'],
                  [np.nan, b'', b'b', b'c'],
                  [b'A\xf8\xfc', np.nan, b'', b'b', b'c']]

        def _try_decode(x, encoding='latin-1'):
            # decode bytes; pass through anything already decoded (e.g. nan)
            try:
                return x.decode(encoding)
            except AttributeError:
                return x

        # not sure how to remove latin-1 from code in python 2 and 3
        values = [[_try_decode(x) for x in y] for y in values]

        examples = []
        for dtype in ['category', object]:
            for val in values:
                examples.append(Series(val, dtype=dtype))

        def roundtrip(s, encoding='latin-1'):
            with ensure_clean('test.json') as path:
                s.to_json(path, encoding=encoding)
                retr = read_json(path, encoding=encoding)
                assert_series_equal(s, retr, check_categorical=False)

        for s in examples:
            roundtrip(s)
def test_data_frame_size_after_to_json(self):
# GH15344
df = DataFrame({'a': [str(1)]})
size_before = df.memory_usage(index=True, deep=True).sum()
df.to_json()
size_after = df.memory_usage(index=True, deep=True).sum()
assert size_before == size_after
    @pytest.mark.parametrize('data, expected', [
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']),
         {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo'),
         {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
                   index=[['a', 'b'], ['c', 'd']]),
         {'columns': ['a', 'b'], 'data': [[1, 2], [4, 5]]}),
        (Series([1, 2, 3], name='A'),
         {'name': 'A', 'data': [1, 2, 3]}),
        (Series([1, 2, 3], name='A').rename_axis('foo'),
         {'name': 'A', 'data': [1, 2, 3]}),
        (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']]),
         {'name': 'A', 'data': [1, 2]}),
    ])
    def test_index_false_to_json_split(self, data, expected):
        """GH 17394: orient='split' with index=False must omit the index
        key from the emitted JSON for both frames and series."""
        # GH 17394
        # Testing index=False in to_json with orient='split'

        result = data.to_json(orient='split', index=False)
        result = json.loads(result)

        assert result == expected
    @pytest.mark.parametrize('data', [
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b']).rename_axis('foo')),
        (DataFrame([[1, 2], [4, 5]], columns=['a', 'b'],
                   index=[['a', 'b'], ['c', 'd']])),
        (Series([1, 2, 3], name='A')),
        (Series([1, 2, 3], name='A').rename_axis('foo')),
        (Series([1, 2], name='A', index=[['a', 'b'], ['c', 'd']])),
    ])
    def test_index_false_to_json_table(self, data):
        """GH 17394: orient='table' with index=False must emit a schema and
        records equal to build_table_schema/to_dict without the index."""
        # GH 17394
        # Testing index=False in to_json with orient='table'

        result = data.to_json(orient='table', index=False)
        result = json.loads(result)

        expected = {
            'schema': pd.io.json.build_table_schema(data, index=False),
            'data': DataFrame(data).to_dict(orient='records')
        }

        assert result == expected
    @pytest.mark.parametrize('orient', [
        'records', 'index', 'columns', 'values'
    ])
    def test_index_false_error_to_json(self, orient):
        """GH 17394: index=False must raise ValueError for every orient other
        than 'split' and 'table'."""
        # GH 17394
        # Testing error message from to_json with index=False

        df = pd.DataFrame([[1, 2], [4, 5]], columns=['a', 'b'])

        with tm.assert_raises_regex(ValueError, "'index=False' is only "
                                                "valid when 'orient' is "
                                                "'split' or 'table'"):
            df.to_json(orient=orient, index=False)
| bsd-3-clause |
luci/luci-py | appengine/swarming/swarming_bot/bot_code/bot_auth_test.py | 2 | 16472 | #!/usr/bin/env vpython3
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
import json
import logging
import os
import sys
import tempfile
import time
import unittest
import test_env_bot_code
test_env_bot_code.setup_test_env()
# third_party/
from depot_tools import auto_stub
from depot_tools import fix_encoding
import requests
import bot_auth
import remote_client
from utils import file_path
from utils import auth_server
def global_test_setup():
    """One-time, process-wide tweaks for this test module; call before main()."""
    # Terminate HTTP server in tests 50x faster. Impacts performance though, so
    # do it only in tests.
    auth_server._HTTPServer.poll_interval = 0.01
def call_rpc(ctx, account_id, scopes=None, audience=None):
    """Invokes the local LuciLocalAuthService RPC described by `ctx`.

    Pass `scopes` (non-empty) for an OAuth access token, or `audience` for an
    ID token — exactly one of the two. Returns (200, decoded JSON body) on
    success, otherwise (HTTP status code, raw response content).
    """
    payload = {'account_id': account_id, 'secret': ctx['secret']}
    if scopes:
        assert audience is None
        rpc_method = 'GetOAuthToken'
        payload['scopes'] = scopes
    else:
        assert scopes is None
        assert audience is not None
        rpc_method = 'GetIDToken'
        payload['audience'] = audience
    resp = requests.post(
        url='http://127.0.0.1:%d/rpc/LuciLocalAuthService.%s' % (
            ctx['rpc_port'], rpc_method),
        data=json.dumps(payload),
        headers={'Content-Type': 'application/json'})
    if resp.status_code != 200:
        return resp.status_code, resp.content
    return 200, resp.json()
class AuthSystemTest(auto_stub.TestCase):
def setUp(self):
    """Creates a scratch directory for auth params files."""
    super(AuthSystemTest, self).setUp()
    self.tmp_dir = tempfile.mkdtemp(prefix='bot_main')
    # Set by init_auth_system(); stopped in tearDown() if it was started.
    self.auth_sys = None
def tearDown(self):
    """Stops the auth system (if any) and removes the scratch directory."""
    try:
        if self.auth_sys:
            self.auth_sys.stop()
    finally:
        # Always clean up the temp dir, even if stop() raises.
        file_path.rmtree(self.tmp_dir)
    super(AuthSystemTest, self).tearDown()
def init_auth_system(self, auth_params):
    """Serializes `auth_params` to disk and starts bot_auth.AuthSystem on it.

    Returns the value of AuthSystem.start() — the LUCI_CONTEXT 'local_auth'
    dict, or None when no service accounts are exposed (see test_no_auth).
    """
    self.assertIsNone(self.auth_sys)  # allowed to be called only once per test
    params_path = os.path.join(self.tmp_dir, 'auth_params.json')
    with open(params_path, 'w') as f:
        json.dump(auth_params._asdict(), f)
    self.auth_sys = bot_auth.AuthSystem(params_path)
    return self.auth_sys.start()
def test_get_bot_headers(self):
    """get_bot_headers() must echo back (swarming_http_headers, expiry)."""
    # 'get_bot_headers' returns swarming_http_headers.
    exp = int(time.time() + 3600)
    self.init_auth_system(
        bot_auth.AuthParams(
            bot_id='bot_1',
            task_id='task_1',
            swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
            swarming_http_headers_exp=exp,
            bot_service_account='none',
            system_service_account='none',
            task_service_account='none'))
    self.assertEqual(({
        'Authorization': 'Bearer bot-own-token'
    }, exp), self.auth_sys.get_bot_headers())
def test_no_auth(self):
# Not using service accounts at all -> no LUCI_CONTEXT['local_auth'].
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=0,
bot_service_account='none',
system_service_account='none',
task_service_account='none'))
self.assertIsNone(local_auth_ctx)
def test_task_as_bot(self):
exp = int(time.time() + 3600)
# An auth system is configured to use only task account, set to bot's own
# credentials.
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=exp,
bot_service_account='bot-account@example.com',
system_service_account='none',
task_service_account='bot'))
# Note: default_account_id is omitted when it is None.
self.assertEqual(['accounts', 'rpc_port', 'secret'], sorted(local_auth_ctx))
# Only 'task' account is defined (no 'system'). And there's NO default.
self.assertEqual(
[{'id': 'task', 'email': 'bot-account@example.com'}],
local_auth_ctx['accounts'])
self.assertFalse(local_auth_ctx.get('default_account_id'))
# Try to use the local RPC service to grab a 'task' token. Should return
# the token specified by 'swarming_http_headers'.
code, resp = call_rpc(local_auth_ctx, 'task', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual([u'access_token', u'expiry'], sorted(resp))
self.assertEqual(u'bot-own-token', resp['access_token'])
self.assertEqual(exp, resp['expiry'])
# No 'system' token at all.
code, _ = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(404, code)
def test_system_as_bot(self):
exp = int(time.time() + 3600)
# An auth system is configured to use only system account, set to bot's own
# credentials.
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=exp,
bot_service_account='bot-account@example.com',
system_service_account='bot',
task_service_account='none'))
self.assertEqual(['accounts', 'default_account_id', 'rpc_port', 'secret'],
sorted(local_auth_ctx))
# Only 'system' account is defined (no 'task'), and it is default.
self.assertEqual([{
'id': 'system',
'email': 'bot-account@example.com'
}], local_auth_ctx['accounts'])
self.assertEqual('system', local_auth_ctx['default_account_id'])
# Try to use the local RPC service to grab a 'system' token. Should return
# the token specified by 'swarming_http_headers'.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual([u'access_token', u'expiry'], sorted(resp))
self.assertEqual(u'bot-own-token', resp['access_token'])
self.assertEqual(exp, resp['expiry'])
# No 'task' token at all.
code, _ = call_rpc(local_auth_ctx, 'task', scopes=['A', 'B', 'C'])
self.assertEqual(404, code)
def test_system_and_task_as_bot(self):
exp = int(time.time() + 3600)
# An auth system configured to use both system and task accounts, both set
# to bot's own credentials.
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=exp,
bot_service_account='bot-account@example.com',
system_service_account='bot',
task_service_account='bot'))
self.assertEqual(['accounts', 'default_account_id', 'rpc_port', 'secret'],
sorted(local_auth_ctx))
# Both are defined, 'system' is default.
self.assertEqual([
{
'id': 'system',
'email': 'bot-account@example.com'
},
{
'id': 'task',
'email': 'bot-account@example.com'
},
], local_auth_ctx['accounts'])
self.assertEqual('system', local_auth_ctx.get('default_account_id'))
# Both 'system' and 'task' tokens work.
for account_id in ('system', 'task'):
code, resp = call_rpc(local_auth_ctx, account_id, scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual([u'access_token', u'expiry'], sorted(resp))
self.assertEqual(u'bot-own-token', resp['access_token'])
self.assertEqual(exp, resp['expiry'])
def test_using_bot_without_known_email(self):
# An auth system configured to use both system and task accounts, both set
# to bot's own credentials, with email not known.
local_auth_ctx = self.init_auth_system(bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={},
swarming_http_headers_exp=None,
bot_service_account='none',
system_service_account='bot',
task_service_account='bot'))
# Email is not available, as indicated by '-'.
self.assertEqual([
{
'id': 'system',
'email': '-'
},
{
'id': 'task',
'email': '-'
},
], local_auth_ctx['accounts'])
@staticmethod
def mocked_rpc_client(reply):
    """Builds a fake remote client for AuthSystem.set_remote_client().

    Every mint_oauth_token / mint_id_token call is recorded in `.calls` as a
    kwargs dict (including a 'method' key). If `reply` is an Exception it is
    raised on each call; otherwise it is returned as-is.
    """
    class _FakeRemoteClient(object):
        def __init__(self):
            self.calls = []

        def _record(self, **kwargs):
            self.calls.append(kwargs)
            if isinstance(reply, Exception):
                raise reply
            return reply

        def mint_oauth_token(self, **kwargs):
            return self._record(method='mint_oauth_token', **kwargs)

        def mint_id_token(self, **kwargs):
            return self._record(method='mint_id_token', **kwargs)

    return _FakeRemoteClient()
def test_minting_oauth_via_rpc_ok(self):
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
# Email is set.
self.assertEqual(
[{'id': 'system', 'email': 'abc@example.com'}],
local_auth_ctx['accounts'])
expiry = int(time.time() + 3600)
rpc_client = self.mocked_rpc_client({
'service_account': 'abc@example.com',
'access_token': 'blah',
'expiry': expiry,
})
self.auth_sys.set_remote_client(rpc_client)
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual({u'access_token': u'blah', u'expiry': expiry}, resp)
self.assertEqual([{
'method': 'mint_oauth_token',
'account_id': 'system',
'scopes': ('A', 'B', 'C'),
'task_id': 'task_1',
}], rpc_client.calls)
del rpc_client.calls[:]
# The token is cached.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual({u'access_token': u'blah', u'expiry': expiry}, resp)
self.assertFalse(rpc_client.calls)
def test_minting_via_rpc_internal_error(self):
local_auth_ctx = self.init_auth_system(bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
rpc_client = self.mocked_rpc_client(remote_client.InternalError('msg'))
self.auth_sys.set_remote_client(rpc_client)
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(500, code)
self.assertEqual(b'msg\n', resp)
self.assertTrue(rpc_client.calls)
del rpc_client.calls[:]
# The error is NOT cached, another RPC is made.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(500, code)
self.assertTrue(rpc_client.calls)
def test_minting_via_rpc_fatal_error(self):
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
rpc_client = self.mocked_rpc_client(
remote_client.MintTokenError('msg'))
self.auth_sys.set_remote_client(rpc_client)
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual({u'error_message': u'msg', u'error_code': 4}, resp)
self.assertTrue(rpc_client.calls)
del rpc_client.calls[:]
# The error is cached, no RPCs are made.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual({u'error_message': u'msg', u'error_code': 4}, resp)
self.assertFalse(rpc_client.calls)
def test_minting_via_rpc_switching_to_none(self):
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
rpc_client = self.mocked_rpc_client({'service_account': 'none'})
self.auth_sys.set_remote_client(rpc_client)
# Refused.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual({
u'error_code': 1,
u'error_message': u"The task has no 'system' account associated with it"
}, resp)
def test_minting_via_rpc_switching_to_bot(self):
expiry = int(time.time() + 3600)
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=expiry,
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
rpc_client = self.mocked_rpc_client({'service_account': 'bot'})
self.auth_sys.set_remote_client(rpc_client)
# Got bot token instead.
code, resp = call_rpc(local_auth_ctx, 'system', scopes=['A', 'B', 'C'])
self.assertEqual(200, code)
self.assertEqual(
{u'access_token': u'bot-own-token', u'expiry': expiry}, resp)
def test_minting_id_token_via_rpc_ok(self):
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='none',
system_service_account='abc@example.com',
task_service_account='none'))
# Email is set.
self.assertEqual(
[{'id': 'system', 'email': 'abc@example.com'}],
local_auth_ctx['accounts'])
expiry = int(time.time() + 3600)
rpc_client = self.mocked_rpc_client({
'service_account': 'abc@example.com',
'id_token': 'blah',
'expiry': expiry,
})
self.auth_sys.set_remote_client(rpc_client)
code, resp = call_rpc(local_auth_ctx, 'system', audience='example.com')
self.assertEqual(200, code)
self.assertEqual({u'id_token': u'blah', u'expiry': expiry}, resp)
self.assertEqual([{
'method': 'mint_id_token',
'account_id': 'system',
'audience': 'example.com',
'task_id': 'task_1',
}], rpc_client.calls)
del rpc_client.calls[:]
# The token is cached.
code, resp = call_rpc(local_auth_ctx, 'system', audience='example.com')
self.assertEqual(200, code)
self.assertEqual({u'id_token': u'blah', u'expiry': expiry}, resp)
self.assertFalse(rpc_client.calls)
def test_id_token_for_bot_not_supported(self):
local_auth_ctx = self.init_auth_system(
bot_auth.AuthParams(
bot_id='bot_1',
task_id='task_1',
swarming_http_headers={'Authorization': 'Bearer bot-own-token'},
swarming_http_headers_exp=int(time.time() + 3600),
bot_service_account='bot-account@example.com',
system_service_account='bot',
task_service_account='bot'))
code, resp = call_rpc(local_auth_ctx, 'task', audience='example.com')
self.assertEqual(200, code)
self.assertEqual({
u'error_message': u'ID tokens for "bot" account are not supported',
u'error_code': 5,
}, resp)
if __name__ == '__main__':
    fix_encoding.fix_encoding()
    # '-v' anywhere in argv turns on DEBUG logs; otherwise only CRITICAL.
    logging.basicConfig(
        level=logging.DEBUG if '-v' in sys.argv else logging.CRITICAL)
    global_test_setup()
    unittest.main()
| apache-2.0 |
vivovip/simpleknowledgeservice | fuseki/reportSchemes.py | 1 | 13495 | #!/usr/bin/env python
# -*- coding: utf8 -*-
#
# LICENSE:
# This program is free software; you can redistribute it and/or modify it under the terms of
# the GNU Affero General Public License version 3 (AGPL) as published by the Free Software
# Foundation.
# (c) 2015 caregraf
#
import re
import json
import urllib, urllib2
from datetime import datetime
"""
All schemes have one ConceptScheme description. It contains a
mixture of general and per version meta data about a scheme. This
set of routines will print out these details about the schemes
in a Simple Knowledge Service's Triple Store as well as walk down to
the key organizing concepts ("Broader Tops") of the scheme.
Note: the MongoDB SKS has an equivalent of this set of routines
"""
FUSEKI_QUERY_URI = "http://localhost:3030/sks/query"
def reportSchemes():
    """Lists every (graph, scheme) pair in the store, then prints a
    per-scheme report (reportScheme) followed by a per-graph report
    (reportGraph). Python 2 only (print statements, urllib2)."""
    print
    print "######################### Report on Schemes of Fuseki at", FUSEKI_QUERY_URI, "#########################"
    print
    print "Query graphs with schemes ..."
    print
    # One skos:ConceptScheme per named graph; ORDER BY keeps output stable.
    QUERY_GRAPHS = """
    PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
    SELECT DISTINCT ?g ?s WHERE {GRAPH ?g {?s a skos:ConceptScheme}} ORDER BY ?g
    """
    query = QUERY_GRAPHS
    print query
    queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "json"})
    request = urllib2.Request(queryurl)
    graphsNSchemes = [(binding["g"]["value"], sciURIToNSForm(binding["s"]["value"])) for binding in json.loads(urllib2.urlopen(request).read())["results"]["bindings"]]
    print "Have", len(graphsNSchemes), "schemes, each with their own graph:"
    for i, (graph, scheme) in enumerate(graphsNSchemes, 1):
        print "\t", i, scheme, graph
    print
    print "######################### Scheme basics, scheme by scheme ###################"
    for graph, scheme in graphsNSchemes:
        reportScheme(graph, scheme)
        print
    print
    print "######################### Graph basics, graph by graph ###################"
    print "... slower than quick scheme queries as doing a lot of explicit graph walks"
    for graph, scheme in graphsNSchemes:
        reportGraph(graph)
        print
def reportScheme(graph, scheme):
"""
Only interested in the ConceptScheme resource, the one with id
<schemeMN>:scheme
It contains a mixture of meta data about a particular version of a scheme.
As there is one scheme per graph, there will be one ConceptScheme resource
per graph.
"""
print
print "=============== About scheme", scheme, "================"
print
QUERY_SCHEME_DESCRIPTION = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT ?p ?o
FROM <%s>
WHERE {
?s a skos:ConceptScheme ;
?p ?o
}
"""
query = QUERY_SCHEME_DESCRIPTION % graph
print "Query 'ConceptScheme' details ..."
print query
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "json"})
request = urllib2.Request(queryurl)
reply = json.loads(urllib2.urlopen(request).read())
schemeDescription = {}
for binding in reply["results"]["bindings"]:
schemeDescription[sciURIToNSForm(binding["p"]["value"])] = binding["o"]["value"] if binding["o"]["type"] == "literal" else sciURIToNSForm(binding["o"]["value"])
print "Reply ..."
# Leaving out "umlsCUI", "referencedSchemes", "hasTopConcept", "sourceFormat"
print
print "\t-------------- details --------------"
print "\tLabel:", schemeDescription["skos:prefLabel"]
if "skos:definition" in schemeDescription:
print "\tDefinition:"
print "\t\t", schemeDescription["skos:definition"]
print "\tVersion:", schemeDescription["owl:versionInfo"]
print "\tLast update:", schemeDescription["cgkos:lastUpdate"]
print "\tCopyright:"
print "\t\t", schemeDescription["cgkos:copyright"]
"""
Note that the statistics could be explicitly queried but that takes much longer as
you can see in "reportGraph".
ex/ # deprecated:
SELECT (COUNT(*) AS ?noDeprecated)
WHERE {
?c a skos:Concept .
EXISTS {?c owl:deprecated []}
}
"""
print
print "\t-------------- statistics --------------"
# Statistics are from the VoID schema (http://www.w3.org/TR/void/) and Caregraf
# additions. Some include ...
STAT_PREDS = [("void:distinctSubjects", "Subjects"), ("void:triples", "Total Assertions"), ("cgkos:literalTriples", "Property Assertions"), ("cgkos:edgeTriples", "Graph Assertions"), ("cgkos:matched", "Matches"), ("cgkos:broaderTops", "Top Concepts")]
for predInfo in STAT_PREDS:
if predInfo[0] not in schemeDescription:
continue
print "\t" + predInfo[1] + ":", schemeDescription[predInfo[0]]
print
print
"""
Unless a scheme is flat (it has no hierarchy), it will have two or more 'top concepts'. These are the main organizing concepts of a scheme ("Drug", "Dose Form" ... for RxNORM) or ("Clinical Finding", "Substance" for SNOMED).
"""
# Not flat if hasTopConcepts
ASK_IF_TOP_CONCEPTS = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
ASK
FROM <%s>
WHERE {
?s a skos:ConceptScheme ;
skos:hasTopConcept ?tc
}
"""
print "ASK 'Scheme has topConcepts' - is it flat or structured? ..."
query = ASK_IF_TOP_CONCEPTS % graph
print query
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "json"})
request = urllib2.Request(queryurl)
reply = json.loads(urllib2.urlopen(request).read())
if reply["boolean"] == False:
print "... no top concepts so Scheme is Flat. Nothing to report on its breakdown."
print
return
"""
Note: we could just query topConcepts from "hasTopConcept" in Scheme resource
and each of those concepts come with an annotation of how many subordinates they
have.
"""
QUERY_TOP_CONCEPTS_AND_COUNTS = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX cgkos: <http://schemes.caregraf.info/ontology#>
SELECT ?topCLabel (COUNT(DISTINCT ?c) AS ?numberSubordinates)
FROM <%s>
WHERE {
?c cgkos:broaderTop ?topC .
?topC skos:prefLabel ?topCLabel
}
GROUP BY ?topCLabel
ORDER BY DESC(?numberSubordinates)
"""
print "Query 'Top Concepts' and Counts ..."
query = QUERY_TOP_CONCEPTS_AND_COUNTS % graph
print query
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "json"})
request = urllib2.Request(queryurl)
reply = json.loads(urllib2.urlopen(request).read())
print "Reply ..."
print
# TODO: check if has topConcept(s)
mostPopularTopConceptLabel = ""
if not sum(1 for binding in reply["results"]["bindings"] if "topCLabel" in binding):
print "\t------------- Flat Scheme - no top concepts ---------"
else:
print "\t-------------- Top Concepts --------------"
print "\t... the organizing concepts"
print
for i, binding in enumerate(reply["results"]["bindings"], 1):
if i == 1: # most popular BT is the first as sorting
mostPopularTopConceptLabel = binding["topCLabel"]["value"]
print "\t", i, binding["topCLabel"]["value"]
print "\t\tChildren", binding["numberSubordinates"]["value"]
if not mostPopularTopConceptLabel:
raise Exception("Assumed all structured - not flat - schemes have a most popular TC!")
"""
Let's display a child of the most popular broader top, one that hasn't been retired
(deprecated).
All inactive/retired concepts with have the value 'true' for 'deprecated'.
Active concepts won't have a 'deprecated' field.
If a scheme supports matches to other schemes then make sure the example is matched.
"""
print
print "\t-------------- Example (Active) Concept --------------"
DESCRIBE_SAMPLE_CONCEPT = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX cgkos: <http://schemes.caregraf.info/ontology#>
DESCRIBE ?s
FROM <%s>
WHERE {
?tcId skos:prefLabel "%s" .
?s cgkos:broaderTop ?tcId .
FILTER NOT EXISTS {?s owl:deprecated []}
}
LIMIT 1
"""
print "Query ..."
query = DESCRIBE_SAMPLE_CONCEPT % (graph, mostPopularTopConceptLabel)
print query
print
# Note: for DESCRIBE in Jena, "json" now means "json-ld"
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "json"})
request = urllib2.Request(queryurl)
dReply = json.loads(urllib2.urlopen(request).read())
print "Reply ..."
context = dReply["@context"]
print "\t", dReply["prefLabel"], "(" + sciURIToNSForm(dReply["@id"]) + ")"
for pred, value in dReply.iteritems():
if re.match(r'\@', pred): # skip meta or top level preds
continue
# Jena JSON-LD is 'buggy' (july 2015): it doesn't compact boolean preds
if pred not in context:
if not re.match(r'http', pred):
raise Exception("'Buggy' preds should be full https: " + pred)
pred = pred.split("/")[-1]
# Can be "@type": "http://www.w3.org/2001/XMLSchema#boolean" too
if isinstance(context[pred], dict) and context[pred]["@type"] == "@id":
value = sciURIToNSForm(value)
print "\t\t", pred + ":", value
print
def reportGraph(graph):
"""
Report on scheme graph as a graph ie/ totals of resources, types of
resources ie/ applies to any graph.
Brute force queries that can take a while
"""
print
print "=============== About graph", "<" + graph + ">", "as a graph================"
print "... for larger graphs, these take a while as to count every applicable entity (resource or assertion) must be walked"
print
# This is <=> number of documents in MongoDB
print "Query number of typed resources ..."
QUERY_COUNT_TYPED_RESOURCES = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT (COUNT(*) AS ?noTypedResources)
FROM <%s>
WHERE {
?r a []
}
"""
query = QUERY_COUNT_TYPED_RESOURCES % graph
print query
# Note: using 'text' output as this is a report
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "text"})
request = urllib2.Request(queryurl)
print urllib2.urlopen(request).read()
print
print "Query types of resources ..."
print "... always the same two for these Knowledge Graphs, skos:Concept and skos:ConceptScheme"
QUERY_RESOURCE_TYPES = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT DISTINCT ?type
FROM <%s>
WHERE {
?r a ?type
}
"""
query = QUERY_RESOURCE_TYPES % graph
print query
# Note: using 'text' output as this is a report
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "text"})
request = urllib2.Request(queryurl)
print urllib2.urlopen(request).read()
print
print "Query number of graph/edge (object is resource) assertions ..."
print "... takes a while if graph is large"
QUERY_COUNT_GRAPH_ASSERTIONS = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT (COUNT(*) AS ?noGraphAssertions)
FROM <%s>
WHERE {
?r ?p ?o . FILTER isIRI(?o)
}
"""
query = QUERY_COUNT_GRAPH_ASSERTIONS % graph
print query
# Note: using 'text' output as this is a report
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "text"})
request = urllib2.Request(queryurl)
print urllib2.urlopen(request).read()
print
print "Query number of graph (object is string/int/date) assertions ..."
print "... takes a while if graph is large"
QUERY_COUNT_LITERAL_ASSERTIONS = """
PREFIX skos: <http://www.w3.org/2004/02/skos/core#>
SELECT (COUNT(*) AS ?noLiteralAssertions)
FROM <%s>
WHERE {
?r ?p ?o . FILTER isLiteral(?o)
}
"""
query = QUERY_COUNT_LITERAL_ASSERTIONS % graph
print query
# Note: using 'text' output as this is a report
queryurl = FUSEKI_QUERY_URI + "?" + urllib.urlencode({"query": query, "output": "text"})
request = urllib2.Request(queryurl)
print urllib2.urlopen(request).read()
print
def sciURIToNSForm(uri):
    """Pretty-ups most concept URIs by replacing full expansion with namespaced form.

    Ex/ http://schemes.caregraf.info/rxnorm/scheme -> rxnorm:scheme

    Non-http strings and URIs outside the known namespaces are returned
    unchanged.
    """
    if not uri.startswith('http'):
        return uri
    # BUGFIX: the old code fed these prefixes to re.match()/re.sub() as regex
    # patterns without escaping, so every '.' matched any character and
    # re.sub() could replace matches anywhere in the string. Plain string
    # operations are both correct and faster.
    SMAP = {"http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf", "http://www.w3.org/2004/02/skos/core#": "skos", "http://rdfs.org/ns/void#": "void", "http://schemes.caregraf.info/ontology#": "cgkos", "http://www.w3.org/2002/07/owl#": "owl"}
    for prefix in SMAP:
        if uri.startswith(prefix):
            return SMAP[prefix] + ":" + uri[len(prefix):]
    if "schemes.caregraf.info" not in uri:
        return uri
    # e.g. http://schemes.caregraf.info/rxnorm/scheme -> rxnorm:scheme
    return uri.split("info/")[1].replace("/", ":")
# ############################# Driver ####################################
def main():
    """Script entry point: report on all schemes in the configured store."""
    reportSchemes()

if __name__ == "__main__":
    main()
| apache-2.0 |
zigdon/evelink | evelink/eve.py | 1 | 17608 | from evelink import api
class EVE(object):
"""Wrapper around /eve/ of the EVE API."""
@api.auto_api
def __init__(self, api=None):
    """Create an EVE endpoint wrapper.

    `api` is the evelink API client to issue requests through; it is
    presumably supplied by the @api.auto_api decorator when omitted —
    confirm against evelink.api.
    """
    self.api = api
@api.auto_call('eve/CharacterName', map_params={'id_list': 'IDs'})
def character_names_from_ids(self, id_list, api_result=None):
    """Retrieve a dict mapping character IDs to names.

    id_list:
        A list of ids to retrieve names.

    NOTE: *ALL* character IDs passed to this function
    must be valid - an invalid character ID will cause
    the entire call to fail.
    """
    if api_result is None:
        # The API doesn't actually tell us which character IDs are invalid
        raise ValueError(
            "One or more of these character IDs are invalid: %r" % id_list)
    rows = api_result.result.find('rowset').findall('row')
    results = dict(
        (int(row.attrib['characterID']), row.attrib['name'])
        for row in rows)
    return api.APIResult(results, api_result.timestamp, api_result.expires)
def character_name_from_id(self, char_id):
    """Retrieve the character's name based on ID (None if unknown).

    Convenience wrapper around character_names_from_ids().
    """
    api_result = self.character_names_from_ids([char_id])
    # int() the key since callers may pass the id as a string.
    return api.APIResult(api_result.result.get(int(char_id)), api_result.timestamp, api_result.expires)
@api.auto_call('eve/CharacterID', map_params={'name_list': 'names'})
def character_ids_from_names(self, name_list, api_result=None):
    """Retrieve a dict mapping character names to IDs.

    name_list:
        A list of names to retrieve character IDs.

    Names of unknown characters will map to None.
    """
    results = {}
    rowset = api_result.result.find('rowset')
    for row in rowset.findall('row'):
        attrs = row.attrib
        # The API reports characterID 0 for unknown names; expose as None.
        results[attrs['name']] = int(attrs['characterID']) or None
    return api.APIResult(results, api_result.timestamp, api_result.expires)
def character_id_from_name(self, name):
    """Retrieve the named character's ID (None if the name is unknown).

    Convenience wrapper around character_ids_from_names().
    """
    api_result = self.character_ids_from_names([name])
    # Single-entry dict: grab its only value.
    return api.APIResult(list(api_result.result.values())[0], api_result.timestamp, api_result.expires)
@api.auto_call('eve/CharacterAffiliation', map_params={'id_list': 'ids'})
def affiliations_for_characters(self, id_list, api_result=None):
"""Retrieve the affiliations for a set of character IDs, returned as a dictionary.
name_list:
A list of names to retrieve IDs for.
IDs for anything not a character will be returned with a name, but nothing else.
"""
rowset = api_result.result.find('rowset')
rows = rowset.findall('row')
results = {}
for row in rows:
char_id = int(row.attrib['characterID'])
char_name = row.attrib['characterName']
corp_id = int(row.attrib['corporationID']) or None
corp_name = row.attrib['corporationName'] or None
faction_id = int(row.attrib['factionID']) or None
faction_name = row.attrib['factionName'] or None
alliance_id = int(row.attrib['allianceID']) or None
alliance_name = row.attrib['allianceName'] or None
results[char_id] = {
'id': char_id,
'name': char_name,
'corp': {
'id': corp_id,
'name': corp_name
}
}
if faction_id is not None:
results[char_id]['faction'] = {
'id': faction_id,
'name': faction_name
}
if alliance_id is not None:
results[char_id]['alliance'] = {
'id': alliance_id,
'name': alliance_name
}
return api.APIResult(results, api_result.timestamp, api_result.expires)
def affiliations_for_character(self, char_id):
    """Retrieve the affiliations of a single character.

    Convenience wrapper around affiliations_for_characters().
    (The old docstring wrongly named owner_ids_from_names.)
    """
    api_result = self.affiliations_for_characters([char_id])
    return api.APIResult(api_result.result[char_id], api_result.timestamp, api_result.expires)
@api.auto_call('eve/CharacterInfo', map_params={'char_id': 'characterID'})
def character_info_from_id(self, char_id, api_result=None):
"""Retrieve a dict of info about the designated character."""
if api_result is None:
raise ValueError("Unable to fetch info for character %r" % char_id)
_str, _int, _float, _bool, _ts = api.elem_getters(api_result.result)
results = {
'id': _int('characterID'),
'name': _str('characterName'),
'race': _str('race'),
'bloodline': _str('bloodline'),
'sec_status': _float('securityStatus'),
'skillpoints': _int('skillPoints'),
'location': _str('lastKnownLocation'),
'isk': _float('accountBalance'),
'corp': {
'id': _int('corporationID'),
'name': _str('corporation'),
'timestamp': _ts('corporationDate'),
},
'alliance': {
'id': _int('allianceID'),
'name': _str('alliance'),
'timestamp': _ts('allianceDate'),
},
'ship': {
'name': _str('shipName'),
'type_id': _int('shipTypeID'),
'type_name': _str('shipTypeName'),
},
'history': [],
}
# Add in corp history
history = api_result.result.find('rowset')
for row in history.findall('row'):
corp_id = int(row.attrib['corporationID'])
corp_name = row.attrib['corporationName']
start_date = api.parse_ts(row.attrib['startDate'])
results['history'].append({
'corp_id': corp_id,
'corp_name': corp_name,
'start_ts': start_date,
})
return api.APIResult(results, api_result.timestamp, api_result.expires)
@api.auto_call('eve/AllianceList')
def alliances(self, api_result=None):
"""Return a dict of all alliances in EVE."""
results = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
alliance = {
'name': row.attrib['name'],
'ticker': row.attrib['shortName'],
'id': int(row.attrib['allianceID']),
'executor_id': int(row.attrib['executorCorpID']),
'member_count': int(row.attrib['memberCount']),
'timestamp': api.parse_ts(row.attrib['startDate']),
'member_corps': {},
}
corp_rowset = row.find('rowset')
for corp_row in corp_rowset.findall('row'):
corp_id = int(corp_row.attrib['corporationID'])
corp_ts = api.parse_ts(corp_row.attrib['startDate'])
alliance['member_corps'][corp_id] = {
'id': corp_id,
'timestamp': corp_ts,
}
results[alliance['id']] = alliance
return api.APIResult(results, api_result.timestamp, api_result.expires)
@api.auto_call('eve/ErrorList')
def errors(self, api_result=None):
    """Return a mapping of error codes (int) to their message strings."""
    error_rows = api_result.result.find('rowset').findall('row')
    results = dict(
        (int(row.attrib['errorCode']), row.attrib['errorText'])
        for row in error_rows)
    return api.APIResult(results, api_result.timestamp, api_result.expires)
    @api.auto_call('eve/FacWarStats')
    def faction_warfare_stats(self, api_result=None):
        """Return various statistics from Faction Warfare.

        The result contains global 'kills' and 'points' totals, a
        'factions' dict keyed by faction ID, and a 'wars' list of
        faction-vs-faction pairs currently at war.
        """
        totals = api_result.result.find('totals')
        # Index the sibling rowsets by their 'name' attribute so the
        # 'factions' and 'factionWars' rowsets can be pulled out below.
        rowsets = dict((r.attrib['name'], r) for r in api_result.result.findall('rowset'))
        _str, _int, _float, _bool, _ts = api.elem_getters(totals)
        results = {
            'kills': {
                'yesterday': _int('killsYesterday'),
                'week': _int('killsLastWeek'),
                'total': _int('killsTotal'),
            },
            'points': {
                'yesterday': _int('victoryPointsYesterday'),
                'week': _int('victoryPointsLastWeek'),
                'total': _int('victoryPointsTotal'),
            },
            'factions': {},
            'wars': [],
        }
        # Per-faction statistics, keyed by faction ID.
        for row in rowsets['factions'].findall('row'):
            a = row.attrib
            faction = {
                'id': int(a['factionID']),
                'name': a['factionName'],
                'pilots': int(a['pilots']),
                'systems': int(a['systemsControlled']),
                'kills': {
                    'yesterday': int(a['killsYesterday']),
                    'week': int(a['killsLastWeek']),
                    'total': int(a['killsTotal']),
                },
                'points': {
                    'yesterday': int(a['victoryPointsYesterday']),
                    'week': int(a['victoryPointsLastWeek']),
                    'total': int(a['victoryPointsTotal']),
                },
            }
            results['factions'][faction['id']] = faction
        # Pairs of factions at war; each pair appears once per direction.
        for row in rowsets['factionWars'].findall('row'):
            a = row.attrib
            war = {
                'faction': {
                    'id': int(a['factionID']),
                    'name': a['factionName'],
                },
                'against': {
                    'id': int(a['againstID']),
                    'name': a['againstName'],
                },
            }
            results['wars'].append(war)
        return api.APIResult(results, api_result.timestamp, api_result.expires)
    @api.auto_call('eve/SkillTree')
    def skill_tree(self, api_result=None):
        """Return a dict of all available skill groups.

        Keyed by group ID; each group holds a 'skills' dict keyed by
        skill type ID. Required skills are resolved to names in a
        second pass once every skill has been seen.
        """
        rowset = api_result.result.find('rowset') # skillGroups
        results = {}
        # Maps skill type ID -> skill name; filled during the first pass,
        # consumed by the second pass to name prerequisites.
        name_cache = {}
        for row in rowset.findall('row'):
            # the skill group data
            g = row.attrib
            group = {
                'id': int(g['groupID']),
                'name': g['groupName'],
                'skills': {}
            }
            # Because :ccp: groups can sometimes be listed
            # multiple times with different skills, and the
            # correct result is to add the contents together
            group = results.get(group['id'], group)
            # now get the actual skill data
            skills_rs = row.find('rowset') # skills
            for skill_row in skills_rs.findall('row'):
                a = skill_row.attrib
                _str, _int, _float, _bool, _ts = api.elem_getters(skill_row)
                req_attrib = skill_row.find('requiredAttributes')
                skill = {
                    'id': int(a['typeID']),
                    'group_id': int(a['groupID']),
                    'name': a['typeName'],
                    'published': (a['published'] == '1'),
                    'description': _str('description'),
                    'rank': _int('rank'),
                    'required_skills': {},
                    'bonuses': {},
                    'attributes': {
                        'primary': api.get_named_value(req_attrib, 'primaryAttribute'),
                        'secondary': api.get_named_value(req_attrib, 'secondaryAttribute'),
                    }
                }
                name_cache[skill['id']] = skill['name']
                # Check each rowset inside the skill, and branch based on the name attribute
                for sub_rs in skill_row.findall('rowset'):
                    if sub_rs.attrib['name'] == 'requiredSkills':
                        for sub_row in sub_rs.findall('row'):
                            b = sub_row.attrib
                            req = {
                                'level': int(b['skillLevel']),
                                'id': int(b['typeID']),
                            }
                            skill['required_skills'][req['id']] = req
                    elif sub_rs.attrib['name'] == 'skillBonusCollection':
                        for sub_row in sub_rs.findall('row'):
                            b = sub_row.attrib
                            bonus = {
                                'type': b['bonusType'],
                                'value': float(b['bonusValue']),
                            }
                            skill['bonuses'][bonus['type']] = bonus
                group['skills'][skill['id']] = skill
            results[group['id']] = group
        # Second pass to fill in required skill names
        # (None if a prerequisite ID never appeared in the tree).
        for group in results.values():
            for skill in group['skills'].values():
                for skill_id, skill_info in skill['required_skills'].items():
                    skill_info['name'] = name_cache.get(skill_id)
        return api.APIResult(results, api_result.timestamp, api_result.expires)
@api.auto_call('eve/RefTypes')
def reference_types(self, api_result=None):
"""Return a dict containing id -> name reference type mappings."""
rowset = api_result.result.find('rowset')
results = {}
for row in rowset.findall('row'):
a = row.attrib
results[int(a['refTypeID'])] = a['refTypeName']
return api.APIResult(results, api_result.timestamp, api_result.expires)
@api.auto_call('eve/TypeName', map_params={'id_list': 'IDs'})
def type_names_from_ids(self, id_list, api_result=None):
"""Return a dict containing id -> name mappings for the supplied type ids."""
rowset = api_result.result.find('rowset')
results = {}
for row in rowset.findall('row'):
a= row.attrib
results[int(a['typeID'])] = a['typeName']
return api.APIResult(results, api_result.timestamp, api_result.expires)
def type_name_from_id(self, type_id):
"""Retrieve a type name based on ID.
Convenience wrapper around type_names_from_ids().
"""
api_result = self.type_names_from_ids([type_id])
return api.APIResult(api_result.result.get(int(type_id)), api_result.timestamp, api_result.expires)
    @api.auto_call('eve/FacWarTopStats')
    def faction_warfare_leaderboard(self, api_result=None):
        """Return top-100 lists from Faction Warfare.

        Results are grouped into 'char', 'corp' and 'faction' sections,
        each holding 'kills' and 'points' leaderboards for yesterday,
        last week and all time.
        """
        def parse_top_100(rowset, prefix, attr, attr_name):
            # Each row carries '<prefix>ID' / '<prefix>Name' plus the stat
            # column named by `attr`; it is exposed under `attr_name`.
            top100 = []
            id_field = '%sID' % prefix
            name_field = '%sName' % prefix
            for row in rowset.findall('row'):
                a = row.attrib
                top100.append({
                    'id': int(a[id_field]),
                    'name': a[name_field],
                    attr_name: int(a[attr]),
                })
            return top100
        def parse_section(section, prefix):
            # A section contains six rowsets: {Kills,VictoryPoints} x
            # {Yesterday,LastWeek,Total}, indexed here by name.
            section_result = {}
            rowsets = dict((r.attrib['name'], r) for r in section.findall('rowset'))
            section_result['kills'] = {
                'yesterday': parse_top_100(rowsets['KillsYesterday'], prefix, 'kills', 'kills'),
                'week': parse_top_100(rowsets['KillsLastWeek'], prefix, 'kills', 'kills'),
                'total': parse_top_100(rowsets['KillsTotal'], prefix, 'kills', 'kills'),
            }
            section_result['points'] = {
                'yesterday': parse_top_100(rowsets['VictoryPointsYesterday'],
                                           prefix, 'victoryPoints', 'points'),
                'week': parse_top_100(rowsets['VictoryPointsLastWeek'],
                                      prefix, 'victoryPoints', 'points'),
                'total': parse_top_100(rowsets['VictoryPointsTotal'],
                                       prefix, 'victoryPoints', 'points'),
            }
            return section_result
        results = {
            'char': parse_section(api_result.result.find('characters'), 'character'),
            'corp': parse_section(api_result.result.find('corporations'), 'corporation'),
            'faction': parse_section(api_result.result.find('factions'), 'faction'),
        }
        return api.APIResult(results, api_result.timestamp, api_result.expires)
@api.auto_call('eve/ConquerableStationlist')
def conquerable_stations(self, api_result=None):
results = {}
rowset = api_result.result.find('rowset')
for row in rowset.findall('row'):
station = {
'id': int(row.attrib['stationID']),
'name': row.attrib['stationName'],
'type_id': int(row.attrib['stationTypeID']),
'system_id': int(row.attrib['solarSystemID']),
'corp': {
'id': int(row.attrib['corporationID']),
'name': row.attrib['corporationName'] }
}
results[station['id']] = station
return api.APIResult(results, api_result.timestamp, api_result.expires)
# vim: set ts=4 sts=4 sw=4 et:
| mit |
kustodian/ansible | lib/ansible/modules/storage/netapp/na_ontap_unix_group.py | 23 | 13115 | #!/usr/bin/python
"""
create Autosupport module to enable, disable or modify
"""
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- "Create/Delete Unix user group"
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_unix_group
options:
state:
description:
- Whether the specified group should exist or not.
choices: ['present', 'absent']
default: 'present'
name:
description:
- Specifies UNIX group's name, unique for each group.
- Non-modifiable.
required: true
id:
description:
- Specifies an identification number for the UNIX group.
- Group ID is unique for each UNIX group.
- Required for create, modifiable.
vserver:
description:
- Specifies the Vserver for the UNIX group.
- Non-modifiable.
required: true
skip_name_validation:
description:
- Specifies if group name validation is skipped.
type: bool
users:
description:
- Specifies the users associated with this group. Should be comma separated.
- It represents the expected state of a list of users at any time.
- Add a user into group if it is specified in expected state but not in current state.
- Delete a user from group if it is specified in current state but not in expected state.
- To delete all current users, use '' as value.
type: list
version_added: "2.9"
short_description: NetApp ONTAP UNIX Group
version_added: "2.8"
"""
EXAMPLES = """
- name: Create UNIX group
na_ontap_unix_group:
state: present
name: SampleGroup
vserver: ansibleVServer
id: 2
users: user1,user2
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete all users in UNIX group
na_ontap_unix_group:
state: present
name: SampleGroup
vserver: ansibleVServer
users: ''
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Delete UNIX group
na_ontap_unix_group:
state: absent
name: SampleGroup
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapUnixGroup(object):
    """
    Common operations to manage UNIX groups

    Wraps the ONTAP ZAPI name-mapping-unix-group-* calls: create,
    destroy, modify, and per-user add/delete membership changes.
    """
    def __init__(self):
        # Build the argument spec on top of the standard ONTAP host options
        # (hostname, username, password, ...).
        self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=False, choices=['present', 'absent'], default='present'),
            name=dict(required=True, type='str'),
            id=dict(required=False, type='int'),
            skip_name_validation=dict(required=False, type='bool'),
            vserver=dict(required=True, type='str'),
            users=dict(required=False, type='list')
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        self.set_playbook_zapi_key_map()
        # Fail early if the NetApp ZAPI client library is not installed.
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
    def set_playbook_zapi_key_map(self):
        # Map playbook parameter names to ZAPI field names, grouped by the
        # value type so conversions can be applied uniformly elsewhere.
        self.na_helper.zapi_string_keys = {
            'name': 'group-name'
        }
        self.na_helper.zapi_int_keys = {
            'id': 'group-id'
        }
        self.na_helper.zapi_bool_keys = {
            'skip_name_validation': 'skip-name-validation'
        }
    def get_unix_group(self):
        """
        Checks if the UNIX group exists.
        :return:
            dict() if group found
            None if group is not found
        """
        get_unix_group = netapp_utils.zapi.NaElement('name-mapping-unix-group-get-iter')
        # Query is scoped to the group name and vserver from the playbook.
        attributes = {
            'query': {
                'unix-group-info': {
                    'group-name': self.parameters['name'],
                    'vserver': self.parameters['vserver'],
                }
            }
        }
        get_unix_group.translate_struct(attributes)
        try:
            result = self.server.invoke_successfully(get_unix_group, enable_tunneling=True)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                group_info = result['attributes-list']['unix-group-info']
                group_details = dict()
            else:
                return None
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error getting UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # Translate ZAPI fields back into playbook-style keys.
        for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
            group_details[item_key] = group_info[zapi_key]
        for item_key, zapi_key in self.na_helper.zapi_int_keys.items():
            group_details[item_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                       value=group_info[zapi_key])
        if group_info.get_child_by_name('users') is not None:
            group_details['users'] = [user.get_child_content('user-name')
                                      for user in group_info.get_child_by_name('users').get_children()]
        else:
            group_details['users'] = None
        return group_details
    def create_unix_group(self):
        """
        Creates an UNIX group in the specified Vserver
        :return: None
        """
        if self.parameters.get('id') is None:
            self.module.fail_json(msg='Error: Missing a required parameter for create: (id)')
        group_create = netapp_utils.zapi.NaElement('name-mapping-unix-group-create')
        group_details = {}
        # Translate playbook keys into ZAPI field names, converting values
        # according to their declared type.
        for item in self.parameters:
            if item in self.na_helper.zapi_string_keys:
                zapi_key = self.na_helper.zapi_string_keys.get(item)
                group_details[zapi_key] = self.parameters[item]
            elif item in self.na_helper.zapi_bool_keys:
                zapi_key = self.na_helper.zapi_bool_keys.get(item)
                group_details[zapi_key] = self.na_helper.get_value_for_bool(from_zapi=False,
                                                                            value=self.parameters[item])
            elif item in self.na_helper.zapi_int_keys:
                zapi_key = self.na_helper.zapi_int_keys.get(item)
                # NOTE(review): from_zapi=True on an outgoing value looks
                # inverted relative to the bool conversion above --
                # confirm against NetAppModule.get_value_for_int.
                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                           value=self.parameters[item])
        group_create.translate_struct(group_details)
        try:
            self.server.invoke_successfully(group_create, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error creating UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
        # Group membership is managed through separate add/delete ZAPIs.
        if self.parameters.get('users') is not None:
            self.modify_users_in_group()
    def delete_unix_group(self):
        """
        Deletes an UNIX group from a vserver
        :return: None
        """
        group_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'name-mapping-unix-group-destroy', **{'group-name': self.parameters['name']})
        try:
            self.server.invoke_successfully(group_delete, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error removing UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
    def modify_unix_group(self, params):
        """
        Modify an UNIX group from a vserver
        :param params: modify parameters
        :return: None
        """
        # modify users requires separate zapi.
        if 'users' in params:
            self.modify_users_in_group()
            # Nothing else changed; skip the group-modify call entirely.
            if len(params) == 1:
                return
        group_modify = netapp_utils.zapi.NaElement('name-mapping-unix-group-modify')
        group_details = {'group-name': self.parameters['name']}
        for key in params:
            if key in self.na_helper.zapi_int_keys:
                zapi_key = self.na_helper.zapi_int_keys.get(key)
                group_details[zapi_key] = self.na_helper.get_value_for_int(from_zapi=True,
                                                                           value=params[key])
        group_modify.translate_struct(group_details)
        try:
            self.server.invoke_successfully(group_modify, enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as error:
            self.module.fail_json(msg='Error modifying UNIX group %s: %s' % (self.parameters['name'], to_native(error)),
                                  exception=traceback.format_exc())
    def modify_users_in_group(self):
        """
        Add/delete one or many users in a UNIX group
        :return: None
        """
        current_users = self.get_unix_group().get('users')
        expect_users = self.parameters.get('users')
        if current_users is None:
            current_users = []
        # A single empty string means "remove all users" (see module docs).
        if expect_users[0] == '' and len(expect_users) == 1:
            expect_users = []
        # Diff current vs expected membership; each change is its own ZAPI call.
        users_to_remove = list(set(current_users) - set(expect_users))
        users_to_add = list(set(expect_users) - set(current_users))
        if len(users_to_add) > 0:
            for user in users_to_add:
                add_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-add-user')
                group_details = {'group-name': self.parameters['name'], 'user-name': user}
                add_user.translate_struct(group_details)
                try:
                    self.server.invoke_successfully(add_user, enable_tunneling=True)
                except netapp_utils.zapi.NaApiError as error:
                    self.module.fail_json(
                        msg='Error adding user %s to UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
                        exception=traceback.format_exc())
        if len(users_to_remove) > 0:
            for user in users_to_remove:
                delete_user = netapp_utils.zapi.NaElement('name-mapping-unix-group-delete-user')
                group_details = {'group-name': self.parameters['name'], 'user-name': user}
                delete_user.translate_struct(group_details)
                try:
                    self.server.invoke_successfully(delete_user, enable_tunneling=True)
                except netapp_utils.zapi.NaApiError as error:
                    self.module.fail_json(
                        msg='Error deleting user %s from UNIX group %s: %s' % (user, self.parameters['name'], to_native(error)),
                        exception=traceback.format_exc())
    def autosupport_log(self):
        """
        Autosupport log for unix_group
        :return: None
        """
        netapp_utils.ems_log_event("na_ontap_unix_group", self.server)
    def apply(self):
        """
        Invoke appropriate action based on playbook parameters
        :return: None
        """
        self.autosupport_log()
        current = self.get_unix_group()
        # cd_action is 'create', 'delete', or None (exists and should exist).
        cd_action = self.na_helper.get_cd_action(current, self.parameters)
        if self.parameters['state'] == 'present' and cd_action is None:
            modify = self.na_helper.get_modified_attributes(current, self.parameters)
        if self.na_helper.changed:
            if self.module.check_mode:
                # Check mode: report the pending change without executing it.
                pass
            else:
                if cd_action == 'create':
                    self.create_unix_group()
                elif cd_action == 'delete':
                    self.delete_unix_group()
                else:
                    self.modify_unix_group(modify)
        self.module.exit_json(changed=self.na_helper.changed)
def main():
    """Module entry point: build the object and apply the requested state."""
    obj = NetAppOntapUnixGroup()
    obj.apply()
if __name__ == '__main__':
    main()
| gpl-3.0 |
revmischa/boto | boto/mashups/__init__.py | 782 | 1108 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
lokik/sfepy | sfepy/discrete/iga/utils.py | 5 | 4041 | """
Utility functions based on igakit.
"""
from __future__ import absolute_import
import numpy as nm
from sfepy.base.base import Struct
from sfepy.discrete.fem import Mesh
from sfepy.mesh.mesh_generators import get_tensor_product_conn
import six
from six.moves import range
def create_linear_fe_mesh(nurbs, pars=None):
    """
    Convert a NURBS object into a nD-linear tensor product FE mesh.
    Parameters
    ----------
    nurbs : igakit.nurbs.NURBS instance
        The NURBS object.
    pars : sequence of array, optional
        The values of parameters in each parametric dimension. If not given,
        the values are set so that the resulting mesh has the same number of
        vertices as the number of control points/basis functions of the NURBS
        object.
    Returns
    -------
    coors : array
        The coordinates of mesh vertices.
    conn : array
        The vertex connectivity array.
    desc : str
        The cell kind.
    """
    knots = nurbs.knots
    shape = nurbs.weights.shape
    if pars is None:
        # Default: one parameter value per control point in each dimension,
        # spanning the full knot range.
        pars = []
        for ii, kv in enumerate(knots):
            par = nm.linspace(kv[0], kv[-1], shape[ii])
            pars.append(par)
    # Evaluate the geometry at the tensor product of the parameter values
    # and flatten to a (n_points, dim) coordinate array.
    coors = nurbs(*pars)
    coors.shape = (-1, coors.shape[-1])
    conn, desc = get_tensor_product_conn([len(ii) for ii in pars])
    # Drop the last coordinate axis when it is identically zero
    # (planar geometry embedded in 3D space).
    if (coors[:, -1] == 0.0).all():
        coors = coors[:, :-1]
    return coors, conn, desc
def create_mesh_and_output(nurbs, pars=None, **kwargs):
    """
    Create a nD-linear tensor product FE mesh using
    :func:`create_linear_fe_mesh()`, evaluate field variables given as keyword
    arguments in the mesh vertices and create a dictionary of output data
    usable by Mesh.write().
    Parameters
    ----------
    nurbs : igakit.nurbs.NURBS instance
        The NURBS object.
    pars : sequence of array, optional
        The values of parameters in each parametric dimension. If not given,
        the values are set so that the resulting mesh has the same number of
        vertices as the number of control points/basis functions of the NURBS
        object.
    **kwargs : kwargs
        The field variables as keyword arguments. Their names serve as keys in
        the output dictionary.
    Returns
    -------
    mesh : Mesh instance
        The finite element mesh.
    out : dict
        The output dictionary.
    """
    coors, conn, desc = create_linear_fe_mesh(nurbs, pars)
    # All cells get material id 0.
    mat_id = nm.zeros(conn.shape[0], dtype=nm.int32)
    mesh = Mesh.from_data('nurbs', coors, None, [conn], [mat_id], [desc])
    out = {}
    for key, variable in six.iteritems(kwargs):
        if variable.ndim == 2:
            # Vector-valued variable: keep the component axis.
            nc = variable.shape[1]
            field = variable.reshape(nurbs.weights.shape + (nc,))
        else:
            # Scalar variable: a single component.
            field = variable.reshape(nurbs.weights.shape)
            nc = 1
        # Evaluate the coefficient field at the same parameter values
        # used for the mesh vertices.
        vals = nurbs.evaluate(field, *pars)
        out[key] = Struct(name='output_data', mode='vertex',
                          data=vals.reshape((-1, nc)))
    return mesh, out
def save_basis(nurbs, pars):
    """
    Save a NURBS object basis on a FE mesh corresponding to the given
    parametrization in VTK files.

    One file 'iga_basis_%03d.vtk' is written per basis function/DOF.
    Parameters
    ----------
    nurbs : igakit.nurbs.NURBS instance
        The NURBS object.
    pars : sequence of array, optional
        The values of parameters in each parametric dimension.
    """
    coors, conn, desc = create_linear_fe_mesh(nurbs, pars)
    mat_id = nm.zeros(conn.shape[0], dtype=nm.int32)
    mesh = Mesh.from_data('nurbs', coors, None, [conn], [mat_id], [desc])
    n_dof = nurbs.weights.ravel().shape[0]
    # `field` is a shaped view of `variable`, so writes below are visible
    # through it without copying.
    variable = nm.zeros(n_dof, dtype=nm.float64)
    field = variable.reshape(nurbs.weights.shape)
    for ic in range(n_dof):
        # Reset the previously activated DOF and activate the current one.
        # At ic == 0 this writes variable[-1], which is still zero anyway.
        variable[ic - 1] = 0.0
        variable[ic] = 1.0
        vals = nurbs.evaluate(field, *pars).reshape((-1))
        out = {}
        out['bf'] = Struct(name='output_data', mode='vertex',
                           data=vals[:, None])
        mesh.write('iga_basis_%03d.vtk' % ic, io='auto', out=out)
| bsd-3-clause |
bossiernesto/onyx | resource/resourceHelper.py | 1 | 4932 | from useful.singleton import Singleton
import urllib.error
import sys
from urllib.parse import quote_plus, quote
# Python 2/3 compatibility shim: on interpreters where the builtin `str`
# is missing (never the case on Python 3), nothing counts as unicode;
# otherwise test with isinstance.
try:
    str
except NameError:
    def _is_unicode(x):
        # No `str` builtin available: report "not unicode" for everything.
        return 0
else:
    def _is_unicode(x):
        # True when x is a (unicode) string instance.
        return isinstance(x, str)
# URL scheme prefixes recognized by ResourceHelper.checkPrefix().
HTTP_BEGIN = "http://"
HHTPS_BEGIN = "https://"
# Common MIME types; the HTML/XML/JSON subset is treated as human-readable
# by ResourceHelper.is_readable().
TYPE_HTML = 'text/html'
TYPE_XML = 'text/xml'
TYPE_JSON = 'application/json'
TYPE_ZIP = 'application/zip'
TYPE_JPG = 'image/jpeg'
TYPE_PNG = 'image/png'
class ResourceHelper(Singleton):
    """Static helpers for URL normalization, quoting and path handling."""
    @staticmethod
    def is_readable(info):
        """Return True when the 'content-type' in *info* is HTML, XML or
        JSON, or when no 'content-type' key is present at all."""
        readable = [TYPE_HTML, TYPE_XML, TYPE_JSON]
        try:
            for r in readable:
                if info['content-type'].startswith(r):
                    return True
            return False
        except KeyError:
            # No content-type header: assume readable.
            return True
    @staticmethod
    def checkPrefix(url):
        """Prepend 'http://' when *url* carries no http(s) scheme."""
        if not url.startswith(HTTP_BEGIN) and not url.startswith(HHTPS_BEGIN):
            return 'http://' + url
        return url
    @staticmethod
    def normalizeUrl(url):
        """Normalize URL and clean it"""
        urlNew = ResourceHelper.checkPrefix(url)
        # Percent-encode the UTF-8 form, keeping URL-significant characters.
        return urllib.parse.quote(urlNew.encode('utf-8'), safe="%/:=&?~#+!$,;'@()*[]")
    @staticmethod
    def quote_collection(collection, prefix='', quote_via=quote_plus, safe="/"):
        """
        Quote for collection of resources of urls.

        Joins the elements with *prefix* and quotes the result.
        NOTE(review): non-list inputs fall through and return None --
        confirm callers always pass a list.
        """
        if isinstance(collection, list):
            return quote_via(prefix.join([element for element in collection]), safe=safe)
    @staticmethod
    def urlencode(query, doseq=0, quote_via=quote_plus, safe="/~#!$,;'@()*[]"):
        """Encode a sequence of two-element tuples or dictionary into a URL query string.
        If any values in the query arg are sequences and doseq is true, each
        sequence element is converted to a separate parameter.
        If the query arg is a sequence of two-element tuples, the order of the
        parameters in the output will match the order of parameters in the
        input.
        """
        if hasattr(query, "items"):
            # mapping objects
            query = list(query.items())
        else:
            # it's a bother at times that strings and string-like objects are
            # sequences...
            try:
                # non-sequence items should not work with len()
                # non-empty strings will fail this
                if len(query) and not isinstance(query[0], tuple):
                    raise TypeError
                # zero-length sequences of all types will get here and succeed,
                # but that's a minor nit - since the original implementation
                # allowed empty dicts that type of behavior probably should be
                # preserved for consistency
            except TypeError:
                ty, va, tb = sys.exc_info()
                raise TypeError("not a valid non-string sequence or mapping object").with_traceback(tb)
        l = []
        if not doseq:
            # preserve old behavior
            for k, v in query:
                k = quote_via(str(k), safe)
                v = quote_via(str(v), safe)
                l.append(k + '=' + v)
        else:
            for k, v in query:
                k = quote_via(str(k))
                if isinstance(v, str):
                    v = quote_via(v)
                    l.append(k + '=' + v)
                elif _is_unicode(v):
                    # is there a reasonable way to convert to ASCII?
                    # encode generates a string, but "replace" or "ignore"
                    # lose information and "strict" can raise UnicodeError
                    v = quote_via(v.encode("ASCII", "replace"), safe)
                    l.append(k + '=' + v)
                else:
                    try:
                        # is this a sufficient test for sequence-ness?
                        len(v)
                    except TypeError:
                        # not a sequence
                        v = quote_via(str(v), safe)
                        l.append(k + '=' + v)
                    else:
                        # loop over the sequence
                        for elt in v:
                            l.append(k + '=' + quote_via(str(elt), safe))
        return '&'.join(l)
    @staticmethod
    def get_base_name(url):
        """
        Get base of the url.

        Returns the final path component (filename plus extension).
        """
        filename, file_ext = ResourceHelper.get_file_and_ext(url)
        return filename + file_ext
    @staticmethod
    def get_file_and_ext(url):
        """Return (stem, extension) of the last path component of *url*."""
        from urllib.parse import urlparse
        from os.path import splitext, basename
        disassembled = urlparse(url)
        return splitext(basename(disassembled.path))
    @staticmethod
    def get_pre_post_url(url):
        """
        separate url base from the parameters that form part of the post data.

        Returns a (base, query) tuple; query is '' when there is no '?'.
        """
        return url.split('?')[0], '?'.join(url.split('?')[1:])
| bsd-3-clause |
metamx/spark | python/pyspark/ml/param/__init__.py | 15 | 16490 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import array
import sys
# Python 2/3 compatibility aliases. Use version_info rather than the
# version string: lexicographic comparison of sys.version breaks once the
# major version has more than one digit (e.g. "10.0" < "3").
if sys.version_info >= (3,):
    basestring = str
    xrange = range
    unicode = str
from abc import ABCMeta
import copy
import numpy as np
import warnings
from py4j.java_gateway import JavaObject
from pyspark import since
from pyspark.ml.linalg import DenseVector, Vector
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params', 'TypeConverters']
class Param(object):
    """
    A param with self-contained documentation.

    Identity is defined by (parent uid, name); see __eq__/__hash__.
    .. versionadded:: 1.3.0
    """
    def __init__(self, parent, name, doc, typeConverter=None):
        if not isinstance(parent, Identifiable):
            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
        # Store only the parent's uid (a string), not the object itself.
        self.parent = parent.uid
        self.name = str(name)
        self.doc = str(doc)
        # Default converter is the identity (no conversion/validation).
        self.typeConverter = TypeConverters.identity if typeConverter is None else typeConverter
    def _copy_new_parent(self, parent):
        """Copy the current param to a new parent, must be a dummy param."""
        if self.parent == "undefined":
            param = copy.copy(self)
            param.parent = parent.uid
            return param
        else:
            raise ValueError("Cannot copy from non-dummy parent %s." % parent)
    def __str__(self):
        return str(self.parent) + "__" + self.name
    def __repr__(self):
        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)
    def __hash__(self):
        # Hash the string form so params equal under __eq__ (same parent uid
        # and name) hash equally.
        return hash(str(self))
    def __eq__(self, other):
        if isinstance(other, Param):
            return self.parent == other.parent and self.name == other.name
        else:
            return False
class TypeConverters(object):
    """
    .. note:: DeveloperApi
    Factory methods for common type conversion functions for `Param.typeConverter`.

    Each converter returns the converted value or raises TypeError.
    .. versionadded:: 2.0.0
    """
    @staticmethod
    def _is_numeric(value):
        vtype = type(value)
        # The name check catches Python 2's `long`, which cannot be
        # referenced directly from Python 3 code.
        return vtype in [int, float, np.float64, np.int64] or vtype.__name__ == 'long'
    @staticmethod
    def _is_integer(value):
        # Integral floats (e.g. 3.0) count as integers here.
        return TypeConverters._is_numeric(value) and float(value).is_integer()
    @staticmethod
    def _can_convert_to_list(value):
        vtype = type(value)
        return vtype in [list, np.ndarray, tuple, xrange, array.array] or isinstance(value, Vector)
    @staticmethod
    def _can_convert_to_string(value):
        vtype = type(value)
        return isinstance(value, basestring) or vtype in [np.unicode_, np.string_, np.str_]
    @staticmethod
    def identity(value):
        """
        Dummy converter that just returns value.
        """
        return value
    @staticmethod
    def toList(value):
        """
        Convert a value to a list, if possible.
        """
        if type(value) == list:
            return value
        elif type(value) in [np.ndarray, tuple, xrange, array.array]:
            return list(value)
        elif isinstance(value, Vector):
            # ML vectors are converted through their dense array form.
            return list(value.toArray())
        else:
            raise TypeError("Could not convert %s to list" % value)
    @staticmethod
    def toListFloat(value):
        """
        Convert a value to list of floats, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return [float(v) for v in value]
        raise TypeError("Could not convert %s to list of floats" % value)
    @staticmethod
    def toListInt(value):
        """
        Convert a value to list of ints, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_integer(v), value)):
                return [int(v) for v in value]
        raise TypeError("Could not convert %s to list of ints" % value)
    @staticmethod
    def toListString(value):
        """
        Convert a value to list of strings, if possible.
        """
        if TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._can_convert_to_string(v), value)):
                return [TypeConverters.toString(v) for v in value]
        raise TypeError("Could not convert %s to list of strings" % value)
    @staticmethod
    def toVector(value):
        """
        Convert a value to a MLlib Vector, if possible.
        """
        if isinstance(value, Vector):
            return value
        elif TypeConverters._can_convert_to_list(value):
            value = TypeConverters.toList(value)
            if all(map(lambda v: TypeConverters._is_numeric(v), value)):
                return DenseVector(value)
        raise TypeError("Could not convert %s to vector" % value)
    @staticmethod
    def toFloat(value):
        """
        Convert a value to a float, if possible.
        """
        if TypeConverters._is_numeric(value):
            return float(value)
        else:
            raise TypeError("Could not convert %s to float" % value)
    @staticmethod
    def toInt(value):
        """
        Convert a value to an int, if possible.
        """
        if TypeConverters._is_integer(value):
            return int(value)
        else:
            raise TypeError("Could not convert %s to int" % value)
    @staticmethod
    def toString(value):
        """
        Convert a value to a string, if possible.
        """
        if isinstance(value, basestring):
            return value
        elif type(value) in [np.string_, np.str_]:
            return str(value)
        elif type(value) == np.unicode_:
            return unicode(value)
        else:
            raise TypeError("Could not convert %s to string type" % type(value))
    @staticmethod
    def toBoolean(value):
        """
        Convert a value to a boolean, if possible.
        """
        if type(value) == bool:
            return value
        else:
            raise TypeError("Boolean Param requires value of type bool. Found %s." % type(value))
class Params(Identifiable):
    """
    Components that take parameters. This also provides an internal
    param map to store parameter values attached to the instance.

    .. versionadded:: 1.3.0
    """

    __metaclass__ = ABCMeta

    def __init__(self):
        super(Params, self).__init__()
        #: internal param map for user-supplied values param map
        self._paramMap = {}
        #: internal param map for default values
        self._defaultParamMap = {}
        #: value returned by :py:func:`params` (lazily computed cache)
        self._params = None
        # Copy the params from the class to the object
        self._copy_params()

    def _copy_params(self):
        """
        Copy all params defined on the class to current object.
        """
        cls = type(self)
        src_name_attrs = [(x, getattr(cls, x)) for x in dir(cls)]
        src_params = list(filter(lambda nameAttr: isinstance(nameAttr[1], Param), src_name_attrs))
        # Re-parent each class-level Param so that ownership checks
        # (see _shouldOwn) compare against this instance's uid.
        for name, param in src_params:
            setattr(self, name, param._copy_new_parent(self))

    @property
    @since("1.3.0")
    def params(self):
        """
        Returns all params ordered by name. The default implementation
        uses :py:func:`dir` to get all attributes of type
        :py:class:`Param`.
        """
        if self._params is None:
            # Skip "params" itself and class-level properties to avoid
            # recursing back into this property during dir() traversal.
            self._params = list(filter(lambda attr: isinstance(attr, Param),
                                       [getattr(self, x) for x in dir(self) if x != "params" and
                                        not isinstance(getattr(type(self), x, None), property)]))
        return self._params

    @since("1.4.0")
    def explainParam(self, param):
        """
        Explains a single param and returns its name, doc, and optional
        default value and user-supplied value in a string.

        :param param: a :py:class:`Param` instance or a param name owned
                      by this instance
        """
        param = self._resolveParam(param)
        values = []
        if self.isDefined(param):
            if param in self._defaultParamMap:
                values.append("default: %s" % self._defaultParamMap[param])
            if param in self._paramMap:
                values.append("current: %s" % self._paramMap[param])
        else:
            values.append("undefined")
        valueStr = "(" + ", ".join(values) + ")"
        return "%s: %s %s" % (param.name, param.doc, valueStr)

    @since("1.4.0")
    def explainParams(self):
        """
        Returns the documentation of all params with their optionally
        default values and user-supplied values.
        """
        return "\n".join([self.explainParam(param) for param in self.params])

    @since("1.4.0")
    def getParam(self, paramName):
        """
        Gets a param by its name.

        :raises ValueError: if no :py:class:`Param` attribute with that
                            name exists on this instance
        """
        param = getattr(self, paramName)
        if isinstance(param, Param):
            return param
        else:
            raise ValueError("Cannot find param with name %s." % paramName)

    @since("1.4.0")
    def isSet(self, param):
        """
        Checks whether a param is explicitly set by user.
        """
        param = self._resolveParam(param)
        return param in self._paramMap

    @since("1.4.0")
    def hasDefault(self, param):
        """
        Checks whether a param has a default value.
        """
        param = self._resolveParam(param)
        return param in self._defaultParamMap

    @since("1.4.0")
    def isDefined(self, param):
        """
        Checks whether a param is explicitly set by user or has
        a default value.
        """
        return self.isSet(param) or self.hasDefault(param)

    @since("1.4.0")
    def hasParam(self, paramName):
        """
        Tests whether this instance contains a param with a given
        (string) name.
        """
        if isinstance(paramName, str):
            p = getattr(self, paramName, None)
            return isinstance(p, Param)
        else:
            raise TypeError("hasParam(): paramName must be a string")

    @since("1.4.0")
    def getOrDefault(self, param):
        """
        Gets the value of a param in the user-supplied param map or its
        default value. Raises an error if neither is set.
        """
        param = self._resolveParam(param)
        if param in self._paramMap:
            return self._paramMap[param]
        else:
            # Raises KeyError if the param has no default value either.
            return self._defaultParamMap[param]

    @since("1.4.0")
    def extractParamMap(self, extra=None):
        """
        Extracts the embedded default param values and user-supplied
        values, and then merges them with extra values from input into
        a flat param map, where the latter value is used if there exist
        conflicts, i.e., with ordering: default param values <
        user-supplied values < extra.

        :param extra: extra param values
        :return: merged param map
        """
        if extra is None:
            extra = dict()
        paramMap = self._defaultParamMap.copy()
        paramMap.update(self._paramMap)
        paramMap.update(extra)
        return paramMap

    @since("1.4.0")
    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. The default implementation creates a
        shallow copy using :py:func:`copy.copy`, and then copies the
        embedded and extra parameters over and returns the copy.
        Subclasses should override this method if the default approach
        is not sufficient.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = copy.copy(self)
        # Reset the user-supplied map on the copy; values are re-applied
        # below so the copy does not share the dict with self.
        that._paramMap = {}
        return self._copyValues(that, extra)

    def _shouldOwn(self, param):
        """
        Validates that the input param belongs to this Params instance.
        """
        if not (self.uid == param.parent and self.hasParam(param.name)):
            raise ValueError("Param %r does not belong to %r." % (param, self))

    def _resolveParam(self, param):
        """
        Resolves a param and validates the ownership.

        :param param: param name or the param instance, which must
                      belong to this Params instance
        :return: resolved param instance
        """
        if isinstance(param, Param):
            self._shouldOwn(param)
            return param
        elif isinstance(param, str):
            return self.getParam(param)
        else:
            raise ValueError("Cannot resolve %r as a param." % param)

    @staticmethod
    def _dummy():
        """
        Returns a dummy Params instance used as a placeholder to
        generate docs.
        """
        dummy = Params()
        dummy.uid = "undefined"
        return dummy

    def _set(self, **kwargs):
        """
        Sets user-supplied params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            # None bypasses the type converter so callers can store an
            # explicit "no value" without conversion errors.
            if value is not None:
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid param value given for param "%s". %s' % (p.name, e))
            self._paramMap[p] = value
        return self

    def _clear(self, param):
        """
        Clears a param from the param map if it has been explicitly set.
        """
        if self.isSet(param):
            del self._paramMap[param]

    def _setDefault(self, **kwargs):
        """
        Sets default params.
        """
        for param, value in kwargs.items():
            p = getattr(self, param)
            # JavaObject defaults originate on the JVM side and are
            # stored without Python-side conversion.
            if value is not None and not isinstance(value, JavaObject):
                try:
                    value = p.typeConverter(value)
                except TypeError as e:
                    raise TypeError('Invalid default param value given for param "%s". %s'
                                    % (p.name, e))
            self._defaultParamMap[p] = value
        return self

    def _copyValues(self, to, extra=None):
        """
        Copies param values from this instance to another instance for
        params shared by them.

        :param to: the target instance
        :param extra: extra params to be copied
        :return: the target instance with param values copied
        """
        if extra is None:
            extra = dict()
        paramMap = self.extractParamMap(extra)
        for p in self.params:
            if p in paramMap and to.hasParam(p.name):
                to._set(**{p.name: paramMap[p]})
        return to

    def _resetUid(self, newUid):
        """
        Changes the uid of this instance. This updates both
        the stored uid and the parent uid of params and param maps.
        This is used by persistence (loading).

        :param newUid: new uid to use, which is converted to unicode
        :return: same instance, but with the uid and Param.parent values
                 updated, including within param maps
        """
        newUid = unicode(newUid)
        self.uid = newUid
        newDefaultParamMap = dict()
        newParamMap = dict()
        # Both maps must be rebuilt with re-parented copies because a
        # Param's identity (hashing) involves its parent uid.
        for param in self.params:
            newParam = copy.copy(param)
            newParam.parent = newUid
            if param in self._defaultParamMap:
                newDefaultParamMap[newParam] = self._defaultParamMap[param]
            if param in self._paramMap:
                newParamMap[newParam] = self._paramMap[param]
            param.parent = newUid
        self._defaultParamMap = newDefaultParamMap
        self._paramMap = newParamMap
        return self
| apache-2.0 |
lucasa/landell_gst-gengui | sltv/input/alsainput.py | 5 | 1339 | # -*- coding: utf-8 -*-
# Copyright (C) 2010 Holoscopio Tecnologia
# Author: Marcelo Jorge Vieira <metal@holoscopio.com>
# Author: Thadeu Lima de Souza Cascardo <cascardo@holoscopio.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import gobject
import pygst
pygst.require("0.10")
import gst
from core import Input, INPUT_TYPE_AUDIO
CAPABILITIES = INPUT_TYPE_AUDIO
class ALSAInput(Input):
    """Audio capture input built on the GStreamer ``alsasrc`` element."""

    def __init__(self):
        Input.__init__(self, CAPABILITIES)
        # Create the ALSA capture element and add it to this bin.
        self.audio_src = gst.element_factory_make("alsasrc", "audio_src")
        self.add(self.audio_src)
        # Point this input's audio ghost pad at alsasrc's first source pad.
        self.audio_pad.set_target(self.audio_src.src_pads().next())

    def config(self, dict):
        # ALSA capture exposes no configurable options here.
        pass
| gpl-2.0 |
shadanan/hevante-points | src/requests/packages/urllib3/util/selectors.py | 86 | 18836 | # Backport of selectors.py from Python 3.5+ to support Python < 3.4
# Also has the behavior specified in PEP 475 which is to retry syscalls
# in the case of an EINTR error. This module is required because selectors34
# does not follow this behavior and instead returns that no file descriptor
# events have occurred rather than retry the syscall. The decision to drop
# support for select.devpoll is made to maintain 100% test coverage.
import errno
import math
import select
from collections import namedtuple, Mapping
import time
try:
monotonic = time.monotonic
except (AttributeError, ImportError): # Python < 3.3 has no time.monotonic
monotonic = time.time
EVENT_READ = (1 << 0)
EVENT_WRITE = (1 << 1)
HAS_SELECT = True # Variable that shows whether the platform has a selector.
_SYSCALL_SENTINEL = object() # Sentinel in case a system call returns None.
class SelectorError(Exception):
    """Raised when a selector system call fails.

    The failing error number is exposed through the ``errno`` attribute.
    """

    def __init__(self, errcode):
        super(SelectorError, self).__init__()
        self.errno = errcode

    def __str__(self):
        # Delegate to __repr__ so both render identically.
        return self.__repr__()

    def __repr__(self):
        return "<SelectorError errno={0}>".format(self.errno)
def _fileobj_to_fd(fileobj):
    """Return the file descriptor for *fileobj*.

    Integers are assumed to already be descriptors; any other object
    must provide a working ``fileno()`` method. Negative descriptors
    are rejected with ValueError.
    """
    if not isinstance(fileobj, int):
        try:
            fd = int(fileobj.fileno())
        except (AttributeError, TypeError, ValueError):
            raise ValueError("Invalid file object: {0!r}".format(fileobj))
    else:
        fd = fileobj

    if fd < 0:
        raise ValueError("Invalid file descriptor: {0}".format(fd))
    return fd
def _syscall_wrapper(func, recalc_timeout, *args, **kwargs):
    """ Wrapper function for syscalls that could fail due to EINTR.
    All functions should be retried if there is time left in the timeout
    in accordance with PEP 475.

    :param func: syscall-like callable to invoke.
    :param recalc_timeout: if True, ``kwargs["timeout"]`` is reduced by the
        time already elapsed before each retry.
    :param args: positional arguments forwarded to ``func``.
    :param kwargs: keyword arguments forwarded to ``func``; ``timeout``
        (seconds) is interpreted here — None or a negative value means
        "no timeout".
    :raises OSError: with errno ETIMEDOUT if the deadline expires while
        retrying after EINTR.
    :raises SelectorError: for any other failing syscall that carries an
        error code.
    """
    timeout = kwargs.get("timeout", None)
    if timeout is None:
        expires = None
        recalc_timeout = False
    else:
        timeout = float(timeout)
        if timeout < 0.0:  # Timeout less than 0 treated as no timeout.
            expires = None
        else:
            expires = monotonic() + timeout

    args = list(args)
    if recalc_timeout and "timeout" not in kwargs:
        raise ValueError(
            "Timeout must be in args or kwargs to be recalculated")

    result = _SYSCALL_SENTINEL
    while result is _SYSCALL_SENTINEL:
        try:
            result = func(*args, **kwargs)
        # OSError is thrown by select.select
        # IOError is thrown by select.epoll.poll
        # select.error is thrown by select.poll.poll
        # Aren't we thankful for Python 3.x rework for exceptions?
        except (OSError, IOError, select.error) as e:
            # select.error wasn't a subclass of OSError in the past.
            errcode = None
            if hasattr(e, "errno"):
                errcode = e.errno
            elif hasattr(e, "args"):
                errcode = e.args[0]

            # Also test for the Windows equivalent of EINTR.
            is_interrupt = (errcode == errno.EINTR or (hasattr(errno, "WSAEINTR") and
                                                       errcode == errno.WSAEINTR))

            if is_interrupt:
                if expires is not None:
                    current_time = monotonic()
                    if current_time > expires:
                        # BUG FIX: the previous code raised
                        # ``OSError(errno=errno.ETIMEDOUT)``, but OSError
                        # does not accept keyword arguments, so hitting
                        # the deadline raised TypeError instead of the
                        # intended timeout error. Pass (errno, strerror)
                        # positionally.
                        raise OSError(errno.ETIMEDOUT, "Connection timed out")
                    if recalc_timeout:
                        if "timeout" in kwargs:
                            kwargs["timeout"] = expires - current_time
                continue
            if errcode:
                raise SelectorError(errcode)
            else:
                raise
    return result
# Immutable record describing one registered file object:
# (file object, file descriptor, events mask, opaque user data).
SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data'])
class _SelectorMapping(Mapping):
    """Read-only mapping of registered file objects to selector keys."""

    def __init__(self, selector):
        self._selector = selector

    def __len__(self):
        return len(self._selector._fd_to_key)

    def __iter__(self):
        return iter(self._selector._fd_to_key)

    def __getitem__(self, fileobj):
        try:
            descriptor = self._selector._fileobj_lookup(fileobj)
            return self._selector._fd_to_key[descriptor]
        except KeyError:
            raise KeyError("{0!r} is not registered.".format(fileobj))
class BaseSelector(object):
    """ Abstract Selector class

    A selector supports registering file objects to be monitored
    for specific I/O events.

    A file object is a file descriptor or any object with a
    `fileno()` method. An arbitrary object can be attached to the
    file object which can be used for example to store context info,
    a callback, etc.

    A selector can use various implementations (select(), poll(), epoll(),
    and kqueue()) depending on the platform. The 'DefaultSelector' class uses
    the most efficient implementation for the current platform.
    """
    def __init__(self):
        # Maps file descriptors to keys.
        self._fd_to_key = {}

        # Read-only mapping returned by get_map()
        self._map = _SelectorMapping(self)

    def _fileobj_lookup(self, fileobj):
        """ Return a file descriptor from a file object.
        This wraps _fileobj_to_fd() to do an exhaustive
        search in case the object is invalid but we still
        have it in our map. Used by unregister() so we can
        unregister an object that was previously registered
        even if it is closed. It is also used by _SelectorMapping
        """
        try:
            return _fileobj_to_fd(fileobj)
        except ValueError:
            # Search through all our mapped keys.
            for key in self._fd_to_key.values():
                if key.fileobj is fileobj:
                    return key.fd

            # Raise ValueError after all.
            raise

    def register(self, fileobj, events, data=None):
        """ Register a file object for a set of events to monitor.

        :raises ValueError: if events is empty or has bits other than
            EVENT_READ | EVENT_WRITE
        :raises KeyError: if the descriptor is already registered
        """
        if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)):
            raise ValueError("Invalid events: {0!r}".format(events))

        key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data)

        if key.fd in self._fd_to_key:
            raise KeyError("{0!r} (FD {1}) is already registered"
                           .format(fileobj, key.fd))

        self._fd_to_key[key.fd] = key
        return key

    def unregister(self, fileobj):
        """ Unregister a file object from being monitored. """
        try:
            key = self._fd_to_key.pop(self._fileobj_lookup(fileobj))
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))
        return key

    def modify(self, fileobj, events, data=None):
        """ Change a registered file object monitored events and data. """
        # NOTE: Some subclasses optimize this operation even further.
        try:
            key = self._fd_to_key[self._fileobj_lookup(fileobj)]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

        if events != key.events:
            # Event change requires a full re-registration.
            self.unregister(fileobj)
            key = self.register(fileobj, events, data)
        elif data != key.data:
            # Use a shortcut to update the data.
            key = key._replace(data=data)
            self._fd_to_key[key.fd] = key

        return key

    def select(self, timeout=None):
        """ Perform the actual selection until some monitored file objects
        are ready or the timeout expires. """
        raise NotImplementedError()

    def close(self):
        """ Close the selector. This must be called to ensure that all
        underlying resources are freed. """
        self._fd_to_key.clear()
        self._map = None

    def get_key(self, fileobj):
        """ Return the key associated with a registered file object. """
        mapping = self.get_map()
        if mapping is None:
            raise RuntimeError("Selector is closed")
        try:
            return mapping[fileobj]
        except KeyError:
            raise KeyError("{0!r} is not registered".format(fileobj))

    def get_map(self):
        """ Return a mapping of file objects to selector keys """
        return self._map

    def _key_from_fd(self, fd):
        """ Return the key associated to a given file descriptor
        Return None if it is not found. """
        try:
            return self._fd_to_key[fd]
        except KeyError:
            return None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()
# Almost all platforms have select.select()
if hasattr(select, "select"):
    class SelectSelector(BaseSelector):
        """ Select-based selector. """
        def __init__(self):
            super(SelectSelector, self).__init__()
            # Separate interest sets: fds watched for read vs. write.
            self._readers = set()
            self._writers = set()

        def register(self, fileobj, events, data=None):
            key = super(SelectSelector, self).register(fileobj, events, data)
            if events & EVENT_READ:
                self._readers.add(key.fd)
            if events & EVENT_WRITE:
                self._writers.add(key.fd)
            return key

        def unregister(self, fileobj):
            key = super(SelectSelector, self).unregister(fileobj)
            self._readers.discard(key.fd)
            self._writers.discard(key.fd)
            return key

        def _select(self, r, w, timeout=None):
            """ Wrapper for select.select because timeout is a positional arg """
            return select.select(r, w, [], timeout)

        def select(self, timeout=None):
            # Selecting on empty lists on Windows errors out.
            if not len(self._readers) and not len(self._writers):
                return []

            timeout = None if timeout is None else max(timeout, 0.0)
            ready = []
            # NOTE(review): timeout is passed positionally, so
            # _syscall_wrapper sees no "timeout" kwarg and will neither
            # enforce a deadline nor recalculate it across EINTR retries
            # — confirm this is intended.
            r, w, _ = _syscall_wrapper(self._select, True, self._readers,
                                       self._writers, timeout)
            r = set(r)
            w = set(w)
            for fd in r | w:
                events = 0
                if fd in r:
                    events |= EVENT_READ
                if fd in w:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready
if hasattr(select, "poll"):
    class PollSelector(BaseSelector):
        """ Poll-based selector """
        def __init__(self):
            super(PollSelector, self).__init__()
            self._poll = select.poll()

        def register(self, fileobj, events, data=None):
            key = super(PollSelector, self).register(fileobj, events, data)
            event_mask = 0
            if events & EVENT_READ:
                event_mask |= select.POLLIN
            if events & EVENT_WRITE:
                event_mask |= select.POLLOUT
            self._poll.register(key.fd, event_mask)
            return key

        def unregister(self, fileobj):
            key = super(PollSelector, self).unregister(fileobj)
            self._poll.unregister(key.fd)
            return key

        def _wrap_poll(self, timeout=None):
            """ Wrapper function for select.poll.poll() so that
            _syscall_wrapper can work with only seconds. """
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0
                else:
                    # select.poll.poll() has a resolution of 1 millisecond,
                    # round away from zero to wait *at least* timeout seconds.
                    timeout = math.ceil(timeout * 1e3)

            result = self._poll.poll(timeout)
            return result

        def select(self, timeout=None):
            ready = []
            fd_events = _syscall_wrapper(self._wrap_poll, True, timeout=timeout)
            for fd, event_mask in fd_events:
                events = 0
                # Any bits besides POLLIN count as writable, any bits
                # besides POLLOUT as readable — error/hangup conditions
                # therefore map onto both event types.
                if event_mask & ~select.POLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.POLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))

            return ready
if hasattr(select, "epoll"):
    class EpollSelector(BaseSelector):
        """ Epoll-based selector """
        def __init__(self):
            super(EpollSelector, self).__init__()
            self._epoll = select.epoll()

        def fileno(self):
            # Expose the epoll fd so this selector can itself be polled.
            return self._epoll.fileno()

        def register(self, fileobj, events, data=None):
            key = super(EpollSelector, self).register(fileobj, events, data)
            events_mask = 0
            if events & EVENT_READ:
                events_mask |= select.EPOLLIN
            if events & EVENT_WRITE:
                events_mask |= select.EPOLLOUT
            _syscall_wrapper(self._epoll.register, False, key.fd, events_mask)
            return key

        def unregister(self, fileobj):
            key = super(EpollSelector, self).unregister(fileobj)
            try:
                _syscall_wrapper(self._epoll.unregister, False, key.fd)
            except SelectorError:
                # This can occur when the fd was closed since registry.
                pass
            return key

        def select(self, timeout=None):
            if timeout is not None:
                if timeout <= 0:
                    timeout = 0.0
                else:
                    # select.epoll.poll() has a resolution of 1 millisecond
                    # but luckily takes seconds so we don't need a wrapper
                    # like PollSelector. Just for better rounding.
                    timeout = math.ceil(timeout * 1e3) * 1e-3
                timeout = float(timeout)
            else:
                timeout = -1.0  # epoll.poll() must have a float.

            # We always want at least 1 to ensure that select can be called
            # with no file descriptors registered. Otherwise will fail.
            max_events = max(len(self._fd_to_key), 1)

            ready = []
            fd_events = _syscall_wrapper(self._epoll.poll, True,
                                         timeout=timeout,
                                         maxevents=max_events)
            for fd, event_mask in fd_events:
                events = 0
                # Any bits besides EPOLLIN count as writable, any besides
                # EPOLLOUT as readable (error/hangup maps to both).
                if event_mask & ~select.EPOLLIN:
                    events |= EVENT_WRITE
                if event_mask & ~select.EPOLLOUT:
                    events |= EVENT_READ

                key = self._key_from_fd(fd)
                if key:
                    ready.append((key, events & key.events))
            return ready

        def close(self):
            self._epoll.close()
            super(EpollSelector, self).close()
if hasattr(select, "kqueue"):
    class KqueueSelector(BaseSelector):
        """ Kqueue / Kevent-based selector """
        def __init__(self):
            super(KqueueSelector, self).__init__()
            self._kqueue = select.kqueue()

        def fileno(self):
            # Expose the kqueue fd so this selector can itself be polled.
            return self._kqueue.fileno()

        def register(self, fileobj, events, data=None):
            key = super(KqueueSelector, self).register(fileobj, events, data)
            # Read and write interest are two separate kevents.
            if events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            if events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_ADD)

                _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)

            return key

        def unregister(self, fileobj):
            key = super(KqueueSelector, self).unregister(fileobj)
            if key.events & EVENT_READ:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_READ,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # Deletion fails if the fd was already closed; ignore.
                    pass
            if key.events & EVENT_WRITE:
                kevent = select.kevent(key.fd,
                                       select.KQ_FILTER_WRITE,
                                       select.KQ_EV_DELETE)
                try:
                    _syscall_wrapper(self._kqueue.control, False, [kevent], 0, 0)
                except SelectorError:
                    # See above.
                    pass

            return key

        def select(self, timeout=None):
            if timeout is not None:
                timeout = max(timeout, 0)

            # Each registered fd may have up to two kevents (read + write).
            max_events = len(self._fd_to_key) * 2
            ready_fds = {}

            kevent_list = _syscall_wrapper(self._kqueue.control, True,
                                           None, max_events, timeout)
            for kevent in kevent_list:
                fd = kevent.ident
                event_mask = kevent.filter
                events = 0
                if event_mask == select.KQ_FILTER_READ:
                    events |= EVENT_READ
                if event_mask == select.KQ_FILTER_WRITE:
                    events |= EVENT_WRITE

                key = self._key_from_fd(fd)
                if key:
                    if key.fd not in ready_fds:
                        ready_fds[key.fd] = (key, events & key.events)
                    else:
                        # Merge separate read/write kevents for the same fd.
                        old_events = ready_fds[key.fd][1]
                        ready_fds[key.fd] = (key, (events | old_events) & key.events)

            return list(ready_fds.values())

        def close(self):
            self._kqueue.close()
            super(KqueueSelector, self).close()
# Choose the best implementation, roughly:
# kqueue == epoll > poll > select. Devpoll not supported. (See above)
# select() also can't accept a FD > FD_SETSIZE (usually around 1024)
if 'KqueueSelector' in globals():  # Platform-specific: Mac OS and BSD
    DefaultSelector = KqueueSelector
elif 'EpollSelector' in globals():  # Platform-specific: Linux
    DefaultSelector = EpollSelector
elif 'PollSelector' in globals():  # Platform-specific: Linux
    DefaultSelector = PollSelector
elif 'SelectSelector' in globals():  # Platform-specific: Windows
    DefaultSelector = SelectSelector
else:  # Platform-specific: AppEngine
    def no_selector(_):
        # Placeholder that fails loudly on platforms with no selector.
        raise ValueError("Platform does not have a selector")
    DefaultSelector = no_selector
    HAS_SELECT = False
| mit |
yaoli/sklearn-theano | sklearn_theano/externals/google/protobuf/internal/generator_test.py | 73 | 14525 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# TODO(robinson): Flesh this out considerably. We focused on reflection_test.py
# first, since it's testing the subtler code, and since it provides decent
# indirect testing of the protocol compiler output.
"""Unittest that directly tests the output of the pure-Python protocol
compiler. See //google/protobuf/reflection_test.py for a test which
further ensures that we can use Python protocol message objects as we expect.
"""
__author__ = 'robinson@google.com (Will Robinson)'
from google.apputils import basetest
from google.protobuf.internal import test_bad_identifiers_pb2
from google.protobuf import unittest_custom_options_pb2
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_import_public_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_no_generic_services_pb2
from google.protobuf import unittest_pb2
from google.protobuf import service
from google.protobuf import symbol_database
MAX_EXTENSION = 536870912
class GeneratorTest(basetest.TestCase):
def testNestedMessageDescriptor(self):
field_name = 'optional_nested_message'
proto_type = unittest_pb2.TestAllTypes
self.assertEqual(
proto_type.NestedMessage.DESCRIPTOR,
proto_type.DESCRIPTOR.fields_by_name[field_name].message_type)
def testEnums(self):
# We test only module-level enums here.
# TODO(robinson): Examine descriptors directly to check
# enum descriptor output.
self.assertEqual(4, unittest_pb2.FOREIGN_FOO)
self.assertEqual(5, unittest_pb2.FOREIGN_BAR)
self.assertEqual(6, unittest_pb2.FOREIGN_BAZ)
proto = unittest_pb2.TestAllTypes()
self.assertEqual(1, proto.FOO)
self.assertEqual(1, unittest_pb2.TestAllTypes.FOO)
self.assertEqual(2, proto.BAR)
self.assertEqual(2, unittest_pb2.TestAllTypes.BAR)
self.assertEqual(3, proto.BAZ)
self.assertEqual(3, unittest_pb2.TestAllTypes.BAZ)
def testExtremeDefaultValues(self):
message = unittest_pb2.TestExtremeDefaultValues()
# Python pre-2.6 does not have isinf() or isnan() functions, so we have
# to provide our own.
def isnan(val):
# NaN is never equal to itself.
return val != val
def isinf(val):
# Infinity times zero equals NaN.
return not isnan(val) and isnan(val * 0)
self.assertTrue(isinf(message.inf_double))
self.assertTrue(message.inf_double > 0)
self.assertTrue(isinf(message.neg_inf_double))
self.assertTrue(message.neg_inf_double < 0)
self.assertTrue(isnan(message.nan_double))
self.assertTrue(isinf(message.inf_float))
self.assertTrue(message.inf_float > 0)
self.assertTrue(isinf(message.neg_inf_float))
self.assertTrue(message.neg_inf_float < 0)
self.assertTrue(isnan(message.nan_float))
self.assertEqual("? ? ?? ?? ??? ??/ ??-", message.cpp_trigraph)
def testHasDefaultValues(self):
desc = unittest_pb2.TestAllTypes.DESCRIPTOR
expected_has_default_by_name = {
'optional_int32': False,
'repeated_int32': False,
'optional_nested_message': False,
'default_int32': True,
}
has_default_by_name = dict(
[(f.name, f.has_default_value)
for f in desc.fields
if f.name in expected_has_default_by_name])
self.assertEqual(expected_has_default_by_name, has_default_by_name)
def testContainingTypeBehaviorForExtensions(self):
self.assertEqual(unittest_pb2.optional_int32_extension.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
self.assertEqual(unittest_pb2.TestRequired.single.containing_type,
unittest_pb2.TestAllExtensions.DESCRIPTOR)
def testExtensionScope(self):
self.assertEqual(unittest_pb2.optional_int32_extension.extension_scope,
None)
self.assertEqual(unittest_pb2.TestRequired.single.extension_scope,
unittest_pb2.TestRequired.DESCRIPTOR)
def testIsExtension(self):
self.assertTrue(unittest_pb2.optional_int32_extension.is_extension)
self.assertTrue(unittest_pb2.TestRequired.single.is_extension)
message_descriptor = unittest_pb2.TestRequired.DESCRIPTOR
non_extension_descriptor = message_descriptor.fields_by_name['a']
self.assertTrue(not non_extension_descriptor.is_extension)
def testOptions(self):
proto = unittest_mset_pb2.TestMessageSet()
self.assertTrue(proto.DESCRIPTOR.GetOptions().message_set_wire_format)
def testMessageWithCustomOptions(self):
proto = unittest_custom_options_pb2.TestMessageWithCustomOptions()
enum_options = proto.DESCRIPTOR.enum_types_by_name['AnEnum'].GetOptions()
self.assertTrue(enum_options is not None)
# TODO(gps): We really should test for the presense of the enum_opt1
# extension and for its value to be set to -789.
def testNestedTypes(self):
self.assertEquals(
set(unittest_pb2.TestAllTypes.DESCRIPTOR.nested_types),
set([
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
unittest_pb2.TestAllTypes.OptionalGroup.DESCRIPTOR,
unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR,
]))
self.assertEqual(unittest_pb2.TestEmptyMessage.DESCRIPTOR.nested_types, [])
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.nested_types, [])
def testContainingType(self):
self.assertTrue(
unittest_pb2.TestEmptyMessage.DESCRIPTOR.containing_type is None)
self.assertTrue(
unittest_pb2.TestAllTypes.DESCRIPTOR.containing_type is None)
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
self.assertEqual(
unittest_pb2.TestAllTypes.RepeatedGroup.DESCRIPTOR.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
def testContainingTypeInEnumDescriptor(self):
self.assertTrue(unittest_pb2._FOREIGNENUM.containing_type is None)
self.assertEqual(unittest_pb2._TESTALLTYPES_NESTEDENUM.containing_type,
unittest_pb2.TestAllTypes.DESCRIPTOR)
def testPackage(self):
self.assertEqual(
unittest_pb2.TestAllTypes.DESCRIPTOR.file.package,
'protobuf_unittest')
desc = unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR
self.assertEqual(desc.file.package, 'protobuf_unittest')
self.assertEqual(
unittest_import_pb2.ImportMessage.DESCRIPTOR.file.package,
'protobuf_unittest_import')
self.assertEqual(
unittest_pb2._FOREIGNENUM.file.package, 'protobuf_unittest')
self.assertEqual(
unittest_pb2._TESTALLTYPES_NESTEDENUM.file.package,
'protobuf_unittest')
self.assertEqual(
unittest_import_pb2._IMPORTENUM.file.package,
'protobuf_unittest_import')
def testExtensionRange(self):
self.assertEqual(
unittest_pb2.TestAllTypes.DESCRIPTOR.extension_ranges, [])
self.assertEqual(
unittest_pb2.TestAllExtensions.DESCRIPTOR.extension_ranges,
[(1, MAX_EXTENSION)])
self.assertEqual(
unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR.extension_ranges,
[(42, 43), (4143, 4244), (65536, MAX_EXTENSION)])
def testFileDescriptor(self):
self.assertEqual(unittest_pb2.DESCRIPTOR.name,
'google/protobuf/unittest.proto')
self.assertEqual(unittest_pb2.DESCRIPTOR.package, 'protobuf_unittest')
self.assertFalse(unittest_pb2.DESCRIPTOR.serialized_pb is None)
self.assertEqual(unittest_pb2.DESCRIPTOR.dependencies,
[unittest_import_pb2.DESCRIPTOR])
self.assertEqual(unittest_import_pb2.DESCRIPTOR.dependencies,
[unittest_import_public_pb2.DESCRIPTOR])
def testNoGenericServices(self):
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "TestMessage"))
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "FOO"))
self.assertTrue(hasattr(unittest_no_generic_services_pb2, "test_extension"))
# Make sure unittest_no_generic_services_pb2 has no services subclassing
# Proto2 Service class.
if hasattr(unittest_no_generic_services_pb2, "TestService"):
self.assertFalse(issubclass(unittest_no_generic_services_pb2.TestService,
service.Service))
def testMessageTypesByName(self):
file_type = unittest_pb2.DESCRIPTOR
self.assertEqual(
unittest_pb2._TESTALLTYPES,
file_type.message_types_by_name[unittest_pb2._TESTALLTYPES.name])
# Nested messages shouldn't be included in the message_types_by_name
# dictionary (like in the C++ API).
self.assertFalse(
unittest_pb2._TESTALLTYPES_NESTEDMESSAGE.name in
file_type.message_types_by_name)
def testEnumTypesByName(self):
file_type = unittest_pb2.DESCRIPTOR
self.assertEqual(
unittest_pb2._FOREIGNENUM,
file_type.enum_types_by_name[unittest_pb2._FOREIGNENUM.name])
def testExtensionsByName(self):
file_type = unittest_pb2.DESCRIPTOR
self.assertEqual(
unittest_pb2.my_extension_string,
file_type.extensions_by_name[unittest_pb2.my_extension_string.name])
def testPublicImports(self):
    """Symbols publicly imported by a dependency are re-exported: the very
    same class object is reachable through both modules."""
    # Test public imports as embedded message.
    all_type_proto = unittest_pb2.TestAllTypes()
    self.assertEqual(0, all_type_proto.optional_public_import_message.e)

    # PublicImportMessage is actually defined in unittest_import_public_pb2
    # module, and is public imported by unittest_import_pb2 module.
    public_import_proto = unittest_import_pb2.PublicImportMessage()
    self.assertEqual(0, public_import_proto.e)
    self.assertTrue(unittest_import_public_pb2.PublicImportMessage is
                    unittest_import_pb2.PublicImportMessage)
def testBadIdentifiers(self):
    # We're just testing that the code was imported without problems.
    # The extension names collide with common module attribute names
    # (message, descriptor, reflection, service), which the code generator
    # must handle without breaking the generated module.
    message = test_bad_identifiers_pb2.TestBadIdentifiers()
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.message],
                     "foo")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.descriptor],
                     "bar")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.reflection],
                     "baz")
    self.assertEqual(message.Extensions[test_bad_identifiers_pb2.service],
                     "qux")
def testOneof(self):
    """The oneof declared in TestAllTypes is reflected in its descriptor:
    exactly one oneof named 'oneof_field' at index 0, whose member fields
    point back at it via containing_oneof (and no other field does)."""
    desc = unittest_pb2.TestAllTypes.DESCRIPTOR
    self.assertEqual(1, len(desc.oneofs))
    self.assertEqual('oneof_field', desc.oneofs[0].name)
    self.assertEqual(0, desc.oneofs[0].index)
    self.assertIs(desc, desc.oneofs[0].containing_type)
    self.assertIs(desc.oneofs[0], desc.oneofs_by_name['oneof_field'])
    nested_names = set(['oneof_uint32', 'oneof_nested_message',
                        'oneof_string', 'oneof_bytes'])
    self.assertSameElements(
        nested_names,
        [field.name for field in desc.oneofs[0].fields])
    # items() instead of the Python-2-only iteritems(): identical iteration
    # behaviour, but the test also runs under Python 3.
    for field_name, field_desc in desc.fields_by_name.items():
        if field_name in nested_names:
            self.assertIs(desc.oneofs[0], field_desc.containing_oneof)
        else:
            self.assertIsNone(field_desc.containing_oneof)
class SymbolDatabaseRegistrationTest(basetest.TestCase):
    """Checks that messages, enums and files are correctly registered.

    Uses assertEqual throughout; assertEquals is a deprecated alias with
    identical behaviour.
    """

    def testGetSymbol(self):
        """GetSymbol resolves fully-qualified names of messages, nested
        messages and groups; an unknown name raises KeyError."""
        self.assertEqual(
            unittest_pb2.TestAllTypes, symbol_database.Default().GetSymbol(
                'protobuf_unittest.TestAllTypes'))
        self.assertEqual(
            unittest_pb2.TestAllTypes.NestedMessage,
            symbol_database.Default().GetSymbol(
                'protobuf_unittest.TestAllTypes.NestedMessage'))
        # NestedMessage is not a top-level symbol, so the unqualified name
        # must not resolve.
        with self.assertRaises(KeyError):
            symbol_database.Default().GetSymbol('protobuf_unittest.NestedMessage')
        self.assertEqual(
            unittest_pb2.TestAllTypes.OptionalGroup,
            symbol_database.Default().GetSymbol(
                'protobuf_unittest.TestAllTypes.OptionalGroup'))
        self.assertEqual(
            unittest_pb2.TestAllTypes.RepeatedGroup,
            symbol_database.Default().GetSymbol(
                'protobuf_unittest.TestAllTypes.RepeatedGroup'))

    def testEnums(self):
        """Top-level and nested enum types are registered in the pool."""
        self.assertEqual(
            'protobuf_unittest.ForeignEnum',
            symbol_database.Default().pool.FindEnumTypeByName(
                'protobuf_unittest.ForeignEnum').full_name)
        self.assertEqual(
            'protobuf_unittest.TestAllTypes.NestedEnum',
            symbol_database.Default().pool.FindEnumTypeByName(
                'protobuf_unittest.TestAllTypes.NestedEnum').full_name)

    def testFindFileByName(self):
        """The generated .proto file itself is registered in the pool."""
        self.assertEqual(
            'google/protobuf/unittest.proto',
            symbol_database.Default().pool.FindFileByName(
                'google/protobuf/unittest.proto').name)
# Allow running this test module directly; basetest supplies the runner.
if __name__ == '__main__':
    basetest.main()
| bsd-3-clause |
willprice/arduino-sphere-project | scripts/example_direction_finder/temboo/Library/Google/Gmail/InboxFeed.py | 1 | 5147 | # -*- coding: utf-8 -*-
###############################################################################
#
# InboxFeed
# Allows you to access a read-only Gmail feed that contains a list of unread emails.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class InboxFeed(Choreography):
    """Choreo wrapper for the Temboo /Library/Google/Gmail/InboxFeed endpoint.

    The factory methods below tie this Choreo to its matching input-set,
    result-set and execution classes.
    """

    def __init__(self, temboo_session):
        """
        Create a new instance of the InboxFeed Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(InboxFeed, self).__init__(temboo_session, '/Library/Google/Gmail/InboxFeed')

    def new_input_set(self):
        # Inputs for this Choreo are collected in an InboxFeedInputSet.
        return InboxFeedInputSet()

    def _make_result_set(self, result, path):
        # Wrap a raw execution result in the Choreo-specific result set.
        return InboxFeedResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return InboxFeedChoreographyExecution(session, exec_id, path)
class InboxFeedInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the InboxFeed
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.

    Each setter simply forwards to InputSet._set_input under the documented
    input name.
    """

    def set_Label(self, value):
        """
        Set the value of the Label input for this Choreo. ((optional, string) The name of a Gmail Label to retrieve messages from (e.g., important, starred, sent, junk-e-mail, all).)
        """
        super(InboxFeedInputSet, self)._set_input('Label', value)

    def set_Mode(self, value):
        """
        Set the value of the Mode input for this Choreo. ((optional, string) Used when an XPath query is provided. Valid values are "select" or "recursive". Select mode will return the first match of the query. In recursive mode, the XPath query will be applied within a loop.)
        """
        super(InboxFeedInputSet, self)._set_input('Mode', value)

    def set_Password(self, value):
        """
        Set the value of the Password input for this Choreo. ((required, password) A Google App-specific password that you've generated after enabling 2-Step Verification. See the Gmailv2 bundle for OAuth.)
        """
        super(InboxFeedInputSet, self)._set_input('Password', value)

    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format for the response. Valid values are JSON and XML. This will be ignored when providng an XPath query because results are returned as a string or JSON depending on the Mode specified.)
        """
        super(InboxFeedInputSet, self)._set_input('ResponseFormat', value)

    def set_Username(self, value):
        """
        Set the value of the Username input for this Choreo. ((required, string) Your full Google email address e.g., martha.temboo@gmail.com. See the Gmailv2 bundle for OAuth.)
        """
        super(InboxFeedInputSet, self)._set_input('Username', value)

    def set_XPath(self, value):
        """
        Set the value of the XPath input for this Choreo. ((optional, string) An XPATH query to run.)
        """
        super(InboxFeedInputSet, self)._set_input('XPath', value)
class InboxFeedResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the InboxFeed Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin `str`; left
        # unchanged because renaming it would alter the public signature.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Google. This will contain the data from the Gmail feed, or if the XPath input is provided, it will contain the result of the XPath query.)
        """
        return self._output.get('Response', None)

    def get_FullCount(self):
        """
        Retrieve the value for the "FullCount" output from this Choreo execution. ((integer) The number of unread messages. This is parsed from the Google XML response. Note that when using the Label input to retrieve messages from a particular Gmail label, the full count element may be 0.)
        """
        return self._output.get('FullCount', None)
class InboxFeedChoreographyExecution(ChoreographyExecution):
    """Execution handle for the InboxFeed Choreo; produces typed result sets."""

    def _make_result_set(self, response, path):
        return InboxFeedResultSet(response, path)
| gpl-2.0 |
cloudera/hue | desktop/core/ext-py/django-extensions-1.8.0/django_extensions/management/commands/create_template_tags.py | 7 | 2830 | # -*- coding: utf-8 -*-
import os
import sys
from django.core.management.base import AppCommand
from django_extensions.management.utils import _make_writeable, signalcommand
class Command(AppCommand):
    """Management command that creates a templatetags package for an app."""

    help = ("Creates a Django template tags directory structure for the given app name"
            " in the apps's directory")

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--name', '-n', action='store', dest='tag_library_name',
            default='appname_tags',
            help='The name to use for the template tag base name. '
            'Defaults to `appname`_tags.')

    requires_system_checks = False
    # Can't import settings during this command, because they haven't
    # necessarily been created.
    can_import_settings = True

    @signalcommand
    def handle_app_config(self, app_config, **options):
        # Resolve the sentinel default into "<appdir basename>_tags" before
        # expanding the bundled template into the app's directory.
        app_dir = app_config.path
        tag_library_name = options.get('tag_library_name')
        if tag_library_name == 'appname_tags':
            tag_library_name = '%s_tags' % os.path.basename(app_dir)
        copy_template('template_tags_template', app_dir, tag_library_name)
def copy_template(template_name, copy_to, tag_library_name):
    """Copy the named bundled template directory tree into *copy_to*.

    Files named ``sample*`` are renamed using *tag_library_name*, a trailing
    ``.tmpl`` suffix is stripped, hidden directories and ``.pyc`` /
    ``.DS_Store`` files are skipped, and existing files are never overwritten.
    """
    import django_extensions
    import shutil
    template_dir = os.path.join(django_extensions.__path__[0], 'conf', template_name)

    # walks the template structure and copies it
    for d, subdirs, files in os.walk(template_dir):
        relative_dir = d[len(template_dir) + 1:]
        if relative_dir and not os.path.exists(os.path.join(copy_to, relative_dir)):
            os.mkdir(os.path.join(copy_to, relative_dir))
        # Prune hidden directories in place so os.walk does not descend into
        # them.  (The previous `del subdirs[i]` inside `enumerate` skipped the
        # entry following each deletion and used stale indices.)
        subdirs[:] = [subdir for subdir in subdirs if not subdir.startswith('.')]
        for f in files:
            if f.endswith('.pyc') or f.startswith('.DS_Store'):
                continue
            path_old = os.path.join(d, f)
            path_new = os.path.join(copy_to, relative_dir, f.replace('sample', tag_library_name))
            if os.path.exists(path_new):
                # Renamed target exists; fall back to the original file name,
                # and give up entirely if that exists too (never overwrite).
                path_new = os.path.join(copy_to, relative_dir, f)
            if os.path.exists(path_new):
                continue
            # Strip the exact '.tmpl' suffix.  str.rstrip(".tmpl") removed any
            # trailing run of the characters {'.', 't', 'm', 'p', 'l'}, which
            # mangled names such as 'tags.html' into 'tags.h'.
            if path_new.endswith('.tmpl'):
                path_new = path_new[:-len('.tmpl')]
            # Context managers guarantee both handles are closed even if the
            # copy raises.
            with open(path_old, 'r') as fp_old:
                with open(path_new, 'w') as fp_new:
                    fp_new.write(fp_old.read())
            try:
                shutil.copymode(path_old, path_new)
                _make_writeable(path_new)
            except OSError:
                sys.stderr.write("Notice: Couldn't set permission bits on %s. You're probably using an uncommon filesystem setup. No problem.\n" % path_new)
| apache-2.0 |
compasscoin/compasscoin | src/share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen
# Source image, output animation, and rendering parameters.
SRC = 'img/reload_scaled.png'
DST = '../../src/qt/res/movies/update_spinner.mng'
TMPDIR = '/tmp'
TMPNAME = 'tmp-%03i.png'   # per-frame temp file pattern, %03i = frame number
NUMFRAMES = 35
FRAMERATE = 10.0
CONVERT = 'convert'        # ImageMagick binary used to assemble the .mng
CLOCKWISE = True
DSIZE = (16, 16)           # final thumbnail size of each frame

im_src = Image.open(SRC)

# Mirror the source first; combined with the negated rotation angles applied
# per frame below, this produces a clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)
def frame_to_filename(frame):
    """Return the temporary PNG path used for animation frame *frame*."""
    filename = TMPNAME % frame
    return path.join(TMPDIR, filename)
# Render each frame by rotating the (possibly mirrored) source image, then
# hand the whole sequence to ImageMagick's `convert` to assemble the .mng.
frame_files = []
for frame in range(NUMFRAMES):  # range (not the Python-2-only xrange) keeps this runnable on Python 3
    # Sample the rotation at the middle of each frame's interval.
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
lowitty/server | libsDarwin/twisted/web/test/test_web.py | 3 | 43967 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for various parts of L{twisted.web}.
"""
import os
import zlib
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.python import reflect
from twisted.python.compat import _PY3
from twisted.python.filepath import FilePath
from twisted.trial import unittest
from twisted.internet import reactor
from twisted.internet.address import IPv4Address
from twisted.internet.task import Clock
from twisted.web import server, resource
from twisted.web import iweb, http, error
from twisted.web.test.requesthelper import DummyChannel, DummyRequest
from twisted.web.static import Data
class ResourceTests(unittest.TestCase):
    """
    Tests for basic L{resource.Resource} behaviour.
    """
    def testListEntities(self):
        """
        A freshly constructed L{resource.Resource} has no child entities.
        """
        root = resource.Resource()
        self.assertEqual(root.listEntities(), [])
class SimpleResource(resource.Resource):
    """
    @ivar _contentType: C{None} or a C{str} giving the value of the
        I{Content-Type} header in the response this resource will render. If it
        is C{None}, no I{Content-Type} header will be set in the response.
    """
    def __init__(self, contentType=None):
        resource.Resource.__init__(self)
        self._contentType = contentType

    def render(self, request):
        if self._contentType is not None:
            request.responseHeaders.setRawHeaders(
                b"content-type", [self._contentType])

        # setLastModified/setETag return http.CACHED when the client's
        # conditional-request validators match; in that case render an empty
        # body, otherwise the canonical b"correct" payload.
        if http.CACHED in (request.setLastModified(10),
                           request.setETag(b'MatchingTag')):
            return b''
        else:
            return b"correct"
class SiteTest(unittest.TestCase):
    """
    Unit tests for L{server.Site}.
    """
    def test_simplestSite(self):
        """
        L{Site.getResourceFor} returns the C{b""} child of the root resource it
        is constructed with when processing a request for I{/}.
        """
        sres1 = SimpleResource()
        sres2 = SimpleResource()
        sres1.putChild(b"", sres2)
        site = server.Site(sres1)
        self.assertIdentical(
            site.getResourceFor(DummyRequest([b''])),
            sres2, "Got the wrong resource.")

    def test_defaultRequestFactory(self):
        """
        L{server.Request} is the default request factory.
        """
        site = server.Site(resource=SimpleResource())

        self.assertIs(server.Request, site.requestFactory)

    def test_constructorRequestFactory(self):
        """
        Can be initialized with a custom requestFactory.
        """
        customFactory = object()

        site = server.Site(
            resource=SimpleResource(), requestFactory=customFactory)

        self.assertIs(customFactory, site.requestFactory)

    def test_buildProtocol(self):
        """
        Returns a C{Channel} whose C{site} and C{requestFactory} attributes are
        assigned from the C{site} instance.
        """
        site = server.Site(SimpleResource())

        channel = site.buildProtocol(None)

        self.assertIs(site, channel.site)
        self.assertIs(site.requestFactory, channel.requestFactory)
class SessionTests(unittest.TestCase):
    """
    Tests for L{server.Session}.
    """
    def setUp(self):
        """
        Create a site with one active session using a deterministic, easily
        controlled clock.
        """
        self.clock = Clock()
        self.uid = b'unique'
        self.site = server.Site(resource.Resource())
        self.session = server.Session(self.site, self.uid, self.clock)
        self.site.sessions[self.uid] = self.session

    def test_defaultReactor(self):
        """
        If not value is passed to L{server.Session.__init__}, the global
        reactor is used.
        """
        session = server.Session(server.Site(resource.Resource()), b'123')
        self.assertIdentical(session._reactor, reactor)

    def test_startCheckingExpiration(self):
        """
        L{server.Session.startCheckingExpiration} causes the session to expire
        after L{server.Session.sessionTimeout} seconds without activity.
        """
        self.session.startCheckingExpiration()

        # Advance to almost the timeout - nothing should happen.
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)

        # Advance to the timeout, the session should expire.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)

        # There should be no calls left over, either.
        self.assertFalse(self.clock.calls)

    def test_expire(self):
        """
        L{server.Session.expire} expires the session.
        """
        self.session.expire()
        # It should be gone from the session dictionary.
        self.assertNotIn(self.uid, self.site.sessions)
        # And there should be no pending delayed calls.
        self.assertFalse(self.clock.calls)

    def test_expireWhileChecking(self):
        """
        L{server.Session.expire} expires the session even if the timeout call
        isn't due yet.
        """
        self.session.startCheckingExpiration()
        self.test_expire()

    def test_notifyOnExpire(self):
        """
        A function registered with L{server.Session.notifyOnExpire} is called
        when the session expires.
        """
        # A one-element list is used as a mutable flag the closure can set.
        callbackRan = [False]
        def expired():
            callbackRan[0] = True
        self.session.notifyOnExpire(expired)
        self.session.expire()
        self.assertTrue(callbackRan[0])

    def test_touch(self):
        """
        L{server.Session.touch} updates L{server.Session.lastModified} and
        delays session timeout.
        """
        # Make sure it works before startCheckingExpiration
        self.clock.advance(3)
        self.session.touch()
        self.assertEqual(self.session.lastModified, 3)

        # And after startCheckingExpiration
        self.session.startCheckingExpiration()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.session.touch()
        self.clock.advance(self.session.sessionTimeout - 1)
        self.assertIn(self.uid, self.site.sessions)

        # It should have advanced it by just sessionTimeout, no more.
        self.clock.advance(1)
        self.assertNotIn(self.uid, self.site.sessions)
# Conditional requests:
# If-None-Match, If-Modified-Since
# make conditional request:
# normal response if condition succeeds
# if condition fails:
# response code
# no body
def httpBody(whole):
    """
    Return the entity body of the raw HTTP response bytes C{whole}: everything
    after the first blank line.
    """
    parts = whole.split(b'\r\n\r\n', 1)
    return parts[1]
def httpHeader(whole, key):
    """
    Return the value of the HTTP header named C{key} in the raw response bytes
    C{whole}, or C{None} if it is absent.  Matching is case-insensitive and
    compares the complete header name, so asking for C{b"Content-Type"} no
    longer matches a longer name such as C{b"Content-Type-Options"}, as the
    old C{startswith} test did.
    """
    key = key.lower()
    headers = whole.split(b'\r\n\r\n', 1)[0]
    for header in headers.split(b'\r\n'):
        # Split on the first colon; lines without one (e.g. the status line)
        # can never match.
        name, sep, value = header.partition(b':')
        if sep and name.strip().lower() == key:
            return value.strip()
    return None
def httpCode(whole):
    """
    Return the integer status code parsed from the status line (the first
    line) of the raw HTTP response bytes C{whole}.
    """
    status_line = whole.split(b'\r\n', 1)[0]
    return int(status_line.split()[1])
class ConditionalTests(unittest.TestCase):
    """
    web.server's handling of conditional requests for cache validation.
    """
    def setUp(self):
        # The root resource serves itself for b'' and a variant with an
        # explicit Content-Type under /with-content-type.
        self.resrc = SimpleResource()
        self.resrc.putChild(b'', self.resrc)
        self.resrc.putChild(b'with-content-type', SimpleResource(b'image/jpeg'))
        self.site = server.Site(self.resrc)
        self.site.startFactory()
        self.addCleanup(self.site.stopFactory)

        # HELLLLLLLLLLP!  This harness is Very Ugly.
        # Drive the channel directly over a string transport, stubbing out
        # the transport methods the channel calls.
        self.channel = self.site.buildProtocol(None)
        self.transport = http.StringTransport()
        self.transport.close = lambda *a, **kw: None
        self.transport.disconnecting = lambda *a, **kw: 0
        self.transport.getPeer = lambda *a, **kw: "peer"
        self.transport.getHost = lambda *a, **kw: "host"
        self.channel.makeConnection(self.transport)

    def tearDown(self):
        self.channel.connectionLost(None)

    def _modifiedTest(self, modifiedSince=None, etag=None):
        """
        Given the value C{modifiedSince} for the I{If-Modified-Since} header or
        the value C{etag} for the I{If-Not-Match} header, verify that a response
        with a 200 code, a default Content-Type, and the resource as the body is
        returned.
        """
        if modifiedSince is not None:
            validator = b"If-Modified-Since: " + modifiedSince
        else:
            validator = b"If-Not-Match: " + etag
        for line in [b"GET / HTTP/1.1", validator, b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.OK)
        self.assertEqual(httpBody(result), b"correct")
        self.assertEqual(httpHeader(result, b"Content-Type"), b"text/html")

    def test_modified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with
        a timestamp indicating a time before the last modification of the
        requested resource, a 200 response is returned along with a response
        body containing the resource.
        """
        self._modifiedTest(modifiedSince=http.datetimeToString(1))

    def test_unmodified(self):
        """
        If a request is made with an I{If-Modified-Since} header value with a
        timestamp indicating a time after the last modification of the request
        resource, a 304 response is returned along with an empty response body
        and no Content-Type header if the application does not set one.
        """
        for line in [b"GET / HTTP/1.1",
                     b"If-Modified-Since: " + http.datetimeToString(100), b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")
        # Since there SHOULD NOT (RFC 2616, section 10.3.5) be any
        # entity-headers, the Content-Type is not set if the application does
        # not explicitly set it.
        self.assertEqual(httpHeader(result, b"Content-Type"), None)

    def test_invalidTimestamp(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        cannot be parsed, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest(modifiedSince=b"like, maybe a week ago, I guess?")

    def test_invalidTimestampYear(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the year position which is not an integer, the
        header is treated as not having been present and a normal 200
        response is returned with a response body containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Jan blah 00:00:10 GMT")

    def test_invalidTimestampTooLongAgo(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a year before the epoch, the header is treated as not
        having been present and a normal 200 response is returned with a
        response body containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Jan 1899 00:00:10 GMT")

    def test_invalidTimestampMonth(self):
        """
        If a request is made with an I{If-Modified-Since} header value which
        contains a string in the month position which is not a recognized
        month abbreviation, the header is treated as not having been present
        and a normal 200 response is returned with a response body
        containing the resource.
        """
        self._modifiedTest(modifiedSince=b"Thu, 01 Blah 1970 00:00:10 GMT")

    def test_etagMatchedNot(self):
        """
        If a request is made with an I{If-None-Match} ETag which does not match
        the current ETag of the requested resource, the header is treated as not
        having been present and a normal 200 response is returned with a
        response body containing the resource.
        """
        self._modifiedTest(etag=b"unmatchedTag")

    def test_etagMatched(self):
        """
        If a request is made with an I{If-None-Match} ETag which does match the
        current ETag of the requested resource, a 304 response is returned along
        with an empty response body.
        """
        for line in [b"GET / HTTP/1.1", b"If-None-Match: MatchingTag", b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpHeader(result, b"ETag"), b"MatchingTag")
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")

    def test_unmodifiedWithContentType(self):
        """
        Similar to L{test_etagMatched}, but the response should include a
        I{Content-Type} header if the application explicitly sets one.

        This I{Content-Type} header SHOULD NOT be present according to RFC 2616,
        section 10.3.5.  It will only be present if the application explicitly
        sets it.
        """
        for line in [b"GET /with-content-type HTTP/1.1",
                     b"If-None-Match: MatchingTag", b""]:
            self.channel.lineReceived(line)
        result = self.transport.getvalue()
        self.assertEqual(httpCode(result), http.NOT_MODIFIED)
        self.assertEqual(httpBody(result), b"")
        self.assertEqual(httpHeader(result, b"Content-Type"), b"image/jpeg")
class RequestTests(unittest.TestCase):
    """
    Tests for the HTTP request class, L{server.Request}.
    """

    def test_interface(self):
        """
        L{server.Request} instances provide L{iweb.IRequest}.
        """
        self.assertTrue(
            verifyObject(iweb.IRequest, server.Request(DummyChannel(), True)))

    def testChildLink(self):
        """childLink is relative to the request path, with and without a
        trailing slash."""
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.childLink(b'baz'), b'bar/baz')
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar/', b'HTTP/1.0')
        self.assertEqual(request.childLink(b'baz'), b'baz')

    def testPrePathURLSimple(self):
        """Default HTTP port (80) is omitted from prePathURL."""
        request = server.Request(DummyChannel(), 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        request.setHost(b'example.com', 80)
        self.assertEqual(request.prePathURL(), b'http://example.com/foo/bar')

    def testPrePathURLNonDefault(self):
        """A non-default port is included in prePathURL."""
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com:81/foo/bar')

    def testPrePathURLSSLPort(self):
        """Port 443 over plain TCP is still http:// and keeps the port."""
        d = DummyChannel()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost(b'example.com', 443)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com:443/foo/bar')

    def testPrePathURLSSLPortAndSSL(self):
        """Port 443 over TLS yields https:// with the default port omitted."""
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 443
        request = server.Request(d, 1)
        request.setHost(b'example.com', 443)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com/foo/bar')

    def testPrePathURLHTTPPortAndSSL(self):
        """Port 80 over TLS keeps the (non-default for https) port."""
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 80
        request = server.Request(d, 1)
        request.setHost(b'example.com', 80)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com:80/foo/bar')

    def testPrePathURLSSLNonDefault(self):
        """A non-default port over TLS is included in the https URL."""
        d = DummyChannel()
        d.transport = DummyChannel.SSL()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://example.com:81/foo/bar')

    def testPrePathURLSetSSLHost(self):
        """setHost's ssl flag forces an https:// prePathURL."""
        d = DummyChannel()
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'foo.com', 81, 1)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo/bar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'https://foo.com:81/foo/bar')

    def test_prePathURLQuoting(self):
        """
        L{Request.prePathURL} quotes special characters in the URL segments to
        preserve the original meaning.
        """
        d = DummyChannel()
        request = server.Request(d, 1)
        request.setHost(b'example.com', 80)
        request.gotLength(0)
        request.requestReceived(b'GET', b'/foo%2Fbar', b'HTTP/1.0')
        self.assertEqual(request.prePathURL(), b'http://example.com/foo%2Fbar')
class GzipEncoderTests(unittest.TestCase):
    """
    Tests for L{server.GzipEncoderFactory} and the encoder it produces.
    """
    if _PY3:
        skip = "GzipEncoder not ported to Python 3 yet."

    def setUp(self):
        # Serve a small static resource at /foo, wrapped so responses may be
        # gzip-encoded when the client advertises support.
        self.channel = DummyChannel()
        staticResource = Data(b"Some data", "text/plain")
        wrapped = resource.EncodingResourceWrapper(
            staticResource, [server.GzipEncoderFactory()])
        self.channel.site.resource.putChild(b"foo", wrapped)

    def test_interfaces(self):
        """
        L{server.GzipEncoderFactory} implements the
        L{iweb._IRequestEncoderFactory} and its C{encoderForRequest} returns an
        instance of L{server._GzipEncoder} which implements
        L{iweb._IRequestEncoder}.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"gzip,deflate"])
        factory = server.GzipEncoderFactory()
        self.assertTrue(verifyObject(iweb._IRequestEncoderFactory, factory))

        encoder = factory.encoderForRequest(request)
        self.assertTrue(verifyObject(iweb._IRequestEncoder, encoder))

    def test_encoding(self):
        """
        If the client request passes a I{Accept-Encoding} header which mentions
        gzip, L{server._GzipEncoder} automatically compresses the data.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"gzip,deflate"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        # 16 + MAX_WBITS tells zlib to expect a gzip (not raw deflate) stream.
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))

    def test_nonEncoding(self):
        """
        L{server.GzipEncoderFactory} doesn't return a L{server._GzipEncoder} if
        the I{Accept-Encoding} header doesn't mention gzip support.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"foo,bar"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertIn(b"Content-Length", data)
        self.assertNotIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data", body)

    def test_multipleAccept(self):
        """
        If there are multiple I{Accept-Encoding} header,
        L{server.GzipEncoderFactory} reads them properly to detect if gzip is
        supported.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))

    def test_alreadyEncoded(self):
        """
        If the content is already encoded and the I{Content-Encoding} header is
        set, L{server.GzipEncoderFactory} properly appends gzip to it.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.responseHeaders.setRawHeaders(b"Content-Encoding",
                                              [b"deflate"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: deflate,gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))

    def test_multipleEncodingLines(self):
        """
        If there are several I{Content-Encoding} headers,
        L{server.GzipEncoderFactory} normalizes it and appends gzip to the
        field value.
        """
        request = server.Request(self.channel, False)
        request.gotLength(0)
        request.requestHeaders.setRawHeaders(b"Accept-Encoding",
                                             [b"deflate", b"gzip"])
        request.responseHeaders.setRawHeaders(b"Content-Encoding",
                                              [b"foo", b"bar"])
        request.requestReceived(b'GET', b'/foo', b'HTTP/1.0')
        data = self.channel.transport.written.getvalue()
        self.assertNotIn(b"Content-Length", data)
        self.assertIn(b"Content-Encoding: foo,bar,gzip\r\n", data)
        body = data[data.find(b"\r\n\r\n") + 4:]
        self.assertEqual(b"Some data",
                         zlib.decompress(body, 16 + zlib.MAX_WBITS))
class RootResource(resource.Resource):
    """
    Resource that calls L{Request.rememberRootURL} on every child lookup, so
    tests can inspect L{Request.getRootURL}.
    """
    isLeaf = 0

    def getChildWithDefault(self, name, request):
        # Record the URL up to this resource before delegating the lookup.
        request.rememberRootURL()
        return resource.Resource.getChildWithDefault(self, name, request)

    def render(self, request):
        # NOTE(review): returns a native str here while sibling resources in
        # this module return bytes — presumably tolerated by these tests.
        return ''
class RememberURLTests(unittest.TestCase):
    """
    Tests for L{Request.getRootURL} in combination with
    L{Request.rememberRootURL} (see L{RootResource} above).
    """
    def createServer(self, r):
        # Build a channel whose site serves the given root resource.
        chan = DummyChannel()
        chan.site = server.Site(r)
        return chan

    def testSimple(self):
        """The remembered root URL points at the resource that called
        rememberRootURL, here mounted under /foo."""
        r = resource.Resource()
        r.isLeaf = 0
        rr = RootResource()
        r.putChild(b'foo', rr)
        rr.putChild(b'', rr)
        rr.putChild(b'bar', resource.Resource())
        chan = self.createServer(r)
        for url in [b'/foo/', b'/foo/bar', b'/foo/bar/baz', b'/foo/bar/']:
            request = server.Request(chan, 1)
            request.setHost(b'example.com', 81)
            request.gotLength(0)
            request.requestReceived(b'GET', url, b'HTTP/1.0')
            self.assertEqual(request.getRootURL(), b"http://example.com/foo")

    def testRoot(self):
        """When the remembering resource is the site root, the root URL is
        just the host."""
        rr = RootResource()
        rr.putChild(b'', rr)
        rr.putChild(b'bar', resource.Resource())
        chan = self.createServer(rr)
        for url in [b'/', b'/bar', b'/bar/baz', b'/bar/']:
            request = server.Request(chan, 1)
            request.setHost(b'example.com', 81)
            request.gotLength(0)
            request.requestReceived(b'GET', url, b'HTTP/1.0')
            self.assertEqual(request.getRootURL(), b"http://example.com/")
class NewRenderResource(resource.Resource):
    """
    Resource with per-method C{render_*} handlers, including one for the
    nonstandard HEH method, used to exercise render dispatch.
    """
    def render_GET(self, request):
        return b"hi hi"

    def render_HEH(self, request):
        return b"ho ho"
@implementer(resource.IResource)
class HeadlessResource(object):
    """
    A resource that implements GET but not HEAD.
    """
    allowedMethods = [b"GET"]

    def render(self, request):
        """
        Leave the request open for future writes.
        """
        # The request is kept on the instance so a test can finish (or
        # inspect) it later; render never calls request.finish() itself.
        self.request = request
        if request.method not in self.allowedMethods:
            raise error.UnsupportedMethod(self.allowedMethods)
        self.request.write(b"some data")
        return server.NOT_DONE_YET
class NewRenderTests(unittest.TestCase):
    """
    Tests for L{server.Request.render}.
    """
    def _getReq(self, resource=None):
        """
        Create a request object with a stub channel and install the
        passed resource at /newrender. If no resource is passed,
        create one.
        """
        d = DummyChannel()
        if resource is None:
            resource = NewRenderResource()
        d.site.resource.putChild(b'newrender', resource)
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        return request
    def testGoodMethods(self):
        """
        Both the standard I{GET} and the custom I{HEH} method dispatch to
        the matching C{render_*} method.
        """
        req = self._getReq()
        req.requestReceived(b'GET', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.transport.getvalue().splitlines()[-1], b'hi hi')
        req = self._getReq()
        req.requestReceived(b'HEH', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.transport.getvalue().splitlines()[-1], b'ho ho')
    def testBadMethods(self):
        """
        Methods with no corresponding C{render_*} handler produce a 501
        I{Not Implemented} response.
        """
        req = self._getReq()
        req.requestReceived(b'CONNECT', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
        req = self._getReq()
        req.requestReceived(b'hlalauguG', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
    def test_notAllowedMethod(self):
        """
        When trying to invoke a method not in the allowed method list, we get
        a response saying it is not allowed.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        self.assertTrue(req.responseHeaders.hasHeader(b"allow"))
        raw_header = req.responseHeaders.getRawHeaders(b'allow')[0]
        allowed = sorted([h.strip() for h in raw_header.split(b",")])
        self.assertEqual([b'GET', b'HEAD', b'HEH'], allowed)
    def testImplicitHead(self):
        """
        A I{HEAD} request against a resource with only C{render_GET} succeeds
        but the body produced by the GET renderer is suppressed.
        """
        req = self._getReq()
        req.requestReceived(b'HEAD', b'/newrender', b'HTTP/1.0')
        self.assertEqual(req.code, 200)
        self.assertEqual(-1, req.transport.getvalue().find(b'hi hi'))
    def test_unsupportedHead(self):
        """
        HEAD requests against resource that only claim support for GET
        should not include a body in the response.
        """
        resource = HeadlessResource()
        req = self._getReq(resource)
        req.requestReceived(b"HEAD", b"/newrender", b"HTTP/1.0")
        headers, body = req.transport.getvalue().split(b'\r\n\r\n')
        self.assertEqual(req.code, 200)
        self.assertEqual(body, b'')
    def test_noBytesResult(self):
        """
        When implemented C{render} method does not return bytes an internal
        server error is returned.
        """
        class RiggedRepr(object):
            def __repr__(self):
                return 'my>repr'
        result = RiggedRepr()
        no_bytes_resource = resource.Resource()
        no_bytes_resource.render = lambda request: result
        request = self._getReq(no_bytes_resource)
        request.requestReceived(b"GET", b"/newrender", b"HTTP/1.0")
        headers, body = request.transport.getvalue().split(b'\r\n\r\n')
        self.assertEqual(request.code, 500)
        # The error page HTML-escapes the repr of the request, the resource
        # and the returned value, so the expected text uses &lt;/&gt;
        # entities.  (The previous version of this list had the entities
        # un-escaped, which could never match the actual escaped output.)
        expected = [
            '',
            '<html>',
            '  <head><title>500 - Request did not return bytes</title></head>',
            '  <body>',
            '    <h1>Request did not return bytes</h1>',
            '    <p>Request: <pre>&lt;%s&gt;</pre><br />'
            'Resource: <pre>&lt;%s&gt;</pre><br />'
            'Value: <pre>my&gt;repr</pre></p>' % (
                reflect.safe_repr(request)[1:-1],
                reflect.safe_repr(no_bytes_resource)[1:-1],
                ),
            '  </body>',
            '</html>',
            '']
        self.assertEqual('\n'.join(expected).encode('ascii'), body)
class GettableResource(resource.Resource):
    """
    Used by AllowedMethodsTests to simulate an allowed method.
    """
    def render_GET(self):
        pass
    def render_fred_render_ethel(self):
        """
        The unusual method name is designed to test the culling method
        in C{twisted.web.resource._computeAllowedMethods}.
        """
        pass
class AllowedMethodsTests(unittest.TestCase):
    """
    'C{twisted.web.resource._computeAllowedMethods} is provided by a
    default should the subclass not provide the method.
    """
    if _PY3:
        skip = "Allowed methods functionality not ported to Python 3."
    def _getReq(self):
        """
        Generate a dummy request for use by C{_computeAllowedMethod} tests.
        """
        d = DummyChannel()
        d.site.resource.putChild(b'gettableresource', GettableResource())
        d.transport.port = 81
        request = server.Request(d, 1)
        request.setHost(b'example.com', 81)
        request.gotLength(0)
        return request
    def test_computeAllowedMethods(self):
        """
        C{_computeAllowedMethods} will search through the
        'gettableresource' for all attributes/methods of the form
        'render_{method}' ('render_GET', for example) and return a list of
        the methods. 'HEAD' will always be included from the
        resource.Resource superclass.
        """
        res = GettableResource()
        allowedMethods = resource._computeAllowedMethods(res)
        self.assertEqual(set(allowedMethods),
                         set([b'GET', b'HEAD', b'fred_render_ethel']))
    def test_notAllowed(self):
        """
        When an unsupported method is requested, the default
        L{_computeAllowedMethods} method will be called to determine the
        allowed methods, and the HTTP 405 'Method Not Allowed' status will
        be returned with the allowed methods will be returned in the
        'Allow' header.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/gettableresource', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        self.assertEqual(
            set(req.responseHeaders.getRawHeaders(b'allow')[0].split(b", ")),
            set([b'GET', b'HEAD', b'fred_render_ethel'])
        )
    def test_notAllowedQuoting(self):
        """
        When an unsupported method response is generated, an HTML message will
        be displayed.  That message should include a quoted form of the URI
        and, since that value come from a browser and shouldn't necessarily be
        trusted.
        """
        req = self._getReq()
        req.requestReceived(b'POST', b'/gettableresource?'
                            b'value=<script>bad', b'HTTP/1.0')
        self.assertEqual(req.code, 405)
        renderedPage = req.transport.getvalue()
        # The raw payload must not appear verbatim; only its HTML-escaped
        # form may be present.  (Previously both assertions used the same
        # un-escaped bytes, which was self-contradictory and could never
        # pass.)
        self.assertNotIn(b"<script>bad", renderedPage)
        self.assertIn(b'&lt;script&gt;bad', renderedPage)
    def test_notImplementedQuoting(self):
        """
        When an not-implemented method response is generated, an HTML message
        will be displayed.  That message should include a quoted form of the
        requested method, since that value come from a browser and shouldn't
        necessarily be trusted.
        """
        req = self._getReq()
        req.requestReceived(b'<style>bad', b'/gettableresource', b'HTTP/1.0')
        self.assertEqual(req.code, 501)
        renderedPage = req.transport.getvalue()
        # Same escaping contract as above: raw bytes absent, escaped form
        # present.
        self.assertNotIn(b"<style>bad", renderedPage)
        self.assertIn(b'&lt;style&gt;bad', renderedPage)
class DummyRequestForLogTest(DummyRequest):
    """
    A L{DummyRequest} whose attributes read by the access-log formatters
    are pre-populated with fixed, recognizable values.
    """
    uri = b'/dummy' # parent class uri has "http://", which doesn't really happen
    code = 123
    clientproto = b'HTTP/1.0'
    # No response length recorded; formatters render this as "-".
    sentLength = None
    client = IPv4Address('TCP', '1.2.3.4', 12345)
class AccessLogTestsMixin(object):
    """
    A mixin for L{TestCase} subclasses defining tests that apply to
    L{HTTPFactory} and its subclasses.
    """
    def factory(self, *args, **kwargs):
        """
        Get the factory class to apply logging tests to.
        Subclasses must override this method.
        """
        raise NotImplementedError("Subclass failed to override factory")
    def test_combinedLogFormat(self):
        """
        The factory's C{log} method writes a I{combined log format} line to the
        factory's log file.
        """
        reactor = Clock()
        # Set the clock to an arbitrary point in time. It doesn't matter when
        # as long as it corresponds to the timestamp in the string literal in
        # the assertion below.
        reactor.advance(1234567890)
        logPath = self.mktemp()
        factory = self.factory(logPath=logPath)
        factory._reactor = reactor
        factory.startFactory()
        try:
            factory.log(DummyRequestForLogTest(factory))
        finally:
            # Stop the factory even if logging raised so the log file is
            # flushed and closed.
            factory.stopFactory()
        self.assertEqual(
            # Client IP
            b'"1.2.3.4" '
            # Some blanks we never fill in
            b'- - '
            # The current time (circa 1234567890)
            b'[13/Feb/2009:23:31:30 +0000] '
            # Method, URI, version
            b'"GET /dummy HTTP/1.0" '
            # Response code
            b'123 '
            # Response length
            b'- '
            # Value of the "Referer" header. Probably incorrectly quoted.
            b'"-" '
            # Value pf the "User-Agent" header. Probably incorrectly quoted.
            b'"-"' + self.linesep,
            FilePath(logPath).getContent())
    def test_logFormatOverride(self):
        """
        If the factory is initialized with a custom log formatter then that
        formatter is used to generate lines for the log file.
        """
        def notVeryGoodFormatter(timestamp, request):
            # Deliberately ignores both arguments.
            return u"this is a bad log format"
        reactor = Clock()
        reactor.advance(1234567890)
        logPath = self.mktemp()
        factory = self.factory(
            logPath=logPath, logFormatter=notVeryGoodFormatter)
        factory._reactor = reactor
        factory.startFactory()
        try:
            factory.log(DummyRequestForLogTest(factory))
        finally:
            factory.stopFactory()
        self.assertEqual(
            # self.linesep is a sad thing.
            # https://twistedmatrix.com/trac/ticket/6938
            b"this is a bad log format" + self.linesep,
            FilePath(logPath).getContent())
class HTTPFactoryAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
    """
    Tests for L{http.HTTPFactory.log}.
    """
    # HTTPFactory always terminates log lines with "\n".
    factory = http.HTTPFactory
    linesep = b"\n"
class SiteAccessLogTests(AccessLogTestsMixin, unittest.TestCase):
    """
    Tests for L{server.Site.log}.
    """
    if _PY3:
        skip = "Site not ported to Python 3 yet."
    # Site uses the platform line separator, unlike HTTPFactory.
    linesep = os.linesep
    def factory(self, *args, **kwargs):
        """
        Create a L{server.Site} wrapping a fresh root resource.
        """
        return server.Site(resource.Resource(), *args, **kwargs)
class CombinedLogFormatterTests(unittest.TestCase):
    """
    Tests for L{twisted.web.http.combinedLogFormatter}.
    """
    def test_interface(self):
        """
        L{combinedLogFormatter} provides L{IAccessLogFormatter}.
        """
        self.assertTrue(verifyObject(
            iweb.IAccessLogFormatter, http.combinedLogFormatter))
    def test_nonASCII(self):
        """
        Bytes in fields of the request which are not part of ASCII are escaped
        in the result.
        """
        reactor = Clock()
        reactor.advance(1234567890)
        timestamp = http.datetimeToLogString(reactor.seconds())
        request = DummyRequestForLogTest(http.HTTPFactory())
        # Each field carries one non-ASCII byte (\x80-\x84) to verify that
        # every field is escaped independently.
        request.client = IPv4Address("TCP", b"evil x-forwarded-for \x80", 12345)
        request.method = b"POS\x81"
        request.protocol = b"HTTP/1.\x82"
        request.headers[b"referer"] = b"evil \x83"
        request.headers[b"user-agent"] = b"evil \x84"
        line = http.combinedLogFormatter(timestamp, request)
        self.assertEqual(
            u'"evil x-forwarded-for \\x80" - - [13/Feb/2009:23:31:30 +0000] '
            u'"POS\\x81 /dummy HTTP/1.0" 123 - "evil \\x83" "evil \\x84"',
            line)
class ProxiedLogFormatterTests(unittest.TestCase):
    """
    Tests for L{twisted.web.http.proxiedLogFormatter}.
    """
    def test_interface(self):
        """
        L{proxiedLogFormatter} provides L{IAccessLogFormatter}.
        """
        self.assertTrue(verifyObject(
            iweb.IAccessLogFormatter, http.proxiedLogFormatter))
    def _xforwardedforTest(self, header):
        """
        Assert that a request with the given value in its I{X-Forwarded-For}
        header is logged by L{proxiedLogFormatter} the same way it would have
        been logged by L{combinedLogFormatter} but with 172.16.1.2 as the
        client address instead of the normal value.
        @param header: An I{X-Forwarded-For} header with left-most address of
            172.16.1.2.
        """
        reactor = Clock()
        reactor.advance(1234567890)
        timestamp = http.datetimeToLogString(reactor.seconds())
        request = DummyRequestForLogTest(http.HTTPFactory())
        # Build the expected line by substituting the proxied address into
        # the plain combined-format output.
        expected = http.combinedLogFormatter(timestamp, request).replace(
            u"1.2.3.4", u"172.16.1.2")
        request.requestHeaders.setRawHeaders(b"x-forwarded-for", [header])
        line = http.proxiedLogFormatter(timestamp, request)
        self.assertEqual(expected, line)
    def test_xforwardedfor(self):
        """
        L{proxiedLogFormatter} logs the value of the I{X-Forwarded-For} header
        in place of the client address field.
        """
        self._xforwardedforTest(b"172.16.1.2, 10.0.0.3, 192.168.1.4")
    def test_extraForwardedSpaces(self):
        """
        Any extra spaces around the address in the I{X-Forwarded-For} header
        are stripped and not included in the log string.
        """
        self._xforwardedforTest(b"  172.16.1.2  , 10.0.0.3, 192.168.1.4")
class LogEscapingTests(unittest.TestCase):
    """
    Tests that quotes in the various request fields are backslash-escaped
    in the access log output.
    """
    def setUp(self):
        # A fresh factory writing to a temporary log file for every test.
        self.logPath = self.mktemp()
        self.site = http.HTTPFactory(self.logPath)
        self.site.startFactory()
        self.request = DummyRequestForLogTest(self.site, False)
    def assertLogs(self, line):
        """
        Assert that if C{self.request} is logged using C{self.site} then
        C{line} is written to the site's access log file.
        @param line: The expected line.
        @type line: L{bytes}
        @raise self.failureException: If the log file contains something other
            than the expected line.
        """
        try:
            self.site.log(self.request)
        finally:
            self.site.stopFactory()
        logged = FilePath(self.logPath).getContent()
        self.assertEqual(line, logged)
    def test_simple(self):
        """
        A I{GET} request is logged with no extra escapes.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - "-" "-"\n')
    def test_methodQuote(self):
        """
        If the HTTP request method includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.method = b'G"T'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"G\\"T /dummy HTTP/1.0" 123 - "-" "-"\n')
    def test_requestQuote(self):
        """
        If the HTTP request path includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.uri = b'/dummy"withquote'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy\\"withquote HTTP/1.0" 123 - "-" "-"\n')
    def test_protoQuote(self):
        """
        If the HTTP request version includes a quote, the quote is escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.clientproto = b'HT"P/1.0'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HT\\"P/1.0" 123 - "-" "-"\n')
    def test_refererQuote(self):
        """
        If the value of the I{Referer} header contains a quote, the quote is
        escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers[b'referer'] = (
            b'http://malicious" ".website.invalid')
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - '
            b'"http://malicious\\" \\".website.invalid" "-"\n')
    def test_userAgentQuote(self):
        """
        If the value of the I{User-Agent} header contains a quote, the quote is
        escaped.
        """
        self.site._logDateTime = "[%02d/%3s/%4d:%02d:%02d:%02d +0000]" % (
            25, 'Oct', 2004, 12, 31, 59)
        self.request.headers[b'user-agent'] = b'Malicious Web" Evil'
        self.assertLogs(
            b'"1.2.3.4" - - [25/Oct/2004:12:31:59 +0000] '
            b'"GET /dummy HTTP/1.0" 123 - "-" "Malicious Web\\" Evil"\n')
class ServerAttributesTests(unittest.TestCase):
    """
    Tests that deprecated twisted.web.server attributes raise the appropriate
    deprecation warnings when used.
    """
    def test_deprecatedAttributeDateTimeString(self):
        """
        twisted.web.server.date_time_string should not be used; instead use
        twisted.web.http.datetimeToString directly
        """
        # The bare attribute access is deliberate: merely touching the
        # deprecated attribute must emit the warning.
        server.date_time_string
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedAttributeDateTimeString])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            ("twisted.web.server.date_time_string was deprecated in Twisted "
             "12.1.0: Please use twisted.web.http.datetimeToString instead"))
    def test_deprecatedAttributeStringDateTime(self):
        """
        twisted.web.server.string_date_time should not be used; instead use
        twisted.web.http.stringToDatetime directly
        """
        # Bare attribute access triggers the deprecation warning.
        server.string_date_time
        warnings = self.flushWarnings(
            offendingFunctions=[self.test_deprecatedAttributeStringDateTime])
        self.assertEqual(len(warnings), 1)
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]['message'],
            ("twisted.web.server.string_date_time was deprecated in Twisted "
             "12.1.0: Please use twisted.web.http.stringToDatetime instead"))
| mit |
jacknjzhou/neutron | neutron/agent/dhcp/agent.py | 12 | 24421 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import os
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
from oslo_service import loopingcall
from oslo_utils import importutils
from neutron.agent.linux import dhcp
from neutron.agent.linux import external_process
from neutron.agent.metadata import driver as metadata_driver
from neutron.agent import rpc as agent_rpc
from neutron.common import constants
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import utils
from neutron import context
from neutron.i18n import _LE, _LI, _LW
from neutron import manager
LOG = logging.getLogger(__name__)
class DhcpAgent(manager.Manager):
    """DHCP agent service manager.
    Note that the public methods of this class are exposed as the server side
    of an rpc interface. The neutron server uses
    neutron.api.rpc.agentnotifiers.dhcp_rpc_agent_api.DhcpAgentNotifyApi as the
    client side to execute the methods here. For more information about
    changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    """
    target = oslo_messaging.Target(version='1.0')
    def __init__(self, host=None):
        super(DhcpAgent, self).__init__(host=host)
        # network id (or None for "all networks") -> list of resync reasons
        self.needs_resync_reasons = collections.defaultdict(list)
        self.conf = cfg.CONF
        self.cache = NetworkCache()
        self.dhcp_driver_cls = importutils.import_class(self.conf.dhcp_driver)
        ctx = context.get_admin_context_without_session()
        self.plugin_rpc = DhcpPluginApi(topics.PLUGIN,
                                        ctx, self.conf.use_namespaces)
        # create dhcp dir to store dhcp info
        dhcp_dir = os.path.dirname("/%s/dhcp/" % self.conf.state_path)
        utils.ensure_dir(dhcp_dir)
        self.dhcp_version = self.dhcp_driver_cls.check_version()
        self._populate_networks_cache()
        self._process_monitor = external_process.ProcessMonitor(
            config=self.conf,
            resource_type='dhcp')
    def init_host(self):
        # Bring local DHCP state in line with the server on startup.
        self.sync_state()
    def _populate_networks_cache(self):
        """Populate the networks cache when the DHCP-agent starts."""
        try:
            existing_networks = self.dhcp_driver_cls.existing_dhcp_networks(
                self.conf
            )
            for net_id in existing_networks:
                # Seed the cache with skeleton models; real subnet/port data
                # arrives with the first sync_state().
                net = dhcp.NetModel(self.conf.use_namespaces,
                                    {"id": net_id,
                                     "subnets": [],
                                     "ports": []})
                self.cache.put(net)
        except NotImplementedError:
            # just go ahead with an empty networks cache
            LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
                      "list of existing networks",
                      self.conf.dhcp_driver)
    def after_start(self):
        self.run()
        LOG.info(_LI("DHCP agent started"))
    def run(self):
        """Activate the DHCP agent."""
        self.sync_state()
        self.periodic_resync()
    def call_driver(self, action, network, **action_kwargs):
        """Invoke an action on a DHCP driver instance.
        Returns True on success; on failure logs and (except for IP
        allocation failures) schedules a resync, returning None.
        """
        LOG.debug('Calling driver for network: %(net)s action: %(action)s',
                  {'net': network.id, 'action': action})
        try:
            # the Driver expects something that is duck typed similar to
            # the base models.
            driver = self.dhcp_driver_cls(self.conf,
                                          network,
                                          self._process_monitor,
                                          self.dhcp_version,
                                          self.plugin_rpc)
            getattr(driver, action)(**action_kwargs)
            return True
        except exceptions.Conflict:
            # No need to resync here, the agent will receive the event related
            # to a status update for the network
            LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
                            'is a conflict with its current state; please '
                            'check that the network and/or its subnet(s) '
                            'still exist.'),
                        {'net_id': network.id, 'action': action})
        except Exception as e:
            if getattr(e, 'exc_type', '') != 'IpAddressGenerationFailure':
                # Don't resync if port could not be created because of an IP
                # allocation failure. When the subnet is updated with a new
                # allocation pool or a port is deleted to free up an IP, this
                # will automatically be retried on the notification
                self.schedule_resync(e, network.id)
            if (isinstance(e, oslo_messaging.RemoteError)
                and e.exc_type == 'NetworkNotFound'
                or isinstance(e, exceptions.NetworkNotFound)):
                LOG.warning(_LW("Network %s has been deleted."), network.id)
            else:
                LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.'),
                              {'net_id': network.id, 'action': action})
    def schedule_resync(self, reason, network=None):
        """Schedule a resync for a given network and reason. If no network is
        specified, resync all networks.
        """
        self.needs_resync_reasons[network].append(reason)
    @utils.synchronized('dhcp-agent')
    def sync_state(self, networks=None):
        """Sync the local DHCP state with Neutron. If no networks are passed,
        or 'None' is one of the networks, sync all of the networks.
        """
        only_nets = set([] if (not networks or None in networks) else networks)
        LOG.info(_LI('Synchronizing state'))
        pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
        known_network_ids = set(self.cache.get_network_ids())
        try:
            active_networks = self.plugin_rpc.get_active_networks_info()
            active_network_ids = set(network.id for network in active_networks)
            # Disable DHCP for networks the server no longer reports.
            for deleted_id in known_network_ids - active_network_ids:
                try:
                    self.disable_dhcp_helper(deleted_id)
                except Exception as e:
                    self.schedule_resync(e, deleted_id)
                    LOG.exception(_LE('Unable to sync network state on '
                                      'deleted network %s'), deleted_id)
            for network in active_networks:
                if (not only_nets or  # specifically resync all
                        network.id not in known_network_ids or  # missing net
                        network.id in only_nets):  # specific network to sync
                    pool.spawn(self.safe_configure_dhcp_for_network, network)
            pool.waitall()
            LOG.info(_LI('Synchronizing state complete'))
        except Exception as e:
            self.schedule_resync(e)
            LOG.exception(_LE('Unable to sync network state.'))
    @utils.exception_logger()
    def _periodic_resync_helper(self):
        """Resync the dhcp state at the configured interval."""
        while True:
            eventlet.sleep(self.conf.resync_interval)
            if self.needs_resync_reasons:
                # be careful to avoid a race with additions to list
                # from other threads
                reasons = self.needs_resync_reasons
                self.needs_resync_reasons = collections.defaultdict(list)
                for net, r in reasons.items():
                    if not net:
                        net = "*"
                    LOG.debug("resync (%(network)s): %(reason)s",
                              {"reason": r, "network": net})
                self.sync_state(reasons.keys())
    def periodic_resync(self):
        """Spawn a thread to periodically resync the dhcp state."""
        eventlet.spawn(self._periodic_resync_helper)
    def safe_get_network_info(self, network_id):
        # Returns the network model, or None if it is gone or the RPC failed
        # (in which case a resync has been scheduled).
        try:
            network = self.plugin_rpc.get_network_info(network_id)
            if not network:
                LOG.warn(_LW('Network %s has been deleted.'), network_id)
            return network
        except Exception as e:
            self.schedule_resync(e, network_id)
            LOG.exception(_LE('Network %s info call failed.'), network_id)
    def enable_dhcp_helper(self, network_id):
        """Enable DHCP for a network that meets enabling criteria."""
        network = self.safe_get_network_info(network_id)
        if network:
            self.configure_dhcp_for_network(network)
    @utils.exception_logger()
    def safe_configure_dhcp_for_network(self, network):
        try:
            self.configure_dhcp_for_network(network)
        except (exceptions.NetworkNotFound, RuntimeError):
            LOG.warn(_LW('Network %s may have been deleted and its resources '
                         'may have already been disposed.'), network.id)
    def configure_dhcp_for_network(self, network):
        # Start dnsmasq (and possibly the isolated metadata proxy) for a
        # network that has at least one DHCP-enabled subnet.
        if not network.admin_state_up:
            return
        enable_metadata = self.dhcp_driver_cls.should_enable_metadata(
            self.conf, network)
        dhcp_network_enabled = False
        for subnet in network.subnets:
            if subnet.enable_dhcp:
                if self.call_driver('enable', network):
                    dhcp_network_enabled = True
                    self.cache.put(network)
                break
        if enable_metadata and dhcp_network_enabled:
            for subnet in network.subnets:
                # The metadata proxy is only relevant for IPv4 subnets.
                if subnet.ip_version == 4 and subnet.enable_dhcp:
                    self.enable_isolated_metadata_proxy(network)
                    break
    def disable_dhcp_helper(self, network_id):
        """Disable DHCP for a network known to the agent."""
        network = self.cache.get_network_by_id(network_id)
        if network:
            if (self.conf.use_namespaces and
                self.conf.enable_isolated_metadata):
                # NOTE(jschwarz): In the case where a network is deleted, all
                # the subnets and ports are deleted before this function is
                # called, so checking if 'should_enable_metadata' is True
                # for any subnet is false logic here.
                self.disable_isolated_metadata_proxy(network)
            if self.call_driver('disable', network):
                self.cache.remove(network)
    def refresh_dhcp_helper(self, network_id):
        """Refresh or disable DHCP for a network depending on the current state
        of the network.
        """
        old_network = self.cache.get_network_by_id(network_id)
        if not old_network:
            # DHCP current not running for network.
            return self.enable_dhcp_helper(network_id)
        network = self.safe_get_network_info(network_id)
        if not network:
            return
        old_cidrs = set(s.cidr for s in old_network.subnets if s.enable_dhcp)
        new_cidrs = set(s.cidr for s in network.subnets if s.enable_dhcp)
        if new_cidrs and old_cidrs == new_cidrs:
            # Same set of DHCP-enabled subnets: a reload is sufficient.
            self.call_driver('reload_allocations', network)
            self.cache.put(network)
        elif new_cidrs:
            # Subnet layout changed: restart the driver process.
            if self.call_driver('restart', network):
                self.cache.put(network)
        else:
            self.disable_dhcp_helper(network.id)
    @utils.synchronized('dhcp-agent')
    def network_create_end(self, context, payload):
        """Handle the network.create.end notification event."""
        network_id = payload['network']['id']
        self.enable_dhcp_helper(network_id)
    @utils.synchronized('dhcp-agent')
    def network_update_end(self, context, payload):
        """Handle the network.update.end notification event."""
        network_id = payload['network']['id']
        if payload['network']['admin_state_up']:
            self.enable_dhcp_helper(network_id)
        else:
            self.disable_dhcp_helper(network_id)
    @utils.synchronized('dhcp-agent')
    def network_delete_end(self, context, payload):
        """Handle the network.delete.end notification event."""
        self.disable_dhcp_helper(payload['network_id'])
    @utils.synchronized('dhcp-agent')
    def subnet_update_end(self, context, payload):
        """Handle the subnet.update.end notification event."""
        network_id = payload['subnet']['network_id']
        self.refresh_dhcp_helper(network_id)
    # Use the update handler for the subnet create event.
    subnet_create_end = subnet_update_end
    @utils.synchronized('dhcp-agent')
    def subnet_delete_end(self, context, payload):
        """Handle the subnet.delete.end notification event."""
        subnet_id = payload['subnet_id']
        network = self.cache.get_network_by_subnet_id(subnet_id)
        if network:
            self.refresh_dhcp_helper(network.id)
    @utils.synchronized('dhcp-agent')
    def port_update_end(self, context, payload):
        """Handle the port.update.end notification event."""
        updated_port = dhcp.DictModel(payload['port'])
        network = self.cache.get_network_by_id(updated_port.network_id)
        if network:
            driver_action = 'reload_allocations'
            if self._is_port_on_this_agent(updated_port):
                orig = self.cache.get_port_by_id(updated_port['id'])
                # assume IP change if not in cache
                old_ips = {i['ip_address'] for i in orig['fixed_ips'] or []}
                new_ips = {i['ip_address'] for i in updated_port['fixed_ips']}
                if old_ips != new_ips:
                    driver_action = 'restart'
            self.cache.put_port(updated_port)
            self.call_driver(driver_action, network)
    def _is_port_on_this_agent(self, port):
        # True when the port is the DHCP port owned by this agent itself.
        thishost = utils.get_dhcp_agent_device_id(
            port['network_id'], self.conf.host)
        return port['device_id'] == thishost
    # Use the update handler for the port create event.
    port_create_end = port_update_end
    @utils.synchronized('dhcp-agent')
    def port_delete_end(self, context, payload):
        """Handle the port.delete.end notification event."""
        port = self.cache.get_port_by_id(payload['port_id'])
        if port:
            network = self.cache.get_network_by_id(port.network_id)
            self.cache.remove_port(port)
            self.call_driver('reload_allocations', network)
    def enable_isolated_metadata_proxy(self, network):
        # The proxy might work for either a single network
        # or all the networks connected via a router
        # to the one passed as a parameter
        kwargs = {'network_id': network.id}
        # When the metadata network is enabled, the proxy might
        # be started for the router attached to the network
        if self.conf.enable_metadata_network:
            router_ports = [port for port in network.ports
                            if (port.device_owner in
                                constants.ROUTER_INTERFACE_OWNERS)]
            if router_ports:
                # Multiple router ports should not be allowed
                if len(router_ports) > 1:
                    LOG.warning(_LW("%(port_num)d router ports found on the "
                                    "metadata access network. Only the port "
                                    "%(port_id)s, for router %(router_id)s "
                                    "will be considered"),
                                {'port_num': len(router_ports),
                                 'port_id': router_ports[0].id,
                                 'router_id': router_ports[0].device_id})
                kwargs = {'router_id': router_ports[0].device_id}
        metadata_driver.MetadataDriver.spawn_monitored_metadata_proxy(
            self._process_monitor, network.namespace, dhcp.METADATA_PORT,
            self.conf, **kwargs)
    def disable_isolated_metadata_proxy(self, network):
        metadata_driver.MetadataDriver.destroy_monitored_metadata_proxy(
            self._process_monitor, network.id, self.conf)
class DhcpPluginApi(object):
    """Agent side of the dhcp rpc API.
    This class implements the client side of an rpc interface. The server side
    of this interface can be found in
    neutron.api.rpc.handlers.dhcp_rpc.DhcpRpcCallback. For more information
    about changing rpc interfaces, see doc/source/devref/rpc_api.rst.
    API version history:
        1.0 - Initial version.
        1.1 - Added get_active_networks_info, create_dhcp_port,
              and update_dhcp_port methods.
    """
    def __init__(self, topic, context, use_namespaces):
        # Admin context used for every RPC call made by this client.
        self.context = context
        self.host = cfg.CONF.host
        # Propagated into the NetModel objects built from RPC replies.
        self.use_namespaces = use_namespaces
        target = oslo_messaging.Target(
            topic=topic,
            namespace=constants.RPC_NAMESPACE_DHCP_PLUGIN,
            version='1.0')
        self.client = n_rpc.get_client(target)
    def get_active_networks_info(self):
        """Make a remote process call to retrieve all network info."""
        cctxt = self.client.prepare(version='1.1')
        networks = cctxt.call(self.context, 'get_active_networks_info',
                              host=self.host)
        return [dhcp.NetModel(self.use_namespaces, n) for n in networks]
    def get_network_info(self, network_id):
        """Make a remote process call to retrieve network info."""
        cctxt = self.client.prepare()
        network = cctxt.call(self.context, 'get_network_info',
                             network_id=network_id, host=self.host)
        # Returns None when the server no longer knows the network.
        if network:
            return dhcp.NetModel(self.use_namespaces, network)
    def create_dhcp_port(self, port):
        """Make a remote process call to create the dhcp port."""
        cctxt = self.client.prepare(version='1.1')
        port = cctxt.call(self.context, 'create_dhcp_port',
                          port=port, host=self.host)
        if port:
            return dhcp.DictModel(port)
    def update_dhcp_port(self, port_id, port):
        """Make a remote process call to update the dhcp port."""
        cctxt = self.client.prepare(version='1.1')
        port = cctxt.call(self.context, 'update_dhcp_port',
                          port_id=port_id, port=port, host=self.host)
        if port:
            return dhcp.DictModel(port)
    def release_dhcp_port(self, network_id, device_id):
        """Make a remote process call to release the dhcp port."""
        cctxt = self.client.prepare()
        return cctxt.call(self.context, 'release_dhcp_port',
                          network_id=network_id, device_id=device_id,
                          host=self.host)
    def release_port_fixed_ip(self, network_id, device_id, subnet_id):
        """Make a remote process call to release a fixed_ip on the port."""
        cctxt = self.client.prepare()
        return cctxt.call(self.context, 'release_port_fixed_ip',
                          network_id=network_id, subnet_id=subnet_id,
                          device_id=device_id, host=self.host)
class NetworkCache(object):
    """Agent cache of the current network state.
    Keeps one mapping from network id to network model plus two reverse
    indexes (subnet id -> network id, port id -> network id) so lookups
    by any of the three ids are O(1).
    """
    def __init__(self):
        # network id -> network model
        self.cache = {}
        # subnet id -> id of the owning network
        self.subnet_lookup = {}
        # port id -> id of the owning network
        self.port_lookup = {}
    def get_network_ids(self):
        return self.cache.keys()
    def get_network_by_id(self, network_id):
        return self.cache.get(network_id)
    def get_network_by_subnet_id(self, subnet_id):
        owner = self.subnet_lookup.get(subnet_id)
        return self.cache.get(owner)
    def get_network_by_port_id(self, port_id):
        owner = self.port_lookup.get(port_id)
        return self.cache.get(owner)
    def put(self, network):
        """Insert or replace a network, rebuilding its reverse indexes."""
        previous = self.cache.get(network.id)
        if previous is not None:
            self.remove(previous)
        self.cache[network.id] = network
        for subnet in network.subnets:
            self.subnet_lookup[subnet.id] = network.id
        for port in network.ports:
            self.port_lookup[port.id] = network.id
    def remove(self, network):
        """Drop a network and every reverse-index entry pointing at it."""
        del self.cache[network.id]
        for subnet in network.subnets:
            del self.subnet_lookup[subnet.id]
        for port in network.ports:
            del self.port_lookup[port.id]
    def put_port(self, port):
        """Insert or replace a port on its (already cached) network."""
        network = self.get_network_by_id(port.network_id)
        for index, existing in enumerate(network.ports):
            if existing.id == port.id:
                network.ports[index] = port
                break
        else:
            network.ports.append(port)
        self.port_lookup[port.id] = network.id
    def remove_port(self, port):
        """Remove a port object and its reverse-index entry."""
        network = self.get_network_by_port_id(port.id)
        for index, existing in enumerate(network.ports):
            if existing == port:
                del network.ports[index]
                del self.port_lookup[port.id]
                break
    def get_port_by_id(self, port_id):
        network = self.get_network_by_port_id(port_id)
        if not network:
            return None
        for port in network.ports:
            if port.id == port_id:
                return port
    def get_state(self):
        """Return totals of cached networks, subnets and ports."""
        num_subnets = 0
        num_ports = 0
        for network in self.cache.values():
            num_subnets += len(network.subnets)
            num_ports += len(network.ports)
        return {'networks': len(self.cache),
                'subnets': num_subnets,
                'ports': num_ports}
class DhcpAgentWithStateReport(DhcpAgent):
    """DHCP agent that additionally heartbeats its state to the server."""

    def __init__(self, host=None):
        super(DhcpAgentWithStateReport, self).__init__(host=host)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # Static part of the report; the 'configurations' dict is
        # refreshed with live cache statistics on every heartbeat.
        self.agent_state = {
            'binary': 'neutron-dhcp-agent',
            'host': host,
            'topic': topics.DHCP_AGENT,
            'configurations': {
                'dhcp_driver': cfg.CONF.dhcp_driver,
                'use_namespaces': cfg.CONF.use_namespaces,
                'dhcp_lease_duration': cfg.CONF.dhcp_lease_duration,
                'log_agent_heartbeats': cfg.CONF.AGENT.log_agent_heartbeats},
            'start_flag': True,
            'agent_type': constants.AGENT_TYPE_DHCP}
        report_interval = cfg.CONF.AGENT.report_interval
        # First report is a blocking call so server-side problems surface
        # immediately; later reports are fire-and-forget (see
        # _report_state, which flips this flag).
        self.use_call = True
        if report_interval:
            self.heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            self.heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Periodic heartbeat pushing agent state to the Neutron server."""
        try:
            self.agent_state.get('configurations').update(
                self.cache.get_state())
            ctx = context.get_admin_context_without_session()
            self.state_rpc.report_state(ctx, self.agent_state, self.use_call)
            self.use_call = False
        except AttributeError:
            # This means the server does not support report_state
            LOG.warn(_LW("Neutron server does not support state report."
                         " State report for this agent will be disabled."))
            self.heartbeat.stop()
            self.run()
            return
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))
            return
        # On the first successful report, start the agent proper.
        if self.agent_state.pop('start_flag', None):
            self.run()

    def agent_updated(self, context, payload):
        """Handle the agent_updated notification event."""
        self.schedule_resync(_("Agent updated: %(payload)s") %
                             {"payload": payload})
        LOG.info(_LI("agent_updated by server side %s!"), payload)

    def after_start(self):
        # Hook invoked once the agent's RPC loop is up.
        LOG.info(_LI("DHCP agent started"))
| apache-2.0 |
tj93/pymtl | pclib/rtl/Mux_test.py | 8 | 5466 | #=========================================================================
# Mux_test.py
#=========================================================================
from pymtl import *
from pclib.test import TestVectorSimulator
from Mux import Mux
#-------------------------------------------------------------------------
# run_test_mux
#-------------------------------------------------------------------------
def run_test_mux( dump_vcd, test_verilog,
                  ModelType, num_inputs, test_vectors ):
  """Instantiate a num_inputs-wide, 16-bit mux and drive the test vectors.

  Each test vector is laid out as: num_inputs input values, the select
  value, then the expected output.  The expected output may be '?' to
  skip the output check for that cycle.
  """

  # Instantiate and elaborate the model

  model = ModelType(16, num_inputs)
  model.vcd_file = dump_vcd
  if test_verilog:
    model = TranslationTool( model )
  model.elaborate()

  # Define functions mapping the test vector to ports in model

  def tv_in( model, test_vector ):
    for i in range(num_inputs):
      model.in_[i].value = test_vector[i]
    model.sel.value = test_vector[num_inputs]

  def tv_out( model, test_vector ):
    # Bug fix: the '?' don't-care sentinel lives in the *expected output*
    # column (index num_inputs+1), not in the select column that was
    # tested before.
    if test_vector[num_inputs+1] != '?':
      assert model.out.value == test_vector[num_inputs+1]

  # Run the test

  sim = TestVectorSimulator( model, test_vectors, tv_in, tv_out )
  sim.run_test()
#-------------------------------------------------------------------------
# test_mux2
#-------------------------------------------------------------------------
def test_mux2( dump_vcd, test_verilog ):
  """2-input mux: each select value routes the matching input through."""
  test_vectors = []
  for values in ( [ 0x0a0a, 0x0b0b ], [ 0x0c0c, 0x0d0d ] ):
    for sel in (1, 0):
      test_vectors.append( values + [ sel, values[sel] ] )
  run_test_mux( dump_vcd, test_verilog, Mux, 2, test_vectors )
#-------------------------------------------------------------------------
# test_mux3
#-------------------------------------------------------------------------
def test_mux3( dump_vcd, test_verilog ):
  """3-input mux: each select value routes the matching input through."""
  test_vectors = []
  for values in ( [ 0x0a0a, 0x0b0b, 0x0c0c ], [ 0x0d0d, 0x0e0e, 0x0f0f ] ):
    for sel in (1, 2, 0):
      test_vectors.append( values + [ sel, values[sel] ] )
  run_test_mux( dump_vcd, test_verilog, Mux, 3, test_vectors )
#-------------------------------------------------------------------------
# test_mux4
#-------------------------------------------------------------------------
def test_mux4( dump_vcd, test_verilog ):
  """4-input mux: each select value routes the matching input through."""
  values = [ 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d ]
  test_vectors = [ values + [ sel, values[sel] ] for sel in (1, 2, 3, 0) ]
  run_test_mux( dump_vcd, test_verilog, Mux, 4, test_vectors )
#-------------------------------------------------------------------------
# test_mux5
#-------------------------------------------------------------------------
def test_mux5( dump_vcd, test_verilog ):
  """5-input mux: each select value routes the matching input through."""
  values = [ 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d, 0x0e0e ]
  test_vectors = [ values + [ sel, values[sel] ] for sel in (1, 2, 3, 4, 0) ]
  run_test_mux( dump_vcd, test_verilog, Mux, 5, test_vectors )
#-------------------------------------------------------------------------
# test_mux6
#-------------------------------------------------------------------------
def test_mux6( dump_vcd, test_verilog ):
  """6-input mux: each select value routes the matching input through."""
  values = [ 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d, 0x0e0e, 0x0f0f ]
  test_vectors = [ values + [ sel, values[sel] ]
                   for sel in (1, 2, 3, 4, 5, 0) ]
  run_test_mux( dump_vcd, test_verilog, Mux, 6, test_vectors )
#-------------------------------------------------------------------------
# test_mux7
#-------------------------------------------------------------------------
def test_mux7( dump_vcd, test_verilog ):
  """7-input mux: each select value routes the matching input through."""
  values = [ 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d, 0x0e0e, 0x0f0f, 0x0101 ]
  test_vectors = [ values + [ sel, values[sel] ]
                   for sel in (1, 2, 3, 4, 5, 6, 0) ]
  run_test_mux( dump_vcd, test_verilog, Mux, 7, test_vectors )
#-------------------------------------------------------------------------
# test_mux8
#-------------------------------------------------------------------------
def test_mux8( dump_vcd, test_verilog ):
  """8-input mux: each select value routes the matching input through."""
  values = [ 0x0a0a, 0x0b0b, 0x0c0c, 0x0d0d,
             0x0e0e, 0x0f0f, 0x0101, 0x0202 ]
  test_vectors = [ values + [ sel, values[sel] ]
                   for sel in (1, 2, 3, 4, 5, 6, 7, 0) ]
  run_test_mux( dump_vcd, test_verilog, Mux, 8, test_vectors )
| bsd-3-clause |
listamilton/supermilton.repository | script.areswizard/requests/packages/urllib3/exceptions.py | 156 | 4244 |
## Base Exceptions

class HTTPError(Exception):
    "Base exception used by this module."
    pass


class HTTPWarning(Warning):
    "Base warning used by this module."
    pass


class PoolError(HTTPError):
    "Base exception for errors caused within a pool."

    def __init__(self, pool, message):
        self.pool = pool
        HTTPError.__init__(self, "%s: %s" % (pool, message))

    def __reduce__(self):
        # For pickling purposes. The pool itself is not picklable, so
        # both constructor arguments are replaced by None.
        return self.__class__, (None, None)


class RequestError(PoolError):
    "Base exception for PoolErrors that have associated URLs."

    def __init__(self, pool, url, message):
        self.url = url
        PoolError.__init__(self, pool, message)

    def __reduce__(self):
        # For pickling purposes. Keep the URL, drop the unpicklable pool.
        return self.__class__, (None, self.url, None)


class SSLError(HTTPError):
    "Raised when SSL certificate fails in an HTTPS connection."
    pass


class ProxyError(HTTPError):
    "Raised when the connection to a proxy fails."
    pass


class DecodeError(HTTPError):
    "Raised when automatic decoding based on Content-Type fails."
    pass


class ProtocolError(HTTPError):
    "Raised when something unexpected happens mid-request/response."
    pass


#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError


## Leaf Exceptions

class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        self.reason = reason

        message = "Max retries exceeded with url: %s (Caused by %r)" % (
            url, reason)

        RequestError.__init__(self, pool, url, message)


class HostChangedError(RequestError):
    "Raised when an existing pool gets a request for a foreign host."

    def __init__(self, pool, url, retries=3):
        message = "Tried to open a foreign host with url: %s" % url
        RequestError.__init__(self, pool, url, message)
        self.retries = retries


class TimeoutStateError(HTTPError):
    """ Raised when passing an invalid state to a timeout """
    pass


class TimeoutError(HTTPError):
    """ Raised when a socket timeout error occurs.

    Catching this error will catch both :exc:`ReadTimeoutErrors
    <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
    """
    pass


class ReadTimeoutError(TimeoutError, RequestError):
    "Raised when a socket timeout occurs while receiving data from a server"
    pass


# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
    "Raised when a socket timeout occurs while connecting to a server"
    pass


class EmptyPoolError(PoolError):
    "Raised when a pool runs out of connections and no more are allowed."
    pass


class ClosedPoolError(PoolError):
    "Raised when a request enters a pool after the pool has been closed."
    pass


class LocationValueError(ValueError, HTTPError):
    "Raised when there is something wrong with a given URL input."
    pass


class LocationParseError(LocationValueError):
    "Raised when get_host or similar fails to parse the URL input."

    def __init__(self, location):
        message = "Failed to parse: %s" % location
        HTTPError.__init__(self, message)

        self.location = location


class ResponseError(HTTPError):
    "Used as a container for an error reason supplied in a MaxRetryError."
    # Reason strings interpolated by the retry logic.
    GENERIC_ERROR = 'too many error responses'
    SPECIFIC_ERROR = 'too many {status_code} error responses'


class SecurityWarning(HTTPWarning):
    "Warned when performing security reducing actions"
    pass


class InsecureRequestWarning(SecurityWarning):
    "Warned when making an unverified HTTPS request."
    pass


class SystemTimeWarning(SecurityWarning):
    "Warned when system time is suspected to be wrong"
    pass


class InsecurePlatformWarning(SecurityWarning):
    "Warned when certain SSL configuration is not available on a platform."
    pass
| gpl-2.0 |
Verizon/libcloud | libcloud/test/common/test_runabove.py | 27 | 1181 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from libcloud.test import MockHttp
# Characters in a URL that get rewritten to '_' when building the name
# of the per-endpoint handler method.
FORMAT_URL = re.compile(r'[./-]')


class BaseRunAboveMockHttp(MockHttp):
    """MockHttp that funnels every request through one JSON dispatcher."""

    def _get_method_name(self, type, use_param, qs, path):
        # Route all HTTP verbs/paths to _json below.
        return "_json"

    def _json(self, method, url, body, headers):
        # Build the handler name from the URL and verb, e.g.
        # "/foo/bar-1" + GET -> "_json_foo_bar_1_get".
        meth_name = '_json%s_%s' % (FORMAT_URL.sub('_', url), method.lower())
        return getattr(self, meth_name)(method, url, body, headers)
| apache-2.0 |
dkoes/md-scripts | rmsdtofirst.py | 1 | 1999 | #!/usr/local/bin/python
import MDAnalysis, sys, argparse, numpy, math
import MDAnalysis.core.qcprot as qcp
from MDAnalysis.analysis.align import *
# Take a topology, trajectory, and selection and output the RMSD to the
# first frame throughout the trajectory.

parser = argparse.ArgumentParser(description='Calculate RMSD to first frame (optionally perform alignment)')
parser.add_argument('topo',metavar='topology file')
parser.add_argument('traj',metavar='trajectory file')
parser.add_argument('sel',metavar='selection')
parser.add_argument('--minimize','-m', default=False, action='store_true', help="compute minimal rmsd for each frame")
args = parser.parse_args()

# Read topology and trajectory; both selections address the same atoms,
# reference coordinates are snapshotted from the first frame below.
u = MDAnalysis.Universe(args.topo, args.traj)
refatoms = u.selectAtoms(args.sel);
trajatoms = u.selectAtoms(args.sel);

ref_com = refatoms.centerOfMass().astype(numpy.float32)

# With --minimize, work on centered coordinates so only the rotational
# best fit contributes to the RMSD.
if args.minimize:
    ref_coordinates = refatoms.coordinates() - ref_com
else:
    ref_coordinates = refatoms.coordinates()

# allocate the array for selection atom coords
traj_coordinates = trajatoms.coordinates().copy()
natoms = trajatoms.numberOfAtoms()

# RMSD timeseries
frames = u.trajectory
nframes = len(frames)

# R: rotation matrix that aligns r-r_com, x~-x~com
# (x~: selected coordinates, x: all coordinates)
# Final transformed traj coordinates: x' = (x-x~_com)*R + ref_com
# NOTE(review): rot and R are allocated but never used below --
# presumably left over from an explicit qcprot rotation fit; confirm
# before removing.
rot = numpy.zeros(9,dtype=numpy.float64) # allocate space for calculation
R = numpy.matrix(rot.reshape(3,3))

for k,ts in enumerate(frames):
    # shift coordinates for rotation fitting
    # selection is updated with the time frame
    if args.minimize:
        # Minimal (best-fit) RMSD: center this frame too and let
        # MDAnalysis.analysis.align.rmsd find the optimal rotation.
        x_com = trajatoms.centerOfMass().astype(numpy.float32)
        traj_coordinates[:] = trajatoms.coordinates() - x_com
        r = rmsd(ref_coordinates, traj_coordinates)
    else:
        # Plain per-atom RMSD without any alignment.
        traj_coordinates[:] = trajatoms.coordinates()
        diff = ref_coordinates - traj_coordinates
        r = math.sqrt(numpy.square(diff.flatten()).sum()/natoms)
    print(k,r)
| bsd-3-clause |
pprett/statsmodels | statsmodels/sandbox/contrast_old.py | 4 | 4666 | import copy
import numpy as np
from numpy.linalg import pinv
from statsmodels.sandbox import utils_old as utils
class ContrastResults(object):
    """
    Results from looking at a particular contrast of coefficients in
    a parametric model. The class does nothing, it is a container
    for the results from T and F contrasts.
    """

    def __init__(self, t=None, F=None, sd=None, effect=None, df_denom=None,
                 df_num=None):
        # An F contrast carries (F, df_denom, df_num); a T contrast
        # carries (t, sd, effect, df_denom).
        if F is not None:
            self.F = F
            self.df_denom = df_denom
            self.df_num = df_num
        else:
            self.t = t
            self.sd = sd
            self.effect = effect
            self.df_denom = df_denom

    def __array__(self):
        # Let np.asarray(result) yield the bare statistic.
        if hasattr(self, "F"):
            return self.F
        else:
            return self.t

    def __str__(self):
        # repr() replaces the Python-2-only backtick syntax used before.
        if hasattr(self, 'F'):
            return '<F contrast: F=%s, df_denom=%d, df_num=%d>' % \
                (repr(self.F), self.df_denom, self.df_num)
        else:
            return '<T contrast: effect=%s, sd=%s, t=%s, df_denom=%d>' % \
                (repr(self.effect), repr(self.sd), repr(self.t),
                 self.df_denom)
class Contrast(object):
    """
    This class is used to construct contrast matrices in regression models.
    They are specified by a (term, formula) pair.

    The term, T, is a linear combination of columns of the design
    matrix D=formula(). The matrix attribute is
    a contrast matrix C so that

    colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))

    where pinv(D) is the generalized inverse of D. Further, the matrix

    Tnew = dot(C, D)

    is full rank. The rank attribute is the rank of

    dot(D, dot(pinv(D), T))

    In a regression model, the contrast tests that E(dot(Tnew, Y)) = 0
    for each column of Tnew.
    """

    def __init__(self, term, formula, name=''):
        self.term = term
        self.formula = formula
        # '==' replaces the identity test "name is ''" used before, which
        # only worked because CPython interns small strings.
        if name == '':
            self.name = str(term)
        else:
            self.name = name

    def __str__(self):
        # repr() replaces the Python-2-only backtick syntax.
        return '<contrast:%s>' % \
            repr({'term': str(self.term), 'formula': str(self.formula)})

    def compute_matrix(self, *args, **kw):
        """
        Construct a contrast matrix C so that

        colspan(dot(D, C)) = colspan(dot(D, dot(pinv(D), T)))

        where pinv(D) is the generalized inverse of D=self.D=self.formula().

        If the design, self.D is already set,
        then evaldesign can be set to False.
        """
        t = copy.copy(self.term)
        t.namespace = self.formula.namespace
        T = np.transpose(np.array(t(*args, **kw)))

        if T.ndim == 1:
            T.shape = (T.shape[0], 1)

        self.T = utils.clean0(T)
        self.D = self.formula.design(*args, **kw)

        self._matrix = contrastfromcols(self.T, self.D)
        try:
            self.rank = self.matrix.shape[1]
        except Exception:
            # np.squeeze may have reduced the matrix to 0-d/1-d; treat it
            # as a rank-1 contrast.  (Was a bare except before.)
            self.rank = 1

    def _get_matrix(self):
        """
        This will fail if the formula needs arguments to construct
        the design.
        """
        if not hasattr(self, "_matrix"):
            self.compute_matrix()
        return self._matrix
    matrix = property(_get_matrix)
def contrastfromcols(L, D, pseudo=None):
    """
    From an n x p design matrix D and a matrix L, tries
    to determine a p x q contrast matrix C which
    determines a contrast of full rank, i.e. the
    n x q matrix

    dot(transpose(C), pinv(D))

    is full rank.

    L must satisfy either L.shape[0] == n or L.shape[1] == p.

    If L.shape[0] == n, then L is thought of as representing
    columns in the column space of D.

    If L.shape[1] == p, then L is thought of as what is known
    as a contrast matrix. In this case, this function returns an estimable
    contrast corresponding to the dot(D, L.T)

    Note that this always produces a meaningful contrast, not always
    with the intended properties because q is always non-zero unless
    L is identically 0. That is, it produces a contrast that spans
    the column space of L (after projection onto the column space of D).
    """
    L = np.asarray(L)
    D = np.asarray(D)

    n, p = D.shape

    if L.shape[0] != n and L.shape[1] != p:
        # Python 3 compatible raise; this was the old "raise E, msg" form.
        raise ValueError('shape of L and D mismatched')

    if pseudo is None:
        pseudo = pinv(D)

    if L.shape[0] == n:
        C = np.dot(pseudo, L).T
    else:
        # L is a p-column contrast matrix: project it onto colspan(D).
        C = L
        C = np.dot(pseudo, np.dot(D, C.T)).T

    Lp = np.dot(D, C.T)

    if len(Lp.shape) == 1:
        Lp.shape = (n, 1)

    if utils.rank(Lp) != Lp.shape[1]:
        # Not full rank: reduce to a full-rank spanning set and refit.
        Lp = utils.fullrank(Lp)
        C = np.dot(pseudo, Lp).T

    return np.squeeze(C)
| bsd-3-clause |
Alex-Gurung/MathSpeech | wordToNum.py | 1 | 1299 | #Taken from http://stackoverflow.com/questions/493174/is-there-a-way-to-convert-number-words-to-integers
def text2int(textnum, numwords={}):
    """Convert an English number phrase (e.g. "twenty one") to an int.

    Raises ValueError for an unrecognised word.  The mutable default
    ``numwords`` is intentional here: it acts as a build-once cache of
    the word -> (scale, increment) lookup table.
    """
    if not numwords:
        units = [
            "zero", "one", "two", "three", "four", "five", "six", "seven", "eight",
            "nine", "ten", "eleven", "twelve", "thirteen", "fourteen", "fifteen",
            "sixteen", "seventeen", "eighteen", "nineteen",
        ]

        tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]

        scales = ["hundred", "thousand", "million", "billion", "trillion"]

        numwords["and"] = (1, 0)
        for idx, word in enumerate(units):
            numwords[word] = (1, idx)
        for idx, word in enumerate(tens):
            numwords[word] = (1, idx * 10)
        for idx, word in enumerate(scales):
            # "hundred" scales by 100; the others by 10**3, 10**6, ...
            numwords[word] = (10 ** (idx * 3 or 2), 0)

    current = result = 0
    for word in textnum.split():
        if word not in numwords:
            # ValueError is more precise than the bare Exception raised
            # before, and any caller catching Exception still catches it.
            raise ValueError("Illegal word: " + word)

        scale, increment = numwords[word]
        current = current * scale + increment
        if scale > 100:
            # Finished a group such as "... thousand"; bank it.
            result += current
            current = 0

    return result + current
# Test (print() function form; the old Python-2 print statement is a
# SyntaxError on Python 3)
print(text2int("seven billion one hundred million thirty one thousand three hundred thirty seven"))
#7100031337 | mit |
osiell/server-tools | base_report_auto_create_qweb/models/report_xml.py | 13 | 5388 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api, exceptions, _
import logging
_logger = logging.getLogger(__name__)
class IrActionsReport(models.Model):
    """Auto-create QWeb view skeletons for qweb-pdf/qweb-html reports."""

    _inherit = 'ir.actions.report.xml'

    def _format_template_name(self, text):
        """Transliterate *text* to ASCII and return it as iso-8859-1 bytes."""
        try:
            from unidecode import unidecode
        except ImportError:
            _logger.debug('Can not `import unidecode`.')
        # NOTE(review): when unidecode is unavailable the call below
        # raises NameError; presumably it was meant to sit inside the
        # try block -- confirm against upstream before changing.
        text = unidecode(unicode(text))
        # NOTE(review): the lower() result is discarded, so the name
        # keeps its original case -- looks unintentional; confirm.
        text.lower()
        return text.encode('iso-8859-1')

    def _prepare_qweb_view_data(self, qweb_name, arch):
        # Values for the primary QWeb ir.ui.view.
        return {
            'name': qweb_name,
            'mode': 'primary',
            'type': 'qweb',
            'arch': arch,
        }

    def _prepare_model_data_data(self, qweb_name, module, qweb_view):
        # Values for the ir.model.data record (XML id) of the QWeb view.
        return {
            'module': module,
            'name': qweb_name,
            'res_id': qweb_view.id,
            'model': 'ir.ui.view',
        }

    def _prepare_value_view_data(self, name, model):
        # Values for the ir.values entry that binds this report action
        # to the model's "Print" menu.
        return {
            'name': name,
            'model': model,
            'key2': 'client_print_multi',
            'value_unpickle': 'ir.actions.report.xml,%s' % self.id,
        }

    def _create_qweb(self, name, qweb_name, module, model, arch):
        """Create the QWeb view, its XML id, and its ir.values binding."""
        qweb_view_data = self._prepare_qweb_view_data(qweb_name, arch)
        qweb_view = self.env['ir.ui.view'].create(qweb_view_data)
        model_data_data = self._prepare_model_data_data(
            qweb_name, module, qweb_view)
        self.env['ir.model.data'].create(model_data_data)
        value_view_data = self._prepare_value_view_data(
            name, model)
        self.env['ir.values'].sudo().create(value_view_data)

    @api.model
    def create(self, values):
        """Create the report; for QWeb reports also create its view(s)."""
        values['report_name'] = self._format_template_name(
            values.get('report_name', ''))
        if (values.get('report_type') in ['qweb-pdf', 'qweb-html'] and
                values.get('report_name') and
                values['report_name'].find('.') == -1):
            raise exceptions.Warning(
                _("Template Name must contain at least a dot in it's name"))
        if not self.env.context.get('enable_duplication', False):
            return super(IrActionsReport, self).create(values)
        report_xml = super(IrActionsReport, self).create(values)
        if values.get('report_type') in ['qweb-pdf', 'qweb-html']:
            # Duplication mode: clone every QWeb view of the source
            # report, rewriting names/modules with the chosen suffix.
            report_view_ids = self.env.context.get('report_views', False)
            suffix = self.env.context.get('suffix') or 'copy'
            name = values['name']
            model = values['model']
            report = values['report_name']
            module = report.split('.')[0]
            report_name = report.split('.')[1]
            for report_view in self.env['ir.ui.view'].browse(report_view_ids):
                origin_name = report_name.replace(('_%s' % suffix), '')
                origin_module = module.replace(('_%s' % suffix), '')
                new_report_name = '%s_%s' % (origin_name, suffix)
                report_view_name = report_view.name
                if report_view.name.find('.') != -1:
                    report_view_name = report_view.name.split('.')[1]
                qweb_name = report_view_name.replace(
                    origin_name, new_report_name)
                arch = report_view.arch.replace(
                    origin_name, new_report_name).replace(origin_module + '.',
                                                          module + '.')
                report_xml._create_qweb(
                    name, qweb_name, module, model, arch)
            if not report_view_ids:
                # No source views to clone: create an empty template stub.
                arch = ('<?xml version="1.0"?>\n'
                        '<t t-name="%s">\n</t>' % report_name)
                report_xml._create_qweb(name, report_name, module, model, arch)
        return report_xml

    @api.one
    def copy(self, default=None):
        """Duplicate the report; with 'enable_duplication' in the context
        the related QWeb views are cloned too (handled in create())."""
        if not self.env.context.get('enable_duplication', False):
            return super(IrActionsReport, self).copy(default=default)
        if default is None:
            default = {}
        suffix = self.env.context.get('suffix') or 'copy'
        default['name'] = '%s (%s)' % (self.name, suffix)
        module = '%s_%s' % (
            self.report_name.split('.')[0], suffix.lower())
        report = '%s_%s' % (self.report_name.split('.')[1], suffix.lower())
        default['report_name'] = '%s.%s' % (module, report)
        report_views = self.env['ir.ui.view'].search([
            ('name', 'ilike', self.report_name.split('.')[1]),
            ('type', '=', 'qweb')])
        return super(IrActionsReport,
                     self.with_context(
                         report_views=report_views.ids,
                         suffix=suffix.lower())).copy(default=default)

    @api.multi
    def button_create_qweb(self):
        """Manually create an empty QWeb template for this report."""
        self.ensure_one()
        module = self.report_name.split('.')[0]
        report_name = self.report_name.split('.')[1]
        arch = ('<?xml version="1.0"?>\n'
                '<t t-name="%s">\n</t>' % self.report_name)
        self._create_qweb(self.name, report_name, module, self.model, arch)
        self.associated_view()
| agpl-3.0 |
peiyuwang/pants | tests/python/pants_test/engine/legacy/test_pants_engine_integration.py | 9 | 1078 | # coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class PantsEngineIntegrationTest(PantsRunIntegrationTest):
  """Integration checks for the v2 engine's list and binary goals."""

  def test_engine_list(self):
    pants_run = self.run_pants(['-ldebug', '--enable-v2-engine', 'list', '3rdparty::'])
    self.assert_success(pants_run)
    # Debug logging must show the v2 build graph and scheduler stats,
    # and pantsd must not have been started implicitly.
    self.assertRegexpMatches(pants_run.stderr_data, 'build_graph is: .*LegacyBuildGraph')
    self.assertRegexpMatches(pants_run.stderr_data,
                             'ran \d+ scheduling iterations and \d+ runnables in')
    self.assertNotRegexpMatches(pants_run.stderr_data, 'pantsd is running at pid \d+')

  def test_engine_binary(self):
    # Smoke test: the binary goal succeeds under the v2 engine.
    self.assert_success(
      self.run_pants(
        ['--enable-v2-engine', 'binary', 'examples/src/python/example/hello/main:']
      )
    )
| apache-2.0 |
jezdez/kuma | vendor/packages/translate/convert/po2ini.py | 22 | 3155 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Convert Gettext PO localization files to .ini files.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/ini2po.html
for examples and usage instructions.
"""
from translate.convert import convert
from translate.storage import factory
class reini:
    """Merge translated targets from a PO store back into an .ini template."""

    def __init__(self, templatefile, inputstore, dialect="default"):
        from translate.storage import ini
        self.templatefile = templatefile
        self.templatestore = ini.inifile(templatefile, dialect=dialect)
        self.inputstore = inputstore

    def convertstore(self, includefuzzy=False):
        """Return the template serialised with translations applied.

        Template units without a translation (or with a fuzzy one when
        fuzzies are excluded) keep their source text.
        """
        self.includefuzzy = includefuzzy
        self.inputstore.makeindex()
        index = self.inputstore.locationindex
        for unit in self.templatestore.units:
            for location in unit.getlocations():
                translated = index.get(location)
                if translated is None:
                    unit.target = unit.source
                elif translated.isfuzzy() and not self.includefuzzy:
                    unit.target = unit.source
                else:
                    unit.target = translated.target
        return str(self.templatestore)
def convertini(inputfile, outputfile, templatefile, includefuzzy=False,
               dialect="default", outputthreshold=None):
    """Convert a PO file to .ini using *templatefile*; return success flag."""
    inputstore = factory.getobject(inputfile)
    if not convert.should_output_store(inputstore, outputthreshold):
        return False
    # An .ini conversion is a merge, so a template is mandatory.
    if templatefile is None:
        raise ValueError("must have template file for ini files")
    merger = reini(templatefile, inputstore, dialect)
    outputfile.write(merger.convertstore(includefuzzy))
    return True
def convertisl(inputfile, outputfile, templatefile, includefuzzy=False,
               dialect="inno", outputthreshold=None):
    """Convert a PO file to an Inno Setup .isl file.

    Thin wrapper over convertini with the "inno" dialect.  The success
    flag is now propagated to the conversion framework instead of being
    dropped (the implicit None return made the converter look failed).
    """
    return convertini(inputfile, outputfile, templatefile, includefuzzy,
                      dialect, outputthreshold=outputthreshold)
def main(argv=None):
    """Command-line entry point wiring up the po2ini/po2isl converters."""
    # Maps (input ext, template ext) -> (output ext, converter function).
    formats = {
        ("po", "ini"): ("ini", convertini),
        ("po", "isl"): ("isl", convertisl),
    }
    parser = convert.ConvertOptionParser(formats, usetemplates=True,
                                         description=__doc__)
    parser.add_threshold_option()
    parser.add_fuzzy_option()
    parser.run(argv)


if __name__ == '__main__':
    main()
| mpl-2.0 |
dmarteau/QGIS | python/plugins/processing/gui/ToolboxAction.py | 45 | 1439 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ToolboxAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication
class ToolboxAction:
    """Base class for custom actions shown in the Processing toolbox."""

    def setData(self, toolbox):
        # Keep a reference to the toolbox the action was added to.
        self.toolbox = toolbox

    def getIcon(self):
        # Default icon; subclasses override for a specific one.
        return QgsApplication.getThemeIcon("/processingAlgorithm.svg")

    def tr(self, string, context=''):
        """Translate *string*, defaulting the context to the class name."""
        if context == '':
            context = self.__class__.__name__
        return QCoreApplication.translate(context, string)
| gpl-2.0 |
Yethiel/re-volt-addon | io_revolt/__init__.py | 1 | 4294 | """
Name: init
Purpose: Init file for the Blender Add-On
Description:
Marv's Add-On for Re-Volt
"""
import bpy
import os
import os.path
import imp
from bpy.app.handlers import persistent # For the scene update handler
from . import (
common,
layers,
operators,
texanim,
tools,
)
from .props import (
props_mesh,
props_obj,
props_scene,
)
from .ui import (
menu_add,
headers,
faceprops,
instances,
light,
hull,
object,
scene,
vertex,
texanim,
helpers,
settings,
)
# Reloads potentially changed modules on reload (F8 in Blender)
imp.reload(common)
imp.reload(layers)
imp.reload(props_mesh)
imp.reload(props_obj)
imp.reload(props_scene)
imp.reload(operators)
imp.reload(texanim)
imp.reload(tools)
# Reloads ui
imp.reload(menu_add)
imp.reload(headers)
imp.reload(faceprops)
imp.reload(instances)
imp.reload(light)
imp.reload(hull)
imp.reload(object)
imp.reload(scene)
imp.reload(vertex)
imp.reload(texanim)
imp.reload(helpers)
imp.reload(settings)
# Reloaded here because it's used in a class which is instanced here
if "fin_in" in locals():
imp.reload(fin_in)
if "fin_out" in locals():
imp.reload(fin_out)
if "hul_in" in locals():
imp.reload(hul_in)
if "hul_out" in locals():
imp.reload(hul_out)
if "img_in" in locals():
imp.reload(img_in)
if "prm_in" in locals():
imp.reload(prm_in)
if "prm_out" in locals():
imp.reload(prm_out)
if "ncp_in" in locals():
imp.reload(ncp_in)
if "ncp_out" in locals():
imp.reload(ncp_out)
if "parameters_in" in locals():
imp.reload(parameters_in)
if "ta_csv_in" in locals():
imp.reload(ta_csv_in)
if "ta_csv_out" in locals():
imp.reload(ta_csv_out)
if "w_in" in locals():
imp.reload(w_in)
if "w_out" in locals():
imp.reload(w_out)
if "rim_in" in locals():
imp.reload(rim_in)
if "rim_out" in locals():
imp.reload(rim_out)
# Makes common variables and classes directly accessible
from .common import *
from .props.props_mesh import *
from .props.props_obj import *
from .props.props_scene import *
from .texanim import *
dprint("---\n\n\n\n")
bl_info = {
"name": "Re-Volt",
"author": "Marvin Thiel",
"version": (19, 12, 30),
"blender": (2, 79, 0),
"location": "File > Import-Export",
"description": "Import and export Re-Volt file formats.",
"wiki_url": "https://re-volt.github.io/re-volt-addon/",
"tracker_url": "https://github.com/Re-Volt/re-volt-addon/issues",
"support": 'COMMUNITY',
"category": "Import-Export"
}
@persistent
def edit_object_change_handler(scene):
    """Makes the edit mode bmesh available for use in GUI panels."""
    obj = scene.objects.active
    if obj is None:
        return

    # Adds an instance of the edit mode mesh to the global dict
    if obj.mode == 'EDIT' and obj.type == 'MESH':
        # NOTE(review): from_edit_mesh() is evaluated on every call even
        # when obj.name is already cached -- setdefault does not lazily
        # evaluate its default argument; confirm this is acceptable.
        bm = dic.setdefault(obj.name, bmesh.from_edit_mesh(obj.data))
        return

    # Not editing a mesh any more: drop all cached bmeshes.
    dic.clear()
def menu_func_import(self, context):
    """Import function for the user interface."""
    # Adds the "Re-Volt" entry to File > Import.
    self.layout.operator("import_scene.revolt", text="Re-Volt")


def menu_func_export(self, context):
    """Export function for the user interface."""
    # Adds the "Re-Volt" entry to File > Export.
    self.layout.operator("export_scene.revolt", text="Re-Volt")
def register():
    """Registers the add-on: classes, ID properties, menus and handlers."""
    bpy.utils.register_module(__name__)

    # Attach the add-on's property groups to Blender's ID data types so
    # every scene/object/mesh carries a `.revolt` property group.
    bpy.types.Scene.revolt = bpy.props.PointerProperty(
        type=RVSceneProperties
    )
    bpy.types.Object.revolt = bpy.props.PointerProperty(
        type=RVObjectProperties
    )
    bpy.types.Mesh.revolt = bpy.props.PointerProperty(
        type=RVMeshProperties
    )

    # Menu entries: import/export in the File menu, plus the Add menu entry.
    bpy.types.INFO_MT_file_import.prepend(menu_func_import)
    bpy.types.INFO_MT_file_export.prepend(menu_func_export)
    bpy.types.INFO_MT_add.append(menu_add.menu_func_add)

    # Keeps the edit-mode bmesh cache current on every scene update.
    bpy.app.handlers.scene_update_pre.append(edit_object_change_handler)
    # bpy.app.handlers.scene_update_post.append(edit_object_change_handler)
def unregister():
    """Unregisters the add-on, undoing everything register() set up."""
    bpy.utils.unregister_module(__name__)

    # Remove the property groups attached in register().
    del bpy.types.Scene.revolt
    del bpy.types.Object.revolt
    del bpy.types.Mesh.revolt

    # Remove the menu entries.
    bpy.types.INFO_MT_file_import.remove(menu_func_import)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
    bpy.types.INFO_MT_add.remove(menu_add.menu_func_add)
# Allows running the file directly from Blender's text editor for development.
if __name__ == "__main__":
    register()
    dprint("Re-Volt add-on registered.")
| gpl-3.0 |
methane/toever | toever/toever.py | 2 | 10878 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from evernote.api.client import EvernoteClient
from evernote.edam.type.ttypes import Note
from evernote.edam.type.ttypes import Resource, ResourceAttributes, Data
# from evernote.edam.error import ttypes as errors
from xml.sax.saxutils import escape
from datetime import datetime
from clint import textui
import sys
import os
import argparse
import mimetypes
import hashlib
import ConfigParser
import keyring
import chardet
import config as sys_config
class ToEver():
    """Creates Evernote notes from stdin text or binary attachments.

    Behaviour flags (hide/share/tag/bookguid) are set by the caller after
    construction; content is accumulated as ENML markup in ``self.content``.
    """

    def __init__(self, token, sandbox=True):
        self.client = EvernoteClient(token=token, sandbox=sandbox)
        self.token = token
        self.hide = False       # suppress status output (share links still shown)
        self.share = False      # create a public share link for the note
        self.tag = None         # list of tag names, or None for untagged
        self.bookguid = None    # destination notebook GUID, or None for default
        self.content = str()    # accumulated ENML note body

    def createNote(self, title, resource=None):
        """Creates the note on the Evernote server; returns True on success.

        If *resource* is given it is attached to the note and referenced
        from the body through an <en-media> element.
        """
        user_store = self.client.get_user_store()
        note_store = self.client.get_note_store()
        try:
            note = Note()
            note.tagNames = self.tag
            note.notebookGuid = self.bookguid
            if resource is not None:
                note.resources = [resource]
                self.content += "<span><en-media type=\"%s\" hash=\"%s\"/></span>" % (resource.mime, resource.data.bodyHash)
            note.title = title
            # ENML documents need the XML prologue and en-note DOCTYPE.
            note.content = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
            note.content += "<!DOCTYPE en-note SYSTEM \"http://xml.evernote.com/pub/enml2.dtd\">"
            note.content += "<en-note>%s</en-note>" % self.content
            created_note = note_store.createNote(note)
        except:
            # NOTE(review): bare except hides the actual API error; consider
            # catching the specific evernote.edam.error types instead.
            return False
        note_share_url = None
        note_share_resource_url = None
        if self.share:
            note_share_url = ToEver.getNoteShareUrl(
                sys_config.evernote_url,
                user_store.getUser().shardId,
                created_note.guid,
                note_store.shareNote(self.token, created_note.guid)
            )
            if resource is not None:
                # Build the direct link to the (single) attachment.
                for x in created_note.resources:
                    note_share_resource_url = note_share_url + "/res/%s/%s" % (x.guid, x.attributes.fileName)
        message = None
        if not self.hide:
            message = "Created note title is '" + title + "'"
            message += " [" + ToEver.getUserUploadState(note_store.getSyncState().uploaded, user_store.getUser().accounting.uploadLimitNextMonth) + "]"
            if note_share_url is not None:
                message += "\n" + "share link --> " + note_share_url
            if note_share_resource_url is not None:
                message += "\n" + "share attachment link --> " + note_share_resource_url
        elif note_share_url is not None:
            # --hide still prints share links, just without decoration.
            message = note_share_url
            if note_share_resource_url is not None:
                message += "\n" + note_share_resource_url
        if message is not None:
            print(textui.colored.blue(message))
        return True

    def getResource(self, filename):
        """Builds an Evernote Resource from binary data read from stdin."""
        data = Data()
        data.body = sys.stdin.read()
        data.size = len(data.body)
        data.bodyHash = hashlib.md5(data.body).hexdigest()
        resource = Resource()
        # MIME type is guessed from the file name extension.
        resource.mime = mimetypes.guess_type(filename)[0]
        resource.data = data
        attr = ResourceAttributes()
        attr.fileName = filename
        resource.attributes = attr
        return resource

    def setContent(self):
        """Reads lines from stdin, appending them (ENML-escaped) to the body."""
        for line in iter(sys.stdin.readline, ''):
            self.content += self.getContentFormat(line)
            if not self.hide:
                # Echo the line so the user sees what is being captured.
                print(textui.colored.green(line.rstrip()))

    def isSetContent(self):
        """Returns True if any non-empty content line has been captured."""
        return len(self.content.replace('<div><br/></div>', '')) != 0

    def listNotebooks(self):
        """Returns all notebooks of the authenticated account."""
        note_store = self.client.get_note_store()
        return note_store.listNotebooks()

    @staticmethod
    def getContentFormat(data):
        """Escapes one text line and wraps it into an ENML <div> block."""
        data = data.rstrip()
        data = '<div>' + escape(data) + '</div>'
        data = data.replace(' ', ' ')  # bytecode 20 -> c2a0
        data = data.replace('	', '    ')  # tab -> c2a0
        data = data.replace('<div></div>', '<div><br/></div>') + '\n'
        return data

    @staticmethod
    def getNoteShareUrl(url, shard_id, note_guid, share_key):
        """Builds the public share URL for a note."""
        return "%s/shard/%s/sh/%s/%s" % (url, shard_id, note_guid, share_key)

    @staticmethod
    def getRoundMbSize(size):
        """Converts bytes to megabytes (rounded) and returns it as a string."""
        return str(round(size / (1024.0 ** 2)))

    @staticmethod
    def getUserUploadState(user_upload, upload_limit_next_month):
        """Formats 'used MB / limit MB' for the monthly upload quota."""
        return "%s MB / %s MB" % (ToEver.getRoundMbSize(user_upload), ToEver.getRoundMbSize(upload_limit_next_month))

    @staticmethod
    def getCreateNoteError():
        """Returns the standard colored note-creation error message."""
        return textui.colored.red('Create note error')
class UserConfig():
    """Manages the per-user config file plus the keyring-stored API token."""

    def __init__(self, filepath):
        self.filepath = filepath
        self.user_config = ConfigParser.SafeConfigParser()
        try:
            if not os.path.isfile(self.filepath):
                raise IOError(self.filepath)
        except:
            # First run: create a skeleton config file with empty defaults.
            user_config = ConfigParser.RawConfigParser()
            user_config.add_section(sys_config.application_name)
            user_config.set(sys_config.application_name, 'notebook', '')
            user_config.set(sys_config.application_name, 'tags', '')
            with open(self.filepath, 'wb') as configfile:
                user_config.write(configfile)
            # Owner read/write, group/other read only (Python 2 octal).
            os.chmod(self.filepath, 0644)
        self.user_config.read(self.filepath)

    def getUserOption(self, option):
        """Returns a config value, or None if the option is absent."""
        if self.user_config.has_option(sys_config.application_name, option):
            return self.user_config.get(sys_config.application_name, option)

    def setDeveloperToken(self):
        """Prompts for a developer token and stores it in the system keyring.

        Loops until a token that validates against the API is entered.
        Returns self to allow call chaining.
        """
        print(textui.colored.green('Get Evernote DeveloperToken URL --> ' + sys_config.token_geturl))
        while True:
            developer_token = raw_input('Token: ')
            if self.isDeveloperToken(developer_token, sys_config.sandbox):
                keyring.set_password(sys_config.application_name, 'developer_token', developer_token)
                return self

    def setDefaultNotebook(self):
        """Prompts for the default notebook name; returns self for chaining."""
        print(textui.colored.green('Set toEver default post notebook / Not enter if you do not set'))
        notebook = raw_input('Notebook: ')
        self.user_config.set(sys_config.application_name, 'notebook', notebook)
        return self

    def setDefaultTags(self):
        """Prompts for the default tag list; returns self for chaining."""
        print(textui.colored.green('Set toEver default post tags / Not enter if you do not set'))
        tags = raw_input('Tags: ')
        self.user_config.set(sys_config.application_name, 'tags', tags)
        return self

    def save(self):
        """Writes the in-memory config back to disk."""
        self.user_config.write(open(self.filepath, 'w'))

    @staticmethod
    def isDeveloperToken(token, sandbox=True):
        """Returns True if *token* authenticates against the Evernote API."""
        try:
            EvernoteClient(token=token, sandbox=sandbox).get_note_store()
        except:
            print(textui.colored.red('Token can not be used'))
            return False
        return True
class Util():
    """Miscellaneous helpers."""

    @staticmethod
    def isBinary(data):
        """Return True when chardet cannot detect any text encoding for *data*."""
        detection = chardet.detect(data)
        return detection['encoding'] is None
def main():
    """Command-line entry point.

    Returns 0 on success, 1 on configuration failure, or a colored error
    message string (sys.exit prints a non-int argument and exits with 1).
    """
    parser = argparse.ArgumentParser(description=sys_config.application_name + ' version ' + sys_config.version)
    parser.add_argument('file', nargs='*', action='store', help='file to send to evernote')
    parser.add_argument('-f', '--filename', type=str, help='set note attachment file name (When the name is designated, it processed as attachment file.)')
    parser.add_argument('-t', '--title', type=str, help='set note title (omitted, the time is inputted automatically.)')
    parser.add_argument('--tags', type=str, help='set note tags (multiple tag separated by comma.)')
    parser.add_argument('--notebook', type=str, help='set note notebook')
    parser.add_argument('--config', action='store_true', help='set user config')
    parser.add_argument('--hide', action='store_true', help='hide the display message (except share link)')
    parser.add_argument('--share', action='store_true', help='set note share link')
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + sys_config.version)
    args = parser.parse_args()

    user_config = UserConfig(sys_config.user_filepath)

    # --config: interactive (re)configuration, then exit.
    if args.config:
        try:
            user_config.setDeveloperToken().setDefaultNotebook().setDefaultTags().save()
        except:
            return 1
        return 0

    # Token prompts must read from the terminal even when stdin is a pipe,
    # so swap stdin to /dev/tty for the duration of the token check.
    # (fix: local was previously misspelled `stdin_dafault`)
    stdin_default = sys.stdin
    sys.stdin = open('/dev/tty', 'rt')
    if not user_config.isDeveloperToken(keyring.get_password(sys_config.application_name, 'developer_token'), sys_config.sandbox):
        user_config.setDeveloperToken()
    sys.stdin = stdin_default

    toever = ToEver(keyring.get_password(sys_config.application_name, 'developer_token'), sys_config.sandbox)

    # Title defaults to a timestamp (or, later, to the attachment file name).
    note_title = args.title
    if args.title is None:
        note_title = 'toEver Post ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    # Fall back to the configured defaults for tags and notebook.
    if args.tags is None and user_config.getUserOption('tags'):
        args.tags = user_config.getUserOption('tags')
    if args.tags is not None:
        toever.tag = args.tags.split(',')
    if args.notebook is None and user_config.getUserOption('notebook'):
        args.notebook = user_config.getUserOption('notebook')
    if args.notebook is not None:
        # Resolve the notebook name to its GUID.
        for line in toever.listNotebooks():
            if line.name == args.notebook:
                toever.bookguid = line.guid
                break
    toever.hide = args.hide
    toever.share = args.share

    # Mode 1: one note per file argument (binary files become attachments).
    if len(args.file) > 0:
        for filepath in args.file:
            if not os.path.isfile(filepath):
                return textui.colored.red('File does not exist ' + filepath)
            toever.content = str()
            resource = None
            filename = os.path.basename(filepath)
            # getResource()/setContent() read from stdin by design.
            sys.stdin = open(filepath, 'r')
            if Util.isBinary(open(filepath, 'r').read()):
                resource = toever.getResource(filename)
                if not toever.hide:
                    print(textui.colored.green("Attachment file is '" + filename + "'"))
            else:
                toever.setContent()
            if args.title is None:
                note_title = filename
            if not toever.createNote(note_title, resource):
                return toever.getCreateNoteError()
        return 0

    # Mode 2: stdin posted as a named attachment.
    if args.filename is not None:
        if not toever.hide:
            print(textui.colored.green("Attachment file is '" + args.filename + "'"))
        if not toever.createNote(note_title, toever.getResource(args.filename)):
            return toever.getCreateNoteError()
        return 0

    # Mode 3: stdin posted as note text; Ctrl-C etc. still posts what was read.
    try:
        toever.setContent()
    except:
        pass
    finally:
        if not toever.createNote(note_title):
            return toever.getCreateNoteError()
    return 0
if __name__ == "__main__":
    # main() returns 0/1 or an error-message string; sys.exit handles both.
    sys.exit(main())
| gpl-3.0 |
markgw/jazzparser | src/jptests/data/init.py | 1 | 14507 | """Unit tests for jazzparser.data module
"""
"""
============================== License ========================================
Copyright (C) 2008, 2010-12 University of Edinburgh, Mark Granroth-Wilding
This file is part of The Jazz Parser.
The Jazz Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
The Jazz Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with The Jazz Parser. If not, see <http://www.gnu.org/licenses/>.
============================ End license ======================================
"""
__author__ = "Mark Granroth-Wilding <mark.granroth-wilding@ed.ac.uk>"
import unittest
from jazzparser.data import Chord, DerivationTrace, Fraction
class TestChord(unittest.TestCase):
    """
    Tests for the various ways of creating instances of Chord.
    """
    # (input numeral, expected root number, expected canonical numeral)
    ALLOWED_NUMERALS = [
        ("C", 0, "C"),
        ("Db", 1, "Db"),
        ("D", 2, "D"),
        ("Eb", 3, "Eb"),
        ("E", 4, "E"),
        ("F", 5, "F"),
        ("F#", 6, "Gb"),
        ("G", 7, "G"),
        ("G#", 8, "Ab"),
        ("A", 9, "A"),
        ("Bb", 10, "Bb"),
        ("B", 11, "B"),
    ]

    def test_from_numerals(self):
        """
        Try creating chords using all possible numerals and check the numeral
        and root get set correctly.
        """
        for numeral, root, trg_num in self.ALLOWED_NUMERALS:
            # Try creating a Chord with each numeral
            c = Chord(numeral)
            # Check it has the right numeral
            self.assertEqual(trg_num, c.root_numeral)
            # and the right root number
            self.assertEqual(root, c.root)

    def test_set_root(self):
        """
        Try setting the root or numeral after a chord is created and check
        that both values get correctly set.
        """
        c = Chord(self.ALLOWED_NUMERALS[0][0])
        for numeral, root, trg_num in self.ALLOWED_NUMERALS:
            # Try setting the root and check root and numeral are correct
            c.root = root
            self.assertEqual(root, c.root)
            self.assertEqual(trg_num, c.root_numeral)
        for numeral, root, trg_num in self.ALLOWED_NUMERALS:
            # Try setting the numeral and check root and numeral are correct
            c.root_numeral = numeral
            self.assertEqual(root, c.root)
            self.assertEqual(trg_num, c.root_numeral)

    def test_init_type(self):
        """
        Try creating chords with a particular type and check (a) that they
        successfully create a chord and (b) that the chord has the right type.
        """
        for ctype in Chord.TYPE_SYMBOLS.values():
            c = Chord("C", type=ctype)
            self.assertEqual(c.type, ctype)

    def test_interval(self):
        """
        Try getting the interval between two chords and check it comes out
        as expected.
        """
        # Some randomly chosen tests:
        # (interval, lower chord, upper chord, interval is its own inverse)
        tests = [
            (0, "C", "C", True),
            (2, "F", "G", False),
            (4, "D", "F#", False),
            (6, "B", "F", True),
            (8, "F", "Db", False),
            (10, "Ab", "F#", False)
        ]
        for interval, lower, upper, invertible in tests:
            c0 = Chord(lower)
            c1 = Chord(upper)
            self.assertEqual(interval, Chord.interval(c0, c1))
            # Try inverting the interval and check it's only the same in the
            # cases where the interval is its own inverse
            if invertible:
                self.assertEqual(interval, Chord.interval(c1, c0))
            else:
                self.assertNotEqual(interval, Chord.interval(c1, c0))

    def test_from_name(self):
        """
        from_name covers a lot of possible chord instances. Here we just test
        a sample of textual chords and check the instance gets the right
        attributes out of the name.

        It's by no means exhaustive!
        """
        tests = [
            # Name, root, type, additions, tetrad type
            ("C", 0, "", "", ""),
            ("F#m7", 6, "m7", "", "m7"),
            ("G7(9)", 7, "7", "9", "7"),
            ("A(9)", 9, "", "9", "7"),
            ("Dsus4", 2, "sus4", "", "sus4"),
            ("Esus4,7", 4, "sus4,7", "", "sus4,7"),
            ("Esus4(9)", 4, "sus4", "9", "sus4,7"),
            ("Fm,M7(+11)", 5, "m,M7", "+11", "m,M7"),
        ]
        for name, root, ctype, additions, tetrad in tests:
            c = Chord.from_name(name)
            self.assertEqual(root, c.root)
            self.assertEqual(ctype, c.type)
            self.assertEqual(additions, c.additions)
            self.assertEqual(tetrad, c.tetrad_type)
class TestDerivationTraceHalfspan(unittest.TestCase):
    """
    A derivation trace is quite a simple data structure. We test that it
    behaves correctly when used to store a trace of derivations in the
    halfspan formalism.

    This is specific to the L{halfspan
    formalism<jazzparser.formalisms.music_halfspan>}, the current and
    only supported formalism at the time
    of writing. If the formalism is deprecated these tests will need to
    be rewritten for the new formalism and these tests should be removed.
    """
    def setUp(self):
        from jazzparser.formalisms.music_halfspan.rules import ApplicationRule
        from jazzparser.formalisms.music_halfspan.syntax import AtomicCategory, \
            ComplexCategory, HalfCategory, Sign, Slash
        from jazzparser.formalisms.music_halfspan.semantics import \
            DummyLogicalForm, Semantics
        from jazzparser.grammar import Grammar

        # Use the default grammar
        self.grammar = Grammar()
        # Get a rule to instantiate: forward application
        self.rule = self.grammar.rules_by_name['appf']

        # Create some categories we can store as if the rule applied to them
        # Create an atomic category
        self.cat0 = AtomicCategory(
            HalfCategory("I"),
            HalfCategory("I"))
        # Create a complex category that could be applied to the atomic one
        self.cat1 = ComplexCategory(
            HalfCategory("V", function="D"),
            Slash(True),
            HalfCategory("I", function=["D", "T"]))
        # An atomic category, as if 0 was applied to 1
        self.cat2 = AtomicCategory(
            HalfCategory("V", function="D"),
            HalfCategory("I"))
        # A dummy semantics to use for all signs
        dummy_sem = Semantics(DummyLogicalForm())
        # Create signs from the categories
        self.sign0 = Sign(self.cat0, dummy_sem.copy())
        self.sign1 = Sign(self.cat1, dummy_sem.copy())
        self.sign2 = Sign(self.cat2, dummy_sem.copy())

    def test_create_lexical_trace(self):
        """
        Just creates a derivation trace in the simplest possible way, as if
        it's a lexical production.
        """
        # Successful construction is the whole test; the value is not used.
        trace = DerivationTrace(self.sign0, word="IM7")

    def test_create_rule_trace(self):
        """
        First creates two lexical traces (as tested in
        L{test_create_lexical_trace}) and then a trace for applying the
        application rule to them. The rule is not actually applied, we
        just pretend it was.
        """
        trace0 = DerivationTrace(self.sign0, word="IM7")
        trace1 = DerivationTrace(self.sign1, word="V7")
        # Pretend the rule was applied to the above signs
        trace2 = DerivationTrace(self.sign2, rule=self.rule, args=[trace1, trace0])

    def test_multiple_source_trace(self):
        """
        Creates two derivation traces like that created in
        L{test_create_rule_trace} and combines them into a single trace.
        """
        trace0 = DerivationTrace(self.sign0, word="IM7")
        trace1 = DerivationTrace(self.sign1, word="V7")
        # Pretend the rule was applied to the above signs
        trace2 = DerivationTrace(self.sign2, rule=self.rule, args=[trace1, trace0])
        # This rule app is actually the same as trace2, but the DT shouldn't
        # care about that, as it's not clever enough
        trace2.add_rule(self.rule, [trace1, trace0])

    def test_combined_traces(self):
        """
        Does the same thing as L{test_multiple_source_trace}, but does it by
        creating two DTs and adding the rules from one to the other.
        """
        trace0 = DerivationTrace(self.sign0, word="IM7")
        trace1 = DerivationTrace(self.sign1, word="V7")
        # Pretend the rule was applied to the above signs
        trace2 = DerivationTrace(self.sign2, rule=self.rule, args=[trace1, trace0])
        # This is actually the same as trace2
        trace2b = DerivationTrace(self.sign2, rule=self.rule, args=[trace1, trace0])
        trace2.add_rules_from_trace(trace2b)
class TestFraction(unittest.TestCase):
    """
    Tests for L{jazzparser.data.Fraction}.
    """
    def test_create_int(self):
        """ Simplest instantiation: int """
        f = Fraction(9)
        self.assertEqual(f, 9)

    def test_create_fraction(self):
        """ Simplest instantiation: fraction """
        # Smoke test: construction succeeding without error is the test.
        f = Fraction(9, 10)

    def test_simplify(self):
        """
        Basic test of simplification of fractions.
        """
        f0 = Fraction(9, 10)
        f1 = Fraction(18, 20)
        self.assertEqual(f0, f1)
        f2 = Fraction(17, 20)
        self.assertNotEqual(f0, f2)

    def test_create_string(self):
        """
        Test creating a fraction from a string representation.
        """
        f0 = Fraction("1 1/4")
        self.assertEqual(f0, Fraction(5, 4))
        f1 = Fraction("5")
        self.assertEqual(f1, Fraction(5))
        f2 = Fraction("5/4")
        self.assertEqual(f2, Fraction(5, 4))
        # NOTE(review): Fraction.ValueError is presumably a custom exception
        # attribute on Fraction -- confirm against jazzparser.data.
        for invalid in ["", "1.5", "1/1/1", "5 5", "4\\5", "a", "X"]:
            self.assertRaises(Fraction.ValueError, Fraction, invalid)

    def test_reparse_string(self):
        """
        Create some random fractions, get their string representation and check
        that this can be used to correctly reinstantiate the fraction.
        """
        from random import randint
        for i in range(50):
            # Create a random fraction
            f0 = Fraction(randint(0, 100), randint(1, 100))
            f0_str = str(f0)
            self.assertEqual(f0, Fraction(f0_str))

    def test_zero_denominator(self):
        """
        Setting a Fraction's denominator to 0 should raise an error.
        """
        self.assertRaises(ZeroDivisionError, Fraction, 5, 0)
        f = Fraction(1)
        self.assertRaises(ZeroDivisionError, lambda x: f / x, 0)

    def test_add(self):
        """
        Try adding Fractions together.
        """
        f0 = Fraction(5)
        f1 = Fraction(6)
        self.assertEqual(f0 + f1, 11)
        f2 = Fraction(7, 12) + f0
        self.assertEqual(f2, Fraction("5 7/12"))
        self.assertEqual(f2, Fraction(67, 12))

    def test_neg(self):
        """ Try negating Fractions """
        f0 = Fraction(5, 7)
        self.assertEqual(-f0, Fraction(-5, 7))
        f1 = Fraction("5 4/5")
        self.assertEqual(-f1, Fraction("-5 4/5"))

    def test_sub(self):
        """ Test subtraction """
        f0 = Fraction(5)
        f1 = Fraction(6)
        self.assertEqual(f0 - f1, -1)
        f2 = Fraction(7, 12) - f0
        self.assertEqual(f2, Fraction("-4 5/12"))
        self.assertEqual(f2, Fraction(-53, 12))

    def test_mul(self):
        """ Test multiplication """
        f0 = Fraction(1, 2) * Fraction(3)
        self.assertEqual(f0, Fraction(3, 2))
        f1 = Fraction(5, 7) * Fraction(3, 9)
        self.assertEqual(f1, Fraction(15, 63))
        # Signs cancel: (-5/7) * (3/-9) == (5/7) * (3/9)
        f2 = Fraction(-5, 7) * Fraction(3, -9)
        self.assertEqual(f2, f1)

    def test_div(self):
        """ Test division """
        f0 = Fraction(1, 2) / 3
        self.assertEqual(f0, Fraction(1, 6))
        f0 = Fraction(1, 2) / Fraction(3)
        self.assertEqual(f0, Fraction(1, 6))
        f1 = Fraction(5, 7) / Fraction(3, 9)
        self.assertEqual(f1, Fraction(45, 21))
        # Signs cancel, as in test_mul
        f2 = Fraction(-5, 7) / Fraction(3, -9)
        self.assertEqual(f2, f1)
        # Division by a float yields a float
        f3 = Fraction(5, 7) / 0.5
        self.assertEqual(f3, 10.0 / 7.0)

    def test_float(self):
        """ Conversion to float """
        from random import randint
        for i in range(50):
            n = randint(0, 100)
            d = randint(1, 100)
            self.assertEqual(float(Fraction(n, d)), float(n) / float(d))

    def test_long(self):
        """ Conversion to long """
        # Python 2 only: long() and floor division of longs
        from random import randint
        for i in range(50):
            n = randint(0, 100)
            d = randint(1, 100)
            self.assertEqual(long(Fraction(n, d)), long(n) / long(d))

    def test_int(self):
        """ Conversion to int """
        from random import randint
        for i in range(50):
            n = randint(0, 100)
            d = randint(1, 100)
            # n/d is floor division for ints under Python 2
            self.assertEqual(int(Fraction(n, d)), n / d)

    def test_equal(self):
        """ Test that equal Fractions evaluate as equal """
        from random import randint
        for i in range(10):
            n = randint(0, 100)
            d = randint(1, 100)
            self.assertEqual(Fraction(n, d), Fraction(n, d))
        self.assertEqual(Fraction(50, 4), Fraction("12 1/2"))
        self.assertEqual(Fraction(50, 4), Fraction(25, 2))
        # Double unary minus is a no-op
        self.assertEqual(Fraction(7, 6), --Fraction(7, 6))
        f = Fraction(7, 19)
        self.assertEqual(f, f / Fraction(6, 17) * Fraction(12, 34))
if __name__ == '__main__':
    # Run all test cases in this module.
    unittest.main()
| gpl-3.0 |
esilgard/argos_nlp | fhcrc_pathology/PathSpecimenType.py | 2 | 1076 | '''author@esilgard'''
#
# Copyright (c) 2013-2016 Fred Hutchinson Cancer Research Center
#
# Licensed under the Apache License, Version 2.0: http://www.apache.org/licenses/LICENSE-2.0
#
from OneFieldPerSpecimen import OneFieldPerSpecimen
import global_strings as gb
class PathSpecimenType(OneFieldPerSpecimen):
    ''' extract specimen type (procedure) from path text '''
    __version__ = 'SpecimenType1.0'

    def __init__(self):
        super(PathSpecimenType, self).__init__()
        # Output field and table names for per-specimen vs. overall results.
        self.specimen_field_name = 'SpecimenFindType'
        self.overall_field_name = 'PathSpecimenType'
        self.specimen_table = gb.FINDING_TABLE
        self.overall_table = gb.PATHOLOGY_TABLE
        # Confidence scores assigned to labeled vs. unlabeled specimens.
        self.specimen_confidence = 0.7
        self.unlabled_specimen_confidence = 0.5
        ## reference lists & dictionaries ##
        self.file_name_string = 'procedures'
        ## relevant sections of the report ##
        self.good_section = (r'SPECIMEN|Specimen|IMPRESSION|DIAGNOSIS|'
                             r'COMMENT|DX|DESCRIPTION|DESC|GROSS')
        self.bad_section = r'CLINICAL|Note'
| apache-2.0 |
TheTimmy/spack | var/spack/repos/builtin/packages/libconfig/package.py | 3 | 1907 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libconfig(AutotoolsPackage):
    """C/C++ Configuration File Library"""

    homepage = "http://www.hyperrealm.com/libconfig/"
    url = "https://github.com/hyperrealm/libconfig/archive/v1.6.tar.gz"

    # Regenerate the autotools build system before configuring
    # (presumably the GitHub archive ships no pre-built configure
    # script -- confirm against upstream releases).
    force_autoreconf = True

    # there is currently a build error with version 1.6, see:
    # https://github.com/hyperrealm/libconfig/issues/47
    # version('1.6', '2ccd24b6a2ee39f7ff8a3badfafb6539')
    version('1.5', 'e92a91c2ddf3bf77bea0f5ed7f09e492', preferred=True)

    # Full autotools chain needed at build time because of force_autoreconf.
    depends_on('m4', type=('build'))
    depends_on('autoconf', type=('build'))
    depends_on('automake', type=('build'))
    depends_on('libtool', type=('build'))
| lgpl-2.1 |
nagyistoce/photivo | scons-local-2.2.0/SCons/Platform/sunos.py | 14 | 1969 | """engine.SCons.Platform.sunos
Platform-specific initialization for Sun systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/sunos.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import posix
def generate(env):
    """Add SunOS-specific construction variables to *env*.

    Starts from the generic POSIX settings, then layers the Solaris
    packaging tools and command-line length limit on top.
    """
    posix.generate(env)

    # Based on sunSparc 8:32bit
    # ARG_MAX=1048320 - 3000 for environment expansion
    solaris_settings = (
        ('MAXLINELENGTH', 1045320),
        ('PKGINFO', 'pkginfo'),
        ('PKGCHK', '/usr/sbin/pkgchk'),
    )
    for key, value in solaris_settings:
        env[key] = value

    # Make the Sun compiler suite and SCCS tools reachable.
    env['ENV']['PATH'] = env['ENV']['PATH'] + ':/opt/SUNWspro/bin:/usr/ccs/bin'
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
publicRoman/spark | examples/src/main/python/ml/lda_example.py | 54 | 1900 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.clustering import LDA
# $example off$
from pyspark.sql import SparkSession
"""
An example demonstrating LDA.
Run with:
bin/spark-submit examples/src/main/python/ml/lda_example.py
"""
if __name__ == "__main__":
    spark = SparkSession \
        .builder \
        .appName("LDAExample") \
        .getOrCreate()

    # $example on$
    # Loads data.
    dataset = spark.read.format("libsvm").load("data/mllib/sample_lda_libsvm_data.txt")

    # Trains a LDA model.
    lda = LDA(k=10, maxIter=10)
    model = lda.fit(dataset)

    # Variational bounds on corpus quality: lower bound on log likelihood,
    # upper bound on perplexity.
    ll = model.logLikelihood(dataset)
    lp = model.logPerplexity(dataset)
    print("The lower bound on the log likelihood of the entire corpus: " + str(ll))
    print("The upper bound on perplexity: " + str(lp))

    # Describe topics.
    topics = model.describeTopics(3)
    print("The topics described by their top-weighted terms:")
    topics.show(truncate=False)

    # Shows the result
    transformed = model.transform(dataset)
    transformed.show(truncate=False)
    # $example off$

    spark.stop()
| apache-2.0 |
lmtierney/selenium | py/selenium/webdriver/support/event_firing_webdriver.py | 7 | 12322 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
    """Recursively wrap WebElement values returned by the wrapped driver.

    Lists are wrapped element-wise; any value that is neither a list nor a
    WebElement is passed through untouched.
    """
    if isinstance(result, list):
        return [_wrap_elements(item, ef_driver) for item in result]
    if isinstance(result, WebElement):
        return EventFiringWebElement(result, ef_driver)
    return result
class EventFiringWebDriver(object):
"""
A wrapper around an arbitrary WebDriver instance which supports firing events
"""
    def __init__(self, driver, event_listener):
        """
        Creates a new instance of the EventFiringWebDriver

        :Args:
         - driver : A WebDriver instance
         - event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially

        Example:

        ::

            from selenium.webdriver import Firefox
            from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener

            class MyListener(AbstractEventListener):
                def before_navigate_to(self, url, driver):
                    print("Before navigate to %s" % url)
                def after_navigate_to(self, url, driver):
                    print("After navigate to %s" % url)

            driver = Firefox()
            ef_driver = EventFiringWebDriver(driver, MyListener())
            ef_driver.get("http://www.google.co.in/")
        """
        # Fail fast on wrong argument types.
        if not isinstance(driver, WebDriver):
            raise WebDriverException("A WebDriver instance must be supplied")
        if not isinstance(event_listener, AbstractEventListener):
            raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
        self._driver = driver
        # Route the driver's value wrapping through this wrapper so values
        # sent to the remote end are unwrapped consistently.
        # NOTE(review): assumes _wrap_value is defined on this class further
        # down the file -- confirm.
        self._driver._wrap_value = self._wrap_value
        self._listener = event_listener
    @property
    def wrapped_driver(self):
        """Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
        return self._driver
    def get(self, url):
        """Loads *url*, firing before/after navigate_to listener events."""
        self._dispatch("navigate_to", (url, self._driver), "get", (url, ))

    def back(self):
        """Goes one step back in history, firing navigate_back events."""
        self._dispatch("navigate_back", (self._driver,), "back", ())

    def forward(self):
        """Goes one step forward in history, firing navigate_forward events."""
        self._dispatch("navigate_forward", (self._driver,), "forward", ())

    def execute_script(self, script, *args):
        """Runs synchronous JavaScript, firing execute_script events."""
        # EventFiringWebElement args must be unwrapped before being sent on.
        unwrapped_args = (script,) + self._unwrap_element_args(args)
        return self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)

    def execute_async_script(self, script, *args):
        """Runs asynchronous JavaScript, firing execute_script events."""
        unwrapped_args = (script,) + self._unwrap_element_args(args)
        return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)

    def close(self):
        """Closes the current window, firing close events."""
        self._dispatch("close", (self._driver,), "close", ())

    def quit(self):
        """Quits the wrapped driver, firing quit events."""
        self._dispatch("quit", (self._driver,), "quit", ())
    def find_element(self, by=By.ID, value=None):
        """Finds one element, firing before/after find listener events."""
        return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))

    def find_elements(self, by=By.ID, value=None):
        """Finds all matching elements, firing before/after find events."""
        return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
    # Convenience wrappers mirroring the WebDriver find_* API; each one
    # delegates to find_element/find_elements so listener events fire
    # uniformly for every locator strategy.
    def find_element_by_id(self, id_):
        return self.find_element(by=By.ID, value=id_)

    def find_elements_by_id(self, id_):
        return self.find_elements(by=By.ID, value=id_)

    def find_element_by_xpath(self, xpath):
        return self.find_element(by=By.XPATH, value=xpath)

    def find_elements_by_xpath(self, xpath):
        return self.find_elements(by=By.XPATH, value=xpath)

    def find_element_by_link_text(self, link_text):
        return self.find_element(by=By.LINK_TEXT, value=link_text)

    def find_elements_by_link_text(self, text):
        return self.find_elements(by=By.LINK_TEXT, value=text)

    def find_element_by_partial_link_text(self, link_text):
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_elements_by_partial_link_text(self, link_text):
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_element_by_name(self, name):
        return self.find_element(by=By.NAME, value=name)

    def find_elements_by_name(self, name):
        return self.find_elements(by=By.NAME, value=name)

    def find_element_by_tag_name(self, name):
        return self.find_element(by=By.TAG_NAME, value=name)

    def find_elements_by_tag_name(self, name):
        return self.find_elements(by=By.TAG_NAME, value=name)

    def find_element_by_class_name(self, name):
        return self.find_element(by=By.CLASS_NAME, value=name)

    def find_elements_by_class_name(self, name):
        return self.find_elements(by=By.CLASS_NAME, value=name)

    def find_element_by_css_selector(self, css_selector):
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)

    def find_elements_by_css_selector(self, css_selector):
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._driver, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self)
def _unwrap_element_args(self, args):
if isinstance(args, EventFiringWebElement):
return args.wrapped_element
elif isinstance(args, tuple):
return tuple([self._unwrap_element_args(item) for item in args])
elif isinstance(args, list):
return [self._unwrap_element_args(item) for item in args]
else:
return args
def _wrap_value(self, value):
if isinstance(value, EventFiringWebElement):
return WebDriver._wrap_value(self._driver, value.wrapped_element)
return WebDriver._wrap_value(self._driver, value)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._driver, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._driver, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args, **kwargs):
try:
result = attrib(*args, **kwargs)
return _wrap_elements(result, self)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
try:
attrib = getattr(self._driver, name)
return _wrap if callable(attrib) else attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
class EventFiringWebElement(object):
""""
A wrapper around WebElement instance which supports firing events
"""
def __init__(self, webelement, ef_driver):
"""
Creates a new instance of the EventFiringWebElement
"""
self._webelement = webelement
self._ef_driver = ef_driver
self._driver = ef_driver.wrapped_driver
self._listener = ef_driver._listener
@property
def wrapped_element(self):
"""Returns the WebElement wrapped by this EventFiringWebElement instance"""
return self._webelement
def click(self):
self._dispatch("click", (self._webelement, self._driver), "click", ())
def clear(self):
self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())
def send_keys(self, *value):
self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._webelement, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self._ef_driver)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._webelement, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._webelement, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args, **kwargs):
try:
result = attrib(*args, **kwargs)
return _wrap_elements(result, self._ef_driver)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
try:
attrib = getattr(self._webelement, name)
return _wrap if callable(attrib) else attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise
| apache-2.0 |
EKiefer/edge-starter | py34env/Lib/site-packages/django/contrib/gis/db/backends/base/models.py | 434 | 7111 | import re
from django.contrib.gis import gdal
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class SpatialRefSysMixin(object):
"""
The SpatialRefSysMixin is a class used by the database-dependent
SpatialRefSys objects to reduce redundant code.
"""
# For pulling out the spheroid from the spatial reference string. This
# regular expression is used only if the user does not have GDAL installed.
# TODO: Flattening not used in all ellipsoids, could also be a minor axis,
# or 'b' parameter.
spheroid_regex = re.compile(r'.+SPHEROID\[\"(?P<name>.+)\",(?P<major>\d+(\.\d+)?),(?P<flattening>\d{3}\.\d+),')
# For pulling out the units on platforms w/o GDAL installed.
# TODO: Figure out how to pull out angular units of projected coordinate system and
# fix for LOCAL_CS types. GDAL should be highly recommended for performing
# distance queries.
units_regex = re.compile(
r'.+UNIT ?\["(?P<unit_name>[\w \'\(\)]+)", ?(?P<unit>[\d\.]+)'
r'(,AUTHORITY\["(?P<unit_auth_name>[\w \'\(\)]+)",'
r'"(?P<unit_auth_val>\d+)"\])?\]([\w ]+)?(,'
r'AUTHORITY\["(?P<auth_name>[\w \'\(\)]+)","(?P<auth_val>\d+)"\])?\]$'
)
@property
def srs(self):
"""
Returns a GDAL SpatialReference object, if GDAL is installed.
"""
if gdal.HAS_GDAL:
# TODO: Is caching really necessary here? Is complexity worth it?
if hasattr(self, '_srs'):
# Returning a clone of the cached SpatialReference object.
return self._srs.clone()
else:
# Attempting to cache a SpatialReference object.
# Trying to get from WKT first.
try:
self._srs = gdal.SpatialReference(self.wkt)
return self.srs
except Exception as msg:
pass
try:
self._srs = gdal.SpatialReference(self.proj4text)
return self.srs
except Exception as msg:
pass
raise Exception('Could not get OSR SpatialReference from WKT: %s\nError:\n%s' % (self.wkt, msg))
else:
raise Exception('GDAL is not installed.')
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening).
"""
if gdal.HAS_GDAL:
return self.srs.ellipsoid
else:
m = self.spheroid_regex.match(self.wkt)
if m:
return (float(m.group('major')), float(m.group('flattening')))
else:
return None
@property
def name(self):
"Returns the projection name."
return self.srs.name
@property
def spheroid(self):
"Returns the spheroid name for this spatial reference."
return self.srs['spheroid']
@property
def datum(self):
"Returns the datum for this spatial reference."
return self.srs['datum']
@property
def projected(self):
"Is this Spatial Reference projected?"
if gdal.HAS_GDAL:
return self.srs.projected
else:
return self.wkt.startswith('PROJCS')
@property
def local(self):
"Is this Spatial Reference local?"
if gdal.HAS_GDAL:
return self.srs.local
else:
return self.wkt.startswith('LOCAL_CS')
@property
def geographic(self):
"Is this Spatial Reference geographic?"
if gdal.HAS_GDAL:
return self.srs.geographic
else:
return self.wkt.startswith('GEOGCS')
@property
def linear_name(self):
"Returns the linear units name."
if gdal.HAS_GDAL:
return self.srs.linear_name
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def linear_units(self):
"Returns the linear units."
if gdal.HAS_GDAL:
return self.srs.linear_units
elif self.geographic:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def angular_name(self):
"Returns the name of the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_name
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit_name')
@property
def angular_units(self):
"Returns the angular units."
if gdal.HAS_GDAL:
return self.srs.angular_units
elif self.projected:
return None
else:
m = self.units_regex.match(self.wkt)
return m.group('unit')
@property
def units(self):
"Returns a tuple of the units and the name."
if self.projected or self.local:
return (self.linear_units, self.linear_name)
elif self.geographic:
return (self.angular_units, self.angular_name)
else:
return (None, None)
@classmethod
def get_units(cls, wkt):
"""
Class method used by GeometryField on initialization to
retrieve the units on the given WKT, without having to use
any of the database fields.
"""
if gdal.HAS_GDAL:
return gdal.SpatialReference(wkt).units
else:
m = cls.units_regex.match(wkt)
return m.group('unit'), m.group('unit_name')
@classmethod
def get_spheroid(cls, wkt, string=True):
"""
Class method used by GeometryField on initialization to
retrieve the `SPHEROID[..]` parameters from the given WKT.
"""
if gdal.HAS_GDAL:
srs = gdal.SpatialReference(wkt)
sphere_params = srs.ellipsoid
sphere_name = srs['spheroid']
else:
m = cls.spheroid_regex.match(wkt)
if m:
sphere_params = (float(m.group('major')), float(m.group('flattening')))
sphere_name = m.group('name')
else:
return None
if not string:
return sphere_name, sphere_params
else:
# `string` parameter used to place in format acceptable by PostGIS
if len(sphere_params) == 3:
radius, flattening = sphere_params[0], sphere_params[2]
else:
radius, flattening = sphere_params
return 'SPHEROID["%s",%s,%s]' % (sphere_name, radius, flattening)
def __str__(self):
"""
Returns the string representation. If GDAL is installed,
it will be 'pretty' OGC WKT.
"""
try:
return six.text_type(self.srs)
except Exception:
return six.text_type(self.wkt)
| mit |
m-weigand/ccd_tools | Examples/ccd_time/03_gap/generate_data.py | 1 | 1172 | #!/usr/bin/python
"""
Generate sample SIP-Spectra for 9 time steps, decreasing linearly tau
"""
import os
import numpy as np
import NDimInv.colecole as CC
frequencies = np.logspace(-2, 4, 20)
fin = np.hstack((frequencies, frequencies))
# generate CC parameters
np.random.seed(5)
rho0_1 = np.random.randint(0, 10, size=10) + 70
rho0_2 = np.random.randint(0, 10, size=10) + 30
rho0 = np.hstack((rho0_1, rho0_2))
m = np.logspace(-2, -1, 3)
tau = np.logspace(np.log10(0.004), np.log10(0.4), 20)
tau = 0.004
# note: we keep c constant
c = 0.6
basedir = 'data'
if(not os.path.isdir(basedir)):
os.makedirs(basedir)
os.chdir(basedir)
cr_data = []
cc_list = []
for timestep in range(0, 20):
cc_pars = [np.log(rho0[timestep]), m[0], np.log(tau), c]
cc_list.append(cc_pars)
magpha = CC.cole_log(fin, cc_pars).flatten()[np.newaxis, :]
magpha[0, 0:magpha.size / 2] = np.exp(magpha[0, 0:magpha.size / 2])
cr_data.append(magpha)
cr_data = np.array(cr_data).squeeze()
np.savetxt('data.dat', cr_data)
np.savetxt('frequencies.dat', frequencies)
times = range(0, 10) + range(20, 30)
np.savetxt('times.dat', times)
np.savetxt('cc_pars.dat', np.array(cc_list))
| gpl-3.0 |
jumping/Diamond | src/collectors/servertechpdu/servertechpdu.py | 31 | 4354 | # coding=utf-8
"""
SNMPCollector for Server Tech PDUs
Server Tech is a manufacturer of PDUs
http://www.servertech.com/
"""
import time
import re
import os
import sys
# Fix Path for locating the SNMPCollector
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),
'../',
'snmp',
)))
from diamond.metric import Metric
from snmp import SNMPCollector as parent_SNMPCollector
class ServerTechPDUCollector(parent_SNMPCollector):
"""
SNMPCollector for ServerTech PDUs
"""
PDU_SYSTEM_GAUGES = {
"systemTotalWatts": "1.3.6.1.4.1.1718.3.1.6"
}
PDU_INFEED_NAMES = "1.3.6.1.4.1.1718.3.2.2.1.3"
PDU_INFEED_GAUGES = {
"infeedCapacityAmps": "1.3.6.1.4.1.1718.3.2.2.1.10",
"infeedVolts": "1.3.6.1.4.1.1718.3.2.2.1.11",
"infeedAmps": "1.3.6.1.4.1.1718.3.2.2.1.7",
"infeedWatts": "1.3.6.1.4.1.1718.3.2.2.1.12"
}
def get_default_config_help(self):
config_help = super(ServerTechPDUCollector,
self).get_default_config_help()
config_help.update({
'host': 'PDU dns address',
'port': 'PDU port to collect snmp data',
'community': 'SNMP community'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ServerTechPDUCollector, self).get_default_config()
config.update({
'path': 'pdu',
'timeout': 15,
'retries': 3,
})
return config
def collect_snmp(self, device, host, port, community):
"""
Collect stats from device
"""
# Log
self.log.info("Collecting ServerTech PDU statistics from: %s" % device)
# Set timestamp
timestamp = time.time()
inputFeeds = {}
# Collect PDU input gauge values
for gaugeName, gaugeOid in self.PDU_SYSTEM_GAUGES.items():
systemGauges = self.walk(gaugeOid, host, port, community)
for o, gaugeValue in systemGauges.items():
# Get Metric Name
metricName = gaugeName
# Get Metric Value
metricValue = float(gaugeValue)
# Get Metric Path
metricPath = '.'.join(
['devices', device, 'system', metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 2)
# Publish Metric
self.publish_metric(metric)
# Collect PDU input feed names
inputFeedNames = self.walk(
self.PDU_INFEED_NAMES, host, port, community)
for o, inputFeedName in inputFeedNames.items():
# Extract input feed name
inputFeed = ".".join(o.split(".")[-2:])
inputFeeds[inputFeed] = inputFeedName
# Collect PDU input gauge values
for gaugeName, gaugeOid in self.PDU_INFEED_GAUGES.items():
inputFeedGauges = self.walk(gaugeOid, host, port, community)
for o, gaugeValue in inputFeedGauges.items():
# Extract input feed name
inputFeed = ".".join(o.split(".")[-2:])
# Get Metric Name
metricName = '.'.join([re.sub(r'\.|\\', '_',
inputFeeds[inputFeed]),
gaugeName])
# Get Metric Value
if gaugeName == "infeedVolts":
# Note: Voltage is in "tenth volts", so divide by 10
metricValue = float(gaugeValue) / 10.0
elif gaugeName == "infeedAmps":
# Note: Amps is in "hundredth amps", so divide by 100
metricValue = float(gaugeValue) / 100.0
else:
metricValue = float(gaugeValue)
# Get Metric Path
metricPath = '.'.join(['devices', device, 'input', metricName])
# Create Metric
metric = Metric(metricPath, metricValue, timestamp, 2)
# Publish Metric
self.publish_metric(metric)
| mit |
google/pymql | test/mql_manual_two_test.py | 1 | 44269 | #!/usr/bin/python2.4
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
#
"""Read examples from the MQL Manual at wiki.freebase.com."""
__author__ = 'bneutra@google.com (Brendan Neutra)'
import google3
from pymql.mql import error
from pymql.test import mql_fixture
class MQLTest(mql_fixture.MQLTest):
"""MQL Manual wiki examples."""
def setUp(self):
self.SetMockPath('data/mql_manual_two.yaml')
super(MQLTest, self).setUp()
self.env = {'as_of_time': '2011-10-24'}
def testMqldocExample46(self):
"""mqldoc_example_46."""
query = """
[
{
"limit": 3,
"type": "/film/film",
"initial_release_date": "1970",
"estimate-count": null,
"name": null
}
]
"""
exp_response = """
[
{
"estimate-count": 52058,
"initial_release_date": "1970",
"name": "Bombay Talkie",
"type": "/film/film"
},
{
"estimate-count": 52058,
"initial_release_date": "1970",
"name": "Brancaleone alle Crociate",
"type": "/film/film"
},
{
"estimate-count": 52058,
"initial_release_date": "1970",
"name": "Cherry, Harry & Raquel!",
"type": "/film/film"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample47(self):
"""mqldoc_example_47."""
query = """
[
{
"sort": "name",
"type": "/film/film",
"name": null,
"directed_by": "Sofia Coppola"
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"name": "Lick the Star",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"name": "Lost in Translation",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"name": "Marie Antoinette",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"name": "Somewhere",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"name": "The Virgin Suicides",
"directed_by": "Sofia Coppola"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample48(self):
"""mqldoc_example_48."""
query = """
[
{
"sort": "initial_release_date",
"type": "/film/film",
"name": null,
"initial_release_date": null,
"directed_by": "Sofia Coppola"
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"initial_release_date": "1998-10",
"name": "Lick the Star",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "1999-05-19",
"name": "The Virgin Suicides",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2003-09-12",
"name": "Lost in Translation",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2006-05-24",
"name": "Marie Antoinette",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2010-09-03",
"name": "Somewhere",
"directed_by": "Sofia Coppola"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample49(self):
"""mqldoc_example_49."""
query = """
[
{
"sort": "-initial_release_date",
"type": "/film/film",
"name": null,
"initial_release_date": null,
"directed_by": "Sofia Coppola"
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"initial_release_date": "2010-09-03",
"name": "Somewhere",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2006-05-24",
"name": "Marie Antoinette",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2003-09-12",
"name": "Lost in Translation",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "1999-05-19",
"name": "The Virgin Suicides",
"directed_by": "Sofia Coppola"
},
{
"type": "/film/film",
"initial_release_date": "1998-10",
"name": "Lick the Star",
"directed_by": "Sofia Coppola"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample50(self):
"""mqldoc_example_50."""
query = """
{
"type": "/film/director",
"name": "Francis Ford Coppola",
"film": [
{
"sort": "initial_release_date",
"limit": 1,
"name": null,
"initial_release_date": null
}
]
}
"""
exp_response = """
{
"type": "/film/director",
"name": "Francis Ford Coppola",
"film": [
{
"initial_release_date": "1960",
"name": "Battle Beyond the Sun"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample51(self):
"""mqldoc_example_51."""
query = """
[
{
"type": "/music/album",
"id": "/en/zenyatta_mondatta",
"releases": [
{
"sort": [
"format",
"release_date"
],
"release_date": null,
"format": null
}
]
}
]
"""
exp_response = """
[
{
"type": "/music/album",
"id": "/en/zenyatta_mondatta",
"releases": [
{
"release_date": "1983-12-19",
"format": "Compact Disc"
},
{
"release_date": "1991",
"format": "Compact Disc"
},
{
"release_date": "2003",
"format": "Compact Disc"
},
{
"release_date": "1980-10",
"format": "Gramophone record"
},
{
"release_date": "1980-10",
"format": "Gramophone record"
},
{
"release_date": "1980-10-03",
"format": "Gramophone record"
},
{
"release_date": "2007-11-05",
"format": "Gramophone record"
},
{
"release_date": "1980-10",
"format": null
},
{
"release_date": "2003",
"format": null
}
]
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample52(self):
"""mqldoc_example_52."""
query = """
[
{
"limit": 3,
"sort": "releases.release_date",
"type": "/music/album",
"name": null,
"releases": [
{
"sort": "release_date",
"release_date": null,
"limit": 1,
"format": null
}
],
"artist": {
"id": "/en/van_halen"
}
}
]
"""
exp_response = """
[
{
"artist": {
"id": "/en/van_halen"
},
"name": "Van Halen",
"releases": [{
"format": "Gramophone record",
"release_date": "1978-02-10"
}],
"type": "/music/album"
},
{
"artist": {
"id": "/en/van_halen"
},
"name": "Van Halen II",
"releases": [{
"format": "Gramophone record",
"release_date": "1979"
}],
"type": "/music/album"
},
{
"artist": {
"id": "/en/van_halen"
},
"name": "Women and Children First",
"releases": [{
"format": "Gramophone record",
"release_date": "1980"
}],
"type": "/music/album"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample53(self):
"""mqldoc_example_53."""
query = """
[
{
"sort": [
"character",
"film.name",
"-actor./people/person/date_of_birth"
],
"character": null,
"type": "/film/performance",
"actor": {
"/people/person/date_of_birth": null,
"name": null
},
"film": {
"name": null,
"directed_by": "George Lucas"
}
}
]
"""
partial_response = {
"type": "/film/performance",
"character": "Ackmena",
"film": {
"name": "The Star Wars Holiday Special",
"directed_by": "George Lucas"
},
"actor": {
"/people/person/date_of_birth": "1922-05-13",
"name": "Beatrice Arthur"
}
}
self.DoQuery(query)
assert partial_response == self.mql_result.result[0]
def testMqldocExample54(self):
"""mqldoc_example_54."""
query = """
[
{
"type": "/film/film",
"id": "/en/blade_runner",
"starring": [
{
"sort": "index",
"index": null,
"character": null,
"actor": null
}
]
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"id": "/en/blade_runner",
"starring": [
{
"index": 0,
"character": "Rick Deckard",
"actor": "Harrison Ford"
},
{
"index": 1,
"character": "Roy Batty",
"actor": "Rutger Hauer"
},
{
"index": 2,
"character": "Rachael",
"actor": "Sean Young"
},
{
"index": 3,
"character": "Pris",
"actor": "Daryl Hannah"
},
{
"index": 4,
"character": "Zhora",
"actor": "Joanna Cassidy"
},
{
"index": 5,
"character": "Leon Kowalski",
"actor": "Brion James"
},
{
"index": 6,
"character": "Holden",
"actor": "Morgan Paull"
},
{
"index": 7,
"character": "Eldon Tyrell",
"actor": "Joe Turkel"
},
{
"index": 8,
"character": "J.F. Sebastian",
"actor": "William Sanderson"
},
{
"index": 9,
"character": "Gaff",
"actor": "Edward James Olmos"
},
{
"index": 10,
"character": "Hannibal Chew",
"actor": "James Hong"
},
{
"index": 11,
"character": "Bryant",
"actor": "M. Emmet Walsh"
},
{
"index": null,
"character": "Taffey Lewis",
"actor": "Hy Pyke"
},
{
"index": null,
"character": "Bear",
"actor": "Kevin Thompson"
},
{
"index": null,
"character": "Kaiser",
"actor": "John Edward Allen"
}
]
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample55(self):
"""mqldoc_example_55."""
query = """
[
{
"type": "/film/film",
"id": "/en/blade_runner",
"starring": [
{
"sort": "-index",
"index": null,
"character": null,
"limit": 5,
"actor": null
}
]
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"id": "/en/blade_runner",
"starring": [
{
"index": null,
"character": "Taffey Lewis",
"actor": "Hy Pyke"
},
{
"index": null,
"character": "Bear",
"actor": "Kevin Thompson"
},
{
"index": null,
"character": "Kaiser",
"actor": "John Edward Allen"
},
{
"index": 1,
"character": "Bryant",
"actor": "M. Emmet Walsh"
},
{
"index": 0,
"character": "Hannibal Chew",
"actor": "James Hong"
}
]
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample56(self):
"""mqldoc_example_56."""
query = """
[
{
"track": "Lola",
"limit": 5,
"album": [
{
"optional": "optional",
"name": "Greatest Hits"
}
],
"type": "/music/artist",
"name": null
}
]
"""
exp_response = """
[
{
"album": [],
"track": "Lola",
"type": "/music/artist",
"name": "Herman Brood"
},
{
"album": [
{
"name": "Greatest Hits"
},
{
"name": "Greatest Hits"
},
{
"name": "Greatest Hits"
}
],
"track": "Lola",
"type": "/music/artist",
"name": "Robbie Williams"
},
{
"album": [],
"track": "Lola",
"type": "/music/artist",
"name": "Madness"
},
{
"album": [],
"track": "Lola",
"type": "/music/artist",
"name": "Marlene Dietrich"
},
{
"album": [],
"track": "Lola",
"type": "/music/artist",
"name": "The Raincoats"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample57(self):
"""mqldoc_example_57."""
query = """
[
{
"limit": 6,
"type": "/music/track",
"name": "Lola",
"releases": [
{
"optional": "optional",
"name": "Greatest Hits",
"format": [
{
"optional": "optional",
"name": "Compact Disc"
}
]
}
]
}
]
"""
exp_response = """
[{
"type": "/music/track",
"name": "Lola",
"releases": []
},
{
"type": "/music/track",
"name": "Lola",
"releases": []
},
{
"type": "/music/track",
"name": "Lola",
"releases": []
},
{
"type": "/music/track",
"name": "Lola",
"releases": []
},
{
"type": "/music/track",
"name": "Lola",
"releases": []
},
{
"type": "/music/track",
"name": "Lola",
"releases": [
{
"name": "Greatest Hits",
"format": []
},
{
"name": "Greatest Hits",
"format": []
},
{
"name": "Greatest Hits",
"format": [
{
"name": "Compact Disc"
}
]
}
]
}]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample58(self):
"""mqldoc_example_58."""
query = """
[
{
"limit": 3,
"type": "/film/director",
"name": null,
"/common/topic/alias": []
}
]
"""
exp_response = """
[
{
"type": "/film/director",
"name": "Blake Edwards",
"/common/topic/alias": [
"William Blake Crump"
]
},
{
"type": "/film/director",
"name": "D. A. Pennebaker",
"/common/topic/alias": [
"Donn Alan Pennebaker",
"D.A. Pennabaker",
"Don Alan Pennebaker",
"Penny"
]
},
{
"type": "/film/director",
"name": "Chris Hegedus",
"/common/topic/alias": []
}]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample59(self):
"""mqldoc_example_59."""
query = """
[
{
"limit": 3,
"type": "/film/director",
"name": null,
"/common/topic/alias": [
{
"lang": "/lang/en",
"value": null
}
]
}
]
"""
exp_response = """
[
{
"/common/topic/alias": [{
"lang": "/lang/en",
"value": "William Blake Crump"
}],
"name": "Blake Edwards",
"type": "/film/director"
},
{
"/common/topic/alias": [
{
"lang": "/lang/en",
"value": "Donn Alan Pennebaker"
},
{
"lang": "/lang/en",
"value": "D.A. Pennabaker"
},
{
"lang": "/lang/en",
"value": "Don Alan Pennebaker"
},
{
"lang": "/lang/en",
"value": "Penny"
}
],
"name": "D. A. Pennebaker",
"type": "/film/director"
},
{
"/common/topic/alias": [
{
"lang": "/lang/en",
"value": "The Wizard"
},
{
"lang": "/lang/en",
"value": "Zachary Edward Snyder"
}
],
"name": "Zack Snyder",
"type": "/film/director"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample60(self):
"""mqldoc_example_60."""
query = """
[
{
"limit": 3,
"type": "/film/director",
"name": null,
"/common/topic/alias": [
{
"lang": "/lang/en",
"optional": true,
"value": null
}
]
}
]
"""
exp_response = """
[
{
"/common/topic/alias": [{
"lang": "/lang/en",
"value": "William Blake Crump"
}],
"name": "Blake Edwards",
"type": "/film/director"
},
{
"/common/topic/alias": [
{
"lang": "/lang/en",
"value": "Donn Alan Pennebaker"
},
{
"lang": "/lang/en",
"value": "D.A. Pennabaker"
},
{
"lang": "/lang/en",
"value": "Don Alan Pennebaker"
},
{
"lang": "/lang/en",
"value": "Penny"
}
],
"name": "D. A. Pennebaker",
"type": "/film/director"
},
{
"/common/topic/alias": [],
"name": "Chris Hegedus",
"type": "/film/director"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample61(self):
"""mqldoc_example_61."""
query = """
{
"album": {
"optional": true,
"return": "count",
"name": "Arrested"
},
"type": "/music/artist",
"name": "The Police"
}
"""
exp_response = """
{
"album": 0,
"type": "/music/artist",
"name": "The Police"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample62(self):
"""mqldoc_example_62."""
query = """
[
{
"track": "Masters of War",
"album": {
"optional": "forbidden",
"name": "Greatest Hits"
},
"type": "/music/artist",
"name": null,
"limit": 3
}
]
"""
exp_response = """
[
{
"album": null,
"track": "Masters of War",
"type": "/music/artist",
"name": "Don McLean"
},
{
"album": null,
"track": "Masters of War",
"type": "/music/artist",
"name": "The Staple Singers"
},
{
"album": null,
"track": "Masters of War",
"type": "/music/artist",
"name": "Judy Collins"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample63(self):
"""mqldoc_example_63."""
query = """
[
{
"limit": 3,
"nor:album": {
"optional": "forbidden",
"name": "The Best Of"
},
"type": "/music/artist",
"name": null,
"neither:album": {
"optional": "forbidden",
"name": "Greatest Hits"
}
}
]
"""
exp_response = """
[
{
"nor:album": null,
"type": "/music/artist",
"name": "Blonde Redhead",
"neither:album": null
},
{
"nor:album": null,
"type": "/music/artist",
"name": "Bruce Cockburn",
"neither:album": null
},
{
"nor:album": null,
"type": "/music/artist",
"name": "Buck Owens",
"neither:album": null
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample64(self):
"""mqldoc_example_64."""
query = """
[
{
"limit": 3,
"album": {
"optional": "forbidden",
"id": null
},
"type": "/music/artist",
"name": null
}
]
"""
exp_response = """
[
{
"album": null,
"type": "/music/artist",
"name": "Bill Clinton"
},
{
"album": null,
"type": "/music/artist",
"name": "Domenico Alberti"
},
{
"album": null,
"type": "/music/artist",
"name": "Donny the Punk"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample65(self):
"""mqldoc_example_65."""
query = """
{
"track": [
{
"limit": 3,
"length": null,
"name": null
}
],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
exp_response = """
{
"track": [{
"length": 272.666,
"name": "Roxanne '97 (Puff Daddy remix)"
},
{
"length": 234.10599999999999,
"name": "Don't Stand So Close to Me"
},
{
"length": 192.90600000000001,
"name": "Roxanne"
}],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample66(self):
"""mqldoc_example_66."""
query = """
{
"track": [
{
"length": null,
"name": null,
"length>": 475
}
],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
exp_response = """
{
"track": [
{
"length": 533.78599999999994,
"name": "The Bed's Too Big Without You"
},
{
"length": 476.82600000000002,
"name": "Can't Stand Losing You"
},
{
"length": 479.733,
"name": "Walking on the Moon (Roger Sanchez Darkside of the Moon mix)"
},
{
"length": 527.06600000000003,
"name": "The Bed's Too Big Without You"
},
{
"length": 490.89299999999997,
"name": "Roxanne"
}
],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample67(self):
"""mqldoc_example_67."""
query = """
{
"track": [
{
"length": null,
"length>=": 420,
"name": null,
"length<": 480
}
],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
exp_response = """
{
"track": [
{
"length": 450.02600000000001,
"name": "So Lonely"
},
{
"length": 456,
"name": "So Lonely"
},
{
"length": 476.82600000000002,
"name": "Can't Stand Losing You"
},
{
"length": 479.733,
"name": "Walking on the Moon (Roger Sanchez Darkside of the Moon mix)"
},
{
"length": 456.39999999999998,
"name": "Voices Inside My Head (E Smoove Pump mix)"
},
{
"length": 474.44,
"name": "I Can't Stand Losing You"
},
{
"length": 424.12,
"name": "Voices Inside My Head / When the World Is Running Down, You Make the Best of What's Still Around"
}
],
"type": "/music/artist",
"id": "/en/the_police"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample68(self):
"""mqldoc_example_68."""
query = """
[
{
"initial_release_date": null,
"initial_release_date<=": "2009",
"initial_release_date>=": "1999",
"directed_by": "Francis Ford Coppola",
"type": "/film/film",
"name": null
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"initial_release_date": "2000",
"name": "Supernova",
"directed_by": "Francis Ford Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2007-10-26",
"name": "Youth Without Youth",
"directed_by": "Francis Ford Coppola"
},
{
"type": "/film/film",
"initial_release_date": "2001-08-03",
"name": "Apocalypse Now Redux",
"directed_by": "Francis Ford Coppola"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample69(self):
"""mqldoc_example_69."""
query = """
[
{
"name>=": "tl",
"name<": "tn",
"type": "/film/film",
"name": null
}
]
"""
exp_response = """
[
{
"type": "/film/film",
"name": "TMNT"
},
{
"type": "/film/film",
"name": "TMZ on TV"
},
{
"type": "/film/film",
"name": "Tlayucan"
},
{
"type": "/film/film",
"name": "TMA Ultimate Idol THE BEST 2 discs 8 hours"
},
{
"type": "/film/film",
"name": "TLC: Now & Forever: Video Hits"
},
{
"type": "/film/film",
"name": "TLC: Tables, Ladders & Chairs"
},
{
"type": "/film/film",
"name": "Tlatelolco68"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample70(self):
"""mqldoc_example_70."""
query = """
[
{
"limit": 3,
"name~=": "love",
"artist~=": "^The",
"type": "/music/track",
"name": null,
"artist": null
}
]
"""
exp_response = """
[
{
"type": "/music/track",
"name": "Love You Till Friday",
"artist": "The Replacements"
},
{
"type": "/music/track",
"name": "Love Spreads",
"artist": "The Stone Roses"
},
{
"type": "/music/track",
"name": "One Love",
"artist": "The Stone Roses"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample71(self):
"""mqldoc_example_71."""
query = """
[
{
"limit": 3,
"name~=": "^The *$",
"type": "/music/artist",
"name": null
}
]
"""
exp_response = """
[
{
"type": "/music/artist",
"name": "The Doors"
},
{
"type": "/music/artist",
"name": "The Beatles"
},
{
"type": "/music/artist",
"name": "The Penguins"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample72(self):
"""mqldoc_example_72."""
query = """
[
{
"limit": 3,
"name~=": "^The * *s$",
"type": "/music/artist",
"name": null
}
]
"""
exp_response = """
[
{
"type": "/music/artist",
"name": "The Beach Boys"
},
{
"type": "/music/artist",
"name": "The Righteous Brothers"
},
{
"type": "/music/artist",
"name": "The Rolling Stones"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample73(self):
"""mqldoc_example_73."""
query = """
[
{
"limit": 3,
"type": "/music/track",
"name": null,
"b:name~=": "love",
"a:name~=": "I"
}
]
"""
exp_response = """
[
{
"type": "/music/track",
"name": "I Want Your Love"
},
{
"type": "/music/track",
"name": "P.S. I Love You"
},
{
"type": "/music/track",
"name": "I Know My Love (feat. The Corrs)"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample74(self):
"""mqldoc_example_74."""
query = """
[
{
"atomic_number": null,
"sort": "atomic_number",
"atomic_number|=": [
1,
2,
3
],
"type": "/chemistry/chemical_element",
"name": null
}
]
"""
exp_response = """
[
{
"atomic_number": 1,
"type": "/chemistry/chemical_element",
"name": "Hydrogen"
},
{
"atomic_number": 2,
"type": "/chemistry/chemical_element",
"name": "Helium"
},
{
"atomic_number": 3,
"type": "/chemistry/chemical_element",
"name": "Lithium"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample75(self):
"""mqldoc_example_75."""
query = """
[
{
"english:name|=": [
"England",
"France"
],
"english:name": null,
"type": "/location/country",
"foreign:name": [
{
"lang": null,
"lang|=": [
"/lang/fr",
"/lang/es"
],
"value": null
}
]
}
]
"""
exp_response = """
[
{
"english:name": "England",
"type": "/location/country",
"foreign:name": [
{
"lang": "/lang/fr",
"value": "Angleterre"
},
{
"lang": "/lang/es",
"value": "Inglaterra"
}
]
},
{
"english:name": "France",
"type": "/location/country",
"foreign:name": [
{
"lang": "/lang/fr",
"value": "France"
},
{
"lang": "/lang/es",
"value": "Francia"
}
]
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample76(self):
"""mqldoc_example_76."""
query = """
[
{
"type": "/music/album",
"release_type": null,
"artist": "William Shatner"
}
]
"""
exp_response = """
[
{
"type": "/music/album",
"release_type": "Album",
"artist": "William Shatner"
},
{
"type": "/music/album",
"release_type": "Album",
"artist": "William Shatner"
},
{
"type": "/music/album",
"release_type": "Live Album",
"artist": "William Shatner"
},
{
"type": "/music/album",
"release_type": "Single",
"artist": "William Shatner"
},
{
"type": "/music/album",
"release_type": "Album",
"artist": "William Shatner"
},
{
"type": "/music/album",
"release_type": null,
"artist": "William Shatner"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample77(self):
"""mqldoc_example_77."""
query = """
{
"type": "/music/album",
"return": "count",
"release_type!=": "Live Album",
"artist": "William Shatner"
}
"""
self.DoQuery(query)
assert self.mql_result.result == 4
def testMqldocExample78(self):
"""mqldoc_example_78."""
query = """
{
"type": "/music/album",
"return": "count",
"release_type": {
"optional": "forbidden",
"name": "Live Album"
},
"artist": "William Shatner"
}
"""
self.DoQuery(query)
assert self.mql_result.result == 5
def testMqldocExample79(self):
"""mqldoc_example_79."""
query = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": {},
"name": "Lost in Translation"
}
}
"""
exp_response = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": {
"master_property": "/film/film/directed_by",
"type": "/type/link",
"reverse": true
},
"name": "Lost in Translation"
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample80(self):
"""mqldoc_example_80."""
query = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": null,
"name": "Lost in Translation"
}
}
"""
exp_response = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": "/film/film/directed_by",
"name": "Lost in Translation"
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample81(self):
"""mqldoc_example_81."""
query = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": {
"*": null
},
"name": "Lost in Translation"
}
}
"""
exp_response = """
{
"id": "/en/sofia_coppola",
"/film/director/film": {
"link": {
"attribution": "/user/mwcl_infobox",
"reverse": true,
"creator": "/user/mwcl_infobox",
"master_property": "/film/film/directed_by",
"source": "Lost in Translation",
"valid": true,
"timestamp": "2006-11-30T19:17:55.0020Z",
"operation": "insert",
"type": "/type/link",
"target_value": null,
"target": "Sofia Coppola"
},
"name": "Lost in Translation"
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample82(self):
"""mqldoc_example_82."""
query = """
{
"timestamp": null,
"id": "/en/sofia_coppola",
"/film/director/film": {
"timestamp": null,
"link": {
"timestamp": null,
"creator": null
},
"name": "Lost in Translation",
"creator": null
},
"creator": null
}
"""
exp_response = """
{
"timestamp": "2006-10-22T15:08:38.0048Z",
"id": "/en/sofia_coppola",
"/film/director/film": {
"timestamp": "2006-10-22T15:14:08.0061Z",
"link": {
"timestamp": "2006-11-30T19:17:55.0020Z",
"creator": "/user/mwcl_infobox"
},
"name": "Lost in Translation",
"creator": "/user/metaweb"
},
"creator": "/user/metaweb"
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample83(self):
"""mqldoc_example_83."""
query = """
{
"id": "/en/spain",
"name": {
"link": {
"creator": null,
"target": "French",
"target_value": null
}
}
}
"""
exp_response = """
{
"id": "/en/spain",
"name": {
"link": {
"creator": "/user/mwcl_wikipedia_en",
"target": "French",
"target_value": "Espagne"
}
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample84(self):
"""mqldoc_example_84."""
query = """
{
"/type/reflect/any_master": [
{
"limit": 3,
"link": null,
"name": null
}
],
"/type/reflect/any_reverse": [
{
"limit": 3,
"link": null,
"name": null
}
],
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"limit": 3,
"link": null,
"value": null
}
]
}
"""
exp_response = """
{
"/type/reflect/any_master": [
{
"link": "/type/object/permission",
"name": "Global Write Permission"
},
{
"link": "/type/object/type",
"name": "Topic"
},
{
"link": "/common/topic/article",
"name": null
}
],
"/type/reflect/any_reverse": [
{
"link": "/film/performance/film",
"name": null
},
{
"link": "/film/performance/film",
"name": null
},
{
"link": "/film/performance/film",
"name": null
}
],
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"link": "/type/object/name",
"value": "The Gumball Rally"
},
{
"link": "/film/film/initial_release_date",
"value": "1976"
},
{
"link": "/film/film/tagline",
"value": "It's a hilarious coast-to-coast, 180 mile-an-hour, go-for-broke, outrageous road race with the world's most expensive cars. And it's all just for glory and a gumball machine."
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample85(self):
"""mqldoc_example_85."""
query = """
{
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"lang": null,
"link": null,
"type": "/type/text",
"value": null
}
]
}
"""
exp_response = """
{
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"lang": "/lang/en",
"type": "/type/text",
"link": "/type/object/name",
"value": "The Gumball Rally"
},
{
"lang": "/lang/en",
"type": "/type/text",
"link": "/film/film/tagline",
"value": "It's a hilarious coast-to-coast, 180 mile-an-hour, go-for-broke, outrageous road race with the world's most expensive cars. And it's all just for glory and a gumball machine."
},
{
"lang": "/lang/it",
"type": "/type/text",
"link": "/type/object/name",
"value": "La corsa pi\u00f9 pazza del mondo"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample86(self):
"""mqldoc_example_86."""
query = """
{
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"*": null,
"link": {
"master_property": null,
"target": {
"optional": true,
"id": null
}
}
}
]
}
"""
exp_response = """
{
"id": "/en/the_gumball_rally",
"/type/reflect/any_value": [
{
"type": "/type/text",
"link": {
"master_property": "/type/object/name",
"target": {
"id": "/lang/en"
}
},
"value": "The Gumball Rally"
},
{
"type": "/type/datetime",
"link": {
"master_property": "/film/film/initial_release_date",
"target": null
},
"value": "1976"
},
{
"type": "/type/text",
"link": {
"master_property": "/film/film/tagline",
"target": {
"id": "/lang/en"
}
},
"value": "It's a hilarious coast-to-coast, 180 mile-an-hour, go-for-broke, outrageous road race with the world's most expensive cars. And it's all just for glory and a gumball machine."
},
{
"type": "/type/text",
"link": {
"master_property": "/type/object/name",
"target": {
"id": "/lang/it"
}
},
"value": "La corsa pi\u00f9 pazza del mondo"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample87(self):
"""mqldoc_example_87."""
query = """
[
{
"limit": 3,
"first:/type/reflect/any_master": {
"link": null,
"name": "Sting"
},
"type": [],
"id": null,
"second:/type/reflect/any_master": {
"link": null,
"name": "The Police"
}
}
]
"""
exp_response = """
[
{
"first:/type/reflect/any_master": {
"link": "/music/group_membership/member",
"name": "Sting"
},
"type": [
"/music/group_membership"
],
"id": "/m/01t4k16",
"second:/type/reflect/any_master": {
"link": "/music/group_membership/group",
"name": "The Police"
}
},
{
"first:/type/reflect/any_master": {
"link": "/freebase/user_profile/favorite_music_artists",
"name": "Sting"
},
"type": [
"/type/user",
"/type/namespace",
"/freebase/user_profile"
],
"id": "/user/saraw524",
"second:/type/reflect/any_master": {
"link": "/freebase/user_profile/favorite_music_artists",
"name": "The Police"
}
},
{
"first:/type/reflect/any_master": {
"link": "/freebase/user_profile/favorite_music_artists",
"name": "Sting"
},
"type": [
"/type/user",
"/type/namespace",
"/freebase/user_profile"
],
"id": "/user/webgrrlie",
"second:/type/reflect/any_master": {
"link": "/freebase/user_profile/favorite_music_artists",
"name": "The Police"
}
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample88(self):
"""mqldoc_example_88."""
query = """
[
{
"sort": "-timestamp",
"target_value": null,
"master_property": "/type/object/name",
"source": {},
"valid": false,
"limit": 1,
"timestamp": null,
"type": "/type/link"
}
]
"""
exp_response = """
[
{
"target_value": "Kim Possible - The Villain Files.jpg",
"master_property": "/type/object/name",
"source": {
"type": [
"/common/image",
"/type/content"
],
"id": "/m/0h88y5p",
"name": "Kim Possible: The Villain Files"
},
"valid": false,
"timestamp": "2011-10-23T11:46:50.0003Z",
"type": "/type/link"
}
]
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample89(self):
"""mqldoc_example_89."""
query = """
{
"id": "/finance/currency",
"name": {
"lang": "/lang/en",
"link": {
"timestamp": null
},
"value": null
}
}
"""
exp_response = """
{
"id": "/finance/currency",
"name": {
"lang": "/lang/en",
"link": {
"timestamp": "2007-03-25T00:33:28.0000Z"
},
"value": "Currency"
}
}
"""
self.DoQuery(query, exp_response=exp_response)
def testMqldocExample90(self):
"""mqldoc_example_90."""
query = """
{
"id": "/finance/currency",
"name": [
{
"lang": "/lang/en",
"link": {
"timestamp": null,
"valid": null
},
"value": null
}
]
}
"""
exp_response = """
{
"id": "/finance/currency",
"name": [
{
"lang": "/lang/en",
"link": {
"timestamp": "2006-10-22T07:34:51.0008Z",
"valid": false
},
"value": "currency"
},
{
"lang": "/lang/en",
"link": {
"timestamp": "2007-03-25T00:33:28.0000Z",
"valid": true
},
"value": "Currency"
}
]
}
"""
self.DoQuery(query, exp_response=exp_response)
if __name__ == '__main__':
mql_fixture.main()
| apache-2.0 |
devs4v/devs4v-information-retrieval15 | project/venv/lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/models.py | 396 | 2158 | """
The GeometryColumns and SpatialRefSys models for the PostGIS backend.
"""
from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class PostGISGeometryColumns(models.Model):
"""
The 'geometry_columns' table from the PostGIS. See the PostGIS
documentation at Ch. 4.3.2.
On PostGIS 2, this is a view.
"""
f_table_catalog = models.CharField(max_length=256)
f_table_schema = models.CharField(max_length=256)
f_table_name = models.CharField(max_length=256)
f_geometry_column = models.CharField(max_length=256)
coord_dimension = models.IntegerField()
srid = models.IntegerField(primary_key=True)
type = models.CharField(max_length=30)
class Meta:
app_label = 'gis'
db_table = 'geometry_columns'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store the feature table
name.
"""
return 'f_table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store the feature
geometry column.
"""
return 'f_geometry_column'
def __str__(self):
return "%s.%s - %dD %s field (SRID: %d)" % \
(self.f_table_name, self.f_geometry_column,
self.coord_dimension, self.type, self.srid)
class PostGISSpatialRefSys(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from PostGIS. See the PostGIS
documentaiton at Ch. 4.2.1.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
srtext = models.CharField(max_length=2048)
proj4text = models.CharField(max_length=2048)
class Meta:
app_label = 'gis'
db_table = 'spatial_ref_sys'
managed = False
@property
def wkt(self):
return self.srtext
@classmethod
def wkt_col(cls):
return 'srtext'
| mit |
Esri/public-transit-tools | transit-network-analysis-tools/CreatePercentAccessPolygon.py | 1 | 18541 | ################################################################################
## Toolbox: Transit Network Analysis Tools
## Tool name: Create Percent Access Polygons
## Created by: David Wasserman, Fehr & Peers, https://github.com/d-wasserman
## and: Melinda Morang, Esri
## Last updated: 17 June 2019
################################################################################
''''''
################################################################################
'''Copyright 2018 Fehr & Peers
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
################################################################################
'''Copyright 2019 Esri
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
################################################################################
import sys
import os
import time
import uuid
import arcpy
# Create a GUID for temporary outputs (avoids naming conflicts)
guid = uuid.uuid4().hex
def create_polygon_raster_template(in_polys, outgdb, cell_size):
'''Creates a raster-like polygon feature class covering the area of the original time lapse polygons. Each polygon
in the output is equivalent to one square of a raster. The dataset is meant to be used with Spatial Join with the
original time lapse polygon dataset in order to count the number of original polygons overlapping that cell.
Params:
in_polys: path to the input time lapse polygon dataset generated from the Prepare Time Lapse Polygons tool.
outgdb: path of workspace being used to store output from this tool
cell_size: The length or width (not area) of the desired raster cell, in the units of the spatial reference of the
'''
# Convert the full time lapse dataset into a temporary raster. The cell values are irrelvant.
poly_oid = arcpy.Describe(in_polys).OIDFieldName
temp_raster = os.path.join(outgdb, "Temp_" + guid + "_InitialRaster")
arcpy.conversion.FeatureToRaster(in_polys, poly_oid, temp_raster, cell_size=cell_size)
# Create a temporary point dataset with one point for the centroid of every raster cell
# The value of the points is irrelevant. We just need their geometry and an OID.
temp_points = os.path.join(outgdb, "Temp_" + guid + "_Points")
arcpy.conversion.RasterToPoint(temp_raster, temp_points)
# Create a new raster from the points with the same cell size as the initial raster. Set the value of each cell
# equal to the value of the OID of the point it was created from. This way, each cell has a unique value.
pt_oid = arcpy.Describe(temp_points).OIDFieldName
temp_raster2 = os.path.join(outgdb, "Temp_" + guid + "_ProcessedRaster")
arcpy.conversion.FeatureToRaster(temp_points, pt_oid, temp_raster2, cell_size=cell_size)
# Convert this raster to polygons. The result contains one square polygon per raster cell and can be used for
# calculating spatial joins with the original time lapse polygon dataset.
poly_raster_template_fc = os.path.join(outgdb, "Temp_" + guid + "_PolyRasterTemplate")
arcpy.conversion.RasterToPolygon(temp_raster2, poly_raster_template_fc, simplify=False)
# Clean up intermediate outputs
clean_up = [temp_raster, temp_points, temp_raster2]
for temp_output in clean_up:
arcpy.management.Delete(temp_output)
return poly_raster_template_fc
def generate_field_map(in_time_lapse_polys, fields_to_preserve):
'''Create a FieldMappings object to use in Spatial Join. For our application, we only want to preserve a few fields
for informational purposes. We expect all these field values to be the same, so use the "First" rule so the output
polygon will just keep the same value as the inputs. All other fields in the input data will not be transferred to
the output.
Params:
in_time_lapse_polys: Time lapse polygon feature class from which to retrieve the fields
fields_to_preserve: A list of field names we want to keep for the output.
'''
field_mappings = arcpy.FieldMappings()
for field in fields_to_preserve:
fmap = arcpy.FieldMap()
fmap.addInputField(in_time_lapse_polys, field)
fmap.mergeRule = "First"
field_mappings.addFieldMap(fmap)
return field_mappings
def create_raw_cell_counts_fc(selected_time_lapse_polys, in_poly_raster_template, temp_spatial_join_fc, fmaps,
match_option):
'''Do a spatial join in order to count the number of time lapse polygons intersect each "cell" in the raster-like
polylgon template. We are effectively applying the template to a specific set of time lapse polygons, doing the
count, and creating the raw output. The result is a polygon feature class of raster-like cells with a field called
Join_Count that shows the number of input time lapse polygons that intersect the cell using the specified
match_option.
Params:
selected_time_lapse_polys: Set (or subset) of time lapse polygons to use
in_poly_raster_template: The raster-like polygon feature class produced from create_polygon_raster_template()
temp_spatial_join_fc: Path to a temporary output FC which we will overwrite each time this method is called and then
delete at the end of the tool during clean-up
fmaps: FieldMappings object indicating which fields to preserve
match_options: match_options parameter for the Spatial Join tool
'''
arcpy.analysis.SpatialJoin(
in_poly_raster_template,
selected_time_lapse_polys,
temp_spatial_join_fc,
"JOIN_ONE_TO_ONE", # Output keeps only one copy of each "cell" when multiple time lapse polys intersect it
"KEEP_COMMON", # Delete any "cells" that don't overlap the time lapse polys being considered
field_mapping=fmaps, # Preserve some fields from the original data
match_option=match_option
)
def dissolve_raw_cell_counts_fc(raw_cell_count_fc, out_fc, fields_to_preserve, num_time_steps):
'''Currently, the feature class contains a large number of little square polygons representing raster cells. The
Join_Count field added by Spatial Join says how many of the input time lapse polygons overlapped the cell. We
don't need all the little squares. We can dissolve them so that we have one polygon per unique value of
Join_Count. Also calculate a field showing the Percent of times each polygon was reached.
Params:
raw_cell_count_fc: The feature class of raster-like polygons created from create_raw_cell_counts_fc()
out_fc: Path to output feature class
fields_to_preserve: Informational fields to preserve in the output
num_time_steps: Number of time steps used in the overall time lapse polygons
'''
arcpy.management.Dissolve(raw_cell_count_fc, out_fc, fields_to_preserve + ["Join_Count"])
# Add a field converting the raw count to the percent of total times accessed
percent_field = "Percent"
arcpy.management.AddField(out_fc, percent_field, "DOUBLE")
expression = "float(!Join_Count!) * 100.0 / float(%d)" % num_time_steps
arcpy.management.CalculateField(out_fc, percent_field, expression, "PYTHON_9.3")
def create_percent_access_polys(raw_cell_counts, percents, out_fc, fields_to_preserve, scratch_workspace):
'''For each percent threshold, dissolve the cells where the number of times reached exceeds the threshold. Each
threshold gets its own polygon, and they are all output to the same feature class.
Params:
raw_cell_counts: Feature class of cell-like polygons with counts generated from create_raw_cell_counts_fc()
count_field: The field in raw_cell_counts containing the number of times the cell was reached
percents: List of percents to calculate results for. Example: 80 means crate a polygon representing the area that
could be reached for at least 80% of start times.
num_time_steps: The total number of time steps present in the input time lapse polygon dataset
out_fc: Path of the output feature class for storing the percent access polygons
'''
first = True
temp_out_dissolve_fc = os.path.join(scratch_workspace, "Temp_" + guid + "_Dissolve")
for percent in sorted(percents):
# Select all the cells where the number of times with access is >= our percent threshold
# The result is all the cells that are reachable at least X% of start times
query = arcpy.AddFieldDelimiters(raw_cell_counts, "Percent") + " >= " + str(percent)
percent_layer = arcpy.management.MakeFeatureLayer(raw_cell_counts, "PercentLayer", query).getOutput(0)
# Dissolve everything that meets the threshold into one polygon
if first:
out_Dissolve = out_fc
else:
out_Dissolve = temp_out_dissolve_fc
arcpy.management.Dissolve(percent_layer, out_Dissolve, fields_to_preserve)
percent_field = "Percent"
arcpy.management.AddField(out_Dissolve, percent_field, "DOUBLE")
arcpy.management.CalculateField(out_Dissolve, percent_field, str(percent))
if not first:
# If this wasn't the first percent output, append it to the master output fc
arcpy.management.Append(out_Dissolve, out_fc, "TEST")
first = False
# Clean up temporary output
if arcpy.Exists(temp_out_dissolve_fc):
arcpy.management.Delete(temp_out_dissolve_fc)
def main(in_time_lapse_polys, out_cell_counts_fc, cell_size, out_percents_fc=None, percents=None):
    """Create 'typical access polygons' that represent the area reachable by transit across a time window.
    The tool attempts to account for the dynamic nature of transit schedules by overlaying service area polygons from
    multiple times of day and summarizing the results in terms of the number or percentage of the input polygons that
    cover an area. Areas covered by a larger percentage of input polygons were reached at more start times and are
    consequently more frequently accessible to travelers.
    The tool output will show you the percentage of times any given area was reached, and you can also choose to
    summarize these results for different percentage thresholds. For example, you can find out what area can be reached
    at least 75% of start times.
    Parameters:
    in_time_lapse_polys: A polygon feature class created using the Prepare Time Lapse Polygons tool that you wish to
        summarize. The feature class must be in a projected coordinate system.
    out_cell_counts_fc: The main output feature class of the tool. Must be in a geodatabase; it cannot be a shapefile.
    cell_size: This tool rasterizes the input polygons, essentially turning the study area into little squares. This is
        the size for these squares. The cell size refers to the width or length of the cell, not the area. The units for
        the cell size are the linear units of the projected coordinate system of the input time lapse polygons.
    out_percents_fc: Optional output feature class that further summarizes the output percent access polygons feature
        class. If you specify one or more percentage thresholds, this output contains polygons showing the area reached
        at least as often as your designated percentage thresholds. There will be a separate feature for each percentage
        threshold for each unique combination of FacilityID, FromBreak, and ToBreak in the input data.
    percents: You can choose to summarize the tool's raw output for different percentage thresholds. For example, you
        can find out what area can be reached at least 75% of start times by setting 75 as one of your percentage
        thresholds. Specified as a list of percents. Defaults to no thresholds.
    """
    # Fixed: was a mutable default argument (percents=[]), which is shared across calls.
    if percents is None:
        percents = []
    arcpy.env.overwriteOutput = True
    # Use the scratchGDB as a holder for temporary output
    scratchgdb = arcpy.env.scratchGDB
    # List of percent of times accessed to summarize in results; meaningless without an output fc for them.
    if not out_percents_fc:
        percents = []
    # Hard-coded "options"
    # Field names that must be in the input time lapse polygons
    facility_id_field = "FacilityID"
    name_field = "Name"
    frombreak_field = "FromBreak"
    tobreak_field = "ToBreak"
    time_field = "TimeOfDay"
    # Match option to use in the spatial join
    match_option = "HAVE_THEIR_CENTER_IN"
    # Fields we want to keep around in the output
    fields_to_preserve = [facility_id_field, name_field, frombreak_field, tobreak_field]
    # Create the raster-like polygons we'll use later with spatial joins.
    arcpy.AddMessage("Rasterizing time lapse polygons...")
    poly_raster_template_fc = create_polygon_raster_template(in_time_lapse_polys, scratchgdb, cell_size)
    # Figure out the unique combinations of FacilityID, FromBreak, and ToBreak in the input data. Each of these will
    # be processed sequentially and get a separate output.
    # Also count the number of unique times of day that were used in the original analysis so we can calculate % later.
    unique_output_combos = []
    unique_times = []
    fields = [facility_id_field, frombreak_field, tobreak_field, time_field]
    with arcpy.da.SearchCursor(in_time_lapse_polys, fields) as cur:
        for row in cur:
            unique_output_combos.append((row[0], row[1], row[2]))
            unique_times.append(row[3])
    unique_output_combos = sorted(set(unique_output_combos))
    num_time_steps = len(set(unique_times))
    # For each set of time lapse polygons, generate the cell-like counts
    first = True
    temp_spatial_join_fc = os.path.join(scratchgdb, "Temp_" + guid + "_SpatialJoin")
    temp_raw_dissolve_fc = os.path.join(scratchgdb, "Temp_" + guid + "_RawDissolve")
    # Fixed: hoisted out of the loop (it is loop-invariant), which also guarantees the name
    # exists for the cleanup list below even when there are no combos to process.
    temp_selected_polys = os.path.join(scratchgdb, "Temp_" + guid + "_SelectedPolys")
    for combo in unique_output_combos:
        facility_id = combo[0]
        from_break = combo[1]
        to_break = combo[2]
        if facility_id is None:
            msg = "Processing FacilityID <Null>, FromBreak %d, ToBreak %d" % (from_break, to_break)
        else:
            msg = "Processing FacilityID %i, FromBreak %d, ToBreak %d" % (facility_id, from_break, to_break)
        arcpy.AddMessage(msg + "...")
        # Select the subset of polygons for this FacilityID/FromBreak/ToBreak combo
        # Note: Don't use a feature layer and Select By Attributes because of a bug with field mapping in Spatial Join
        # in 10.6 which caused field maps to be ignored for layers.
        if facility_id is None:
            facility_query = arcpy.AddFieldDelimiters(in_time_lapse_polys, facility_id_field) + " IS NULL"
        else:
            facility_query = arcpy.AddFieldDelimiters(in_time_lapse_polys, facility_id_field) + " = " + str(facility_id)
        query = facility_query + " AND " + \
            arcpy.AddFieldDelimiters(in_time_lapse_polys, frombreak_field) + " = " + str(from_break) + " AND " + \
            arcpy.AddFieldDelimiters(in_time_lapse_polys, tobreak_field) + " = " + str(to_break)
        arcpy.analysis.Select(in_time_lapse_polys, temp_selected_polys, query)
        # Create a FieldMappings object for Spatial Join to preserve informational input fields
        fmaps = generate_field_map(temp_selected_polys, fields_to_preserve)
        # Count the number of time lapse polygons that intersect each "cell" in the raster-like polygon template and
        # write out a new feature class to disk that shows the counts
        create_raw_cell_counts_fc(
            temp_selected_polys,
            poly_raster_template_fc,
            temp_spatial_join_fc,
            fmaps,
            match_option
        )
        # Dissolve all the little cells that were reached the same number of times to make the output more manageable
        if first:
            out_raw_dissolve = out_cell_counts_fc
        else:
            out_raw_dissolve = temp_raw_dissolve_fc
        dissolve_raw_cell_counts_fc(temp_spatial_join_fc, out_raw_dissolve, fields_to_preserve, num_time_steps)
        if not first:
            # If this wasn't the first output, append it to the master output fc
            arcpy.management.Append(out_raw_dissolve, out_cell_counts_fc, "TEST")
        # Finished with the first loop
        first = False
    # Dissolve the cell-like polygons that were accessible >= X% of times
    if percents:
        arcpy.AddMessage("Creating percent access polygons...")
        create_percent_access_polys(out_cell_counts_fc, percents, out_percents_fc, fields_to_preserve, scratchgdb)
    # Clean up intermediate outputs
    clean_up = [
        temp_selected_polys,
        poly_raster_template_fc,
        temp_spatial_join_fc,
        temp_raw_dissolve_fc
    ]
    for temp_output in clean_up:
        if arcpy.Exists(temp_output):
            arcpy.management.Delete(temp_output)
if __name__ == '__main__':
    # Feature class of polygons created by the Prepare Time Lapse Polygons tool
    # The feature class must be in a projected coordinate system, but this is checked in tool validation
    in_time_lapse_polys = sys.argv[1]
    out_cell_counts_fc = sys.argv[2]
    # Raster cell size for output (length or width of cell, not area)
    # NOTE(review): passed through as a string; presumably the downstream arcpy tools accept it -- confirm.
    cell_size = sys.argv[3]
    out_percents_fc = sys.argv[4]
    # List of percent of times accessed to summarize in results
    # NOTE(review): sys.argv[5] is a single string, not a list (script tools usually pass a
    # semicolon-delimited multivalue); main() iterates this value, so it likely needs to be
    # split/parsed first -- confirm the intended argument format.
    percents = sys.argv[5]
    main(in_time_lapse_polys, out_cell_counts_fc, cell_size, out_percents_fc, percents)
| apache-2.0 |
voidcc/PCTRL | pox/lib/packet/lldp.py | 41 | 14180 | # Copyright 2012 James McCauley
# Copyright 2008 (C) Nicira, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is derived from the packet library in NOX, which was
# developed by Nicira, Inc.
#======================================================================
# IEEE 802.1AB Link Layer Discovery Protocol (lldp) header
# (http://standards.ieee.org/getieee802/download/802.1AB-2005.pdf)
#
# Copyright (C) 2007 Nicira Networks
#
# Ethernet type = 0x88cc
# Destination MAC = 01:80:c2:00:00:0e (LLDP_MULTICAST)
#
# LLDPDU format
#
# +------+-----+-----+-------+-------+------+---------+---------------+
# | Chassis ID | Port ID | TTL | Optional ......| End of LLDPDU |
# | TLV | TLV | TLV |
# +------+-----+-----+-------+-------+------+---------+---------------+
#
# TLV Format
#
# +------------+---------------------+--------------------------------+
# | TLV type | TLV information | TLV information string |
# | | string length | |
# +------------+---------------------+--------------------------------+
#
# TLV Types:
#
# 0 - end of LLDPDU
# 1 - Chassis ID
# 2 - Port ID
# 3 - TTL
# 4 - Port description (optional)
# 5 - System name
# 6 - System description
# 7 - System capabilities
# 8 - Management address
# 127 - Organization specific TLVs
# 9-126 - reserved
#
# TODO:
# Error handling (malformed packets will definately cause this to puke)
#
#======================================================================
import struct
import time
from packet_utils import *
from packet_base import packet_base
from pox.lib.addresses import EthAddr
from pox.lib.util import initHelper
import logging
lg = logging.getLogger('packet')
#======================================================================
# LLDP PDU
#======================================================================
class lldp (packet_base):
    "802.1 AB lldp pdu"
    # Minimum LLDPDU: mandatory Chassis ID, Port ID and TTL TLVs plus the END TLV.
    # chassis ID min = 2 + 1 + 1
    # PORT ID min = 2 + 1 + 1
    # TTL min = 2 + 2
    # End min = 2
    MIN_LEN = (4 + 4 + 4 + 2 )

    #TODO: Remove these from here (they should be at module scope)?
    END_TLV = 0
    CHASSIS_ID_TLV = 1
    PORT_ID_TLV = 2
    TTL_TLV = 3
    PORT_DESC_TLV = 4
    SYSTEM_NAME_TLV = 5
    SYSTEM_DESC_TLV = 6
    SYSTEM_CAP_TLV = 7
    MANAGEMENT_ADDR_TLV = 8
    ORGANIZATIONALLY_SPECIFIC_TLV = 127

    # Maps TLV type code -> parser class; populated at the bottom of this module.
    tlv_parsers = {}

    def __init__ (self, raw=None, prev=None, **kw):
        packet_base.__init__(self)
        self.prev = prev
        self.next = None
        self.tlvs = []
        if raw is not None:
            self.parse(raw)
        self._init(kw)

    def next_tlv(self, array):
        """Parse one TLV from the front of *array* and append it to self.tlvs.

        Returns the number of bytes consumed (2-byte header + payload), or
        None if the data is too short. Unknown types become unknown_tlv.
        """
        if len(array) < 2:
            self.msg('(lldp tlv parse) warning TLV data too short to read '
                     + 'type/len (%u)' % (len(array),))
            return
        (typelen,) = struct.unpack("!H",array[0:2])
        # TLV header word: high 7 bits are the type, low 9 bits the payload length.
        type = typelen >> 9
        length = typelen & 0x01ff
        if len(array) < length:
            self.msg('(lldp tlv parse) warning TLV data too short to parse (%u)'
                     % (len(array),))
            return
        if type in lldp.tlv_parsers:
            self.tlvs.append(lldp.tlv_parsers[type](array[0: 2 + length]))
            return 2 + length
        else:
            self.msg('(lldp tlv parse) warning unknown tlv type (%u)'
                     % (type,))
            self.tlvs.append(unknown_tlv(array[0: 2 + length]))
            return 2 + length

    def parse (self, raw):
        """Parse a full LLDPDU: Chassis ID, Port ID and TTL TLVs in that exact
        order, then optional TLVs terminated by an END TLV.

        Sets self.parsed = True only if the whole PDU parses cleanly.
        """
        assert isinstance(raw, bytes)
        self.raw = raw
        dlen = len(raw)
        if dlen < lldp.MIN_LEN:
            self.msg('(lldp parse) warning LLDP packet data too short to parse '
                     + 'header: data len %u' % (dlen,))
            return
        # point to the beginning of the pdu
        pduhead = 0
        # get Chassis ID
        ret = self.next_tlv(raw)
        if ret == None:
            self.msg( '(lldp parse) error parsing chassis ID tlv' )
            return
        pduhead += ret
        if self.tlvs[len(self.tlvs)-1].tlv_type != lldp.CHASSIS_ID_TLV:
            self.msg( '(lldp parse) error CHASSIS ID TLV missing' )
            return
        # get PORT ID
        ret = self.next_tlv(raw[pduhead:])
        if ret is None:
            self.msg( '(lldp parse) error parsing port ID TLV' )
            return
        pduhead += ret
        if self.tlvs[len(self.tlvs)-1].tlv_type != lldp.PORT_ID_TLV:
            self.msg( '(lldp parse) error port ID TLV missing' )
            return
        # get TTL
        ret = self.next_tlv(raw[pduhead:])
        if ret == None:
            self.msg( '(lldp parse) error parsing TTL TLV' )
            return
        pduhead += ret
        if self.tlvs[len(self.tlvs)-1].tlv_type != lldp.TTL_TLV:
            self.msg( '(lldp parse) error port TTL TLV missing' )
            return
        # Loop over all other TLVs until the END TLV (or run out of data)
        arr_len = len(raw)
        while True:
            ret = self.next_tlv(raw[pduhead:])
            if ret == None:
                self.msg( '(lldp parse) error parsing TLV' )
                return
            if self.tlvs[len(self.tlvs)-1].tlv_type == lldp.END_TLV:
                break
            if (pduhead + ret) >= arr_len:
                self.msg( '(lldp parse) error end of TLV list without END TLV' )
                return
            pduhead += ret
        self.parsed = True

    def add_tlv (self, tlv):
        # Append a TLV object for serialization by hdr().
        self.tlvs.append(tlv)

    def __str__ (self):
        lstr = ''
        for tlv in self.tlvs:
            lstr += str(tlv)
        return '[LLDP ' + lstr + ']'

    def hdr (self, payload):
        # Serialize all TLVs back to wire format, in order.
        packet = b''
        for tlv in self.tlvs:
            packet += tlv.pack()
        return packet
#======================================================================
# TLV definitions
#======================================================================
#NOTE: As with a bunch of the packet library, it'd be nice if things
# like TLVs inherited from some base class common to other
# "sub-packets" (and maybe even packets).
class tlv_base (object):
    """
    Supertype for LLDP TLVs

    Exists only so isinstance() can identify TLV objects; carries no behavior.
    """
    pass
class simple_tlv (tlv_base):
    """Generic TLV: a 2-byte type/length header followed by an opaque payload.

    Subclasses set tlv_type and override _init / _parse_data / _pack_data
    (and optionally _data_len) to give the payload structure.
    """
    tlv_type = None # Purposely illegal

    def __init__ (self, raw = None, **kw):
        self._init(kw)
        self.parsed = False
        if raw is not None:
            self.parse(raw)
        self._init_helper(kw)

    def _init_helper (self, kw):
        # Apply keyword-argument initialization (after any raw parsing above).
        if len(kw):
            if 'payload' in kw:
                self.payload = None
            initHelper(self, kw)
            self.parsed = True

    def parse (self, raw):
        # assume lldp has done the type/len checking
        (typelen,) = struct.unpack("!H", raw[0:2])
        # High 7 bits: type; low 9 bits: information string length.
        tlv_type = typelen >> 9
        if self.tlv_type is not None:
            assert self.tlv_type == tlv_type
        self.tlv_type = tlv_type
        strlen = typelen & 0x01ff
        data = raw[2:2+strlen]
        if len(data) < strlen:
            raise TruncatedException()
        self._parse_data(data)
        self.parsed = True

    @property
    def strlen (self):
        # Length of the TLV information string (payload only, excluding header).
        return self._data_len()

    def pack (self):
        # Serialize: pack the 7-bit type and 9-bit length into one 16-bit word.
        typelen = self.tlv_type << 9
        data = self._pack_data()
        typelen |= (len(data) & 0x01ff)
        return struct.pack('!H', typelen) + data

    def __str__ (self):
        return "<" + self.__class__.__name__ + ">"

    def _init (self, kw):
        """
        Initialize subclass-specific fields
        Override this.
        """
        pass

    def _data_len (self):
        """
        Returns length of the TLV information string
        Override this.
        """
        return len(self._pack_data())

    def _parse_data (self, data):
        """
        Store TLV information string
        Override this.
        """
        self.payload = data

    def _pack_data (self):
        """
        Return TLV information string
        Override this.
        """
        return self.payload
class unknown_tlv (simple_tlv):
    """
    Unknown TLVs are parsed into this class

    The raw information string is kept as-is in self.payload; tlv_type is
    taken from the parsed header (simple_tlv allows it when the class
    attribute is None).
    """
    tlv_type = None
class chassis_id (simple_tlv):
    """Chassis ID TLV (type 1): identifies the chassis of the transmitting device.

    The information string is a one-byte ID subtype followed by the ID itself.
    """
    tlv_type = lldp.CHASSIS_ID_TLV

    # ID subtypes (IEEE 802.1AB-2005 table 9-2)
    SUB_CHASSIS = 1 # IETF RFC 2737
    SUB_IF_ALIAS = 2 # IETF RFC 2863
    SUB_PORT = 3 # IETF RFC 2737
    SUB_MAC = 4 # IEEE Std 802-2001
    SUB_NETWORK = 5 #
    SUB_IF_NAME = 6 # IETF RFC 2863
    SUB_LOCAL = 7

    subtype_to_str = {}
    subtype_to_str[SUB_CHASSIS] = "chassis"
    subtype_to_str[SUB_IF_ALIAS] = "interface alias"
    subtype_to_str[SUB_PORT] = "port"
    subtype_to_str[SUB_MAC] = "mac"
    subtype_to_str[SUB_NETWORK] = "network"
    subtype_to_str[SUB_IF_NAME] = "interface name"
    subtype_to_str[SUB_LOCAL] = "local"

    def _init (self, kw):
        # Defaults until parsed or set via keyword args
        self.subtype = 0
        self.id = None

    def _parse_data (self, data):
        # Need at least the subtype byte plus a non-empty ID
        if len(data) < 2:
            raise MalformedException("TLV has invalid strlen")
        (self.subtype,) = struct.unpack("!B",data[0:1])
        self.id = data[1:]

    def _pack_data (self):
        return struct.pack("!B", self.subtype) + self.id

    def __str__ (self):
        if self.subtype == chassis_id.SUB_MAC:
            assert len(self.id) == 6
            id_str = str(EthAddr(self.id))
        else:
            id_str = ":".join(["%02x" % (ord(x),) for x in self.id])
        # Fixed typo in the label: was '<chasis ID:'
        return ''.join(['<chassis ID:',id_str,'>'])
class port_id (simple_tlv):
    """Port ID TLV (type 2): identifies the transmitting port on the chassis.

    The information string is a one-byte ID subtype followed by the ID itself.
    """
    tlv_type = lldp.PORT_ID_TLV

    # ID subtypes (IEEE 802.1AB-2005 table 9-3). Note the numbering differs
    # from chassis_id: here SUB_MAC is 3, while chassis_id.SUB_MAC is 4.
    SUB_IF_ALIAS = 1 # IETF RFC 2863
    SUB_PORT = 2 # IETF RFC 2737
    SUB_MAC = 3 # IEEE Std 802-2001
    SUB_NETWORK = 4 #
    SUB_IF_NAME = 5 # IETF RFC 2863
    SUB_CIRC_ID = 6 # IETF RFC 3046
    SUB_LOCAL = 7

    subtype_to_str = {}
    subtype_to_str[SUB_IF_ALIAS] = "interface alias"
    subtype_to_str[SUB_PORT] = "port"
    subtype_to_str[SUB_MAC] = "mac"
    subtype_to_str[SUB_NETWORK] = "network"
    subtype_to_str[SUB_IF_NAME] = "interface name"
    subtype_to_str[SUB_CIRC_ID] = "agent circuit ID"
    subtype_to_str[SUB_LOCAL] = "local"

    def _init (self, kw):
        # Defaults until parsed or set via keyword args
        self.subtype = 0
        self.id = None

    def _parse_data (self, data):
        # Need at least the subtype byte plus a non-empty ID
        if len(data) < 2:
            raise MalformedException("TLV has invalid strlen")
        (self.subtype,) = struct.unpack("!B",data[0:1])
        self.id = data[1:]

    def __str__ (self):
        # Fixed: previously compared against chassis_id.SUB_MAC (value 4),
        # which in port_id numbering is SUB_NETWORK -- so MAC-address port IDs
        # (subtype 3) were never formatted via EthAddr, and network-address
        # IDs hit the 6-byte MAC assert instead.
        if self.subtype == port_id.SUB_MAC:
            assert len(self.id) == 6
            id_str = str(EthAddr(self.id))
        else:
            id_str = ":".join(["%02x" % (ord(x),) for x in self.id])
        return ''.join(['<port ID:',id_str,'>'])

    def _pack_data (self):
        return struct.pack("!B", self.subtype) + self.id
class ttl (simple_tlv):
    """Time To Live TLV (type 3): seconds the receiver should keep this info."""
    tlv_type = lldp.TTL_TLV

    def _init (self, kw):
        # Default TTL until parsed or explicitly set
        self.ttl = 0

    def _parse_data (self, data):
        # The TTL information string is exactly one unsigned 16-bit value
        if len(data) != 2:
            raise MalformedException("TLV has invalid strlen (!= 2)")
        self.ttl = struct.unpack("!H", data)[0]

    def _pack_data (self):
        return struct.pack('!H', self.ttl)

    def __str__ (self):
        return '<ttl:' + str(self.ttl) + '>'
class end_tlv (simple_tlv):
    """End Of LLDPDU TLV (type 0): a zero-length terminator for the TLV list."""
    tlv_type = lldp.END_TLV

    def _parse_data (self, data):
        # The END TLV carries no information string at all
        if data:
            raise MalformedException("TLV has invalid strlen (!= 0)")

    def _pack_data (self):
        return b''

    def __str__ (self):
        return '<tlv end>'
class system_description (simple_tlv):
    """System Description TLV (type 6); the payload is an opaque text string."""
    tlv_type = lldp.SYSTEM_DESC_TLV
class management_address (simple_tlv):
    """Management Address TLV (type 8).

    Information string layout (802.1AB 9.5.9):
      1 byte   address string length (= 1 subtype byte + len(address))
      1 byte   address subtype
      N bytes  address
      1 byte   interface numbering subtype
      4 bytes  interface number (network byte order)
      1 byte   OID string length
      M bytes  object identifier
    """
    tlv_type = lldp.MANAGEMENT_ADDR_TLV

    def _init (self, kw):
        self.address_subtype = 0
        self.address = b''
        self.interface_numbering_subtype = 0
        self.interface_number = 0
        self.object_identifier = b''

    def _parse_data (self, data):
        # NOTE(review): ord() on indexed data is Python 2 style; on Python 3,
        # data[0] of a bytes object is already an int and ord() would raise
        # TypeError -- confirm the targeted Python version.
        asl = ord(data[0]) - 1  # address length, excluding the subtype byte
        self.address_subtype = ord(data[1])
        self.address = data[2:2+asl]
        self.interface_numbering_subtype = ord(data[2+asl])
        self.interface_number = struct.unpack("!L",
            data[2+asl+1:2+asl+1+4])[0]
        osl = ord(data[7+asl])  # 7 = 2 header-ish bytes + 1 if-subtype + 4 if-number
        self.object_identifier = data[7+asl+1:7+asl+1+osl]

    def _data_len (self):
        # length byte + subtype + address + if-subtype + if-number + OID length byte + OID
        return 1+1+len(self.address)+1+4+1+len(self.object_identifier)

    def _pack_data (self):
        r = struct.pack('!BB', len(self.address)+1, self.address_subtype)
        r += self.address
        r += struct.pack("!BLB", self.interface_numbering_subtype,
                         self.interface_number,
                         len(self.object_identifier))
        r += self.object_identifier
        return r
class system_name (simple_tlv):
    """System Name TLV (type 5); the payload is an opaque text string."""
    tlv_type = lldp.SYSTEM_NAME_TLV
class organizationally_specific (simple_tlv):
    """Organizationally Specific TLV (type 127).

    Information string: a 3-byte OUI, a 1-byte organizationally defined
    subtype, and an opaque organizationally defined payload.
    """
    tlv_type = lldp.ORGANIZATIONALLY_SPECIFIC_TLV

    def _init (self, kw):
        self.oui = '\x00\x00\x00'
        self.subtype = 0
        self.payload = b''

    def _parse_data (self, data):
        (self.oui,self.subtype) = struct.unpack("3sB", data[0:4])
        self.payload = data[4:]

    def _pack_data (self):
        return struct.pack('!3sB', self.oui, self.subtype) + self.payload
class port_description (simple_tlv):
    """Port Description TLV (type 4); the payload is an opaque text string."""
    tlv_type = lldp.PORT_DESC_TLV
class system_capabilities (simple_tlv):
    """System Capabilities TLV (type 7).

    Information string: two 16-bit words -- supported capabilities and
    enabled capabilities -- stored here as two 16-entry bool lists.
    """
    tlv_type = lldp.SYSTEM_CAP_TLV

    # Names for the low capability bits, indexed by bit position.
    cap_names = ["Other", "Repeater", "Bridge", "WLAN Access Point",
                 "Router", "Telephone", "DOCSIS cable device",
                 "Station Only"]

    def _init (self, kw):
        self.caps = [False] * 16
        self.enabled_caps = [False] * 16

    def _parse_data (self, data):
        (cap,en) = struct.unpack("!HH", data)
        del self.caps[:]
        del self.enabled_caps[:]
        for i in range(0, 16):
            # Fixed: used logical 'and' instead of bitwise '&'
            # ('cap and (1 << i)'), which marked every capability True
            # whenever any bit at all was set in the word.
            self.caps.append(True if (cap & (1 << i)) else False)
            self.enabled_caps.append(True if (en & (1 << i)) else False)

    def _pack_data (self):
        # Fold the bool lists back into the two 16-bit words.
        cap = 0
        en = 0
        for i in range(0, 16):
            if self.caps[i]: cap |= (1 << i)
            if self.enabled_caps[i]: en |= (1 << i)
        return struct.pack('!HH', cap, en)

    def __str__ (self):
        r = []
        for i in range(0, 16):
            if self.caps[i]:
                if i < len(self.cap_names):
                    s = self.cap_names[i]
                else:
                    s = "Capability " + str(i)
                s += ":" + ("On" if self.enabled_caps[i] else "Off")
                r.append(s)
        return "<Capabilities: " + ', '.join(r) + ">"
# Add parsers to main lldp class
for t in [chassis_id, port_id, ttl, system_name, system_description,
end_tlv, organizationally_specific, port_description,
system_capabilities, management_address]:
lldp.tlv_parsers[t.tlv_type] = t
| apache-2.0 |
google-research/google-research | aqt/jax/hlo_utils_test.py | 1 | 2653 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for aqt.jax.hlo_utils."""
from absl.testing import absltest
from absl.testing import parameterized
from flax import linen as nn
from jax import random
import jax.numpy as jnp
from aqt.jax import hlo_utils
class HloUtilsTest(parameterized.TestCase):
    """Tests for hlo_utils: loading HLO protos and counting ops by regex."""

    @parameterized.named_parameters(
        dict(
            testcase_name='one_add',
            fn=lambda x: x + 1,
            fn_args=[1],
            ops_regex=r'add',
            exp_count=1,
        ),
        dict(
            testcase_name='two_adds',
            fn=lambda x, y: x + y + 1,
            fn_args=[1, 2],
            ops_regex=r'add',
            exp_count=2,
        ),
        dict(
            testcase_name='one_mult',
            fn=lambda x, y: x * y,
            fn_args=[2, 3],
            ops_regex=r'multiply',
            exp_count=1,
        ),
    )
    def test_load_hlo_proto_from_jax_fn_and_count_ops(self, fn,
                                                      fn_args, ops_regex,
                                                      exp_count):
        """Traces fn to an HLO proto and checks the count of ops matching ops_regex."""
        hlo_proto = hlo_utils.load_hlo_proto_from_jax_fn(
            fn, *fn_args)
        count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, ops_regex=ops_regex)
        self.assertEqual(count, exp_count)

    class TestModelWith2DenseLayers(nn.Module):
        """Test model with two Dense layers."""

        @nn.compact
        def __call__(self, inputs, dtype=jnp.float32):
            x = nn.linear.Dense(features=2)(inputs)
            output = nn.linear.Dense(features=3)(x)
            return output

    def test_load_hlo_proto_from_model_and_count_ops(self):
        """Two Dense layers should produce exactly two dot (matmul) ops in the HLO."""
        input_shapes = [(1, 2)]
        # with nn.stateful() as init_state:
        test_model = self.TestModelWith2DenseLayers()
        init_state = test_model.init(
            random.PRNGKey(0), *[jnp.ones(shape) for shape in input_shapes])
        hlo_proto = hlo_utils.load_hlo_proto_from_model(test_model, init_state,
                                                        input_shapes)
        count = hlo_utils.count_ops_in_hlo_proto(hlo_proto, ops_regex=r'dot')
        self.assertEqual(count, 2)
if __name__ == '__main__':
    # Allow running this test module directly.
    absltest.main()
| apache-2.0 |
maestro-hybrid-cloud/heat | heat/common/param_utils.py | 7 | 2670 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
from heat.common.i18n import _
def extract_bool(name, value):
    """Convert any true/false string to its corresponding boolean value.
    Value is case insensitive.
    """
    normalized = str(value).lower()
    if normalized in ('true', 'false'):
        return strutils.bool_from_string(value, strict=True)
    raise ValueError(_('Unrecognized value "%(value)s" for "%(name)s", '
                       'acceptable values are: true, false.')
                     % {'value': value, 'name': name})
def extract_int(name, value, allow_zero=True, allow_negative=False):
    """Convert value to an int, enforcing zero/negative policies.

    A None value passes through unchanged; non-integer-like values raise
    ValueError naming the parameter.
    """
    if value is None:
        return None
    if not strutils.is_int_like(value):
        raise ValueError(_("Only integer is acceptable by "
                           "'%(name)s'.") % {'name': name})
    # Zero gets its own policy check before general conversion.
    if value in ('0', 0):
        if not allow_zero:
            raise ValueError(_("Only non-zero integer is acceptable by "
                               "'%(name)s'.") % {'name': name})
        return int(value)
    try:
        result = int(value)
    except (TypeError, ValueError):
        raise ValueError(_("Value '%(value)s' is invalid for '%(name)s' "
                           "which only accepts integer.") %
                         {'name': name, 'value': value})
    if allow_negative is False and result < 0:
        raise ValueError(_("Value '%(value)s' is invalid for '%(name)s' "
                           "which only accepts non-negative integer.") %
                         {'name': name, 'value': value})
    return result
def extract_tags(subject):
    """Split a comma-delimited string into tags, rejecting tags over 80 chars."""
    tags = subject.split(',')
    oversized = [tag for tag in tags if len(tag) > 80]
    if oversized:
        raise ValueError(_('Invalid tag, "%s" is longer than 80 '
                           'characters') % oversized[0])
    return tags
def extract_template_type(subject):
    """Normalize a template type string; only 'cfn' and 'hot' are valid."""
    template_type = subject.lower()
    if template_type in ('cfn', 'hot'):
        return template_type
    raise ValueError(_('Invalid template type "%(value)s", valid '
                       'types are: cfn, hot.') %
                     {'value': subject})
| apache-2.0 |
cybercomgroup/Big_Data | Cloudera/Code/million_song_dataset/Spark_scripts/spark_visualisehottnessbyartist.py | 1 | 1183 | from pyspark import SparkConf
from pyspark import SparkContext
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import random
#To run: PYSPARK_PYTHON=/opt/cloudera/parcels/Anaconda/bin/python spark-submit spark_visualisehottnessbyartist.py /user/cloudera/song/song_final.csv
def rddToPand(RDD):
    """Convert an RDD of comma-separated strings into a pandas DataFrame.

    The first collected element is treated as the CSV header; the remaining
    elements become data rows.
    NOTE(review): collects the entire RDD to the driver -- only safe for small data.
    """
    header = "temp"
    first = True
    data = []
    # Convert unicode to ascii
    # NOTE(review): str.encode('ascii') yields bytes on Python 3, where
    # bytes.split(',') raises TypeError -- this presumably targets Python 2. Confirm.
    for x in RDD.collect():
        if first:
            first = False
            header = x.encode("ascii").split(',')
        else:
            data.append(tuple(x.encode("ascii").split(',')))
    return pd.DataFrame.from_records(data, columns = header)
def test(row):
    """Blank out every field of *row* except indexes 3 and 5, in place.

    NOTE(review): appears unused in the script below; it relies on
    row.count() and item assignment, so presumably expects a pandas
    Series -- confirm before reusing.
    """
    for x in range(0, row.count()):
        if x!=3 and x!=5:
            row[x]=''
    return row
# Init Spark
conf = SparkConf()
conf.setMaster('yarn-client')
conf.setAppName('artisthotness-job')
sc = SparkContext(conf=conf)

# Load the songs CSV (path from the command line) and extract columns 3 and 5.
rdd = sc.textFile(str(sys.argv[1]))
# Fixed: both second-stage lambdas referenced an undefined name 'row'
# (lambda line: row[3]), which raised NameError on the workers; the lambda
# parameter itself must be the split row.
mapped = rdd.map(lambda line: line.split(',')).map(lambda row: row[3])
mapped2 = rdd.map(lambda line: line.split(',')).map(lambda row: row[5])
maps = mapped.join(mapped2)
# NOTE(review): 'mapped' now holds single field values, not CSV lines, while
# rddToPand expects comma-separated strings -- confirm intended input.
df = rddToPand(mapped)
# Write a small sample of the second column for inspection.
# Use a context manager (and avoid shadowing the builtin name 'file').
with open('visualise.txt', 'w') as out:
    out.write(str(mapped2.take(10)))
| gpl-3.0 |
40223141/lego | static/Brython3.1.1-20150328-091302/Lib/test/test_int.py | 765 | 12587 | import sys
import unittest
from test.support import run_unittest
# (string, expected result) cases for int(str) conversion; a value of
# ValueError marks inputs that must raise instead of converting.
L = [
    ('0', 0),
    ('1', 1),
    ('9', 9),
    ('10', 10),
    ('99', 99),
    ('100', 100),
    ('314', 314),
    (' 314', 314),
    ('314 ', 314),
    (' \t\t 314 \t\t ', 314),
    (repr(sys.maxsize), sys.maxsize),
    (' 1x', ValueError),
    (' 1 ', 1),
    (' 1\02 ', ValueError),
    ('', ValueError),
    (' ', ValueError),
    (' \t\t ', ValueError),
    ("\u0200", ValueError)
]
class IntTestCases(unittest.TestCase):
def test_basic(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
self.assertEqual(int("-3"), -3)
self.assertEqual(int(" -3 "), -3)
self.assertEqual(int("\N{EM SPACE}-3\N{EN SPACE}"), -3)
# Different base:
self.assertEqual(int("10",16), 16)
# Test conversion from strings and various anomalies
for s, v in L:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except ValueError:
pass
s = repr(-1-sys.maxsize)
x = int(s)
self.assertEqual(x+1, -sys.maxsize)
self.assertIsInstance(x, int)
# should return int
self.assertEqual(int(s[1:]), sys.maxsize+1)
# should return int
x = int(1e100)
self.assertIsInstance(x, int)
x = int(-1e100)
self.assertIsInstance(x, int)
# SF bug 434186: 0x80000000/2 != 0x80000000>>1.
# Worked by accident in Windows release build, but failed in debug build.
# Failed in all Linux builds.
x = -1-sys.maxsize
self.assertEqual(x >> 1, x//2)
self.assertRaises(ValueError, int, '123\0')
self.assertRaises(ValueError, int, '53', 40)
# SF bug 1545497: embedded NULs were not detected with
# explicit base
self.assertRaises(ValueError, int, '123\0', 10)
self.assertRaises(ValueError, int, '123\x00 245', 20)
x = int('1' * 600)
self.assertIsInstance(x, int)
self.assertRaises(TypeError, int, 1, 12)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 16), 291)
# Bug 1679: "0x" is not a valid hex literal
self.assertRaises(ValueError, int, "0x", 16)
self.assertRaises(ValueError, int, "0x", 0)
self.assertRaises(ValueError, int, "0o", 8)
self.assertRaises(ValueError, int, "0o", 0)
self.assertRaises(ValueError, int, "0b", 2)
self.assertRaises(ValueError, int, "0b", 0)
# Bug #3236: Return small longs from PyLong_FromString
self.assertTrue(int("10") is 10)
self.assertTrue(int("-1") is -1)
# SF bug 1334662: int(string, base) wrong answers
# Various representations of 2**32 evaluated to 0
# rather than 2**32 in previous versions
self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296)
self.assertEqual(int('102002022201221111211', 3), 4294967296)
self.assertEqual(int('10000000000000000', 4), 4294967296)
self.assertEqual(int('32244002423141', 5), 4294967296)
self.assertEqual(int('1550104015504', 6), 4294967296)
self.assertEqual(int('211301422354', 7), 4294967296)
self.assertEqual(int('40000000000', 8), 4294967296)
self.assertEqual(int('12068657454', 9), 4294967296)
self.assertEqual(int('4294967296', 10), 4294967296)
self.assertEqual(int('1904440554', 11), 4294967296)
self.assertEqual(int('9ba461594', 12), 4294967296)
self.assertEqual(int('535a79889', 13), 4294967296)
self.assertEqual(int('2ca5b7464', 14), 4294967296)
self.assertEqual(int('1a20dcd81', 15), 4294967296)
self.assertEqual(int('100000000', 16), 4294967296)
self.assertEqual(int('a7ffda91', 17), 4294967296)
self.assertEqual(int('704he7g4', 18), 4294967296)
self.assertEqual(int('4f5aff66', 19), 4294967296)
self.assertEqual(int('3723ai4g', 20), 4294967296)
self.assertEqual(int('281d55i4', 21), 4294967296)
self.assertEqual(int('1fj8b184', 22), 4294967296)
self.assertEqual(int('1606k7ic', 23), 4294967296)
self.assertEqual(int('mb994ag', 24), 4294967296)
self.assertEqual(int('hek2mgl', 25), 4294967296)
self.assertEqual(int('dnchbnm', 26), 4294967296)
self.assertEqual(int('b28jpdm', 27), 4294967296)
self.assertEqual(int('8pfgih4', 28), 4294967296)
self.assertEqual(int('76beigg', 29), 4294967296)
self.assertEqual(int('5qmcpqg', 30), 4294967296)
self.assertEqual(int('4q0jto4', 31), 4294967296)
self.assertEqual(int('4000000', 32), 4294967296)
self.assertEqual(int('3aokq94', 33), 4294967296)
self.assertEqual(int('2qhxjli', 34), 4294967296)
self.assertEqual(int('2br45qb', 35), 4294967296)
self.assertEqual(int('1z141z4', 36), 4294967296)
# tests with base 0
# this fails on 3.0, but in 2.x the old octal syntax is allowed
self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
# without base still base 10
self.assertEqual(int('0123'), 123)
self.assertEqual(int('0123', 10), 123)
# tests with prefix and base != 0
self.assertEqual(int('0x123', 16), 291)
self.assertEqual(int('0o123', 8), 83)
self.assertEqual(int('0b100', 2), 4)
self.assertEqual(int('0X123', 16), 291)
self.assertEqual(int('0O123', 8), 83)
self.assertEqual(int('0B100', 2), 4)
# the code has special checks for the first character after the
# type prefix
self.assertRaises(ValueError, int, '0b2', 2)
self.assertRaises(ValueError, int, '0b02', 2)
self.assertRaises(ValueError, int, '0B2', 2)
self.assertRaises(ValueError, int, '0B02', 2)
self.assertRaises(ValueError, int, '0o8', 8)
self.assertRaises(ValueError, int, '0o08', 8)
self.assertRaises(ValueError, int, '0O8', 8)
self.assertRaises(ValueError, int, '0O08', 8)
self.assertRaises(ValueError, int, '0xg', 16)
self.assertRaises(ValueError, int, '0x0g', 16)
self.assertRaises(ValueError, int, '0Xg', 16)
self.assertRaises(ValueError, int, '0X0g', 16)
# SF bug 1334662: int(string, base) wrong answers
# Checks for proper evaluation of 2**32 + 1
self.assertEqual(int('100000000000000000000000000000001', 2), 4294967297)
self.assertEqual(int('102002022201221111212', 3), 4294967297)
self.assertEqual(int('10000000000000001', 4), 4294967297)
self.assertEqual(int('32244002423142', 5), 4294967297)
self.assertEqual(int('1550104015505', 6), 4294967297)
self.assertEqual(int('211301422355', 7), 4294967297)
self.assertEqual(int('40000000001', 8), 4294967297)
self.assertEqual(int('12068657455', 9), 4294967297)
self.assertEqual(int('4294967297', 10), 4294967297)
self.assertEqual(int('1904440555', 11), 4294967297)
self.assertEqual(int('9ba461595', 12), 4294967297)
self.assertEqual(int('535a7988a', 13), 4294967297)
self.assertEqual(int('2ca5b7465', 14), 4294967297)
self.assertEqual(int('1a20dcd82', 15), 4294967297)
self.assertEqual(int('100000001', 16), 4294967297)
self.assertEqual(int('a7ffda92', 17), 4294967297)
self.assertEqual(int('704he7g5', 18), 4294967297)
self.assertEqual(int('4f5aff67', 19), 4294967297)
self.assertEqual(int('3723ai4h', 20), 4294967297)
self.assertEqual(int('281d55i5', 21), 4294967297)
self.assertEqual(int('1fj8b185', 22), 4294967297)
self.assertEqual(int('1606k7id', 23), 4294967297)
self.assertEqual(int('mb994ah', 24), 4294967297)
self.assertEqual(int('hek2mgm', 25), 4294967297)
self.assertEqual(int('dnchbnn', 26), 4294967297)
self.assertEqual(int('b28jpdn', 27), 4294967297)
self.assertEqual(int('8pfgih5', 28), 4294967297)
self.assertEqual(int('76beigh', 29), 4294967297)
self.assertEqual(int('5qmcpqh', 30), 4294967297)
self.assertEqual(int('4q0jto5', 31), 4294967297)
self.assertEqual(int('4000001', 32), 4294967297)
self.assertEqual(int('3aokq95', 33), 4294967297)
self.assertEqual(int('2qhxjlj', 34), 4294967297)
self.assertEqual(int('2br45qc', 35), 4294967297)
self.assertEqual(int('1z141z5', 36), 4294967297)
def test_intconversion(self):
    # Test __int__()
    # Objects providing neither __int__ nor __trunc__ must be rejected,
    # for both classic and new-style classes.
    class ClassicMissingMethods:
        pass
    self.assertRaises(TypeError, int, ClassicMissingMethods())
    class MissingMethods(object):
        pass
    self.assertRaises(TypeError, int, MissingMethods())
    # __int__ is honoured on classic, new-style and int-derived classes.
    class Foo0:
        def __int__(self):
            return 42
    class Foo1(object):
        def __int__(self):
            return 42
    class Foo2(int):
        def __int__(self):
            return 42
    class Foo3(int):
        def __int__(self):
            # Returns the instance itself; Foo3() below is constructed with
            # no value, so its numeric value is 0.
            return self
    class Foo4(int):
        def __int__(self):
            return 42
    class Foo5(int):
        def __int__(self):
            # A float is not an acceptable __int__ result.
            return 42.
    self.assertEqual(int(Foo0()), 42)
    self.assertEqual(int(Foo1()), 42)
    self.assertEqual(int(Foo2()), 42)
    self.assertEqual(int(Foo3()), 0)
    self.assertEqual(int(Foo4()), 42)
    self.assertRaises(TypeError, int, Foo5())
    class Classic:
        pass
    for base in (object, Classic):
        # __int__ takes precedence over __trunc__ when both are defined.
        class IntOverridesTrunc(base):
            def __int__(self):
                return 42
            def __trunc__(self):
                return -12
        self.assertEqual(int(IntOverridesTrunc()), 42)
        # Without __int__, int() falls back to __trunc__.
        class JustTrunc(base):
            def __trunc__(self):
                return 42
        self.assertEqual(int(JustTrunc()), 42)
        for trunc_result_base in (object, Classic):
            # A non-int __trunc__ result is accepted when the result
            # itself supplies __int__.
            class Integral(trunc_result_base):
                def __int__(self):
                    return 42
            class TruncReturnsNonInt(base):
                def __trunc__(self):
                    return Integral()
            self.assertEqual(int(TruncReturnsNonInt()), 42)
            class NonIntegral(trunc_result_base):
                def __trunc__(self):
                    # Check that we avoid infinite recursion.
                    return NonIntegral()
            class TruncReturnsNonIntegral(base):
                def __trunc__(self):
                    return NonIntegral()
            try:
                int(TruncReturnsNonIntegral())
            except TypeError as e:
                # The error message must name the offending result type.
                self.assertEqual(str(e),
                                 "__trunc__ returned non-Integral"
                                 " (type NonIntegral)")
            else:
                self.fail("Failed to raise TypeError with %s" %
                          ((base, trunc_result_base),))
def test_error_message(self):
testlist = ('\xbd', '123\xbd', ' 123 456 ')
for s in testlist:
try:
int(s)
except ValueError as e:
self.assertIn(s.strip(), e.args[0])
else:
self.fail("Expected int(%r) to raise a ValueError", s)
def test_main():
    """Entry point for the regression-test runner: run the int test case."""
    run_unittest(IntTestCases)

if __name__ == "__main__":
    test_main()
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/kusto/azure-mgmt-kusto/azure/mgmt/kusto/aio/_kusto_management_client.py | 1 | 6882 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import KustoManagementClientConfiguration
from .operations import ClustersOperations
from .operations import ClusterPrincipalAssignmentsOperations
from .operations import DatabasesOperations
from .operations import DatabasePrincipalAssignmentsOperations
from .operations import ScriptsOperations
from .operations import AttachedDatabaseConfigurationsOperations
from .operations import DataConnectionsOperations
from .operations import Operations
from .operations import OperationsResultsOperations
from .. import models
class KustoManagementClient(object):
    """The Azure Kusto management API provides a RESTful set of web services that interact with Azure Kusto services to manage your clusters and databases. The API enables you to create, update, and delete clusters and databases.

    :ivar clusters: ClustersOperations operations
    :vartype clusters: azure.mgmt.kusto.aio.operations.ClustersOperations
    :ivar cluster_principal_assignments: ClusterPrincipalAssignmentsOperations operations
    :vartype cluster_principal_assignments: azure.mgmt.kusto.aio.operations.ClusterPrincipalAssignmentsOperations
    :ivar databases: DatabasesOperations operations
    :vartype databases: azure.mgmt.kusto.aio.operations.DatabasesOperations
    :ivar database_principal_assignments: DatabasePrincipalAssignmentsOperations operations
    :vartype database_principal_assignments: azure.mgmt.kusto.aio.operations.DatabasePrincipalAssignmentsOperations
    :ivar scripts: ScriptsOperations operations
    :vartype scripts: azure.mgmt.kusto.aio.operations.ScriptsOperations
    :ivar attached_database_configurations: AttachedDatabaseConfigurationsOperations operations
    :vartype attached_database_configurations: azure.mgmt.kusto.aio.operations.AttachedDatabaseConfigurationsOperations
    :ivar data_connections: DataConnectionsOperations operations
    :vartype data_connections: azure.mgmt.kusto.aio.operations.DataConnectionsOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.kusto.aio.operations.Operations
    :ivar operations_results: OperationsResultsOperations operations
    :vartype operations_results: azure.mgmt.kusto.aio.operations.OperationsResultsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = KustoManagementClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect every generated model class so msrest can (de)serialize
        # request and response payloads.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        # Validation is handled by the service; skip it client-side.
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operations group per service area, all sharing the same
        # pipeline client and (de)serializers.
        self.clusters = ClustersOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.cluster_principal_assignments = ClusterPrincipalAssignmentsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.databases = DatabasesOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.database_principal_assignments = DatabasePrincipalAssignmentsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.scripts = ScriptsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.attached_database_configurations = AttachedDatabaseConfigurationsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.data_connections = DataConnectionsOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.operations_results = OperationsResultsOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.

        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the subscription id into the request URL template.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        """Closes the underlying pipeline client and its transport."""
        await self._client.close()

    async def __aenter__(self) -> "KustoManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| mit |
cooper-software/cellardoor | cellardoor/storage/mongodb.py | 1 | 5973 | import re
import pymongo
from datetime import datetime
from bson.objectid import ObjectId
from . import Storage
from .. import errors
# Extracts the index name that MongoDB embeds in a DuplicateKeyError message,
# e.g. "... duplicate key error index: db.coll.$field_1  dup key ...".
find_dupe_index_pattern = re.compile(r'\$([a-zA-Z0-9_]+)\s+')
class MongoDBStorage(Storage):
    """Storage backend that persists cellardoor entities in MongoDB.

    NOTE: this module is Python 2 code (``except X, e`` syntax,
    ``basestring``, ``reduce``) and targets the legacy PyMongo API
    (``spec``/``fields`` keyword arguments, ``ensure_index``,
    ``find_and_modify``).
    """

    # Filter operators that are allowed through _check_filter even though
    # they start with '$' and are not entity fields.
    special_fields = { '$where', '$text' }

    def __init__(self, db=None, *args, **kwargs):
        # Extra args/kwargs are passed straight to MongoClient (host, port, ...).
        self.client = pymongo.MongoClient(*args, **kwargs)
        self.db = self.client[db]
        # Maps MongoDB index name -> field name, filled in by setup(); used
        # to translate duplicate-key errors back to a field name.
        self.unique_fields_by_index = {}

    def setup(self, model):
        """Creates a sparse unique index for every unique field of every
        entity in *model*."""
        for e in model.entities.values():
            collection = self.get_collection(e)
            for k,v in e.fields.items():
                if v.unique:
                    index_name = collection.ensure_index(k, unique=True, sparse=True)
                    self.unique_fields_by_index[index_name] = k

    def get(self, entity, filter=None, fields=None, sort=None, offset=0, limit=0, count=False):
        """Queries *entity*'s collection.

        Returns a list of documents (with stringified ids), or just the
        match count when *count* is true.  *sort* entries are strings of
        the form '+field' / '-field'.
        """
        to_dict = self.document_to_dict
        # String ids must be converted before they can match ObjectIds.
        if filter and '_id' in filter and isinstance(filter['_id'], basestring):
            filter['_id'] = self._objectid(filter['_id'])
        sort_pairs = []
        if filter and '$text' in filter:
            # Full-text search: rank by text score and project it.
            sort_pairs.append(('score', {'$meta':'textScore'}))
            if not fields:
                fields = {}
            fields['score'] = {'$meta':'textScore'}
        if sort:
            # '+field' -> ascending, anything else ('-field') -> descending.
            sort_pairs.extend([(field[1:], 1) if field[0] == '+' else (field[1:], -1) for field in sort])
        collection = self.get_collection(entity)
        # Subtype entities share their root's collection; restrict to rows
        # whose _type marks them as this entity (or a subtype of it).
        type_filter = self.get_type_filter(entity)
        if type_filter:
            if not filter:
                filter = type_filter
            else:
                filter.update(type_filter)
        results = collection.find(spec=filter,
                                  fields=fields,
                                  sort=sort_pairs,
                                  skip=offset,
                                  limit=limit)
        if count:
            return results.count()
        else:
            return map(to_dict, results)

    def get_by_ids(self, entity, ids, filter=None, fields=None, sort=None, offset=0, limit=0, count=False):
        """Like get(), restricted to the documents whose _id is in *ids*."""
        if not filter:
            filter = {}
        filter['_id'] = {'$in':map(self._objectid, ids)}
        return self.get(entity, filter=filter, fields=fields, sort=sort, offset=offset, limit=limit, count=count)

    def get_by_id(self, entity, id, filter=None, fields=None):
        """Returns a single document by id, or None if it does not exist
        (or does not match the entity's type filter)."""
        collection = self.get_collection(entity)
        filter = filter if filter else {}
        filter['_id'] = self._objectid(id)
        type_filter = self.get_type_filter(entity)
        if type_filter:
            filter.update(type_filter)
        result = collection.find_one(filter, fields=fields)
        if result is None:
            return None
        else:
            return self.document_to_dict(result)

    def create(self, entity, fields):
        """Inserts a new document and returns its id as a string.

        Raises errors.DuplicateError if a unique index is violated.
        """
        collection = self.get_collection(entity)
        type_name = self.get_type_name(entity)
        if type_name:
            fields['_type'] = type_name
        if '_id' in fields:
            fields['_id'] = self._objectid(fields['_id'])
        try:
            # Copy so PyMongo's in-place _id injection does not leak into
            # the caller's dict.
            obj_id = collection.insert(fields.copy())
        except pymongo.errors.DuplicateKeyError, e:
            self._raise_dupe_error(e)
        return self._from_objectid(obj_id)

    def update(self, entity, id, fields, replace=False):
        """Updates (or, with replace=True, wholly replaces) a document and
        returns its new state, or None if no document matched.

        Raises errors.DuplicateError if a unique index is violated.
        """
        type_name = self.get_type_name(entity)
        if type_name:
            fields['_type'] = type_name
        try:
            collection = self.get_collection(entity)
            obj_id = self._objectid(id)
            if replace:
                doc = fields
            else:
                doc = { '$set': fields }
            # new=True -> return the post-update document.
            doc = collection.find_and_modify({ '_id': obj_id }, doc, new=True)
            if doc:
                return self.document_to_dict(doc)
        except pymongo.errors.DuplicateKeyError, e:
            self._raise_dupe_error(e)

    def delete(self, entity, id):
        """Removes the document with the given id (no error if absent)."""
        collection = self.get_collection(entity)
        collection.remove(self._objectid(id))

    def document_to_dict(self, doc):
        """Normalizes a raw MongoDB document: _id becomes a plain string."""
        doc['_id'] = self._from_objectid(doc['_id'])
        return doc

    def get_collection(self, entity):
        """Returns the collection for *entity*; subtypes use the root
        entity's collection."""
        if len(entity.hierarchy) > 0:
            collection_name = entity.hierarchy[0].__name__
        else:
            collection_name = entity.__name__
        # We use getattr here instead of __getitem__ to
        # make it easier to inject mock collections objects
        # for testing
        return getattr(self.db, collection_name)

    def get_type_name(self, entity):
        """Returns the dotted type path ('Root.Mid.Leaf') stored in _type,
        or None for root entities (which need no discriminator)."""
        if len(entity.hierarchy) > 0:
            return '.'.join([x.__name__ for x in entity.hierarchy]) + '.' + entity.__name__
        else:
            return None

    def get_type_filter(self, entity):
        """Returns a prefix-match filter on _type for subtype entities, or
        None (implicitly) for root entities."""
        type_name = self.get_type_name(entity)
        if type_name:
            return {'_type':{'$regex':'^%s' % re.escape(type_name)}}

    def check_filter(self, filter, allowed_fields, context):
        """Validates a client-supplied filter in place, rejecting fields
        outside *allowed_fields* and resolving '$identity...' placeholders
        from *context*."""
        allowed_fields = set(allowed_fields)
        return self._check_filter(filter, allowed_fields, context)

    def _check_filter(self, filter, allowed_fields, context):
        # Recursive worker for check_filter(); mutates *filter* in place.
        if not isinstance(filter, dict):
            return
        for k,v in filter.items():
            if k.startswith('$'):
                # Operators are fine; only whitelisted special fields pass.
                if k in self.special_fields:
                    continue
            elif k not in allowed_fields:
                raise errors.DisabledFieldError('You cannot filter by the "%s" field' % k)
            identity_value = self._get_identity_value(v, context)
            if identity_value:
                filter[k] = identity_value
            elif isinstance(v, (list, tuple)):
                # Resolve placeholders element-wise and recurse into
                # nested sub-filters.
                new_v = []
                for x in v:
                    identity_value = self._get_identity_value(x, context)
                    if identity_value:
                        new_v.append(identity_value)
                    else:
                        new_v.append(x)
                        self._check_filter(x, allowed_fields, context)
                filter[k] = new_v
            elif isinstance(v, dict):
                self._check_filter(v, allowed_fields, context)

    def _get_identity_value(self, key, context):
        """If *key* is a '$identity.some.path' string, resolves that dotted
        path in *context* (starting at context['identity']); otherwise
        returns None implicitly."""
        if isinstance(key, basestring):
            if key.startswith('$identity'):
                try:
                    return reduce(dict.get, key[1:].split("."), context)
                except:
                    raise errors.CompoundValidationError({'filter': 'Attempting to use a non-existent context variable: %s' % key})

    def _raise_dupe_error(self, orig_exc):
        """Translates a PyMongo DuplicateKeyError into a DuplicateError
        naming the offending field (or 'unknown')."""
        m = find_dupe_index_pattern.search(orig_exc.message)
        if m:
            index_name = m.group(1)
            key_name = self.unique_fields_by_index.get(index_name)
        else:
            key_name = 'unknown'
        raise errors.DuplicateError(key_name)

    def _objectid(self, id):
        # Best effort: fall back to the string form for ids that are not
        # valid ObjectIds (e.g. natural keys).
        try:
            return ObjectId(id)
        except:
            return str(id)

    def _from_objectid(self, id):
        # Inverse of _objectid: ObjectIds become strings, anything else is
        # passed through unchanged.
        if isinstance(id, ObjectId):
            return str(id)
        else:
            return id
pychess/pychess | lib/pychess/Utils/lutils/PolyglotHash.py | 1 | 23734 | # -*- coding: UTF-8 -*-
import random
from pychess.Utils.const import WHITE, BLACK, PAWN, KNIGHT, BISHOP, ROOK, QUEEN, HAWK, ELEPHANT, KING
# Polyglot opening books are indexed by 64-bit Zobrist hash keys.
# The standard specifies the following Zobrist seed values.
# The numbers in this file come from PolyGlot by Fabien Letouzey.
# PolyGlot is available under the GNU GPL from http://wbec-ridderkerk.nl
pieceHashes = [
[
[0x0000000000000000] * 64,
[0x5355f900c2a82dc7,
0x07fb9f855a997142,
0x5093417aa8a7ed5e,
0x7bcbc38da25a7f3c,
0x19fc8a768cf4b6d4,
0x637a7780decfc0d9,
0x8249a47aee0e41f7,
0x79ad695501e7d1e8,
0x14acbaf4777d5776,
0xf145b6beccdea195,
0xdabf2ac8201752fc,
0x24c3c94df9c8d3f6,
0xbb6e2924f03912ea,
0x0ce26c0b95c980d9,
0xa49cd132bfbf7cc4,
0xe99d662af4243939,
0x27e6ad7891165c3f,
0x8535f040b9744ff1,
0x54b3f4fa5f40d873,
0x72b12c32127fed2b,
0xee954d3c7b411f47,
0x9a85ac909a24eaa1,
0x70ac4cd9f04f21f5,
0xf9b89d3e99a075c2,
0x87b3e2b2b5c907b1,
0xa366e5b8c54f48b8,
0xae4a9346cc3f7cf2,
0x1920c04d47267bbd,
0x87bf02c6b49e2ae9,
0x092237ac237f3859,
0xff07f64ef8ed14d0,
0x8de8dca9f03cc54e,
0x9c1633264db49c89,
0xb3f22c3d0b0b38ed,
0x390e5fb44d01144b,
0x5bfea5b4712768e9,
0x1e1032911fa78984,
0x9a74acb964e78cb3,
0x4f80f7a035dafb04,
0x6304d09a0b3738c4,
0x2171e64683023a08,
0x5b9b63eb9ceff80c,
0x506aacf489889342,
0x1881afc9a3a701d6,
0x6503080440750644,
0xdfd395339cdbf4a7,
0xef927dbcf00c20f2,
0x7b32f7d1e03680ec,
0xb9fd7620e7316243,
0x05a7e8a57db91b77,
0xb5889c6e15630a75,
0x4a750a09ce9573f7,
0xcf464cec899a2f8a,
0xf538639ce705b824,
0x3c79a0ff5580ef7f,
0xede6c87f8477609d,
0x799e81f05bc93f31,
0x86536b8cf3428a8c,
0x97d7374c60087b73,
0xa246637cff328532,
0x043fcae60cc0eba0,
0x920e449535dd359e,
0x70eb093b15b290cc,
0x73a1921916591cbd, ],
[0xc547f57e42a7444e,
0x78e37644e7cad29e,
0xfe9a44e9362f05fa,
0x08bd35cc38336615,
0x9315e5eb3a129ace,
0x94061b871e04df75,
0xdf1d9f9d784ba010,
0x3bba57b68871b59d,
0xd2b7adeeded1f73f,
0xf7a255d83bc373f8,
0xd7f4f2448c0ceb81,
0xd95be88cd210ffa7,
0x336f52f8ff4728e7,
0xa74049dac312ac71,
0xa2f61bb6e437fdb5,
0x4f2a5cb07f6a35b3,
0x87d380bda5bf7859,
0x16b9f7e06c453a21,
0x7ba2484c8a0fd54e,
0xf3a678cad9a2e38c,
0x39b0bf7dde437ba2,
0xfcaf55c1bf8a4424,
0x18fcf680573fa594,
0x4c0563b89f495ac3,
0x40e087931a00930d,
0x8cffa9412eb642c1,
0x68ca39053261169f,
0x7a1ee967d27579e2,
0x9d1d60e5076f5b6f,
0x3810e399b6f65ba2,
0x32095b6d4ab5f9b1,
0x35cab62109dd038a,
0xa90b24499fcfafb1,
0x77a225a07cc2c6bd,
0x513e5e634c70e331,
0x4361c0ca3f692f12,
0xd941aca44b20a45b,
0x528f7c8602c5807b,
0x52ab92beb9613989,
0x9d1dfa2efc557f73,
0x722ff175f572c348,
0x1d1260a51107fe97,
0x7a249a57ec0c9ba2,
0x04208fe9e8f7f2d6,
0x5a110c6058b920a0,
0x0cd9a497658a5698,
0x56fd23c8f9715a4c,
0x284c847b9d887aae,
0x04feabfbbdb619cb,
0x742e1e651c60ba83,
0x9a9632e65904ad3c,
0x881b82a13b51b9e2,
0x506e6744cd974924,
0xb0183db56ffc6a79,
0x0ed9b915c66ed37e,
0x5e11e86d5873d484,
0xf678647e3519ac6e,
0x1b85d488d0f20cc5,
0xdab9fe6525d89021,
0x0d151d86adb73615,
0xa865a54edcc0f019,
0x93c42566aef98ffb,
0x99e7afeabe000731,
0x48cbff086ddf285a, ],
[0x23b70edb1955c4bf,
0xc330de426430f69d,
0x4715ed43e8a45c0a,
0xa8d7e4dab780a08d,
0x0572b974f03ce0bb,
0xb57d2e985e1419c7,
0xe8d9ecbe2cf3d73f,
0x2fe4b17170e59750,
0x11317ba87905e790,
0x7fbf21ec8a1f45ec,
0x1725cabfcb045b00,
0x964e915cd5e2b207,
0x3e2b8bcbf016d66d,
0xbe7444e39328a0ac,
0xf85b2b4fbcde44b7,
0x49353fea39ba63b1,
0x1dd01aafcd53486a,
0x1fca8a92fd719f85,
0xfc7c95d827357afa,
0x18a6a990c8b35ebd,
0xcccb7005c6b9c28d,
0x3bdbb92c43b17f26,
0xaa70b5b4f89695a2,
0xe94c39a54a98307f,
0xb7a0b174cff6f36e,
0xd4dba84729af48ad,
0x2e18bc1ad9704a68,
0x2de0966daf2f8b1c,
0xb9c11d5b1e43a07e,
0x64972d68dee33360,
0x94628d38d0c20584,
0xdbc0d2b6ab90a559,
0xd2733c4335c6a72f,
0x7e75d99d94a70f4d,
0x6ced1983376fa72b,
0x97fcaacbf030bc24,
0x7b77497b32503b12,
0x8547eddfb81ccb94,
0x79999cdff70902cb,
0xcffe1939438e9b24,
0x829626e3892d95d7,
0x92fae24291f2b3f1,
0x63e22c147b9c3403,
0xc678b6d860284a1c,
0x5873888850659ae7,
0x0981dcd296a8736d,
0x9f65789a6509a440,
0x9ff38fed72e9052f,
0xe479ee5b9930578c,
0xe7f28ecd2d49eecd,
0x56c074a581ea17fe,
0x5544f7d774b14aef,
0x7b3f0195fc6f290f,
0x12153635b2c0cf57,
0x7f5126dbba5e0ca7,
0x7a76956c3eafb413,
0x3d5774a11d31ab39,
0x8a1b083821f40cb4,
0x7b4a38e32537df62,
0x950113646d1d6e03,
0x4da8979a0041e8a9,
0x3bc36e078f7515d7,
0x5d0a12f27ad310d1,
0x7f9d1a2e1ebe1327, ],
[0xa09e8c8c35ab96de,
0xfa7e393983325753,
0xd6b6d0ecc617c699,
0xdfea21ea9e7557e3,
0xb67c1fa481680af8,
0xca1e3785a9e724e5,
0x1cfc8bed0d681639,
0xd18d8549d140caea,
0x4ed0fe7e9dc91335,
0xe4dbf0634473f5d2,
0x1761f93a44d5aefe,
0x53898e4c3910da55,
0x734de8181f6ec39a,
0x2680b122baa28d97,
0x298af231c85bafab,
0x7983eed3740847d5,
0x66c1a2a1a60cd889,
0x9e17e49642a3e4c1,
0xedb454e7badc0805,
0x50b704cab602c329,
0x4cc317fb9cddd023,
0x66b4835d9eafea22,
0x219b97e26ffc81bd,
0x261e4e4c0a333a9d,
0x1fe2cca76517db90,
0xd7504dfa8816edbb,
0xb9571fa04dc089c8,
0x1ddc0325259b27de,
0xcf3f4688801eb9aa,
0xf4f5d05c10cab243,
0x38b6525c21a42b0e,
0x36f60e2ba4fa6800,
0xeb3593803173e0ce,
0x9c4cd6257c5a3603,
0xaf0c317d32adaa8a,
0x258e5a80c7204c4b,
0x8b889d624d44885d,
0xf4d14597e660f855,
0xd4347f66ec8941c3,
0xe699ed85b0dfb40d,
0x2472f6207c2d0484,
0xc2a1e7b5b459aeb5,
0xab4f6451cc1d45ec,
0x63767572ae3d6174,
0xa59e0bd101731a28,
0x116d0016cb948f09,
0x2cf9c8ca052f6e9f,
0x0b090a7560a968e3,
0xabeeddb2dde06ff1,
0x58efc10b06a2068d,
0xc6e57a78fbd986e0,
0x2eab8ca63ce802d7,
0x14a195640116f336,
0x7c0828dd624ec390,
0xd74bbe77e6116ac7,
0x804456af10f5fb53,
0xebe9ea2adf4321c7,
0x03219a39ee587a30,
0x49787fef17af9924,
0xa1e9300cd8520548,
0x5b45e522e4b1b4ef,
0xb49c3b3995091a36,
0xd4490ad526f14431,
0x12a8f216af9418c2, ],
[0x6ffe73e81b637fb3,
0xddf957bc36d8b9ca,
0x64d0e29eea8838b3,
0x08dd9bdfd96b9f63,
0x087e79e5a57d1d13,
0xe328e230e3e2b3fb,
0x1c2559e30f0946be,
0x720bf5f26f4d2eaa,
0xb0774d261cc609db,
0x443f64ec5a371195,
0x4112cf68649a260e,
0xd813f2fab7f5c5ca,
0x660d3257380841ee,
0x59ac2c7873f910a3,
0xe846963877671a17,
0x93b633abfa3469f8,
0xc0c0f5a60ef4cdcf,
0xcaf21ecd4377b28c,
0x57277707199b8175,
0x506c11b9d90e8b1d,
0xd83cc2687a19255f,
0x4a29c6465a314cd1,
0xed2df21216235097,
0xb5635c95ff7296e2,
0x22af003ab672e811,
0x52e762596bf68235,
0x9aeba33ac6ecc6b0,
0x944f6de09134dfb6,
0x6c47bec883a7de39,
0x6ad047c430a12104,
0xa5b1cfdba0ab4067,
0x7c45d833aff07862,
0x5092ef950a16da0b,
0x9338e69c052b8e7b,
0x455a4b4cfe30e3f5,
0x6b02e63195ad0cf8,
0x6b17b224bad6bf27,
0xd1e0ccd25bb9c169,
0xde0c89a556b9ae70,
0x50065e535a213cf6,
0x9c1169fa2777b874,
0x78edefd694af1eed,
0x6dc93d9526a50e68,
0xee97f453f06791ed,
0x32ab0edb696703d3,
0x3a6853c7e70757a7,
0x31865ced6120f37d,
0x67fef95d92607890,
0x1f2b1d1f15f6dc9c,
0xb69e38a8965c6b65,
0xaa9119ff184cccf4,
0xf43c732873f24c13,
0xfb4a3d794a9a80d2,
0x3550c2321fd6109c,
0x371f77e76bb8417e,
0x6bfa9aae5ec05779,
0xcd04f3ff001a4778,
0xe3273522064480ca,
0x9f91508bffcfc14a,
0x049a7f41061a9e60,
0xfcb6be43a9f2fe9b,
0x08de8a1c7797da9b,
0x8f9887e6078735a1,
0xb5b4071dbfc73a66, ],
[0x55b6344cf97aafae,
0xb862225b055b6960,
0xcac09afbddd2cdb4,
0xdaf8e9829fe96b5f,
0xb5fdfc5d3132c498,
0x310cb380db6f7503,
0xe87fbb46217a360e,
0x2102ae466ebb1148,
0xf8549e1a3aa5e00d,
0x07a69afdcc42261a,
0xc4c118bfe78feaae,
0xf9f4892ed96bd438,
0x1af3dbe25d8f45da,
0xf5b4b0b0d2deeeb4,
0x962aceefa82e1c84,
0x046e3ecaaf453ce9,
0xf05d129681949a4c,
0x964781ce734b3c84,
0x9c2ed44081ce5fbd,
0x522e23f3925e319e,
0x177e00f9fc32f791,
0x2bc60a63a6f3b3f2,
0x222bbfae61725606,
0x486289ddcc3d6780,
0x7dc7785b8efdfc80,
0x8af38731c02ba980,
0x1fab64ea29a2ddf7,
0xe4d9429322cd065a,
0x9da058c67844f20c,
0x24c0e332b70019b0,
0x233003b5a6cfe6ad,
0xd586bd01c5c217f6,
0x5e5637885f29bc2b,
0x7eba726d8c94094b,
0x0a56a5f0bfe39272,
0xd79476a84ee20d06,
0x9e4c1269baa4bf37,
0x17efee45b0dee640,
0x1d95b0a5fcf90bc6,
0x93cbe0b699c2585d,
0x65fa4f227a2b6d79,
0xd5f9e858292504d5,
0xc2b5a03f71471a6f,
0x59300222b4561e00,
0xce2f8642ca0712dc,
0x7ca9723fbb2e8988,
0x2785338347f2ba08,
0xc61bb3a141e50e8c,
0x150f361dab9dec26,
0x9f6a419d382595f4,
0x64a53dc924fe7ac9,
0x142de49fff7a7c3d,
0x0c335248857fa9e7,
0x0a9c32d5eae45305,
0xe6c42178c4bbb92e,
0x71f1ce2490d20b07,
0xf1bcc3d275afe51a,
0xe728e8c83c334074,
0x96fbf83a12884624,
0x81a1549fd6573da5,
0x5fa7867caf35e149,
0x56986e2ef3ed091b,
0x917f1dd5f8886c61,
0xd20d8c88c8ffe65f, ],
],
[
[0x0000000000000000] * 64,
[0x9d39247e33776d41,
0x2af7398005aaa5c7,
0x44db015024623547,
0x9c15f73e62a76ae2,
0x75834465489c0c89,
0x3290ac3a203001bf,
0x0fbbad1f61042279,
0xe83a908ff2fb60ca,
0x0d7e765d58755c10,
0x1a083822ceafe02d,
0x9605d5f0e25ec3b0,
0xd021ff5cd13a2ed5,
0x40bdf15d4a672e32,
0x011355146fd56395,
0x5db4832046f3d9e5,
0x239f8b2d7ff719cc,
0x05d1a1ae85b49aa1,
0x679f848f6e8fc971,
0x7449bbff801fed0b,
0x7d11cdb1c3b7adf0,
0x82c7709e781eb7cc,
0xf3218f1c9510786c,
0x331478f3af51bbe6,
0x4bb38de5e7219443,
0xaa649c6ebcfd50fc,
0x8dbd98a352afd40b,
0x87d2074b81d79217,
0x19f3c751d3e92ae1,
0xb4ab30f062b19abf,
0x7b0500ac42047ac4,
0xc9452ca81a09d85d,
0x24aa6c514da27500,
0x4c9f34427501b447,
0x14a68fd73c910841,
0xa71b9b83461cbd93,
0x03488b95b0f1850f,
0x637b2b34ff93c040,
0x09d1bc9a3dd90a94,
0x3575668334a1dd3b,
0x735e2b97a4c45a23,
0x18727070f1bd400b,
0x1fcbacd259bf02e7,
0xd310a7c2ce9b6555,
0xbf983fe0fe5d8244,
0x9f74d14f7454a824,
0x51ebdc4ab9ba3035,
0x5c82c505db9ab0fa,
0xfcf7fe8a3430b241,
0x3253a729b9ba3dde,
0x8c74c368081b3075,
0xb9bc6c87167c33e7,
0x7ef48f2b83024e20,
0x11d505d4c351bd7f,
0x6568fca92c76a243,
0x4de0b0f40f32a7b8,
0x96d693460cc37e5d,
0x42e240cb63689f2f,
0x6d2bdcdae2919661,
0x42880b0236e4d951,
0x5f0f4a5898171bb6,
0x39f890f579f92f88,
0x93c5b5f47356388b,
0x63dc359d8d231b78,
0xec16ca8aea98ad76, ],
[0x56436c9fe1a1aa8d,
0xefac4b70633b8f81,
0xbb215798d45df7af,
0x45f20042f24f1768,
0x930f80f4e8eb7462,
0xff6712ffcfd75ea1,
0xae623fd67468aa70,
0xdd2c5bc84bc8d8fc,
0x7eed120d54cf2dd9,
0x22fe545401165f1c,
0xc91800e98fb99929,
0x808bd68e6ac10365,
0xdec468145b7605f6,
0x1bede3a3aef53302,
0x43539603d6c55602,
0xaa969b5c691ccb7a,
0xa87832d392efee56,
0x65942c7b3c7e11ae,
0xded2d633cad004f6,
0x21f08570f420e565,
0xb415938d7da94e3c,
0x91b859e59ecb6350,
0x10cff333e0ed804a,
0x28aed140be0bb7dd,
0xc5cc1d89724fa456,
0x5648f680f11a2741,
0x2d255069f0b7dab3,
0x9bc5a38ef729abd4,
0xef2f054308f6a2bc,
0xaf2042f5cc5c2858,
0x480412bab7f5be2a,
0xaef3af4a563dfe43,
0x19afe59ae451497f,
0x52593803dff1e840,
0xf4f076e65f2ce6f0,
0x11379625747d5af3,
0xbce5d2248682c115,
0x9da4243de836994f,
0x066f70b33fe09017,
0x4dc4de189b671a1c,
0x51039ab7712457c3,
0xc07a3f80c31fb4b4,
0xb46ee9c5e64a6e7c,
0xb3819a42abe61c87,
0x21a007933a522a20,
0x2df16f761598aa4f,
0x763c4a1371b368fd,
0xf793c46702e086a0,
0xd7288e012aeb8d31,
0xde336a2a4bc1c44b,
0x0bf692b38d079f23,
0x2c604a7a177326b3,
0x4850e73e03eb6064,
0xcfc447f1e53c8e1b,
0xb05ca3f564268d99,
0x9ae182c8bc9474e8,
0xa4fc4bd4fc5558ca,
0xe755178d58fc4e76,
0x69b97db1a4c03dfe,
0xf9b5b7c4acc67c96,
0xfc6a82d64b8655fb,
0x9c684cb6c4d24417,
0x8ec97d2917456ed0,
0x6703df9d2924e97e, ],
[0x7f9b6af1ebf78baf,
0x58627e1a149bba21,
0x2cd16e2abd791e33,
0xd363eff5f0977996,
0x0ce2a38c344a6eed,
0x1a804aadb9cfa741,
0x907f30421d78c5de,
0x501f65edb3034d07,
0x37624ae5a48fa6e9,
0x957baf61700cff4e,
0x3a6c27934e31188a,
0xd49503536abca345,
0x088e049589c432e0,
0xf943aee7febf21b8,
0x6c3b8e3e336139d3,
0x364f6ffa464ee52e,
0xd60f6dcedc314222,
0x56963b0dca418fc0,
0x16f50edf91e513af,
0xef1955914b609f93,
0x565601c0364e3228,
0xecb53939887e8175,
0xbac7a9a18531294b,
0xb344c470397bba52,
0x65d34954daf3cebd,
0xb4b81b3fa97511e2,
0xb422061193d6f6a7,
0x071582401c38434d,
0x7a13f18bbedc4ff5,
0xbc4097b116c524d2,
0x59b97885e2f2ea28,
0x99170a5dc3115544,
0x6f423357e7c6a9f9,
0x325928ee6e6f8794,
0xd0e4366228b03343,
0x565c31f7de89ea27,
0x30f5611484119414,
0xd873db391292ed4f,
0x7bd94e1d8e17debc,
0xc7d9f16864a76e94,
0x947ae053ee56e63c,
0xc8c93882f9475f5f,
0x3a9bf55ba91f81ca,
0xd9a11fbb3d9808e4,
0x0fd22063edc29fca,
0xb3f256d8aca0b0b9,
0xb03031a8b4516e84,
0x35dd37d5871448af,
0xe9f6082b05542e4e,
0xebfafa33d7254b59,
0x9255abb50d532280,
0xb9ab4ce57f2d34f3,
0x693501d628297551,
0xc62c58f97dd949bf,
0xcd454f8f19c5126a,
0xbbe83f4ecc2bdecb,
0xdc842b7e2819e230,
0xba89142e007503b8,
0xa3bc941d0a5061cb,
0xe9f6760e32cd8021,
0x09c7e552bc76492f,
0x852f54934da55cc9,
0x8107fccf064fcf56,
0x098954d51fff6580, ],
[0xda3a361b1c5157b1,
0xdcdd7d20903d0c25,
0x36833336d068f707,
0xce68341f79893389,
0xab9090168dd05f34,
0x43954b3252dc25e5,
0xb438c2b67f98e5e9,
0x10dcd78e3851a492,
0xdbc27ab5447822bf,
0x9b3cdb65f82ca382,
0xb67b7896167b4c84,
0xbfced1b0048eac50,
0xa9119b60369ffebd,
0x1fff7ac80904bf45,
0xac12fb171817eee7,
0xaf08da9177dda93d,
0x1b0cab936e65c744,
0xb559eb1d04e5e932,
0xc37b45b3f8d6f2ba,
0xc3a9dc228caac9e9,
0xf3b8b6675a6507ff,
0x9fc477de4ed681da,
0x67378d8eccef96cb,
0x6dd856d94d259236,
0xa319ce15b0b4db31,
0x073973751f12dd5e,
0x8a8e849eb32781a5,
0xe1925c71285279f5,
0x74c04bf1790c0efe,
0x4dda48153c94938a,
0x9d266d6a1cc0542c,
0x7440fb816508c4fe,
0x13328503df48229f,
0xd6bf7baee43cac40,
0x4838d65f6ef6748f,
0x1e152328f3318dea,
0x8f8419a348f296bf,
0x72c8834a5957b511,
0xd7a023a73260b45c,
0x94ebc8abcfb56dae,
0x9fc10d0f989993e0,
0xde68a2355b93cae6,
0xa44cfe79ae538bbe,
0x9d1d84fcce371425,
0x51d2b1ab2ddfb636,
0x2fd7e4b9e72cd38c,
0x65ca5b96b7552210,
0xdd69a0d8ab3b546d,
0x604d51b25fbf70e2,
0x73aa8a564fb7ac9e,
0x1a8c1e992b941148,
0xaac40a2703d9bea0,
0x764dbeae7fa4f3a6,
0x1e99b96e70a9be8b,
0x2c5e9deb57ef4743,
0x3a938fee32d29981,
0x26e6db8ffdf5adfe,
0x469356c504ec9f9d,
0xc8763c5b08d1908c,
0x3f6c6af859d80055,
0x7f7cc39420a3a545,
0x9bfb227ebdf4c5ce,
0x89039d79d6fc5c5c,
0x8fe88b57305e2ab6, ],
[0x001f837cc7350524,
0x1877b51e57a764d5,
0xa2853b80f17f58ee,
0x993e1de72d36d310,
0xb3598080ce64a656,
0x252f59cf0d9f04bb,
0xd23c8e176d113600,
0x1bda0492e7e4586e,
0x21e0bd5026c619bf,
0x3b097adaf088f94e,
0x8d14dedb30be846e,
0xf95cffa23af5f6f4,
0x3871700761b3f743,
0xca672b91e9e4fa16,
0x64c8e531bff53b55,
0x241260ed4ad1e87d,
0x106c09b972d2e822,
0x7fba195410e5ca30,
0x7884d9bc6cb569d8,
0x0647dfedcd894a29,
0x63573ff03e224774,
0x4fc8e9560f91b123,
0x1db956e450275779,
0xb8d91274b9e9d4fb,
0xa2ebee47e2fbfce1,
0xd9f1f30ccd97fb09,
0xefed53d75fd64e6b,
0x2e6d02c36017f67f,
0xa9aa4d20db084e9b,
0xb64be8d8b25396c1,
0x70cb6af7c2d5bcf0,
0x98f076a4f7a2322e,
0xbf84470805e69b5f,
0x94c3251f06f90cf3,
0x3e003e616a6591e9,
0xb925a6cd0421aff3,
0x61bdd1307c66e300,
0xbf8d5108e27e0d48,
0x240ab57a8b888b20,
0xfc87614baf287e07,
0xef02cdd06ffdb432,
0xa1082c0466df6c0a,
0x8215e577001332c8,
0xd39bb9c3a48db6cf,
0x2738259634305c14,
0x61cf4f94c97df93d,
0x1b6baca2ae4e125b,
0x758f450c88572e0b,
0x959f587d507a8359,
0xb063e962e045f54d,
0x60e8ed72c0dff5d1,
0x7b64978555326f9f,
0xfd080d236da814ba,
0x8c90fd9b083f4558,
0x106f72fe81e2c590,
0x7976033a39f7d952,
0xa4ec0132764ca04b,
0x733ea705fae4fa77,
0xb4d8f77bc3e56167,
0x9e21f4f903b33fd9,
0x9d765e419fb69f6d,
0xd30c088ba61ea5ef,
0x5d94337fbfaf7f5b,
0x1a4e4822eb4d7a59, ],
[0x230e343dfba08d33,
0x43ed7f5a0fae657d,
0x3a88a0fbbcb05c63,
0x21874b8b4d2dbc4f,
0x1bdea12e35f6a8c9,
0x53c065c6c8e63528,
0xe34a1d250e7a8d6b,
0xd6b04d3b7651dd7e,
0x5e90277e7cb39e2d,
0x2c046f22062dc67d,
0xb10bb459132d0a26,
0x3fa9ddfb67e2f199,
0x0e09b88e1914f7af,
0x10e8b35af3eeab37,
0x9eedeca8e272b933,
0xd4c718bc4ae8ae5f,
0x81536d601170fc20,
0x91b534f885818a06,
0xec8177f83f900978,
0x190e714fada5156e,
0xb592bf39b0364963,
0x89c350c893ae7dc1,
0xac042e70f8b383f2,
0xb49b52e587a1ee60,
0xfb152fe3ff26da89,
0x3e666e6f69ae2c15,
0x3b544ebe544c19f9,
0xe805a1e290cf2456,
0x24b33c9d7ed25117,
0xe74733427b72f0c1,
0x0a804d18b7097475,
0x57e3306d881edb4f,
0x4ae7d6a36eb5dbcb,
0x2d8d5432157064c8,
0xd1e649de1e7f268b,
0x8a328a1cedfe552c,
0x07a3aec79624c7da,
0x84547ddc3e203c94,
0x990a98fd5071d263,
0x1a4ff12616eefc89,
0xf6f7fd1431714200,
0x30c05b1ba332f41c,
0x8d2636b81555a786,
0x46c9feb55d120902,
0xccec0a73b49c9921,
0x4e9d2827355fc492,
0x19ebb029435dcb0f,
0x4659d2b743848a2c,
0x963ef2c96b33be31,
0x74f85198b05a2e7d,
0x5a0f544dd2b1fb18,
0x03727073c2e134b1,
0xc7f6aa2de59aea61,
0x352787baa0d7c22f,
0x9853eab63b5e0b35,
0xabbdcdd7ed5c0860,
0xcf05daf5ac8d77b0,
0x49cad48cebf4a71e,
0x7a4c10ec2158c4a6,
0xd9e92aa246bf719e,
0x13ae978d09fe5557,
0x730499af921549ff,
0x4e4b705b92903ba4,
0xff577222c14f0a3a, ],
],
]
# Zobrist keys for the en-passant file (a-h), per the Polyglot book format.
epHashes = [0x70cc73d90bc26e24, 0xe21a6b35df0c3ad7, 0x003a93d8b2806962,
            0x1c99ded33cb890a1, 0xcf3145de0add4289, 0xd0e4427a5514fb72,
            0x77c621cc9fb3a483, 0x67a34dac4356550b]
# Keys for the four castling rights (white/black, king-/queen-side).
W_OOHash = 0x31d71dce64b2c310
W_OOOHash = 0xf165b587df898190
B_OOHash = 0xa57e6339dd2cf3a0
B_OOOHash = 0x1ef6e6dbb1961ec9
# Side-to-move key (per Polyglot, XORed in when it is White's turn).
colorHash = 0xf8d626aaaf278509
# Keys for pieces held in hand (drop variants): holdingHash[color][piece]
# holds one key per possible in-hand count, with index 0 as a placeholder.
# NOTE(review): these keys come from the default, unseeded RNG, so they
# differ between processes — presumably only used within a single run;
# confirm before persisting any hash that includes holdings.
holdingHash = [[[0, ] for _ in range(9)] for _ in range(2)]
for side in (WHITE, BLACK):
    for piece in (PAWN, KNIGHT, BISHOP, ROOK, QUEEN, KING, HAWK, ELEPHANT):
        holdingHash[side][piece].extend(
            random.getrandbits(64) for _ in range(16))

# Extend the fixed Polyglot piece tables with per-square keys for the extra
# piece types (hawk and elephant), which the base format does not define.
for side in (WHITE, BLACK):
    for piece in (HAWK, ELEPHANT):
        pieceHashes[side].append([])
        pieceHashes[side][piece].extend(
            random.getrandbits(64) for _ in range(64))
| gpl-3.0 |
Evervolv/android_external_chromium_org | chrome/common/extensions/docs/server2/template_data_source_test.py | 23 | 5507 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import unittest
from api_data_source import APIDataSource
from compiled_file_system import CompiledFileSystem
from local_file_system import LocalFileSystem
from manifest_data_source import ManifestDataSource
from object_store_creator import ObjectStoreCreator
from reference_resolver import ReferenceResolver
from template_data_source import TemplateDataSource
from test_file_system import TestFileSystem
from test_util import DisableLogging
from third_party.handlebar import Handlebar
from servlet import Request
class _FakeFactory(object):
def __init__(self, input_dict=None):
if input_dict is None:
self._input_dict = {}
else:
self._input_dict = input_dict
def Create(self, *args, **optargs):
return self._input_dict
class TemplateDataSourceTest(unittest.TestCase):
def setUp(self):
    """Builds the fixture path and the fake data-source factories shared
    by every test in this case."""
    # Templates and expected output live under test_data/template_data_source.
    self._base_path = os.path.join(sys.path[0],
                                   'test_data',
                                   'template_data_source')
    self._fake_api_list_data_source_factory = _FakeFactory()
    self._fake_intro_data_source_factory = _FakeFactory()
    self._fake_samples_data_source_factory = _FakeFactory()
    self._fake_sidenav_data_source_factory = _FakeFactory()
    # A real ManifestDataSource wired to a _FakeFactory and the local
    # file system.
    self._manifest_data_source = ManifestDataSource(
        _FakeFactory(), LocalFileSystem.Create(), '', '')
def _ReadLocalFile(self, filename):
with open(os.path.join(self._base_path, filename), 'r') as f:
return f.read()
def _RenderTest(self, name, data_source):
template_name = name + '_tmpl.html'
template = Handlebar(self._ReadLocalFile(template_name))
self.assertEquals(
self._ReadLocalFile(name + '_expected.html'),
data_source.Render(template_name))
def _CreateTemplateDataSource(self, compiled_fs_factory, api_data=None):
if api_data is None:
api_data_factory = APIDataSource.Factory(compiled_fs_factory,
'fake_path',
_FakeFactory())
else:
api_data_factory = _FakeFactory(api_data)
reference_resolver_factory = ReferenceResolver.Factory(
api_data_factory,
self._fake_api_list_data_source_factory,
ObjectStoreCreator.ForTest())
@DisableLogging('error') # "was never set" error
def create_from_factory(factory):
path = 'extensions/foo'
return factory.Create(Request.ForTest(path), path)
return create_from_factory(TemplateDataSource.Factory(
api_data_factory,
self._fake_api_list_data_source_factory,
self._fake_intro_data_source_factory,
self._fake_samples_data_source_factory,
self._fake_sidenav_data_source_factory,
compiled_fs_factory,
reference_resolver_factory,
self._manifest_data_source,
'.',
'.',
''))
def testSimple(self):
self._base_path = os.path.join(self._base_path, 'simple')
fetcher = LocalFileSystem(self._base_path)
compiled_fs_factory = CompiledFileSystem.Factory(
fetcher,
ObjectStoreCreator.ForTest())
t_data_source = self._CreateTemplateDataSource(
compiled_fs_factory,
ObjectStoreCreator.ForTest())
template_a1 = Handlebar(self._ReadLocalFile('test1.html'))
self.assertEqual(template_a1.render({}, {'templates': {}}).text,
t_data_source.get('test1').render({}, {'templates': {}}).text)
template_a2 = Handlebar(self._ReadLocalFile('test2.html'))
self.assertEqual(template_a2.render({}, {'templates': {}}).text,
t_data_source.get('test2').render({}, {'templates': {}}).text)
@DisableLogging('warning')
def testNotFound(self):
self._base_path = os.path.join(self._base_path, 'simple')
fetcher = LocalFileSystem(self._base_path)
compiled_fs_factory = CompiledFileSystem.Factory(
fetcher,
ObjectStoreCreator.ForTest())
t_data_source = self._CreateTemplateDataSource(
compiled_fs_factory,
ObjectStoreCreator.ForTest())
self.assertEqual(None, t_data_source.get('junk.html'))
def testPartials(self):
self._base_path = os.path.join(self._base_path, 'partials')
fetcher = LocalFileSystem(self._base_path)
compiled_fs_factory = CompiledFileSystem.Factory(
fetcher,
ObjectStoreCreator.ForTest())
t_data_source = self._CreateTemplateDataSource(compiled_fs_factory)
self.assertEqual(
self._ReadLocalFile('test_expected.html'),
t_data_source.get('test_tmpl').render(
json.loads(self._ReadLocalFile('input.json')), t_data_source).text)
def testRender(self):
self._base_path = os.path.join(self._base_path, 'render')
fetcher = LocalFileSystem(self._base_path)
context = json.loads(self._ReadLocalFile('test1.json'))
compiled_fs_factory = CompiledFileSystem.Factory(
fetcher,
ObjectStoreCreator.ForTest())
self._RenderTest(
'test1',
self._CreateTemplateDataSource(
compiled_fs_factory,
api_data=json.loads(self._ReadLocalFile('test1.json'))))
self._RenderTest(
'test2',
self._CreateTemplateDataSource(
compiled_fs_factory,
api_data=json.loads(self._ReadLocalFile('test2.json'))))
# Allow running this test file directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
madhurauti/Map-Polygon | modules/hs/analysis/shp.py | 25 | 5225 |
"""
Healthscapes Geolytics Module
@author: Nico Preston <nicopresto@gmail.com>
@author: Colin Burreson <kasapo@gmail.com>
@author: Zack Krejci <zack.krejci@gmail.com>
@copyright: (c) 2010 Healthscapes
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import shapelib
import dbflib
from point import SpatialPointList, SpatialPoint
from polygon import SpatialPolygon
from utils import R
def pointsFromSHP (pointsFilename, datafield=()):
    """Load a point shapefile into a SpatialPointList.

    :param pointsFilename: path of the shapefile to read.
    :param datafield: iterable of DBF attribute names to copy onto each
        point; when empty, the companion DBF file is not opened at all.
        (Default changed from a mutable ``[]`` to ``()``; it was read-only
        before, so behaviour is unchanged.)
    :return: SpatialPointList of SpatialPoint(x, y, attributes).
    """
    shapes = shapelib.open(pointsFilename)
    # Only touch the DBF file when attributes were actually requested.
    data = dbflib.open(pointsFilename) if datafield else None
    points = SpatialPointList()
    for i in range(shapes.info()[0]):
        # Read the DBF record once per shape instead of once per field.
        if data is not None:
            record = data.read_record(i)
            attributes = dict((entry, record[entry]) for entry in datafield)
        else:
            attributes = {}
        # Read the shape object once and reuse it for both coordinates
        # (the original re-read it for x and again for y).
        x, y = shapes.read_object(i).vertices()[0][:2]
        points.append(SpatialPoint(x, y, attributes))
    return points
def polysFromSHP (polysFilename, datafield, polySubset):
    """Load polygons from a shapefile, optionally filtered by attribute.

    :param polysFilename: path of the shapefile to read.
    :param datafield: name of the DBF attribute attached to each polygon.
    :param polySubset: iterable of attribute values to keep; an empty
        subset keeps every polygon.
    :return: list of SpatialPolygon objects.
    """
    R.importLibrary('sp')
    shapes = shapelib.open(polysFilename)
    data = dbflib.open(polysFilename)
    polyList = []
    for i in range(shapes.info()[0]):
        shp = shapes.read_object(i)
        d = data.read_record(i)[datafield]
        # Membership test replaces the manual scan; this also appends each
        # polygon at most once even if polySubset contains duplicate values
        # (the old loop appended once per matching entry).
        if not polySubset or d in polySubset:
            polyList.append(SpatialPolygon(i, shp.vertices(), d))
    return polyList
| mit |
andersbll/vae_gan | model/aegan.py | 1 | 8690 | from copy import deepcopy
import numpy as np
import cudarray as ca
import deeppy as dp
import deeppy.expr as ex
from util import ScaleGradient, WeightedParameter
from ae import NLLNormal
class AEGAN(dp.base.Model, dp.base.CollectionMixin):
    """Autoencoder/GAN hybrid: the AE decoder doubles as the GAN generator
    and is trained against a shared discriminator.

    recon_depth > 0 measures reconstruction error in the feature space of
    the discriminator's first `recon_depth` layers instead of input space.
    """
    def __init__(self, encoder, latent_encoder, decoder, discriminator,
                 recon_depth=0, discriminate_sample_z=True,
                 discriminate_ae_recon=True, recon_vs_gan_weight=1e-2,
                 real_vs_gen_weight=0.5, eps=1e-3):
        self.encoder = encoder
        self.latent_encoder = latent_encoder
        self.discriminator = discriminator
        self.recon_depth = recon_depth
        self.discriminate_sample_z = discriminate_sample_z
        self.discriminate_ae_recon = discriminate_ae_recon
        self.recon_vs_gan_weight = recon_vs_gan_weight
        self.real_vs_gen_weight = real_vs_gen_weight
        self.eps = eps
        self.recon_error = NLLNormal()
        self.decoder = decoder
        self.collection = [self.encoder, self.latent_encoder, self.decoder,
                           self.discriminator]
        # Decoder parameters blend reconstruction and (negated) GAN
        # gradients according to recon_vs_gan_weight.
        decoder.params = [WeightedParameter(p, self.recon_vs_gan_weight,
                                            -(1.0-self.recon_vs_gan_weight))
                          for p in decoder.params]
        # Parameter-sharing copy of the decoder used on the GAN path, so the
        # generator gradient can be applied with opposite sign.
        self.decoder_neggrad = deepcopy(decoder)
        self.decoder_neggrad.params = [p.share() for p in decoder.params]
        self.collection += [self.decoder_neggrad]
        if recon_depth > 0:
            # Split the discriminator: the first recon_depth layers are
            # duplicated (with shared parameters) into discriminator_recon,
            # which is used only to compute the feature-space recon error.
            recon_layers = discriminator.collection[:recon_depth]
            print('Reconstruction error at layer #%i: %s'
                  % (recon_depth, recon_layers[-1].__class__.__name__))
            dis_layers = discriminator.collection[recon_depth:]
            discriminator.collection = recon_layers
            discriminator.params = [WeightedParameter(p, 1.0, 0.0)
                                    for p in discriminator.params]
            self.discriminator_recon = deepcopy(discriminator)
            self.discriminator_recon.params = [p.share() for p in
                                               discriminator.params]
            discriminator.collection += dis_layers
            self.collection += [self.discriminator_recon]
    def _encode_expr(self, x, batch_size):
        # Expression for deterministic encoding; the latent loss term is
        # discarded here.
        enc = self.encoder(x)
        z, encoder_loss = self.latent_encoder.encode(enc, batch_size)
        return z
    def _decode_expr(self, z, batch_size):
        # Expression for decoding latent codes back to input space.
        return self.decoder(z)
    def setup(self, x_shape):
        """Build the joint AE + GAN expression graph for input shape
        ``x_shape`` (batch dimension first)."""
        batch_size = x_shape[0]
        self.x_src = ex.Source(x_shape)
        loss = 0
        # Encode
        enc = self.encoder(self.x_src)
        z, self.encoder_loss = self.latent_encoder.encode(enc, batch_size)
        loss += self.encoder_loss
        # Decode
        x_tilde = self.decoder(z)
        if self.recon_depth > 0:
            # Reconstruction error in discriminator
            x = ex.Concatenate(axis=0)(x_tilde, self.x_src)
            d = self.discriminator_recon(x)
            d_x_tilde, d_x = ex.Slices([batch_size])(d)
            loss += self.recon_error(d_x_tilde, d_x)
        else:
            loss += self.recon_error(x_tilde, self.x_src)
        # Kill gradient from GAN loss to AE encoder
        z = ScaleGradient(0.0)(z)
        # Decode for GAN loss
        gen_size = 0
        if self.discriminate_ae_recon:
            gen_size += batch_size
            # Kill gradient from GAN loss to AE encoder
            # NOTE(review): z's gradient was already zeroed just above; this
            # second ScaleGradient(0.0) looks redundant but is harmless.
            z = ScaleGradient(0.0)(z)
        if self.discriminate_sample_z:
            gen_size += batch_size
            z_samples = self.latent_encoder.samples(batch_size)
            if self.discriminate_ae_recon:
                z = ex.Concatenate(axis=0)(z, z_samples)
            else:
                z = z_samples
        if gen_size == 0:
            raise ValueError('GAN does not receive any generated samples.')
        x = self.decoder_neggrad(z)
        x = ex.Concatenate(axis=0)(self.x_src, x)
        # Scale gradients to balance real vs. generated contributions to GAN
        # discriminator
        dis_batch_size = batch_size + gen_size
        real_weight = self.real_vs_gen_weight
        gen_weight = (1-self.real_vs_gen_weight) * float(batch_size)/gen_size
        weights = np.zeros((dis_batch_size,))
        weights[:batch_size] = real_weight
        weights[batch_size:] = gen_weight
        dis_weights = ca.array(weights)
        # x_shape**0 yields a vector of ones; replacing its first entry gives
        # a broadcastable shape (dis_batch_size, 1, 1, ...).
        shape = np.array(x_shape)**0
        shape[0] = dis_batch_size
        dis_weights_inv = ca.array(1.0 / np.reshape(weights, shape))
        x = ScaleGradient(dis_weights_inv)(x)
        # Discriminate
        d = self.discriminator(x)
        d = ex.Reshape((-1,))(d)
        d = ScaleGradient(dis_weights)(d)
        # sign/offset turn log(d) for the real rows into log(1-d) for the
        # generated rows: d*(-1) + 1 == 1 - d.
        sign = np.ones((gen_size + batch_size,), dtype=ca.float_)
        sign[batch_size:] = -1.0
        offset = np.zeros_like(sign)
        offset[batch_size:] = 1.0
        self.gan_loss = ex.log(d*sign + offset + self.eps)
        self.loss = ex.sum(loss) - ex.sum(self.gan_loss)
        self._graph = ex.graph.ExprGraph(self.loss)
        self._graph.setup()
        self.loss.grad_array = ca.array(1.0)
    @property
    def params(self):
        # Parameters grouped by sub-model: (encoder, decoder, discriminator).
        enc_params = self.encoder.params + self.latent_encoder.params
        dec_params = self.decoder.params
        dis_params = self.discriminator.params
        return enc_params, dec_params, dis_params
    def update(self, x):
        """Run one forward/backward pass on batch ``x``.

        Returns (d_x_loss, d_z_loss, encoder_loss): mean GAN loss on the
        real rows, on the generated rows, and the latent encoder loss.
        """
        self.x_src.array = x
        self._graph.fprop()
        self._graph.bprop()
        encoder_loss = 0
        d_x_loss = 0
        d_z_loss = 0
        encoder_loss = np.array(self.encoder_loss.array)
        gan_loss = -np.array(self.gan_loss.array)
        batch_size = x.shape[0]
        d_x_loss = float(np.mean(gan_loss[:batch_size]))
        d_z_loss = float(np.mean(gan_loss[batch_size:]))
        return d_x_loss, d_z_loss, encoder_loss
    def _batchwise(self, feed, expr_fun):
        # Evaluate expr_fun batch by batch over the feed and concatenate the
        # results, truncated back to the feed's sample count.
        feed = dp.Feed.from_any(feed)
        src = ex.Source(feed.x_shape)
        sink = expr_fun(src, feed.batch_size)
        graph = ex.graph.ExprGraph(sink)
        graph.setup()
        z = []
        for x, in feed.batches():
            src.array = x
            graph.fprop()
            z.append(np.array(sink.array))
        z = np.concatenate(z)[:feed.n_samples]
        return z
    def encode(self, feed):
        """Encode all samples in ``feed`` to latent space."""
        return self._batchwise(feed, self._encode_expr)
    def decode(self, feed):
        """Decode all latent codes in ``feed`` to input space."""
        return self._batchwise(feed, self._decode_expr)
class GradientDescent(dp.GradientDescent):
    """Gradient descent specialized for AEGAN: the generator/discriminator
    updates are skipped when their GAN losses stray outside an equilibrium
    band, to keep the adversarial game balanced."""
    def __init__(self, model, feed, learn_rule, margin=0.4, equilibrium=0.68):
        super(GradientDescent, self).__init__(model, feed, learn_rule)
        # margin=None disables the balancing heuristic entirely.
        self.margin = margin
        self.equilibrium = equilibrium
    def reset(self):
        self.feed.reset()
        self.model.setup(*self.feed.shapes)
        self.params_enc, self.params_dec, self.params_dis = self.model.params
        def states(params):
            # Shared parameters are updated through their owner, so they get
            # no learn-rule state of their own.
            return [self.learn_rule.init_state(p) for p in params
                    if not isinstance(p, dp.parameter.SharedParameter)]
        self.lstates_enc = states(self.params_enc)
        self.lstates_dec = states(self.params_dec)
        self.lstates_dis = states(self.params_dis)
    def train_epoch(self):
        """Train one epoch; returns the summed mean (real + fake + encoder)
        losses over the epoch."""
        batch_costs = []
        for batch in self.feed.batches():
            real_cost, fake_cost, encoder = self.model.update(*batch)
            batch_costs.append((real_cost, fake_cost, encoder))
            dec_update = True
            dis_update = True
            if self.margin is not None:
                # Discriminator too strong -> freeze it; too weak -> freeze
                # the decoder/generator; never freeze both at once.
                if real_cost < self.equilibrium - self.margin or \
                   fake_cost < self.equilibrium - self.margin:
                    dis_update = False
                if real_cost > self.equilibrium + self.margin or \
                   fake_cost > self.equilibrium + self.margin:
                    dec_update = False
                if not (dec_update or dis_update):
                    dec_update = True
                    dis_update = True
            # The encoder is always updated.
            for param, state in zip(self.params_enc, self.lstates_enc):
                self.learn_rule.step(param, state)
            if dec_update:
                for param, state in zip(self.params_dec, self.lstates_dec):
                    self.learn_rule.step(param, state)
            if dis_update:
                for param, state in zip(self.params_dis, self.lstates_dis):
                    self.learn_rule.step(param, state)
        real_cost = np.mean([cost[0] for cost in batch_costs])
        fake_cost = np.mean([cost[1] for cost in batch_costs])
        encoder = np.mean([c[2] for c in batch_costs])
        return real_cost + fake_cost + encoder
| mit |
LubyRuffy/spiderfoot | ext/stem/util/lru_cache.py | 16 | 7373 | # Drop in replace for python 3.2's collections.lru_cache, from...
# http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/
#
# ... which is under the MIT license. Stem users should *not* rely upon this
# module. It will be removed when we drop support for python 3.2 and below.
"""
Memoization decorator that caches a function's return value. If later called
with the same arguments then the cached value is returned rather than
reevaluated.
This is a a python 2.x port of `functools.lru_cache
<http://docs.python.org/3/library/functools.html#functools.lru_cache>`_. If
using python 3.2 or later you should use that instead.
"""
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple('CacheInfo', ['hits', 'misses', 'maxsize', 'currsize'])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = set([int, str, frozenset, type(None)]),
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
  """Least-recently-used cache decorator.
  If *maxsize* is set to None, the LRU features are disabled and the cache
  can grow without bound.
  If *typed* is True, arguments of different types will be cached separately.
  For example, f(3.0) and f(3) will be treated as distinct calls with
  distinct results.
  Arguments to the cached function must be hashable.
  View the cache statistics named tuple (hits, misses, maxsize, currsize) with
  f.cache_info(). Clear the cache and statistics with f.cache_clear().
  Access the underlying function with f.__wrapped__.
  See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
  """
  # Users should only access the lru_cache through its public API:
  # cache_info, cache_clear, and f.__wrapped__
  # The internals of the lru_cache are encapsulated for thread safety and
  # to allow the implementation to change (including a possible C version).
  def decorating_function(user_function):
    # One independent cache / lock / linked list per decorated function.
    cache = dict()
    stats = [0, 0]                  # make statistics updateable non-locally
    HITS, MISSES = 0, 1             # names for the stats fields
    make_key = _make_key
    cache_get = cache.get           # bound method to lookup key or return None
    _len = len                      # localize the global len() function
    lock = RLock()                  # because linkedlist updates aren't threadsafe
    root = []                       # root of the circular doubly linked list
    root[:] = [root, root, None, None]      # initialize by pointing to self
    nonlocal_root = [root]          # make updateable non-locally
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3    # names for the link fields
    # Three wrapper variants depending on maxsize: no caching at all,
    # unbounded caching, or bounded caching with LRU eviction.
    if maxsize == 0:
      def wrapper(*args, **kwds):
        # no caching, just do a statistics update after a successful call
        result = user_function(*args, **kwds)
        stats[MISSES] += 1
        return result
    elif maxsize is None:
      def wrapper(*args, **kwds):
        # simple caching without ordering or size limit
        key = make_key(args, kwds, typed)
        result = cache_get(key, root)  # root used here as a unique not-found sentinel
        if result is not root:
          stats[HITS] += 1
          return result
        result = user_function(*args, **kwds)
        cache[key] = result
        stats[MISSES] += 1
        return result
    else:
      def wrapper(*args, **kwds):
        # size limited caching that tracks accesses by recency
        key = make_key(args, kwds, typed) if kwds or typed else args
        with lock:
          link = cache_get(key)
          if link is not None:
            # record recent use of the key by moving it to the front of the list
            root, = nonlocal_root
            link_prev, link_next, key, result = link
            link_prev[NEXT] = link_next
            link_next[PREV] = link_prev
            last = root[PREV]
            last[NEXT] = root[PREV] = link
            link[PREV] = last
            link[NEXT] = root
            stats[HITS] += 1
            return result
        # NOTE: user_function runs outside the lock, so concurrent callers
        # may compute the same value; the duplicate is handled below.
        result = user_function(*args, **kwds)
        with lock:
          root, = nonlocal_root
          if key in cache:
            # getting here means that this same key was added to the
            # cache while the lock was released. since the link
            # update is already done, we need only return the
            # computed result and update the count of misses.
            pass
          elif _len(cache) >= maxsize:
            # use the old root to store the new key and result
            oldroot = root
            oldroot[KEY] = key
            oldroot[RESULT] = result
            # empty the oldest link and make it the new root
            root = nonlocal_root[0] = oldroot[NEXT]
            oldkey = root[KEY]
            root[KEY] = root[RESULT] = None
            # now update the cache dictionary for the new links
            del cache[oldkey]
            cache[key] = oldroot
          else:
            # put result in a new link at the front of the list
            last = root[PREV]
            link = [last, root, key, result]
            last[NEXT] = root[PREV] = cache[key] = link
          stats[MISSES] += 1
        return result
    def cache_info():
      """Report cache statistics"""
      with lock:
        return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
    def cache_clear():
      """Clear the cache and cache statistics"""
      with lock:
        cache.clear()
        root = nonlocal_root[0]
        root[:] = [root, root, None, None]
        stats[:] = [0, 0]
    wrapper.__wrapped__ = user_function
    wrapper.cache_info = cache_info
    wrapper.cache_clear = cache_clear
    return update_wrapper(wrapper, user_function)
  return decorating_function
| gpl-2.0 |
loco-odoo/localizacion_co | openerp/addons/marketing/__init__.py | 378 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AltSchool/django | tests/template_tests/test_context.py | 128 | 6990 | # -*- coding: utf-8 -*-
from django.http import HttpRequest
from django.template import (
Context, Engine, RequestContext, Template, Variable, VariableDoesNotExist,
)
from django.template.context import RenderContext
from django.test import RequestFactory, SimpleTestCase
class ContextTests(SimpleTestCase):
    """Tests for the template Context stack: push/pop, context-manager
    layering, setdefault, flattening, and equality semantics."""
    def test_context(self):
        c = Context({"a": 1, "b": "xyzzy"})
        self.assertEqual(c["a"], 1)
        self.assertEqual(c.push(), {})
        c["a"] = 2
        self.assertEqual(c["a"], 2)
        self.assertEqual(c.get("a"), 2)
        self.assertEqual(c.pop(), {"a": 2})
        self.assertEqual(c["a"], 1)
        self.assertEqual(c.get("foo", 42), 42)
    def test_push_context_manager(self):
        # Values set inside the with-block are discarded on exit.
        c = Context({"a": 1})
        with c.push():
            c['a'] = 2
            self.assertEqual(c['a'], 2)
        self.assertEqual(c['a'], 1)
        with c.push(a=3):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)
    def test_update_context_manager(self):
        c = Context({"a": 1})
        with c.update({}):
            c['a'] = 2
            self.assertEqual(c['a'], 2)
        self.assertEqual(c['a'], 1)
        with c.update({'a': 3}):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)
    def test_push_context_manager_with_context_object(self):
        c = Context({'a': 1})
        with c.push(Context({'a': 3})):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)
    def test_update_context_manager_with_context_object(self):
        c = Context({'a': 1})
        with c.update(Context({'a': 3})):
            self.assertEqual(c['a'], 3)
        self.assertEqual(c['a'], 1)
    def test_push_proper_layering(self):
        # Each pushed Context becomes its own dict layer on the stack.
        c = Context({'a': 1})
        c.push(Context({'b': 2}))
        c.push(Context({'c': 3, 'd': {'z': '26'}}))
        self.assertEqual(
            c.dicts,
            [
                {'False': False, 'None': None, 'True': True},
                {'a': 1},
                {'b': 2},
                {'c': 3, 'd': {'z': '26'}},
            ]
        )
    def test_update_proper_layering(self):
        c = Context({'a': 1})
        c.update(Context({'b': 2}))
        c.update(Context({'c': 3, 'd': {'z': '26'}}))
        self.assertEqual(
            c.dicts,
            [
                {'False': False, 'None': None, 'True': True},
                {'a': 1},
                {'b': 2},
                {'c': 3, 'd': {'z': '26'}},
            ]
        )
    def test_setdefault(self):
        c = Context()
        x = c.setdefault('x', 42)
        self.assertEqual(x, 42)
        self.assertEqual(c['x'], 42)
        x = c.setdefault('x', 100)
        self.assertEqual(x, 42)
        self.assertEqual(c['x'], 42)
    def test_resolve_on_context_method(self):
        """
        #17778 -- Variable shouldn't resolve RequestContext methods
        """
        empty_context = Context()
        with self.assertRaises(VariableDoesNotExist):
            Variable('no_such_variable').resolve(empty_context)
        with self.assertRaises(VariableDoesNotExist):
            Variable('new').resolve(empty_context)
        self.assertEqual(
            Variable('new').resolve(Context({'new': 'foo'})),
            'foo',
        )
    def test_render_context(self):
        test_context = RenderContext({'fruit': 'papaya'})
        # Test that push() limits access to the topmost dict
        test_context.push()
        test_context['vegetable'] = 'artichoke'
        self.assertEqual(list(test_context), ['vegetable'])
        self.assertNotIn('fruit', test_context)
        with self.assertRaises(KeyError):
            test_context['fruit']
        self.assertIsNone(test_context.get('fruit'))
    def test_flatten_context(self):
        a = Context()
        a.update({'a': 2})
        a.update({'b': 4})
        a.update({'c': 8})
        self.assertEqual(a.flatten(), {
            'False': False, 'None': None, 'True': True,
            'a': 2, 'b': 4, 'c': 8
        })
    def test_flatten_context_with_context(self):
        """
        Context.push() with a Context argument should work.
        """
        a = Context({'a': 2})
        a.push(Context({'z': '8'}))
        self.assertEqual(a.flatten(), {
            'False': False,
            'None': None,
            'True': True,
            'a': 2,
            'z': '8',
        })
    def test_context_comparable(self):
        """
        #21765 -- equality comparison should work
        """
        test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
        self.assertEqual(Context(test_data), Context(test_data))
        a = Context()
        b = Context()
        self.assertEqual(a, b)
        # update only a
        a.update({'a': 1})
        self.assertNotEqual(a, b)
        # update both to check regression
        a.update({'c': 3})
        b.update({'c': 3})
        self.assertNotEqual(a, b)
        # make contexts equals again
        b.update({'a': 1})
        self.assertEqual(a, b)
    def test_copy_request_context_twice(self):
        """
        #24273 -- Copy twice shouldn't raise an exception
        """
        RequestContext(HttpRequest()).new().new()
class RequestContextTests(SimpleTestCase):
    """Tests for RequestContext construction, rendering and equality."""
    def test_include_only(self):
        """
        #15721 -- ``{% include %}`` and ``RequestContext`` should work
        together.
        """
        engine = Engine(loaders=[
            ('django.template.loaders.locmem.Loader', {
                'child': '{{ var|default:"none" }}',
            }),
        ])
        request = RequestFactory().get('/')
        ctx = RequestContext(request, {'var': 'parent'})
        self.assertEqual(engine.from_string('{% include "child" %}').render(ctx), 'parent')
        self.assertEqual(engine.from_string('{% include "child" only %}').render(ctx), 'none')
    def test_stack_size(self):
        """
        #7116 -- Optimize RequestContext construction
        """
        request = RequestFactory().get('/')
        ctx = RequestContext(request, {})
        # The stack should now contain 4 items:
        # [builtins, supplied context, context processor, empty dict]
        self.assertEqual(len(ctx.dicts), 4)
    def test_context_comparable(self):
        # Create an engine without any context processors.
        test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
        # test comparing RequestContext to prevent problems if somebody
        # adds __eq__ in the future
        request = RequestFactory().get('/')
        self.assertEqual(
            RequestContext(request, dict_=test_data),
            RequestContext(request, dict_=test_data),
        )
    def test_modify_context_and_render(self):
        template = Template('{{ foo }}')
        request = RequestFactory().get('/')
        context = RequestContext(request, {})
        context['foo'] = 'foo'
        self.assertEqual(template.render(context), 'foo')
| bsd-3-clause |
ingadhoc/odoo | openerp/addons/test_impex/tests/test_import.py | 231 | 30712 | # -*- coding: utf-8 -*-
import openerp.modules.registry
import openerp
from openerp.tests import common
from openerp.tools.misc import mute_logger
def ok(n):
    """ Successful import of ``n`` records
    :param int n: number of records which should have been imported
    """
    # Result quadruple: (imported, skipped, failed, warnings).
    imported, skipped, failed, warnings = n, 0, 0, 0
    return imported, skipped, failed, warnings
def error(row, message, record=None, **kwargs):
    """ Failed import of the record ``record`` at line ``row``, with the error
    message ``message``
    :param str message:
    :param dict record:
    """
    # Merge the base record (if any) with the extra keyword overrides
    # without mutating the caller's dict.
    merged = dict(record or {})
    merged.update(kwargs)
    return -1, merged, "Line %d : %s" % (row, message), ''
def values(seq, field='value'):
    """Extract ``field`` from every mapping in ``seq``, in order."""
    result = []
    for item in seq:
        result.append(item[field])
    return result
class ImporterCase(common.TransactionCase):
    """Base class for the import tests below: provides import/read/browse
    helpers bound to ``model_name``, all running as the superuser."""
    # Subclasses must override with the model to import into.
    model_name = False
    def __init__(self, *args, **kwargs):
        super(ImporterCase, self).__init__(*args, **kwargs)
        self.model = None
    def setUp(self):
        super(ImporterCase, self).setUp()
        self.model = self.registry(self.model_name)
    def import_(self, fields, rows, context=None):
        # Returns import_data's (count, record, message, ...) result tuple.
        return self.model.import_data(
            self.cr, openerp.SUPERUSER_ID, fields, rows, context=context)
    def read(self, fields=('value',), domain=(), context=None):
        return self.model.read(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            fields=fields, context=context)
    def browse(self, domain=(), context=None):
        return self.model.browse(
            self.cr, openerp.SUPERUSER_ID,
            self.model.search(self.cr, openerp.SUPERUSER_ID, domain, context=context),
            context=context)
    def xid(self, record):
        """Return the record's external id, creating one under the
        ``__test__`` pseudo-module if none exists yet."""
        ModelData = self.registry('ir.model.data')
        ids = ModelData.search(
            self.cr, openerp.SUPERUSER_ID,
            [('model', '=', record._name), ('res_id', '=', record.id)])
        if ids:
            d = ModelData.read(
                self.cr, openerp.SUPERUSER_ID, ids, ['name', 'module'])[0]
            if d['module']:
                return '%s.%s' % (d['module'], d['name'])
            return d['name']
        name = record.name_get()[0][1]
        # fix dotted name_get results, otherwise xid lookups blow up
        name = name.replace('.', '-')
        ModelData.create(self.cr, openerp.SUPERUSER_ID, {
            'name': name,
            'model': record._name,
            'res_id': record.id,
            'module': '__test__'
        })
        return '__test__.' + name
class test_ids_stuff(ImporterCase):
    """Creating/updating records via database ids (``.id``) and external
    ids (``id``) during import."""
    model_name = 'export.integer'
    def test_create_with_id(self):
        # A database id that doesn't exist cannot be "created".
        self.assertEqual(
            self.import_(['.id', 'value'], [['42', '36']]),
            error(1, u"Unknown database identifier '42'"))
    def test_create_with_xid(self):
        self.assertEqual(
            self.import_(['id', 'value'], [['somexmlid', '42']]),
            ok(1))
        self.assertEqual(
            'somexmlid',
            self.xid(self.browse()[0]))
    def test_update_with_id(self):
        id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            36,
            self.model.browse(self.cr, openerp.SUPERUSER_ID, id).value)
        self.assertEqual(
            self.import_(['.id', 'value'], [[str(id), '42']]),
            ok(1))
        self.assertEqual(
            [42], # updated value to imported
            values(self.read()))
    def test_update_with_xid(self):
        self.import_(['id', 'value'], [['somexmlid', '36']])
        self.assertEqual([36], values(self.read()))
        self.import_(['id', 'value'], [['somexmlid', '1234567']])
        self.assertEqual([1234567], values(self.read()))
    def test_wrong_format(self):
        self.assertEqual(
            self.import_(['value'], [['50%']]),
            error(1, u"'50%' does not seem to be an integer for field 'unknown'"))
class test_boolean_field(ImporterCase):
    """Boolean import: canonical false spellings and the catch-all
    "any other non-empty value is True" rule."""
    model_name = 'export.boolean'
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_exported(self):
        self.assertEqual(
            self.import_(['value'], [
                ['False'],
                ['True'],
            ]),
            ok(2))
        records = self.read()
        self.assertEqual([
            False,
            True,
        ], values(records))
    def test_falses(self):
        self.assertEqual(
            self.import_(['value'], [
                [u'0'],
                [u'no'],
                [u'false'],
                [u'FALSE'],
                [u''],
            ]),
            ok(5))
        self.assertEqual([
            False,
            False,
            False,
            False,
            False,
        ],
        values(self.read()))
    def test_trues(self):
        # Anything not recognized as false imports as True.
        self.assertEqual(
            self.import_(['value'], [
                ['off'],
                ['None'],
                ['nil'],
                ['()'],
                ['f'],
                ['#f'],
                # Problem: OpenOffice (and probably excel) output localized booleans
                ['VRAI'],
                [u'OFF'],
                [u'是的'],
                ['!&%#${}'],
                ['%(field)s'],
            ]),
            ok(11))
        self.assertEqual(
            [True] * 11,
            values(self.read()))
class test_integer_field(ImporterCase):
    """Integer import: empty/zero handling, signed ranges, overflow and
    non-numeric rejection."""
    model_name = 'export.integer'
    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        # 0 reads back as False (falsy integer field).
        self.assertEqual(
            self.import_(['value'], [['0']]),
            ok(1))
        self.assertEqual(
            self.import_(['value'], [['-0']]),
            ok(1))
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['1'],
                ['42'],
                [str(2**31-1)],
                ['12345678']
            ]),
            ok(4))
        self.assertEqual([
            1, 42, 2**31-1, 12345678
        ], values(self.read()))
    def test_negatives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['-1'],
                ['-42'],
                [str(-(2**31 - 1))],
                [str(-(2**31))],
                ['-12345678']
            ]),
            ok(5))
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678
        ], values(self.read()))
    @mute_logger('openerp.sql_db')
    def test_out_of_range(self):
        # Values outside the 32-bit SQL integer range fail at the database.
        self.assertEqual(
            self.import_(['value'], [[str(2**31)]]),
            error(1, "integer out of range\n"))
        # auto-rollbacks if error is in process_liness, but not during
        # ir.model.data write. Can differentiate because former ends lines
        # error lines with "!"
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value'], [[str(-2**32)]]),
            error(1, "integer out of range\n"))
    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['zorglub']]),
            error(1, u"'zorglub' does not seem to be an integer for field 'unknown'"))
class test_float_field(ImporterCase):
    """Float import: empty/zero handling, large magnitudes, fractions and
    non-numeric rejection."""
    model_name = 'export.float'
    def test_none(self):
        self.assertEqual(
            self.import_(['value'], []),
            ok(0))
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual(
            [False],
            values(self.read()))
    def test_zero(self):
        self.assertEqual(
            self.import_(['value'], [['0']]),
            ok(1))
        self.assertEqual(
            self.import_(['value'], [['-0']]),
            ok(1))
        self.assertEqual([False, False], values(self.read()))
    def test_positives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['1'],
                ['42'],
                [str(2**31-1)],
                ['12345678'],
                [str(2**33)],
                ['0.000001'],
            ]),
            ok(6))
        self.assertEqual([
            1, 42, 2**31-1, 12345678, 2.0**33, .000001
        ], values(self.read()))
    def test_negatives(self):
        self.assertEqual(
            self.import_(['value'], [
                ['-1'],
                ['-42'],
                [str(-2**31 + 1)],
                [str(-2**31)],
                ['-12345678'],
                [str(-2**33)],
                ['-0.000001'],
            ]),
            ok(7))
        self.assertEqual([
            -1, -42, -(2**31 - 1), -(2**31), -12345678, -2.0**33, -.000001
        ], values(self.read()))
    def test_nonsense(self):
        self.assertEqual(
            self.import_(['value'], [['foobar']]),
            error(1, u"'foobar' does not seem to be a number for field 'unknown'"))
class test_string_field(ImporterCase):
    """Bounded string import: values are silently truncated to the field's
    size limit (16 characters here)."""
    model_name = 'export.string.bounded'
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        self.assertEqual(
            self.import_(['value'], [
                [u'foobar'],
                [u'foobarbaz'],
                [u'Með suð í eyrum við spilum endalaust'],
                [u"People 'get' types. They use them all the time. Telling "
                 u"someone he can't pound a nail with a banana doesn't much "
                 u"surprise him."]
            ]),
            ok(4))
        self.assertEqual([
            u"foobar",
            u"foobarbaz",
            u"Með suð í eyrum ",
            u"People 'get' typ",
        ], values(self.read()))
class test_unbound_string_field(ImporterCase):
    """ Import tests for an unbounded char field: no truncation occurs """
    model_name = 'export.string'
    def test_imported(self):
        self.assertEqual(
            self.import_(['value'], [
                [u'í dag viðrar vel til loftárása'],
                # ackbar.jpg
                [u"If they ask you about fun, you tell them – fun is a filthy"
                 u" parasite"]
            ]),
            ok(2))
        # values read back in full, unmodified
        self.assertEqual([
            u"í dag viðrar vel til loftárása",
            u"If they ask you about fun, you tell them – fun is a filthy parasite"
        ], values(self.read()))
class test_text(ImporterCase):
    """ Import tests for text fields: arbitrary-length multi-line unicode
        content must round-trip unchanged.
    """
    model_name = 'export.text'
    def test_empty(self):
        self.assertEqual(
            self.import_(['value'], [['']]),
            ok(1))
        self.assertEqual([False], values(self.read()))
    def test_imported(self):
        # long unicode text including embedded newlines
        s = (u"Breiðskífa er notað um útgefna hljómplötu sem inniheldur "
             u"stúdíóupptökur frá einum flytjanda. Breiðskífur eru oftast "
             u"milli 25-80 mínútur og er lengd þeirra oft miðuð við 33⅓ "
             u"snúninga 12 tommu vínylplötur (sem geta verið allt að 30 mín "
             u"hvor hlið).\n\nBreiðskífur eru stundum tvöfaldar og eru þær þá"
             u" gefnar út á tveimur geisladiskum eða tveimur vínylplötum.")
        self.assertEqual(
            self.import_(['value'], [[s]]),
            ok(1))
        self.assertEqual([s], values(self.read()))
class test_selection(ImporterCase):
    """ Import tests for static selection fields: cells may hold either the
        (possibly translated) label or the internal selection value.
    """
    model_name = 'export.selection'
    # (source label, fr_FR translation) pairs installed by the translated test
    translations_fr = [
        ("Qux", "toto"),
        ("Bar", "titi"),
        ("Foo", "tete"),
    ]
    def test_imported(self):
        # labels map to their selection values; '2' is accepted as a raw value
        self.assertEqual(
            self.import_(['value'], [
                ['Qux'],
                ['Bar'],
                ['Foo'],
                ['2'],
            ]),
            ok(4))
        self.assertEqual([3, 2, 1, 2], values(self.read()))
    def test_imported_translated(self):
        """ Labels translated into the context language are matched too """
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
                ['titi'],
            ], context={'lang': 'fr_FR'}),
            ok(3))
        self.assertEqual([3, 1, 2], values(self.read()))
        # the untranslated source label still imports under a translated context
        self.assertEqual(
            self.import_(['value'], [['Foo']], context={'lang': 'fr_FR'}),
            ok(1))
    def test_invalid(self):
        # unknown label => row error; rollback needed before the next attempt
        self.assertEqual(
            self.import_(['value'], [['Baz']]),
            error(1, u"Value 'Baz' not found in selection field 'unknown'"))
        self.cr.rollback()
        # non-string cell values are stringified before matching
        self.assertEqual(
            self.import_(['value'], [[42]]),
            error(1, u"Value '42' not found in selection field 'unknown'"))
class test_selection_function(ImporterCase):
    """ Import tests for selection fields whose options come from a function """
    model_name = 'export.selection.function'
    # (source label, fr_FR translation) pairs installed by test_translated
    # NOTE(review): translations are registered under 'export.selection,value'
    # rather than 'export.selection.function,value' — looks inconsistent with
    # model_name; confirm against the translation lookup mechanism
    translations_fr = [
        ("Corge", "toto"),
        ("Grault", "titi"),
        ("Wheee", "tete"),
        ("Moog", "tutu"),
    ]
    def test_imported(self):
        """ import uses fields_get, so translates import label (may or may not
        be good news) *and* serializes the selection function to reverse it:
        import does not actually know that the selection field uses a function
        """
        # NOTE: conflict between a value and a label => ?
        self.assertEqual(
            self.import_(['value'], [
                ['3'],
                ["Grault"],
            ]),
            ok(2))
        self.assertEqual(
            [3, 1],
            values(self.read()))
    def test_translated(self):
        """ Expects output of selection function returns translated labels
        """
        self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
            'name': u'Français',
            'code': 'fr_FR',
            'translatable': True,
            'date_format': '%d.%m.%Y',
            'decimal_point': ',',
            'thousands_sep': ' ',
        })
        Translations = self.registry('ir.translation')
        for source, value in self.translations_fr:
            Translations.create(self.cr, openerp.SUPERUSER_ID, {
                'name': 'export.selection,value',
                'lang': 'fr_FR',
                'type': 'selection',
                'src': source,
                'value': value
            })
        self.assertEqual(
            self.import_(['value'], [
                ['toto'],
                ['tete'],
            ], context={'lang': 'fr_FR'}),
            ok(2))
        # the untranslated source label still imports under a translated context
        self.assertEqual(
            self.import_(['value'], [['Wheee']], context={'lang': 'fr_FR'}),
            ok(1))
class test_m2o(ImporterCase):
    """ Import tests for many2one fields: linking by display name ('value'),
        by external id ('value/id') and by database id ('value/.id'),
        plus the corresponding failure modes.
    """
    model_name = 'export.many2one'
    def test_by_name(self):
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        # get its name
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        self.assertEqual(
            self.import_(['value'], [
                # import by name_get
                [name1],
                [name1],
                [name2],
            ]),
            ok(3))
        # correct ids assigned to corresponding records
        self.assertEqual([
            (integer_id1, name1),
            (integer_id1, name1),
            (integer_id2, name2),],
            values(self.read()))
    def test_by_xid(self):
        """ 'value/id' columns resolve through external (XML) ids """
        ExportInteger = self.registry('export.integer')
        integer_id = ExportInteger.create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        xid = self.xid(ExportInteger.browse(
            self.cr, openerp.SUPERUSER_ID, [integer_id])[0])
        self.assertEqual(
            self.import_(['value/id'], [[xid]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_id(self):
        """ 'value/.id' columns resolve through raw database ids """
        integer_id = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        self.assertEqual(
            self.import_(['value/.id'], [[integer_id]]),
            ok(1))
        b = self.browse()
        self.assertEqual(42, b[0].value.value)
    def test_by_names(self):
        """ When several records share a name, the first match wins silently """
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        name1 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id1]))[integer_id1]
        name2 = dict(self.registry('export.integer').name_get(
            self.cr, openerp.SUPERUSER_ID,[integer_id2]))[integer_id2]
        # names should be the same
        self.assertEqual(name1, name2)
        self.assertEqual(
            self.import_(['value'], [[name2]]),
            ok(1))
        # resolves to the first record carrying that name
        self.assertEqual([
            (integer_id1, name1)
        ], values(self.read()))
    def test_fail_by_implicit_id(self):
        """ Can't implicitly import records by id
        """
        # create integer objects
        integer_id1 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 42})
        integer_id2 = self.registry('export.integer').create(
            self.cr, openerp.SUPERUSER_ID, {'value': 36})
        self.assertEqual(
            self.import_(['value'], [
                # import by id, without specifying it
                [integer_id1],
                [integer_id2],
                [integer_id1],
            ]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % integer_id1))
    def test_sub_field(self):
        """ Does not implicitly create the record, does not warn that you can't
        import m2o subfields (at all)...
        """
        self.assertEqual(
            self.import_(['value/value'], [['42']]),
            error(1, u"Can not create Many-To-One records indirectly, import the field separately"))
    def test_fail_noids(self):
        # each failing lookup style needs a rollback before trying the next
        self.assertEqual(
            self.import_(['value'], [['nameisnoexist:3']]),
            error(1, u"No matching record found for name 'nameisnoexist:3' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/id'], [['noxidhere']]),
            error(1, u"No matching record found for external id 'noxidhere' in field 'unknown'"))
        self.cr.rollback()
        self.assertEqual(
            self.import_(['value/.id'], [[66]]),
            error(1, u"No matching record found for database id '66' in field 'unknown'"))
class test_m2m(ImporterCase):
    """ Import tests for many2many fields: comma-separated lists of database
        ids, external ids or names, and replacement of existing links.
    """
    model_name = 'export.many2many'
    # apparently, one and only thing which works is a
    # csv_internal_sep-separated list of ids, xids, or names (depending if
    # m2m/.id, m2m/id or m2m[/anythingelse]
    def test_ids(self):
        id1 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        id5 = self.registry('export.many2many.other').create(
                self.cr, openerp.SUPERUSER_ID, {'value': 99, 'str': 'record4'})
        self.assertEqual(
            self.import_(['value/.id'], [
                ['%d,%d' % (id1, id2)],
                ['%d,%d,%d' % (id1, id3, id4)],
                ['%d,%d,%d' % (id1, id2, id3)],
                ['%d' % id5]
            ]),
            ok(4))
        ids = lambda records: [record.id for record in records]
        b = self.browse()
        # spot-check first and third imported rows (links and values)
        self.assertEqual(ids(b[0].value), [id1, id2])
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(ids(b[2].value), [id1, id2, id3])
        self.assertEqual(values(b[2].value), [3, 44, 84])
    def test_noids(self):
        self.assertEqual(
            self.import_(['value/.id'], [['42']]),
            error(1, u"No matching record found for database id '42' in field 'unknown'"))
    def test_xids(self):
        """ m2m links can be provided as comma-separated external ids """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        self.assertEqual(
            self.import_(['value/id'], [
                ['%s,%s' % (self.xid(records[0]), self.xid(records[1]))],
                ['%s' % self.xid(records[3])],
                ['%s,%s' % (self.xid(records[2]), self.xid(records[1]))],
            ]),
            ok(3))
        b = self.browse()
        self.assertEqual(values(b[0].value), [3, 44])
        self.assertEqual(values(b[2].value), [44, 84])
    def test_noxids(self):
        self.assertEqual(
            self.import_(['value/id'], [['noxidforthat']]),
            error(1, u"No matching record found for external id 'noxidforthat' in field 'unknown'"))
    def test_names(self):
        """ m2m links can be provided as comma-separated display names """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        records = M2O_o.browse(self.cr, openerp.SUPERUSER_ID, [id1, id2, id3, id4])
        name = lambda record: record.name_get()[0][1]
        self.assertEqual(
            self.import_(['value'], [
                ['%s,%s' % (name(records[1]), name(records[2]))],
                ['%s,%s,%s' % (name(records[0]), name(records[1]), name(records[2]))],
                ['%s,%s' % (name(records[0]), name(records[3]))],
            ]),
            ok(3))
        b = self.browse()
        self.assertEqual(values(b[1].value), [3, 44, 84])
        self.assertEqual(values(b[2].value), [3, 9])
    def test_nonames(self):
        self.assertEqual(
            self.import_(['value'], [['wherethem2mhavenonames']]),
            error(1, u"No matching record found for name 'wherethem2mhavenonames' in field 'unknown'"))
    def test_import_to_existing(self):
        """ Re-importing the same external id replaces the m2m links """
        M2O_o = self.registry('export.many2many.other')
        id1 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 3, 'str': 'record0'})
        id2 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 44, 'str': 'record1'})
        id3 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 84, 'str': 'record2'})
        id4 = M2O_o.create(self.cr, openerp.SUPERUSER_ID, {'value': 9, 'str': 'record3'})
        xid = 'myxid'
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id1, id2)]]),
            ok(1))
        self.assertEqual(
            self.import_(['id', 'value/.id'], [[xid, '%d,%d' % (id3, id4)]]),
            ok(1))
        b = self.browse()
        self.assertEqual(len(b), 1)
        # TODO: replacement of existing m2m values is correct?
        self.assertEqual(values(b[0].value), [84, 9])
class test_o2m(ImporterCase):
    """ Import tests for one2many fields: linking children by database id,
        continuation rows (empty 'const' cell extends the previous record),
        and the unsupported name-based linking.
    """
    model_name = 'export.one2many'

    def test_name_get(self):
        """ o2m cells are not resolvable by display name """
        s = u'Java is a DSL for taking large XML files and converting them to' \
            u' stack traces'
        self.assertEqual(
            self.import_(
                ['const', 'value'],
                [['5', s]]),
            error(1, u"No matching record found for name '%s' in field 'unknown'" % s))

    def test_single(self):
        """ A single row creates the parent and one child """
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63']
            ]),
            ok(1))
        (b,) = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.value), [63])

    def test_multicore(self):
        """ Rows with a non-empty 'const' each create their own parent """
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['6', '64'],
            ]),
            ok(2))
        b1, b2 = self.browse()
        self.assertEqual(b1.const, 5)
        self.assertEqual(values(b1.value), [63])
        self.assertEqual(b2.const, 6)
        self.assertEqual(values(b2.value), [64])

    def test_multisub(self):
        """ Rows with an empty 'const' attach children to the previous parent """
        self.assertEqual(
            self.import_(['const', 'value/value'], [
                ['5', '63'],
                ['', '64'],
                ['', '65'],
                ['', '66'],
            ]),
            ok(4))
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])

    def test_multi_subfields(self):
        """ Several o2m sub-columns are merged into the same child records """
        self.assertEqual(
            self.import_(['value/str', 'const', 'value/value'], [
                ['this', '5', '63'],
                ['is', '', '64'],
                ['the', '', '65'],
                ['rhythm', '', '66'],
            ]),
            ok(4))
        (b,) = self.browse()
        self.assertEqual(values(b.value), [63, 64, 65, 66])
        self.assertEqual(
            values(b.value, 'str'),
            'this is the rhythm'.split())

    def test_link_inline(self):
        """ Multiple ids in a single 'value/.id' cell are not supported and
            surface as an int() conversion failure.
        """
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        try:
            self.import_(['const', 'value/.id'], [
                ['42', '%d,%d' % (id1, id2)]
            ])
        # BUGFIX: "except ValueError, e" is Python-2-only syntax (a
        # SyntaxError on Python 3); "as" is valid from Python 2.6 onwards
        except ValueError as e:
            # should be Exception(Database ID doesn't exist: export.one2many.child : $id1,$id2)
            # NOTE(review): if no exception is raised, this test silently
            # passes without asserting anything — confirm that is intended
            self.assertIs(type(e), ValueError)
            self.assertEqual(
                e.args[0],
                "invalid literal for int() with base 10: '%d,%d'" % (id1, id2))

    def test_link(self):
        """ Existing children are linked (one id per row) and re-parented """
        id1 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = self.registry('export.one2many.child').create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        self.assertEqual(
            self.import_(['const', 'value/.id'], [
                ['42', str(id1)],
                ['', str(id2)],
            ]),
            ok(2))
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        # automatically forces link between core record and o2ms
        self.assertEqual(values(b.value), [109, 262])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])

    def test_link_2(self):
        """ Linking by id and updating a sub-field in the same import """
        O2M_c = self.registry('export.one2many.child')
        id1 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Bf', 'value': 109
        })
        id2 = O2M_c.create(self.cr, openerp.SUPERUSER_ID, {
            'str': 'Me', 'value': 262
        })
        self.assertEqual(
            self.import_(['const', 'value/.id', 'value/value'], [
                ['42', str(id1), '1'],
                ['', str(id2), '2'],
            ]),
            ok(2))
        [b] = self.browse()
        self.assertEqual(b.const, 42)
        self.assertEqual(values(b.value), [1, 2])
        self.assertEqual(values(b.value, field='parent_id'), [b, b])
class test_o2m_multiple(ImporterCase):
    """ Import tests for a record with two one2many fields: continuation
        rows may fill either child column independently.
    """
    model_name = 'export.one2many.multiple'
    def test_multi_mixed(self):
        # both child columns filled on the same continuation rows
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', '21'],
                ['', '12', '22'],
                ['', '13', '23'],
                ['', '14', ''],
            ]),
            ok(4))
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi(self):
        # child columns filled on partially disjoint continuation rows
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', '21'],
                ['', '12', ''],
                ['', '13', ''],
                ['', '14', ''],
                ['', '', '22'],
                ['', '', '23'],
            ]),
            ok(6))
        [b] = self.browse()
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
    def test_multi_fullsplit(self):
        # child columns filled on fully disjoint continuation rows
        self.assertEqual(
            self.import_(['const', 'child1/value', 'child2/value'], [
                ['5', '11', ''],
                ['', '12', ''],
                ['', '13', ''],
                ['', '14', ''],
                ['', '', '21'],
                ['', '', '22'],
                ['', '', '23'],
            ]),
            ok(7))
        [b] = self.browse()
        self.assertEqual(b.const, 5)
        self.assertEqual(values(b.child1), [11, 12, 13, 14])
        self.assertEqual(values(b.child2), [21, 22, 23])
# function, related, reference: written to db as-is...
# => function uses @type for value coercion/conversion
| agpl-3.0 |
dmitry-sobolev/ansible | lib/ansible/modules/cloud/univention/udm_dns_record.py | 69 | 5961 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module maturity/support metadata consumed by Ansible's documentation tooling
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_dns_record
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage dns entries on a univention corporate server
description:
- "This module allows to manage dns records on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
- Univention
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the dns record is present or not.
name:
required: true
description:
- "Name of the record, this is also the DNS record. E.g. www for
www.example.com."
zone:
required: true
description:
- Corresponding DNS zone for this record, e.g. example.com.
type:
required: true
choices: [ host_record, alias, ptr_record, srv_record, txt_record ]
description:
- "Define the record type. C(host_record) is a A or AAAA record,
C(alias) is a CNAME, C(ptr_record) is a PTR record, C(srv_record)
is a SRV record and C(txt_record) is a TXT record."
data:
required: false
default: []
description:
- "Additional data for this record, e.g. ['a': '192.0.2.1'].
Required if C(state=present)."
'''
# Usage examples rendered by ansible-doc.
# BUGFIX: the example previously invoked `udm_dns_zone`, but this module is
# `udm_dns_record` — copy-paste error from the sibling zone module.
EXAMPLES = '''
# Create a DNS record on a UCS
- udm_dns_record:
    name: www
    zone: example.com
    type: host_record
    data:
      - a: 192.0.2.1
'''

RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
config,
uldap,
)
# Optional dependency: the UCS python bindings only exist on a Univention
# host. Record their availability so main() can fail with a clear message.
try:
    from univention.admin.handlers.dns import forward_zone, reverse_zone
except ImportError:
    HAVE_UNIVENTION = False
else:
    HAVE_UNIVENTION = True
def main():
    """Ansible entry point: create, update or remove a DNS record on a UCS.

    Reads ``type``/``zone``/``name``/``data``/``state`` from the module
    arguments, locates any existing ``dNSZone`` LDAP object, and creates,
    edits or removes it accordingly. Exits via ``module.exit_json`` /
    ``module.fail_json``; supports check mode.
    """
    module = AnsibleModule(
        argument_spec = dict(
            type    = dict(required=True,
                           type='str'),
            zone    = dict(required=True,
                           type='str'),
            name    = dict(required=True,
                           type='str'),
            data    = dict(default=[],
                           type='dict'),
            state   = dict(default='present',
                           choices=['present', 'absent'],
                           type='str')
        ),
        supports_check_mode=True,
        required_if = ([
            ('state', 'present', ['data'])
        ])
    )
    if not HAVE_UNIVENTION:
        module.fail_json(msg="This module requires univention python bindings")

    # local renamed from `type` so the builtin is not shadowed
    record_type = module.params['type']
    zone        = module.params['zone']
    name        = module.params['name']
    data        = module.params['data']
    state       = module.params['state']
    changed     = False
    # BUGFIX: `diff` must be defined for every code path — exit_json()
    # references it unconditionally, which raised a NameError whenever
    # state == 'absent' (or 'present' failed before computing the diff).
    diff        = []

    obj = list(ldap_search(
        '(&(objectClass=dNSZone)(zoneName={})(relativeDomainName={}))'.format(zone, name),
        attr=['dNSZone']
    ))

    exists = bool(len(obj))
    container = 'zoneName={},cn=dns,{}'.format(zone, base_dn())
    dn = 'relativeDomainName={},{}'.format(name, container)

    if state == 'present':
        try:
            if not exists:
                # A new record needs its superordinate zone object,
                # which may be a forward or a reverse zone.
                so = forward_zone.lookup(
                    config(),
                    uldap(),
                    '(zone={})'.format(zone),
                    scope='domain',
                ) or reverse_zone.lookup(
                    config(),
                    uldap(),
                    '(zone={})'.format(zone),
                    scope='domain',
                )
                obj = umc_module_for_add('dns/{}'.format(record_type), container, superordinate=so[0])
            else:
                obj = umc_module_for_edit('dns/{}'.format(record_type), dn)
            obj['name'] = name
            for k, v in data.items():
                obj[k] = v
            diff = obj.diff()
            # reuse the computed diff instead of calling obj.diff() twice
            changed = diff != []
            if not module.check_mode:
                if not exists:
                    obj.create()
                else:
                    obj.modify()
        except BaseException as e:
            module.fail_json(
                msg='Creating/editing dns entry {} in {} failed: {}'.format(name, container, e)
            )

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('dns/{}'.format(record_type), dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except BaseException as e:
            module.fail_json(
                msg='Removing dns entry {} in {} failed: {}'.format(name, container, e)
            )

    module.exit_json(
        changed=changed,
        name=name,
        diff=diff,
        container=container
    )
# Standard Ansible module entry point
if __name__ == '__main__':
    main()
| gpl-3.0 |
waidyanatha/sambro-eden | modules/s3db/stats.py | 2 | 69932 | # -*- coding: utf-8 -*-
""" Sahana Eden Stats Model
@copyright: 2012-13 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import division
__all__ = ["S3StatsModel",
"S3StatsDemographicModel",
"S3StatsPeopleModel",
"S3StatsTrainedPeopleModel",
"stats_demographic_data_controller",
]
from datetime import date
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3AddResourceLink
# =============================================================================
class S3StatsModel(S3Model):
    """
        Statistics Data

        Registers the super-entities shared by all statistics models:
        stats_parameter (what is measured), stats_data (a measurement)
        and stats_source (where a measurement came from).
    """

    names = ["stats_parameter",
             "stats_data",
             "stats_source",
             "stats_source_superlink",
             "stats_source_id",
             #"stats_source_details",
             "stats_quantile",
             ]

    def model(self):
        """ Define the super-entities & return shared names to s3.* """

        T = current.T
        db = current.db

        super_entity = self.super_entity
        super_link = self.super_link

        #----------------------------------------------------------------------
        # Super entity: stats_parameter
        #
        # Instance types which can act as a statistics parameter
        sp_types = Storage(org_resource_type = T("Organization Resource Type"),
                           project_beneficiary_type = T("Project Beneficiary Type"),
                           project_campaign_keyword = T("Project Campaign Keyword"),
                           stats_demographic = T("Demographic"),
                           stats_people_type = T("Types of People"),
                           stats_trained_type = T("Types of Trained People"),
                           supply_distribution_item = T("Distribution Item"),
                           vulnerability_indicator = T("Vulnerability Indicator"),
                           vulnerability_aggregated_indicator = T("Vulnerability Aggregated Indicator"),
                           #survey_question_type = T("Survey Question Type"),
                           #climate_parameter = T("Climate Parameter"),
                           )

        tablename = "stats_parameter"
        table = super_entity(tablename, "parameter_id",
                             sp_types,
                             Field("name",
                                   label = T("Name")),
                             Field("description",
                                   label = T("Description")),
                             )
        # expose which concrete type each parameter row belongs to
        table.instance_type.readable = True

        #----------------------------------------------------------------------
        # Super entity: stats_data
        #
        # Instance types which can act as a statistics data record
        sd_types = Storage(org_resource = T("Organization Resource"),
                           project_beneficiary = T("Project Beneficiary"),
                           project_campaign_response_summary = T("Project Campaign Response Summary"),
                           stats_demographic_data = T("Demographic Data"),
                           stats_people = T("People"),
                           stats_trained = T("Trained People"),
                           supply_distribution = T("Distribution"),
                           vulnerability_data = T("Vulnerability Data"),
                           #survey_answer = T("Survey Answer"),
                           #climate_data = T("Climate Data"),
                           )

        tablename = "stats_data"
        table = super_entity(tablename, "data_id",
                             sd_types,
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter"),
                             self.gis_location_id(
                                widget = S3LocationAutocompleteWidget(),
                                requires = IS_LOCATION()
                             ),
                             Field("value", "double",
                                   label = T("Value")),
                             # @ToDo: This will need to be a datetime for some usecases
                             s3_date(),
                             s3_date("date_end",
                                     label = T("End Date")),
                             )

        # ---------------------------------------------------------------------
        # Stats Source Super-Entity
        #
        source_types = Storage(doc_document = T("Document"),
                               #org_organisation = T("Organization"),
                               #pr_person = T("Person"),
                               #flood_gauge = T("Flood Gauge"),
                               #survey_series = T("Survey")
                               )

        tablename = "stats_source"
        table = super_entity(tablename, "source_id", source_types,
                             Field("name",
                                   label=T("Name")),
                             )

        # For use by Instances or Components
        source_superlink = super_link("source_id", "stats_source")

        # For use by other FKs
        represent = S3Represent(lookup="stats_source")
        source_id = S3ReusableField("source_id", table,
                                    label=T("Source"),
                                    requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "stats_source.source_id",
                                                          represent,
                                                          sort=True)),
                                    represent=represent,
                                    )

        #self.add_component("stats_source_details", stats_source="source_id")

        # ---------------------------------------------------------------------
        # Stats Source Details
        #
        #tablename = "stats_source_details"
        #table = self.define_table(tablename,
        #                          # Component
        #                          source_superlink,
        #                          #Field("reliability",
        #                          #      label=T("Reliability")),
        #                          #Field("review",
        #                          #      label=T("Review")),
        #                          )

        # Pass names back to global scope (s3.*)
        return dict(stats_source_superlink = source_superlink,
                    stats_source_id = source_id,
                    stats_quantile = self.quantile,
                    )

    # -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if module is disabled """

        return dict(
            # Needed for doc
            stats_source_superlink = S3ReusableField("source_id", "integer",
                                                     readable=False,
                                                     writable=False,
                                                     )(),
            )

    # -------------------------------------------------------------------------
    @staticmethod
    def quantile(data, q):
        """
            Return the specified quantile(s) q of the supplied list.
            The function can be called with either a single value for q or a
            list of values. In the latter case, the returned value is a tuple.
        """

        sx = sorted(data)
        def get_quantile(q1):
            # fractional position of quantile q1 in the sorted data
            pos = (len(sx) - 1) * q1
            if abs(pos - int(pos) - 0.5) < 0.1:
                # quantile in the middle between two values, average them
                return (sx[int(pos)] + sx[int(pos) + 1]) * 0.5
            else:
                # otherwise return the nearest value
                return sx[int(pos + 0.5)]

        if hasattr(q, "__iter__"):
            return tuple([get_quantile(qi) for qi in q])
        else:
            return get_quantile(q)
# =============================================================================
class S3StatsDemographicModel(S3Model):
"""
Baseline Demographics
"""
names = ["stats_demographic",
"stats_demographic_data",
"stats_demographic_aggregate",
"stats_demographic_rebuild_all_aggregates",
"stats_demographic_update_aggregates",
"stats_demographic_update_location_aggregate",
]
    def model(self):
        """ Define the demographic parameter, data & aggregate tables and
            return the aggregation helpers to the global s3.* store
        """

        T = current.T
        db = current.db

        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        location_id = self.gis_location_id

        stats_parameter_represent = S3Represent(lookup="stats_parameter")

        #----------------------------------------------------------------------
        # Demographic
        #
        tablename = "stats_demographic"
        table = define_table(tablename,
                             # Instance
                             super_link("parameter_id", "stats_parameter"),
                             Field("name",
                                   label = T("Name")),
                             s3_comments("description",
                                         label = T("Description")),
                             # Link to the Demographic which is the Total, so that we can calculate percentages
                             Field("total_id", self.stats_parameter,
                                   requires = IS_NULL_OR(
                                                IS_ONE_OF(db, "stats_parameter.parameter_id",
                                                          stats_parameter_represent,
                                                          instance_types = ["stats_demographic"],
                                                          sort=True)),
                                   represent=stats_parameter_represent,
                                   label=T("Total")),
                             *s3_meta_fields()
                             )

        # CRUD Strings
        ADD_DEMOGRAPHIC = T("Add Demographic")
        crud_strings[tablename] = Storage(
            title_create = ADD_DEMOGRAPHIC,
            title_display = T("Demographic Details"),
            title_list = T("Demographics"),
            title_update = T("Edit Demographic"),
            #title_search = T("Search Demographics"),
            #title_upload = T("Import Demographics"),
            subtitle_create = T("Add New Demographic"),
            label_list_button = T("List Demographics"),
            label_create_button = ADD_DEMOGRAPHIC,
            msg_record_created = T("Demographic added"),
            msg_record_modified = T("Demographic updated"),
            msg_record_deleted = T("Demographic deleted"),
            msg_list_empty = T("No demographics currently defined"))

        configure(tablename,
                  super_entity = "stats_parameter",
                  deduplicate = self.stats_demographic_duplicate,
                  requires_approval = True,
                  )

        #----------------------------------------------------------------------
        # Demographic Data
        #
        tablename = "stats_demographic_data"
        table = define_table(tablename,
                             # Instance
                             super_link("data_id", "stats_data"),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        instance_types = ["stats_demographic"],
                                        label = T("Demographic"),
                                        represent = stats_parameter_represent,
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        comment = S3AddResourceLink(c="stats",
                                                                    f="demographic",
                                                                    vars = dict(child = "parameter_id"),
                                                                    title=ADD_DEMOGRAPHIC,
                                                                    ),
                                        ),
                             location_id(
                                 widget = S3LocationAutocompleteWidget(),
                                 requires = IS_LOCATION(),
                                 required = True,
                             ),
                             Field("value", "double",
                                   required = True,
                                   label = T("Value"),
                                   ),
                             s3_date(required = True),
                             # Unused but needed for the stats_data SE
                             #Field("date_end", "date",
                             #      readable=False,
                             #      writable=False
                             #      ),
                             # Link to Source
                             self.stats_source_id(),
                             s3_comments(),
                             *s3_meta_fields()
                             )

        # CRUD Strings
        ADD_DEMOGRAPHIC = T("Add Demographic Data")
        crud_strings[tablename] = Storage(
            title_create = ADD_DEMOGRAPHIC,
            title_display = T("Demographic Data Details"),
            title_list = T("Demographic Data"),
            title_update = T("Edit Demographic Data"),
            title_search = T("Search Demographic Data"),
            title_upload = T("Import Demographic Data"),
            subtitle_create = T("Add New Demographic Data"),
            label_list_button = T("List Demographic Data"),
            label_create_button = ADD_DEMOGRAPHIC,
            msg_record_created = T("Demographic Data added"),
            msg_record_modified = T("Demographic Data updated"),
            msg_record_deleted = T("Demographic Data deleted"),
            msg_list_empty = T("No demographic data currently defined"))

        configure(tablename,
                  super_entity = "stats_data",
                  deduplicate = self.stats_demographic_data_duplicate,
                  requires_approval=True,
                  )

        #----------------------------------------------------------------------
        # Demographic Aggregated data
        #

        # The data can be aggregated against:
        # location, all the aggregated values across a number of locations
        #           thus for an L2 it will aggregate all the L3 values
        # time, all the demographic_data values for the same time period.
        #       currently this is just the latest value in the time period
        # copy, this is a copy of the previous time aggregation because no
        #       data is currently available for this time period

        aggregate_types = {1 : T("Time"),
                           2 : T("Location"),
                           3 : T("Copy"),
                           }

        tablename = "stats_demographic_aggregate"
        table = define_table(tablename,
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Demographic"),
                                        instance_types = ["stats_demographic"],
                                        represent = S3Represent(lookup="stats_parameter"),
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        ),
                             location_id(
                                widget = S3LocationAutocompleteWidget(),
                                requires = IS_LOCATION()
                             ),
                             Field("agg_type", "integer",
                                   requires = IS_IN_SET(aggregate_types),
                                   represent = lambda opt: \
                                    aggregate_types.get(opt,
                                                        current.messages.UNKNOWN_OPT),
                                   default = 1,
                                   label = T("Aggregation Type"),
                                   ),
                             Field("date", "date",
                                   label = T("Start Date"),
                                   ),
                             Field("end_date", "date",
                                   label = T("End Date"),
                                   ),
                             # Sum is used by Vulnerability as a fallback if we have no data at this level
                             Field("sum", "double",
                                   label = T("Sum"),
                                   ),
                             # Percentage is used to compare an absolute value against a total
                             Field("percentage", "double",
                                   label = T("Percentage"),
                                   ),
                             #Field("min", "double",
                             #      label = T("Minimum"),
                             #      ),
                             #Field("max", "double",
                             #      label = T("Maximum"),
                             #      ),
                             #Field("mean", "double",
                             #      label = T("Mean"),
                             #      ),
                             #Field("median", "double",
                             #      label = T("Median"),
                             #      ),
                             #Field("mad", "double",
                             #      label = T("Median Absolute Deviation"),
                             #      default = 0.0,
                             #      ),
                             #Field("mean_ad", "double",
                             #      label = T("Mean Absolute Deviation"),
                             #      ),
                             #Field("std", "double",
                             #      label = T("Standard Deviation"),
                             #      ),
                             #Field("variance", "double",
                             #      label = T("Variance"),
                             #      ),
                             *s3_meta_fields()
                             )

        # ---------------------------------------------------------------------
        # Pass names back to global scope (s3.*)
        #
        return dict(
            stats_demographic_rebuild_all_aggregates = self.stats_demographic_rebuild_all_aggregates,
            stats_demographic_update_aggregates = self.stats_demographic_update_aggregates,
            stats_demographic_update_location_aggregate = self.stats_demographic_update_location_aggregate,
            )
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_duplicate(item):
""" Import item de-duplication """
if item.tablename == "stats_demographic":
table = item.table
name = item.data.get("name", None)
query = (table.name.lower() == name.lower())
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_data_duplicate(item):
""" Import item de-duplication """
if item.tablename == "stats_demographic_data":
data = item.data
parameter_id = data.get("parameter_id", None)
location_id = data.get("location_id", None)
date = data.get("date", None)
table = item.table
query = (table.date == date) & \
(table.location_id == location_id) & \
(table.parameter_id == parameter_id)
duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if duplicate:
item.id = duplicate.id
item.method = item.METHOD.UPDATE
# -------------------------------------------------------------------------
    @staticmethod
    def stats_demographic_rebuild_all_aggregates():
        """
            This will delete all the stats_demographic_aggregate records and
            then rebuild them by triggering off a request for each
            stats_demographic_data record.

            This function is normally only run during prepop or postpop so we
            don't need to worry about the aggregate data being unavailable for
            any length of time
        """

        # Check to see whether an existing task is running and if it is then kill it
        db = current.db
        ttable = db.scheduler_task
        rtable = db.scheduler_run
        wtable = db.scheduler_worker
        query = (ttable.task_name == "stats_demographic_update_aggregates") & \
                (rtable.task_id == ttable.id) & \
                (rtable.status == "RUNNING")
        rows = db(query).select(rtable.id,
                                rtable.task_id,
                                rtable.worker_name)
        now = current.request.utcnow
        for row in rows:
            # Ask the worker to kill its current task, then mark both the
            # run record and the task record as stopped
            db(wtable.worker_name == row.worker_name).update(status="KILL")
            db(rtable.id == row.id).update(stop_time=now,
                                           status="STOPPED")
            db(ttable.id == row.task_id).update(stop_time=now,
                                                status="STOPPED")

        # Delete the existing aggregates (full rebuild)
        current.s3db.stats_demographic_aggregate.truncate()

        # Read all the approved stats_demographic_data records,
        # joined to stats_demographic for the total_id
        dtable = db.stats_demographic
        ddtable = db.stats_demographic_data
        query = (ddtable.deleted != True) & \
                (ddtable.approved_by != None) & \
                (ddtable.parameter_id == dtable.parameter_id)
        records = db(query).select(ddtable.data_id,
                                   ddtable.parameter_id,
                                   ddtable.date,
                                   ddtable.location_id,
                                   ddtable.value,
                                   dtable.total_id,
                                   )

        # Fire off a rebuild task
        # NOTE(review): "stats_demographic_update_aggregates" is presumably
        # registered as a scheduler task elsewhere (models/tasks.py) - confirm
        current.s3task.async("stats_demographic_update_aggregates",
                             vars=dict(records=records.json()),
                             timeout=21600 # 6 hours
                             )
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_aggregated_period(data_date=None):
"""
This will return the start and end dates of the aggregated time
period.
Currently the time period is annually so it will return the start
and end of the current year.
"""
if data_date is None:
data_date = date.today()
year = data_date.year
soap = date(year, 1, 1)
eoap = date(year, 12, 31)
return (soap, eoap)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_aggregates(records=None):
"""
This will calculate the stats_demographic_aggregate for the
specified parameter(s) at the specified location(s).
This will get the raw data from stats_demographic_data and generate
a stats_demographic_aggregate record for the given time period.
The reason for doing this is so that all aggregated data can be
obtained from a single table. So when displaying data for a
particular location it will not be necessary to try the aggregate
table, and if it's not there then try the data table. Rather just
look at the aggregate table.
Once this has run then a complete set of aggregate records should
exists for this parameter_id and location for every time period from
the first data item until the current time period.
Where appropriate add test cases to modules/unit_tests/s3db/stats.py
"""
if not records:
return
import datetime
from dateutil.rrule import rrule, YEARLY
db = current.db
s3db = current.s3db
dtable = s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
gtable = db.gis_location
# Data Structures used for the OPTIMISATION
param_total_dict = {} # the total_id for each parameter
param_location_dict = {} # a list of locations for each parameter
location_dict = {} # a list of locations
loc_level_list = {} # a list of levels for each location
aggregated_period = S3StatsDemographicModel.stats_demographic_aggregated_period
(last_period, year_end) = aggregated_period(None)
# Test to see which date format we have based on how we were called
if isinstance(records, basestring):
from_json = True
from dateutil.parser import parse
records = json.loads(records)
elif isinstance(records[0]["stats_demographic_data"]["date"],
(datetime.date, datetime.datetime)):
from_json = False
else:
from_json = True
from dateutil.parser import parse
for record in records:
total_id = record["stats_demographic"]["total_id"]
record = record["stats_demographic_data"]
data_id = record["data_id"]
location_id = record["location_id"]
parameter_id = record["parameter_id"]
# Skip if either the location or the parameter is not valid
if not location_id or not parameter_id:
s3_debug("Skipping bad stats_demographic_data record with data_id %s " % data_id)
continue
if total_id and parameter_id not in param_total_dict:
param_total_dict[parameter_id] = total_id
if from_json:
date = parse(record["date"])
else:
date = record["date"]
(start_date, end_date) = aggregated_period(date)
# Get all the approved stats_demographic_data records for this location and parameter
query = (dtable.location_id == location_id) & \
(dtable.deleted != True) & \
(dtable.approved_by != None)
fields = [dtable.data_id,
dtable.date,
dtable.value,
]
if total_id:
# Also get the records for the Total to use to calculate the percentage
query &= (dtable.parameter_id.belongs([parameter_id, total_id]))
fields.append(dtable.parameter_id)
else:
percentage = None
query &= (dtable.parameter_id == parameter_id)
data_rows = db(query).select(*fields)
if total_id:
# Separate out the rows relating to the Totals
total_rows = data_rows.exclude(lambda row: row.parameter_id == total_id)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = current.request.utcnow.date()
end_date = year_end
totals = {}
for row in total_rows:
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in totals:
if row_date <= totals[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in totals but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the totals storage
totals[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get each record and store them in a dict keyed on the start date
# of the aggregated period. If a record already exists for the
# reporting period then the most recent value will be stored.
earliest_period = start_date
end_date = year_end
data = {}
data[start_date] = Storage(date = date,
id = data_id,
value = record["value"])
for row in data_rows:
if row.data_id == data_id:
# This is the record we started with, so skip
continue
row_date = row.date
(start_date, end_date) = aggregated_period(row_date)
if start_date in data:
if row_date <= data[start_date]["date"]:
# The indicator in the row is of the same time period as
# another which is already stored in data but it is earlier
# so ignore this particular record
continue
elif start_date < earliest_period:
earliest_period = start_date
# Store the record from the db in the data storage
data[start_date] = Storage(date = row_date,
id = row.data_id,
value = row.value)
# Get all the aggregate records for this parameter and location
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id)
aggr_rows = db(query).select(atable.id,
atable.agg_type,
atable.date,
atable.end_date,
atable.sum,
)
aggr = {}
for row in aggr_rows:
(start_date, end_date) = aggregated_period(row.date)
aggr[start_date] = Storage(id = row.id,
type = row.agg_type,
end_date = row.end_date,
sum = row.sum,
)
# Step through each period and check that aggr is correct
last_data_period = earliest_period
last_type_agg = False # Whether the type of previous non-copy record was aggr
last_data_value = None # The value of the previous aggr record
last_total = None # The value of the previous aggr record for the totals param
# Keep track of which periods the aggr record has been changed in
# the database
changed_periods = []
for dt in rrule(YEARLY, dtstart=earliest_period, until=last_period):
# Calculate the end of the dt period.
# - it will be None if this is the last period
dt = dt.date()
if dt != last_period:
(start_date, end_date) = aggregated_period(dt)
else:
start_date = dt
end_date = None
if dt in aggr:
# Check that the stored aggr data is correct
agg_type = aggr[dt]["type"]
if agg_type == 2:
# This is built using other location aggregates
# so it can be ignored because only time or copy aggregates
# are being calculated in this function
last_type_agg = True
last_data_value = aggr[dt]["sum"]
continue
# Query to use to update aggr records
query = (atable.id == aggr[dt]["id"])
if agg_type == 3:
# This is a copy aggregate
if dt in data:
# There is data in the data dictionary for this period
# so aggregate record needs to be changed
value = data[dt]["value"]
last_data_value = value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif last_type_agg:
# No data in the data dictionary and the last type was aggr
continue
# Check that the data currently stored is correct
elif aggr[dt]["sum"] != last_data_value:
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
elif agg_type == 1:
# The value in the aggr should match the value in data
if dt in data:
value = data[dt]["value"]
last_data_value = value
if total_id and dt in totals:
last_total = totals[dt]["value"]
if aggr[dt]["sum"] != value:
if total_id and last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 1, # time
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
else:
# The data is not there so it must have been deleted
# Copy the value from the previous record
value = last_data_value
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
db(query).update(agg_type = 3, # copy
#reported_count = 1, # one record
#ward_count = 1, # one ward
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# No aggregate record for this time period exists
# So one needs to be inserted
else:
if dt in data:
value = data[dt]["value"]
agg_type = 1 # time
last_data_value = value
else:
value = last_data_value
agg_type = 3 # copy
if total_id:
if dt in totals:
last_total = totals[dt]["value"]
if last_total:
percentage = 100 * value / last_total
percentage = round(percentage, 3)
atable.insert(parameter_id = parameter_id,
location_id = location_id,
agg_type = agg_type,
#reported_count = 1, # one record
#ward_count = 1, # one ward
date = start_date,
end_date = end_date,
percentage = percentage,
sum = value,
#min = value,
#max = value,
#mean = value,
#median = value,
)
changed_periods.append((start_date, end_date))
# End of loop through each time period
if changed_periods == []:
continue
# The following structures are used in the OPTIMISATION step later
location = db(gtable.id == location_id).select(gtable.level,
limitby=(0, 1)
).first()
loc_level_list[location_id] = location.level
if parameter_id not in param_location_dict:
param_location_dict[parameter_id] = {location_id : changed_periods}
elif location_id not in param_location_dict[parameter_id]:
param_location_dict[parameter_id][location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < param_location_dict[parameter_id][location_id][0][0]:
param_location_dict[parameter_id][location_id] = changed_periods
if location_id not in location_dict:
location_dict[location_id] = changed_periods
else:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if changed_periods[0][0] < location_dict[location_id][0][0]:
location_dict[location_id] = changed_periods
# End of loop through each stats_demographic_data record
# OPTIMISATION
# The following code will get all the locations for which a parameter
# has been changed. This will remove duplicates which will occur when
# items are being imported for many communes in the same district.
# Take an import of 12 communes in the same district, without this the
# district will be updated 12 times, the province will be updated 12
# times and the country will be updated 12 times that is 33 unnecessary
# updates (for each time period) (i.e. 15 updates rather than 48)
# Get all the parents
parents = {}
get_parents = current.gis.get_parents
for loc_id in location_dict.keys():
_parents = get_parents(loc_id)
if parents:
parents[loc_id] = _parents
# Expand the list of locations for each parameter
parents_data = {}
for (param_id, loc_dict) in param_location_dict.items():
for (loc_id, periods) in loc_dict.items():
if loc_id in parents: # There won't be a parent if this is a L0
for p_loc_row in parents[loc_id]:
p_loc_id = p_loc_row.id
if param_id in parents_data:
if p_loc_id in parents_data[param_id]:
# Store the older of the changed periods (the end will always be None)
# Only need to check the start date of the first period
if periods[0][0] < parents_data[param_id][p_loc_id][0][0][0]:
parents_data[param_id][p_loc_id][0] = periods
else:
parents_data[param_id][p_loc_id] = [periods,
loc_level_list[loc_id]
]
else:
parents_data[param_id] = {p_loc_id : [periods,
loc_level_list[loc_id]
]
}
# Now that the time aggregate types have been set up correctly,
# fire off requests for the location aggregates to be calculated
async = current.s3task.async
for (param_id, loc_dict) in parents_data.items():
total_id = param_total_dict[param_id]
for (loc_id, (changed_periods, loc_level)) in loc_dict.items():
for (start_date, end_date) in changed_periods:
s, e = str(start_date), str(end_date)
async("stats_demographic_update_aggregate_location",
args = [loc_level, loc_id, param_id, total_id, s, e],
timeout = 1800 # 30m
)
# -------------------------------------------------------------------------
@staticmethod
def stats_demographic_update_location_aggregate(location_level,
location_id,
parameter_id,
total_id,
start_date,
end_date
):
"""
Calculates the stats_demographic_aggregate for a specific parameter at a
specific location.
@param location_id: the location record ID
@param parameter_id: the parameter record ID
@param total_id: the parameter record ID for the percentage calculation
@param start_date: the start date of the time period (as string)
@param end_date: the end date of the time period (as string)
"""
db = current.db
dtable = current.s3db.stats_demographic_data
atable = db.stats_demographic_aggregate
# Get all the child locations
child_locations = current.gis.get_children(location_id, location_level)
child_ids = [row.id for row in child_locations]
# Get the most recent stats_demographic_data record for all child locations
query = (dtable.parameter_id == parameter_id) & \
(dtable.deleted != True) & \
(dtable.approved_by != None) & \
(dtable.location_id.belongs(child_ids))
if end_date == "None": # converted to string as async parameter
end_date = None
else:
query &= (dtable.date <= end_date)
rows = db(query).select(dtable.value,
dtable.date,
dtable.location_id,
orderby=(dtable.location_id, ~dtable.date),
# groupby avoids duplicate records for the same
# location, but is slightly slower than just
# skipping the duplicates in the loop below
#groupby=(dtable.location_id)
)
# Get the most recent aggregate for this location for the total parameter
if total_id == "None": # converted to string as async parameter
total_id = None
# Collect the values, skip duplicate records for the
# same location => use the most recent one, which is
# the first row for each location as per the orderby
# in the query above
last_location = None
values = []
append = values.append
for row in rows:
new_location_id = row.location_id
if new_location_id != last_location:
last_location = new_location_id
append(row.value)
# Aggregate the values
values_len = len(values)
if not values_len:
return
values_sum = sum(values)
#values_min = min(values)
#values_max = max(values)
#values_avg = float(values_sum) / values_len
percentage = 100 * values_sum / values_total
values_percentage = round(percentage, 3)
#from numpy import median
#values_med = median(values)
#values_mad = median([abs(v - values_med) for v in values])
# Add or update the aggregated values in the database
# Do we already have a record?
query = (atable.location_id == location_id) & \
(atable.parameter_id == parameter_id) & \
(atable.date == start_date) & \
(atable.end_date == end_date)
exists = db(query).select(atable.id, limitby=(0, 1)).first()
attr = dict(agg_type = 2, # Location
#reported_count = values_len,
#ward_count = len(child_ids),
#min = values_min,
#max = values_max,
#mean = values_avg,
#median = values_med,
#mad = values_mad,
sum = values_sum,
percentage = values_percentage,
)
if exists:
# Update
db(query).update(**attr)
else:
# Insert new
atable.insert(parameter_id = parameter_id,
location_id = location_id,
date = start_date,
end_date = end_date,
**attr
)
return
# =============================================================================
def stats_demographic_data_controller():
    """
        Function to be called from controller functions:
        - display all demographic data for a location as a tab
        - options.s3json lookups for AddResourceLink
    """

    request = current.request

    if "options.s3json" in request.args:
        # options.s3json lookups for AddResourceLink
        return current.rest_controller("stats", "demographic_data")

    # Only viewing is valid
    get_vars = request.get_vars
    viewing = get_vars.get("viewing", None)
    if viewing is None:
        raise HTTP(400, current.xml.json_message(False, 400,
                                                 message="viewing not in vars"))
    if "." not in viewing:
        raise HTTP(400, current.xml.json_message(False, 400,
                                                 message="viewing needs a period"))
    tablename, id = viewing.split(".", 1)

    # Look up the location of the viewed record
    s3db = current.s3db
    table = s3db[tablename]
    location_id = current.db(table.id == id).select(table.location_id,
                                                    limitby=(0, 1)
                                                    ).first().location_id

    # Filter & default the demographic data to that location
    s3 = current.response.s3
    dtable = s3db.stats_demographic_data
    field = dtable.location_id
    s3.filter = (field == location_id)
    field.default = location_id
    field.readable = field.writable = False

    # Post-process
    def postp(r, output):
        if r.representation == "html":
            output["title"] = s3.crud_strings[tablename].title_display
        return output
    s3.postp = postp

    rheader = s3db.project_rheader if tablename == "project_location" else None

    return current.rest_controller("stats", "demographic_data",
                                   rheader=rheader)
# =============================================================================
class S3StatsPeopleModel(S3Model):
    """
        Used to record people in the CRMT (Community Resilience Mapping Tool) template
    """

    # Names exported to the response.s3 model registry
    names = ["stats_people",
             "stats_people_type",
             "stats_people_group",
             ]

    def model(self):
        """
            Define the tables:
            - stats_people_type: the kind of people being counted
            - stats_people: the actual counts per location
            - stats_people_group: link table People <> Coalitions (org_group)
        """

        T = current.T

        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Type of Peoples
        #
        tablename = "stats_people_type"
        table = define_table(tablename,
                             # Instance
                             super_link("doc_id", "doc_entity"),
                             super_link("parameter_id", "stats_parameter"),
                             Field("name",
                                   label=T("Name"),
                                   ),
                             s3_comments(),
                             *s3_meta_fields())

        ADD_PEOPLE_TYPE = T("Add New Type of People")
        crud_strings[tablename] = Storage(
            title_create=T("Add Type of People"),
            title_display=T("Type of People Details"),
            title_list=T("Type of Peoples"),
            title_update=T("Edit Type of People"),
            #title_search=T("Search Type of Peoples"),
            #title_upload=T("Import Type of Peoples"),
            subtitle_create=ADD_PEOPLE_TYPE,
            label_list_button=T("Type of Peoples"),
            label_create_button=ADD_PEOPLE_TYPE,
            label_delete_button=T("Delete Type of People"),
            msg_record_created=T("Type of People added"),
            msg_record_modified=T("Type of People updated"),
            msg_record_deleted=T("Type of People deleted"),
            msg_list_empty=T("No Type of Peoples defined"))

        # Resource Configuration
        # Types are both doc entities and stats parameters
        configure(tablename,
                  super_entity = ("doc_entity", "stats_parameter"),
                  deduplicate = self.stats_people_type_duplicate,
                  )

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # People
        #
        tablename = "stats_people"
        table = define_table(tablename,
                             # Instance
                             super_link("data_id", "stats_data"),
                             # Instance (link to Photos)
                             super_link("doc_id", "doc_entity"),
                             Field("name", #notnull=True,
                                   label=T("Name")),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Type of People"),
                                        instance_types = ["stats_people_type"],
                                        represent = S3Represent(lookup="stats_parameter"),
                                        readable = True,
                                        writable = True,
                                        empty = False,
                                        comment = S3AddResourceLink(c="stats",
                                                                    f="people_type",
                                                                    vars = dict(child = "parameter_id"),
                                                                    title=ADD_PEOPLE_TYPE),
                                        ),
                             Field("value", "integer",
                                   requires=IS_INT_IN_RANGE(0, 999999),
                                   label=T("Number of People")),
                             self.gis_location_id(label=T("Address")),
                             self.pr_person_id(label=T("Contact Person")),
                             s3_comments(),
                             *s3_meta_fields())

        ADD_PEOPLE = T("Add New People")
        crud_strings[tablename] = Storage(
            title_create=T("Add People"),
            title_display=T("People Details"),
            title_list=T("People"),
            title_update=T("Edit People"),
            title_search=T("Search People"),
            title_upload=T("Import People"),
            subtitle_create=ADD_PEOPLE,
            label_list_button=T("People"),
            label_create_button=ADD_PEOPLE,
            label_delete_button=T("Delete People"),
            msg_record_created=T("People added"),
            msg_record_modified=T("People updated"),
            msg_record_deleted=T("People deleted"),
            msg_list_empty=T("No People defined"))

        # NOTE(review): "people_group" is assumed to be the component alias
        # for the stats_people_group link table - confirm against the S3
        # component aliasing rules
        filter_widgets = [S3OptionsFilter("people_group.group_id",
                                          label=T("Coalition"),
                                          represent="%(name)s",
                                          widget="multiselect",
                                          ),
                          S3OptionsFilter("parameter_id",
                                          label=T("Type"),
                                          represent="%(name)s",
                                          widget="multiselect",
                                          ),
                          ]

        configure(tablename,
                  super_entity = ("doc_entity", "stats_data"),
                  filter_widgets = filter_widgets,
                  )

        # Coalitions
        add_component("org_group",
                      stats_people=dict(link="stats_people_group",
                                        joinby="people_id",
                                        key="group_id",
                                        actuate="hide"))
        # Format for InlineComponent/filter_widget
        add_component("stats_people_group",
                      stats_people="people_id")

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # People <> Coalitions link table
        #
        tablename = "stats_people_group"
        table = define_table(tablename,
                             Field("people_id", table,
                                   requires = IS_ONE_OF(current.db, "stats_people.id",
                                                        represent,
                                                        sort=True,
                                                        ),
                                   represent = represent,
                                   ),
                             self.org_group_id(empty=False),
                             *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        return dict()

    # ---------------------------------------------------------------------
    @staticmethod
    def stats_people_type_duplicate(item):
        """
            Deduplication of Type of Peoples:
            match on the (case-insensitive) name; if found, convert the
            import into an update of the existing record
        """

        if item.tablename != "stats_people_type":
            return

        data = item.data
        name = data.get("name", None)
        if not name:
            # No name to match on
            return

        table = item.table
        query = (table.name.lower() == name.lower())
        _duplicate = current.db(query).select(table.id,
                                              limitby=(0, 1)).first()
        if _duplicate:
            item.id = _duplicate.id
            item.data.id = _duplicate.id
            item.method = item.METHOD.UPDATE
# =============================================================================
class S3StatsTrainedPeopleModel(S3Model):
    """
        Used to record trained people in the CRMT (Community Resilience Mapping Tool) template
    """

    # Names exported to the response.s3 model registry
    names = ["stats_trained",
             "stats_trained_type",
             "stats_trained_group",
             ]

    def model(self):
        """
            Define the tables:
            - stats_trained_type: the kind of trained people being counted
            - stats_trained: the actual counts per location
            - stats_trained_group: link table Trained People <> Coalitions
        """

        T = current.T

        add_component = self.add_component
        configure = self.configure
        crud_strings = current.response.s3.crud_strings
        define_table = self.define_table
        super_link = self.super_link

        # ---------------------------------------------------------------------
        # Trained Type of Peoples
        #
        tablename = "stats_trained_type"
        table = define_table(tablename,
                             # Instance
                             super_link("parameter_id", "stats_parameter"),
                             Field("name",
                                   label=T("Name"),
                                   ),
                             s3_comments(),
                             *s3_meta_fields())

        ADD_TRAINED_PEOPLE_TYPE = T("Add New Type of Trained People")
        crud_strings[tablename] = Storage(
            title_create=T("Add Type of Trained People"),
            title_display=T("Type of Trained People Details"),
            title_list=T("Types of Trained People"),
            title_update=T("Edit Type of Trained People"),
            #title_search=T("Search Trained Type of Peoples"),
            #title_upload=T("Import Types of Trained People"),
            subtitle_create=ADD_TRAINED_PEOPLE_TYPE,
            label_list_button=T("Types of Trained People"),
            label_create_button=ADD_TRAINED_PEOPLE_TYPE,
            label_delete_button=T("Delete Type of Trained People "),
            msg_record_created=T("Type of Trained People added"),
            msg_record_modified=T("Type of Trained People updated"),
            msg_record_deleted=T("Type of Trained People deleted"),
            msg_list_empty=T("No Types of Trained People defined"))

        # Resource Configuration
        configure(tablename,
                  super_entity = "stats_parameter",
                  deduplicate = self.stats_trained_type_duplicate,
                  )

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # Trained People
        #
        tablename = "stats_trained"
        table = define_table(tablename,
                             # Instance
                             super_link("data_id", "stats_data"),
                             # Instance (link to Photos)
                             super_link("doc_id", "doc_entity"),
                             Field("name", notnull=True,
                                   label=T("Name")),
                             # This is a component, so needs to be a super_link
                             # - can't override field name, ondelete or requires
                             super_link("parameter_id", "stats_parameter",
                                        label = T("Type of Trained People"),
                                        instance_types = ["stats_trained_type"],
                                        represent = S3Represent(lookup="stats_parameter"),
                                        readable = True,
                                        writable = True,
                                        empty = True,
                                        comment = S3AddResourceLink(c="stats",
                                                                    f="trained_type",
                                                                    vars = dict(child = "parameter_id"),
                                                                    title=ADD_TRAINED_PEOPLE_TYPE),
                                        ),
                             Field("value", "integer",
                                   requires=IS_NULL_OR(
                                               IS_INT_IN_RANGE(0, 999999)
                                               ),
                                   label=T("Number of Trained People")),
                             self.org_organisation_id(),
                             self.gis_location_id(label=T("Address")),
                             # Which contact is this?
                             # Training Org should be a human_resource_id
                             # Team Leader should also be a human_resource_id
                             # Either way label should be clear
                             self.pr_person_id(label=T("Contact Person")),
                             s3_comments(),
                             *s3_meta_fields())

        ADD_TRAINED_PEOPLE = T("Add Trained People")
        crud_strings[tablename] = Storage(
            title_create=ADD_TRAINED_PEOPLE,
            title_display=T("Trained People Details"),
            title_list=T("Trained People"),
            title_update=T("Edit Trained People"),
            title_search=T("Search Trained People"),
            title_upload=T("Import Trained People"),
            subtitle_create=ADD_TRAINED_PEOPLE,
            label_list_button=T("Trained People"),
            label_create_button=ADD_TRAINED_PEOPLE,
            label_delete_button=T("Delete Trained People"),
            msg_record_created=T("Trained People added"),
            msg_record_modified=T("Trained People updated"),
            msg_record_deleted=T("Trained People deleted"),
            msg_list_empty=T("No Trained People defined"))

        # NOTE(review): the selector here uses the full link-table name
        # "stats_trained_group" whereas S3StatsPeopleModel uses the alias
        # form "people_group" - confirm which form the component aliasing
        # actually resolves, these look inconsistent
        filter_widgets = [S3OptionsFilter("stats_trained_group.group_id",
                                          label=T("Coalition"),
                                          represent="%(name)s",
                                          widget="multiselect",
                                          ),
                          S3OptionsFilter("parameter_id",
                                          label=T("Type"),
                                          represent="%(name)s",
                                          widget="multiselect",
                                          ),
                          ]

        configure(tablename,
                  super_entity = ("doc_entity", "stats_data"),
                  filter_widgets = filter_widgets,
                  )

        # Coalitions
        add_component("org_group",
                      stats_trained=dict(link="stats_trained_group",
                                         joinby="trained_id",
                                         key="group_id",
                                         actuate="hide"))
        # Format for InlineComponent/filter_widget
        add_component("stats_trained_group",
                      stats_trained="trained_id")

        represent = S3Represent(lookup=tablename)

        # ---------------------------------------------------------------------
        # Trained People <> Coalitions link table
        #
        tablename = "stats_trained_group"
        table = define_table(tablename,
                             Field("trained_id", table,
                                   requires = IS_ONE_OF(current.db, "stats_trained.id",
                                                        represent,
                                                        sort=True,
                                                        ),
                                   represent = represent,
                                   ),
                             self.org_group_id(empty=False),
                             *s3_meta_fields())

        # Pass names back to global scope (s3.*)
        return dict()

    # ---------------------------------------------------------------------
    @staticmethod
    def stats_trained_type_duplicate(item):
        """
            Deduplication of Trained Types:
            match on the (case-insensitive) name; if found, convert the
            import into an update of the existing record
        """

        if item.tablename != "stats_trained_type":
            return

        data = item.data
        name = data.get("name", None)
        if not name:
            # No name to match on
            return

        table = item.table
        query = (table.name.lower() == name.lower())
        _duplicate = current.db(query).select(table.id,
                                              limitby=(0, 1)).first()
        if _duplicate:
            item.id = _duplicate.id
            item.data.id = _duplicate.id
            item.method = item.METHOD.UPDATE
# END =========================================================================
| mit |
PacktPublishing/Mastering-Mesos | Chapter4/Aurora/src/test/python/apache/thermos/core/test_staged_kill.py | 7 | 9217 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import signal
import sys
import threading
import time
import pytest
from twitter.common.process import ProcessProviderFactory
from twitter.common.quantity import Amount, Time
from apache.thermos.config.schema import Process, Task
from apache.thermos.core.runner import TaskRunner
from apache.thermos.monitoring.monitor import TaskMonitor
from apache.thermos.testing.runner import Runner
from gen.apache.thermos.ttypes import ProcessState, TaskState
# A process that exits normally on its own after ~3 seconds; used to
# exercise kill paths while a worker is legitimately running.
sleepy_process = Process(name="sleepy", cmdline="sleep 3", min_duration=1)
# Inline python program that installs a SIGTERM ignore handler and then
# sleeps "forever", forcing the runner/killer to escalate beyond TERM.
ignore_script = [
    "import time, signal",
    "signal.signal(signal.SIGTERM, signal.SIG_IGN)",
    "time.sleep(1000)"
]
# Process template wrapping ignore_script in a `python -c '...'` cmdline.
ignorant_process = Process(
    name="ignorant",
    cmdline="%s -c '%s'" % (sys.executable, ';'.join(ignore_script)),
    min_duration=1)
class RunnerBase(object):
  """Shared scaffolding for the runner kill tests: launches a Runner on a
  background thread and polls a TaskMonitor until the task is live."""

  @classmethod
  def task(cls):
    # Subclasses must supply the thermos task under test.
    raise NotImplementedError

  @classmethod
  def start_runner(cls):
    """Start a Runner for cls.task() on a background thread and return it."""
    runner = Runner(cls.task())

    class RunnerThread(threading.Thread):
      def run(self):
        runner.run()

    RunnerThread().start()
    return runner

  @classmethod
  def wait_until_running(cls, monitor):
    """Block until exactly one active process exists and has a pid.

    NOTE(review): the liveness assertion below only fires for tasks whose
    process is literally named 'process' -- confirm that is intended for
    tasks using other process names.
    """
    while True:
      checkpoint = monitor.get_state().processes
      if 'process' in checkpoint:
        # The process must not have died behind our back.
        assert checkpoint['process'][0].return_code is None
      active = monitor.get_active_processes()
      if active:
        assert len(active) == 1
        if active[0][0].pid is not None:
          break
      time.sleep(0.1)
class ProcessPidTestCase(object):
  """Mixin: SIGKILL the worker process directly and verify the runner
  records a KILLED first run followed by a successful automatic retry."""

  def test_process_kill(self):
    runner = self.start_runner()
    tm = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'process'
    assert run_number == 0
    # Kill the worker itself (not its coordinator).
    os.kill(process_state.pid, signal.SIGKILL)
    # Poll until the runner thread publishes its terminal state.
    while True:
      if not hasattr(runner, 'state'):
        time.sleep(0.1)
      else:
        break
    assert runner.state.statuses[-1].state == TaskState.SUCCESS
    assert 'process' in runner.state.processes
    # Two runs: the one we killed, plus the retry that succeeded.
    assert len(runner.state.processes['process']) == 2
    assert runner.state.processes['process'][0].state == ProcessState.KILLED
    # A signal death is reported as the negated signal number.
    assert runner.state.processes['process'][0].return_code == -signal.SIGKILL
    assert runner.state.processes['process'][1].state == ProcessState.SUCCESS
class TestRunnerKill(RunnerBase, ProcessPidTestCase):
  """Kill scenarios against a plain, well-behaved sleeping task."""

  @classmethod
  def task(cls):
    return Task(name="task", processes=[sleepy_process(name="process")]).interpolate()[0]

  def test_coordinator_kill(self):
    """SIGKILLing the coordinator marks the run LOST; the retry succeeds."""
    runner = self.start_runner()
    monitor = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(monitor)
    proc_state, run = monitor.get_active_processes()[0]
    assert proc_state.process == 'process'
    assert run == 0
    os.kill(proc_state.coordinator_pid, signal.SIGKILL)
    # Poll until the runner thread publishes its terminal state.
    while not hasattr(runner, 'state'):
      time.sleep(0.1)
    final = runner.state
    assert final.statuses[-1].state == TaskState.SUCCESS
    assert 'process' in final.processes
    assert len(final.processes['process']) == 2
    assert final.processes['process'][0].state == ProcessState.LOST
    assert final.processes['process'][1].state == ProcessState.SUCCESS
class TestRunnerKillProcessTrappingSIGTERM(RunnerBase):
  """Kill scenarios against a process that ignores SIGTERM, forcing the
  runner and out-of-band killers to escalate."""

  @classmethod
  def task(cls):
    task = Task(name="task",
                finalization_wait=3,
                processes=[ignorant_process(name="ignorant_process")])
    return task.interpolate()[0]

  def test_coordinator_kill(self):
    """Kill coordinator of run 0 and worker of run 1; run 2 stays RUNNING
    and is cleaned up manually at the end."""
    runner = self.start_runner()
    tm = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'ignorant_process'
    assert run_number == 0
    os.kill(process_state.coordinator_pid, signal.SIGKILL)
    # Wait until the runner notices the lost coordinator and starts run 1.
    while True:
      active_procs = tm.get_active_processes()
      if active_procs and active_procs[0][1] > 0:
        break
      time.sleep(0.2)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'ignorant_process'
    assert run_number == 1
    os.kill(process_state.pid, signal.SIGKILL)
    # Wait until run 2 spins up after run 1's worker died.
    while True:
      active_procs = tm.get_active_processes()
      if active_procs and active_procs[0][1] > 1:
        break
      time.sleep(0.2)
    self.wait_until_running(tm)
    # Kill the runner itself so run 2 is left dangling in RUNNING state.
    os.kill(runner.po.pid, signal.SIGKILL)
    try:
      state = tm.get_state()
      assert state.processes['ignorant_process'][0].state == ProcessState.LOST
      assert state.processes['ignorant_process'][1].state == ProcessState.KILLED
      assert state.processes['ignorant_process'][2].state == ProcessState.RUNNING
    finally:
      # NOTE(review): if tm.get_state() raises, 'state' is unbound here and
      # this cleanup fails with NameError -- confirm that is acceptable.
      os.kill(state.processes['ignorant_process'][2].coordinator_pid, signal.SIGKILL)
      os.kill(state.processes['ignorant_process'][2].pid, signal.SIGKILL)

  def test_coordinator_dead_kill(self):
    """With runner, coordinator and worker already dead, a forced
    TaskRunner.kill must record the single run as LOST."""
    runner = self.start_runner()
    tm = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'ignorant_process'
    assert run_number == 0
    os.kill(runner.po.pid, signal.SIGKILL)
    os.kill(process_state.coordinator_pid, signal.SIGKILL)
    os.kill(process_state.pid, signal.SIGKILL)
    killer = TaskRunner.get(runner.task_id, runner.root)
    assert killer is not None
    killer.kill(force=True)
    state = tm.get_state()
    assert len(state.processes['ignorant_process']) == 1
    assert state.processes['ignorant_process'][0].state == ProcessState.LOST

  @pytest.mark.skipif('True', reason='Flaky test (AURORA-161)')
  def test_preemption_wait(self):
    """A preempting kill should honour preemption_wait before KILLED."""
    runner = self.start_runner()
    tm = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'ignorant_process'
    assert run_number == 0
    preempter = TaskRunner.get(runner.task_id, runner.root)
    assert preempter is not None
    now = time.time()
    preempter.kill(force=True, preemption_wait=Amount(1, Time.SECONDS))
    duration = time.time() - now
    # This is arbitrary, but make sure we finish within half a second of
    # requested preemption wait.
    assert abs(duration - 1.0) < 0.5
    assert preempter.state.statuses[-1].state == TaskState.KILLED
    assert preempter.state.processes['ignorant_process'][-1].state == ProcessState.KILLED
SIMPLEFORK_SCRIPT = """
cat <<EOF | %(INTERPRETER)s -
from __future__ import print_function
import os
import time
pid = os.fork()
if pid == 0:
pid = os.getpid()
with open('child.txt', 'w') as fp:
print(pid, file=fp)
time.sleep(60)
else:
with open('parent.txt', 'w') as fp:
print(os.getpid(), file=fp)
while not os.path.exists('exit.txt'):
time.sleep(0.1)
EOF
""" % {'INTERPRETER': sys.executable}
class TestRunnerKillProcessGroup(RunnerBase):
  """Verify that when the task finishes, children forked by its process
  are reaped along with it (process-group teardown)."""

  @classmethod
  def task(cls):
    task = Task(name="task", processes=[Process(name="process", cmdline=SIMPLEFORK_SCRIPT)])
    return task.interpolate()[0]

  def test_pg_is_killed(self):
    runner = self.start_runner()
    tm = TaskMonitor(runner.tempdir, runner.task_id)
    self.wait_until_running(tm)
    process_state, run_number = tm.get_active_processes()[0]
    assert process_state.process == 'process'
    assert run_number == 0
    # Wait for both sides of the fork to report their pids via files.
    child_pidfile = os.path.join(runner.sandbox, runner.task_id, 'child.txt')
    while not os.path.exists(child_pidfile):
      time.sleep(0.1)
    parent_pidfile = os.path.join(runner.sandbox, runner.task_id, 'parent.txt')
    while not os.path.exists(parent_pidfile):
      time.sleep(0.1)
    with open(child_pidfile) as fp:
      child_pid = int(fp.read().rstrip())
    with open(parent_pidfile) as fp:
      parent_pid = int(fp.read().rstrip())
    # Both processes must be alive, with the expected parent/child link.
    ps = ProcessProviderFactory.get()
    ps.collect_all()
    assert parent_pid in ps.pids()
    assert child_pid in ps.pids()
    assert child_pid in ps.children_of(parent_pid)
    # Ask the forked parent to exit normally, then wait for task success.
    with open(os.path.join(runner.sandbox, runner.task_id, 'exit.txt'), 'w') as fp:
      fp.write('go away!')
    while tm.task_state() is not TaskState.SUCCESS:
      time.sleep(0.1)
    state = tm.get_state()
    assert state.processes['process'][0].state == ProcessState.SUCCESS
    # After success, neither pid may survive: the group was torn down.
    ps.collect_all()
    assert parent_pid not in ps.pids()
    assert child_pid not in ps.pids()
| mit |
msoftware/w2ui | server/python/bottle/w2lib.py | 51 | 4798 | class w2Grid:
def __init__(self,conn):
self.conn = conn
def getRecords(self, sql, request, cql=None):
sql_components = { 'where': [], 'params': [], 'sort': [] }
if request.get('search',[]):
for search in request['search']:
operator = "="
field = search['field'] # TODO: protect from sql injection!!!
value = [ search['value'] ]
op = search['operator'].lower()
if op == "begins":
operator = "LIKE ?||'%%'"
elif op == "ends":
operator = "LIKE '%%'||?"
elif op == "contains":
operator = "LIKE '%%'||?||'%%'"
elif op == "is":
operator = "= LOWER(?)"
elif op == "between":
value = value[0]
operator = "BETWEEN ? AND ?"
elif op == "in":
value = value[0]
operator = "IN (%s)" % ','.join(['?'] * len(value))
sql_components['where'].append("%s %s" % (field,operator))
for v in value:
sql_components['params'].append(v)
if request.get('sort',[]):
for sort in request['sort']:
field = sort['field'] # TODO: protect from sql injection!!!
dir_ = sort['direction'] # TODO: protect from sql injection!!!
sql_components['sort'].append(field+' '+dir_)
connector = ' %s ' % request.get('searchLogic','AND') # TODO: protect from sql injection!!!
where = connector.join(sql_components['where'])
if not where:
where = '1=1'
sort = ",".join(sql_components['sort'])
if not sort:
sort = '1'
sql = sql.replace("~search~",where)
sql = sql.replace("~order~","~sort~")
sql = sql.replace("~sort~",sort)
if not cql:
cql = "SELECT count(1) FROM (%s) as grid_list_1" % sql
limit = 50
offset = 0
try:
limit = abs(int(request['limit']))
except:
pass
try:
offset = abs(int(request['offset']))
except:
pass
sql += " LIMIT %s OFFSET %s" % (limit,offset)
data = {}
try:
cursor = self.conn.cursor()
# count records
cursor.execute(cql,sql_components['params'])
data['status'] = 'success'
data['total'] = cursor.fetchone()[0]
# execute sql
data['records'] = []
rows = cursor.execute(sql,sql_components['params'])
columns = [ d[0] for d in cursor.description ]
columns[0] = "recid"
for row in rows:
record = zip(columns,list(row))
data['records'].append( dict(record) )
except Exception, e:
data['status'] = 'error'
data['message'] = '%s\n%s' % (e,sql)
return data
def deleteRecords(self, table, keyField, request):
recs = request['selected']
# TODO: protect table, keyField from sql injection!!!
sql = "DELETE FROM %s WHERE %s IN (%s)" % (table, keyField,','.join(['?'] * len(recs)))
data = {}
try:
cursor = self.conn.cursor()
cursor.execute(sql,recs)
self.conn.commit()
data['status'] = 'success'
data['message'] = ''
except Exception, e:
data['status'] = 'error'
data['message'] = '%s\n%s' % (e,sql)
return data
def getRecord(self, sql, recid):
data = {}
try:
cursor = self.conn.cursor()
# execute sql
cursor.execute(sql,[recid])
data['status'] = 'success'
data['message'] = ''
columns = [ d[0] for d in cursor.description ]
row = cursor.fetchone()
record = zip(columns,list(row))[1:]
data['record'] = dict(record)
except Exception, e:
data['status'] = 'error'
data['message'] = '%s\n%s' % (e,sql)
return data
def saveRecord(self, table, keyField, request):
# TODO: protect table, keyField, field names from sql injection!!!
fields, values = [], []
for k, v in request['record'].items():
if k == keyField: continue # key field should not be here
fields.append(k)
if v.startswith('__'):
v = v[2:]
elif v == "":
v = None
values.append(v)
if request.get('recid','0') == '0':
sql = "INSERT INTO %s (%s) VALUES (%s)" % (table,','.join(fields),','.join(['?']*len(fields)))
else:
sql = "UPDATE %s SET %s WHERE %s = ?" % (table, ','.join([ '%s=?' % f for f in fields ]), keyField)
values.append( request['recid'] )
data = {}
try:
cursor = self.conn.cursor()
cursor.execute(sql,values)
self.conn.commit()
data['status'] = 'success'
data['message'] = ''
except Exception, e:
data['status'] = 'error'
data['message'] = '%s\n%s' % (e,sql)
return data
def newRecord(self, table, data):
return self.saveRecord(table, '', {'recid': 0, 'record': data})
def getItems(self, sql):
# TODO: what's this function for?
return {}
| mit |
mlaitinen/odoo | addons/account_analytic_plans/account_analytic_plans.py | 143 | 23352 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
class one2many_mod2(fields.one2many):
    """one2many variant used for the account1_ids..account6_ids widgets:
    when a journal is present in the context, the related lines are
    restricted to analytic accounts under the plan axis whose index
    matches the field's numeric suffix."""

    def get(self, cr, obj, ids, name, user=None, offset=0, context=None, values=None):
        if context is None:
            context = {}
        res = {}
        for id in ids:
            res[id] = []
        ids2 = None
        if 'journal_id' in context:
            journal = obj.pool.get('account.journal').browse(cr, user, context['journal_id'], context=context)
            # name is e.g. 'account3_ids': character 7 is the 1-based axis
            # number, converted here to a 0-based index into plan_ids.
            pnum = int(name[7]) -1
            plan = journal.plan_id
            if plan and len(plan.plan_ids) > pnum:
                acc_id = plan.plan_ids[pnum].root_analytic_id.id
                # Only lines whose analytic account lives under this axis' root.
                ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids),('analytic_account_id','child_of',[acc_id])], limit=self._limit)
        if ids2 is None:
            # No journal/plan filter applicable: behave like a plain one2many.
            ids2 = obj.pool[self._obj].search(cr, user, [(self._fields_id,'in',ids)], limit=self._limit)
        for r in obj.pool[self._obj].read(cr, user, ids2, [self._fields_id], context=context, load='_classic_write'):
            key = r[self._fields_id]
            if isinstance(key, tuple):
                # Read return a tuple in the case where the field is a many2one
                # but we want to get the id of this field.
                key = key[0]
            res[key].append( r['id'] )
        return res
class account_analytic_line(osv.osv):
    _inherit = 'account.analytic.line'
    _description = 'Analytic Line'

    def _get_amount(self, cr, uid, ids, name, args, context=None):
        """Currency amount of the related move line, scaled by this
        analytic line's distribution percentage (0.0 without a move)."""
        res = dict.fromkeys(ids, 0.0)
        for line in self.browse(cr, uid, ids, context=context):
            if line.move_id:
                res[line.id] = line.move_id.amount_currency * (line.percentage / 100)
            else:
                res[line.id] = 0.0
        return res

    _columns = {
        'amount_currency': fields.function(_get_amount, string="Amount Currency", type="float", store=True, help="The amount expressed in the related account currency if not equal to the company one.", readonly=True),
        'percentage': fields.float('Percentage')
    }
class account_analytic_plan(osv.osv):
    # Container of plan lines (axes) along which analytic amounts are
    # distributed; referenced by journals and by distribution instances.
    _name = "account.analytic.plan"
    _description = "Analytic Plan"
    _columns = {
        'name': fields.char('Analytic Plan', required=True, select=True),
        # One line per analytic axis of the plan.
        'plan_ids': fields.one2many('account.analytic.plan.line', 'plan_id', 'Analytic Plans', copy=True),
    }
class account_analytic_plan_line(osv.osv):
    # One axis of an analytic plan: a root analytic account plus the
    # percentage range that distributions on this axis must total to.
    _name = "account.analytic.plan.line"
    _description = "Analytic Plan Line"
    _order = "sequence, id"
    _columns = {
        'plan_id': fields.many2one('account.analytic.plan','Analytic Plan',required=True),
        'name': fields.char('Axis Name', required=True, select=True),
        'sequence': fields.integer('Sequence'),
        'root_analytic_id': fields.many2one('account.analytic.account', 'Root Account', help="Root account of this plan.", required=False),
        # Validation bounds enforced by account_analytic_plan_instance.create;
        # both default to 100%, i.e. an exact full distribution.
        'min_required': fields.float('Minimum Allowed (%)'),
        'max_required': fields.float('Maximum Allowed (%)'),
    }
    _defaults = {
        'min_required': 100.0,
        'max_required': 100.0,
    }
class account_analytic_plan_instance(osv.osv):
    """A concrete analytic distribution: a set of (analytic account, rate)
    lines.  When plan_id is set the record acts as a reusable model."""
    _name = "account.analytic.plan.instance"
    _description = "Analytic Plan Instance"
    _columns = {
        'name': fields.char('Analytic Distribution'),
        'code': fields.char('Distribution Code', size=16),
        'journal_id': fields.many2one('account.analytic.journal', 'Analytic Journal' ),
        'account_ids': fields.one2many('account.analytic.plan.instance.line', 'plan_id', 'Account Id', copy=True),
        # accountN_ids expose the same lines filtered per plan axis N
        # (see one2many_mod2); used by the dynamic form built below.
        'account1_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account1 Id'),
        'account2_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account2 Id'),
        'account3_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account3 Id'),
        'account4_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account4 Id'),
        'account5_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account5 Id'),
        'account6_ids': one2many_mod2('account.analytic.plan.instance.line', 'plan_id', 'Account6 Id'),
        'plan_id': fields.many2one('account.analytic.plan', "Model's Plan"),
    }

    def search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False):
        """When a journal is in context, restrict results to distributions
        of that journal's analytic journal, or journal-less models."""
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        if context.get('journal_id', False):
            journal = journal_obj.browse(cr, user, [context['journal_id']], context=context)[0]
            analytic_journal = journal.analytic_journal_id and journal.analytic_journal_id.id or False
            args.append('|')
            args.append(('journal_id', '=', analytic_journal))
            args.append(('journal_id', '=', False))
        res = super(account_analytic_plan_instance, self).search(cr, user, args, offset=offset, limit=limit, order=order,
                                                                 context=context, count=count)
        return res

    def _default_journal(self, cr, uid, context=None):
        """Default the analytic journal from the journal in context."""
        if context is None:
            context = {}
        journal_obj = self.pool.get('account.journal')
        # dict.get replaces the py2-only has_key() idiom
        if context.get('journal_id'):
            journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
            if journal.analytic_journal_id:
                return journal.analytic_journal_id.id
        return False

    _defaults = {
        'plan_id': False,
        'journal_id': _default_journal,
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display as "name (code)" when a code is set."""
        res = []
        for inst in self.browse(cr, uid, ids, context=context):
            name = inst.name or '/'
            if name and inst.code:
                name=name+' ('+inst.code+')'
            res.append((inst.id, name))
        return res

    def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
        """Match on exact code first, then fall back to the name."""
        args = args or []
        if name:
            ids = self.search(cr, uid, [('code', '=', name)] + args, limit=limit, context=context or {})
            if not ids:
                ids = self.search(cr, uid, [('name', operator, name)] + args, limit=limit, context=context or {})
        else:
            ids = self.search(cr, uid, args, limit=limit, context=context or {})
        return self.name_get(cr, uid, ids, context or {})

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Build the form view dynamically: one editable line list per
        axis of the plan attached to the context journal (or plan_id)."""
        if context is None:
            context = {}
        wiz_id = self.pool.get('ir.actions.act_window').search(cr, uid, [("name","=","analytic.plan.create.model.action")], context=context)
        res = super(account_analytic_plan_instance,self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        journal_obj = self.pool.get('account.journal')
        analytic_plan_obj = self.pool.get('account.analytic.plan')
        if (res['type']=='form'):
            plan_id = False
            if context.get('journal_id', False):
                plan_id = journal_obj.browse(cr, uid, int(context['journal_id']), context=context).plan_id
            elif context.get('plan_id', False):
                plan_id = analytic_plan_obj.browse(cr, uid, int(context['plan_id']), context=context)
            if plan_id:
                i=1
                res['arch'] = """<form string="%s">
    <field name="name"/>
    <field name="code"/>
    <field name="journal_id"/>
    <button name="%d" string="Save This Distribution as a Model" type="action" colspan="2"/>
    """% (tools.to_xml(plan_id.name), wiz_id[0])
                for line in plan_id.plan_ids:
                    # One accountN_ids list per axis, with the analytic
                    # account domain rooted at the axis' root account.
                    res['arch']+="""
    <field name="account%d_ids" string="%s" nolabel="1" colspan="4">
    <tree string="%s" editable="bottom">
        <field name="rate"/>
        <field name="analytic_account_id" domain="[('parent_id','child_of',[%d])]" groups="analytic.group_analytic_accounting"/>
    </tree>
    </field>
    <newline/>"""%(i,tools.to_xml(line.name),tools.to_xml(line.name),line.root_analytic_id and line.root_analytic_id.id or 0)
                    i+=1
                res['arch'] += "</form>"
                doc = etree.fromstring(res['arch'].encode('utf8'))
                xarch, xfields = self._view_look_dom_arch(cr, uid, doc, view_id, context=context)
                res['arch'] = xarch
                res['fields'] = xfields
            return res
        else:
            return res

    def create(self, cr, uid, vals, context=None):
        """Validate per-axis rate totals against the journal's plan and
        reject name/code duplicates of existing models."""
        journal_obj = self.pool.get('account.journal')
        ana_plan_instance_obj = self.pool.get('account.analytic.plan.instance')
        acct_anal_acct = self.pool.get('account.analytic.account')
        acct_anal_plan_line_obj = self.pool.get('account.analytic.plan.line')
        if context and context.get('journal_id'):
            journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
            pids = ana_plan_instance_obj.search(cr, uid, [('name','=',vals['name']), ('code','=',vals['code']), ('plan_id','<>',False)], context=context)
            if pids:
                raise osv.except_osv(_('Error!'), _('A model with this name and code already exists.'))
            res = acct_anal_plan_line_obj.search(cr, uid, [('plan_id','=',journal.plan_id.id)], context=context)
            for i in res:
                total_per_plan = 0
                item = acct_anal_plan_line_obj.browse(cr, uid, i, context=context)
                temp_list = ['account1_ids','account2_ids','account3_ids','account4_ids','account5_ids','account6_ids']
                for l in temp_list:
                    # 'in' replaces the py2-only dict.has_key()
                    if l in vals:
                        for tempo in vals[l]:
                            # Only count lines whose account belongs to this axis.
                            if acct_anal_acct.search(cr, uid, [('parent_id', 'child_of', [item.root_analytic_id.id]), ('id', '=', tempo[2]['analytic_account_id'])], context=context):
                                total_per_plan += tempo[2]['rate']
                if total_per_plan < item.min_required or total_per_plan > item.max_required:
                    raise osv.except_osv(_('Error!'),_('The total should be between %s and %s.') % (str(item.min_required), str(item.max_required)))
        return super(account_analytic_plan_instance, self).create(cr, uid, vals, context=context)

    def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
        """Writing to a model spawns a copy: invoice lines are relinked to
        the copy and the original stops being a model.

        NOTE(review): only ids[0] is inspected -- confirm this method is
        never called with multiple ids.
        """
        if context is None:
            context = {}
        this = self.browse(cr, uid, ids[0], context=context)
        invoice_line_obj = self.pool.get('account.invoice.line')
        if this.plan_id and 'plan_id' not in vals:
            #this instance is a model, so we have to create a new plan instance instead of modifying it
            #copy the existing model
            temp_id = self.copy(cr, uid, this.id, None, context=context)
            #get the list of the invoice line that were linked to the model
            lists = invoice_line_obj.search(cr, uid, [('analytics_id','=',this.id)], context=context)
            #make them link to the copy
            invoice_line_obj.write(cr, uid, lists, {'analytics_id':temp_id}, context=context)
            #and finally modify the old model to be not a model anymore
            vals['plan_id'] = False
            if 'name' not in vals:
                vals['name'] = this.name and (str(this.name)+'*') or "*"
            if 'code' not in vals:
                vals['code'] = this.code and (str(this.code)+'*') or "*"
        return super(account_analytic_plan_instance, self).write(cr, uid, ids, vals, context=context)
class account_analytic_plan_instance_line(osv.osv):
    _name = "account.analytic.plan.instance.line"
    _description = "Analytic Instance Line"
    _rec_name = "analytic_account_id"
    _columns = {
        'plan_id': fields.many2one('account.analytic.plan.instance', 'Plan Id'),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', required=True, domain=[('type','<>','view')]),
        'rate': fields.float('Rate (%)', required=True),
    }
    _defaults = {
        'rate': 100.0
    }

    def name_get(self, cr, uid, ids, context=None):
        """Display each distribution line as its analytic account."""
        if not ids:
            return []
        rows = self.read(cr, uid, ids, ['analytic_account_id'], context=context)
        return [(row['id'], row['analytic_account_id']) for row in rows]
class account_journal(osv.osv):
    _inherit = "account.journal"
    _name = "account.journal"
    _columns = {
        # Analytic plan whose axes drive the dynamic distribution form
        # (see account_analytic_plan_instance.fields_view_get).
        'plan_id': fields.many2one('account.analytic.plan', 'Analytic Plans'),
    }
class account_invoice_line(osv.osv):
    _inherit = "account.invoice.line"
    _name = "account.invoice.line"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }

    def create(self, cr, uid, vals, context=None):
        """Normalise an (id, name) tuple passed for analytics_id to the bare id."""
        analytics = vals.get('analytics_id')
        if isinstance(analytics, tuple):
            vals['analytics_id'] = analytics[0]
        return super(account_invoice_line, self).create(cr, uid, vals, context=context)

    def move_line_get_item(self, cr, uid, line, context=None):
        """Carry the analytic distribution onto the generated move line values."""
        res = super(account_invoice_line, self).move_line_get_item(cr, uid, line, context=context)
        res['analytics_id'] = line.analytics_id.id if line.analytics_id else False
        return res

    def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, company_id=None, context=None):
        """On product change, also default analytics_id from the matching
        analytic default (account.analytic.default) for today."""
        res_prod = super(account_invoice_line, self).product_id_change(cr, uid, ids, product, uom_id, qty, name, type, partner_id, fposition_id, price_unit, currency_id, company_id=company_id, context=context)
        rec = self.pool.get('account.analytic.default').account_get(cr, uid, product, partner_id, uid, time.strftime('%Y-%m-%d'), context=context)
        if rec and rec.analytics_id:
            res_prod['value']['analytics_id'] = rec.analytics_id.id
        return res_prod
class account_move_line(osv.osv):
    _inherit = "account.move.line"
    _name = "account.move.line"
    _columns = {
        'analytics_id':fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }

    def _default_get_move_form_hook(self, cursor, user, data):
        """Strip analytics_id from defaults copied off the previous line."""
        data = super(account_move_line, self)._default_get_move_form_hook(cursor, user, data)
        # 'in' replaces the py2-only dict.has_key()
        if 'analytics_id' in data:
            del data['analytics_id']
        return data

    def create_analytic_lines(self, cr, uid, ids, context=None):
        """Expand each move line's analytic distribution (analytics_id)
        into one analytic line per (account, rate) entry, replacing any
        analytic lines previously generated for that move line.

        Raises except_osv when the move's journal lacks an analytic journal.
        """
        if context is None:
            context = {}
        super(account_move_line, self).create_analytic_lines(cr, uid, ids, context=context)
        analytic_line_obj = self.pool.get('account.analytic.line')
        for line in self.browse(cr, uid, ids, context=context):
            if line.analytics_id:
                if not line.journal_id.analytic_journal_id:
                    raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (line.journal_id.name,))
                # Replace analytic lines already attached to this move line.
                toremove = analytic_line_obj.search(cr, uid, [('move_id','=',line.id)], context=context)
                if toremove:
                    analytic_line_obj.unlink(cr, uid, toremove, context=context)
                for line2 in line.analytics_id.account_ids:
                    # Credit-positive signed amount, scaled by the line's rate.
                    val = (line.credit or 0.0) - (line.debit or 0.0)
                    amt = val * (line2.rate/100)
                    al_vals = {
                        'name': line.name,
                        'date': line.date,
                        'account_id': line2.analytic_account_id.id,
                        'unit_amount': line.quantity,
                        'product_id': line.product_id and line.product_id.id or False,
                        'product_uom_id': line.product_uom_id and line.product_uom_id.id or False,
                        'amount': amt,
                        'general_account_id': line.account_id.id,
                        'move_id': line.id,
                        'journal_id': line.journal_id.analytic_journal_id.id,
                        'ref': line.ref,
                        'percentage': line2.rate
                    }
                    analytic_line_obj.create(cr, uid, al_vals, context=context)
        return True

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        # Pure pass-through, kept as an override point other modules may
        # monkey-patch or extend.
        if context is None:
            context = {}
        result = super(account_move_line, self).fields_view_get(cr, uid, view_id, view_type, context=context, toolbar=toolbar, submenu=submenu)
        return result
class account_invoice(osv.osv):
    _name = "account.invoice"
    _inherit = "account.invoice"

    def line_get_convert(self, cr, uid, x, part, date, context=None):
        # Carry the analytic distribution through to the move line values.
        res=super(account_invoice,self).line_get_convert(cr, uid, x, part, date, context=context)
        res['analytics_id'] = x.get('analytics_id', False)
        return res

    def _get_analytic_lines(self, cr, uid, ids, context=None):
        # Expand each invoice line's analytic distribution into analytic
        # line values (one per (account, rate) entry), converting amounts
        # into the company currency at the invoice date.
        # NOTE(review): only ids[0] is processed, and context.copy() below
        # assumes a dict context -- confirm callers always satisfy both.
        inv = self.browse(cr, uid, ids)[0]
        cur_obj = self.pool.get('res.currency')
        invoice_line_obj = self.pool.get('account.invoice.line')
        acct_ins_obj = self.pool.get('account.analytic.plan.instance')
        company_currency = inv.company_id.currency_id.id
        # Customer invoices / supplier refunds count positive.
        if inv.type in ('out_invoice', 'in_refund'):
            sign = 1
        else:
            sign = -1
        iml = invoice_line_obj.move_line_get(cr, uid, inv.id, context=context)
        for il in iml:
            if il.get('analytics_id', False):
                # Supplier documents reference the supplier's number.
                if inv.type in ('in_invoice', 'in_refund'):
                    ref = inv.reference
                else:
                    ref = inv.number
                obj_move_line = acct_ins_obj.browse(cr, uid, il['analytics_id'], context=context)
                ctx = context.copy()
                ctx.update({'date': inv.date_invoice})
                amount_calc = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context=ctx) * sign
                qty = il['quantity']
                il['analytic_lines'] = []
                for line2 in obj_move_line.account_ids:
                    # Scale both amount and quantity by the line's rate.
                    amt = amount_calc * (line2.rate/100)
                    qtty = qty* (line2.rate/100)
                    al_vals = {
                        'name': il['name'],
                        'date': inv['date_invoice'],
                        'unit_amount': qtty,
                        'product_id': il['product_id'],
                        'account_id': line2.analytic_account_id.id,
                        'amount': amt,
                        'product_uom_id': il['uos_id'],
                        'general_account_id': il['account_id'],
                        'journal_id': self._get_journal_analytic(cr, uid, inv.type),
                        'ref': ref,
                    }
                    il['analytic_lines'].append((0, 0, al_vals))
        return iml
class account_analytic_plan(osv.osv):
    # Extension of the plan defined above: remember a default distribution.
    _inherit = "account.analytic.plan"
    _columns = {
        # Distribution instance proposed by default for this plan.
        'default_instance_id': fields.many2one('account.analytic.plan.instance', 'Default Entries'),
    }
class analytic_default(osv.osv):
    # Analytic defaults can now propose a full distribution, not just
    # a single analytic account.
    _inherit = "account.analytic.default"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
class sale_order_line(osv.osv):
    _inherit = "sale.order.line"

    # Method overridden to set the analytic account by default on criterion match
    def invoice_line_create(self, cr, uid, ids, context=None):
        create_ids = super(sale_order_line,self).invoice_line_create(cr, uid, ids, context=context)
        inv_line_obj = self.pool.get('account.invoice.line')
        acct_anal_def_obj = self.pool.get('account.analytic.default')
        if ids:
            # NOTE(review): the partner/company of the FIRST order line's
            # order is used for every created invoice line -- confirm this
            # is only ever called with lines of a single order.
            sale_line = self.browse(cr, uid, ids[0], context=context)
            for line in inv_line_obj.browse(cr, uid, create_ids, context=context):
                rec = acct_anal_def_obj.account_get(cr, uid, line.product_id.id,
                        sale_line.order_id.partner_id.id, uid, time.strftime('%Y-%m-%d'),
                        sale_line.order_id.company_id.id, context=context)
                if rec:
                    inv_line_obj.write(cr, uid, [line.id], {'analytics_id': rec.analytics_id.id}, context=context)
        return create_ids
class account_bank_statement(osv.osv):
    _inherit = "account.bank.statement"
    _name = "account.bank.statement"

    def _prepare_bank_move_line(self, cr, uid, st_line, move_id, amount, company_currency_id, context=None):
        # Propagate the statement line's analytic distribution onto the
        # generated bank move line.
        result = super(account_bank_statement,self)._prepare_bank_move_line(cr, uid, st_line,
            move_id, amount, company_currency_id, context=context)
        result['analytics_id'] = st_line.analytics_id.id
        return result

    def button_confirm_bank(self, cr, uid, ids, context=None):
        # After the standard confirmation, verify that every line carrying
        # an analytic distribution belongs to a journal that has an
        # analytic journal configured.
        super(account_bank_statement,self).button_confirm_bank(cr, uid, ids, context=context)
        for st in self.browse(cr, uid, ids, context=context):
            for st_line in st.line_ids:
                if st_line.analytics_id:
                    if not st.journal_id.analytic_journal_id:
                        raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal.") % (st.journal_id.name,))
                    # Zero-amount lines are skipped (no analytic impact).
                    if not st_line.amount:
                        continue
        return True
class account_bank_statement_line(osv.osv):
    # Statement lines can carry an analytic distribution which is copied
    # onto the bank move line at confirmation time (see above).
    _inherit = "account.bank.statement.line"
    _name = "account.bank.statement.line"
    _columns = {
        'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
PengyuSun/tera | example/docker/hdfs.py | 19 | 1496 | import time
class Hdfs:
    """One HDFS node (master or slave) to be launched in a docker container."""

    def __init__(self, ip, mode, log_prefix):
        self.ip = ip
        self.mode = mode
        self.path = self.get_log_path(log_prefix)

    def get_log_path(self, log_prefix):
        """Timestamped host directory holding this node's logs."""
        return '{pre}/hdfs/{ip}-{mode}-{time}'.format(
            pre=log_prefix, ip=self.ip, mode=self.mode,
            time=time.strftime('%Y%m%d%H%M%S'))

    def to_string(self):
        """One-line human-readable summary of the node."""
        return 'hdfs\t{ip}:\t{mode}\tlog:{log}'.format(
            ip=self.ip, mode=self.mode, log=self.path)

    def to_cmd(self, docker, masters, slaves):
        """Docker command line that launches this node's container."""
        return ('docker run -t -d -v {dir}:/opt/share -p 9000:9000 -p 9001:9001 '
                '--net=host {docker} /usr/bin/python /opt/hdfs_setup.py '
                '--masters {master} --slaves {slaves} --mode {mode}').format(
            dir=self.path, docker=docker, master=masters, slaves=slaves, mode=self.mode)
class HdfsCluster:
    """Plans an HDFS deployment: one master plus num_of_hdfs slave nodes."""

    def __init__(self, ip_list, num_of_hdfs, log_prefix):
        self.ip_list = ip_list
        self.ip_index = 0            # next ip_list slot handed to a slave
        self.num_of_hdfs = num_of_hdfs
        self.cluster = []            # Hdfs objects created so far
        self.log_prefix = log_prefix
        self.master_ip = self.ip_list[0]
        self.slave_ip = []

    def add_hdfs(self):
        """Create the next slave node and record its ip."""
        hdfs = Hdfs(self.ip_list[self.ip_index], 'slave', self.log_prefix)
        self.cluster.append(hdfs)
        self.slave_ip.append(hdfs.ip)
        self.ip_index += 1

    def populate_hdfs_cluster(self):
        """Create the master and all slaves; returns False (after a
        diagnostic message) when there are not enough ip addresses.

        NOTE(review): the master shares ip_list[0] with the first slave --
        confirm that colocation is intended.
        """
        if self.num_of_hdfs > len(self.ip_list):
            # print() call instead of the py2-only print statement, so this
            # module also runs under python 3.
            print('not enough ip address for hdfs!!')
            return False
        master = Hdfs(self.ip_list[0], 'master', self.log_prefix)
        self.cluster.append(master)
        for i in range(self.num_of_hdfs):
            self.add_hdfs()
| bsd-3-clause |
Alwnikrotikz/cortex-vfx | contrib/IECoreMantra/test/IECoreMantra/RendererTest.py | 12 | 8210 | ##########################################################################
#
# Copyright 2012, Electric Theatre Collective Limited. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import subprocess
import unittest
import IECore
import IECoreMantra
_dir = os.path.dirname( __file__ )
class RendererTest( unittest.TestCase ):
    """Exercises IECoreMantra.Renderer: direct renders, IFD generation,
    procedurals, option declaration and shader parameter serialisation.

    Fixes applied relative to the original: the Python-2-only file()
    builtin is replaced with open(), the deprecated failUnless/assertEquals
    aliases are replaced with assertTrue/assertEqual, and a stray space in
    "r. display" is removed. Behaviour is otherwise unchanged.
    """

    def __greenSquare( self, r ):
        # Emit a constant-green, two-face linear mesh used as render subject.
        r.shader( "surface", "constant", { "Cd": IECore.V3fData( IECore.V3f( 0, 1, 0 ) ) } )
        r.mesh(
            IECore.IntVectorData( [ 4, 4 ] ),
            IECore.IntVectorData( [ 0, 1, 2, 3, 3, 2, 4, 5 ] ),
            "linear",
            {
                "P" : IECore.PrimitiveVariable(
                    IECore.PrimitiveVariable.Interpolation.Vertex,
                    IECore.V3fVectorData( [ IECore.V3f( 0, 0, 0 ), IECore.V3f( 0, 1, 0 ),
                        IECore.V3f( 1, 1, 0 ), IECore.V3f( 1, 0, 0 ),
                        IECore.V3f( 2, 1, 0 ), IECore.V3f( 2, 0, 0 ) ] )
                )
            }
        )

    def testTypeId( self ):
        self.assertEqual( IECoreMantra.Renderer().typeId(), IECoreMantra.Renderer.staticTypeId() )
        self.assertNotEqual( IECoreMantra.Renderer.staticTypeId(), IECore.Renderer.staticTypeId() )

    def testTypeName( self ):
        r = IECoreMantra.Renderer()
        self.assertEqual( r.typeName(), "IECoreMantra::Renderer" )

    def testWorldMesh( self ):
        # test that the ieworld procedural picks up the cache file written
        # by worldEnd() and renders correctly
        r = IECoreMantra.Renderer()
        r.display(
            _dir + "/output/testWorldMesh.tif",
            "tiff",
            "rgba",
            { "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
        )
        m = IECore.M44f().translate( IECore.V3f(0,0,6) )
        r.camera( "main", { "projection": "perspective", "transform": m } )
        r.worldBegin()
        self.__greenSquare( r )
        r.worldEnd()
        del r
        imageCreated = IECore.Reader.create( _dir + "/output/testWorldMesh.tif" ).read()
        expectedImage = IECore.Reader.create( _dir + "/data/testWorldMesh.tif" ).read()
        self.assertEqual(
            IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
            IECore.BoolData( False )
        )

    def testIfdGen( self ):
        # the image generated by this scene should be identical to the
        # output of testWorldMesh()
        ifd = _dir + "/output/testIfdGen.ifd"
        r = IECoreMantra.Renderer( ifd )
        r.display(
            _dir + "/output/testIfdGen.tif",
            "tiff",
            "rgba",
            { "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
        )
        m = IECore.M44f().translate( IECore.V3f(0,0,6) )
        r.camera( "main", { "projection": "perspective", "transform": m } )
        r.worldBegin()
        self.__greenSquare( r )
        r.worldEnd()
        del r
        self.assertTrue( os.path.isfile( ifd ) )
        p = subprocess.Popen( ['mantra'], stdin=open( ifd ), stdout=subprocess.PIPE)
        p.communicate()
        imageCreated = IECore.Reader.create( _dir + "/output/testIfdGen.tif" ).read()
        expectedImage = IECore.Reader.create( _dir + "/data/testWorldMesh.tif" ).read()
        self.assertEqual(
            IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
            IECore.BoolData( False )
        )

    def __renderGeometry( self ):
        # Render a sphere via the ieprocedural geometry hook.
        r = IECoreMantra.Renderer()
        r.display(
            _dir + "/output/testGeometry.tif",
            "tiff",
            "rgba",
            { "variable": "Cf+Af", "vextype": "vector4", "channel": "C" }
        )
        m = IECore.M44f().translate( IECore.V3f(0,0,6) )
        r.camera( "main", { "projection": "perspective", "transform": m } )
        r.worldBegin()
        r.geometry(
            "ieprocedural",
            {"className": "sphereProcedural", "classVersion": 1, "parameterString": ""},
            {}
        )
        r.worldEnd()
        del r

    def testGeometry( self ):
        self.__renderGeometry()
        imageCreated = IECore.Reader.create( _dir + "/output/testGeometry.tif" ).read()
        expectedImage = IECore.Reader.create( _dir + "/data/testGeometry.tif" ).read()
        self.assertEqual(
            IECore.ImageDiffOp()( imageA=imageCreated, imageB=expectedImage, maxError=0.01 ),
            IECore.BoolData( False )
        )

    def testVrayIncludes( self ):
        # test that mantra can find VRAY_ieProcedural.so and VRAY_ieWorld.so
        # NOTE(review): under Python 3, communicate() returns bytes; these
        # substring checks assume str output -- confirm the interpreter used.
        p = subprocess.Popen( ['mantra', '-V8'], stdin=open('/dev/null'), stdout=subprocess.PIPE )
        out = p.communicate()[0]
        self.assertTrue( out )
        self.assertTrue( "Registering procedural 'ieprocedural'" in out )
        self.assertTrue( "Registering procedural 'ieworld'" in out )

    def testOptions( self ):
        ifd = _dir + "/output/testOptions.ifd"
        r = IECoreMantra.Renderer( ifd )
        r.setOption( "itest", IECore.IntData(42) )
        r.setOption( "ftest", IECore.FloatData(1.23) )
        # NOTE(review): the other options pass *Data wrappers; this one passes
        # a raw V3f -- confirm that is what Renderer.setOption expects.
        r.setOption( "v3ftest", IECore.V3f(1,0,0) )
        r.setOption( "stringtest", IECore.StringData("hello") )
        r.worldBegin()
        r.worldEnd()
        del r
        # open() replaces the Python-2-only file() builtin
        l = "".join( open( ifd ).readlines() ).replace( "\n", "" )
        self.assertTrue( 'ray_declare global int itest 42' in l )
        self.assertTrue( 'ray_declare global float ftest 1.23' in l )
        self.assertTrue( 'ray_declare global vector3 v3ftest 1 0 0' in l )
        self.assertTrue( 'ray_declare global string stringtest "hello"' in l )

    def testShaderParameters( self ):
        # Test the shader parameters end up in the scene.. you would expect them in
        # ifd but because everything post-world is stored in a side-car .cob file
        # we look for that instead and check the shader invocation string is on the
        # top of the render state.
        ifd = _dir + "/output/testShaderParameters.ifd"
        r = IECoreMantra.Renderer( ifd )
        r.worldBegin()
        r.shader("surface", "testshader",
            {
                "p1": IECore.IntData(11),
                "p2": IECore.FloatData(1.234),
                "p3": IECore.StringData("hello"),
                "p4": IECore.V3fData( IECore.V3f(1,2,3) ),
                "p5": IECore.Color3fData( IECore.Color3f(1,0,0) ),
            }
        )
        r.worldEnd()
        del r
        self.assertTrue( os.path.exists( ifd ) )
        self.assertTrue( os.path.exists( ifd + ".ieworld.cob" ) )
        world = IECore.Reader.create( ifd + ".ieworld.cob" ).read()
        self.assertTrue( world )
        self.assertEqual( world.typeId(), IECore.Group.staticTypeId() )
        self.assertTrue( world.state() )
        self.assertEqual(
            world.state()[0].attributes[':surface'],
            IECore.StringData( 'testshader p2 1.234 p3 "hello" p1 11 p4 1 2 3 p5 1 0 0 ')
        )

    def tearDown( self ):
        # Remove everything the tests may have written under output/.
        files = [
            _dir + "/output/testGeometry.tif",
            _dir + "/output/testWorldMesh.tif",
            _dir + "/output/testIfdGen.tif",
            _dir + "/output/testIfdGen.ifd",
            _dir + "/output/testIfdGen.ifd.ieworld.cob",
            _dir + "/output/testOptions.ifd",
            _dir + "/output/testOptions.ifd.ieworld.cob",
            _dir + "/output/testShaderParameters.ifd",
            _dir + "/output/testShaderParameters.ifd.ieworld.cob",
        ]
        for f in files:
            if os.path.exists( f ):
                os.remove( f )

if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
blacklin/kbengine | kbe/src/lib/python/Tools/scripts/mailerdaemon.py | 97 | 8039 | #!/usr/bin/env python3
"""Classes to parse mailer-daemon messages."""
import calendar
import email.message
import re
import os
import sys
class Unparseable(Exception):
    """Raised when no parser in EMPARSERS can extract errors from a message."""
    pass
class ErrorMessage(email.message.Message):
    """A mailer-daemon message with helpers to classify and parse it."""

    def __init__(self):
        email.message.Message.__init__(self)
        # lowercased Subject, remembered by is_warning() for later parsing
        self.sub = ''

    def is_warning(self):
        """Return 1 if this message is only a delay warning, else 0.

        A message with no Subject is treated as a warning; otherwise the
        lowercased subject is remembered in self.sub before returning 0.
        """
        subject = self.get('Subject')
        if not subject:
            return 0
        subject = subject.lower()
        if subject.startswith('waiting mail') or 'warning' in subject:
            return 1
        self.sub = subject
        return 0

    def get_errors(self):
        """Run each parser in EMPARSERS over the body until one succeeds.

        NOTE(review): rewindbody()/self.fp come from the old rfc822.Message
        API, not email.message.Message -- confirm against the runtime used.
        """
        for parser in EMPARSERS:
            self.rewindbody()
            try:
                return parser(self.fp, self.sub)
            except Unparseable:
                continue
        raise Unparseable
# List of re's or tuples of re's.
# If a re, it should contain at least a group (?P<email>...) which
# should refer to the email address.  The re can also contain a group
# (?P<reason>...) which should refer to the reason (error message).
# If no reason is present, the emparse_list_reason list is used to
# find a reason.
# If a tuple, the tuple should contain 2 re's.  The first re finds a
# location, the second re is repeated one or more times to find
# multiple email addresses.  The second re is matched (not searched)
# where the previous match ended.
# The re's are compiled using the re module.
emparse_list_list = [
    'error: (?P<reason>unresolvable): (?P<email>.+)',
    ('----- The following addresses had permanent fatal errors -----\n',
     '(?P<email>[^ \n].*)\n( .*\n)?'),
    'remote execution.*\n.*rmail (?P<email>.+)',
    ('The following recipients did not receive your message:\n\n',
     ' +(?P<email>.*)\n(The following recipients did not receive your message:\n\n)?'),
    '------- Failure Reasons --------\n\n(?P<reason>.*)\n(?P<email>.*)',
    '^<(?P<email>.*)>:\n(?P<reason>.*)',
    '^(?P<reason>User mailbox exceeds allowed size): (?P<email>.+)',
    '^5\\d{2} <(?P<email>[^\n>]+)>\\.\\.\\. (?P<reason>.+)',
    '^Original-Recipient: rfc822;(?P<email>.*)',
    '^did not reach the following recipient\\(s\\):\n\n(?P<email>.*) on .*\n +(?P<reason>.*)',
    '^ <(?P<email>[^\n>]+)> \\.\\.\\. (?P<reason>.*)',
    '^Report on your message to: (?P<email>.*)\nReason: (?P<reason>.*)',
    '^Your message was not delivered to +(?P<email>.*)\n +for the following reason:\n +(?P<reason>.*)',
    '^ was not +(?P<email>[^ \n].*?) *\n.*\n.*\n.*\n because:.*\n +(?P<reason>[^ \n].*?) *\n',
    ]

def _compile_emparse_entry(entry):
    """Compile one emparse_list_list entry.

    A pattern string becomes a compiled MULTILINE regex; a tuple of
    pattern strings becomes a tuple of compiled MULTILINE regexes.
    """
    if isinstance(entry, str):
        return re.compile(entry, re.MULTILINE)
    return tuple(re.compile(pattern, re.MULTILINE) for pattern in entry)

# Compile the re's in the list and store them in-place.
# (Replaces the original loop that rebound its own loop variable and
# used type(x) is type('') instead of isinstance.)
emparse_list_list = [_compile_emparse_entry(entry) for entry in emparse_list_list]
# list of re's used to find reasons (error messages).
# if a string, "<>" is replaced by a copy of the email address.
# The expressions are searched for in order.  After the first match,
# no more expressions are searched for.  So, order is important.
emparse_list_reason = [
    r'^5\d{2} <>\.\.\. (?P<reason>.*)',
    # raw string added: '\.' is an invalid escape sequence in a plain
    # literal (DeprecationWarning/SyntaxWarning); the value is unchanged.
    r'<>\.\.\. (?P<reason>.*)',
    re.compile(r'^<<< 5\d{2} (?P<reason>.*)', re.MULTILINE),
    re.compile('===== stderr was =====\nrmail: (?P<reason>.*)'),
    re.compile('^Diagnostic-Code: (?P<reason>.*)', re.MULTILINE),
    ]
# Matches the start of the quoted original message; error text is only
# searched for above the first "From:" header.
emparse_list_from = re.compile('^From:', re.IGNORECASE|re.MULTILINE)
def emparse_list(fp, sub):
    """Extract delivery-error descriptions from one mailer-daemon body.

    fp is the open message file positioned at the body; sub is the
    lowercased Subject saved by ErrorMessage.is_warning().  Returns a list
    of 'address: reason' strings; raises Unparseable when no pattern in
    emparse_list_list finds any address.
    """
    data = fp.read()
    # Only search up to the first 'From:' header -- anything after it is
    # the quoted original message, not the daemon's error report.
    res = emparse_list_from.search(data)
    if res is None:
        from_index = len(data)
    else:
        from_index = res.start(0)
    errors = []
    emails = []
    reason = None
    for regexp in emparse_list_list:
        if type(regexp) is type(()):
            # (location re, repeated address re) pair: find the marker,
            # then match addresses one after another from where it ended.
            res = regexp[0].search(data, 0, from_index)
            if res is not None:
                try:
                    reason = res.group('reason')
                except IndexError:
                    # this pattern captures no reason group
                    pass
                while 1:
                    res = regexp[1].match(data, res.end(0), from_index)
                    if res is None:
                        break
                    emails.append(res.group('email'))
                break
        else:
            # single re: one address, optionally with a captured reason
            res = regexp.search(data, 0, from_index)
            if res is not None:
                emails.append(res.group('email'))
                try:
                    reason = res.group('reason')
                except IndexError:
                    pass
                break
    if not emails:
        raise Unparseable
    if not reason:
        # No reason captured: fall back to the subject line, then try the
        # reason-only patterns in emparse_list_reason.
        reason = sub
        if reason[:15] == 'returned mail: ':
            reason = reason[15:]
        for regexp in emparse_list_reason:
            if type(regexp) is type(''):
                # string pattern: '<>' is replaced by each (escaped) address;
                # iterate backwards because matched addresses are deleted.
                # NOTE: the loop variable 'email' shadows the imported
                # 'email' package inside this function.
                for i in range(len(emails)-1,-1,-1):
                    email = emails[i]
                    exp = re.compile(re.escape(email).join(regexp.split('<>')), re.MULTILINE)
                    res = exp.search(data)
                    if res is not None:
                        errors.append(' '.join((email.strip()+': '+res.group('reason')).split()))
                        del emails[i]
                continue
            res = regexp.search(data)
            if res is not None:
                reason = res.group('reason')
                break
    # Remaining addresses all share the fallback reason; the
    # ' '.join(...split()) collapses runs of whitespace.
    for email in emails:
        errors.append(' '.join((email.strip()+': '+reason).split()))
    return errors

# Ordered list of parser functions tried by ErrorMessage.get_errors().
EMPARSERS = [emparse_list]
def sort_numeric(a, b):
    """Old-style cmp function ordering numeric strings by integer value."""
    left, right = int(a), int(b)
    # classic cmp idiom: -1 / 0 / 1
    return (left > right) - (left < right)
def parsedir(dir, modify):
    """Parse every numerically-named mail file in directory *dir*.

    Prints a per-file status line, then a summary of how often each
    distinct delivery error occurred, with the first/last file and date it
    was seen in.  When *modify* is true, processed files are renamed with
    a ',' prefix.  Note: changes the process working directory to *dir*.
    """
    os.chdir(dir)
    pat = re.compile('^[0-9]*$')
    errordict = {}    # error text -> occurrence count
    errorfirst = {}   # error text -> 'file (date)' of first sighting
    errorlast = {}    # error text -> 'file (date)' of last sighting
    nok = nwarn = nbad = 0

    # find all numeric file names and sort them numerically.
    # list.sort() no longer accepts a cmp function on Python 3, so sort
    # on int keys (equivalent ordering to the old sort_numeric cmp).
    files = [fn for fn in os.listdir('.') if pat.match(fn) is not None]
    files.sort(key=int)

    for fn in files:
        # Lets try to parse the file.
        fp = open(fn)
        m = email.message_from_file(fp, _class=ErrorMessage)
        # NOTE(review): getaddr()/getdate() come from the old rfc822.Message
        # API and do not exist on email.message.Message -- verify against
        # the Python version this script actually runs under.
        sender = m.getaddr('From')
        print('%s\t%-40s\t'%(fn, sender[1]), end=' ')

        if m.is_warning():
            fp.close()
            print('warning only')
            nwarn = nwarn + 1
            if modify:
                os.rename(fn, ','+fn)
            continue

        try:
            errors = m.get_errors()
        except Unparseable:
            print('** Not parseable')
            nbad = nbad + 1
            fp.close()
            continue
        print(len(errors), 'errors')

        # Remember them
        for e in errors:
            try:
                mm, dd = m.getdate('date')[1:1+2]
                date = '%s %02d' % (calendar.month_abbr[mm], dd)
            except Exception:
                # any failure extracting the date just leaves it unknown
                date = '??????'
            if e not in errordict:
                errordict[e] = 1
                errorfirst[e] = '%s (%s)' % (fn, date)
            else:
                errordict[e] = errordict[e] + 1
            errorlast[e] = '%s (%s)' % (fn, date)

        fp.close()
        nok = nok + 1
        if modify:
            os.rename(fn, ','+fn)

    print('--------------')
    print(nok, 'files parsed,',nwarn,'files warning-only,', end=' ')
    print(nbad,'files unparseable')
    print('--------------')
    summary = []    # renamed from 'list': don't shadow the builtin
    for e in errordict.keys():
        summary.append((errordict[e], errorfirst[e], errorlast[e], e))
    summary.sort()
    for num, first, last, e in summary:
        print('%d %s - %s\t%s' % (num, first, last, e))
def main():
    """Command-line entry point: [-d] [folder ...]."""
    argv = sys.argv
    modify = 0
    # A leading '-d' switch turns on renaming of processed files.
    if len(argv) > 1 and argv[1] == '-d':
        modify = 1
        del argv[1]
    folders = argv[1:]
    if not folders:
        # historic default mailbox location
        parsedir('/ufs/jack/Mail/errorsinbox', modify)
    else:
        for folder in folders:
            parsedir(folder, modify)

if __name__ == '__main__' or sys.argv[0] == __name__:
    main()
| lgpl-3.0 |
gunchleoc/django | django/db/models/__init__.py | 66 | 1610 | from functools import wraps
from django.core.exceptions import ObjectDoesNotExist # NOQA
from django.db.models import signals # NOQA
from django.db.models.aggregates import * # NOQA
from django.db.models.deletion import ( # NOQA
CASCADE, DO_NOTHING, PROTECT, SET, SET_DEFAULT, SET_NULL, ProtectedError,
)
from django.db.models.expressions import ( # NOQA
F, Case, Expression, ExpressionWrapper, Func, Value, When,
)
from django.db.models.fields import * # NOQA
from django.db.models.fields.files import FileField, ImageField # NOQA
from django.db.models.fields.proxy import OrderWrt # NOQA
from django.db.models.lookups import Lookup, Transform # NOQA
from django.db.models.manager import Manager # NOQA
from django.db.models.query import Q, Prefetch, QuerySet # NOQA
# Imports that would create circular imports if sorted
from django.db.models.base import Model # NOQA isort:skip
from django.db.models.fields.related import ( # NOQA isort:skip
ForeignKey, ForeignObject, OneToOneField, ManyToManyField,
ManyToOneRel, ManyToManyRel, OneToOneRel,
)
def permalink(func):
    """
    Decorator that calls urlresolvers.reverse() to return a URL using
    parameters returned by the decorated function "func".

    "func" should be a function that returns a tuple in one of the
    following formats:
        (viewname, viewargs)
        (viewname, viewargs, viewkwargs)
    """
    # imported at decoration time, exactly as before
    from django.core.urlresolvers import reverse

    @wraps(func)
    def wrapper(*args, **kwargs):
        bits = func(*args, **kwargs)
        viewname = bits[0]
        # bits[1:3] tolerates both the 2- and 3-element tuple forms
        return reverse(viewname, None, *bits[1:3])

    return wrapper
| bsd-3-clause |
quxiaolong1504/django | tests/postgres_tests/test_unaccent.py | 328 | 1884 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import modify_settings
from . import PostgreSQLTestCase
from .models import CharFieldModel, TextFieldModel
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class UnaccentTest(PostgreSQLTestCase):
    """Tests for the postgres ``unaccent`` lookup against CharFieldModel."""

    # Model under test; UnaccentTextFieldTest swaps in TextFieldModel.
    Model = CharFieldModel

    def setUp(self):
        # One accented, one capitalised-unaccented and one lowercase row.
        self.Model.objects.bulk_create([
            self.Model(field="àéÖ"),
            self.Model(field="aeO"),
            self.Model(field="aeo"),
        ])

    def test_unaccent(self):
        # unaccent strips accents but the comparison stays case-sensitive.
        self.assertQuerysetEqual(
            self.Model.objects.filter(field__unaccent="aeO"),
            ["àéÖ", "aeO"],
            transform=lambda instance: instance.field,
            ordered=False
        )

    def test_unaccent_chained(self):
        """
        Check that unaccent can be used chained with a lookup (which should be
        the case since unaccent implements the Transform API)
        """
        self.assertQuerysetEqual(
            self.Model.objects.filter(field__unaccent__iexact="aeO"),
            ["àéÖ", "aeO", "aeo"],
            transform=lambda instance: instance.field,
            ordered=False
        )
        self.assertQuerysetEqual(
            self.Model.objects.filter(field__unaccent__endswith="éÖ"),
            ["àéÖ", "aeO"],
            transform=lambda instance: instance.field,
            ordered=False
        )

    def test_unaccent_accentuated_needle(self):
        # The lookup value itself is unaccented as well before comparison.
        self.assertQuerysetEqual(
            self.Model.objects.filter(field__unaccent="aéÖ"),
            ["àéÖ", "aeO"],
            transform=lambda instance: instance.field,
            ordered=False
        )
class UnaccentTextFieldTest(UnaccentTest):
    """
    TextField should have the exact same behavior as CharField
    regarding unaccent lookups.
    """
    # Reuse every test from UnaccentTest; only the model changes.
    Model = TextFieldModel
| bsd-3-clause |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/distutils/command/build_ext.py | 12 | 31230 | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import sys, os, re
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from site import USER_BASE
if os.name == 'nt':
    # MSVC_VERSION steers which PC/VS* .lib directories are searched in
    # finalize_options(); only meaningful on Windows.
    from distutils.msvccompiler import get_build_version
    MSVC_VERSION = int(get_build_version())

# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
    (r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
    """Print the list of available compilers (used by --help-compiler)."""
    from distutils.ccompiler import show_compilers
    show_compilers()
class build_ext(Command):
    """Implements the 'build_ext' command: compile and link C/C++
    extension modules into the build directory.
    """

    description = "build C/C++ extensions (compile/link to build directory)"

    # XXX thoughts on how to deal with complex command-line options like
    # these, i.e. how to make it so fancy_getopt can suck them off the
    # command line and make it look like setup.py defined the appropriate
    # lists of tuples of what-have-you.
    # - each command needs a callback to process its command-line options
    # - Command.__init__() needs access to its share of the whole
    #   command line (must ultimately come from
    #   Distribution.parse_command_line())
    # - it then calls the current command class' option-parsing
    #   callback to deal with weird options like -D, which have to
    #   parse the option text and churn out some custom data
    #   structure
    # - that data structure (in this case, a list of 2-tuples)
    #   will then be present in the command object by the time
    #   we get to finalize_options() (i.e. the constructor
    #   takes care of both command-line and client options
    #   in between initialize_options() and finalize_options())

    # suffix appended to several option descriptions below
    sep_by = " (separated by '%s')" % os.pathsep
    user_options = [
        ('build-lib=', 'b',
         "directory for compiled extension modules"),
        ('build-temp=', 't',
         "directory for temporary files (build by-products)"),
        ('plat-name=', 'p',
         "platform name to cross-compile for, if supported "
         "(default: %s)" % get_platform()),
        ('inplace', 'i',
         "ignore build-lib and put compiled extensions into the source " +
         "directory alongside your pure Python modules"),
        ('include-dirs=', 'I',
         "list of directories to search for header files" + sep_by),
        ('define=', 'D',
         "C preprocessor macros to define"),
        ('undef=', 'U',
         "C preprocessor macros to undefine"),
        ('libraries=', 'l',
         "external C libraries to link with"),
        ('library-dirs=', 'L',
         "directories to search for external C libraries" + sep_by),
        ('rpath=', 'R',
         "directories to search for shared C libraries at runtime"),
        ('link-objects=', 'O',
         "extra explicit link objects to include in the link"),
        ('debug', 'g',
         "compile/link with debugging information"),
        ('force', 'f',
         "forcibly build everything (ignore file timestamps)"),
        ('compiler=', 'c',
         "specify the compiler type"),
        ('swig-cpp', None,
         "make SWIG create C++ files (default is C)"),
        ('swig-opts=', None,
         "list of SWIG command line options"),
        ('swig=', None,
         "path to the SWIG executable"),
        ('user', None,
         "add user include, library and rpath")
        ]

    # options that are on/off flags (take no argument)
    boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']

    help_options = [
        ('help-compiler', None,
         "list available compilers", show_compilers),
        ]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
    def finalize_options(self):
        """Resolve all options: inherit defaults from the 'build' command
        and compute the platform-specific include/library search paths.
        """
        from distutils import sysconfig

        self.set_undefined_options('build',
                                   ('build_lib', 'build_lib'),
                                   ('build_temp', 'build_temp'),
                                   ('compiler', 'compiler'),
                                   ('debug', 'debug'),
                                   ('force', 'force'),
                                   ('plat_name', 'plat_name'),
                                   )

        if self.package is None:
            self.package = self.distribution.ext_package

        self.extensions = self.distribution.ext_modules

        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
        py_include = sysconfig.get_python_inc()
        plat_py_include = sysconfig.get_python_inc(plat_specific=1)
        if self.include_dirs is None:
            self.include_dirs = self.distribution.include_dirs or []
        if isinstance(self.include_dirs, str):
            self.include_dirs = self.include_dirs.split(os.pathsep)

        # If in a virtualenv, add its include directory
        # Issue 16116
        if sys.exec_prefix != sys.base_exec_prefix:
            self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))

        # Put the Python "system" include dir at the end, so that
        # any local include dirs take precedence.
        self.include_dirs.append(py_include)
        if plat_py_include != py_include:
            self.include_dirs.append(plat_py_include)

        self.ensure_string_list('libraries')

        # Life is easier if we're not forever checking for None, so
        # simplify these options to empty lists if unset
        if self.libraries is None:
            self.libraries = []
        if self.library_dirs is None:
            self.library_dirs = []
        elif isinstance(self.library_dirs, str):
            self.library_dirs = self.library_dirs.split(os.pathsep)

        if self.rpath is None:
            self.rpath = []
        elif isinstance(self.rpath, str):
            self.rpath = self.rpath.split(os.pathsep)

        # for extensions under windows use different directories
        # for Release and Debug builds.
        # also Python's library directory must be appended to library_dirs
        if os.name == 'nt':
            # the 'libs' directory is for binary installs - we assume that
            # must be the *native* platform.  But we don't really support
            # cross-compiling via a binary install anyway, so we let it go.
            self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
            if sys.base_exec_prefix != sys.prefix:  # Issue 16116
                self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
            if self.debug:
                self.build_temp = os.path.join(self.build_temp, "Debug")
            else:
                self.build_temp = os.path.join(self.build_temp, "Release")

            # Append the source distribution include and library directories,
            # this allows distutils on windows to work in the source tree
            self.include_dirs.append(os.path.dirname(get_config_h_filename()))
            _sys_home = getattr(sys, '_home', None)
            if _sys_home:
                self.library_dirs.append(_sys_home)
            if MSVC_VERSION >= 9:
                # Use the .lib files for the correct architecture
                if self.plat_name == 'win32':
                    suffix = ''
                else:
                    # win-amd64 or win-ia64
                    suffix = self.plat_name[4:]
                new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
                if suffix:
                    new_lib = os.path.join(new_lib, suffix)
                self.library_dirs.append(new_lib)
            elif MSVC_VERSION == 8:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                      'PC', 'VS8.0'))
            elif MSVC_VERSION == 7:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                      'PC', 'VS7.1'))
            else:
                self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                      'PC', 'VC6'))

        # for extensions under Cygwin and AtheOS Python's library directory must be
        # appended to library_dirs
        if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
            if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
                # building third party extensions
                self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                      "python" + get_python_version(),
                                                      "config"))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # For building extensions with a shared Python library,
        # Python's library directory must be appended to library_dirs
        # See Issues: #1600860, #4366
        if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
            if not sysconfig.python_build:
                # building third party extensions
                self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
            else:
                # building python standard extensions
                self.library_dirs.append('.')

        # The argument parsing will result in self.define being a string, but
        # it has to be a list of 2-tuples.  All the preprocessor symbols
        # specified by the 'define' option will be set to '1'.  Multiple
        # symbols can be separated with commas.
        if self.define:
            defines = self.define.split(',')
            self.define = [(symbol, '1') for symbol in defines]

        # The option for macros to undefine is also a string from the
        # option parsing, but has to be a list.  Multiple symbols can also
        # be separated with commas here.
        if self.undef:
            self.undef = self.undef.split(',')

        if self.swig_opts is None:
            self.swig_opts = []
        else:
            self.swig_opts = self.swig_opts.split(' ')

        # Finally add the user include and library directories if requested
        if self.user:
            user_include = os.path.join(USER_BASE, "include")
            user_lib = os.path.join(USER_BASE, "lib")
            if os.path.isdir(user_include):
                self.include_dirs.append(user_include)
            if os.path.isdir(user_lib):
                self.library_dirs.append(user_lib)
                self.rpath.append(user_lib)
    def run(self):
        """Build all extensions: configure a CCompiler with the finalized
        options, then compile and link everything via build_extensions().
        """
        from distutils.ccompiler import new_compiler

        # 'self.extensions', as supplied by setup.py, is a list of
        # Extension instances.  See the documentation for Extension (in
        # distutils.extension) for details.
        #
        # For backwards compatibility with Distutils 0.8.2 and earlier, we
        # also allow the 'extensions' list to be a list of tuples:
        #    (ext_name, build_info)
        # where build_info is a dictionary containing everything that
        # Extension instances do except the name, with a few things being
        # differently named.  We convert these 2-tuples to Extension
        # instances as needed.

        if not self.extensions:
            return

        # If we were asked to build any C/C++ libraries, make sure that the
        # directory where we put them is in the library search path for
        # linking extensions.
        if self.distribution.has_c_libraries():
            build_clib = self.get_finalized_command('build_clib')
            self.libraries.extend(build_clib.get_library_names() or [])
            self.library_dirs.append(build_clib.build_clib)

        # Setup the CCompiler object that we'll use to do all the
        # compiling and linking
        self.compiler = new_compiler(compiler=self.compiler,
                                     verbose=self.verbose,
                                     dry_run=self.dry_run,
                                     force=self.force)
        customize_compiler(self.compiler)
        # If we are cross-compiling, init the compiler now (if we are not
        # cross-compiling, init would not hurt, but people may rely on
        # late initialization of compiler even if they shouldn't...)
        if os.name == 'nt' and self.plat_name != get_platform():
            self.compiler.initialize(self.plat_name)

        # And make sure that any compile/link-related options (which might
        # come from the command-line or from the setup script) are set in
        # that CCompiler object -- that way, they automatically apply to
        # all compiling and linking done here.
        if self.include_dirs is not None:
            self.compiler.set_include_dirs(self.include_dirs)
        if self.define is not None:
            # 'define' option is a list of (name,value) tuples
            for (name, value) in self.define:
                self.compiler.define_macro(name, value)
        if self.undef is not None:
            for macro in self.undef:
                self.compiler.undefine_macro(macro)
        if self.libraries is not None:
            self.compiler.set_libraries(self.libraries)
        if self.library_dirs is not None:
            self.compiler.set_library_dirs(self.library_dirs)
        if self.rpath is not None:
            self.compiler.set_runtime_library_dirs(self.rpath)
        if self.link_objects is not None:
            self.compiler.set_link_objects(self.link_objects)

        # Now actually compile and link everything.
        self.build_extensions()
    def check_extensions_list(self, extensions):
        """Ensure that the list of extensions (presumably provided as a
        command option 'extensions') is valid, i.e. it is a list of
        Extension objects.  We also support the old-style list of 2-tuples,
        where the tuples are (ext_name, build_info), which are converted to
        Extension instances here.

        Raise DistutilsSetupError if the structure is invalid anywhere;
        just returns otherwise.
        """
        if not isinstance(extensions, list):
            raise DistutilsSetupError(
                "'ext_modules' option must be a list of Extension instances")

        for i, ext in enumerate(extensions):
            if isinstance(ext, Extension):
                continue                # OK! (assume type-checking done
                                        # by Extension constructor)

            if not isinstance(ext, tuple) or len(ext) != 2:
                raise DistutilsSetupError(
                    "each element of 'ext_modules' option must be an "
                    "Extension instance or 2-tuple")

            ext_name, build_info = ext

            log.warn(("old-style (ext_name, build_info) tuple found in "
                      "ext_modules for extension '%s'"
                      "-- please convert to Extension instance" % ext_name))

            if not (isinstance(ext_name, str) and
                    extension_name_re.match(ext_name)):
                raise DistutilsSetupError(
                    "first element of each tuple in 'ext_modules' "
                    "must be the extension name (a string)")

            if not isinstance(build_info, dict):
                raise DistutilsSetupError(
                    "second element of each tuple in 'ext_modules' "
                    "must be a dictionary (build info)")

            # OK, the (ext_name, build_info) dict is type-safe: convert it
            # to an Extension instance.
            ext = Extension(ext_name, build_info['sources'])

            # Easy stuff: one-to-one mapping from dict elements to
            # instance attributes.
            for key in ('include_dirs', 'library_dirs', 'libraries',
                        'extra_objects', 'extra_compile_args',
                        'extra_link_args'):
                val = build_info.get(key)
                if val is not None:
                    setattr(ext, key, val)

            # Medium-easy stuff: same syntax/semantics, different names.
            ext.runtime_library_dirs = build_info.get('rpath')
            if 'def_file' in build_info:
                log.warn("'def_file' element of build info dict "
                         "no longer supported")

            # Non-trivial stuff: 'macros' split into 'define_macros'
            # and 'undef_macros'.
            macros = build_info.get('macros')
            if macros:
                ext.define_macros = []
                ext.undef_macros = []
                for macro in macros:
                    if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
                        raise DistutilsSetupError(
                            "'macros' element of build info dict "
                            "must be 1- or 2-tuple")
                    if len(macro) == 1:
                        ext.undef_macros.append(macro[0])
                    elif len(macro) == 2:
                        ext.define_macros.append(macro)

            # replace the tuple with the converted Extension, in place
            extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
outputs.append(self.get_ext_fullpath(ext.name))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
for ext in self.extensions:
try:
self.build_extension(ext)
except (CCompilerError, DistutilsError, CompileError) as e:
if not ext.optional:
raise
self.warn('building extension "%s" failed: %s' %
(ext.name, e))
    def build_extension(self, ext):
        """Compile and link a single Extension into a shared module.

        Validates ext.sources, skips the build when the output is newer
        than all sources/dependencies (unless self.force), runs SWIG on
        any .i files, compiles, then links the shared object.
        """
        sources = ext.sources
        if sources is None or not isinstance(sources, (list, tuple)):
            raise DistutilsSetupError(
                "in 'ext_modules' option (extension '%s'), "
                "'sources' must be present and must be "
                "a list of source filenames" % ext.name)
        sources = list(sources)
        ext_path = self.get_ext_fullpath(ext.name)
        depends = sources + ext.depends
        if not (self.force or newer_group(depends, ext_path, 'newer')):
            log.debug("skipping '%s' extension (up-to-date)", ext.name)
            return
        else:
            log.info("building '%s' extension", ext.name)
        # First, scan the sources for SWIG definition files (.i), run
        # SWIG on 'em to create .c files, and modify the sources list
        # accordingly.
        sources = self.swig_sources(sources, ext)
        # Next, compile the source code to object files.
        # XXX not honouring 'define_macros' or 'undef_macros' -- the
        # CCompiler API needs to change to accommodate this, and I
        # want to do one thing at a time!
        # Two possible sources for extra compiler arguments:
        #   - 'extra_compile_args' in Extension object
        #   - CFLAGS environment variable (not particularly
        #     elegant, but people seem to expect it and I
        #     guess it's useful)
        # The environment variable should take precedence, and
        # any sensible compiler will give precedence to later
        # command line args.  Hence we combine them in order:
        extra_args = ext.extra_compile_args or []
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # a 1-tuple marks a macro to undefine for CCompiler.compile()
            macros.append((undef,))
        objects = self.compiler.compile(sources,
                                        output_dir=self.build_temp,
                                        macros=macros,
                                        include_dirs=ext.include_dirs,
                                        debug=self.debug,
                                        extra_postargs=extra_args,
                                        depends=ext.depends)
        # XXX -- this is a Vile HACK!
        #
        # The setup.py script for Python on Unix needs to be able to
        # get this list so it can perform all the clean up needed to
        # avoid keeping object files around when cleaning out a failed
        # build of an extension module.  Since Distutils does not
        # track dependencies, we have to get rid of intermediates to
        # ensure all the intermediates will be properly re-built.
        #
        self._built_objects = objects[:]
        # Now link the object files together into a "shared object" --
        # of course, first we have to figure out all the other things
        # that go into the mix.
        if ext.extra_objects:
            objects.extend(ext.extra_objects)
        extra_args = ext.extra_link_args or []
        # Detect target language, if not provided
        language = ext.language or self.compiler.detect_language(sources)
        self.compiler.link_shared_object(
            objects, ext_path,
            libraries=self.get_libraries(ext),
            library_dirs=ext.library_dirs,
            runtime_library_dirs=ext.runtime_library_dirs,
            extra_postargs=extra_args,
            export_symbols=self.get_export_symbols(ext),
            debug=self.debug,
            build_temp=self.build_temp,
            target_lang=language)
    def swig_sources(self, sources, extension):
        """Walk the list of source files in 'sources', looking for SWIG
        interface (.i) files.  Run SWIG on all that are found, and
        return a modified 'sources' list with SWIG source files replaced
        by the generated C (or C++) files.
        """
        new_sources = []
        swig_sources = []
        swig_targets = {}
        # XXX this drops generated C/C++ files into the source tree, which
        # is fine for developers who want to distribute the generated
        # source -- but there should be an option to put SWIG output in
        # the temp dir.
        if self.swig_cpp:
            log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
        # C++ output when requested by the deprecated flag or by a -c++
        # option on either the command line or the extension itself.
        if self.swig_cpp or ('-c++' in self.swig_opts) or \
           ('-c++' in extension.swig_opts):
            target_ext = '.cpp'
        else:
            target_ext = '.c'
        for source in sources:
            (base, ext) = os.path.splitext(source)
            if ext == ".i":             # SWIG interface file
                # e.g. foo.i -> foo_wrap.c (or foo_wrap.cpp)
                new_sources.append(base + '_wrap' + target_ext)
                swig_sources.append(source)
                swig_targets[source] = new_sources[-1]
            else:
                new_sources.append(source)
        if not swig_sources:
            return new_sources
        swig = self.swig or self.find_swig()
        swig_cmd = [swig, "-python"]
        swig_cmd.extend(self.swig_opts)
        if self.swig_cpp:
            swig_cmd.append("-c++")
        # Do not override commandline arguments
        if not self.swig_opts:
            for o in extension.swig_opts:
                swig_cmd.append(o)
        for source in swig_sources:
            target = swig_targets[source]
            log.info("swigging %s to %s", source, target)
            self.spawn(swig_cmd + ["-o", target, source])
        return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
# extensions in debug_mode are named 'module_d.pyd' under windows
ext_suffix = get_config_var('EXT_SUFFIX')
if os.name == 'nt' and self.debug:
return os.path.join(*ext_path) + '_d' + ext_suffix
return os.path.join(*ext_path) + ext_suffix
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "PyInit_" function.
"""
initfunc_name = "PyInit_" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
    def get_libraries(self, ext):
        """Return the list of libraries to link against when building a
        shared extension.  On most platforms, this is just 'ext.libraries';
        on Windows, we add the Python library (eg. python20.dll).
        """
        # The python library is always needed on Windows.  For MSVC, this
        # is redundant, since the library is mentioned in a pragma in
        # pyconfig.h that MSVC groks.  The other Windows compilers all seem
        # to need it mentioned explicitly, though, so that's what we do.
        # Append '_d' to the python import library on debug builds.
        if sys.platform == "win32":
            from distutils.msvccompiler import MSVCCompiler
            if not isinstance(self.compiler, MSVCCompiler):
                # e.g. "python27" / "python27_d" from sys.hexversion bytes
                template = "python%d%d"
                if self.debug:
                    template = template + '_d'
                pythonlib = (template %
                       (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
                # don't extend ext.libraries, it may be shared with other
                # extensions, it is a reference to the original list
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
        elif sys.platform[:6] == "cygwin":
            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        elif sys.platform[:6] == "atheos":
            from distutils import sysconfig
            template = "python%d.%d"
            pythonlib = (template %
                   (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # Get SHLIBS from Makefile
            extra = []
            for lib in sysconfig.get_config_var('SHLIBS').split():
                if lib.startswith('-l'):
                    extra.append(lib[2:])
                else:
                    extra.append(lib)
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib, "m"] + extra
        elif sys.platform == 'darwin':
            # Don't use the default code below
            return ext.libraries
        elif sys.platform[:3] == 'aix':
            # Don't use the default code below
            return ext.libraries
        else:
            from distutils import sysconfig
            # Only link libpython explicitly when Python was built shared.
            if sysconfig.get_config_var('Py_ENABLE_SHARED'):
                pythonlib = 'python{}.{}{}'.format(
                    sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
                    sys.abiflags)
                return ext.libraries + [pythonlib]
            else:
                return ext.libraries
| gpl-2.0 |
genonfire/portality | portality/urls.py | 1 | 1812 | """portality URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
# URL routes for the portality project.
# NOTE(review): the string dotted-path view references below (e.g.
# 'django.contrib.auth.views.login') were deprecated in Django 1.8 and
# removed in Django 1.10 -- confirm the Django version this targets.
urlpatterns = [
    # Built-in auth URLs (password reset confirm, etc.).
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(
        r'^accounts/login/$',
        'django.contrib.auth.views.login',
        name='login',
        kwargs={'template_name': 'login.html'}
    ),
    url(
        r'^accounts/logout/$',
        'django.contrib.auth.views.logout',
        name='logout',
        kwargs={'next_page': 'login'}
    ),
    url(
        r'^accounts/passwordchange/$',
        'django.contrib.auth.views.password_change',
        name='passwordchange'
    ),
    url(
        r'^accounts/passwordreset/$',
        'django.contrib.auth.views.password_reset',
        name='passwordreset'
    ),
    url(
        r'^podcast/(?P<pod>\w+)/$',
        'core.views.podcast',
        name="podcast"
    ),
    # App-level URLconfs.
    url(r'^', include('issue.urls')),
    url(r'^db/', include('giza.urls', namespace='giza')),
    url(r'^accounts/', include('accounts.urls', namespace='accounts')),
    url(r'^api/', include('core.apiurls', namespace='api')),
    # static() serves MEDIA_URL from MEDIA_ROOT -- presumably intended for
    # development use only; verify production serves media via the web server.
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| mit |
mastersign/orbit | src/orbit_framework/components/common.py | 1 | 1785 | # coding=utf-8
## ORBIT ist ein Python-Framework für TinkerForge-Anwendungen
## Copyright (C) 2014 Tobias Kiertscher <dev@mastersign.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as
## published by the Free Software Foundation, either version 3
## of the License, or (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU (Lesser) General
## Public License along with this program.
## If not, see <http://www.gnu.org/licenses/>.
# Module orbit_framework.components.common
"""
Dieses Modul enthält einige Komponenten für den allgemeinen Einsatz.
Enthalten sind die folgenden Komponenten:
- :py:class:`EventCallbackComponent`
"""
from .. import Component
class EventCallbackComponent(Component):
    """
    This component listens for messages matching a receive pattern (slot)
    and invokes a callback whenever a matching message arrives.

    **Parameters**

    ``name``
        The name of the component.

    ``slot``
        The receive pattern used to select incoming messages.

    ``callback``
        A parameterless function, invoked once per matching message.
    """
    def __init__(self, name,
            slot, callback,
            **nargs):
        super(EventCallbackComponent, self).__init__(name, **nargs)
        self.callback = callback
        # Register ourselves as a listener for the given slot.
        self.add_listener(slot.listener(self.process_message))
    def process_message(self, job, component, name, value):
        # The message payload is intentionally ignored; the callback
        # takes no arguments.
        self.callback()
| lgpl-3.0 |
fangxingli/hue | desktop/core/ext-py/markdown/markdown/odict.py | 143 | 5157 | class OrderedDict(dict):
"""
A dictionary that keeps its keys in the order in which they're inserted.
Copied from Django's SortedDict with some modifications.
"""
    def __new__(cls, *args, **kwargs):
        # keyOrder is created in __new__ so it exists even when __init__
        # is bypassed (e.g. by copy machinery).
        instance = super(OrderedDict, cls).__new__(cls, *args, **kwargs)
        instance.keyOrder = []
        return instance
    def __init__(self, data=None):
        """Initialize from a dict or an iterable of (key, value) pairs."""
        if data is None:
            data = {}
        super(OrderedDict, self).__init__(data)
        if isinstance(data, dict):
            # NOTE(review): relies on Python 2 dict.keys() returning a list;
            # under Python 3 this would store a view object -- confirm target.
            self.keyOrder = data.keys()
        else:
            self.keyOrder = []
            # Record first-seen order; duplicates keep their original slot.
            for key, value in data:
                if key not in self.keyOrder:
                    self.keyOrder.append(key)
    def __deepcopy__(self, memo):
        # Rebuild through the pair-list constructor so key order is preserved;
        # keys are not deep-copied, values are.
        from copy import deepcopy
        return self.__class__([(key, deepcopy(value, memo))
                               for key, value in self.iteritems()])
    def __setitem__(self, key, value):
        super(OrderedDict, self).__setitem__(key, value)
        # Only record order for keys not already present; re-assignment
        # keeps the original position.
        if key not in self.keyOrder:
            self.keyOrder.append(key)
    def __delitem__(self, key):
        # Keep the order list in sync with the underlying dict.
        super(OrderedDict, self).__delitem__(key)
        self.keyOrder.remove(key)
    def __iter__(self):
        # Iterate keys in insertion order rather than dict hash order.
        for k in self.keyOrder:
            yield k
    def pop(self, k, *args):
        """Remove key *k* and return its value (or the supplied default)."""
        result = super(OrderedDict, self).pop(k, *args)
        try:
            self.keyOrder.remove(k)
        except ValueError:
            # Key wasn't in the dictionary in the first place. No problem.
            pass
        return result
    def popitem(self):
        # Delegates to dict.popitem (an arbitrary pair) and drops the key
        # from the order list.
        result = super(OrderedDict, self).popitem()
        self.keyOrder.remove(result[0])
        return result
    def items(self):
        # Python 2 zip() -> list of (key, value) pairs in insertion order.
        return zip(self.keyOrder, self.values())
    def iteritems(self):
        # Lazy (key, value) pairs in insertion order.
        for key in self.keyOrder:
            yield key, super(OrderedDict, self).__getitem__(key)
    def keys(self):
        # Return a copy so callers cannot mutate the internal order list.
        return self.keyOrder[:]
    def iterkeys(self):
        # Iterator over keys in insertion order.
        return iter(self.keyOrder)
    def values(self):
        # Values listed in key-insertion order.
        return [super(OrderedDict, self).__getitem__(k) for k in self.keyOrder]
    def itervalues(self):
        # Lazy values in key-insertion order.
        for key in self.keyOrder:
            yield super(OrderedDict, self).__getitem__(key)
    def update(self, dict_):
        # Route through __setitem__ so new keys are appended to keyOrder.
        for k, v in dict_.items():
            self.__setitem__(k, v)
    def setdefault(self, key, default):
        # Track order for a key that is about to be inserted by dict.setdefault.
        if key not in self.keyOrder:
            self.keyOrder.append(key)
        return super(OrderedDict, self).setdefault(key, default)
    def value_for_index(self, index):
        """Return the value of the item at the given zero-based index."""
        # Raises IndexError for an out-of-range index.
        return self[self.keyOrder[index]]
    def insert(self, index, key, value):
        """Insert the key, value pair before the item with the given index."""
        if key in self.keyOrder:
            n = self.keyOrder.index(key)
            del self.keyOrder[n]
            if n < index:
                # Removing an earlier entry shifted the target slot left.
                index -= 1
        self.keyOrder.insert(index, key)
        super(OrderedDict, self).__setitem__(key, value)
    def copy(self):
        """Return a copy of this object."""
        # This way of initializing the copy means it works for subclasses, too.
        obj = self.__class__(self)
        # The constructor saw a dict, so restore the order list explicitly.
        obj.keyOrder = self.keyOrder[:]
        return obj
    def __repr__(self):
        """
        Replace the normal dict.__repr__ with a version that returns the keys
        in their sorted order.
        """
        return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])
    def clear(self):
        # Empty both the dict and the parallel order list.
        super(OrderedDict, self).clear()
        self.keyOrder = []
    def index(self, key):
        """ Return the index of a given key. """
        # Raises ValueError if the key is absent.
        return self.keyOrder.index(key)
    def index_for_location(self, location):
        """ Return index or None for a given location. """
        # '_begin'/'_end' address the ends; '<key' means before key and
        # '>key' means after it.  None signals "append at the end".
        if location == '_begin':
            i = 0
        elif location == '_end':
            i = None
        elif location.startswith('<') or location.startswith('>'):
            i = self.index(location[1:])
            if location.startswith('>'):
                if i >= len(self):
                    # last item
                    i = None
                else:
                    i += 1
        else:
            raise ValueError('Not a valid location: "%s". Location key '
                             'must start with a ">" or "<".' % location)
        return i
    def add(self, key, value, location):
        """ Insert by key location. """
        i = self.index_for_location(location)
        if i is not None:
            self.insert(i, key, value)
        else:
            # No index means "append at the end".
            self.__setitem__(key, value)
def link(self, key, location):
""" Change location of an existing item. """
n = self.keyOrder.index(key)
del self.keyOrder[n]
i = self.index_for_location(location)
try:
if i is not None:
self.keyOrder.insert(i, key)
else:
self.keyOrder.append(key)
except Error:
# restore to prevent data loss and reraise
self.keyOrder.insert(n, key)
raise Error
| apache-2.0 |
Hasimir/pyjs | examples/picasaweb/__main__.py | 8 | 1101 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Modules translated by pyjs for this example.
TARGETS = [
    'PicasaWeb.py',
]
# Metadata consumed by the shared example installer (util.install).
PACKAGE = {
    'title': 'picasaweb',
    'desc': 'Access RESTful services such as google provide with their gdata api',
}
# These three hooks are called by the boilerplate at the bottom of the file
# and simply delegate to the shared _examples.util helpers.
def setup(targets):
    '''Setup example for translation, MUST call util.setup(targets).'''
    util.setup(targets)
def translate():
    '''Translate example, MUST call util.translate().'''
    util.translate()
def install(package):
    '''Install and cleanup example module. MUST call util.install(package)'''
    util.install(package)
##---------------------------------------##
# --------- (-: DO NOT EDIT :-) --------- #
##---------------------------------------##
import sys
import os
# Walk up from this file until the shared 'examples' directory is found.
examples = head = os.path.abspath(os.path.dirname(__file__))
while os.path.split(examples)[1].lower() != 'examples':
    examples = os.path.split(examples)[0]
    if not examples:
        raise ValueError("Cannot determine examples directory")
# Temporarily put the examples dir on sys.path to import the shared helper.
sys.path.insert(0, os.path.join(examples))
from _examples import util
sys.path.pop(0)
util.init(head)
# Run the standard three-phase example pipeline.
setup(TARGETS)
translate()
install(PACKAGE)
| apache-2.0 |
sftd/scons | scons-local/SCons/Tool/msvs.py | 7 | 73307 | """SCons.Tool.msvs
Tool-specific initialization for Microsoft Visual Studio project files.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Tool/msvs.py 2014/03/02 14:18:15 garyo"
import SCons.compat
import base64
import hashlib
import ntpath
import os
# compat layer imports "cPickle" for us if it's available.
import pickle
import re
import sys
import SCons.Builder
import SCons.Node.FS
import SCons.Platform.win32
import SCons.Script.SConscript
import SCons.PathList
import SCons.Util
import SCons.Warnings
from MSCommon import msvc_exists, msvc_setup_env_once
from SCons.Defaults import processDefines
##############################################################################
# Below here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def xmlify(s):
    """Escape *s* for safe embedding in XML text or attribute values.

    '&' must be replaced first so already-produced entities are not
    double-escaped.  '<' and '>' are escaped as well: the original left
    them alone, which produced malformed project-file XML whenever a
    command line or path contained them.
    """
    s = s.replace("&", "&amp;") # do this first
    s = s.replace("'", "&apos;")
    s = s.replace('"', "&quot;")
    s = s.replace('<', "&lt;")
    s = s.replace('>', "&gt;")
    return s
# Process a CPPPATH list in includes, given the env, target and source.
# Returns a tuple of nodes.
def processIncludes(includes, env, target, source):
    """Expand a CPPPATH-style *includes* list with SCons path substitution
    for the given env/target/source and return the substituted tuple."""
    return SCons.PathList.PathList(includes).subst_path(env, target, source)
external_makefile_guid = '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}'
def _generateGUID(slnfile, name):
"""This generates a dummy GUID for the sln file to use. It is
based on the MD5 signatures of the sln filename plus the name of
the project. It basically just needs to be unique, and not
change with each invocation."""
m = hashlib.md5()
# Normalize the slnfile path to a Windows path (\ separators) so
# the generated file has a consistent GUID even if we generate
# it on a non-Windows platform.
m.update(ntpath.normpath(str(slnfile)) + str(name))
solution = m.hexdigest().upper()
# convert most of the signature to GUID form (discard the rest)
solution = "{" + solution[:8] + "-" + solution[8:12] + "-" + solution[12:16] + "-" + solution[16:20] + "-" + solution[20:32] + "}"
return solution
version_re = re.compile(r'(\d+\.\d+)(.*)')
def msvs_parse_version(s):
    """Split a Visual Studio version string, which may be something like
    '7.0Exp', into its numeric part (returned as a float) and the trailing
    "suite" portion (returned as a string)."""
    match = version_re.match(s)
    return float(match.group(1)), match.group(2)
# os.path.relpath has been introduced in Python 2.6
# We define it locally for earlier versions of Python
def relpath(path, start=os.path.curdir):
    """Return a relative version of a path"""
    import sys
    if not path:
        raise ValueError("no path specified")
    start_list = os.path.abspath(start).split(os.sep)
    path_list = os.path.abspath(path).split(os.sep)
    if 'posix' in sys.builtin_module_names:
        # Work out how much of the filepath is shared by start and path.
        i = len(os.path.commonprefix([start_list, path_list]))
    else:
        if start_list[0].lower() != path_list[0].lower():
            # NOTE(review): os.path.splitunc is Python-2-only (removed in
            # Python 3); this branch assumes a py2 runtime -- confirm.
            unc_path, rest = os.path.splitunc(path)
            unc_start, rest = os.path.splitunc(start)
            if bool(unc_path) ^ bool(unc_start):
                raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                    % (path, start))
            else:
                raise ValueError("path is on drive %s, start on drive %s"
                                                    % (path_list[0], start_list[0]))
        # Work out how much of the filepath is shared by start and path.
        for i in range(min(len(start_list), len(path_list))):
            if start_list[i].lower() != path_list[i].lower():
                break
        else:
            i += 1
    rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
    if not rel_list:
        return os.path.curdir
    return os.path.join(*rel_list)
# Patch the fallback into os.path only when the running Python lacks it.
if not "relpath" in os.path.__all__:
    os.path.relpath = relpath
# This is how we re-invoke SCons from inside MSVS Project files.
# The problem is that we might have been invoked as either scons.bat
# or scons.py.  If we were invoked directly as scons.py, then we could
# use sys.argv[0] to find the SCons "executable," but that doesn't work
# if we were invoked as scons.bat, which uses "python -c" to execute
# things and ends up with "-c" as sys.argv[0].  Consequently, we have
# the MSVS Project file invoke SCons the same way that scons.bat does,
# which works regardless of how we were invoked.
def getExecScriptMain(env, xml=None):
    """Return the 'python -c' one-liner a generated project file uses to
    bootstrap SCons.  Honors SCONS_HOME (env or OS environment); when *xml*
    is true the result is XML-escaped via xmlify()."""
    scons_home = env.get('SCONS_HOME')
    if not scons_home and 'SCONS_LIB_DIR' in os.environ:
        scons_home = os.environ['SCONS_LIB_DIR']
    if scons_home:
        exec_script_main = "from os.path import join; import sys; sys.path = [ r'%s' ] + sys.path; import SCons.Script; SCons.Script.main()" % scons_home
    else:
        # No explicit home: search the standard site-packages locations
        # for the matching scons-<version> layout.
        version = SCons.__version__
        exec_script_main = "from os.path import join; import sys; sys.path = [ join(sys.prefix, 'Lib', 'site-packages', 'scons-%(version)s'), join(sys.prefix, 'scons-%(version)s'), join(sys.prefix, 'Lib', 'site-packages', 'scons'), join(sys.prefix, 'scons') ] + sys.path; import SCons.Script; SCons.Script.main()" % locals()
    if xml:
        exec_script_main = xmlify(exec_script_main)
    return exec_script_main
# The string for the Python executable we tell the Project file to use
# is either sys.executable or, if an external PYTHON_ROOT environment
# variable exists, $(PYTHON)ROOT\\python.exe (generalized a little to
# pluck the actual executable name from sys.executable).
try:
    python_root = os.environ['PYTHON_ROOT']
except KeyError:
    python_executable = sys.executable
else:
    # '$$' escapes the '$' so the generated file keeps a literal
    # $(PYTHON_ROOT) macro reference instead of an SCons substitution.
    python_executable = os.path.join('$$(PYTHON_ROOT)',
                                     os.path.split(sys.executable)[1])
class Config(object):
    """Plain attribute container for one per-variant MSVS build config
    (buildtarget, outdir, cmdargs, runfile, variant, platform)."""
    pass
def splitFully(path):
    """Split *path* into the full list of its components, e.g.
    'a/b/c' -> ['a', 'b', 'c']."""
    parts = []
    while True:
        head, tail = os.path.split(path)
        if head and head != path:
            parts.append(tail)
            path = head
        else:
            if tail:
                parts.append(tail)
            parts.reverse()
            return parts
def makeHierarchy(sources):
    '''Break a list of files into a hierarchy; for each value, if it is a string,
    then it is a file.  If it is a dictionary, it is a folder.  The string is
    the original path of the file.'''
    hierarchy = {}
    # Renamed locals: the original used 'file' and 'dict', shadowing builtins.
    for source in sources:
        path = splitFully(source)
        if len(path):
            node = hierarchy
            # Walk/create one folder dict per intermediate path component.
            for part in path[:-1]:
                if part not in node:
                    node[part] = {}
                node = node[part]
            node[path[-1]] = source
        #else:
        #    print 'Warning: failed to decompose path for '+str(source)
    return hierarchy
class _DSPGenerator(object):
    """ Base class for DSP generators """
    # env keyword args whose values feed the corresponding sourcenames bucket.
    srcargs = [
        'srcs',
        'incs',
        'localincs',
        'resources',
        'misc']
    def __init__(self, dspfile, source, env):
        # Normalizes 'variant', 'buildtarget', 'outdir' and 'runfile' from
        # env into per-variant lists of equal length, collects source-file
        # buckets, then registers one Config per variant via AddConfig.
        self.dspfile = str(dspfile)
        try:
            get_abspath = dspfile.get_abspath
        except AttributeError:
            self.dspabs = os.path.abspath(dspfile)
        else:
            self.dspabs = get_abspath()
        if 'variant' not in env:
            raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
                  "'Release') to create an MSVSProject.")
        elif SCons.Util.is_String(env['variant']):
            variants = [env['variant']]
        elif SCons.Util.is_List(env['variant']):
            variants = env['variant']
        if 'buildtarget' not in env or env['buildtarget'] == None:
            buildtarget = ['']
        elif SCons.Util.is_String(env['buildtarget']):
            buildtarget = [env['buildtarget']]
        elif SCons.Util.is_List(env['buildtarget']):
            if len(env['buildtarget']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'buildtarget' and 'variant' lists must be the same.")
            buildtarget = []
            for bt in env['buildtarget']:
                if SCons.Util.is_String(bt):
                    buildtarget.append(bt)
                else:
                    buildtarget.append(bt.get_abspath())
        else:
            buildtarget = [env['buildtarget'].get_abspath()]
        if len(buildtarget) == 1:
            # Replicate a single buildtarget across all variants.
            bt = buildtarget[0]
            buildtarget = []
            for _ in variants:
                buildtarget.append(bt)
        if 'outdir' not in env or env['outdir'] == None:
            outdir = ['']
        elif SCons.Util.is_String(env['outdir']):
            outdir = [env['outdir']]
        elif SCons.Util.is_List(env['outdir']):
            if len(env['outdir']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'outdir' and 'variant' lists must be the same.")
            outdir = []
            for s in env['outdir']:
                if SCons.Util.is_String(s):
                    outdir.append(s)
                else:
                    outdir.append(s.get_abspath())
        else:
            outdir = [env['outdir'].get_abspath()]
        if len(outdir) == 1:
            # Replicate a single outdir across all variants.
            s = outdir[0]
            outdir = []
            for v in variants:
                outdir.append(s)
        if 'runfile' not in env or env['runfile'] == None:
            # Default the runfile to the last buildtarget.
            runfile = buildtarget[-1:]
        elif SCons.Util.is_String(env['runfile']):
            runfile = [env['runfile']]
        elif SCons.Util.is_List(env['runfile']):
            if len(env['runfile']) != len(variants):
                raise SCons.Errors.InternalError("Sizes of 'runfile' and 'variant' lists must be the same.")
            runfile = []
            for s in env['runfile']:
                if SCons.Util.is_String(s):
                    runfile.append(s)
                else:
                    runfile.append(s.get_abspath())
        else:
            runfile = [env['runfile'].get_abspath()]
        if len(runfile) == 1:
            # Replicate a single runfile across all variants.
            s = runfile[0]
            runfile = []
            for v in variants:
                runfile.append(s)
        self.sconscript = env['MSVSSCONSCRIPT']
        cmdargs = env.get('cmdargs', '')
        self.env = env
        if 'name' in self.env:
            self.name = self.env['name']
        else:
            self.name = os.path.basename(SCons.Util.splitext(self.dspfile)[0])
        self.name = self.env.subst(self.name)
        sourcenames = [
            'Source Files',
            'Header Files',
            'Local Headers',
            'Resource Files',
            'Other Files']
        self.sources = {}
        for n in sourcenames:
            self.sources[n] = []
        self.configs = {}
        self.nokeep = 0
        if 'nokeep' in env and env['variant'] != 0:
            self.nokeep = 1
        if self.nokeep == 0 and os.path.exists(self.dspabs):
            # Merge configs/sources from an existing project file.
            self.Parse()
        for t in zip(sourcenames,self.srcargs):
            if t[1] in self.env:
                if SCons.Util.is_List(self.env[t[1]]):
                    for i in self.env[t[1]]:
                        if not i in self.sources[t[0]]:
                            self.sources[t[0]].append(i)
                else:
                    if not self.env[t[1]] in self.sources[t[0]]:
                        self.sources[t[0]].append(self.env[t[1]])
        for n in sourcenames:
            #TODO 2.4: compat layer supports sorted(key=) but not sort(key=)
            #TODO 2.4: self.sources[n].sort(key=lambda a: a.lower())
            self.sources[n] = sorted(self.sources[n], key=lambda a: a.lower())
        # Local helper; 'variant' may be 'Variant|Platform' (defaults to
        # Win32).  Note the Python 2 print statement below.
        def AddConfig(self, variant, buildtarget, outdir, runfile, cmdargs, dspfile=dspfile):
            config = Config()
            config.buildtarget = buildtarget
            config.outdir = outdir
            config.cmdargs = cmdargs
            config.runfile = runfile
            match = re.match('(.*)\|(.*)', variant)
            if match:
                config.variant = match.group(1)
                config.platform = match.group(2)
            else:
                config.variant = variant
                config.platform = 'Win32'
            self.configs[variant] = config
            print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dspfile) + "'"
        for i in range(len(variants)):
            AddConfig(self, variants[i], buildtarget[i], outdir[i], runfile[i], cmdargs)
        # Collect the distinct platforms referenced by the configs.
        self.platforms = []
        for key in self.configs.keys():
            platform = self.configs[key].platform
            if not platform in self.platforms:
                self.platforms.append(platform)
    def Build(self):
        # Subclasses override this to emit the actual project file.
        pass
# Template for the MSVS 6.0 .dsp file header; filled in via
# %(name)s / %(confkey)s substitution from locals() in
# _GenerateV6DSP.PrintHeader().
V6DSPHeader = """\
# Microsoft Developer Studio Project File - Name="%(name)s" - Package Owner=<4>
# Microsoft Developer Studio Generated Build File, Format Version 6.00
# ** DO NOT EDIT **
# TARGTYPE "Win32 (x86) External Target" 0x0106
CFG=%(name)s - Win32 %(confkey)s
!MESSAGE This is not a valid makefile. To build this project using NMAKE,
!MESSAGE use the Export Makefile command and run
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak".
!MESSAGE
!MESSAGE You can specify a configuration when running NMAKE
!MESSAGE by defining the macro CFG on the command line. For example:
!MESSAGE
!MESSAGE NMAKE /f "%(name)s.mak" CFG="%(name)s - Win32 %(confkey)s"
!MESSAGE
!MESSAGE Possible choices for configuration are:
!MESSAGE
"""
class _GenerateV6DSP(_DSPGenerator):
"""Generates a Project file for MSVS 6.0"""
    def PrintHeader(self):
        """Write the fixed V6 .dsp header plus one !MESSAGE line per config."""
        # pick a default config
        confkeys = sorted(self.configs.keys())
        name = self.name
        confkey = confkeys[0]
        self.file.write(V6DSPHeader % locals())
        for kind in confkeys:
            self.file.write('!MESSAGE "%s - Win32 %s" (based on "Win32 (x86) External Target")\n' % (name, kind))
        self.file.write('!MESSAGE \n\n')
def PrintProject(self):
name = self.name
self.file.write('# Begin Project\n'
'# PROP AllowPerConfigDependencies 0\n'
'# PROP Scc_ProjName ""\n'
'# PROP Scc_LocalPath ""\n\n')
first = 1
confkeys = sorted(self.configs.keys())
for kind in confkeys:
outdir = self.configs[kind].outdir
buildtarget = self.configs[kind].buildtarget
if first == 1:
self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
first = 0
else:
self.file.write('\n!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name, kind))
env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
if not env_has_buildtarget:
self.env['MSVSBUILDTARGET'] = buildtarget
# have to write this twice, once with the BASE settings, and once without
for base in ("BASE ",""):
self.file.write('# PROP %sUse_MFC 0\n'
'# PROP %sUse_Debug_Libraries ' % (base, base))
if kind.lower().find('debug') < 0:
self.file.write('0\n')
else:
self.file.write('1\n')
self.file.write('# PROP %sOutput_Dir "%s"\n'
'# PROP %sIntermediate_Dir "%s"\n' % (base,outdir,base,outdir))
cmd = 'echo Starting SCons && ' + self.env.subst('$MSVSBUILDCOM', 1)
self.file.write('# PROP %sCmd_Line "%s"\n'
'# PROP %sRebuild_Opt "-c && %s"\n'
'# PROP %sTarget_File "%s"\n'
'# PROP %sBsc_Name ""\n'
'# PROP %sTarget_Dir ""\n'\
%(base,cmd,base,cmd,base,buildtarget,base,base))
if not env_has_buildtarget:
del self.env['MSVSBUILDTARGET']
self.file.write('\n!ENDIF\n\n'
'# Begin Target\n\n')
for kind in confkeys:
self.file.write('# Name "%s - Win32 %s"\n' % (name,kind))
self.file.write('\n')
first = 0
for kind in confkeys:
if first == 0:
self.file.write('!IF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
first = 1
else:
self.file.write('!ELSEIF "$(CFG)" == "%s - Win32 %s"\n\n' % (name,kind))
self.file.write('!ENDIF \n\n')
self.PrintSourceFiles()
self.file.write('# End Target\n'
'# End Project\n')
if self.nokeep == 0:
# now we pickle some data and add it to the file -- MSDEV will ignore it.
pdata = pickle.dumps(self.configs,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '\n')
pdata = pickle.dumps(self.sources,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '\n')
def PrintSourceFiles(self):
categories = {'Source Files': 'cpp|c|cxx|l|y|def|odl|idl|hpj|bat',
'Header Files': 'h|hpp|hxx|hm|inl',
'Local Headers': 'h|hpp|hxx|hm|inl',
'Resource Files': 'r|rc|ico|cur|bmp|dlg|rc2|rct|bin|cnt|rtf|gif|jpg|jpeg|jpe',
'Other Files': ''}
for kind in sorted(categories.keys(), key=lambda a: a.lower()):
if not self.sources[kind]:
continue # skip empty groups
self.file.write('# Begin Group "' + kind + '"\n\n')
typelist = categories[kind].replace('|', ';')
self.file.write('# PROP Default_Filter "' + typelist + '"\n')
for file in self.sources[kind]:
file = os.path.normpath(file)
self.file.write('# Begin Source File\n\n'
'SOURCE="' + file + '"\n'
'# End Source File\n')
self.file.write('# End Group\n')
# add the SConscript file outside of the groups
self.file.write('# Begin Source File\n\n'
'SOURCE="' + str(self.sconscript) + '"\n'
'# End Source File\n')
def Parse(self):
try:
dspfile = open(self.dspabs,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dspfile.readline()
while line:
if line.find("# End Project") > -1:
break
line = dspfile.readline()
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
data = None
line = dspfile.readline()
datas = line
while line and line != '\n':
line = dspfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
# it has a "# " in front of it, so we strip that.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.sources.update(data)
def Build(self):
try:
self.file = open(self.dspabs,'w')
except IOError, detail:
raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
else:
self.PrintHeader()
self.PrintProject()
self.file.close()
# XML header template for a VS .NET 2002/2003 (7.0/7.1) .vcproj file.
# %-expanded against locals() in _GenerateV7DSP.PrintHeader
# (encoding, versionstr, name, project_guid, scc_attrs).
V7DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
\tProjectGUID="%(project_guid)s"
%(scc_attrs)s
\tKeyword="MakeFileProj">
"""

# Per-configuration <Configuration> template for VS 7.x projects;
# expanded per config in _GenerateV7DSP.PrintProject.
V7DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|%(platform)s"
\t\t\tOutputDirectory="%(outdir)s"
\t\t\tIntermediateDirectory="%(outdir)s"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="FALSE">
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tReBuildCommandLine="%(rebuildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tOutput="%(runfile)s"/>
\t\t</Configuration>
"""

# XML header template for a VS 2005/2008 (8.0/9.0) .vcproj file;
# differs from V7 by the RootNamespace attribute.
V8DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<VisualStudioProject
\tProjectType="Visual C++"
\tVersion="%(versionstr)s"
\tName="%(name)s"
\tProjectGUID="%(project_guid)s"
\tRootNamespace="%(name)s"
%(scc_attrs)s
\tKeyword="MakeFileProj">
"""

# Per-configuration template for VS 2005/2008 projects; adds the NMake
# preprocessor/include settings not present in the V7 variant.
V8DSPConfiguration = """\
\t\t<Configuration
\t\t\tName="%(variant)s|%(platform)s"
\t\t\tConfigurationType="0"
\t\t\tUseOfMFC="0"
\t\t\tATLMinimizesCRunTimeLibraryUsage="false"
\t\t\t>
\t\t\t<Tool
\t\t\t\tName="VCNMakeTool"
\t\t\t\tBuildCommandLine="%(buildcmd)s"
\t\t\t\tReBuildCommandLine="%(rebuildcmd)s"
\t\t\t\tCleanCommandLine="%(cleancmd)s"
\t\t\t\tOutput="%(runfile)s"
\t\t\t\tPreprocessorDefinitions="%(preprocdefs)s"
\t\t\t\tIncludeSearchPath="%(includepath)s"
\t\t\t\tForcedIncludes=""
\t\t\t\tAssemblySearchPath=""
\t\t\t\tForcedUsingAssemblies=""
\t\t\t\tCompileAsManaged=""
\t\t\t/>
\t\t</Configuration>
"""
class _GenerateV7DSP(_DSPGenerator):
    """Generates a Project file for MSVS .NET"""

    def __init__(self, dspfile, source, env):
        """Pick the header/configuration templates matching env['MSVS_VERSION'].

        VS 2005 (8.0) and 2008 (9.0) share the V8 templates; earlier
        versions use the V7 templates with the appropriate version string.
        """
        _DSPGenerator.__init__(self, dspfile, source, env)
        self.version = env['MSVS_VERSION']
        self.version_num, self.suite = msvs_parse_version(self.version)
        if self.version_num >= 9.0:
            self.versionstr = '9.00'
            self.dspheader = V8DSPHeader
            self.dspconfiguration = V8DSPConfiguration
        elif self.version_num >= 8.0:
            self.versionstr = '8.00'
            self.dspheader = V8DSPHeader
            self.dspconfiguration = V8DSPConfiguration
        else:
            if self.version_num >= 7.1:
                self.versionstr = '7.10'
            else:
                self.versionstr = '7.00'
            self.dspheader = V7DSPHeader
            self.dspconfiguration = V7DSPConfiguration
        self.file = None

    def PrintHeader(self):
        """Write the <VisualStudioProject> header (with optional source-code
        control attributes) and the <Platforms> section."""
        env = self.env
        versionstr = self.versionstr
        name = self.name
        encoding = self.env.subst('$MSVSENCODING')
        scc_provider = env.get('MSVS_SCC_PROVIDER', '')
        scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
        scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
        # MSVS_SCC_LOCAL_PATH is kept for backwards compatibility purpose and should
        # be deprecated as soon as possible.
        scc_local_path_legacy = env.get('MSVS_SCC_LOCAL_PATH', '')
        scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
        scc_local_path = os.path.relpath(scc_connection_root, os.path.dirname(self.dspabs))
        project_guid = env.get('MSVS_PROJECT_GUID', '')
        if not project_guid:
            project_guid = _generateGUID(self.dspfile, '')
        if scc_provider != '':
            scc_attrs = '\tSccProjectName="%s"\n' % scc_project_name
            if scc_aux_path != '':
                scc_attrs += '\tSccAuxPath="%s"\n' % scc_aux_path
            scc_attrs += ('\tSccLocalPath="%s"\n'
                          '\tSccProvider="%s"' % (scc_local_path, scc_provider))
        elif scc_local_path_legacy != '':
            # This case is kept for backwards compatibility purpose and should
            # be deprecated as soon as possible.
            scc_attrs = ('\tSccProjectName="%s"\n'
                         '\tSccLocalPath="%s"' % (scc_project_name, scc_local_path_legacy))
        else:
            # no SCC configured: drop the placeholder line from the template
            self.dspheader = self.dspheader.replace('%(scc_attrs)s\n', '')
        # the template consumes name/encoding/scc_attrs/etc. from locals()
        self.file.write(self.dspheader % locals())
        self.file.write('\t<Platforms>\n')
        for platform in self.platforms:
            self.file.write(
                        '\t\t<Platform\n'
                        '\t\t\tName="%s"/>\n' % platform)
        self.file.write('\t</Platforms>\n')
        if self.version_num >= 8.0:
            self.file.write('\t<ToolFiles>\n'
                            '\t</ToolFiles>\n')

    def PrintProject(self):
        """Write one <Configuration> per variant|platform, the source files,
        and (unless nokeep) the pickled SCons data inside an XML comment."""
        self.file.write('\t<Configurations>\n')
        confkeys = sorted(self.configs.keys())
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            runfile = self.configs[kind].runfile
            cmdargs = self.configs[kind].cmdargs
            # temporarily expose the build target so env.subst() can use it
            env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget
            starting = 'echo Starting SCons && '
            if cmdargs:
                cmdargs = ' ' + cmdargs
            else:
                cmdargs = ''
            buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
            rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
            cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)
            # This isn't perfect; CPPDEFINES and CPPPATH can contain $TARGET and $SOURCE,
            # so they could vary depending on the command being generated. This code
            # assumes they don't.
            preprocdefs = xmlify(';'.join(processDefines(self.env.get('CPPDEFINES', []))))
            includepath_Dirs = processIncludes(self.env.get('CPPPATH', []), self.env, None, None)
            includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']
            self.file.write(self.dspconfiguration % locals())
        self.file.write('\t</Configurations>\n')
        if self.version_num >= 7.1:
            self.file.write('\t<References>\n'
                            '\t</References>\n')
        self.PrintSourceFiles()
        self.file.write('</VisualStudioProject>\n')
        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write('<!-- SCons Data:\n' + pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '-->\n')

    def printSources(self, hierarchy, commonprefix):
        """Recursively write <Filter>/<File> elements for a nested source
        hierarchy (dicts are folders, strings are files)."""
        sorteditems = sorted(hierarchy.items(), key=lambda a: a[0].lower())
        # First folders, then files
        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                self.file.write('\t\t\t<Filter\n'
                                '\t\t\t\tName="%s"\n'
                                '\t\t\t\tFilter="">\n' % (key))
                self.printSources(value, commonprefix)
                self.file.write('\t\t\t</Filter>\n')
        for key, value in sorteditems:
            if SCons.Util.is_String(value):
                file = value
                if commonprefix:
                    file = os.path.join(commonprefix, value)
                file = os.path.normpath(file)
                self.file.write('\t\t\t<File\n'
                                '\t\t\t\tRelativePath="%s">\n'
                                '\t\t\t</File>\n' % (file))

    def PrintSourceFiles(self):
        """Write the <Files> section grouped by category, factoring out any
        common directory prefix, plus the SConscript file and <Globals>."""
        # category name -> ';'-separated extension filter
        categories = {'Source Files': 'cpp;c;cxx;l;y;def;odl;idl;hpj;bat',
                      'Header Files': 'h;hpp;hxx;hm;inl',
                      'Local Headers': 'h;hpp;hxx;hm;inl',
                      'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
                      'Other Files': ''}
        self.file.write('\t<Files>\n')
        cats = sorted([k for k in categories.keys() if self.sources[k]],
                      key=lambda a: a.lower())
        for kind in cats:
            # only emit a <Filter> wrapper when there is more than one category
            if len(cats) > 1:
                self.file.write('\t\t<Filter\n'
                                '\t\t\tName="%s"\n'
                                '\t\t\tFilter="%s">\n' % (kind, categories[kind]))
            sources = self.sources[kind]
            # First remove any common prefix
            commonprefix = None
            s = list(map(os.path.normpath, sources))
            # take the dirname because the prefix may include parts
            # of the filenames (e.g. if you have 'dir\abcd' and
            # 'dir\acde' then the cp will be 'dir\a' )
            cp = os.path.dirname( os.path.commonprefix(s) )
            if cp and s[0][len(cp)] == os.sep:
                # +1 because the filename starts after the separator
                sources = [s[len(cp)+1:] for s in sources]
                commonprefix = cp
            hierarchy = makeHierarchy(sources)
            self.printSources(hierarchy, commonprefix=commonprefix)
            if len(cats)>1:
                self.file.write('\t\t</Filter>\n')
        # add the SConscript file outside of the groups
        self.file.write('\t\t<File\n'
                        '\t\t\tRelativePath="%s">\n'
                        '\t\t</File>\n' % str(self.sconscript))
        self.file.write('\t</Files>\n'
                        '\t<Globals>\n'
                        '\t</Globals>\n')

    def Parse(self):
        """Reload configs/sources pickled into an existing .vcproj (inside the
        '<!-- SCons Data:' comment) by a previous Build().

        Silently returns if the file is missing or the data is unreadable.
        """
        try:
            dspfile = open(self.dspabs,'r')
        except IOError:
            return # doesn't exist yet, so can't add anything to configs.
        # skip forward to the SCons data comment
        line = dspfile.readline()
        while line:
            if line.find('<!-- SCons Data:') > -1:
                break
            line = dspfile.readline()
        # first base64 block: the configs dict
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.configs.update(data)
        data = None
        # second base64 block: the sources dict
        line = dspfile.readline()
        datas = line
        while line and line != '\n':
            line = dspfile.readline()
            datas = datas + line
        # OK, we've found our little pickled cache of data.
        try:
            datas = base64.decodestring(datas)
            data = pickle.loads(datas)
        except KeyboardInterrupt:
            raise
        except:
            return # unable to unpickle any data for some reason
        self.sources.update(data)

    def Build(self):
        """Open the .vcproj for writing and emit header + project sections.

        Raises SCons.Errors.InternalError if the file cannot be opened.
        """
        try:
            self.file = open(self.dspabs,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
# MSBuild (.vcxproj) root element for VS 2010; %-expanded with %(encoding)s.
V10DSPHeader = """\
<?xml version="1.0" encoding="%(encoding)s"?>
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
"""

# One <ProjectConfiguration> item per variant|platform pair.
V10DSPProjectConfiguration = """\
\t\t<ProjectConfiguration Include="%(variant)s|%(platform)s">
\t\t\t<Configuration>%(variant)s</Configuration>
\t\t\t<Platform>%(platform)s</Platform>
\t\t</ProjectConfiguration>
"""

# Globals property group: project GUID, optional SCC attributes, namespace.
V10DSPGlobals = """\
\t<PropertyGroup Label="Globals">
\t\t<ProjectGuid>%(project_guid)s</ProjectGuid>
%(scc_attrs)s\t\t<RootNamespace>%(name)s</RootNamespace>
\t\t<Keyword>MakeFileProj</Keyword>
\t</PropertyGroup>
"""

# Per-configuration property group marking the project as a Makefile project.
V10DSPPropertyGroupCondition = """\
\t<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'" Label="Configuration">
\t\t<ConfigurationType>Makefile</ConfigurationType>
\t\t<UseOfMfc>false</UseOfMfc>
\t</PropertyGroup>
"""

# Per-configuration import of the user's local property sheet, if present.
V10DSPImportGroupCondition = """\
\t<ImportGroup Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'" Label="PropertySheets">
\t\t<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
\t</ImportGroup>
"""

# Per-configuration NMake command lines and search paths (build/rebuild/clean
# commands are the SCons invocations assembled in _GenerateV10DSP.PrintProject).
V10DSPCommandLine = """\
\t\t<NMakeBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(buildcmd)s</NMakeBuildCommandLine>
\t\t<NMakeReBuildCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(rebuildcmd)s</NMakeReBuildCommandLine>
\t\t<NMakeCleanCommandLine Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(cleancmd)s</NMakeCleanCommandLine>
\t\t<NMakeOutput Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(runfile)s</NMakeOutput>
\t\t<NMakePreprocessorDefinitions Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(preprocdefs)s</NMakePreprocessorDefinitions>
\t\t<NMakeIncludeSearchPath Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">%(includepath)s</NMakeIncludeSearchPath>
\t\t<NMakeForcedIncludes Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeForcedIncludes)</NMakeForcedIncludes>
\t\t<NMakeAssemblySearchPath Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeAssemblySearchPath)</NMakeAssemblySearchPath>
\t\t<NMakeForcedUsingAssemblies Condition="'$(Configuration)|$(Platform)'=='%(variant)s|%(platform)s'">$(NMakeForcedUsingAssemblies)</NMakeForcedUsingAssemblies>
"""
class _GenerateV10DSP(_DSPGenerator):
    """Generates a Project file for MSVS 2010"""

    def __init__(self, dspfile, source, env):
        _DSPGenerator.__init__(self, dspfile, source, env)
        # MSBuild-format templates; VS 2010 also needs a Globals group
        self.dspheader = V10DSPHeader
        self.dspconfiguration = V10DSPProjectConfiguration
        self.dspglobals = V10DSPGlobals

    def PrintHeader(self):
        """Write the <Project> root, the ProjectConfigurations item group and
        the Globals property group (with optional SCC attributes)."""
        env = self.env
        name = self.name
        encoding = env.subst('$MSVSENCODING')
        project_guid = env.get('MSVS_PROJECT_GUID', '')
        scc_provider = env.get('MSVS_SCC_PROVIDER', '')
        scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '')
        scc_aux_path = env.get('MSVS_SCC_AUX_PATH', '')
        # MSVS_SCC_LOCAL_PATH is kept for backwards compatibility purpose and should
        # be deprecated as soon as possible.
        scc_local_path_legacy = env.get('MSVS_SCC_LOCAL_PATH', '')
        scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
        scc_local_path = os.path.relpath(scc_connection_root, os.path.dirname(self.dspabs))
        if not project_guid:
            project_guid = _generateGUID(self.dspfile, '')
        if scc_provider != '':
            scc_attrs = '\t\t<SccProjectName>%s</SccProjectName>\n' % scc_project_name
            if scc_aux_path != '':
                scc_attrs += '\t\t<SccAuxPath>%s</SccAuxPath>\n' % scc_aux_path
            scc_attrs += ('\t\t<SccLocalPath>%s</SccLocalPath>\n'
                          '\t\t<SccProvider>%s</SccProvider>\n' % (scc_local_path, scc_provider))
        elif scc_local_path_legacy != '':
            # This case is kept for backwards compatibility purpose and should
            # be deprecated as soon as possible.
            scc_attrs = ('\t\t<SccProjectName>%s</SccProjectName>\n'
                         '\t\t<SccLocalPath>%s</SccLocalPath>\n' % (scc_project_name, scc_local_path_legacy))
        else:
            # no SCC configured: drop the placeholder from the Globals template
            self.dspglobals = self.dspglobals.replace('%(scc_attrs)s', '')
        self.file.write(self.dspheader % locals())
        self.file.write('\t<ItemGroup Label="ProjectConfigurations">\n')
        confkeys = sorted(self.configs.keys())
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(self.dspconfiguration % locals())
        self.file.write('\t</ItemGroup>\n')
        self.file.write(self.dspglobals % locals())

    def PrintProject(self):
        """Write the property/import groups, NMake command lines, source files
        and the companion .vcxproj.filters file; append pickled SCons data
        in an XML comment unless nokeep is set."""
        name = self.name
        confkeys = sorted(self.configs.keys())
        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\n')
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(V10DSPPropertyGroupCondition % locals())
        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\n')
        self.file.write('\t<ImportGroup Label="ExtensionSettings">\n')
        self.file.write('\t</ImportGroup>\n')
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            self.file.write(V10DSPImportGroupCondition % locals())
        self.file.write('\t<PropertyGroup Label="UserMacros" />\n')
        self.file.write('\t<PropertyGroup>\n')
        self.file.write('\t<_ProjectFileVersion>10.0.30319.1</_ProjectFileVersion>\n')
        for kind in confkeys:
            variant = self.configs[kind].variant
            platform = self.configs[kind].platform
            outdir = self.configs[kind].outdir
            buildtarget = self.configs[kind].buildtarget
            runfile = self.configs[kind].runfile
            cmdargs = self.configs[kind].cmdargs
            # temporarily expose the build target so env.subst() can use it
            env_has_buildtarget = 'MSVSBUILDTARGET' in self.env
            if not env_has_buildtarget:
                self.env['MSVSBUILDTARGET'] = buildtarget
            starting = 'echo Starting SCons && '
            if cmdargs:
                cmdargs = ' ' + cmdargs
            else:
                cmdargs = ''
            buildcmd = xmlify(starting + self.env.subst('$MSVSBUILDCOM', 1) + cmdargs)
            rebuildcmd = xmlify(starting + self.env.subst('$MSVSREBUILDCOM', 1) + cmdargs)
            cleancmd = xmlify(starting + self.env.subst('$MSVSCLEANCOM', 1) + cmdargs)
            # This isn't perfect; CPPDEFINES and CPPPATH can contain $TARGET and $SOURCE,
            # so they could vary depending on the command being generated. This code
            # assumes they don't.
            preprocdefs = xmlify(';'.join(processDefines(self.env.get('CPPDEFINES', []))))
            includepath_Dirs = processIncludes(self.env.get('CPPPATH', []), self.env, None, None)
            includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
            if not env_has_buildtarget:
                del self.env['MSVSBUILDTARGET']
            self.file.write(V10DSPCommandLine % locals())
        self.file.write('\t</PropertyGroup>\n')
        #filter settings in MSVS 2010 are stored in separate file
        self.filtersabs = self.dspabs + '.filters'
        try:
            self.filters_file = open(self.filtersabs, 'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.filtersabs + '" for writing:' + str(detail))
        self.filters_file.write('<?xml version="1.0" encoding="utf-8"?>\n'
                                '<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\n')
        self.PrintSourceFiles()
        self.filters_file.write('</Project>')
        self.filters_file.close()
        self.file.write('\t<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\n'
                        '\t<ImportGroup Label="ExtensionTargets">\n'
                        '\t</ImportGroup>\n'
                        '</Project>\n')
        if self.nokeep == 0:
            # now we pickle some data and add it to the file -- MSDEV will ignore it.
            pdata = pickle.dumps(self.configs,1)
            pdata = base64.encodestring(pdata)
            self.file.write('<!-- SCons Data:\n' + pdata + '\n')
            pdata = pickle.dumps(self.sources,1)
            pdata = base64.encodestring(pdata)
            self.file.write(pdata + '-->\n')

    def printFilters(self, hierarchy, name):
        """Recursively write <Filter> entries (with generated GUIDs) to the
        .filters file for each nested folder in the source hierarchy."""
        sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())
        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                filter_name = name + '\\' + key
                self.filters_file.write('\t\t<Filter Include="%s">\n'
                                        '\t\t\t<UniqueIdentifier>%s</UniqueIdentifier>\n'
                                        '\t\t</Filter>\n' % (filter_name, _generateGUID(self.dspabs, filter_name)))
                self.printFilters(value, filter_name)

    def printSources(self, hierarchy, kind, commonprefix, filter_name):
        """Write source items to both the project and the .filters file,
        choosing the MSBuild item type (ClCompile/ClInclude/None) by category."""
        keywords = {'Source Files': 'ClCompile',
                    'Header Files': 'ClInclude',
                    'Local Headers': 'ClInclude',
                    'Resource Files': 'None',
                    'Other Files': 'None'}
        sorteditems = sorted(hierarchy.items(), key = lambda a: a[0].lower())
        # First folders, then files
        for key, value in sorteditems:
            if SCons.Util.is_Dict(value):
                self.printSources(value, kind, commonprefix, filter_name + '\\' + key)
        for key, value in sorteditems:
            if SCons.Util.is_String(value):
                file = value
                if commonprefix:
                    file = os.path.join(commonprefix, value)
                file = os.path.normpath(file)
                self.file.write('\t\t<%s Include="%s" />\n' % (keywords[kind], file))
                self.filters_file.write('\t\t<%s Include="%s">\n'
                                        '\t\t\t<Filter>%s</Filter>\n'
                                        '\t\t</%s>\n' % (keywords[kind], file, filter_name, keywords[kind]))

    def PrintSourceFiles(self):
        """Write top-level filters to the .filters file, then per-category
        item groups to both files, plus the SConscript file."""
        # category name -> extension list shown in the filter
        categories = {'Source Files': 'cpp;c;cxx;l;y;def;odl;idl;hpj;bat',
                      'Header Files': 'h;hpp;hxx;hm;inl',
                      'Local Headers': 'h;hpp;hxx;hm;inl',
                      'Resource Files': 'r;rc;ico;cur;bmp;dlg;rc2;rct;bin;cnt;rtf;gif;jpg;jpeg;jpe',
                      'Other Files': ''}
        cats = sorted([k for k in categories.keys() if self.sources[k]],
                      key = lambda a: a.lower())
        # print vcxproj.filters file first
        self.filters_file.write('\t<ItemGroup>\n')
        for kind in cats:
            self.filters_file.write('\t\t<Filter Include="%s">\n'
                                    '\t\t\t<UniqueIdentifier>{7b42d31d-d53c-4868-8b92-ca2bc9fc052f}</UniqueIdentifier>\n'
                                    '\t\t\t<Extensions>%s</Extensions>\n'
                                    '\t\t</Filter>\n' % (kind, categories[kind]))
            # First remove any common prefix
            sources = self.sources[kind]
            commonprefix = None
            s = list(map(os.path.normpath, sources))
            # take the dirname because the prefix may include parts
            # of the filenames (e.g. if you have 'dir\abcd' and
            # 'dir\acde' then the cp will be 'dir\a' )
            cp = os.path.dirname( os.path.commonprefix(s) )
            if cp and s[0][len(cp)] == os.sep:
                # +1 because the filename starts after the separator
                sources = [s[len(cp)+1:] for s in sources]
                commonprefix = cp
            hierarchy = makeHierarchy(sources)
            self.printFilters(hierarchy, kind)
        self.filters_file.write('\t</ItemGroup>\n')
        # then print files and filters
        for kind in cats:
            self.file.write('\t<ItemGroup>\n')
            self.filters_file.write('\t<ItemGroup>\n')
            # First remove any common prefix
            sources = self.sources[kind]
            commonprefix = None
            s = list(map(os.path.normpath, sources))
            # take the dirname because the prefix may include parts
            # of the filenames (e.g. if you have 'dir\abcd' and
            # 'dir\acde' then the cp will be 'dir\a' )
            cp = os.path.dirname( os.path.commonprefix(s) )
            if cp and s[0][len(cp)] == os.sep:
                # +1 because the filename starts after the separator
                sources = [s[len(cp)+1:] for s in sources]
                commonprefix = cp
            hierarchy = makeHierarchy(sources)
            self.printSources(hierarchy, kind, commonprefix, kind)
            self.file.write('\t</ItemGroup>\n')
            self.filters_file.write('\t</ItemGroup>\n')
        # add the SConscript file outside of the groups
        self.file.write('\t<ItemGroup>\n'
                        '\t\t<None Include="%s" />\n'
                        #'\t\t<None Include="SConstruct" />\n'
                        '\t</ItemGroup>\n' % str(self.sconscript))

    def Parse(self):
        """Not implemented for the 2010 generator; prints a trace marker only."""
        print "_GenerateV10DSP.Parse()"

    def Build(self):
        """Open the .vcxproj for writing and emit header + project sections.

        Raises SCons.Errors.InternalError if the file cannot be opened.
        """
        try:
            self.file = open(self.dspabs, 'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dspabs + '" for writing:' + str(detail))
        else:
            self.PrintHeader()
            self.PrintProject()
            self.file.close()
class _DSWGenerator(object):
    """ Base class for DSW generators """

    def __init__(self, dswfile, source, env):
        """Validate the 'projects' argument and derive the solution name.

        Raises SCons.Errors.UserError for missing/empty projects and
        SCons.Errors.InternalError if 'projects' is not a list.
        """
        self.dswfile = os.path.normpath(str(dswfile))
        self.dsw_folder_path = os.path.dirname(os.path.abspath(self.dswfile))
        self.env = env
        if 'projects' not in env:
            raise SCons.Errors.UserError("You must specify a 'projects' argument to create an MSVSSolution.")
        projects = env['projects']
        if not SCons.Util.is_List(projects):
            raise SCons.Errors.InternalError("The 'projects' argument must be a list of nodes.")
        projects = SCons.Util.flatten(projects)
        if len(projects) < 1:
            raise SCons.Errors.UserError("You must specify at least one project to create an MSVSSolution.")
        # project file paths as strings
        self.dspfiles = list(map(str, projects))
        # solution name: explicit 'name' argument, else the dsw basename
        if 'name' in self.env:
            self.name = self.env['name']
        else:
            self.name = os.path.basename(SCons.Util.splitext(self.dswfile)[0])
        self.name = self.env.subst(self.name)

    def Build(self):
        """Hook for writing the solution file; overridden by subclasses."""
        pass
class _GenerateV7DSW(_DSWGenerator):
"""Generates a Solution file for MSVS .NET"""
def __init__(self, dswfile, source, env):
_DSWGenerator.__init__(self, dswfile, source, env)
self.file = None
self.version = self.env['MSVS_VERSION']
self.version_num, self.suite = msvs_parse_version(self.version)
self.versionstr = '7.00'
if self.version_num >= 11.0:
self.versionstr = '12.00'
elif self.version_num >= 10.0:
self.versionstr = '11.00'
elif self.version_num >= 9.0:
self.versionstr = '10.00'
elif self.version_num >= 8.0:
self.versionstr = '9.00'
elif self.version_num >= 7.1:
self.versionstr = '8.00'
if 'slnguid' in env and env['slnguid']:
self.slnguid = env['slnguid']
else:
self.slnguid = _generateGUID(dswfile, self.name)
self.configs = {}
self.nokeep = 0
if 'nokeep' in env and env['variant'] != 0:
self.nokeep = 1
if self.nokeep == 0 and os.path.exists(self.dswfile):
self.Parse()
def AddConfig(self, variant, dswfile=dswfile):
config = Config()
match = re.match('(.*)\|(.*)', variant)
if match:
config.variant = match.group(1)
config.platform = match.group(2)
else:
config.variant = variant
config.platform = 'Win32'
self.configs[variant] = config
print "Adding '" + self.name + ' - ' + config.variant + '|' + config.platform + "' to '" + str(dswfile) + "'"
if 'variant' not in env:
raise SCons.Errors.InternalError("You must specify a 'variant' argument (i.e. 'Debug' or " +\
"'Release') to create an MSVS Solution File.")
elif SCons.Util.is_String(env['variant']):
AddConfig(self, env['variant'])
elif SCons.Util.is_List(env['variant']):
for variant in env['variant']:
AddConfig(self, variant)
self.platforms = []
for key in self.configs.keys():
platform = self.configs[key].platform
if not platform in self.platforms:
self.platforms.append(platform)
def GenerateProjectFilesInfo(self):
for dspfile in self.dspfiles:
dsp_folder_path, name = os.path.split(dspfile)
dsp_folder_path = os.path.abspath(dsp_folder_path)
dsp_relative_folder_path = os.path.relpath(dsp_folder_path, self.dsw_folder_path)
if dsp_relative_folder_path == os.curdir:
dsp_relative_file_path = name
else:
dsp_relative_file_path = os.path.join(dsp_relative_folder_path, name)
dspfile_info = {'NAME': name,
'GUID': _generateGUID(dspfile, ''),
'FOLDER_PATH': dsp_folder_path,
'FILE_PATH': dspfile,
'SLN_RELATIVE_FOLDER_PATH': dsp_relative_folder_path,
'SLN_RELATIVE_FILE_PATH': dsp_relative_file_path}
self.dspfiles_info.append(dspfile_info)
self.dspfiles_info = []
GenerateProjectFilesInfo(self)
def Parse(self):
try:
dswfile = open(self.dswfile,'r')
except IOError:
return # doesn't exist yet, so can't add anything to configs.
line = dswfile.readline()
while line:
if line[:9] == "EndGlobal":
break
line = dswfile.readline()
line = dswfile.readline()
datas = line
while line:
line = dswfile.readline()
datas = datas + line
# OK, we've found our little pickled cache of data.
try:
datas = base64.decodestring(datas)
data = pickle.loads(datas)
except KeyboardInterrupt:
raise
except:
return # unable to unpickle any data for some reason
self.configs.update(data)
def PrintSolution(self):
"""Writes a solution file"""
self.file.write('Microsoft Visual Studio Solution File, Format Version %s\n' % self.versionstr)
if self.version_num >= 11.0:
self.file.write('# Visual Studio 11\n')
elif self.version_num >= 10.0:
self.file.write('# Visual Studio 2010\n')
elif self.version_num >= 9.0:
self.file.write('# Visual Studio 2008\n')
elif self.version_num >= 8.0:
self.file.write('# Visual Studio 2005\n')
for dspinfo in self.dspfiles_info:
name = dspinfo['NAME']
base, suffix = SCons.Util.splitext(name)
if suffix == '.vcproj':
name = base
self.file.write('Project("%s") = "%s", "%s", "%s"\n'
% (external_makefile_guid, name, dspinfo['SLN_RELATIVE_FILE_PATH'], dspinfo['GUID']))
if self.version_num >= 7.1 and self.version_num < 8.0:
self.file.write('\tProjectSection(ProjectDependencies) = postProject\n'
'\tEndProjectSection\n')
self.file.write('EndProject\n')
self.file.write('Global\n')
env = self.env
if 'MSVS_SCC_PROVIDER' in env:
scc_number_of_projects = len(self.dspfiles) + 1
slnguid = self.slnguid
scc_provider = env.get('MSVS_SCC_PROVIDER', '').replace(' ', r'\u0020')
scc_project_name = env.get('MSVS_SCC_PROJECT_NAME', '').replace(' ', r'\u0020')
scc_connection_root = env.get('MSVS_SCC_CONNECTION_ROOT', os.curdir)
scc_local_path = os.path.relpath(scc_connection_root, self.dsw_folder_path).replace('\\', '\\\\')
self.file.write('\tGlobalSection(SourceCodeControl) = preSolution\n'
'\t\tSccNumberOfProjects = %(scc_number_of_projects)d\n'
'\t\tSccProjectName0 = %(scc_project_name)s\n'
'\t\tSccLocalPath0 = %(scc_local_path)s\n'
'\t\tSccProvider0 = %(scc_provider)s\n'
'\t\tCanCheckoutShared = true\n' % locals())
sln_relative_path_from_scc = os.path.relpath(self.dsw_folder_path, scc_connection_root)
if sln_relative_path_from_scc != os.curdir:
self.file.write('\t\tSccProjectFilePathRelativizedFromConnection0 = %s\\\\\n'
% sln_relative_path_from_scc.replace('\\', '\\\\'))
if self.version_num < 8.0:
# When present, SolutionUniqueID is automatically removed by VS 2005
# TODO: check for Visual Studio versions newer than 2005
self.file.write('\t\tSolutionUniqueID = %s\n' % slnguid)
for dspinfo in self.dspfiles_info:
i = self.dspfiles_info.index(dspinfo) + 1
dsp_relative_file_path = dspinfo['SLN_RELATIVE_FILE_PATH'].replace('\\', '\\\\')
dsp_scc_relative_folder_path = os.path.relpath(dspinfo['FOLDER_PATH'], scc_connection_root).replace('\\', '\\\\')
self.file.write('\t\tSccProjectUniqueName%(i)s = %(dsp_relative_file_path)s\n'
'\t\tSccLocalPath%(i)d = %(scc_local_path)s\n'
'\t\tCanCheckoutShared = true\n'
'\t\tSccProjectFilePathRelativizedFromConnection%(i)s = %(dsp_scc_relative_folder_path)s\\\\\n'
% locals())
self.file.write('\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n')
else:
self.file.write('\tGlobalSection(SolutionConfiguration) = preSolution\n')
confkeys = sorted(self.configs.keys())
cnt = 0
for name in confkeys:
variant = self.configs[name].variant
platform = self.configs[name].platform
if self.version_num >= 8.0:
self.file.write('\t\t%s|%s = %s|%s\n' % (variant, platform, variant, platform))
else:
self.file.write('\t\tConfigName.%d = %s\n' % (cnt, variant))
cnt = cnt + 1
self.file.write('\tEndGlobalSection\n')
if self.version_num <= 7.1:
self.file.write('\tGlobalSection(ProjectDependencies) = postSolution\n'
'\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n')
else:
self.file.write('\tGlobalSection(ProjectConfiguration) = postSolution\n')
for name in confkeys:
variant = self.configs[name].variant
platform = self.configs[name].platform
if self.version_num >= 8.0:
for dspinfo in self.dspfiles_info:
guid = dspinfo['GUID']
self.file.write('\t\t%s.%s|%s.ActiveCfg = %s|%s\n'
'\t\t%s.%s|%s.Build.0 = %s|%s\n' % (guid,variant,platform,variant,platform,guid,variant,platform,variant,platform))
else:
for dspinfo in self.dspfiles_info:
guid = dspinfo['GUID']
self.file.write('\t\t%s.%s.ActiveCfg = %s|%s\n'
'\t\t%s.%s.Build.0 = %s|%s\n' %(guid,variant,variant,platform,guid,variant,variant,platform))
self.file.write('\tEndGlobalSection\n')
if self.version_num >= 8.0:
self.file.write('\tGlobalSection(SolutionProperties) = preSolution\n'
'\t\tHideSolutionNode = FALSE\n'
'\tEndGlobalSection\n')
else:
self.file.write('\tGlobalSection(ExtensibilityGlobals) = postSolution\n'
'\tEndGlobalSection\n'
'\tGlobalSection(ExtensibilityAddIns) = postSolution\n'
'\tEndGlobalSection\n')
self.file.write('EndGlobal\n')
if self.nokeep == 0:
pdata = pickle.dumps(self.configs,1)
pdata = base64.encodestring(pdata)
self.file.write(pdata + '\n')
def Build(self):
    """Open the solution (.dsw/.sln) file and write it out.

    Raises SCons.Errors.InternalError when the target file cannot be
    opened for writing; otherwise delegates the actual content
    generation to PrintSolution() and closes the file afterwards.
    """
    try:
        self.file = open(self.dswfile,'w')
    except IOError, detail:
        raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
    else:
        self.PrintSolution()
        self.file.close()
# Template for a Visual Studio 6.0 workspace (.dsw) file.  The %(name)s and
# %(dspfile)s placeholders are substituted from locals() in
# _GenerateV6DSW.PrintWorkspace().
V6DSWHeader = """\
Microsoft Developer Studio Workspace File, Format Version 6.00
# WARNING: DO NOT EDIT OR DELETE THIS WORKSPACE FILE!
###############################################################################
Project: "%(name)s"="%(dspfile)s" - Package Owner=<4>
Package=<5>
{{{
}}}
Package=<4>
{{{
}}}
###############################################################################
Global:
Package=<5>
{{{
}}}
Package=<3>
{{{
}}}
###############################################################################
"""
class _GenerateV6DSW(_DSWGenerator):
    """Generates a Workspace file for MSVS 6.0"""

    def PrintWorkspace(self):
        """ writes a DSW file """
        name = self.name
        # Reference the first project file relative to the workspace folder.
        dspfile = os.path.relpath(self.dspfiles[0], self.dsw_folder_path)
        # locals() supplies the %(name)s / %(dspfile)s template values.
        self.file.write(V6DSWHeader % locals())

    def Build(self):
        """Open the workspace file, write it via PrintWorkspace(), close it.

        Raises SCons.Errors.InternalError if the file cannot be opened.
        """
        try:
            self.file = open(self.dswfile,'w')
        except IOError, detail:
            raise SCons.Errors.InternalError('Unable to open "' + self.dswfile + '" for writing:' + str(detail))
        else:
            self.PrintWorkspace()
            self.file.close()
def GenerateDSP(dspfile, source, env):
    """Generates a Project file based on the version of MSVS that is being used.

    Picks the V10 (.vcxproj), V7 (.vcproj) or V6 (.dsp) generator from
    env['MSVS_VERSION'], defaulting to 6.0 when unset.
    """
    version_num = 6.0
    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])

    # Select the generator class for the detected version, then build once.
    if version_num >= 10.0:
        generator_class = _GenerateV10DSP
    elif version_num >= 7.0:
        generator_class = _GenerateV7DSP
    else:
        generator_class = _GenerateV6DSP
    generator_class(dspfile, source, env).Build()
def GenerateDSW(dswfile, source, env):
    """Generates a Solution/Workspace file based on the version of MSVS in use.

    MSVS >= 7.0 gets a solution (.sln) generator; older versions get the
    6.0 workspace (.dsw) generator.  Defaults to 6.0 when MSVS_VERSION
    is not configured.
    """
    version_num = 6.0
    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])

    generator_class = _GenerateV7DSW if version_num >= 7.0 else _GenerateV6DSW
    generator_class(dswfile, source, env).Build()
##############################################################################
# Above here are the classes and functions for generation of
# DSP/DSW/SLN/VCPROJ files.
##############################################################################
def GetMSVSProjectSuffix(target, source, env, for_signature):
    """Return the configured project-file suffix (e.g. '.vcproj')."""
    msvs_settings = env['MSVS']
    return msvs_settings['PROJECTSUFFIX']
def GetMSVSSolutionSuffix(target, source, env, for_signature):
    """Return the configured solution-file suffix (e.g. '.sln')."""
    msvs_settings = env['MSVS']
    return msvs_settings['SOLUTIONSUFFIX']
def GenerateProject(target, source, env):
    """Generate the MSVS project (and optionally the solution) for target.

    When a VariantDir is in use, a placeholder file pointing at the real
    project/workspace is written into the variant dir, and the actual
    files are generated next to the source tree.
    """
    # generate the dsp file, according to the version of MSVS.
    builddspfile = target[0]
    dspfile = builddspfile.srcnode()

    # this detects whether or not we're using a VariantDir
    if not dspfile is builddspfile:
        try:
            bdsp = open(str(builddspfile), "w+")
        except IOError, detail:
            print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
            raise
        bdsp.write("This is just a placeholder file.\nThe real project file is here:\n%s\n" % dspfile.get_abspath())

    GenerateDSP(dspfile, source, env)

    if env.get('auto_build_solution', 1):
        builddswfile = target[1]
        dswfile = builddswfile.srcnode()
        if not dswfile is builddswfile:
            try:
                bdsw = open(str(builddswfile), "w+")
            # NOTE(review): this error message names dspfile rather than the
            # dsw file being opened -- looks like a copy/paste slip; confirm
            # before changing the user-visible text.
            except IOError, detail:
                print 'Unable to open "' + str(dspfile) + '" for writing:',detail,'\n'
                raise
            bdsw.write("This is just a placeholder file.\nThe real workspace file is here:\n%s\n" % dswfile.get_abspath())
        GenerateDSW(dswfile, source, env)
def GenerateSolution(target, source, env):
    """Generate the solution/workspace file for the first target node."""
    solution_file = target[0]
    GenerateDSW(solution_file, source, env)
def projectEmitter(target, source, env):
    """Sets up the DSP dependencies.

    Rewrites target[0] to carry the suffix for the MSVS version in use
    and, when the user supplied no real sources, synthesizes a single
    Value() source string encoding every setting whose change should
    force the project file to be regenerated (scons command, encoding,
    CPPDEFINES/CPPPATH, buildtarget, outdir, name, variant, the
    srcargs file lists, and the target path itself).  When
    auto_build_solution is enabled the matching solution target is
    emitted as well.
    """
    # todo: Not sure what sets source to what user has passed as target,
    # but this is what happens. When that is fixed, we also won't have
    # to make the user always append env['MSVSPROJECTSUFFIX'] to target.
    if source[0] == target[0]:
        source = []

    # make sure the suffix is correct for the version of MSVS we're running.
    (base, suff) = SCons.Util.splitext(str(target[0]))
    suff = env.subst('$MSVSPROJECTSUFFIX')
    target[0] = base + suff

    if not source:
        source = 'prj_inputs:'
        source = source + env.subst('$MSVSSCONSCOM', 1)
        source = source + env.subst('$MSVSENCODING', 1)

        # Project file depends on CPPDEFINES and CPPPATH
        preprocdefs = xmlify(';'.join(processDefines(env.get('CPPDEFINES', []))))
        includepath_Dirs = processIncludes(env.get('CPPPATH', []), env, None, None)
        includepath = xmlify(';'.join([str(x) for x in includepath_Dirs]))
        source = source + "; ppdefs:%s incpath:%s"%(preprocdefs, includepath)

        if 'buildtarget' in env and env['buildtarget'] != None:
            if SCons.Util.is_String(env['buildtarget']):
                source = source + ' "%s"' % env['buildtarget']
            elif SCons.Util.is_List(env['buildtarget']):
                for bt in env['buildtarget']:
                    if SCons.Util.is_String(bt):
                        source = source + ' "%s"' % bt
                    else:
                        try: source = source + ' "%s"' % bt.get_abspath()
                        except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")
            else:
                try: source = source + ' "%s"' % env['buildtarget'].get_abspath()
                except AttributeError: raise SCons.Errors.InternalError("buildtarget can be a string, a node, a list of strings or nodes, or None")

        if 'outdir' in env and env['outdir'] != None:
            if SCons.Util.is_String(env['outdir']):
                source = source + ' "%s"' % env['outdir']
            elif SCons.Util.is_List(env['outdir']):
                for s in env['outdir']:
                    if SCons.Util.is_String(s):
                        source = source + ' "%s"' % s
                    else:
                        try: source = source + ' "%s"' % s.get_abspath()
                        except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")
            else:
                try: source = source + ' "%s"' % env['outdir'].get_abspath()
                except AttributeError: raise SCons.Errors.InternalError("outdir can be a string, a node, a list of strings or nodes, or None")

        if 'name' in env:
            if SCons.Util.is_String(env['name']):
                source = source + ' "%s"' % env['name']
            else:
                raise SCons.Errors.InternalError("name must be a string")

        if 'variant' in env:
            if SCons.Util.is_String(env['variant']):
                source = source + ' "%s"' % env['variant']
            elif SCons.Util.is_List(env['variant']):
                for variant in env['variant']:
                    if SCons.Util.is_String(variant):
                        source = source + ' "%s"' % variant
                    else:
                        raise SCons.Errors.InternalError("name must be a string or a list of strings")
            else:
                raise SCons.Errors.InternalError("variant must be a string or a list of strings")
        else:
            raise SCons.Errors.InternalError("variant must be specified")

        for s in _DSPGenerator.srcargs:
            if s in env:
                if SCons.Util.is_String(env[s]):
                    # BUG FIX: the string branch previously appended
                    # ' "%s' with no closing quote, leaving the
                    # signature string unbalanced; use the same quoting
                    # as the list branch below and as solutionEmitter.
                    source = source + ' "%s"' % env[s]
                elif SCons.Util.is_List(env[s]):
                    for t in env[s]:
                        if SCons.Util.is_String(t):
                            source = source + ' "%s"' % t
                        else:
                            raise SCons.Errors.InternalError(s + " must be a string or a list of strings")
                else:
                    raise SCons.Errors.InternalError(s + " must be a string or a list of strings")

        source = source + ' "%s"' % str(target[0])
        source = [SCons.Node.Python.Value(source)]

    targetlist = [target[0]]
    sourcelist = source

    if env.get('auto_build_solution', 1):
        env['projects'] = [env.File(t).srcnode() for t in targetlist]
        t, s = solutionEmitter(target, target, env)
        targetlist = targetlist + t

    # Beginning with Visual Studio 2010 for each project file (.vcxproj)
    # we have additional file (.vcxproj.filters)
    if float(env['MSVS_VERSION']) >= 10.0:
        targetlist.append(targetlist[0] + '.filters')

    return (targetlist, sourcelist)
def solutionEmitter(target, source, env):
    """Sets up the DSW dependencies.

    Rewrites target[0] to carry the configured solution suffix and, when
    no real sources were given, synthesizes a single Value() source
    encoding every setting (name, variant, slnguid, projects, target
    path) whose change should force the solution to be regenerated.
    """
    # todo: Not sure what sets source to what user has passed as target,
    # but this is what happens. When that is fixed, we also won't have
    # to make the user always append env['MSVSSOLUTIONSUFFIX'] to target.
    if source[0] == target[0]:
        source = []

    # make sure the suffix is correct for the version of MSVS we're running.
    (base, suff) = SCons.Util.splitext(str(target[0]))
    suff = env.subst('$MSVSSOLUTIONSUFFIX')
    target[0] = base + suff

    if not source:
        source = 'sln_inputs:'

        if 'name' in env:
            if SCons.Util.is_String(env['name']):
                source = source + ' "%s"' % env['name']
            else:
                raise SCons.Errors.InternalError("name must be a string")

        if 'variant' in env:
            if SCons.Util.is_String(env['variant']):
                source = source + ' "%s"' % env['variant']
            elif SCons.Util.is_List(env['variant']):
                for variant in env['variant']:
                    if SCons.Util.is_String(variant):
                        source = source + ' "%s"' % variant
                    else:
                        raise SCons.Errors.InternalError("name must be a string or a list of strings")
            else:
                raise SCons.Errors.InternalError("variant must be a string or a list of strings")
        else:
            raise SCons.Errors.InternalError("variant must be specified")

        if 'slnguid' in env:
            if SCons.Util.is_String(env['slnguid']):
                source = source + ' "%s"' % env['slnguid']
            else:
                raise SCons.Errors.InternalError("slnguid must be a string")

        # Note: non-string entries in a 'projects' list are silently skipped.
        if 'projects' in env:
            if SCons.Util.is_String(env['projects']):
                source = source + ' "%s"' % env['projects']
            elif SCons.Util.is_List(env['projects']):
                for t in env['projects']:
                    if SCons.Util.is_String(t):
                        source = source + ' "%s"' % t

        source = source + ' "%s"' % str(target[0])
        source = [SCons.Node.Python.Value(source)]

    return ([target[0]], source)
# SCons Action objects wrapping the generator functions; the None
# strfunction suppresses command-line echoing when they run.
projectAction = SCons.Action.Action(GenerateProject, None)
solutionAction = SCons.Action.Action(GenerateSolution, None)

# Builders installed by generate() as MSVSProject / MSVSSolution.
projectBuilder = SCons.Builder.Builder(action = '$MSVSPROJECTCOM',
                                       suffix = '$MSVSPROJECTSUFFIX',
                                       emitter = projectEmitter)

solutionBuilder = SCons.Builder.Builder(action = '$MSVSSOLUTIONCOM',
                                        suffix = '$MSVSSOLUTIONSUFFIX',
                                        emitter = solutionEmitter)

# Shared fallback SConscript node used by generate() when it is called
# outside of a normal SConscript read (see generate()).
default_MSVS_SConscript = None
def generate(env):
    """Add Builders and construction variables for Microsoft Visual
    Studio project files to an Environment."""
    # Install the builders only if the user has not already provided them.
    try:
        env['BUILDERS']['MSVSProject']
    except KeyError:
        env['BUILDERS']['MSVSProject'] = projectBuilder

    try:
        env['BUILDERS']['MSVSSolution']
    except KeyError:
        env['BUILDERS']['MSVSSolution'] = solutionBuilder

    env['MSVSPROJECTCOM'] = projectAction
    env['MSVSSOLUTIONCOM'] = solutionAction

    if SCons.Script.call_stack:
        # XXX Need to find a way to abstract this; the build engine
        # shouldn't depend on anything in SCons.Script.
        env['MSVSSCONSCRIPT'] = SCons.Script.call_stack[0].sconscript
    else:
        global default_MSVS_SConscript
        if default_MSVS_SConscript is None:
            default_MSVS_SConscript = env.File('SConstruct')
        env['MSVSSCONSCRIPT'] = default_MSVS_SConscript

    # Command lines that the generated projects use to invoke scons for
    # build / rebuild / clean from within Visual Studio.
    env['MSVSSCONS'] = '"%s" -c "%s"' % (python_executable, getExecScriptMain(env))
    env['MSVSSCONSFLAGS'] = '-C "${MSVSSCONSCRIPT.dir.abspath}" -f ${MSVSSCONSCRIPT.name}'
    env['MSVSSCONSCOM'] = '$MSVSSCONS $MSVSSCONSFLAGS'
    env['MSVSBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSREBUILDCOM'] = '$MSVSSCONSCOM "$MSVSBUILDTARGET"'
    env['MSVSCLEANCOM'] = '$MSVSSCONSCOM -c "$MSVSBUILDTARGET"'

    # Set-up ms tools paths for default version
    msvc_setup_env_once(env)

    if 'MSVS_VERSION' in env:
        version_num, suite = msvs_parse_version(env['MSVS_VERSION'])
    else:
        (version_num, suite) = (7.0, None) # guess at a default
    if 'MSVS' not in env:
        env['MSVS'] = {}
    # File suffixes track the project-file format for the MSVS version:
    # 6.0 -> .dsp/.dsw, 7.x-9.x -> .vcproj/.sln, 10+ -> .vcxproj/.sln.
    if (version_num < 7.0):
        env['MSVS']['PROJECTSUFFIX'] = '.dsp'
        env['MSVS']['SOLUTIONSUFFIX'] = '.dsw'
    elif (version_num < 10.0):
        env['MSVS']['PROJECTSUFFIX'] = '.vcproj'
        env['MSVS']['SOLUTIONSUFFIX'] = '.sln'
    else:
        env['MSVS']['PROJECTSUFFIX'] = '.vcxproj'
        env['MSVS']['SOLUTIONSUFFIX'] = '.sln'

    if (version_num >= 10.0):
        env['MSVSENCODING'] = 'utf-8'
    else:
        env['MSVSENCODING'] = 'Windows-1252'

    env['GET_MSVSPROJECTSUFFIX'] = GetMSVSProjectSuffix
    env['GET_MSVSSOLUTIONSUFFIX'] = GetMSVSSolutionSuffix
    env['MSVSPROJECTSUFFIX'] = '${GET_MSVSPROJECTSUFFIX}'
    env['MSVSSOLUTIONSUFFIX'] = '${GET_MSVSSOLUTIONSUFFIX}'
    env['SCONS_HOME'] = os.environ.get('SCONS_HOME')
def exists(env):
    """Tool existence check: true when an MSVC/MSVS install is detected."""
    return msvc_exists()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
ddepaoli3/magnum | magnum/common/pythonk8sclient/client/models/V1beta3_ResourceQuotaSpec.py | 15 | 1377 | #!/usr/bin/env python
"""
Copyright 2015 Reverb Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
class V1beta3_ResourceQuotaSpec(object):
    """Swagger-generated model for the Kubernetes v1beta3 ResourceQuotaSpec.

    NOTE: originally produced by the swagger code generator program;
    edit with care.
    """

    def __init__(self):
        """Set up the serialization maps and the model's attributes.

        swaggerTypes maps attribute name -> Python type used when
        (de)serializing; attributeMap maps attribute name -> JSON key.
        """
        type_map = {
            'hard': 'dict'
        }
        json_key_map = {
            'hard': 'hard'
        }
        self.swaggerTypes = type_map
        self.attributeMap = json_key_map

        # hard: the set of desired hard limits for each named resource.
        self.hard = None  # any
| apache-2.0 |
hilaskis/UAV_MissionPlanner | Lib/zipfile.py | 81 | 54020 | """
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
"ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
    """Raised when a file is not a valid ZIP archive or is corrupted."""
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile, the zipfile requires ZIP64 extensions
    and those extensions are disabled.
    """
error = BadZipfile      # The exception raised by this module

# Hard limits beyond which the ZIP64 extensions are required.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported

# Below are some formats and associated data for reading/writing headers using
# the struct module.  The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
#     http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
    """Return True when fp holds a readable End-of-Central-Directory record."""
    try:
        found = _EndRecData(fp)
    except IOError:
        return False
    # A truthy record means the magic number was located.
    return True if found else False
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    The filename argument may be a file or file-like object too.
    """
    try:
        if hasattr(filename, "read"):
            # Already a file-like object: probe it directly.
            return _check_zipfile(fp=filename)
        with open(filename, "rb") as fp:
            return _check_zipfile(fp)
    except IOError:
        return False
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec

    fpin is the archive file object, offset is the (negative) position of
    the regular end-of-central-directory record relative to EOF, and
    endrec is the already-parsed 32-bit record, returned unchanged when
    no valid ZIP64 locator/record is found.
    """
    try:
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except IOError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec

    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        return endrec

    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
            dircount, dircount2, dirsize, diroffset = \
            struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except IOError:
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        comment = data[start+sizeEndCentDir:]
        # check that comment length is correct
        if endrec[_ECD_COMMENT_SIZE] == len(comment):
            # Append the archive comment and start offset
            endrec.append(comment)
            endrec.append(maxCommentStart + start)

            # Try to read the "Zip64 end of central directory" structure
            return _EndRecData64(fpin, maxCommentStart + start - filesize,
                                 endrec)

    # Unable to find a valid end of central directory structure
    return
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""

    # __slots__ keeps per-entry memory low for archives with many members.
    __slots__ = (
        'orig_filename',
        'filename',
        'date_time',
        'compress_type',
        'comment',
        'extra',
        'create_system',
        'create_version',
        'extract_version',
        'reserved',
        'flag_bits',
        'volume',
        'internal_attr',
        'external_attr',
        'header_offset',
        'CRC',
        'compress_size',
        'file_size',
        '_raw_time',
    )

    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive

        # Terminate the file name at the first null byte.  Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")

        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec

        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = ""               # Comment for each file
        self.extra = ""                 # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0          # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3          # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file

    def FileHeader(self):
        """Return the per-file header as a string."""
        dt = self.date_time
        # Pack date/time into the 16-bit DOS formats used by ZIP headers.
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size

        extra = self.extra

        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            self.create_version = max(45, self.extract_version)

        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra

    def _encodeFilenameFlags(self):
        """Return (filename, flag_bits); UTF-8 encodes non-ASCII unicode
        names and sets the language-encoding flag bit (0x800)."""
        if isinstance(self.filename, unicode):
            try:
                return self.filename.encode('ascii'), self.flag_bits
            except UnicodeEncodeError:
                return self.filename.encode('utf-8'), self.flag_bits | 0x800
        else:
            return self.filename, self.flag_bits

    def _decodeFilename(self):
        """Decode the stored filename as UTF-8 when flag 0x800 is set."""
        if self.flag_bits & 0x800:
            return self.filename.decode('utf-8')
        else:
            return self.filename

    def _decodeExtra(self):
        # Try to decode the extra field.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError, "Corrupt extra field %s"%(ln,)

                idx = 0

                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
                    self.file_size = counts[idx]
                    idx += 1

                if self.compress_size == 0xFFFFFFFFL:
                    self.compress_size = counts[idx]
                    idx += 1

                if self.header_offset == 0xffffffffL:
                    # NOTE(review): 'old' is assigned but never used.
                    old = self.header_offset
                    self.header_offset = counts[idx]
                    idx+=1

            extra = extra[ln+4:]
class _ZipDecrypter:
    """Class to handle decryption of files stored within a ZIP archive.

    ZIP supports a password-based form of encryption. Even though known
    plaintext attacks have been found against it, it is still useful
    to be able to get data out of such a file.

    Usage:
        zd = _ZipDecrypter(mypwd)
        plain_char = zd(cypher_char)
        plain_text = map(zd, cypher_text)
    """

    def _GenerateCRCTable():
        """Generate a CRC-32 table.

        ZIP encryption uses the CRC32 one-byte primitive for scrambling some
        internal keys. We noticed that a direct implementation is faster than
        relying on binascii.crc32().
        """
        poly = 0xedb88320
        table = [0] * 256
        for i in range(256):
            crc = i
            for j in range(8):
                if crc & 1:
                    crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
                else:
                    crc = ((crc >> 1) & 0x7FFFFFFF)
            table[i] = crc
        return table
    # Computed once at class-definition time and shared by all instances.
    crctable = _GenerateCRCTable()

    def _crc32(self, ch, crc):
        """Compute the CRC32 primitive on one byte."""
        return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]

    def __init__(self, pwd):
        # Key initialization values mandated by the ZIP encryption spec.
        self.key0 = 305419896
        self.key1 = 591751049
        self.key2 = 878082192
        for p in pwd:
            self._UpdateKeys(p)

    def _UpdateKeys(self, c):
        # Mix one plaintext character into the three running keys.
        self.key0 = self._crc32(c, self.key0)
        self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
        self.key1 = (self.key1 * 134775813 + 1) & 4294967295
        self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)

    def __call__(self, c):
        """Decrypt a single character."""
        c = ord(c)
        k = self.key2 | 2
        c = c ^ (((k * (k^1)) >> 8) & 255)
        c = chr(c)
        self._UpdateKeys(c)
        return c
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
       Is returned by ZipFile.open().
    """

    # Max size supported by decompressor.
    # NOTE(review): due to operator precedence this evaluates to
    # 1 << (31 - 1) == 2**30, not (1 << 31) - 1; confirm intent before
    # changing, as upstream carries the same expression.
    MAX_N = 1 << 31 - 1

    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096

    # Search for universal newlines or line chunks.
    PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')

    def __init__(self, fileobj, mode, zipinfo, decrypter=None):
        self._fileobj = fileobj
        self._decrypter = decrypter

        self._compress_type = zipinfo.compress_type
        self._compress_size = zipinfo.compress_size
        self._compress_left = zipinfo.compress_size

        if self._compress_type == ZIP_DEFLATED:
            # Negative wbits: raw deflate stream without a zlib header.
            self._decompressor = zlib.decompressobj(-15)
        self._unconsumed = ''

        self._readbuffer = ''
        self._offset = 0

        self._universal = 'U' in mode
        self.newlines = None

        # Adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information.
        if self._decrypter is not None:
            self._compress_left -= 12

        self.mode = mode
        self.name = zipinfo.filename

        if hasattr(zipinfo, 'CRC'):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b'') & 0xffffffff
        else:
            self._expected_crc = None

    def readline(self, limit=-1):
        """Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.
        """

        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find('\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line

        if not self._universal:
            return io.BufferedIOBase.readline(self, limit)

        line = ''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if readahead == '':
                return line

            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + '\n'

            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]

            self._offset += len(chunk)
            line += chunk

        return line

    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            self._offset -= len(chunk)

        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]

    def readable(self):
        return True

    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
        """
        buf = ''
        if n is None:
            n = -1
        while True:
            if n < 0:
                data = self.read1(n)
            elif n > len(buf):
                data = self.read1(n - len(buf))
            else:
                return buf
            if len(data) == 0:
                return buf
            buf += data

    def _update_crc(self, newdata, eof):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        if eof and self._running_crc != self._expected_crc:
            raise BadZipfile("Bad CRC-32 for file %r" % self.name)

    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""

        # Simplify algorithm (branching) by transforming negative n to large n.
        if n < 0 or n is None:
            n = self.MAX_N

        # Bytes available in read buffer.
        len_readbuffer = len(self._readbuffer) - self._offset

        # Read from file.
        if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
            nbytes = n - len_readbuffer - len(self._unconsumed)
            nbytes = max(nbytes, self.MIN_READ_SIZE)
            nbytes = min(nbytes, self._compress_left)

            data = self._fileobj.read(nbytes)
            self._compress_left -= len(data)

            if data and self._decrypter is not None:
                data = ''.join(map(self._decrypter, data))

            if self._compress_type == ZIP_STORED:
                self._update_crc(data, eof=(self._compress_left==0))
                self._readbuffer = self._readbuffer[self._offset:] + data
                self._offset = 0
            else:
                # Prepare deflated bytes for decompression.
                self._unconsumed += data

        # Handle unconsumed data.
        if (len(self._unconsumed) > 0 and n > len_readbuffer and
            self._compress_type == ZIP_DEFLATED):
            data = self._decompressor.decompress(
                self._unconsumed,
                max(n - len_readbuffer, self.MIN_READ_SIZE)
            )

            self._unconsumed = self._decompressor.unconsumed_tail
            eof = len(self._unconsumed) == 0 and self._compress_left == 0
            if eof:
                data += self._decompressor.flush()

            self._update_crc(data, eof=eof)
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0

        # Read from buffer.
        data = self._readbuffer[self._offset: self._offset + n]
        self._offset += len(data)
        return data
class ZipFile:
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read "r", write "w" or append "a".
    compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.

    NOTE: this is Python 2 code (``raise Exc, msg`` / ``print`` statement
    syntax, ``basestring``, ``L`` integer suffixes are used throughout).
    """

    fp = None                   # Set here since __del__ checks it

    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        """Open the ZIP file with mode read "r", write "w" or append "a"."""
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')

        # Validate the requested compression method before touching the file.
        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError,\
                      "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # 'key' is the mode with any "b" stripped, reduced to one character.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self.comment = ''

        # Check if we were passed a file-like object
        if isinstance(file, basestring):
            # No, it's a filename: open (and later close) it ourselves.
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except IOError:
                if mode == 'a':
                    # Appending to a file that does not exist yet: fall back
                    # to creating it in write mode.
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)

        if key == 'r':
            self._GetContents()
        elif key == 'w':
            # set the modified flag so central directory gets written
            # even if no files are added to the archive
            self._didModify = True
        elif key == 'a':
            try:
                # See if file is a zip file
                self._RealGetContents()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:
                # file is not a zip file, just append
                self.fp.seek(0, 2)

                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
        else:
            # Unreachable in practice: mode was validated above.
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise RuntimeError, 'Mode must be "r", "w" or "a"'

    def __enter__(self):
        # Context-manager support: ``with ZipFile(...) as z:``.
        return self

    def __exit__(self, type, value, traceback):
        # Always close on exiting the ``with`` block; exceptions propagate.
        self.close()

    def _GetContents(self):
        """Read the directory, making sure we close the file if the format
        is bad."""
        try:
            self._RealGetContents()
        except BadZipfile:
            # Only close file handles we opened ourselves.
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise

    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file."""
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except IOError:
            raise BadZipfile("File is not a zip file")
        if not endrec:
            raise BadZipfile, "File is not a zip file"
        if self.debug > 1:
            print endrec
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self.comment = endrec[_ECD_COMMENT]     # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print "given, inferred, offset", offset_cd, inferred, concat
        # self.start_dir: Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        data = fp.read(size_cd)
        # Parse the central directory from an in-memory copy.
        fp = cStringIO.StringIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if centdir[0:4] != stringCentralDir:
                raise BadZipfile, "Bad magic number for central directory"
            centdir = struct.unpack(structCentralDir, centdir)
            if self.debug > 2:
                print centdir
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
             x.flag_bits, x.compress_type, t, d,
             x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec);
            # DOS timestamps store seconds with 2-second granularity.
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                            t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            # Adjust for any leading junk prepended to the archive.
            x.header_offset = x.header_offset + concat
            x.filename = x._decodeFilename()
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

            if self.debug > 2:
                print "total", total

    def namelist(self):
        """Return a list of file names in the archive."""
        l = []
        for data in self.filelist:
            l.append(data.filename)
        return l

    def infolist(self):
        """Return a list of class ZipInfo instances for files in the
        archive."""
        return self.filelist

    def printdir(self):
        """Print a table of contents for the zip file."""
        print "%-46s %19s %12s" % ("File Name", "Modified    ", "Size")
        for zinfo in self.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
            print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)

    def testzip(self):
        """Read all the files and check the CRC.

        Returns the name of the first bad file, or (implicitly) None if
        every member's CRC checks out.
        """
        chunk_size = 2 ** 20
        for zinfo in self.filelist:
            try:
                # Read by chunks, to avoid an OverflowError or a
                # MemoryError with very large embedded files.
                f = self.open(zinfo.filename, "r")
                while f.read(chunk_size):     # Check CRC-32
                    pass
            except BadZipfile:
                return zinfo.filename

    def getinfo(self, name):
        """Return the instance of ZipInfo given 'name'."""
        info = self.NameToInfo.get(name)
        if info is None:
            raise KeyError(
                'There is no item named %r in the archive' % name)
        return info

    def setpassword(self, pwd):
        """Set default password for encrypted files."""
        self.pwd = pwd

    def read(self, name, pwd=None):
        """Return file bytes (as a string) for name."""
        return self.open(name, "r", pwd).read()

    def open(self, name, mode="r", pwd=None):
        """Return file-like object for 'name'."""
        if mode not in ("r", "U", "rU"):
            raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to read ZIP archive that was already closed"

        # Only open a new file for instances where we were not
        # given a file object in the constructor.
        # NOTE(review): when we open zef_file here and a later check raises,
        # the handle is not closed on the error path — potential fd leak.
        if self._filePassed:
            zef_file = self.fp
        else:
            zef_file = open(self.filename, 'rb')

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        zef_file.seek(zinfo.header_offset, 0)

        # Skip the file header:
        fheader = zef_file.read(sizeFileHeader)
        if fheader[0:4] != stringFileHeader:
            raise BadZipfile, "Bad magic number for file header"

        fheader = struct.unpack(structFileHeader, fheader)
        fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
        if fheader[_FH_EXTRA_FIELD_LENGTH]:
            zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

        # Cross-check the local header against the central directory entry.
        if fname != zinfo.orig_filename:
            raise BadZipfile, \
                  'File name in directory "%s" and header "%s" differ.' % (
                      zinfo.orig_filename, fname)

        # check for encrypted flag & handle password
        is_encrypted = zinfo.flag_bits & 0x1
        zd = None
        if is_encrypted:
            if not pwd:
                pwd = self.pwd
            if not pwd:
                raise RuntimeError, "File %s is encrypted, " \
                      "password required for extraction" % name

            zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream is an encryption header
            # used to strengthen the algorithm. The first 11 bytes are
            # completely random, while the 12th contains the MSB of the CRC,
            # or the MSB of the file time depending on the header type
            # and is used to check the correctness of the password.
            bytes = zef_file.read(12)
            h = map(zd, bytes[0:12])
            if zinfo.flag_bits & 0x8:
                # compare against the file type from extended local headers
                check_byte = (zinfo._raw_time >> 8) & 0xff
            else:
                # compare against the CRC otherwise
                check_byte = (zinfo.CRC >> 24) & 0xff
            if ord(h[11]) != check_byte:
                raise RuntimeError("Bad password for file", name)

        return ZipExtFile(zef_file, mode, zinfo, zd)

    def extract(self, member, path=None, pwd=None):
        """Extract a member from the archive to the current working directory,
        using its full name. Its file information is extracted as accurately
        as possible. `member' may be a filename or a ZipInfo object. You can
        specify a different directory using `path'.
        """
        if not isinstance(member, ZipInfo):
            member = self.getinfo(member)

        if path is None:
            path = os.getcwd()

        return self._extract_member(member, path, pwd)

    def extractall(self, path=None, members=None, pwd=None):
        """Extract all members from the archive to the current working
        directory. `path' specifies a different directory to extract to.
        `members' is optional and must be a subset of the list returned
        by namelist().
        """
        if members is None:
            members = self.namelist()

        for zipinfo in members:
            self.extract(zipinfo, path, pwd)

    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
        file on the path targetpath.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        # Strip trailing path separator, unless it represents the root.
        if (targetpath[-1:] in (os.path.sep, os.path.altsep)
            and len(os.path.splitdrive(targetpath)[1]) > 1):
            targetpath = targetpath[:-1]

        # don't include leading "/" from file name if present
        if member.filename[0] == '/':
            targetpath = os.path.join(targetpath, member.filename[1:])
        else:
            targetpath = os.path.join(targetpath, member.filename)

        targetpath = os.path.normpath(targetpath)

        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)

        # A trailing "/" marks a directory entry: just make the directory.
        if member.filename[-1] == '/':
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath

        source = self.open(member, pwd=pwd)
        target = file(targetpath, "wb")
        shutil.copyfileobj(source, target)
        source.close()
        target.close()

        return targetpath

    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive."""
        if zinfo.filename in self.NameToInfo:
            if self.debug:      # Warning for duplicate names
                print "Duplicate name:", zinfo.filename
        if self.mode not in ("w", "a"):
            raise RuntimeError, 'write() requires mode "w" or "a"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to write ZIP archive that was already closed"
        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
            raise RuntimeError, \
                  "Compression requires the (missing) zlib module"
        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
            raise RuntimeError, \
                  "That compression method is not supported"
        # ZIP64 extensions are required above these limits but are only
        # emitted when the caller opted in via allowZip64.
        if zinfo.file_size > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
        if zinfo.header_offset > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Zipfile size would require ZIP64 extensions")

    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname."""
        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]
        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        # Archive names are stored without any leading path separators.
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        zinfo.header_offset = self.fp.tell()    # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            # Directory entries carry no data and no CRC.
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            self.fp.write(zinfo.FileHeader())
            return

        with open(filename, "rb") as fp:
            # Must overwrite CRC and sizes with correct data later
            zinfo.CRC = CRC = 0
            zinfo.compress_size = compress_size = 0
            zinfo.file_size = file_size = 0
            self.fp.write(zinfo.FileHeader())
            if zinfo.compress_type == ZIP_DEFLATED:
                # Negative wbits => raw deflate stream (no zlib header).
                cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                     zlib.DEFLATED, -15)
            else:
                cmpr = None
            # Stream the file through in 8 KiB chunks, computing the CRC
            # (and compressing) as we go.
            while 1:
                buf = fp.read(1024 * 8)
                if not buf:
                    break
                file_size = file_size + len(buf)
                CRC = crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = cmpr.compress(buf)
                    compress_size = compress_size + len(buf)
                self.fp.write(buf)
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        # Seek backwards and write CRC and file sizes
        position = self.fp.tell()       # Preserve current position in file
        self.fp.seek(zinfo.header_offset + 14, 0)
        self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
              zinfo.file_size))
        self.fp.seek(position, 0)
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
        """Write a file into the archive.  The contents is the string
        'bytes'.  'zinfo_or_arcname' is either a ZipInfo instance or
        the name of the file in the archive."""
        if not isinstance(zinfo_or_arcname, ZipInfo):
            zinfo = ZipInfo(filename=zinfo_or_arcname,
                            date_time=time.localtime(time.time())[:6])

            zinfo.compress_type = self.compression
            # Default permissions: rw for owner only (0600, Python 2 octal).
            zinfo.external_attr = 0600 << 16
        else:
            zinfo = zinfo_or_arcname

        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        if compress_type is not None:
            zinfo.compress_type = compress_type

        zinfo.file_size = len(bytes)            # Uncompressed size
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        self._writecheck(zinfo)
        self._didModify = True
        zinfo.CRC = crc32(bytes) & 0xffffffff   # CRC-32 checksum
        if zinfo.compress_type == ZIP_DEFLATED:
            co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
            bytes = co.compress(bytes) + co.flush()
            zinfo.compress_size = len(bytes)    # Compressed size
        else:
            zinfo.compress_size = zinfo.file_size
        # NOTE(review): header_offset was already set above; this second
        # assignment is redundant (fp position has not moved in between).
        zinfo.header_offset = self.fp.tell()    # Start of header bytes
        self.fp.write(zinfo.FileHeader())
        self.fp.write(bytes)
        self.fp.flush()
        if zinfo.flag_bits & 0x08:
            # Write CRC and file sizes after the file data
            self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
                  zinfo.file_size))
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo

    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        self.close()

    def close(self):
        """Close the file, and for mode "w" and "a" write the ending
        records."""
        if self.fp is None:
            return

        if self.mode in ("w", "a") and self._didModify: # write ending records
            count = 0
            pos1 = self.fp.tell()
            for zinfo in self.filelist:         # write central directory
                count = count + 1
                dt = zinfo.date_time
                # Re-pack the timestamp into the 16-bit DOS date/time fields.
                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
                extra = []
                if zinfo.file_size > ZIP64_LIMIT \
                        or zinfo.compress_size > ZIP64_LIMIT:
                    # Sizes go in the ZIP64 extra field; the regular fields
                    # hold the 0xffffffff sentinel.
                    extra.append(zinfo.file_size)
                    extra.append(zinfo.compress_size)
                    file_size = 0xffffffff
                    compress_size = 0xffffffff
                else:
                    file_size = zinfo.file_size
                    compress_size = zinfo.compress_size

                if zinfo.header_offset > ZIP64_LIMIT:
                    extra.append(zinfo.header_offset)
                    header_offset = 0xffffffffL
                else:
                    header_offset = zinfo.header_offset

                extra_data = zinfo.extra
                if extra:
                    # Append a ZIP64 field to the extra's
                    extra_data = struct.pack(
                            '<HH' + 'Q'*len(extra),
                            1, 8*len(extra), *extra) + extra_data

                    # ZIP64 members need at least version 4.5 to extract.
                    extract_version = max(45, zinfo.extract_version)
                    create_version = max(45, zinfo.create_version)
                else:
                    extract_version = zinfo.extract_version
                    create_version = zinfo.create_version

                try:
                    filename, flag_bits = zinfo._encodeFilenameFlags()
                    centdir = struct.pack(structCentralDir,
                     stringCentralDir, create_version,
                     zinfo.create_system, extract_version, zinfo.reserved,
                     flag_bits, zinfo.compress_type, dostime, dosdate,
                     zinfo.CRC, compress_size, file_size,
                     len(filename), len(extra_data), len(zinfo.comment),
                     0, zinfo.internal_attr, zinfo.external_attr,
                     header_offset)
                except DeprecationWarning:
                    # Dump the offending values for post-mortem, then re-raise.
                    print >>sys.stderr, (structCentralDir,
                     stringCentralDir, create_version,
                     zinfo.create_system, extract_version, zinfo.reserved,
                     zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                     zinfo.CRC, compress_size, file_size,
                     len(zinfo.filename), len(extra_data), len(zinfo.comment),
                     0, zinfo.internal_attr, zinfo.external_attr,
                     header_offset)
                    raise
                self.fp.write(centdir)
                self.fp.write(filename)
                self.fp.write(extra_data)
                self.fp.write(zinfo.comment)

            pos2 = self.fp.tell()
            # Write end-of-zip-archive record
            centDirCount = count
            centDirSize = pos2 - pos1
            centDirOffset = pos1
            if (centDirCount >= ZIP_FILECOUNT_LIMIT or
                centDirOffset > ZIP64_LIMIT or
                centDirSize > ZIP64_LIMIT):
                # Need to write the ZIP64 end-of-archive records
                zip64endrec = struct.pack(
                        structEndArchive64, stringEndArchive64,
                        44, 45, 45, 0, 0, centDirCount, centDirCount,
                        centDirSize, centDirOffset)
                self.fp.write(zip64endrec)

                zip64locrec = struct.pack(
                        structEndArchive64Locator,
                        stringEndArchive64Locator, 0, pos2, 1)
                self.fp.write(zip64locrec)
                # The classic end record then carries capped sentinel values.
                centDirCount = min(centDirCount, 0xFFFF)
                centDirSize = min(centDirSize, 0xFFFFFFFF)
                centDirOffset = min(centDirOffset, 0xFFFFFFFF)

            # check for valid comment length
            # NOTE(review): 'msg' is built here but never printed/used —
            # looks like a dropped debug print; confirm against upstream.
            if len(self.comment) >= ZIP_MAX_COMMENT:
                if self.debug > 0:
                    msg = 'Archive comment is too long; truncating to %d bytes' \
                          % ZIP_MAX_COMMENT
                self.comment = self.comment[:ZIP_MAX_COMMENT]

            endrec = struct.pack(structEndArchive, stringEndArchive,
                                 0, 0, centDirCount, centDirCount,
                                 centDirSize, centDirOffset, len(self.comment))
            self.fp.write(endrec)
            self.fp.write(self.comment)
            self.fp.flush()

        if not self._filePassed:
            self.fp.close()
        self.fp = None
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def writepy(self, pathname, basename = ""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print "Adding package in", pathname, "as", basename
                # Add the package's __init__ module first.
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print "Adding", arcname
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename)  # Recursive call
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                # (subdirectories are deliberately ignored in this branch).
                if self.debug:
                    print "Adding files from directory", pathname
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                                            basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
        else:
            if pathname[-3:] != ".py":
                raise RuntimeError, \
                      'Files added with writepy() must end with ".py"'
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print "Adding file", arcname
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        if os.path.isfile(file_pyo) and \
                os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            fname = file_pyo    # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
                os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            # Stale or missing .pyc: recompile from source.
            import py_compile
            if self.debug:
                print "Compiling", file_py
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError,err:
                # Report the compile error but still add the (old) .pyc below.
                print err.msg
            fname = file_pyc
        else:
            fname = file_pyc
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args = None):
    """Command line interface: list (-l), test (-t), extract (-e) or
    create (-c) a zip archive.  `args` defaults to sys.argv[1:]."""
    import textwrap
    USAGE=textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip        # Show listing of a zipfile
            zipfile.py -t zipfile.zip        # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    if args is None:
        args = sys.argv[1:]

    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print USAGE
        sys.exit(1)

    if args[0] == '-l':
        # List the archive's table of contents.
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        zf.printdir()
        zf.close()

    elif args[0] == '-t':
        # Verify every member's CRC.
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        zf = ZipFile(args[1], 'r')
        badfile = zf.testzip()
        if badfile:
            print("The following enclosed file is corrupted: {!r}".format(badfile))
        print "Done testing"

    elif args[0] == '-e':
        # Extract all members into the target directory.
        if len(args) != 3:
            print USAGE
            sys.exit(1)

        zf = ZipFile(args[1], 'r')
        out = args[2]
        for path in zf.namelist():
            # Drop a leading "./" so members land directly under 'out'.
            if path.startswith('./'):
                tgt = os.path.join(out, path[2:])
            else:
                tgt = os.path.join(out, path)

            tgtdir = os.path.dirname(tgt)
            if not os.path.exists(tgtdir):
                os.makedirs(tgtdir)
            with open(tgt, 'wb') as fp:
                fp.write(zf.read(path))
        zf.close()

    elif args[0] == '-c':
        # Create a new archive from the given files/directories.
        if len(args) < 3:
            print USAGE
            sys.exit(1)

        def addToZip(zf, path, zippath):
            # Recursively add files; directories are walked, other
            # filesystem objects (sockets, etc.) are skipped.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                for nm in os.listdir(path):
                    addToZip(zf,
                            os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore

        zf = ZipFile(args[1], 'w', allowZip64=True)
        for src in args[2:]:
            addToZip(zf, src, os.path.basename(src))

        zf.close()

if __name__ == "__main__":
    main()
| gpl-2.0 |
tumbl3w33d/ansible | lib/ansible/modules/storage/vexata/vexata_eg.py | 25 | 5824 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Sandeep Kasargod (sandeep@vexata.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vexata_eg
version_added: 2.9
short_description: Manage export groups on Vexata VX100 storage arrays
description:
- Create or delete export groups on a Vexata VX100 array.
- An export group is a tuple of a volume group, initiator group and port
group that allows a set of volumes to be exposed to one or more hosts
through specific array ports.
author:
- Sandeep Kasargod (@vexata)
options:
name:
description:
- Export group name.
required: true
type: str
state:
description:
- Creates export group when present or delete when absent.
default: present
choices: [ present, absent ]
type: str
vg:
description:
- Volume group name.
type: str
ig:
description:
- Initiator group name.
type: str
pg:
description:
- Port group name.
type: str
extends_documentation_fragment:
- vexata.vx100
'''
EXAMPLES = r'''
- name: Create export group named db_export.
vexata_eg:
name: db_export
vg: dbvols
ig: dbhosts
pg: pg1
state: present
array: vx100_ultra.test.com
user: admin
password: secret
- name: Delete export group named db_export
vexata_eg:
name: db_export
state: absent
array: vx100_ultra.test.com
user: admin
password: secret
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vexata import (
argument_spec, get_array, required_together)
def get_eg(module, array):
    """Retrieve the named export group if it exists, None if absent.

    Calls module.fail_json() (which terminates the module run) when the
    array query itself fails.
    """
    name = module.params['name']
    try:
        egs = array.list_egs()
        # Use a list comprehension rather than filter(): on Python 3,
        # filter() returns a lazy iterator with no len(), so the original
        # code raised TypeError and was mis-reported as a retrieval error.
        matching = [eg for eg in egs if eg['name'] == name]
        if len(matching) == 1:
            return matching[0]
        else:
            return None
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve export groups.')
def get_vg_id(module, array):
    """Retrieve the named volume group's id; fail_json() when absent.

    module.fail_json() terminates the module run, both when the group is
    missing and when the array query fails.
    """
    name = module.params['vg']
    try:
        vgs = array.list_vgs()
        # List comprehension instead of filter(): Python 3's filter()
        # returns an iterator without len(), which broke this lookup.
        matching = [vg for vg in vgs if vg['name'] == name]
        if len(matching) == 1:
            return matching[0]['id']
        else:
            module.fail_json(msg='Volume group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve volume groups.')
def get_ig_id(module, array):
    """Retrieve the named initiator group's id; fail_json() when absent.

    module.fail_json() terminates the module run, both when the group is
    missing and when the array query fails.
    """
    name = module.params['ig']
    try:
        igs = array.list_igs()
        # List comprehension instead of filter(): Python 3's filter()
        # returns an iterator without len(), which broke this lookup.
        matching = [ig for ig in igs if ig['name'] == name]
        if len(matching) == 1:
            return matching[0]['id']
        else:
            module.fail_json(msg='Initiator group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve initiator groups.')
def get_pg_id(module, array):
    """Retrieve the named port group's id; fail_json() when absent.

    module.fail_json() terminates the module run, both when the group is
    missing and when the array query fails.
    """
    name = module.params['pg']
    try:
        pgs = array.list_pgs()
        # List comprehension instead of filter(): Python 3's filter()
        # returns an iterator without len(), which broke this lookup.
        matching = [pg for pg in pgs if pg['name'] == name]
        if len(matching) == 1:
            return matching[0]['id']
        else:
            module.fail_json(msg='Port group {0} was not found.'.format(name))
    except Exception:
        module.fail_json(msg='Error while attempting to retrieve port groups.')
def create_eg(module, array):
    """Create a new export group from the vg, ig and pg named in the params.

    Exits the module via exit_json() (changed=True on success) or
    fail_json() on any error.
    """
    changed = False
    name = module.params['name']
    # Resolve member group ids up front; each helper fail_json()s and
    # terminates the run when its group cannot be found.
    member_ids = (get_vg_id(module, array),
                  get_ig_id(module, array),
                  get_pg_id(module, array))

    # In check mode, report "no change" without touching the array.
    if module.check_mode:
        module.exit_json(changed=changed)

    try:
        created = array.create_eg(name, 'Ansible export group', member_ids)
        if not created:
            raise Exception
        module.log(msg='Created export group {0}'.format(name))
        changed = True
    except Exception:
        module.fail_json(msg='Export group {0} create failed.'.format(name))
    module.exit_json(changed=changed)
def delete_eg(module, array, eg):
    """Delete the existing export group `eg` (a dict with 'name' and 'id').

    Exits the module via exit_json() (changed=True on success) or
    fail_json() on any error.
    """
    changed = False
    name = eg['name']

    # In check mode, report "no change" without touching the array.
    if module.check_mode:
        module.exit_json(changed=changed)

    try:
        if array.delete_eg(eg['id']):
            module.log(msg='Export group {0} deleted.'.format(name))
            changed = True
        else:
            raise Exception
    except Exception:
        module.fail_json(msg='Export group {0} delete failed.'.format(name))
    module.exit_json(changed=changed)
def main():
    """Module entry point: converge the named export group to `state`."""
    spec = argument_spec()
    spec.update(dict(
        name=dict(type='str', required=True),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        vg=dict(type='str'),
        ig=dict(type='str'),
        pg=dict(type='str'),
    ))
    module = AnsibleModule(spec,
                           supports_check_mode=True,
                           required_together=required_together())

    array = get_array(module)
    existing = get_eg(module, array)
    state = module.params['state']

    # Only act when the desired state differs from reality.
    if state == 'present' and not existing:
        create_eg(module, array)
    elif state == 'absent' and existing:
        delete_eg(module, array, existing)
    else:
        module.exit_json(changed=False)

if __name__ == '__main__':
    main()
| gpl-3.0 |
neiudemo1/django | tests/mail/test_sendtestemail.py | 327 | 3088 | from __future__ import unicode_literals
from django.core import mail
from django.core.management import call_command
from django.test import SimpleTestCase, override_settings
@override_settings(
    ADMINS=(('Admin', 'admin@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
    MANAGERS=(('Manager', 'manager@example.com'), ('Admin and Manager', 'admin_and_manager@example.com')),
)
class SendTestEmailManagementCommand(SimpleTestCase):
    """
    Exercise the `sendtestemail` management command: positional recipients
    plus the --managers and --admins flags.
    """

    def test_single_receiver(self):
        """
        The mail is sent with the correct subject and recipient.
        """
        address = 'joe@example.com'
        call_command('sendtestemail', address)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(message.subject[:15], 'Test email from')
        self.assertEqual(message.recipients(), [address])

    def test_multiple_receivers(self):
        """
        The mail may be sent with multiple recipients.
        """
        addresses = ['joe@example.com', 'jane@example.com']
        call_command('sendtestemail', *addresses)
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(message.subject[:15], 'Test email from')
        self.assertEqual(
            sorted(message.recipients()),
            ['jane@example.com', 'joe@example.com'],
        )

    def test_manager_receivers(self):
        """
        The mail should be sent to the email addresses specified in
        settings.MANAGERS.
        """
        call_command('sendtestemail', '--managers')
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(
            sorted(message.recipients()),
            ['admin_and_manager@example.com', 'manager@example.com'],
        )

    def test_admin_receivers(self):
        """
        The mail should be sent to the email addresses specified in
        settings.ADMINS.
        """
        call_command('sendtestemail', '--admins')
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0]
        self.assertEqual(
            sorted(message.recipients()),
            ['admin@example.com', 'admin_and_manager@example.com'],
        )

    def test_manager_and_admin_receivers(self):
        """
        With both flags, one mail goes to settings.MANAGERS and a second
        one to settings.ADMINS.
        """
        call_command('sendtestemail', '--managers', '--admins')
        self.assertEqual(len(mail.outbox), 2)
        manager_mail, admin_mail = mail.outbox
        self.assertEqual(
            sorted(manager_mail.recipients()),
            ['admin_and_manager@example.com', 'manager@example.com'],
        )
        self.assertEqual(
            sorted(admin_mail.recipients()),
            ['admin@example.com', 'admin_and_manager@example.com'],
        )
| bsd-3-clause |
bhairavmehta95/flashcard-helper-alexa-skill | venv/lib/python2.7/site-packages/pip/utils/appdirs.py | 340 | 8811 | """
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip.compat import WINDOWS, expanduser
from pip._vendor.six import PY2, text_type
def user_cache_dir(appname):
    r"""
    Return full path to the user-specific cache dir for this application.

        "appname" is the name of application.

    Typical user cache directories are:
        macOS:   ~/Library/Caches/<AppName>
        Unix:    ~/.cache/<AppName> (XDG default)
        Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache

    On Windows the only suggestion in the MSDN docs is that local settings
    go in the `CSIDL_LOCAL_APPDATA` directory, so "Cache" is appended to it
    (apps typically cache *under* their app-data dir, e.g.
    ...\Acme\SuperApp\Cache\1.0).
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        # When using Python 2, return paths as bytes on Windows like we do on
        # other operating systems. See helper function docs for more details.
        if PY2 and isinstance(base, text_type):
            base = _win_path_to_bytes(base)
        return os.path.join(base, appname, "Cache")
    if sys.platform == "darwin":
        return os.path.join(expanduser("~/Library/Caches"), appname)
    # Linux/other Unix: honor the XDG spec, defaulting to ~/.cache.
    base = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
    return os.path.join(base, appname)
def user_data_dir(appname, roaming=False):
    r"""
    Return the per-user data directory for the given application.

    "appname" is the name of the application.
    "roaming" (boolean, default False) can be set True to use the Windows
    roaming appdata directory, so that on a network configured for roaming
    profiles this user data is sync'd on login.  See
    <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
    for a discussion of issues.

    Typical user data directories are:
        macOS:                ~/Library/Application Support/<AppName>
        Unix:                 ~/.local/share/<AppName> (or $XDG_DATA_HOME)
        Win XP (not roaming): C:\Documents and Settings\<username>\ ...
                              ...Application Data\<AppName>
        Win XP (roaming):     C:\Documents and Settings\<username>\Local ...
                              ...Settings\Application Data\<AppName>
        Win 7 (not roaming):  C:\Users\<username>\AppData\Local\<AppName>
        Win 7 (roaming):      C:\Users\<username>\AppData\Roaming\<AppName>

    For Unix we follow the XDG spec and support $XDG_DATA_HOME; the
    default is "~/.local/share/<AppName>".
    """
    if WINDOWS:
        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        base = os.path.normpath(_get_win_folder(const))
    elif sys.platform == "darwin":
        base = expanduser('~/Library/Application Support/')
    else:
        base = os.getenv('XDG_DATA_HOME', expanduser("~/.local/share"))
    return os.path.join(base, appname)
def user_config_dir(appname, roaming=True):
    """Return the per-user config directory for the given application.

    "appname" is the name of the application.
    "roaming" (boolean, default True) can be set False to avoid the
    Windows roaming appdata directory (see user_data_dir for the
    roaming-profile discussion).

    Typical user config directories are:
        macOS: same as user_data_dir
        Unix:  ~/.config/<AppName> (or $XDG_CONFIG_HOME per the XDG spec)
        Win *: same as user_data_dir
    """
    if WINDOWS:
        return user_data_dir(appname, roaming=roaming)
    if sys.platform == "darwin":
        return user_data_dir(appname)
    base = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config"))
    return os.path.join(base, appname)
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
    r"""Return a list of potential user-shared config dirs for this application.

    "appname" is the name of the application.

    Typical directories are:
        macOS:  /Library/Application Support/<AppName>/
        Unix:   $XDG_CONFIG_DIRS[i]/<AppName> for each entry (default
                /etc/xdg), plus /etc itself
        Win XP: C:\Documents and Settings\All Users\Application Data\<AppName>\
        Vista:  (Fail! "C:\ProgramData" is a hidden *system* directory.)
        Win 7:  C:\ProgramData\<AppName>\ (hidden, but writeable)
    """
    if WINDOWS:
        base = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        return [os.path.join(base, appname)]
    if sys.platform == 'darwin':
        return [os.path.join('/Library/Application Support', appname)]
    # Unix: one candidate per $XDG_CONFIG_DIRS entry (an explicitly empty
    # variable yields none), then /etc directly as the final fallback.
    dirs = []
    xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
    if xdg_config_dirs:
        for entry in xdg_config_dirs.split(os.pathsep):
            dirs.append(os.path.join(expanduser(entry), appname))
    dirs.append('/etc')
    return dirs
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
    """
    Resolve a CSIDL_* folder via the "Shell Folders" registry key.

    This is a fallback technique at best: the registry is not guaranteed
    to hold the correct answer for all CSIDL_* names.
    """
    import _winreg
    # Map the CSIDL constant name onto the registry value name.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    directory, _value_type = _winreg.QueryValueEx(key, shell_folder_name)
    return directory
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder by calling SHGetFolderPathW via ctypes."""
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
    # Downgrade to the short (8.3) path name if the result contains
    # high-bit characters.  See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(c) > 255 for c in buf):
        buf2 = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2
    return buf.value
# Pick the Windows folder-lookup strategy once at import time: prefer the
# ctypes/SHGetFolderPathW implementation, falling back to the registry
# when ctypes is unavailable.  _get_win_folder is only defined on Windows.
if WINDOWS:
    try:
        import ctypes
        _get_win_folder = _get_win_folder_with_ctypes
    except ImportError:
        _get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
    """Encode a Windows path to bytes.  Only used on Python 2.

    Keeps path handling consistent with other operating systems, where
    paths are also returned as bytes, avoiding bytes/unicode mixups
    elsewhere in the codebase.  For details and discussion see
    <https://github.com/pypa/pip/issues/3463>.

    If neither ASCII nor MBCS can encode the path, the original unicode
    path is returned unchanged.
    """
    for codec in ('ASCII', 'MBCS'):
        try:
            return path.encode(codec)
        except (UnicodeEncodeError, LookupError):
            # LookupError: the MBCS codec only exists on Windows builds.
            continue
    return path
| mit |
faywong/FFPlayer | project/jni/python/src/Lib/urllib.py | 48 | 64844 | """Open an arbitrary URL.
See the following document for more info on URLs:
"Names and Addresses, URIs, URLs, URNs, URCs", at
http://www.w3.org/pub/WWW/Addressing/Overview.html
See also the HTTP spec (from which the error codes are derived):
"HTTP - Hypertext Transfer Protocol", at
http://www.w3.org/pub/WWW/Protocols/
Related standards and specs:
- RFC1808: the "relative URL" spec. (authoritative status)
- RFC1738 - the "URL standard". (authoritative status)
- RFC1630 - the "URI spec". (informational status)
The object returned by URLopener().open(file) will differ per
protocol. All you know is that is has methods read(), readline(),
readlines(), fileno(), close() and info(). The read*(), fileno()
and close() methods work like those of open files.
The info() method returns a mimetools.Message object which can be
used to query various info about the object, if available.
(mimetools.Message objects are queried with the getheader() method.)
"""
import string
import socket
import os
import time
import sys
from urlparse import urljoin as basejoin
import warnings
__all__ = ["urlopen", "URLopener", "FancyURLopener", "urlretrieve",
"urlcleanup", "quote", "quote_plus", "unquote", "unquote_plus",
"urlencode", "url2pathname", "pathname2url", "splittag",
"localhost", "thishost", "ftperrors", "basejoin", "unwrap",
"splittype", "splithost", "splituser", "splitpasswd", "splitport",
"splitnport", "splitquery", "splitattr", "splitvalue",
"getproxies"]
__version__ = '1.17' # XXX This version is not always updated :-(
MAXFTPCACHE = 10 # Trim the ftp cache beyond this size
# Helper for non-unix systems
# Platform-specific URL<->path converters; the POSIX fallback just
# percent-(un)quotes, since file URLs map directly onto paths there.
if os.name == 'mac':
    from macurl2path import url2pathname, pathname2url
elif os.name == 'nt':
    from nturl2path import url2pathname, pathname2url
elif os.name == 'riscos':
    from rourl2path import url2pathname, pathname2url
else:
    def url2pathname(pathname):
        """OS-specific conversion from a relative URL of the 'file' scheme
        to a file system path; not recommended for general use."""
        return unquote(pathname)
    def pathname2url(pathname):
        """OS-specific conversion from a file system path to a relative URL
        of the 'file' scheme; not recommended for general use."""
        return quote(pathname)
# This really consists of two pieces:
# (1) a class which handles opening of all sorts of URLs
# (plus assorted utilities etc.)
# (2) a set of functions for parsing URLs
# XXX Should these be separated out into different modules?
# Shortcut for basic usage
_urlopener = None
def urlopen(url, data=None, proxies=None):
    """Create a file-like object for the specified URL to read from."""
    from warnings import warnpy3k
    warnings.warnpy3k("urllib.urlopen() has been removed in Python 3.0 in "
                      "favor of urllib2.urlopen()", stacklevel=2)
    global _urlopener
    if proxies is not None:
        # Explicit proxies: build a one-off opener; never cached.
        opener = FancyURLopener(proxies=proxies)
    elif not _urlopener:
        # First proxy-less call: create and cache the shared opener.
        opener = FancyURLopener()
        _urlopener = opener
    else:
        opener = _urlopener
    if data is None:
        return opener.open(url)
    else:
        # A non-None data payload turns the request into a POST.
        return opener.open(url, data)
def urlretrieve(url, filename=None, reporthook=None, data=None):
    """Retrieve *url* using the (lazily created) module-wide opener.

    Returns a (filename, headers) pair; see URLopener.retrieve for the
    meaning of the optional arguments.
    """
    global _urlopener
    opener = _urlopener
    if not opener:
        opener = FancyURLopener()
        _urlopener = opener
    return opener.retrieve(url, filename, reporthook, data)
def urlcleanup():
    """Delete temporary files created by the module-wide opener, if any."""
    opener = _urlopener
    if opener:
        opener.cleanup()
# Check for SSL support: URLopener.open_https below is only defined when
# the ssl module is importable.
try:
    import ssl
except ImportError:
    # Narrowed from a bare `except:` -- only a missing ssl module means
    # "no SSL support"; any other failure should surface to the caller.
    _have_ssl = False
else:
    _have_ssl = True
# exception raised when downloaded size does not match content-length
class ContentTooShortError(IOError):
    """Raised when fewer bytes were downloaded than Content-Length promised.

    The partial payload is kept on the `content` attribute so callers can
    inspect or salvage what was received.
    """
    def __init__(self, message, content):
        super(ContentTooShortError, self).__init__(message)
        self.content = content
ftpcache = {}
class URLopener:
    """Class to open URLs.
    This is a class rather than just a subroutine because we may need
    more than one set of global protocol-specific options.
    Note -- this is a base class for those who don't want the
    automatic handling of errors type 302 (relocated) and 401
    (authorization needed)."""
    # Class-level default lets cleanup() run safely even if __init__ never
    # completed (e.g. a failing subclass constructor).
    __tempfiles = None
    version = "Python-urllib/%s" % __version__
    # Constructor
    def __init__(self, proxies=None, **x509):
        if proxies is None:
            proxies = getproxies()
        assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
        self.proxies = proxies
        self.key_file = x509.get('key_file')
        self.cert_file = x509.get('cert_file')
        self.addheaders = [('User-Agent', self.version)]
        self.__tempfiles = []
        self.__unlink = os.unlink # See cleanup()
        self.tempcache = None
        # Undocumented feature: if you assign {} to tempcache,
        # it is used to cache files retrieved with
        # self.retrieve(). This is not enabled by default
        # since it does not work for changing documents (and I
        # haven't got the logic to check expiration headers
        # yet).
        self.ftpcache = ftpcache
        # Undocumented feature: you can use a different
        # ftp cache by assigning to the .ftpcache member;
        # in case you want logically independent URL openers
        # XXX This is not threadsafe.  Bah.
    def __del__(self):
        # Best-effort cleanup when the opener is garbage collected.
        self.close()
    def close(self):
        self.cleanup()
    def cleanup(self):
        # This code sometimes runs when the rest of this module
        # has already been deleted, so it can't use any globals
        # or import anything.
        if self.__tempfiles:
            for file in self.__tempfiles:
                try:
                    self.__unlink(file)
                except OSError:
                    pass
            del self.__tempfiles[:]
        if self.tempcache:
            self.tempcache.clear()
    def addheader(self, *args):
        """Add a header to be used by the HTTP interface only
        e.g. u.addheader('Accept', 'sound/basic')"""
        self.addheaders.append(args)
    # External interface
    def open(self, fullurl, data=None):
        """Use URLopener().open(file) instead of open(file, 'r')."""
        fullurl = unwrap(toBytes(fullurl))
        if self.tempcache and fullurl in self.tempcache:
            filename, headers = self.tempcache[fullurl]
            fp = open(filename, 'rb')
            return addinfourl(fp, headers, fullurl)
        urltype, url = splittype(fullurl)
        if not urltype:
            urltype = 'file'
        if urltype in self.proxies:
            proxy = self.proxies[urltype]
            urltype, proxyhost = splittype(proxy)
            host, selector = splithost(proxyhost)
            url = (host, fullurl) # Signal special case to open_*()
        else:
            proxy = None
        # Dispatch on scheme: 'http' -> self.open_http, etc.; dashes in the
        # scheme become underscores so the name is a valid identifier.
        name = 'open_' + urltype
        self.type = urltype
        name = name.replace('-', '_')
        if not hasattr(self, name):
            if proxy:
                return self.open_unknown_proxy(proxy, fullurl, data)
            else:
                return self.open_unknown(fullurl, data)
        try:
            if data is None:
                return getattr(self, name)(url)
            else:
                return getattr(self, name)(url, data)
        except socket.error, msg:
            raise IOError, ('socket error', msg), sys.exc_info()[2]
    def open_unknown(self, fullurl, data=None):
        """Overridable interface to open unknown URL type."""
        type, url = splittype(fullurl)
        raise IOError, ('url error', 'unknown url type', type)
    def open_unknown_proxy(self, proxy, fullurl, data=None):
        """Overridable interface to open unknown URL type."""
        type, url = splittype(fullurl)
        raise IOError, ('url error', 'invalid proxy for %s' % type, proxy)
    # External interface
    def retrieve(self, url, filename=None, reporthook=None, data=None):
        """retrieve(url) returns (filename, headers) for a local object
        or (tempfilename, headers) for a remote object."""
        url = unwrap(toBytes(url))
        if self.tempcache and url in self.tempcache:
            return self.tempcache[url]
        type, url1 = splittype(url)
        if filename is None and (not type or type == 'file'):
            try:
                fp = self.open_local_file(url1)
                hdrs = fp.info()
                del fp
                return url2pathname(splithost(url1)[1]), hdrs
            except IOError, msg:
                pass
        fp = self.open(url, data)
        try:
            headers = fp.info()
            if filename:
                tfp = open(filename, 'wb')
            else:
                # No target name given: download into a temp file whose
                # suffix matches the URL's path, and remember it so
                # cleanup() can delete it later.
                import tempfile
                garbage, path = splittype(url)
                garbage, path = splithost(path or "")
                path, garbage = splitquery(path or "")
                path, garbage = splitattr(path or "")
                suffix = os.path.splitext(path)[1]
                (fd, filename) = tempfile.mkstemp(suffix)
                self.__tempfiles.append(filename)
                tfp = os.fdopen(fd, 'wb')
            try:
                result = filename, headers
                if self.tempcache is not None:
                    self.tempcache[url] = result
                # Copy in 8 KiB blocks; reporthook fires once up front and
                # then once per block read.
                bs = 1024*8
                size = -1
                read = 0
                blocknum = 0
                if reporthook:
                    if "content-length" in headers:
                        size = int(headers["Content-Length"])
                    reporthook(blocknum, bs, size)
                while 1:
                    block = fp.read(bs)
                    if block == "":
                        break
                    read += len(block)
                    tfp.write(block)
                    blocknum += 1
                    if reporthook:
                        reporthook(blocknum, bs, size)
            finally:
                tfp.close()
        finally:
            fp.close()
        del fp
        del tfp
        # raise exception if actual size does not match content-length header
        if size >= 0 and read < size:
            raise ContentTooShortError("retrieval incomplete: got only %i out "
                                       "of %i bytes" % (read, size), result)
        return result
    # Each method named open_<type> knows how to open that type of URL
    def open_http(self, url, data=None):
        """Use HTTP protocol."""
        import httplib
        user_passwd = None
        proxy_passwd= None
        # url is a plain string, or a (proxyhost, full-url) tuple that
        # open() prepared when a proxy is configured for this scheme.
        if isinstance(url, str):
            host, selector = splithost(url)
            if host:
                user_passwd, host = splituser(host)
                host = unquote(host)
            realhost = host
        else:
            host, selector = url
            # check whether the proxy contains authorization information
            proxy_passwd, host = splituser(host)
            # now we proceed with the url we want to obtain
            urltype, rest = splittype(selector)
            url = rest
            user_passwd = None
            if urltype.lower() != 'http':
                realhost = None
            else:
                realhost, rest = splithost(rest)
                if realhost:
                    user_passwd, realhost = splituser(realhost)
                if user_passwd:
                    selector = "%s://%s%s" % (urltype, realhost, rest)
                if proxy_bypass(realhost):
                    host = realhost
            #print "proxy via http:", host, selector
        if not host: raise IOError, ('http error', 'no host given')
        if proxy_passwd:
            import base64
            proxy_auth = base64.b64encode(proxy_passwd).strip()
        else:
            proxy_auth = None
        if user_passwd:
            import base64
            auth = base64.b64encode(user_passwd).strip()
        else:
            auth = None
        h = httplib.HTTP(host)
        if data is not None:
            h.putrequest('POST', selector)
            h.putheader('Content-Type', 'application/x-www-form-urlencoded')
            h.putheader('Content-Length', '%d' % len(data))
        else:
            h.putrequest('GET', selector)
        if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
        if auth: h.putheader('Authorization', 'Basic %s' % auth)
        if realhost: h.putheader('Host', realhost)
        for args in self.addheaders: h.putheader(*args)
        h.endheaders()
        if data is not None:
            h.send(data)
        errcode, errmsg, headers = h.getreply()
        fp = h.getfile()
        if errcode == -1:
            if fp: fp.close()
            # something went wrong with the HTTP status line
            raise IOError, ('http protocol error', 0,
                            'got a bad status line', None)
        # According to RFC 2616, "2xx" code indicates that the client's
        # request was successfully received, understood, and accepted.
        if (200 <= errcode < 300):
            return addinfourl(fp, headers, "http:" + url, errcode)
        else:
            if data is None:
                return self.http_error(url, fp, errcode, errmsg, headers)
            else:
                return self.http_error(url, fp, errcode, errmsg, headers, data)
    def http_error(self, url, fp, errcode, errmsg, headers, data=None):
        """Handle http errors.
        Derived class can override this, or provide specific handlers
        named http_error_DDD where DDD is the 3-digit error code."""
        # First check if there's a specific handler for this error
        name = 'http_error_%d' % errcode
        if hasattr(self, name):
            method = getattr(self, name)
            if data is None:
                result = method(url, fp, errcode, errmsg, headers)
            else:
                result = method(url, fp, errcode, errmsg, headers, data)
            if result: return result
        return self.http_error_default(url, fp, errcode, errmsg, headers)
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Default error handler: close the connection and raise IOError."""
        void = fp.read()
        fp.close()
        raise IOError, ('http error', errcode, errmsg, headers)
    # open_https is only available when the ssl module imported cleanly.
    if _have_ssl:
        def open_https(self, url, data=None):
            """Use HTTPS protocol."""
            import httplib
            user_passwd = None
            proxy_passwd = None
            if isinstance(url, str):
                host, selector = splithost(url)
                if host:
                    user_passwd, host = splituser(host)
                    host = unquote(host)
                realhost = host
            else:
                host, selector = url
                # here, we determine, whether the proxy contains authorization information
                proxy_passwd, host = splituser(host)
                urltype, rest = splittype(selector)
                url = rest
                user_passwd = None
                if urltype.lower() != 'https':
                    realhost = None
                else:
                    realhost, rest = splithost(rest)
                    if realhost:
                        user_passwd, realhost = splituser(realhost)
                    if user_passwd:
                        selector = "%s://%s%s" % (urltype, realhost, rest)
                #print "proxy via https:", host, selector
            if not host: raise IOError, ('https error', 'no host given')
            if proxy_passwd:
                import base64
                proxy_auth = base64.b64encode(proxy_passwd).strip()
            else:
                proxy_auth = None
            if user_passwd:
                import base64
                auth = base64.b64encode(user_passwd).strip()
            else:
                auth = None
            h = httplib.HTTPS(host, 0,
                              key_file=self.key_file,
                              cert_file=self.cert_file)
            if data is not None:
                h.putrequest('POST', selector)
                h.putheader('Content-Type',
                            'application/x-www-form-urlencoded')
                h.putheader('Content-Length', '%d' % len(data))
            else:
                h.putrequest('GET', selector)
            if proxy_auth: h.putheader('Proxy-Authorization', 'Basic %s' % proxy_auth)
            if auth: h.putheader('Authorization', 'Basic %s' % auth)
            if realhost: h.putheader('Host', realhost)
            for args in self.addheaders: h.putheader(*args)
            h.endheaders()
            if data is not None:
                h.send(data)
            errcode, errmsg, headers = h.getreply()
            fp = h.getfile()
            if errcode == -1:
                if fp: fp.close()
                # something went wrong with the HTTP status line
                raise IOError, ('http protocol error', 0,
                                'got a bad status line', None)
            # According to RFC 2616, "2xx" code indicates that the client's
            # request was successfully received, understood, and accepted.
            if (200 <= errcode < 300):
                return addinfourl(fp, headers, "https:" + url, errcode)
            else:
                if data is None:
                    return self.http_error(url, fp, errcode, errmsg, headers)
                else:
                    return self.http_error(url, fp, errcode, errmsg, headers,
                                           data)
    def open_file(self, url):
        """Use local file or FTP depending on form of URL."""
        if not isinstance(url, str):
            raise IOError, ('file error', 'proxy support for file protocol currently not implemented')
        # file://host/... with a non-local, non-empty host is treated as FTP.
        if url[:2] == '//' and url[2:3] != '/' and url[2:12].lower() != 'localhost/':
            return self.open_ftp(url)
        else:
            return self.open_local_file(url)
    def open_local_file(self, url):
        """Use local file."""
        import mimetypes, mimetools, email.utils
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        host, file = splithost(url)
        localname = url2pathname(file)
        try:
            stats = os.stat(localname)
        except OSError, e:
            raise IOError(e.errno, e.strerror, e.filename)
        size = stats.st_size
        modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
        mtype = mimetypes.guess_type(url)[0]
        # Synthesize HTTP-style headers for the local file.
        headers = mimetools.Message(StringIO(
            'Content-Type: %s\nContent-Length: %d\nLast-modified: %s\n' %
            (mtype or 'text/plain', size, modified)))
        if not host:
            urlfile = file
            if file[:1] == '/':
                urlfile = 'file://' + file
            return addinfourl(open(localname, 'rb'),
                              headers, urlfile)
        # A non-empty host is allowed only if it resolves to this machine.
        host, port = splitport(host)
        if not port \
           and socket.gethostbyname(host) in (localhost(), thishost()):
            urlfile = file
            if file[:1] == '/':
                urlfile = 'file://' + file
            return addinfourl(open(localname, 'rb'),
                              headers, urlfile)
        raise IOError, ('local file error', 'not on local host')
    def open_ftp(self, url):
        """Use FTP protocol."""
        if not isinstance(url, str):
            raise IOError, ('ftp error', 'proxy support for ftp protocol currently not implemented')
        import mimetypes, mimetools
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        host, path = splithost(url)
        if not host: raise IOError, ('ftp error', 'no host given')
        host, port = splitport(host)
        user, host = splituser(host)
        if user: user, passwd = splitpasswd(user)
        else: passwd = None
        host = unquote(host)
        user = unquote(user or '')
        passwd = unquote(passwd or '')
        host = socket.gethostbyname(host)
        if not port:
            import ftplib
            port = ftplib.FTP_PORT
        else:
            port = int(port)
        path, attrs = splitattr(path)
        path = unquote(path)
        dirs = path.split('/')
        dirs, file = dirs[:-1], dirs[-1]
        if dirs and not dirs[0]: dirs = dirs[1:]
        if dirs and not dirs[0]: dirs[0] = '/'
        # Connections are cached per (user, host, port, directory) so that
        # repeated fetches from the same place reuse the session.
        key = user, host, port, '/'.join(dirs)
        # XXX thread unsafe!
        if len(self.ftpcache) > MAXFTPCACHE:
            # Prune the cache, rather arbitrarily
            for k in self.ftpcache.keys():
                if k != key:
                    v = self.ftpcache[k]
                    del self.ftpcache[k]
                    v.close()
        try:
            if not key in self.ftpcache:
                self.ftpcache[key] = \
                    ftpwrapper(user, passwd, host, port, dirs)
            if not file: type = 'D'
            else: type = 'I'
            for attr in attrs:
                attr, value = splitvalue(attr)
                if attr.lower() == 'type' and \
                   value in ('a', 'A', 'i', 'I', 'd', 'D'):
                    type = value.upper()
            (fp, retrlen) = self.ftpcache[key].retrfile(file, type)
            mtype = mimetypes.guess_type("ftp:" + url)[0]
            headers = ""
            if mtype:
                headers += "Content-Type: %s\n" % mtype
            if retrlen is not None and retrlen >= 0:
                headers += "Content-Length: %d\n" % retrlen
            headers = mimetools.Message(StringIO(headers))
            return addinfourl(fp, headers, "ftp:" + url)
        except ftperrors(), msg:
            raise IOError, ('ftp error', msg), sys.exc_info()[2]
    def open_data(self, url, data=None):
        """Use "data" URL."""
        if not isinstance(url, str):
            raise IOError, ('data error', 'proxy support for data protocol currently not implemented')
        # ignore POSTed data
        #
        # syntax of data URLs:
        # dataurl   := "data:" [ mediatype ] [ ";base64" ] "," data
        # mediatype := [ type "/" subtype ] *( ";" parameter )
        # data      := *urlchar
        # parameter := attribute "=" value
        import mimetools
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        try:
            [type, data] = url.split(',', 1)
        except ValueError:
            raise IOError, ('data error', 'bad data URL')
        if not type:
            type = 'text/plain;charset=US-ASCII'
        semi = type.rfind(';')
        if semi >= 0 and '=' not in type[semi:]:
            encoding = type[semi+1:]
            type = type[:semi]
        else:
            encoding = ''
        msg = []
        msg.append('Date: %s'%time.strftime('%a, %d %b %Y %T GMT',
                                            time.gmtime(time.time())))
        msg.append('Content-type: %s' % type)
        if encoding == 'base64':
            import base64
            data = base64.decodestring(data)
        else:
            data = unquote(data)
        msg.append('Content-Length: %d' % len(data))
        msg.append('')
        msg.append(data)
        msg = '\n'.join(msg)
        f = StringIO(msg)
        headers = mimetools.Message(f, 0)
        #f.fileno = None     # needed for addinfourl
        return addinfourl(f, headers, url)
class FancyURLopener(URLopener):
    """Derived class with handlers for errors we can handle (perhaps)."""
    def __init__(self, *args, **kwargs):
        URLopener.__init__(self, *args, **kwargs)
        # auth_cache maps "realm@host" -> (user, passwd); tries/maxtries
        # bound the redirect recursion handled in http_error_302.
        self.auth_cache = {}
        self.tries = 0
        self.maxtries = 10
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        """Default error handling -- don't raise an exception."""
        return addinfourl(fp, headers, "http:" + url, errcode)
    def http_error_302(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 302 -- relocated (temporarily)."""
        # Guard against redirect loops: after maxtries consecutive
        # redirects, report a synthetic 500 instead of recursing forever.
        self.tries += 1
        if self.maxtries and self.tries >= self.maxtries:
            if hasattr(self, "http_error_500"):
                meth = self.http_error_500
            else:
                meth = self.http_error_default
            self.tries = 0
            return meth(url, fp, 500,
                        "Internal Server Error: Redirect Recursion", headers)
        result = self.redirect_internal(url, fp, errcode, errmsg, headers,
                                        data)
        self.tries = 0
        return result
    def redirect_internal(self, url, fp, errcode, errmsg, headers, data):
        # Returns None when the response carries no redirect target.
        if 'location' in headers:
            newurl = headers['location']
        elif 'uri' in headers:
            newurl = headers['uri']
        else:
            return
        # Drain and close the old response before following the redirect.
        void = fp.read()
        fp.close()
        # In case the server sent a relative URL, join with original:
        newurl = basejoin(self.type + ":" + url, newurl)
        return self.open(newurl)
    def http_error_301(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 301 -- also relocated (permanently)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
    def http_error_303(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 303 -- also relocated (essentially identical to 302)."""
        return self.http_error_302(url, fp, errcode, errmsg, headers, data)
    def http_error_307(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 307 -- relocated, but turn POST into error."""
        if data is None:
            return self.http_error_302(url, fp, errcode, errmsg, headers, data)
        else:
            return self.http_error_default(url, fp, errcode, errmsg, headers)
    def http_error_401(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 401 -- authentication required.
        This function supports Basic authentication only."""
        # Anything we cannot handle falls through to http_error_default,
        # which raises IOError.
        if not 'www-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['www-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        name = 'retry_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)
    def http_error_407(self, url, fp, errcode, errmsg, headers, data=None):
        """Error 407 -- proxy authentication required.
        This function supports Basic authentication only."""
        if not 'proxy-authenticate' in headers:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        stuff = headers['proxy-authenticate']
        import re
        match = re.match('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', stuff)
        if not match:
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        scheme, realm = match.groups()
        if scheme.lower() != 'basic':
            URLopener.http_error_default(self, url, fp,
                                         errcode, errmsg, headers)
        name = 'retry_proxy_' + self.type + '_basic_auth'
        if data is None:
            return getattr(self,name)(url, realm)
        else:
            return getattr(self,name)(url, realm, data)
    def retry_proxy_http_basic_auth(self, url, realm, data=None):
        # Re-issue the request with user:passwd spliced into the proxy URL.
        host, selector = splithost(url)
        newurl = 'http://' + host + selector
        proxy = self.proxies['http']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['http'] = 'http://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_proxy_https_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
        newurl = 'https://' + host + selector
        proxy = self.proxies['https']
        urltype, proxyhost = splittype(proxy)
        proxyhost, proxyselector = splithost(proxyhost)
        i = proxyhost.find('@') + 1
        proxyhost = proxyhost[i:]
        user, passwd = self.get_user_passwd(proxyhost, realm, i)
        if not (user or passwd): return None
        proxyhost = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + proxyhost
        self.proxies['https'] = 'https://' + proxyhost + proxyselector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_http_basic_auth(self, url, realm, data=None):
        # Re-issue the request with user:passwd spliced into the host part.
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'http://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def retry_https_basic_auth(self, url, realm, data=None):
        host, selector = splithost(url)
        i = host.find('@') + 1
        host = host[i:]
        user, passwd = self.get_user_passwd(host, realm, i)
        if not (user or passwd): return None
        host = quote(user, safe='') + ':' + quote(passwd, safe='') + '@' + host
        newurl = 'https://' + host + selector
        if data is None:
            return self.open(newurl)
        else:
            return self.open(newurl, data)
    def get_user_passwd(self, host, realm, clear_cache = 0):
        # Cached credentials are keyed by "realm@host" (host lowercased).
        key = realm + '@' + host.lower()
        if key in self.auth_cache:
            if clear_cache:
                del self.auth_cache[key]
            else:
                return self.auth_cache[key]
        user, passwd = self.prompt_user_passwd(host, realm)
        if user or passwd: self.auth_cache[key] = (user, passwd)
        return user, passwd
    def prompt_user_passwd(self, host, realm):
        """Override this in a GUI environment!"""
        import getpass
        try:
            user = raw_input("Enter username for %s at %s: " % (realm,
                                                                host))
            passwd = getpass.getpass("Enter password for %s in %s at %s: " %
                (user, realm, host))
            return user, passwd
        except KeyboardInterrupt:
            print
            return None, None
# Utility functions
_localhost = None
def localhost():
    """Return the IP address of the magic hostname 'localhost'.

    The first lookup is cached for the life of the process.
    """
    global _localhost
    if _localhost is not None:
        return _localhost
    _localhost = socket.gethostbyname('localhost')
    return _localhost
_thishost = None
def thishost():
    """Return the IP address of the current host.

    Resolved once via DNS and cached for the life of the process.
    """
    global _thishost
    if _thishost is not None:
        return _thishost
    _thishost = socket.gethostbyname(socket.gethostname())
    return _thishost
_ftperrors = None
def ftperrors():
    """Return the set of errors raised by the FTP class.

    Computed lazily (importing ftplib on first use) and cached thereafter.
    """
    global _ftperrors
    if _ftperrors is not None:
        return _ftperrors
    import ftplib
    _ftperrors = ftplib.all_errors
    return _ftperrors
_noheaders = None
def noheaders():
    """Return an empty mimetools.Message object (created once, then cached)."""
    global _noheaders
    if _noheaders is not None:
        return _noheaders
    import mimetools
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    _noheaders = mimetools.Message(StringIO(), 0)
    _noheaders.fp.close()   # Recycle file descriptor
    return _noheaders
# Utility classes
class ftpwrapper:
    """Class used by open_ftp() for cache of open FTP connections."""
    def __init__(self, user, passwd, host, port, dirs,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.user = user
        self.passwd = passwd
        self.host = host
        self.port = port
        self.dirs = dirs
        self.timeout = timeout
        self.init()
    def init(self):
        """(Re)connect, log in and cd into the configured directory chain."""
        import ftplib
        # busy is set while a transfer is in flight; endtransfer() clears it.
        self.busy = 0
        self.ftp = ftplib.FTP()
        self.ftp.connect(self.host, self.port, self.timeout)
        self.ftp.login(self.user, self.passwd)
        for dir in self.dirs:
            self.ftp.cwd(dir)
    def retrfile(self, file, type):
        """Start retrieving *file* ('D' = directory listing, else binary/ascii).

        Returns (file-like object, length or None); reading the object to
        the end (or closing it) finishes the transfer via endtransfer().
        """
        import ftplib
        self.endtransfer()
        if type in ('d', 'D'): cmd = 'TYPE A'; isdir = 1
        else: cmd = 'TYPE ' + type; isdir = 0
        # A stale cached connection may have timed out; reconnect once.
        try:
            self.ftp.voidcmd(cmd)
        except ftplib.all_errors:
            self.init()
            self.ftp.voidcmd(cmd)
        conn = None
        if file and not isdir:
            # Try to retrieve as a file
            try:
                cmd = 'RETR ' + file
                conn = self.ftp.ntransfercmd(cmd)
            except ftplib.error_perm, reason:
                # 550 means "not a plain file"; fall through to a listing.
                if str(reason)[:3] != '550':
                    raise IOError, ('ftp error', reason), sys.exc_info()[2]
        if not conn:
            # Set transfer mode to ASCII!
            self.ftp.voidcmd('TYPE A')
            # Try a directory listing. Verify that directory exists.
            if file:
                pwd = self.ftp.pwd()
                try:
                    try:
                        self.ftp.cwd(file)
                    except ftplib.error_perm, reason:
                        raise IOError, ('ftp error', reason), sys.exc_info()[2]
                finally:
                    self.ftp.cwd(pwd)
                cmd = 'LIST ' + file
            else:
                cmd = 'LIST'
            conn = self.ftp.ntransfercmd(cmd)
        self.busy = 1
        # Pass back both a suitably decorated object and a retrieval length
        return (addclosehook(conn[0].makefile('rb'),
                             self.endtransfer), conn[1])
    def endtransfer(self):
        """Consume the end-of-transfer response, if a transfer was active."""
        if not self.busy:
            return
        self.busy = 0
        try:
            self.ftp.voidresp()
        except ftperrors():
            pass
    def close(self):
        """Finish any pending transfer and close the control connection."""
        self.endtransfer()
        try:
            self.ftp.close()
        except ftperrors():
            pass
class addbase:
    """Base class for addinfo and addclosehook.

    Wraps a file-like object and delegates its reading interface by
    binding the wrapped object's methods directly onto the instance.
    """

    def __init__(self, fp):
        _missing = object()
        self.fp = fp
        self.read = fp.read
        self.readline = fp.readline
        readlines = getattr(fp, "readlines", _missing)
        if readlines is not _missing:
            self.readlines = readlines
        fileno = getattr(fp, "fileno", _missing)
        # Objects without a real file descriptor report None.
        self.fileno = (lambda: None) if fileno is _missing else fileno
        iterfn = getattr(fp, "__iter__", _missing)
        if iterfn is not _missing:
            self.__iter__ = iterfn
            nextfn = getattr(fp, "next", _missing)
            if nextfn is not _missing:
                self.next = nextfn

    def __repr__(self):
        return '<%s at %r whose fp = %r>' % (self.__class__.__name__,
                                             id(self), self.fp)

    def close(self):
        # Drop the bound methods so any further use fails fast.
        self.read = None
        self.readline = None
        self.readlines = None
        self.fileno = None
        fp, self.fp = self.fp, None
        if fp:
            fp.close()
class addclosehook(addbase):
    """File wrapper that invokes a callback (at most once) on close."""

    def __init__(self, fp, closehook, *hookargs):
        addbase.__init__(self, fp)
        self.closehook = closehook
        self.hookargs = hookargs

    def close(self):
        addbase.close(self)
        hook, args = self.closehook, self.hookargs
        if hook:
            hook(*args)
            # Clear so a second close() cannot re-run the hook.
            self.closehook = None
            self.hookargs = None
class addinfo(addbase):
    """File wrapper that exposes a headers object via info()."""

    def __init__(self, fp, headers):
        addbase.__init__(self, fp)
        self.headers = headers

    def info(self):
        """Return the headers object supplied at construction time."""
        return self.headers
class addinfourl(addbase):
    """File wrapper adding info(), geturl() and getcode() accessors."""

    def __init__(self, fp, headers, url, code=None):
        addbase.__init__(self, fp)
        self.headers = headers
        self.url = url
        self.code = code

    def info(self):
        """Return the response headers object."""
        return self.headers

    def getcode(self):
        """Return the response code (None when not applicable)."""
        return self.code

    def geturl(self):
        """Return the URL this response was actually retrieved from."""
        return self.url
# Utilities to parse URLs (most of these return None for missing parts):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
# splittype('type:opaquestring') --> 'type', 'opaquestring'
# splithost('//host[:port]/path') --> 'host[:port]', '/path'
# splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'
# splitpasswd('user:passwd') -> 'user', 'passwd'
# splitport('host:port') --> 'host', 'port'
# splitquery('/path?query') --> '/path', 'query'
# splittag('/path#tag') --> '/path', 'tag'
# splitattr('/path;attr1=value1;attr2=value2;...') ->
# '/path', ['attr1=value1', 'attr2=value2', ...]
# splitvalue('attr=value') --> 'attr', 'value'
# unquote('abc%20def') -> 'abc def'
# quote('abc def') -> 'abc%20def')
# Define _is_unicode() according to whether this interpreter has the
# Python 2 ``unicode`` builtin; without it, nothing can be unicode.
try:
    unicode
except NameError:
    def _is_unicode(x):
        # No unicode type available on this interpreter.
        return 0
else:
    def _is_unicode(x):
        # True when x is a Python 2 unicode string.
        return isinstance(x, unicode)
def toBytes(url):
    """toBytes(u"URL") --> 'URL'."""
    # Most URL schemes require ASCII. If that changes, the conversion
    # can be relaxed
    if not _is_unicode(url):
        return url
    try:
        return url.encode("ASCII")
    except UnicodeError:
        raise UnicodeError("URL " + repr(url) +
                           " contains non-ASCII characters")
def unwrap(url):
    """unwrap('<URL:type://host/path>') --> 'type://host/path'."""
    url = url.strip()
    # Strip a surrounding '<...>' pair, then an optional 'URL:' prefix.
    if url.startswith('<') and url.endswith('>'):
        url = url[1:-1].strip()
    if url.startswith('URL:'):
        url = url[4:].strip()
    return url
_typeprog = None
def splittype(url):
    """splittype('type:opaquestring') --> 'type', 'opaquestring'."""
    global _typeprog
    if _typeprog is None:
        # Compile lazily and cache at module level.
        import re
        _typeprog = re.compile('^([^/:]+):')
    match = _typeprog.match(url)
    if match is None:
        return None, url
    scheme = match.group(1)
    # The scheme is case-insensitive; normalize to lower case.
    return scheme.lower(), url[len(scheme) + 1:]
_hostprog = None
def splithost(url):
    """splithost('//host[:port]/path') --> 'host[:port]', '/path'."""
    global _hostprog
    if _hostprog is None:
        # Compile lazily and cache at module level.
        import re
        _hostprog = re.compile('^//([^/?]*)(.*)$')
    match = _hostprog.match(url)
    if match is None:
        return None, url
    return match.group(1, 2)
_userprog = None
def splituser(host):
    """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
    global _userprog
    if _userprog is None:
        # Greedy first group: split on the LAST '@' in the string.
        import re
        _userprog = re.compile('^(.*)@(.*)$')
    match = _userprog.match(host)
    if match is None:
        return None, host
    # Credentials may be percent-encoded; decode both halves.
    return map(unquote, match.group(1, 2))
_passwdprog = None
def splitpasswd(user):
    """splitpasswd('user:passwd') -> 'user', 'passwd'."""
    global _passwdprog
    if _passwdprog is None:
        # Split on the FIRST ':' only; the password may contain ':'.
        import re
        _passwdprog = re.compile('^([^:]*):(.*)$')
    match = _passwdprog.match(user)
    if match is None:
        return user, None
    return match.group(1, 2)
# splittag('/path#tag') --> '/path', 'tag'
_portprog = None
def splitport(host):
    """splitport('host:port') --> 'host', 'port'."""
    global _portprog
    if _portprog is None:
        # Only an all-digit suffix after ':' counts as a port.
        import re
        _portprog = re.compile('^(.*):([0-9]+)$')
    match = _portprog.match(host)
    if match is None:
        return host, None
    return match.group(1, 2)
_nportprog = None
def splitnport(host, defport=-1):
    """Split host and port, returning numeric port.

    Return given default port if no ':' found; defaults to -1.
    Return numerical port if a valid number are found after ':'.
    Return None if ':' but not a valid number.
    """
    global _nportprog
    if _nportprog is None:
        import re
        _nportprog = re.compile('^(.*):(.*)$')
    match = _nportprog.match(host)
    if match:
        host, port = match.group(1, 2)
        try:
            if not port:
                # 'host:' with nothing after the colon is invalid too.
                # Call form of raise: the Python 2-only statement form
                # ("raise ValueError, ...") is a syntax error on Python 3.
                raise ValueError("no digits")
            nport = int(port)
        except ValueError:
            nport = None
        return host, nport
    return host, defport
_queryprog = None
def splitquery(url):
    """splitquery('/path?query') --> '/path', 'query'."""
    global _queryprog
    if _queryprog is None:
        import re
        # Raw string literal: '\?' inside a plain string is an invalid
        # escape sequence (warns, and errors on newer Pythons); the
        # compiled pattern is unchanged.
        _queryprog = re.compile(r'^(.*)\?([^?]*)$')
    match = _queryprog.match(url)
    if match:
        return match.group(1, 2)
    return url, None
_tagprog = None
def splittag(url):
    """splittag('/path#tag') --> '/path', 'tag'."""
    global _tagprog
    if _tagprog is None:
        # Greedy first group: split on the LAST '#'.
        import re
        _tagprog = re.compile('^(.*)#([^#]*)$')
    match = _tagprog.match(url)
    if match is None:
        return url, None
    return match.group(1, 2)
def splitattr(url):
    """splitattr('/path;attr1=value1;attr2=value2;...') ->
    '/path', ['attr1=value1', 'attr2=value2', ...]."""
    pieces = url.split(';')
    # First piece is the path proper; everything after is an attribute.
    return pieces[0], pieces[1:]
_valueprog = None
def splitvalue(attr):
    """splitvalue('attr=value') --> 'attr', 'value'."""
    global _valueprog
    if _valueprog is None:
        # Split on the FIRST '='; the value may contain '='.
        import re
        _valueprog = re.compile('^([^=]*)=(.*)$')
    match = _valueprog.match(attr)
    if match is None:
        return attr, None
    return match.group(1, 2)
# Map every two-digit hex string (both cases) to its character.
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(('%02X' % i, chr(i)) for i in range(256))

def unquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # res[0] needs no decoding; every later piece begins with the two
    # hex digits that followed a '%'.  Use range() instead of the
    # Python 2-only xrange() -- identical behavior on both versions.
    for i in range(1, len(res)):
        item = res[i]
        try:
            res[i] = _hextochr[item[:2]] + item[2:]
        except KeyError:
            # Not a valid escape: keep the '%' literally.
            res[i] = '%' + item
        except UnicodeDecodeError:
            # Python 2 only: unicode input mixed with high-bit escapes.
            res[i] = unichr(int(item[:2], 16)) + item[2:]
    return "".join(res)
def unquote_plus(s):
    """unquote('%7e/abc+def') -> '~/abc def'"""
    # '+' encodes a space in query strings; translate it first, then
    # decode the remaining %XX escapes.
    return unquote(s.replace('+', ' '))
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')
_safemaps = {}

def quote(s, safe = '/'):
    """quote('abc def') -> 'abc%20def'

    Each part of a URL, e.g. the path info, the query, etc., has a
    different set of reserved characters that must be quoted (RFC 2396):

        reserved = ";" | "/" | "?" | ":" | "@" | "&" | "=" | "+" |
                   "$" | ","

    By default, the quote function is intended for quoting the path
    section of a URL, so it does not encode '/': in typical usage the
    existing slashes of a path are meant to stay slashes.
    """
    # One translation table per distinct 'safe' argument, cached.
    cachekey = (safe, always_safe)
    safe_map = _safemaps.get(cachekey)
    if safe_map is None:
        all_safe = safe + always_safe
        safe_map = {}
        for code in range(256):
            ch = chr(code)
            safe_map[ch] = ch if ch in all_safe else '%%%02X' % code
        _safemaps[cachekey] = safe_map
    return ''.join(map(safe_map.__getitem__, s))
def quote_plus(s, safe = ''):
    """Quote the query fragment of a URL; replacing ' ' with '+'"""
    if ' ' not in s:
        return quote(s, safe)
    # Let quote() leave spaces alone, then encode them as '+'.
    return quote(s, safe + ' ').replace(' ', '+')
def urlencode(query,doseq=0):
    """Encode a sequence of two-element tuples or dictionary into a URL query string.

    If any values in the query arg are sequences and doseq is true, each
    sequence element is converted to a separate parameter.

    If the query arg is a sequence of two-element tuples, the order of the
    parameters in the output will match the order of parameters in the
    input.
    """

    if hasattr(query,"items"):
        # mapping objects
        query = query.items()
    else:
        # it's a bother at times that strings and string-like objects are
        # sequences...
        try:
            # non-sequence items should not work with len()
            # non-empty strings will fail this
            if len(query) and not isinstance(query[0], tuple):
                raise TypeError
            # zero-length sequences of all types will get here and succeed,
            # but that's a minor nit - since the original implementation
            # allowed empty dicts that type of behavior probably should be
            # preserved for consistency
        except TypeError:
            ty,va,tb = sys.exc_info()
            raise TypeError, "not a valid non-string sequence or mapping object", tb

    # Accumulate 'key=value' pairs, percent-encoded, joined with '&'.
    l = []
    if not doseq:
        # preserve old behavior: every value stringified as one unit
        for k, v in query:
            k = quote_plus(str(k))
            v = quote_plus(str(v))
            l.append(k + '=' + v)
    else:
        for k, v in query:
            k = quote_plus(str(k))
            if isinstance(v, str):
                v = quote_plus(v)
                l.append(k + '=' + v)
            elif _is_unicode(v):
                # is there a reasonable way to convert to ASCII?
                # encode generates a string, but "replace" or "ignore"
                # lose information and "strict" can raise UnicodeError
                v = quote_plus(v.encode("ASCII","replace"))
                l.append(k + '=' + v)
            else:
                try:
                    # is this a sufficient test for sequence-ness?
                    x = len(v)
                except TypeError:
                    # not a sequence
                    v = quote_plus(str(v))
                    l.append(k + '=' + v)
                else:
                    # loop over the sequence: one 'k=elt' pair per element
                    for elt in v:
                        l.append(k + '=' + quote_plus(str(elt)))
    return '&'.join(l)
# Proxy handling
def getproxies_environment():
    """Return a dictionary of scheme -> proxy server URL mappings.

    Scan the environment for variables named <scheme>_proxy;
    this seems to be the standard convention.  If you need a
    different way, you can pass a proxies dictionary to the
    [Fancy]URLopener constructor.
    """
    proxies = {}
    for name, value in os.environ.items():
        lowered = name.lower()
        # Only non-empty values count; the scheme is the variable name
        # minus its '_proxy' suffix.
        if value and lowered.endswith('_proxy'):
            proxies[lowered[:-6]] = value
    return proxies
def proxy_bypass_environment(host):
    """Test if proxies should not be used for a particular host.

    Checks the environment for a variable named no_proxy, which should
    be a list of DNS suffixes separated by commas, or '*' for all hosts.
    """
    no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
    # '*' is special case for always bypass
    if no_proxy == '*':
        return 1
    # strip port off host, then test each configured DNS suffix against
    # both the bare host name and the original host[:port] string
    hostonly, _port = splitport(host)
    for suffix in no_proxy.split(','):
        if suffix and (hostonly.endswith(suffix) or host.endswith(suffix)):
            return 1
    # otherwise, don't bypass
    return 0
if sys.platform == 'darwin':
    def _CFSetup(sc):
        """Declare ctypes argtypes/restypes for the CoreFoundation and
        SystemConfiguration functions used below, so pointers are
        passed and returned correctly."""
        from ctypes import c_int32, c_void_p, c_char_p, c_int
        sc.CFStringCreateWithCString.argtypes = [ c_void_p, c_char_p, c_int32 ]
        sc.CFStringCreateWithCString.restype = c_void_p
        sc.SCDynamicStoreCopyProxies.argtypes = [ c_void_p ]
        sc.SCDynamicStoreCopyProxies.restype = c_void_p
        sc.CFDictionaryGetValue.argtypes = [ c_void_p, c_void_p ]
        sc.CFDictionaryGetValue.restype = c_void_p
        sc.CFStringGetLength.argtypes = [ c_void_p ]
        sc.CFStringGetLength.restype = c_int32
        sc.CFStringGetCString.argtypes = [ c_void_p, c_char_p, c_int32, c_int32 ]
        sc.CFStringGetCString.restype = c_int32
        sc.CFNumberGetValue.argtypes = [ c_void_p, c_int, c_void_p ]
        sc.CFNumberGetValue.restype = c_int32
        sc.CFRelease.argtypes = [ c_void_p ]
        sc.CFRelease.restype = None

    def _CStringFromCFString(sc, value):
        """Copy a CFString's contents into a Python byte string."""
        from ctypes import create_string_buffer
        length = sc.CFStringGetLength(value) + 1  # +1 for the terminating NUL
        buff = create_string_buffer(length)
        sc.CFStringGetCString(value, buff, length, 0)
        return buff.value

    def _CFNumberToInt32(sc, cfnum):
        """Extract a CFNumber's value as a 32-bit signed int."""
        from ctypes import byref, c_int
        val = c_int()
        kCFNumberSInt32Type = 3   # CFNumber type-code constant
        sc.CFNumberGetValue(cfnum, kCFNumberSInt32Type, byref(val))
        return val.value
def proxy_bypass_macosx_sysconf(host):
"""
Return True iff this host shouldn't be accessed using a proxy
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
"""
from ctypes import cdll
from ctypes.util import find_library
import re
import socket
from fnmatch import fnmatch
def ip2num(ipAddr):
parts = ipAddr.split('.')
parts = map(int, parts)
if len(parts) != 4:
parts = (parts + [0, 0, 0, 0])[:4]
return (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
sc = cdll.LoadLibrary(find_library("SystemConfiguration"))
_CFSetup(sc)
hostIP = None
if not sc:
return False
kSCPropNetProxiesExceptionsList = sc.CFStringCreateWithCString(0, "ExceptionsList", 0)
kSCPropNetProxiesExcludeSimpleHostnames = sc.CFStringCreateWithCString(0,
"ExcludeSimpleHostnames", 0)
proxyDict = sc.SCDynamicStoreCopyProxies(None)
if proxyDict is None:
return False
try:
# Check for simple host names:
if '.' not in host:
exclude_simple = sc.CFDictionaryGetValue(proxyDict,
kSCPropNetProxiesExcludeSimpleHostnames)
if exclude_simple and _CFNumberToInt32(sc, exclude_simple):
return True
# Check the exceptions list:
exceptions = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesExceptionsList)
if exceptions:
# Items in the list are strings like these: *.local, 169.254/16
for index in xrange(sc.CFArrayGetCount(exceptions)):
value = sc.CFArrayGetValueAtIndex(exceptions, index)
if not value: continue
value = _CStringFromCFString(sc, value)
m = re.match(r"(\d+(?:\.\d+)*)(/\d+)?", value)
if m is not None:
if hostIP is None:
hostIP = socket.gethostbyname(host)
hostIP = ip2num(hostIP)
base = ip2num(m.group(1))
mask = int(m.group(2)[1:])
mask = 32 - mask
if (hostIP >> mask) == (base >> mask):
return True
elif fnmatch(host, value):
return True
return False
finally:
sc.CFRelease(kSCPropNetProxiesExceptionsList)
sc.CFRelease(kSCPropNetProxiesExcludeSimpleHostnames)
def getproxies_macosx_sysconf():
"""Return a dictionary of scheme -> proxy server URL mappings.
This function uses the MacOSX framework SystemConfiguration
to fetch the proxy information.
"""
from ctypes import cdll
from ctypes.util import find_library
sc = cdll.LoadLibrary(find_library("SystemConfiguration"))
_CFSetup(sc)
if not sc:
return {}
kSCPropNetProxiesHTTPEnable = sc.CFStringCreateWithCString(0, "HTTPEnable", 0)
kSCPropNetProxiesHTTPProxy = sc.CFStringCreateWithCString(0, "HTTPProxy", 0)
kSCPropNetProxiesHTTPPort = sc.CFStringCreateWithCString(0, "HTTPPort", 0)
kSCPropNetProxiesHTTPSEnable = sc.CFStringCreateWithCString(0, "HTTPSEnable", 0)
kSCPropNetProxiesHTTPSProxy = sc.CFStringCreateWithCString(0, "HTTPSProxy", 0)
kSCPropNetProxiesHTTPSPort = sc.CFStringCreateWithCString(0, "HTTPSPort", 0)
kSCPropNetProxiesFTPEnable = sc.CFStringCreateWithCString(0, "FTPEnable", 0)
kSCPropNetProxiesFTPPassive = sc.CFStringCreateWithCString(0, "FTPPassive", 0)
kSCPropNetProxiesFTPPort = sc.CFStringCreateWithCString(0, "FTPPort", 0)
kSCPropNetProxiesFTPProxy = sc.CFStringCreateWithCString(0, "FTPProxy", 0)
kSCPropNetProxiesGopherEnable = sc.CFStringCreateWithCString(0, "GopherEnable", 0)
kSCPropNetProxiesGopherPort = sc.CFStringCreateWithCString(0, "GopherPort", 0)
kSCPropNetProxiesGopherProxy = sc.CFStringCreateWithCString(0, "GopherProxy", 0)
proxies = {}
proxyDict = sc.SCDynamicStoreCopyProxies(None)
try:
# HTTP:
enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPEnable)
if enabled and _CFNumberToInt32(sc, enabled):
proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPProxy)
port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPPort)
if proxy:
proxy = _CStringFromCFString(sc, proxy)
if port:
port = _CFNumberToInt32(sc, port)
proxies["http"] = "http://%s:%i" % (proxy, port)
else:
proxies["http"] = "http://%s" % (proxy, )
# HTTPS:
enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSEnable)
if enabled and _CFNumberToInt32(sc, enabled):
proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSProxy)
port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesHTTPSPort)
if proxy:
proxy = _CStringFromCFString(sc, proxy)
if port:
port = _CFNumberToInt32(sc, port)
proxies["https"] = "http://%s:%i" % (proxy, port)
else:
proxies["https"] = "http://%s" % (proxy, )
# FTP:
enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPEnable)
if enabled and _CFNumberToInt32(sc, enabled):
proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPProxy)
port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesFTPPort)
if proxy:
proxy = _CStringFromCFString(sc, proxy)
if port:
port = _CFNumberToInt32(sc, port)
proxies["ftp"] = "http://%s:%i" % (proxy, port)
else:
proxies["ftp"] = "http://%s" % (proxy, )
# Gopher:
enabled = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherEnable)
if enabled and _CFNumberToInt32(sc, enabled):
proxy = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherProxy)
port = sc.CFDictionaryGetValue(proxyDict, kSCPropNetProxiesGopherPort)
if proxy:
proxy = _CStringFromCFString(sc, proxy)
if port:
port = _CFNumberToInt32(sc, port)
proxies["gopher"] = "http://%s:%i" % (proxy, port)
else:
proxies["gopher"] = "http://%s" % (proxy, )
finally:
sc.CFRelease(proxyDict)
sc.CFRelease(kSCPropNetProxiesHTTPEnable)
sc.CFRelease(kSCPropNetProxiesHTTPProxy)
sc.CFRelease(kSCPropNetProxiesHTTPPort)
sc.CFRelease(kSCPropNetProxiesFTPEnable)
sc.CFRelease(kSCPropNetProxiesFTPPassive)
sc.CFRelease(kSCPropNetProxiesFTPPort)
sc.CFRelease(kSCPropNetProxiesFTPProxy)
sc.CFRelease(kSCPropNetProxiesGopherEnable)
sc.CFRelease(kSCPropNetProxiesGopherPort)
sc.CFRelease(kSCPropNetProxiesGopherProxy)
return proxies
    def proxy_bypass(host):
        """Return whether 'host' should bypass the proxy.

        Environment settings win: when any <scheme>_proxy variable is
        set, only no_proxy is consulted, not the system configuration.
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_macosx_sysconf(host)

    def getproxies():
        # Environment variables take precedence over system preferences.
        return getproxies_environment() or getproxies_macosx_sysconf()
elif os.name == 'nt':
    def getproxies_registry():
        """Return a dictionary of scheme -> proxy server URL mappings.

        Win32 uses the registry to store proxies.
        """
        proxies = {}
        try:
            import _winreg
        except ImportError:
            # Std module, so should be around - but you never know!
            return proxies
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            if proxyEnable:
                # Returned as Unicode but problems if not converted to ASCII
                proxyServer = str(_winreg.QueryValueEx(internetSettings,
                                                       'ProxyServer')[0])
                if '=' in proxyServer:
                    # Per-protocol settings: 'http=host:port;ftp=host:port;...'
                    for p in proxyServer.split(';'):
                        protocol, address = p.split('=', 1)
                        # See if address has a type:// prefix
                        import re
                        if not re.match('^([^/:]+)://', address):
                            address = '%s://%s' % (protocol, address)
                        proxies[protocol] = address
                else:
                    # Use one setting for all protocols
                    if proxyServer[:5] == 'http:':
                        proxies['http'] = proxyServer
                    else:
                        proxies['http'] = 'http://%s' % proxyServer
                        proxies['ftp'] = 'ftp://%s' % proxyServer
            internetSettings.Close()
        except (WindowsError, ValueError, TypeError):
            # Either registry key not found etc, or the value in an
            # unexpected format.
            # proxies already set up to be empty so nothing to do
            pass
        return proxies
    def getproxies():
        """Return a dictionary of scheme -> proxy server URL mappings.

        Returns settings gathered from the environment, if specified,
        or the registry.
        """
        # Environment variables take precedence over registry settings.
        return getproxies_environment() or getproxies_registry()
    def proxy_bypass_registry(host):
        """Return 1 if 'host' matches the registry ProxyOverride list."""
        try:
            import _winreg
            import re
        except ImportError:
            # Std modules, so should be around - but you never know!
            return 0
        try:
            internetSettings = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER,
                r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
            proxyEnable = _winreg.QueryValueEx(internetSettings,
                                               'ProxyEnable')[0]
            proxyOverride = str(_winreg.QueryValueEx(internetSettings,
                                                     'ProxyOverride')[0])
            # ^^^^ Returned as Unicode but problems if not converted to ASCII
        except WindowsError:
            return 0
        if not proxyEnable or not proxyOverride:
            return 0
        # try to make a host list from name and IP address.
        rawHost, port = splitport(host)
        host = [rawHost]
        try:
            addr = socket.gethostbyname(rawHost)
            if addr != rawHost:
                host.append(addr)
        except socket.error:
            pass
        try:
            fqdn = socket.getfqdn(rawHost)
            if fqdn != rawHost:
                host.append(fqdn)
        except socket.error:
            pass
        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(';')
        i = 0
        while i < len(proxyOverride):
            if proxyOverride[i] == '<local>':
                proxyOverride[i:i+1] = ['localhost',
                                        '127.0.0.1',
                                        socket.gethostname(),
                                        socket.gethostbyname(
                                            socket.gethostname())]
            i += 1
        # print proxyOverride
        # now check if we match one of the registry values.
        # Each override entry is a glob pattern; convert it to a regex.
        for test in proxyOverride:
            test = test.replace(".", r"\.")     # mask dots
            test = test.replace("*", r".*")     # change glob sequence
            test = test.replace("?", r".")      # change glob char
            for val in host:
                # print "%s <--> %s" %( test, val )
                if re.match(test, val, re.I):
                    return 1
        return 0
    def proxy_bypass(host):
        """Return 1 if 'host' should be accessed without a proxy.

        Consults no_proxy from the environment when any <scheme>_proxy
        variable is set, otherwise the registry's ProxyOverride list.
        (The previous docstring was copied from getproxies() and
        wrongly claimed a dictionary return value.)
        """
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
else:
    # Neither Mac OS X nor Windows: fall back to the portable,
    # environment-variable based implementations.

    # By default use environment variables
    getproxies = getproxies_environment
    proxy_bypass = proxy_bypass_environment
# Test and time quote() and unquote()
def test1():
    """Round-trip all 256 byte values through quote()/unquote(),
    reporting a mismatch and the elapsed time."""
    s = ''
    for i in range(256): s = s + chr(i)
    s = s*4
    t0 = time.time()
    qs = quote(s)
    uqs = unquote(qs)
    t1 = time.time()
    if uqs != s:
        # Decoding did not invert encoding: dump all three strings.
        print 'Wrong!'
        print repr(s)
        print repr(qs)
        print repr(uqs)
    print round(t1 - t0, 3), 'sec'
def reporthook(blocknum, blocksize, totalsize):
    """Progress callback for urlretrieve(): print one line per block.

    blocknum  -- index of the block just transferred
    blocksize -- size of a transfer block in bytes
    totalsize -- total transfer size in bytes, as reported by the caller
    """
    # Report during remote transfers.  Parenthesized single-argument
    # print works identically as a statement (Py2) and a call (Py3);
    # the bare print statement is a Python 3 syntax error.
    print("Block number: %d, Block size: %d, Total size: %d" % (
        blocknum, blocksize, totalsize))
# Test program
def test(args=[]):
    """Fetch each URL in 'args' (default: a small built-in list) and
    print its headers and body.

    NOTE(review): the mutable default argument is shared across calls,
    but it is only mutated when empty, so every call re-mutates from
    the same starting point only if callers pass nothing.
    """
    if not args:
        args = [
            '/etc/passwd',
            'file:/etc/passwd',
            'file://localhost/etc/passwd',
            'ftp://ftp.gnu.org/pub/README',
            'http://www.python.org/index.html',
            ]
        if hasattr(URLopener, "open_https"):
            # SSL support compiled in: exercise an https URL too.
            args.append('https://synergy.as.cmu.edu/~geek/')
    try:
        for url in args:
            print '-'*10, url, '-'*10
            fn, h = urlretrieve(url, None, reporthook)
            print fn
            if h:
                print '======'
                for k in h.keys(): print k + ':', h[k]
                print '======'
            fp = open(fn, 'rb')
            data = fp.read()
            del fp
            if '\r' in data:
                # Strip carriage returns for cleaner terminal output.
                table = string.maketrans("", "")
                data = data.translate(table, "\r")
            print data
            fn, h = None, None
            print '-'*40
    finally:
        # Always remove the temporary files urlretrieve() created.
        urlcleanup()
def main():
    """Command-line driver: -t runs the self-test (twice for the
    quote/unquote timing test as well); otherwise each URL argument
    is fetched and its contents printed."""
    import getopt, sys
    try:
        opts, args = getopt.getopt(sys.argv[1:], "th")
    except getopt.error, msg:
        print msg
        print "Use -h for help"
        return
    t = 0
    for o, a in opts:
        if o == '-t':
            # Each -t increments the test level.
            t = t + 1
        if o == '-h':
            print "Usage: python urllib.py [-t] [url ...]"
            print "-t runs self-test;",
            print "otherwise, contents of urls are printed"
            return
    if t:
        if t > 1:
            test1()
        test(args)
    else:
        if not args:
            print "Use -h for help"
        for url in args:
            print urlopen(url).read(),
# Run test program when run as a script
if __name__ == '__main__':
main()
| lgpl-2.1 |
StephenWeber/ansible | lib/ansible/plugins/cache/__init__.py | 42 | 2409 | # (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import MutableMapping
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins import cache_loader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class FactCache(MutableMapping):
    """MutableMapping facade over the configured fact-cache plugin.

    All storage is delegated to the cache plugin named by
    C.CACHE_PLUGIN; this class only adapts it to the dict protocol.
    """

    def __init__(self, *args, **kwargs):
        self._plugin = cache_loader.get(C.CACHE_PLUGIN)
        if not self._plugin:
            raise AnsibleError('Unable to load the facts cache plugin (%s).' % (C.CACHE_PLUGIN))

        # Backwards compat: self._display isn't really needed, just import the global display and use that.
        self._display = display

    def __getitem__(self, key):
        # Probe first so a missing key raises KeyError (dict protocol)
        # instead of whatever the plugin's get() would do.
        if not self._plugin.contains(key):
            raise KeyError
        return self._plugin.get(key)

    def __setitem__(self, key, value):
        self._plugin.set(key, value)

    def __delitem__(self, key):
        self._plugin.delete(key)

    def __contains__(self, key):
        return self._plugin.contains(key)

    def __iter__(self):
        return iter(self._plugin.keys())

    def __len__(self):
        return len(self._plugin.keys())

    def copy(self):
        """ Return a primitive copy of the keys and values from the cache. """
        return dict(self)

    def keys(self):
        return self._plugin.keys()

    def flush(self):
        """ Flush the fact cache of all keys. """
        self._plugin.flush()

    def update(self, key, value):
        # NOTE(review): shadows MutableMapping.update() with an
        # incompatible (key, value) signature -- it merges 'value' into
        # the cached dict stored under 'key'.  Kept for compatibility.
        host_cache = self._plugin.get(key)
        host_cache.update(value)
        self._plugin.set(key, host_cache)
| gpl-3.0 |
heropunch/data-services | src/data_escrow/assurance.py | 1 | 2108 | # -*- coding: utf-8 -*-
import os
import math
from random import SystemRandom
from ensure import ensure_annotations
random = SystemRandom()
@ensure_annotations
def sample_size(N: int, z: float=1.96, e: float=0.05, p: float=0.5) -> int:
    """
    Calculate a sample size that will provide the desired level of confidence,
    with the specified margin of error.

    :param N: population size
    :param z: confidence level
    :param e: margin of error
    :param p: prediction

    Assurance Values:

    +--------------------------+---------+
    | Desired Confidence Level | z-score |
    +==========================+=========+
    | 80%                      | 1.28    |
    +--------------------------+---------+
    | 85%                      | 1.44    |
    +--------------------------+---------+
    | 90%                      | 1.65    |
    +--------------------------+---------+
    | 95%                      | 1.96    |
    +--------------------------+---------+
    | 99%                      | 2.58    |
    +--------------------------+---------+
    """
    # Finite-population sample size, rounded up to whole observations.
    # NOTE(review): Cochran's formula uses p*(1-p)*(z/e)**2; this code
    # uses p*(z/e)**2 (no (1-p) factor) -- confirm this is intentional.
    n = ( ( N * ( p * ( ( z / e ) ** 2 ) ) )
        / ( ( p * ( ( z / e ) ** 2 ) ) + N - 1 ) )
    return int(math.ceil(n))
def representative_sample(data):
    """
    generate a representative sample of the given data.

    :param data: rows of data
    """
    population = len(data)
    # Sample just enough rows for the default confidence/error levels.
    n = sample_size(population)
    return {
        "sample": random.sample(data, n),
        "count": population,
        "size": n,
    }
def shuffle_columns(data):
    """
    randomize the values of a data set, preserve column associations.

    :param data: rows of data
    """
    # Collect each column's values (rows walked in reverse so that
    # popping below re-emits one value per original row).
    columns = {}
    for row in reversed(data):
        for key, value in row.items():
            columns.setdefault(key, []).append(value)
    # Independently shuffle every column.
    for key in columns:
        columns[key] = random.sample(columns[key], len(columns[key]))
    # Re-assemble rows until any column runs dry.
    while True:
        try:
            yield {key: columns[key].pop() for key in columns}
        except IndexError:
            break
| agpl-3.0 |
yqm/sl4a | python/src/Lib/distutils/command/build_ext.py | 30 | 32079 | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: build_ext.py 69317 2009-02-05 22:55:00Z tarek.ziade $"
import sys, os, string, re
from types import *
from site import USER_BASE, USER_SITE
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
if os.name == 'nt':
from distutils.msvccompiler import get_build_version
MSVC_VERSION = int(get_build_version())
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
    # --help-compiler callback: list the compiler types distutils knows.
    from distutils.ccompiler import show_compilers
    show_compilers()
class build_ext (Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
('user', None,
"add user include, library and rpath"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
    def initialize_options (self):
        # Every option starts unset; finalize_options() supplies the
        # real defaults (several inherited from the 'build' command).
        self.extensions = None
        self.build_lib = None
        self.plat_name = None
        self.build_temp = None
        self.inplace = 0
        self.package = None
        self.include_dirs = None
        self.define = None
        self.undef = None
        self.libraries = None
        self.library_dirs = None
        self.rpath = None
        self.link_objects = None
        self.debug = None
        self.force = None
        self.compiler = None
        self.swig = None
        self.swig_cpp = None
        self.swig_opts = None
        self.user = None
def finalize_options (self):
    """Resolve all options to their final values.

    Inherits unset options from the 'build' command, normalizes
    string-valued path-list options into real lists, and appends the
    platform-specific include/library directories that extensions need
    (Python's own headers, Windows .lib dirs, Cygwin/Linux shared-lib
    dirs, and per-user directories when --user was given).
    """
    from distutils import sysconfig
    # Take defaults from the already-finalized 'build' command.
    self.set_undefined_options('build',
                               ('build_lib', 'build_lib'),
                               ('build_temp', 'build_temp'),
                               ('compiler', 'compiler'),
                               ('debug', 'debug'),
                               ('force', 'force'),
                               ('plat_name', 'plat_name'),
                               )
    if self.package is None:
        self.package = self.distribution.ext_package
    self.extensions = self.distribution.ext_modules
    # Make sure Python's include directories (for Python.h, pyconfig.h,
    # etc.) are in the include search path.
    py_include = sysconfig.get_python_inc()
    plat_py_include = sysconfig.get_python_inc(plat_specific=1)
    if self.include_dirs is None:
        self.include_dirs = self.distribution.include_dirs or []
    # Options parsed from the command line arrive as a single
    # os.pathsep-separated string; turn them into lists.
    if type(self.include_dirs) is StringType:
        self.include_dirs = string.split(self.include_dirs, os.pathsep)
    # Put the Python "system" include dir at the end, so that
    # any local include dirs take precedence.
    self.include_dirs.append(py_include)
    if plat_py_include != py_include:
        self.include_dirs.append(plat_py_include)
    if type(self.libraries) is StringType:
        self.libraries = [self.libraries]
    # Life is easier if we're not forever checking for None, so
    # simplify these options to empty lists if unset
    if self.libraries is None:
        self.libraries = []
    if self.library_dirs is None:
        self.library_dirs = []
    elif type(self.library_dirs) is StringType:
        self.library_dirs = string.split(self.library_dirs, os.pathsep)
    if self.rpath is None:
        self.rpath = []
    elif type(self.rpath) is StringType:
        self.rpath = string.split(self.rpath, os.pathsep)
    # for extensions under windows use different directories
    # for Release and Debug builds.
    # also Python's library directory must be appended to library_dirs
    if os.name == 'nt':
        # the 'libs' directory is for binary installs - we assume that
        # must be the *native* platform.  But we don't really support
        # cross-compiling via a binary install anyway, so we let it go.
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
        if self.debug:
            self.build_temp = os.path.join(self.build_temp, "Debug")
        else:
            self.build_temp = os.path.join(self.build_temp, "Release")
        # Append the source distribution include and library directories,
        # this allows distutils on windows to work in the source tree
        self.include_dirs.append(os.path.join(sys.exec_prefix, 'PC'))
        if MSVC_VERSION == 9:
            # Use the .lib files for the correct architecture
            if self.plat_name == 'win32':
                suffix = ''
            else:
                # win-amd64 or win-ia64
                suffix = self.plat_name[4:]
            new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
            if suffix:
                new_lib = os.path.join(new_lib, suffix)
            self.library_dirs.append(new_lib)
        elif MSVC_VERSION == 8:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VS8.0', 'win32release'))
        elif MSVC_VERSION == 7:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VS7.1'))
        else:
            self.library_dirs.append(os.path.join(sys.exec_prefix,
                                                  'PC', 'VC6'))
    # OS/2 (EMX) doesn't support Debug vs Release builds, but has the
    # import libraries in its "Config" subdirectory
    if os.name == 'os2':
        self.library_dirs.append(os.path.join(sys.exec_prefix, 'Config'))
    # for extensions under Cygwin and AtheOS Python's library directory must be
    # appended to library_dirs
    if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
        if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
            # building third party extensions
            self.library_dirs.append(os.path.join(sys.prefix, "lib",
                                                  "python" + get_python_version(),
                                                  "config"))
        else:
            # building python standard extensions
            self.library_dirs.append('.')
    # for extensions under Linux or Solaris with a shared Python library,
    # Python's library directory must be appended to library_dirs
    # NOTE(review): the result of this call is discarded -- presumably a
    # leftover; the same variable is queried again in the condition below.
    sysconfig.get_config_var('Py_ENABLE_SHARED')
    if ((sys.platform.startswith('linux') or sys.platform.startswith('gnu')
         or sys.platform.startswith('sunos'))
        and sysconfig.get_config_var('Py_ENABLE_SHARED')):
        if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
            # building third party extensions
            self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
        else:
            # building python standard extensions
            self.library_dirs.append('.')
    # The argument parsing will result in self.define being a string, but
    # it has to be a list of 2-tuples.  All the preprocessor symbols
    # specified by the 'define' option will be set to '1'.  Multiple
    # symbols can be separated with commas.
    if self.define:
        defines = string.split(self.define, ',')
        self.define = map(lambda symbol: (symbol, '1'), defines)
    # The option for macros to undefine is also a string from the
    # option parsing, but has to be a list.  Multiple symbols can also
    # be separated with commas here.
    if self.undef:
        self.undef = string.split(self.undef, ',')
    if self.swig_opts is None:
        self.swig_opts = []
    else:
        # SWIG options arrive as a single space-separated string.
        self.swig_opts = self.swig_opts.split(' ')
    # Finally add the user include and library directories if requested
    if self.user:
        user_include = os.path.join(USER_BASE, "include")
        user_lib = os.path.join(USER_BASE, "lib")
        if os.path.isdir(user_include):
            self.include_dirs.append(user_include)
        if os.path.isdir(user_lib):
            self.library_dirs.append(user_lib)
            self.rpath.append(user_lib)
# finalize_options ()
def run (self):
    """Build all extension modules: set up the CCompiler, feed it the
    command's include/library/macro options, then build each extension.
    """
    from distutils.ccompiler import new_compiler
    # 'self.extensions', as supplied by setup.py, is a list of
    # Extension instances.  See the documentation for Extension (in
    # distutils.extension) for details.
    #
    # For backwards compatibility with Distutils 0.8.2 and earlier, we
    # also allow the 'extensions' list to be a list of tuples:
    #    (ext_name, build_info)
    # where build_info is a dictionary containing everything that
    # Extension instances do except the name, with a few things being
    # differently named.  We convert these 2-tuples to Extension
    # instances as needed.
    if not self.extensions:
        return
    # If we were asked to build any C/C++ libraries, make sure that the
    # directory where we put them is in the library search path for
    # linking extensions.
    if self.distribution.has_c_libraries():
        build_clib = self.get_finalized_command('build_clib')
        self.libraries.extend(build_clib.get_library_names() or [])
        self.library_dirs.append(build_clib.build_clib)
    # Setup the CCompiler object that we'll use to do all the
    # compiling and linking
    # Note: self.compiler changes meaning here -- it was the compiler
    # *type* string from the options, and becomes a CCompiler instance.
    self.compiler = new_compiler(compiler=self.compiler,
                                 verbose=self.verbose,
                                 dry_run=self.dry_run,
                                 force=self.force)
    customize_compiler(self.compiler)
    # If we are cross-compiling, init the compiler now (if we are not
    # cross-compiling, init would not hurt, but people may rely on
    # late initialization of compiler even if they shouldn't...)
    if os.name == 'nt' and self.plat_name != get_platform():
        self.compiler.initialize(self.plat_name)
    # And make sure that any compile/link-related options (which might
    # come from the command-line or from the setup script) are set in
    # that CCompiler object -- that way, they automatically apply to
    # all compiling and linking done here.
    if self.include_dirs is not None:
        self.compiler.set_include_dirs(self.include_dirs)
    if self.define is not None:
        # 'define' option is a list of (name,value) tuples
        for (name, value) in self.define:
            self.compiler.define_macro(name, value)
    if self.undef is not None:
        for macro in self.undef:
            self.compiler.undefine_macro(macro)
    if self.libraries is not None:
        self.compiler.set_libraries(self.libraries)
    if self.library_dirs is not None:
        self.compiler.set_library_dirs(self.library_dirs)
    if self.rpath is not None:
        self.compiler.set_runtime_library_dirs(self.rpath)
    if self.link_objects is not None:
        self.compiler.set_link_objects(self.link_objects)
    # Now actually compile and link everything.
    self.build_extensions()
# run ()
def check_extensions_list (self, extensions):
    """Ensure that the list of extensions (presumably provided as a
    command option 'extensions') is valid, i.e. it is a list of
    Extension objects.  We also support the old-style list of 2-tuples,
    where the tuples are (ext_name, build_info), which are converted to
    Extension instances here.
    Raise DistutilsSetupError if the structure is invalid anywhere;
    just returns otherwise.
    """
    if type(extensions) is not ListType:
        raise DistutilsSetupError, \
              "'ext_modules' option must be a list of Extension instances"
    for i in range(len(extensions)):
        ext = extensions[i]
        if isinstance(ext, Extension):
            continue                # OK! (assume type-checking done
                                    # by Extension constructor)
        # Old-style tuple: unpack and convert to an Extension below.
        (ext_name, build_info) = ext
        log.warn(("old-style (ext_name, build_info) tuple found in "
                  "ext_modules for extension '%s'"
                  "-- please convert to Extension instance" % ext_name))
        # NOTE(review): this check is effectively dead -- a sequence of the
        # wrong length already failed the unpacking above with a ValueError,
        # and the 'and' makes the condition false for any 2-element sequence
        # ('or' was probably intended).  Left unchanged because "fixing" it
        # would start rejecting 2-element lists that currently work.
        if type(ext) is not TupleType and len(ext) != 2:
            raise DistutilsSetupError, \
                  ("each element of 'ext_modules' option must be an "
                   "Extension instance or 2-tuple")
        if not (type(ext_name) is StringType and
                extension_name_re.match(ext_name)):
            raise DistutilsSetupError, \
                  ("first element of each tuple in 'ext_modules' "
                   "must be the extension name (a string)")
        if type(build_info) is not DictionaryType:
            raise DistutilsSetupError, \
                  ("second element of each tuple in 'ext_modules' "
                   "must be a dictionary (build info)")
        # OK, the (ext_name, build_info) dict is type-safe: convert it
        # to an Extension instance.
        ext = Extension(ext_name, build_info['sources'])
        # Easy stuff: one-to-one mapping from dict elements to
        # instance attributes.
        for key in ('include_dirs',
                    'library_dirs',
                    'libraries',
                    'extra_objects',
                    'extra_compile_args',
                    'extra_link_args'):
            val = build_info.get(key)
            if val is not None:
                setattr(ext, key, val)
        # Medium-easy stuff: same syntax/semantics, different names.
        ext.runtime_library_dirs = build_info.get('rpath')
        if 'def_file' in build_info:
            log.warn("'def_file' element of build info dict "
                     "no longer supported")
        # Non-trivial stuff: 'macros' split into 'define_macros'
        # and 'undef_macros'.
        macros = build_info.get('macros')
        if macros:
            ext.define_macros = []
            ext.undef_macros = []
            for macro in macros:
                # 2-tuple defines a macro; 1-tuple undefines it.
                if not (type(macro) is TupleType and
                        1 <= len(macro) <= 2):
                    raise DistutilsSetupError, \
                          ("'macros' element of build info dict "
                           "must be 1- or 2-tuple")
                if len(macro) == 1:
                    ext.undef_macros.append(macro[0])
                elif len(macro) == 2:
                    ext.define_macros.append(macro)
        # Replace the tuple in the caller's list with the converted object.
        extensions[i] = ext
    # for extensions
# check_extensions_list ()
def get_source_files (self):
    """Return every source filename used by the configured extensions.

    The extension list is validated first; header files are not
    included (their names are not tracked per-extension).
    """
    self.check_extensions_list(self.extensions)
    # Wouldn't it be neat if we knew the names of header files too...
    return [source
            for ext in self.extensions
            for source in ext.sources]
def get_outputs (self):
    """Return the filenames this command will produce under build_lib.

    The 'extensions' list is sanity-checked first -- this can't assume
    it runs after a build_extensions() call (in fact, it probably
    doesn't!).  The 'inplace' flag is deliberately ignored: reported
    paths always live in the "build" tree.
    """
    self.check_extensions_list(self.extensions)
    return [os.path.join(self.build_lib,
                         self.get_ext_filename(self.get_ext_fullname(ext.name)))
            for ext in self.extensions]
# get_outputs ()
def build_extensions(self):
    """Validate the extension list, then build every extension in order."""
    # First, sanity-check the 'extensions' list
    self.check_extensions_list(self.extensions)
    for extension in self.extensions:
        self.build_extension(extension)
def build_extension(self, ext):
    """Compile and link one Extension into its shared-object file.

    Skips the build when the target is newer than all of its sources
    and dependencies (unless --force).  Raises DistutilsSetupError if
    the extension's 'sources' is missing or not a list/tuple.
    """
    sources = ext.sources
    if sources is None or type(sources) not in (ListType, TupleType):
        raise DistutilsSetupError, \
              ("in 'ext_modules' option (extension '%s'), " +
               "'sources' must be present and must be " +
               "a list of source filenames") % ext.name
    sources = list(sources)
    fullname = self.get_ext_fullname(ext.name)
    if self.inplace:
        # ignore build-lib -- put the compiled extension into
        # the source tree along with pure Python modules
        modpath = string.split(fullname, '.')
        package = string.join(modpath[0:-1], '.')
        base = modpath[-1]
        build_py = self.get_finalized_command('build_py')
        package_dir = build_py.get_package_dir(package)
        ext_filename = os.path.join(package_dir,
                                    self.get_ext_filename(base))
    else:
        ext_filename = os.path.join(self.build_lib,
                                    self.get_ext_filename(fullname))
    # Timestamp check: rebuild only if any source/dependency is newer.
    depends = sources + ext.depends
    if not (self.force or newer_group(depends, ext_filename, 'newer')):
        log.debug("skipping '%s' extension (up-to-date)", ext.name)
        return
    else:
        log.info("building '%s' extension", ext.name)
    # First, scan the sources for SWIG definition files (.i), run
    # SWIG on 'em to create .c files, and modify the sources list
    # accordingly.
    sources = self.swig_sources(sources, ext)
    # Next, compile the source code to object files.
    # XXX not honouring 'define_macros' or 'undef_macros' -- the
    # CCompiler API needs to change to accommodate this, and I
    # want to do one thing at a time!
    # Two possible sources for extra compiler arguments:
    #   - 'extra_compile_args' in Extension object
    #   - CFLAGS environment variable (not particularly
    #     elegant, but people seem to expect it and I
    #     guess it's useful)
    # The environment variable should take precedence, and
    # any sensible compiler will give precedence to later
    # command line args.  Hence we combine them in order:
    extra_args = ext.extra_compile_args or []
    # A 1-tuple in 'macros' means "undefine"; a 2-tuple means "define".
    macros = ext.define_macros[:]
    for undef in ext.undef_macros:
        macros.append((undef,))
    objects = self.compiler.compile(sources,
                                    output_dir=self.build_temp,
                                    macros=macros,
                                    include_dirs=ext.include_dirs,
                                    debug=self.debug,
                                    extra_postargs=extra_args,
                                    depends=ext.depends)
    # XXX -- this is a Vile HACK!
    #
    # The setup.py script for Python on Unix needs to be able to
    # get this list so it can perform all the clean up needed to
    # avoid keeping object files around when cleaning out a failed
    # build of an extension module.  Since Distutils does not
    # track dependencies, we have to get rid of intermediates to
    # ensure all the intermediates will be properly re-built.
    #
    self._built_objects = objects[:]
    # Now link the object files together into a "shared object" --
    # of course, first we have to figure out all the other things
    # that go into the mix.
    if ext.extra_objects:
        objects.extend(ext.extra_objects)
    extra_args = ext.extra_link_args or []
    # Detect target language, if not provided
    language = ext.language or self.compiler.detect_language(sources)
    self.compiler.link_shared_object(
        objects, ext_filename,
        libraries=self.get_libraries(ext),
        library_dirs=ext.library_dirs,
        runtime_library_dirs=ext.runtime_library_dirs,
        extra_postargs=extra_args,
        export_symbols=self.get_export_symbols(ext),
        debug=self.debug,
        build_temp=self.build_temp,
        target_lang=language)
def swig_sources (self, sources, extension):
    """Walk the list of source files in 'sources', looking for SWIG
    interface (.i) files.  Run SWIG on all that are found, and
    return a modified 'sources' list with SWIG source files replaced
    by the generated C (or C++) files.
    """
    processed = []        # 'sources' with .i entries replaced by wrappers
    interface_files = []  # the .i files themselves
    wrap_target = {}      # maps .i file -> generated wrapper file
    # XXX this drops generated C/C++ files into the source tree, which
    # is fine for developers who want to distribute the generated
    # source -- but there should be an option to put SWIG output in
    # the temp dir.
    if self.swig_cpp:
        log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
    # Decide whether SWIG should emit C++ or plain C wrappers.
    wants_cpp = (self.swig_cpp or ('-c++' in self.swig_opts)
                 or ('-c++' in extension.swig_opts))
    if wants_cpp:
        target_ext = '.cpp'
    else:
        target_ext = '.c'
    for source in sources:
        base, ext = os.path.splitext(source)
        if ext != ".i":
            processed.append(source)
            continue
        # SWIG interface file: substitute the wrapper it will generate.
        wrapper = base + '_wrap' + target_ext
        processed.append(wrapper)
        interface_files.append(source)
        wrap_target[source] = wrapper
    if not interface_files:
        return processed
    # Build the SWIG command line once, then run it per interface file.
    swig_cmd = [self.swig or self.find_swig(), "-python"]
    swig_cmd.extend(self.swig_opts)
    if self.swig_cpp:
        swig_cmd.append("-c++")
    # Do not override commandline arguments
    if not self.swig_opts:
        swig_cmd.extend(extension.swig_opts)
    for source in interface_files:
        target = wrap_target[source]
        log.info("swigging %s to %s", source, target)
        self.spawn(swig_cmd + ["-o", target, source])
    return processed
# swig_sources ()
def find_swig (self):
    """Return the name of the SWIG executable.  On Unix, this is
    just "swig" -- it should be in the PATH.  Tries a bit harder on
    Windows.
    """
    if os.name == "posix":
        return "swig"
    elif os.name == "nt":
        # Look for SWIG in its standard installation directory on
        # Windows (or so I presume!).  If we find it there, great;
        # if not, act like Unix and assume it's in the PATH.
        for vers in ("1.3", "1.2", "1.1"):
            fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
            if os.path.isfile(fn):
                return fn
        else:
            # for/else: reached only when no candidate path existed.
            return "swig.exe"
    elif os.name == "os2":
        # assume swig available in the PATH.
        return "swig.exe"
    else:
        raise DistutilsPlatformError, \
              ("I don't know how to find (much less run) SWIG "
               "on platform '%s'") % os.name
# find_swig ()
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullname (self, ext_name):
    """Return 'ext_name' prefixed with the command's package, if any."""
    if self.package is None:
        return ext_name
    return self.package + '.' + ext_name
def get_ext_filename (self, ext_name):
    r"""Convert the name of an extension (eg. "foo.bar") into the name
    of the file from which it will be loaded (eg. "foo/bar.so", or
    "foo\bar.pyd").
    """
    from distutils.sysconfig import get_config_var
    # Dotted module path -> filesystem path components.
    ext_path = ext_name.split('.')
    # OS/2 has an 8 character module (extension) limit :-(
    if os.name == "os2":
        ext_path[-1] = ext_path[-1][:8]
    # 'SO' is the platform's shared-object suffix (e.g. '.so', '.pyd').
    so_ext = get_config_var('SO')
    # extensions in debug_mode are named 'module_d.pyd' under windows
    if os.name == 'nt' and self.debug:
        # Fixed: was apply(os.path.join, ext_path) -- 'apply' is deprecated
        # and the non-debug return below already used argument unpacking.
        return os.path.join(*ext_path) + '_d' + so_ext
    return os.path.join(*ext_path) + so_ext
def get_export_symbols (self, ext):
    """Return the list of symbols that a shared extension has to
    export.  This either uses 'ext.export_symbols' or, if it's not
    provided, "init" + module_name.  Only relevant on Windows, where
    the .pyd file (DLL) must export the module "init" function.
    """
    # The init function is named after the last dotted component.
    module_tail = string.split(ext.name, '.')[-1]
    initfunc_name = "init" + module_tail
    symbols = ext.export_symbols
    if initfunc_name not in symbols:
        symbols.append(initfunc_name)
    return symbols
def get_libraries (self, ext):
    """Return the list of libraries to link against when building a
    shared extension.  On most platforms, this is just 'ext.libraries';
    on Windows and OS/2, we add the Python library (eg. python20.dll).
    """
    # The python library is always needed on Windows.  For MSVC, this
    # is redundant, since the library is mentioned in a pragma in
    # pyconfig.h that MSVC groks.  The other Windows compilers all seem
    # to need it mentioned explicitly, though, so that's what we do.
    # Append '_d' to the python import library on debug builds.
    if sys.platform == "win32":
        from distutils.msvccompiler import MSVCCompiler
        if not isinstance(self.compiler, MSVCCompiler):
            template = "python%d%d"
            if self.debug:
                template = template + '_d'
            # Major/minor version extracted from sys.hexversion
            # (top byte = major, next byte = minor).
            pythonlib = (template %
                         (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            # don't extend ext.libraries, it may be shared with other
            # extensions, it is a reference to the original list
            return ext.libraries + [pythonlib]
        else:
            return ext.libraries
    elif sys.platform == "os2emx":
        # EMX/GCC requires the python library explicitly, and I
        # believe VACPP does as well (though not confirmed) - AIM Apr01
        template = "python%d%d"
        # debug versions of the main DLL aren't supported, at least
        # not at this time - AIM Apr01
        #if self.debug:
        #    template = template + '_d'
        pythonlib = (template %
                     (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
        # don't extend ext.libraries, it may be shared with other
        # extensions, it is a reference to the original list
        return ext.libraries + [pythonlib]
    elif sys.platform[:6] == "cygwin":
        template = "python%d.%d"
        pythonlib = (template %
                     (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
        # don't extend ext.libraries, it may be shared with other
        # extensions, it is a reference to the original list
        return ext.libraries + [pythonlib]
    elif sys.platform[:6] == "atheos":
        from distutils import sysconfig
        template = "python%d.%d"
        pythonlib = (template %
                     (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
        # Get SHLIBS from Makefile
        extra = []
        for lib in sysconfig.get_config_var('SHLIBS').split():
            if lib.startswith('-l'):
                extra.append(lib[2:])
            else:
                extra.append(lib)
        # don't extend ext.libraries, it may be shared with other
        # extensions, it is a reference to the original list
        return ext.libraries + [pythonlib, "m"] + extra
    elif sys.platform == 'darwin':
        # Don't use the default code below
        return ext.libraries
    else:
        from distutils import sysconfig
        # Link against libpython only when Python itself was built shared.
        if sysconfig.get_config_var('Py_ENABLE_SHARED'):
            template = "python%d.%d"
            pythonlib = (template %
                         (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
            return ext.libraries + [pythonlib]
        else:
            return ext.libraries
# class build_ext
| apache-2.0 |
bixbydev/Bixby | google/gdata-2.0.18/build/lib.linux-x86_64-2.7/gdata/geo/__init__.py | 249 | 6006 | # -*-*- encoding: utf-8 -*-*-
#
# This is gdata.photos.geo, implementing geological positioning in gdata structures
#
# $Id: __init__.py 81 2007-10-03 14:41:42Z havard.gulldahl $
#
# Copyright 2007 Håvard Gulldahl
# Portions copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Picasa Web Albums uses the georss and gml namespaces for
elements defined in the GeoRSS and Geography Markup Language specifications.
Specifically, Picasa Web Albums uses the following elements:
georss:where
gml:Point
gml:pos
http://code.google.com/apis/picasaweb/reference.html#georss_reference
Picasa Web Albums also accepts geographic-location data in two other formats:
W3C format and plain-GeoRSS (without GML) format.
"""
#
#Over the wire, the Picasa Web Albums only accepts and sends the
#elements mentioned above, but this module will let you seamlessly convert
#between the different formats (TODO 2007-10-18 hg)
__author__ = u'havard@gulldahl.no'# (Håvard Gulldahl)' #BUG: api chokes on non-ascii chars in __author__
__license__ = 'Apache License v2'
import atom
import gdata
# XML namespace URIs used by the geo-tagging elements defined below.
GEO_NAMESPACE = 'http://www.w3.org/2003/01/geo/wgs84_pos#'  # W3C WGS84 vocabulary
GML_NAMESPACE = 'http://www.opengis.net/gml'  # Geography Markup Language
GEORSS_NAMESPACE = 'http://www.georss.org/georss'  # GeoRSS
class GeoBaseElement(atom.AtomBase):
    """Common base for the geo XML elements in this module.

    Subclasses only need to override the element tag name (_tag) and,
    where it differs from GML, the namespace (_namespace).
    """
    _tag = ''
    _namespace = GML_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, name=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.name = name
        self.text = text
        # Default to fresh empty containers when nothing was supplied.
        if extension_elements:
            self.extension_elements = extension_elements
        else:
            self.extension_elements = []
        if extension_attributes:
            self.extension_attributes = extension_attributes
        else:
            self.extension_attributes = {}
class Pos(GeoBaseElement):
    """(string) Specifies a latitude and longitude, separated by a space,
    e.g. `35.669998 139.770004'"""
    # Serialized as <gml:pos> (namespace inherited from GeoBaseElement).
    _tag = 'pos'
def PosFromString(xml_string):
    """Deserialize a <gml:pos> XML string into a Pos instance."""
    return atom.CreateClassFromXMLString(Pos, xml_string)
class Point(GeoBaseElement):
    """(container) Specifies a particular geographical point, by means of
    a <gml:pos> element."""
    _tag = 'Point'
    _children = atom.AtomBase._children.copy()
    # Register the single <gml:pos> child, exposed as the .pos attribute.
    _children['{%s}pos' % GML_NAMESPACE] = ('pos', Pos)

    def __init__(self, pos=None, extension_elements=None,
                 extension_attributes=None, text=None):
        GeoBaseElement.__init__(self, extension_elements=extension_elements,
                                extension_attributes=extension_attributes,
                                text=text)
        # Always carry a Pos child, even when none was supplied.
        self.pos = Pos() if pos is None else pos
def PointFromString(xml_string):
    """Deserialize a <gml:Point> XML string into a Point instance."""
    return atom.CreateClassFromXMLString(Point, xml_string)
class Where(GeoBaseElement):
    """(container) Specifies a geographical location or region.
    A container element, containing a single <gml:Point> element.
    (Not to be confused with <gd:where>.)

    Note that the (only) child attribute, .Point, is title-cased.
    This reflects the names of elements in the xml stream
    (principle of least surprise).

    As a convenience, you can get a tuple of (lat, lon) with Where.location(),
    and set the same data with Where.set_location( (lat, lon) ).

    Similarly, there are methods to set and get only latitude and longitude.
    """
    _tag = 'where'
    _namespace = GEORSS_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _children['{%s}Point' % GML_NAMESPACE] = ('Point', Point)

    def __init__(self, point=None, extension_elements=None,
                 extension_attributes=None, text=None):
        GeoBaseElement.__init__(self, extension_elements=extension_elements,
                                extension_attributes=extension_attributes,
                                text=text)
        if point is None:
            point = Point()
        self.Point = point

    def location(self):
        "(float, float) Return Where.Point.pos.text as a (lat,lon) tuple"
        try:
            return tuple([float(z) for z in self.Point.pos.text.split(' ')])
        except AttributeError:
            # No position set yet (pos.text is None): empty tuple.
            return tuple()

    def set_location(self, latlon):
        """(bool) Set Where.Point.pos.text from a (lat,lon) tuple.

        Arguments:
        latlon: tuple of (lat, lon), where
          lat (float): The latitude in degrees, from -90.0 to 90.0
          lon (float): The longitude in degrees, from -180.0 to 180.0

        Returns True on success.
        """
        assert(isinstance(latlon[0], float))
        assert(isinstance(latlon[1], float))
        try:
            self.Point.pos.text = "%s %s" % (latlon[0], latlon[1])
            return True
        except AttributeError:
            return False

    def latitude(self):
        "(float) Get the latitude value of the geo-tag. See also .location()"
        lat, lon = self.location()
        return lat

    def longitude(self):
        "(float) Get the longitude value of the geo-tag. See also .location()"
        lat, lon = self.location()
        return lon

    # Misspelled alias kept for backward compatibility with existing callers.
    longtitude = longitude

    def set_latitude(self, lat):
        """(bool) Set the latitude value of the geo-tag, keeping longitude.

        Args:
        lat (float): The new latitude value

        See also .set_location()
        """
        _lat, lon = self.location()
        # BUGFIX: set_location() takes a single (lat, lon) tuple; the old
        # code passed two positional arguments, raising a TypeError.
        return self.set_location((lat, lon))

    def set_longitude(self, lon):
        """(bool) Set the longitude value of the geo-tag, keeping latitude.

        Args:
        lon (float): The new longitude value

        See also .set_location()
        """
        lat, _lon = self.location()
        # BUGFIX: pass the coordinates as one (lat, lon) tuple, as
        # set_location() expects.
        return self.set_location((lat, lon))

    set_longtitude = set_longitude
def WhereFromString(xml_string):
    """Deserialize a <georss:where> XML string into a Where instance."""
    return atom.CreateClassFromXMLString(Where, xml_string)
| gpl-3.0 |
m0re4u/LeRoT-SCLP | lerot/environment/PositionBasedUserModel.py | 2 | 1440 | # This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from numpy import zeros
from AbstractUserModel import AbstractUserModel
class PositionBasedUserModel(AbstractUserModel):
    """Position-based user model.

    The probability that a user examines a result decays geometrically
    with its rank (p**rank); a click happens when an examined document
    is relevant according to the provided labels.
    """

    def __init__(self, p):
        # Base of the geometric examination probability.
        self.p_param = p

    def p(self, i):
        """Return the probability that the document at rank i is examined."""
        return self.p_param ** i

    def get_clicks(self, result_list, labels, **kwargs):
        """Simulate user clicks on the ranked result_list."""
        clicks = zeros(len(result_list), dtype='int')
        for rank, doc in enumerate(result_list):
            examined = np.random.binomial(1, self.p(rank))
            relevance = labels[doc.get_id()]
            if examined and relevance:
                clicks[rank] = 1
        return clicks

    def get_examination_prob(self, result_list, **kwargs):
        """Return the per-rank examination probabilities for result_list."""
        return [self.p(rank) for rank in range(len(result_list))]
| gpl-3.0 |
gitlabhq/pygments.rb | vendor/pygments-main/pygments/lexers/__init__.py | 194 | 7698 | # -*- coding: utf-8 -*-
"""
pygments.lexers
~~~~~~~~~~~~~~~
Pygments lexers.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import types
import fnmatch
from os.path import basename
from pygments.lexers._mapping import LEXERS
from pygments.modeline import get_filetype_from_buffer
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes
__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
'guess_lexer'] + LEXERS.keys()
_lexer_cache = {}
def _load_lexers(module_name):
"""
Load a lexer (and all others in the module too).
"""
mod = __import__(module_name, None, None, ['__all__'])
for lexer_name in mod.__all__:
cls = getattr(mod, lexer_name)
_lexer_cache[cls.name] = cls
def get_all_lexers():
    """
    Return a generator of tuples in the form ``(name, aliases,
    filenames, mimetypes)`` of all know lexers.
    """
    # Builtin lexers come straight from the static LEXERS mapping
    # (item[0] is the module name, which callers don't need).
    for item in LEXERS.itervalues():
        yield item[1:]
    # Followed by any lexers registered through setuptools plugins.
    for lexer in find_plugin_lexers():
        yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
def find_lexer_class(name):
    """
    Lookup a lexer class by name. Return None if not found.
    """
    # Fast path: already imported and cached.
    if name in _lexer_cache:
        return _lexer_cache[name]
    # lookup builtin lexers
    for module_name, lname, aliases, _, _ in LEXERS.itervalues():
        if name == lname:
            # Importing the module populates _lexer_cache as a side effect.
            _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if cls.name == name:
            return cls
def get_lexer_by_name(_alias, **options):
    """
    Get a lexer by an alias.

    Returns an *instance* of the lexer class, constructed with
    **options; raises ClassNotFound when no lexer has that alias.
    """
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in LEXERS.itervalues():
        if _alias in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name](**options)
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias in cls.aliases:
            return cls(**options)
    raise ClassNotFound('no lexer for alias %r found' % _alias)
def get_lexer_for_filename(_fn, code=None, **options):
    """
    Get a lexer for a filename.  If multiple lexers match the filename
    pattern, use ``analyze_text()`` to figure out which one is more
    appropriate.

    Raises ClassNotFound if no lexer's filename patterns match.
    """
    matches = []
    fn = basename(_fn)
    # Collect every builtin lexer whose filename patterns match.
    for modname, name, _, filenames, _ in LEXERS.itervalues():
        for filename in filenames:
            if fnmatch.fnmatch(fn, filename):
                if name not in _lexer_cache:
                    _load_lexers(modname)
                matches.append((_lexer_cache[name], filename))
    # Plus plugin lexers.
    for cls in find_plugin_lexers():
        for filename in cls.filenames:
            if fnmatch.fnmatch(fn, filename):
                matches.append((cls, filename))
    if sys.version_info > (3,) and isinstance(code, bytes):
        # decode it, since all analyse_text functions expect unicode
        code = code.decode('latin1')
    def get_rating(info):
        cls, filename = info
        # explicit patterns get a bonus
        bonus = '*' not in filename and 0.5 or 0
        # The class _always_ defines analyse_text because it's included in
        # the Lexer class.  The default implementation returns None which
        # gets turned into 0.0.  Run scripts/detect_missing_analyse_text.py
        # to find lexers which need it overridden.
        if code:
            return cls.analyse_text(code) + bonus
        return cls.priority + bonus
    if matches:
        # Highest-rated match wins.
        matches.sort(key=get_rating)
        #print "Possible lexers, after sort:", matches
        return matches[-1][0](**options)
    raise ClassNotFound('no lexer for filename %r found' % _fn)
def get_lexer_for_mimetype(_mime, **options):
    """
    Get a lexer for a mimetype.

    Returns a lexer instance; raises ClassNotFound when no builtin or
    plugin lexer declares the mimetype.
    """
    for modname, name, _, _, mimetypes in LEXERS.itervalues():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
def _iter_lexerclasses():
    """
    Return an iterator over all lexer classes: builtins in sorted
    (deterministic) order, then setuptools plugin lexers.
    """
    for key in sorted(LEXERS):
        module_name, class_name = LEXERS[key][:2]
        if class_name not in _lexer_cache:
            _load_lexers(module_name)
        yield _lexer_cache[class_name]
    for plugin_lexer in find_plugin_lexers():
        yield plugin_lexer
def guess_lexer_for_filename(_fn, _text, **options):
    """
    Lookup all lexers that handle those filenames primary (``filenames``)
    or secondary (``alias_filenames``). Then run a text analysis for those
    lexers and choose the best result.

    usage::
        >>> from pygments.lexers import guess_lexer_for_filename
        >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
        <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
        >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
        <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
        >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
        <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
    """
    fn = basename(_fn)
    # 'primary' remembers the last lexer whose *primary* patterns matched;
    # it is the fallback when text analysis is inconclusive.
    primary = None
    matching_lexers = set()
    for lexer in _iter_lexerclasses():
        for filename in lexer.filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
                primary = lexer
        for filename in lexer.alias_filenames:
            if fnmatch.fnmatch(fn, filename):
                matching_lexers.add(lexer)
    if not matching_lexers:
        raise ClassNotFound('no lexer for filename %r found' % fn)
    if len(matching_lexers) == 1:
        return matching_lexers.pop()(**options)
    # Several candidates: rank them by how confidently they recognize
    # the text; a perfect score (1.0) wins immediately.
    result = []
    for lexer in matching_lexers:
        rv = lexer.analyse_text(_text)
        if rv == 1.0:
            return lexer(**options)
        result.append((rv, lexer))
    result.sort()
    # All analyses scored 0: fall back to the primary-pattern match.
    if not result[-1][0] and primary is not None:
        return primary(**options)
    return result[-1][1](**options)
def guess_lexer(_text, **options):
    """Pick a lexer purely from the text (shebangs, modelines, etc.)."""
    # A vim modeline, if present, names the filetype outright.
    ft = get_filetype_from_buffer(_text)
    if ft is not None:
        try:
            return get_lexer_by_name(ft, **options)
        except ClassNotFound:
            pass
    best_score = 0.0
    best_cls = None
    for candidate in _iter_lexerclasses():
        score = candidate.analyse_text(_text)
        if score == 1.0:
            # A perfect score short-circuits the search.
            return candidate(**options)
        if score > best_score:
            best_score = score
            best_cls = candidate
    if not best_score or best_cls is None:
        raise ClassNotFound('no lexer matching the text found')
    return best_cls(**options)
class _automodule(types.ModuleType):
    """Module subclass that lazily imports lexer classes on attribute
    access."""
    def __getattr__(self, name):
        info = LEXERS.get(name)
        if not info:
            raise AttributeError(name)
        _load_lexers(info[0])
        cls = _lexer_cache[info[1]]
        # Cache on the module itself so later lookups bypass __getattr__.
        setattr(self, name, cls)
        return cls
# Replace the real ``pygments.lexers`` module in sys.modules with an
# _automodule instance so lexer classes can be imported lazily via
# attribute access.
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
sys.modules['pygments.lexers'] = newmod
# Drop helper names that were copied over from this module's namespace
# so the replacement module does not leak them as attributes.
del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
| mit |
manojhirway/ExistingImagesOnNFS | cinder/volume/drivers/lvm.py | 3 | 32887 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver for Linux servers running LVM.
"""
import math
import os
import socket
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import units
import six
from cinder.brick.local_dev import lvm as lvm
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# FIXME(jdg): We'll put the lvm_ prefix back on these when we
# move over to using this as the real LVM driver, for now we'll
# rename them so that the config generation utility doesn't barf
# on duplicate entries.
# Configuration options consumed by the LVM driver; registered on the
# global CONF below and appended to per-backend config in __init__.
volume_opts = [
    cfg.StrOpt('volume_group',
               default='cinder-volumes',
               help='Name for the VG that will contain exported volumes'),
    cfg.IntOpt('lvm_mirrors',
               default=0,
               help='If >0, create LVs with multiple mirrors. Note that '
                    'this requires lvm_mirrors + 2 PVs with available space'),
    cfg.StrOpt('lvm_type',
               default='default',
               choices=['default', 'thin', 'auto'],
               help='Type of LVM volumes to deploy; (default, thin, or auto). '
                    'Auto defaults to thin if thin is supported.'),
    cfg.StrOpt('lvm_conf_file',
               default='/etc/cinder/lvm.conf',
               help='LVM conf file to use for the LVM driver in Cinder; '
                    'this setting is ignored if the specified file does '
                    'not exist (You can also specify \'None\' to not use '
                    'a conf file even if one exists).')
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LVMVolumeDriver(driver.VolumeDriver):
    """Executes commands relating to Volumes."""
    # Driver interface version reported to the volume manager.
    VERSION = '3.0.0'
    def __init__(self, vg_obj=None, *args, **kwargs):
        """Set up LVM driver state and instantiate the target driver.

        :param vg_obj: optional pre-built brick LVM object; when None the
                       VG handle is created in check_for_setup_error.
        """
        # Parent sets db, host, _execute and base config
        super(LVMVolumeDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(volume_opts)
        self.hostname = socket.gethostname()
        self.vg = vg_obj
        self.backend_name =\
            self.configuration.safe_get('volume_backend_name') or 'LVM'
        # Target Driver is what handles data-transport
        # Transport specific code should NOT be in
        # the driver (control path), this way
        # different target drivers can be added (iscsi, FC etc)
        # NOTE(review): target_mapping is presumably provided by the base
        # driver class, mapping iscsi_helper values to import paths — it
        # is not defined in this file; confirm against driver.VolumeDriver.
        target_driver = \
            self.target_mapping[self.configuration.safe_get('iscsi_helper')]
        LOG.debug('Attempting to initialize LVM driver with the '
                  'following target_driver: %s',
                  target_driver)
        self.target_driver = importutils.import_object(
            target_driver,
            configuration=self.configuration,
            db=self.db,
            executor=self._execute)
        self.protocol = self.target_driver.protocol
        # Flipped to True in check_for_setup_error when lvm_type is thin.
        self.sparse_copy_volume = False
def _sizestr(self, size_in_g):
return '%sg' % size_in_g
def _volume_not_present(self, volume_name):
return self.vg.get_volume(volume_name) is None
def _delete_volume(self, volume, is_snapshot=False):
"""Deletes a logical volume."""
if self.configuration.volume_clear != 'none' and \
self.configuration.lvm_type != 'thin':
self._clear_volume(volume, is_snapshot)
name = volume['name']
if is_snapshot:
name = self._escape_snapshot(volume['name'])
self.vg.delete(name)
    def _clear_volume(self, volume, is_snapshot=False):
        """Overwrite an LV's device before deletion so data cannot leak.

        Raises VolumeBackendAPIException when the device path is missing
        and InvalidParameterValue when no size can be determined.
        """
        # zero out old volumes to prevent data leaking between users
        # TODO(ja): reclaiming space should be done lazy and low priority
        if is_snapshot:
            # if the volume to be cleared is a snapshot of another volume
            # we need to clear out the volume using the -cow instead of the
            # directly volume path. We need to skip this if we are using
            # thin provisioned LVs.
            # bug# lp1191812
            dev_path = self.local_path(volume) + "-cow"
        else:
            dev_path = self.local_path(volume)
        # TODO(jdg): Maybe we could optimize this for snaps by looking at
        # the cow table and only overwriting what's necessary?
        # for now we're still skipping on snaps due to hang issue
        if not os.path.exists(dev_path):
            msg = (_('Volume device file path %s does not exist.')
                   % dev_path)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # Snapshot dicts carry 'volume_size'; volume dicts carry 'size'.
        size_in_g = volume.get('volume_size') or volume.get('size')
        if size_in_g is None:
            msg = (_("Size for volume: %s not found, cannot secure delete.")
                   % volume['id'])
            LOG.error(msg)
            raise exception.InvalidParameterValue(msg)
        # clear_volume expects sizes in MiB, we store integer GiB
        # be sure to convert before passing in
        vol_sz_in_meg = size_in_g * units.Ki
        volutils.clear_volume(
            vol_sz_in_meg, dev_path,
            volume_clear=self.configuration.volume_clear,
            volume_clear_size=self.configuration.volume_clear_size)
def _escape_snapshot(self, snapshot_name):
# Linux LVM reserves name that starts with snapshot, so that
# such volume name can't be created. Mangle it.
if not snapshot_name.startswith('snapshot'):
return snapshot_name
return '_' + snapshot_name
def _create_volume(self, name, size, lvm_type, mirror_count, vg=None):
vg_ref = self.vg
if vg is not None:
vg_ref = vg
vg_ref.create_volume(name, size, lvm_type, mirror_count)
    def _update_volume_stats(self):
        """Retrieve stats info from volume group and cache in self._stats."""
        LOG.debug("Updating volume stats")
        if self.vg is None:
            LOG.warning(_LW('Unable to update stats on non-initialized '
                            'Volume Group: %s'),
                        self.configuration.volume_group)
            return
        self.vg.update_volume_group_info()
        data = {}
        # Note(zhiteng): These information are driver/backend specific,
        # each driver may define these values in its own config options
        # or fetch from driver specific configuration file.
        data["volume_backend_name"] = self.backend_name
        data["vendor_name"] = 'Open Source'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = self.protocol
        data["pools"] = []
        total_capacity = 0
        free_capacity = 0
        # Capacity reporting differs by layout: mirrored, thin, or plain.
        if self.configuration.lvm_mirrors > 0:
            total_capacity =\
                self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
            free_capacity =\
                self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
            provisioned_capacity = round(
                float(total_capacity) - float(free_capacity), 2)
        elif self.configuration.lvm_type == 'thin':
            total_capacity = self.vg.vg_thin_pool_size
            free_capacity = self.vg.vg_thin_pool_free_space
            provisioned_capacity = self.vg.vg_provisioned_capacity
        else:
            total_capacity = self.vg.vg_size
            free_capacity = self.vg.vg_free_space
            provisioned_capacity = round(
                float(total_capacity) - float(free_capacity), 2)
        # Encodes this backend's identity so migrate_volume can detect a
        # same-host destination (parsed back with str.split(':')).
        location_info = \
            ('LVMVolumeDriver:%(hostname)s:%(vg)s'
             ':%(lvm_type)s:%(lvm_mirrors)s' %
             {'hostname': self.hostname,
              'vg': self.configuration.volume_group,
              'lvm_type': self.configuration.lvm_type,
              'lvm_mirrors': self.configuration.lvm_mirrors})
        thin_enabled = self.configuration.lvm_type == 'thin'
        # Calculate the total volumes used by the VG group.
        # This includes volumes and snapshots.
        total_volumes = len(self.vg.get_volumes())
        # Skip enabled_pools setting, treat the whole backend as one pool
        # XXX FIXME if multipool support is added to LVM driver.
        single_pool = {}
        single_pool.update(dict(
            pool_name=data["volume_backend_name"],
            total_capacity_gb=total_capacity,
            free_capacity_gb=free_capacity,
            reserved_percentage=self.configuration.reserved_percentage,
            location_info=location_info,
            QoS_support=False,
            provisioned_capacity_gb=provisioned_capacity,
            max_over_subscription_ratio=(
                self.configuration.max_over_subscription_ratio),
            thin_provisioning_support=thin_enabled,
            thick_provisioning_support=not thin_enabled,
            total_volumes=total_volumes,
            filter_function=self.get_filter_function(),
            goodness_function=self.get_goodness_function(),
            multiattach=True
        ))
        data["pools"].append(single_pool)
        # Check availability of sparse volume copy.
        data['sparse_copy_volume'] = self.configuration.lvm_type == 'thin'
        self._stats = data
    def check_for_setup_error(self):
        """Verify that requirements are in place to use LVM driver."""
        if self.vg is None:
            root_helper = utils.get_root_helper()
            lvm_conf_file = self.configuration.lvm_conf_file
            if lvm_conf_file.lower() == 'none':
                lvm_conf_file = None
            try:
                self.vg = lvm.LVM(self.configuration.volume_group,
                                  root_helper,
                                  lvm_type=self.configuration.lvm_type,
                                  executor=self._execute,
                                  lvm_conf=lvm_conf_file)
            except exception.VolumeGroupNotFound:
                message = (_("Volume Group %s does not exist") %
                           self.configuration.volume_group)
                raise exception.VolumeBackendAPIException(data=message)
        vg_list = volutils.get_all_volume_groups(
            self.configuration.volume_group)
        # NOTE(review): next() raises StopIteration when no VG matches,
        # so the "is None" check below appears unreachable — confirm
        # whether a default (next(..., None)) was intended.
        vg_dict = \
            next(vg for vg in vg_list if vg['name'] == self.vg.vg_name)
        if vg_dict is None:
            message = (_("Volume Group %s does not exist") %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)
        pool_name = "%s-pool" % self.configuration.volume_group
        if self.configuration.lvm_type == 'auto':
            # Default to thin provisioning if it is supported and
            # the volume group is empty, or contains a thin pool
            # for us to use.
            self.vg.update_volume_group_info()
            self.configuration.lvm_type = 'default'
            if volutils.supports_thin_provisioning():
                if self.vg.get_volume(pool_name) is not None:
                    LOG.info(_LI('Enabling LVM thin provisioning by default '
                                 'because a thin pool exists.'))
                    self.configuration.lvm_type = 'thin'
                elif len(self.vg.get_volumes()) == 0:
                    LOG.info(_LI('Enabling LVM thin provisioning by default '
                                 'because no LVs exist.'))
                    self.configuration.lvm_type = 'thin'
        if self.configuration.lvm_type == 'thin':
            # Specific checks for using Thin provisioned LV's
            if not volutils.supports_thin_provisioning():
                message = _("Thin provisioning not supported "
                            "on this version of LVM.")
                raise exception.VolumeBackendAPIException(data=message)
            if self.vg.get_volume(pool_name) is None:
                try:
                    self.vg.create_thin_pool(pool_name)
                except processutils.ProcessExecutionError as exc:
                    exception_message = (_("Failed to create thin pool, "
                                           "error message was: %s")
                                         % six.text_type(exc.stderr))
                    raise exception.VolumeBackendAPIException(
                        data=exception_message)
            # Enable sparse copy since lvm_type is 'thin'
            self.sparse_copy_volume = True
def create_volume(self, volume):
"""Creates a logical volume."""
mirror_count = 0
if self.configuration.lvm_mirrors:
mirror_count = self.configuration.lvm_mirrors
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
mirror_count)
    def update_migrated_volume(self, ctxt, volume, new_volume,
                               original_volume_status):
        """Return model update from LVM for migrated volume.
        This method should rename the back-end volume name(id) on the
        destination host back to its original name(id) on the source host.
        :param ctxt: The context used to run the method update_migrated_volume
        :param volume: The original volume that was migrated to this backend
        :param new_volume: The migration volume object that was created on
                           this backend as part of the migration process
        :param original_volume_status: The status of the original volume
        :return model_update to update DB with any needed changes
        """
        # On a successful rename both fields stay None (no DB change).
        name_id = None
        provider_location = None
        if original_volume_status == 'available':
            current_name = CONF.volume_name_template % new_volume['id']
            original_volume_name = CONF.volume_name_template % volume['id']
            try:
                self.vg.rename_volume(current_name, original_volume_name)
            except processutils.ProcessExecutionError:
                LOG.error(_LE('Unable to rename the logical volume '
                              'for volume: %s'), volume['name'])
                # If the rename fails, _name_id should be set to the new
                # volume id and provider_location should be set to the
                # one from the new volume as well.
                name_id = new_volume['_name_id'] or new_volume['id']
                provider_location = new_volume['provider_location']
        else:
            # The back-end will not be renamed.
            name_id = new_volume['_name_id'] or new_volume['id']
            provider_location = new_volume['provider_location']
        return {'_name_id': name_id, 'provider_location': provider_location}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
self._create_volume(volume['name'],
self._sizestr(volume['size']),
self.configuration.lvm_type,
self.configuration.lvm_mirrors)
# Some configurations of LVM do not automatically activate
# ThinLVM snapshot LVs.
self.vg.activate_lv(snapshot['name'], is_snapshot=True)
# copy_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute,
sparse=self.sparse_copy_volume)
def delete_volume(self, volume):
"""Deletes a logical volume."""
# NOTE(jdg): We don't need to explicitly call
# remove export here because we already did it
# in the manager before we got here.
if self._volume_not_present(volume['name']):
# If the volume isn't present, then don't attempt to delete
return True
if self.vg.lv_has_snapshot(volume['name']):
LOG.error(_LE('Unable to delete due to existing snapshot '
'for volume: %s'), volume['name'])
raise exception.VolumeIsBusy(volume_name=volume['name'])
self._delete_volume(volume)
LOG.info(_LI('Successfully deleted volume: %s'), volume['id'])
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
self.vg.create_lv_snapshot(self._escape_snapshot(snapshot['name']),
snapshot['volume_name'],
self.configuration.lvm_type)
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
LOG.warning(_LW("snapshot: %s not found, "
"skipping delete operations"), snapshot['name'])
LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
# it's quite slow.
self._delete_volume(snapshot, is_snapshot=True)
def local_path(self, volume, vg=None):
if vg is None:
vg = self.configuration.volume_group
# NOTE(vish): stops deprecation warning
escaped_group = vg.replace('-', '--')
escaped_name = self._escape_snapshot(volume['name']).replace('-', '--')
return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)
def copy_image_to_volume(self, context, volume, image_service, image_id):
"""Fetch the image from image_service and write it to the volume."""
image_utils.fetch_to_raw(context,
image_service,
image_id,
self.local_path(volume),
self.configuration.volume_dd_blocksize,
size=volume['size'])
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        if self.configuration.lvm_type == 'thin':
            # Thin LVM: the clone is a writable snapshot of the source,
            # so no data copy is needed.
            self.vg.create_lv_snapshot(volume['name'],
                                       src_vref['name'],
                                       self.configuration.lvm_type)
            if volume['size'] > src_vref['size']:
                LOG.debug("Resize the new volume to %s.", volume['size'])
                self.extend_volume(volume, volume['size'])
            self.vg.activate_lv(volume['name'], is_snapshot=True,
                                permanent=True)
            return
        # Thick LVM: snapshot the source, then dd the snapshot into a
        # freshly created LV.
        mirror_count = 0
        if self.configuration.lvm_mirrors:
            mirror_count = self.configuration.lvm_mirrors
        LOG.info(_LI('Creating clone of volume: %s'), src_vref['id'])
        volume_name = src_vref['name']
        temp_id = 'tmp-snap-%s' % volume['id']
        temp_snapshot = {'volume_name': volume_name,
                         'size': src_vref['size'],
                         'volume_size': src_vref['size'],
                         'name': 'clone-snap-%s' % volume['id'],
                         'id': temp_id}
        self.create_snapshot(temp_snapshot)
        # copy_volume expects sizes in MiB, we store integer GiB
        # be sure to convert before passing in
        try:
            self._create_volume(volume['name'],
                                self._sizestr(volume['size']),
                                self.configuration.lvm_type,
                                mirror_count)
            self.vg.activate_lv(temp_snapshot['name'], is_snapshot=True)
            volutils.copy_volume(
                self.local_path(temp_snapshot),
                self.local_path(volume),
                src_vref['size'] * units.Ki,
                self.configuration.volume_dd_blocksize,
                execute=self._execute,
                sparse=self.sparse_copy_volume)
        finally:
            # Always drop the temporary snapshot, even if the copy failed.
            self.delete_snapshot(temp_snapshot)
    def clone_image(self, context, volume,
                    image_location, image_meta,
                    image_service):
        """Efficient image cloning is not supported by this driver.

        Returns (None, False) so the manager falls back to the generic
        copy-image-to-volume path.
        """
        return None, False
    def backup_volume(self, context, backup, backup_service):
        """Create a new backup from an existing volume."""
        volume = self.db.volume_get(context, backup.volume_id)
        temp_snapshot = None
        previous_status = volume['previous_status']
        if previous_status == 'in-use':
            # In-use volumes are backed up from a temporary snapshot so
            # the data read is consistent.
            temp_snapshot = self._create_temp_snapshot(context, volume)
            backup.temp_snapshot_id = temp_snapshot.id
            backup.save()
            volume_path = self.local_path(temp_snapshot)
        else:
            volume_path = self.local_path(volume)
        try:
            with utils.temporary_chown(volume_path):
                with open(volume_path) as volume_file:
                    backup_service.backup(backup, volume_file)
        finally:
            # Clean up the temporary snapshot even when the backup fails.
            if temp_snapshot:
                self._delete_temp_snapshot(context, temp_snapshot)
                backup.temp_snapshot_id = None
                backup.save()
def restore_backup(self, context, backup, volume, backup_service):
"""Restore an existing backup to a new or existing volume."""
volume_path = self.local_path(volume)
with utils.temporary_chown(volume_path):
with open(volume_path, 'wb') as volume_file:
backup_service.restore(backup, volume['id'], volume_file)
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update the stats first.
"""
if refresh:
self._update_volume_stats()
return self._stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume's size."""
self.vg.extend_volume(volume['name'],
self._sizestr(new_size))
    def manage_existing(self, volume, existing_ref):
        """Manages an existing LV.
        Renames the LV to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated.

        :param volume: volume dict whose 'name' becomes the LV's new name
        :param existing_ref: dict with 'source-name' naming the LV
        """
        lv_name = existing_ref['source-name']
        self.vg.get_volume(lv_name)
        if volutils.check_already_managed_volume(self.db, lv_name):
            raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)
        # Attempt to rename the LV to match the OpenStack internal name.
        try:
            self.vg.rename_volume(lv_name, volume['name'])
        except processutils.ProcessExecutionError as exc:
            exception_message = (_("Failed to rename logical volume %(name)s, "
                                   "error message was: %(err_msg)s")
                                 % {'name': lv_name,
                                    'err_msg': exc.stderr})
            raise exception.VolumeBackendAPIException(
                data=exception_message)
    def manage_existing_object_get_size(self, existing_object, existing_ref,
                                        object_type):
        """Return size of an existing LV for manage existing volume/snapshot.
        existing_ref is a dictionary of the form:
        {'source-name': <name of LV>}

        :param object_type: 'volume' or 'snapshot', used in error messages
        """
        # Check that the reference is valid
        if 'source-name' not in existing_ref:
            reason = _('Reference must contain source-name element.')
            raise exception.ManageExistingInvalidReference(
                existing_ref=existing_ref, reason=reason)
        lv_name = existing_ref['source-name']
        lv = self.vg.get_volume(lv_name)
        # Raise an exception if we didn't find a suitable LV.
        if not lv:
            kwargs = {'existing_ref': lv_name,
                      'reason': 'Specified logical volume does not exist.'}
            raise exception.ManageExistingInvalidReference(**kwargs)
        # LV size is returned in gigabytes. Attempt to parse size as a float
        # and round up to the next integer.
        try:
            lv_size = int(math.ceil(float(lv['size'])))
        except ValueError:
            exception_message = (_("Failed to manage existing %(type)s "
                                   "%(name)s, because reported size %(size)s "
                                   "was not a floating-point number.")
                                 % {'type': object_type,
                                    'name': lv_name,
                                    'size': lv['size']})
            raise exception.VolumeBackendAPIException(
                data=exception_message)
        return lv_size
def manage_existing_get_size(self, volume, existing_ref):
return self.manage_existing_object_get_size(volume, existing_ref,
"volume")
def manage_existing_snapshot_get_size(self, snapshot, existing_ref):
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing_object_get_size(snapshot, existing_ref,
"snapshot")
def manage_existing_snapshot(self, snapshot, existing_ref):
dest_name = self._escape_snapshot(snapshot['name'])
snapshot_temp = {"name": dest_name}
if not isinstance(existing_ref, dict):
existing_ref = {"source-name": existing_ref}
return self.manage_existing(snapshot_temp, existing_ref)
    def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
        """Optimize the migration if the destination is on the same server.
        If the specified host is another back-end on the same server, and
        the volume is not attached, we can do the migration locally without
        going through iSCSI.

        :returns: (True, None) when migrated locally, (False, None) when
                  the generic migration path should be used instead
        """
        false_ret = (False, None)
        if volume['status'] != 'available':
            return false_ret
        if 'location_info' not in host['capabilities']:
            return false_ret
        info = host['capabilities']['location_info']
        # location_info was encoded by _update_volume_stats as
        # 'LVMVolumeDriver:<hostname>:<vg>:<lvm_type>:<lvm_mirrors>'.
        try:
            (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
                info.split(':')
            lvm_mirrors = int(lvm_mirrors)
        except ValueError:
            return false_ret
        if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
            return false_ret
        if dest_vg != self.vg.vg_name:
            vg_list = volutils.get_all_volume_groups()
            try:
                next(vg for vg in vg_list if vg['name'] == dest_vg)
            except StopIteration:
                LOG.error(_LE("Destination Volume Group %s does not exist"),
                          dest_vg)
                return false_ret
            helper = utils.get_root_helper()
            lvm_conf_file = self.configuration.lvm_conf_file
            if lvm_conf_file.lower() == 'none':
                lvm_conf_file = None
            dest_vg_ref = lvm.LVM(dest_vg, helper,
                                  lvm_type=lvm_type,
                                  executor=self._execute,
                                  lvm_conf=lvm_conf_file)
            self._create_volume(volume['name'],
                                self._sizestr(volume['size']),
                                lvm_type,
                                lvm_mirrors,
                                dest_vg_ref)
            # copy_volume expects sizes in MiB, we store integer GiB
            # be sure to convert before passing in
            size_in_mb = int(volume['size']) * units.Ki
            volutils.copy_volume(self.local_path(volume),
                                 self.local_path(volume, vg=dest_vg),
                                 size_in_mb,
                                 self.configuration.volume_dd_blocksize,
                                 execute=self._execute,
                                 sparse=self.sparse_copy_volume)
            # Source LV is removed only after a successful copy.
            self._delete_volume(volume)
            return (True, None)
        else:
            message = (_("Refusing to migrate volume ID: %(id)s. Please "
                         "check your configuration because source and "
                         "destination are the same Volume Group: %(name)s.") %
                       {'id': volume['id'], 'name': self.vg.vg_name})
            LOG.exception(message)
            raise exception.VolumeBackendAPIException(data=message)
def get_pool(self, volume):
return self.backend_name
# ####### Interface methods for DataPath (Target Driver) ########
def ensure_export(self, context, volume):
volume_path = "/dev/%s/%s" % (self.configuration.volume_group,
volume['name'])
model_update = \
self.target_driver.ensure_export(context, volume, volume_path)
return model_update
def create_export(self, context, volume, connector, vg=None):
if vg is None:
vg = self.configuration.volume_group
volume_path = "/dev/%s/%s" % (vg, volume['name'])
export_info = self.target_driver.create_export(
context,
volume,
volume_path)
return {'provider_location': export_info['location'],
'provider_auth': export_info['auth'], }
def remove_export(self, context, volume):
self.target_driver.remove_export(context, volume)
def initialize_connection(self, volume, connector):
return self.target_driver.initialize_connection(volume, connector)
def validate_connector(self, connector):
return self.target_driver.validate_connector(connector)
def terminate_connection(self, volume, connector, **kwargs):
return self.target_driver.terminate_connection(volume, connector,
**kwargs)
class LVMISCSIDriver(LVMVolumeDriver):
    """Deprecated alias for LVMVolumeDriver, kept for naming back-compat.

    iSCSI transport is now handled by the configured target driver, so
    this subclass adds nothing beyond a deprecation warning.
    """
    def __init__(self, *args, **kwargs):
        super(LVMISCSIDriver, self).__init__(*args, **kwargs)
        msg = _LW('LVMISCSIDriver is deprecated, you should now just use '
                  'LVMVolumeDriver and specify iscsi_helper for the '
                  'target driver you wish to use.')
        LOG.warning(msg)
class LVMISERDriver(LVMVolumeDriver):
    """Empty class designation for LVMISER.
    Since we've decoupled the inheritance of data path in LVM we
    don't really need this class any longer. We do however want
    to keep it (at least for now) for back compat in driver naming.
    """
    def __init__(self, *args, **kwargs):
        super(LVMISERDriver, self).__init__(*args, **kwargs)
        LOG.warning(_LW('LVMISERDriver is deprecated, you should '
                        'now just use LVMVolumeDriver and specify '
                        'iscsi_helper for the target driver you '
                        'wish to use. In order to enable iser, please '
                        'set iscsi_protocol with the value iser.'))
        LOG.debug('Attempting to initialize LVM driver with the '
                  'following target_driver: '
                  'cinder.volume.targets.iser.ISERTgtAdm')
        # Force the iSER target driver, overriding whatever the parent
        # __init__ selected from the iscsi_helper configuration.
        self.target_driver = importutils.import_object(
            'cinder.volume.targets.iser.ISERTgtAdm',
            configuration=self.configuration,
            db=self.db,
            executor=self._execute)
| apache-2.0 |
hhstore/tornado-annotated | src/tornado-3.2.2/tornado/tcpserver.py | 4 | 10246 | #!/usr/bin/env python
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A non-blocking, single-threaded TCP server."""
from __future__ import absolute_import, division, print_function, with_statement
import errno
import os
import socket
import ssl
from tornado.log import app_log
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream, SSLIOStream
from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket
from tornado import process
class TCPServer(object):
r"""A non-blocking, single-threaded TCP server.
To use `TCPServer`, define a subclass which overrides the `handle_stream`
method.
To make this server serve SSL traffic, send the ssl_options dictionary
argument with the arguments required for the `ssl.wrap_socket` method,
including "certfile" and "keyfile"::
TCPServer(ssl_options={
"certfile": os.path.join(data_dir, "mydomain.crt"),
"keyfile": os.path.join(data_dir, "mydomain.key"),
})
`TCPServer` initialization follows one of three patterns:
1. `listen`: simple single-process::
server = TCPServer()
server.listen(8888)
IOLoop.instance().start()
2. `bind`/`start`: simple multi-process::
server = TCPServer()
server.bind(8888)
server.start(0) # Forks multiple sub-processes
IOLoop.instance().start()
When using this interface, an `.IOLoop` must *not* be passed
to the `TCPServer` constructor. `start` will always start
the server on the default singleton `.IOLoop`.
3. `add_sockets`: advanced multi-process::
sockets = bind_sockets(8888)
tornado.process.fork_processes(0)
server = TCPServer()
server.add_sockets(sockets)
IOLoop.instance().start()
The `add_sockets` interface is more complicated, but it can be
used with `tornado.process.fork_processes` to give you more
flexibility in when the fork happens. `add_sockets` can
also be used in single-process servers if you want to create
your listening sockets in some way other than
`~tornado.netutil.bind_sockets`.
.. versionadded:: 3.1
The ``max_buffer_size`` argument.
"""
def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None):
self.io_loop = io_loop
self.ssl_options = ssl_options
self._sockets = {} # fd -> socket object
self._pending_sockets = []
self._started = False
self.max_buffer_size = max_buffer_size
# Verify the SSL options. Otherwise we don't get errors until clients
# connect. This doesn't verify that the keys are legitimate, but
# the SSL module doesn't do that until there is a connected socket
# which seems like too much work
if self.ssl_options is not None and isinstance(self.ssl_options, dict):
# Only certfile is required: it can contain both keys
if 'certfile' not in self.ssl_options:
raise KeyError('missing key "certfile" in ssl_options')
if not os.path.exists(self.ssl_options['certfile']):
raise ValueError('certfile "%s" does not exist' %
self.ssl_options['certfile'])
if ('keyfile' in self.ssl_options and
not os.path.exists(self.ssl_options['keyfile'])):
raise ValueError('keyfile "%s" does not exist' %
self.ssl_options['keyfile'])
def listen(self, port, address=""):
"""Starts accepting connections on the given port.
This method may be called more than once to listen on multiple ports.
`listen` takes effect immediately; it is not necessary to call
`TCPServer.start` afterwards. It is, however, necessary to start
the `.IOLoop`.
"""
sockets = bind_sockets(port, address=address)
self.add_sockets(sockets)
def add_sockets(self, sockets):
"""Makes this server start accepting connections on the given sockets.
The ``sockets`` parameter is a list of socket objects such as
those returned by `~tornado.netutil.bind_sockets`.
`add_sockets` is typically used in combination with that
method and `tornado.process.fork_processes` to provide greater
control over the initialization of a multi-process server.
"""
if self.io_loop is None:
self.io_loop = IOLoop.current()
for sock in sockets:
self._sockets[sock.fileno()] = sock
add_accept_handler(sock, self._handle_connection,
io_loop=self.io_loop)
    def add_socket(self, socket):
        """Singular version of `add_sockets`. Takes a single socket object."""
        # NOTE: the parameter name shadows the stdlib ``socket`` module,
        # but the module is not referenced inside this method.
        self.add_sockets([socket])
def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128):
"""Binds this server to the given port on the given address.
To start the server, call `start`. If you want to run this server
in a single process, you can call `listen` as a shortcut to the
sequence of `bind` and `start` calls.
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen <socket.socket.listen>`.
This method may be called multiple times prior to `start` to listen
on multiple ports or interfaces.
"""
sockets = bind_sockets(port, address=address, family=family,
backlog=backlog)
if self._started:
self.add_sockets(sockets)
else:
self._pending_sockets.extend(sockets)
def start(self, num_processes=1):
"""Starts this server in the `.IOLoop`.
By default, we run the server in this process and do not fork any
additional child process.
If num_processes is ``None`` or <= 0, we detect the number of cores
available on this machine and fork that number of child
processes. If num_processes is given and > 1, we fork that
specific number of sub-processes.
Since we use processes and not threads, there is no shared memory
between any server code.
Note that multiple processes are not compatible with the autoreload
module (or the ``autoreload=True`` option to `tornado.web.Application`
which defaults to True when ``debug=True``).
When using multiple processes, no IOLoops can be created or
referenced until after the call to ``TCPServer.start(n)``.
"""
assert not self._started
self._started = True
if num_processes != 1:
process.fork_processes(num_processes)
sockets = self._pending_sockets
self._pending_sockets = []
self.add_sockets(sockets)
def stop(self):
"""Stops listening for new connections.
Requests currently in progress may still continue after the
server is stopped.
"""
for fd, sock in self._sockets.items():
self.io_loop.remove_handler(fd)
sock.close()
def handle_stream(self, stream, address):
"""Override to handle a new `.IOStream` from an incoming connection."""
raise NotImplementedError()
    def _handle_connection(self, connection, address):
        # Accept callback: optionally wraps the raw accepted socket in SSL,
        # then hands an IOStream for it to handle_stream().
        if self.ssl_options is not None:
            assert ssl, "Python 2.6+ and OpenSSL required for SSL"
            try:
                # Handshake is deferred (do_handshake_on_connect=False) so it
                # runs asynchronously inside SSLIOStream rather than blocking here.
                connection = ssl_wrap_socket(connection,
                                             self.ssl_options,
                                             server_side=True,
                                             do_handshake_on_connect=False)
            except ssl.SSLError as err:
                if err.args[0] == ssl.SSL_ERROR_EOF:
                    # Peer hung up before the handshake; nothing to serve.
                    return connection.close()
                else:
                    raise
            except socket.error as err:
                # If the connection is closed immediately after it is created
                # (as in a port scan), we can get one of several errors.
                # wrap_socket makes an internal call to getpeername,
                # which may return either EINVAL (Mac OS X) or ENOTCONN
                # (Linux). If it returns ENOTCONN, this error is
                # silently swallowed by the ssl module, so we need to
                # catch another error later on (AttributeError in
                # SSLIOStream._do_ssl_handshake).
                # To test this behavior, try nmap with the -sT flag.
                # https://github.com/tornadoweb/tornado/pull/750
                if err.args[0] in (errno.ECONNABORTED, errno.EINVAL):
                    return connection.close()
                else:
                    raise
        try:
            if self.ssl_options is not None:
                stream = SSLIOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
            else:
                stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size)
            self.handle_stream(stream, address)
        except Exception:
            # Never let an error in user callback code kill the accept loop;
            # log it and keep serving other connections.
            app_log.error("Error in connection callback", exc_info=True)
| mit |
phil-el/phetools | statistics/not_transcluded.py | 1 | 4625 | # -*- coding: utf-8 -*-
#
# @file transclusions.py
#
# @remark Copyright 2016 Philippe Elie
# @remark Read the file COPYING
#
# @author Philippe Elie
import sys
import os
sys.path.append(os.path.expanduser('~/wikisource'))
from ws_category import domain_urls as urls
from ws_namespaces import index as index_name
from common import db
from gen_stats import all_domain
from common import common_html
import urllib
def filter_result(books):
    """Keep only books with at least 5 not-transcluded pages and no recent
    non-bot activity on those pages.

    ``books`` maps an Index title to a list of page_ids of its
    not-transcluded pages.  Returns a list of (page_count, title) tuples
    sorted by decreasing page count.

    NOTE(review): this relies on the module-global ``cursor`` created in
    the __main__ block instead of taking a cursor parameter -- confirm
    this is intentional before reusing outside the script entry point.
    """
    result = []
    for key in books:
        # FIXME: is >= 5 ok ?
        page_ids = books[key]
        if len(page_ids) >= 5:
            # FIXME: this is perhaps not the best way as we check for
            # activity only in pages not transcluded, that means someone
            # is perhaps working on pages already transcluded or is working
            # on red page w/o validating them. Another way will be to get the
            # page id of the index and from that use "related changes" filtered
            # to namespace Page:
            fmt_strs = ','.join(['%s'] * len(page_ids))
            # Count recent non-bot changes touching any of these pages; the
            # book is reported only when there is no such activity.
            cursor.execute("""SELECT count(*)
                              FROM recentchanges
                              WHERE rc_bot=0 AND rc_cur_id IN (%s)
                              """ % fmt_strs,
                           page_ids )
            if not cursor.fetchone()[0]:
                result.append( ( len(page_ids), key ) )
            else:
                print "filtered:", key
    # debug: for replicas missing record
    if False and key == 'Dictionnaire_portatif_de_cuisine,_d’office,_et_de_distillation,_1772.djvu':
        q = 'select page_title from page where page_id in (%s)' % fmt_strs
        cursor.execute(q, page_ids)
        for x in range(cursor.rowcount):
            print cursor.fetchone()[0]
    result.sort(reverse = True)
    return result
def format_html_line(domain, bookname, count):
    """Return one HTML <li> entry linking the Index page and its checker URL."""
    title = index_name['wikisource'][domain] + ':' + bookname
    # the multilingual ('old') wikisource lives under the 'mul' subdomain
    if domain == 'old':
        domain = 'mul'
    index_link = '<a href="//%s.wikisource.org/wiki/%s">%s</a> %d'
    pieces = [
        '<li>',
        index_link % (domain, urllib.quote(title), bookname, count),
        # checker redirect with a 301 from checker? to checker/? so use
        # directly that url even if it's a bit weird
        ' — <a href="/checker/?db=%s&title=%s">Check pages</a>'
        % (db.database_name(domain, 'wikisource'), title),
        '</li>',
    ]
    return ''.join(pieces)
def not_transcluded(domain, cursor):
# set of Page: in cat 3/4 not transcluded from main
query = """
SELECT page_title, page_id FROM categorylinks LEFT JOIN page ON page_id=cl_from
WHERE cl_to in (%s, %s) AND page_title NOT IN
(SELECT tl_title FROM templatelinks
WHERE tl_namespace=%s AND tl_from_namespace=0);
"""
ns = urls[domain][0]
cat3 = urls[domain][1]
cat4 = urls[domain][2]
cursor.execute(query, [ cat3, cat4, ns ])
print cursor.rowcount
result = {}
for x in range(cursor.rowcount):
title, page_id = cursor.fetchone()
title = title.split('/')[0]
if title[-5:] in [ '.djvu', '.pdf', '.tif' ]:
result.setdefault(title, [])
result[title].append(page_id)
result = filter_result(result)
if False:
out_file = os.path.expanduser('~/tmp/transclusions/%s.txt' % domain)
out_fd = open(out_file, 'w')
for d in result:
print >> out_fd, d[1], d[0]
out_fd.close()
out_file = os.path.expanduser('~/tmp/transclusions/%s.html' % domain)
if os.path.exists(out_file):
os.remove(out_file)
out_fd = open(out_file, 'w')
title = '%s.wikisource.org not transcluded page' % domain
head = common_html.get_head(title, html5 = True).encode('utf-8')
print >> out_fd, head
print >> out_fd, '<body>'
if len(result):
print >> out_fd, '<ol>'
for d in result:
print >> out_fd, format_html_line(domain, d[1], d[0])
print >> out_fd, '</ol>'
else:
"Empty result, no Index meet the criteria to be listed in this file."
print >> out_fd, '\n</body>\n</html>'
out_fd.close()
return len(result)
if __name__ == "__main__":
    tot_count = 0
    # Build one report per wikisource language domain; the module-global
    # `cursor` bound here is also consumed by filter_result().
    for domain in all_domain:
        print domain
        # Uncomment to restrict the run to a single domain while debugging:
        #if domain != 'fr':
        #    continue
        conn = db.create_conn(domain = domain, family = 'wikisource')
        cursor = db.use_db(conn, domain = domain, family = 'wikisource')
        tot_count += not_transcluded(domain, cursor)
        cursor.close()
        conn.close()
    print "total:", tot_count
| gpl-3.0 |
asen6/amartyasenguptadotcom | django/contrib/localflavor/au/forms.py | 309 | 1629 | """
Australian-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
    """Form field validating an Australian post code (exactly four digits)."""
    default_error_messages = {
        'invalid': _('Enter a 4 digit post code.'),
    }

    def __init__(self, *args, **kwargs):
        # Post codes are always exactly four digits, so explicit length
        # limits are redundant and left unset.
        four_digits = r'^\d{4}$'
        super(AUPostCodeField, self).__init__(four_digits, max_length=None,
                                              min_length=None, *args, **kwargs)
class AUPhoneNumberField(Field):
    """Australian phone number field.

    Accepts any common formatting (parentheses, whitespace, hyphens) but
    requires exactly 10 digits once those characters are stripped.
    """
    default_error_messages = {
        'invalid': u'Phone numbers must contain 10 digits.',
    }

    def clean(self, value):
        """
        Validate a phone number. Strips parentheses, whitespace and hyphens.
        """
        super(AUPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # FIX: use a raw string for the pattern -- '\(' and '\s' in a plain
        # string literal are invalid escape sequences (SyntaxWarning on
        # modern Pythons).  The resulting pattern is byte-identical.
        value = re.sub(r'(\(|\)|\s+|-)', '', smart_unicode(value))
        phone_match = PHONE_DIGITS_RE.search(value)
        if phone_match:
            return u'%s' % phone_match.group(1)
        raise ValidationError(self.error_messages['invalid'])
class AUStateSelect(Select):
"""
A Select widget that uses a list of Australian states/territories as its
choices.
"""
def __init__(self, attrs=None):
from au_states import STATE_CHOICES
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
| bsd-3-clause |
jangorecki/h2o-3 | h2o-py/dynamic_tests/testdir_algos/glm/pyunit_glm_binomial_large.py | 4 | 116214 | from __future__ import print_function
import sys
sys.path.insert(1, "../../../")
import random
import os
import math
import numpy as np
import h2o
import time
from builtins import range
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.grid.grid_search import H2OGridSearch
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
class TestGLMBinomial:
"""
This class is created to test the GLM algo with Binomial family. In this case, the relationship
between the response Y and predictor vector X is assumed to be
Prob(Y = 1|X) = exp(W^T * X + E)/(1+exp(W^T * X + E)) where E is unknown Gaussian noise. We
generate random data set using the exact formula. To evaluate the H2O GLM Model, we run the sklearn
logistic regression with the same data sets and compare the performance of the two. If they are close
enough within a certain tolerance, we declare the H2O model working. When regularization and other
parameters are enabled, we can evaluate H2O GLM model performance by comparing the logloss/accuracy
from H2O model and to the H2O model generated without regularization. As long as they do not deviate
too much, we consider the H2O model performance satisfactory.
In particular, I have written 8 tests in the hope to exercise as many parameters settings of the GLM
algo with Binomial distribution as possible. Tomas has requested 2 tests to be added to test his new
feature of missing_values_handling with predictors with both categorical/real columns. Here is a list
of all tests descriptions:
test1_glm_no_regularization(): sklearn logistic regression model is built.
H2O GLM is built for Binomial family with the same random data sets. We observe
the weights, confusion matrices from the two models. We compare the logloss, prediction
accuracy from the two models to determine if H2O GLM model shall pass the test.
test2_glm_lambda_search(): test lambda search with alpha set to 0.5 per Tomas's
suggestion. Make sure logloss and prediction accuracy generated here is comparable in
value to H2O GLM with no regularization.
test3_glm_grid_search_over_params(): test grid search over
various alpha values while lambda is set to be the best value obtained
from test 2. Cross validation with k=5 and random assignment is enabled
as well. The best model performance hopefully will generate logloss and
prediction accuracies close to H2O with no regularization in test 1.
test4_glm_remove_collinear_columns(): test parameter remove_collinear_columns=True
with lambda set to best lambda from test 2, alpha set to best alpha from Gridsearch
and solver set to the one which generate the smallest validation logloss. The same dataset
is used here except that we randomly choose predictor columns to repeat and scale.
Make sure logloss and prediction accuracies generated here is comparable in value
to H2O GLM model with no regularization.
test5_missing_values(): Test parameter missing_values_handling="MeanImputation" with
only real value predictors. The same data sets as before is used. However, we
go into the predictor matrix and randomly decide to replace a value with
nan and create missing values. Sklearn logistic regression model is built using the
data set where we have imputed the missing values. This Sklearn model will be used to
compare our H2O models with.
test6_enum_missing_values(): Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns). We first generate a data set that
contains a random number of columns of categorical and real value columns. Next, we
encode the categorical columns. Then, we generate the random data set using the formula
as before. Next, we go into the predictor matrix and randomly
decide to change a value to be nan and create missing values. Again, we build a Sklearn
logistic regression model and compare our H2O model with it.
test7_missing_enum_values_lambda_search(): Test parameter
missing_values_handling="MeanImputation" with mixed predictors (categorical/real value columns).
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns) and setting lambda search to be True.
We use the same prediction data with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# parameters set by users, change with care
max_col_count = 50 # set maximum values of train/test row and column counts
max_col_count_ratio = 500 # set max row count to be multiples of col_count to avoid overfitting
min_col_count_ratio = 100 # set min row count to be multiples of col_count to avoid overfitting
###### for debugging
# max_col_count = 5 # set maximum values of train/test row and column counts
# max_col_count_ratio = 50 # set max row count to be multiples of col_count to avoid overfitting
# min_col_count_ratio = 10
max_p_value = 2 # set maximum predictor value
min_p_value = -2 # set minimum predictor value
max_w_value = 2 # set maximum weight value
min_w_value = -2 # set minimum weight value
enum_levels = 5 # maximum number of levels for categorical variables not counting NAs
class_method = 'probability' # can be 'probability' or 'threshold', control how discrete response is generated
test_class_method = 'probability' # for test data set
margin = 0.0 # only used when class_method = 'threshold'
test_class_margin = 0.2 # for test data set
family = 'binomial' # this test is for Binomial GLM
curr_time = str(round(time.time()))
# parameters denoting filenames of interested that store training/validation/test data sets
training_filename = family+"_"+curr_time+"_training_set.csv"
training_filename_duplicate = family+"_"+curr_time+"_training_set_duplicate.csv"
training_filename_nans = family+"_"+curr_time+"_training_set_NA.csv"
training_filename_enum = family+"_"+curr_time+"_training_set_enum.csv"
training_filename_enum_true_one_hot = family+"_"+curr_time+"_training_set_enum_trueOneHot.csv"
training_filename_enum_nans = family+"_"+curr_time+"_training_set_enum_NAs.csv"
training_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_training_set_enum_NAs_trueOneHot.csv"
validation_filename = family+"_"+curr_time+"_validation_set.csv"
validation_filename_enum = family+"_"+curr_time+"_validation_set_enum.csv"
validation_filename_enum_true_one_hot = family+"_"+curr_time+"_validation_set_enum_trueOneHot.csv"
validation_filename_enum_nans = family+"_"+curr_time+"_validation_set_enum_NAs.csv"
validation_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_validation_set_enum_NAs_trueOneHot.csv"
test_filename = family+"_"+curr_time+"_test_set.csv"
test_filename_duplicate = family+"_"+curr_time+"_test_set_duplicate.csv"
test_filename_nans = family+"_"+curr_time+"_test_set_NA.csv"
test_filename_enum = family+"_"+curr_time+"_test_set_enum.csv"
test_filename_enum_true_one_hot = family+"_"+curr_time+"_test_set_enum_trueOneHot.csv"
test_filename_enum_nans = family+"_"+curr_time+"_test_set_enum_NAs.csv"
test_filename_enum_nans_true_one_hot = family+"_"+curr_time+"_test_set_enum_NAs_trueOneHot.csv"
weight_filename = family+"_"+curr_time+"_weight.csv"
weight_filename_enum = family+"_"+curr_time+"_weight_enum.csv"
total_test_number = 7 # total number of tests being run for GLM Binomial family
ignored_eps = 1e-15 # if p-values < than this value, no comparison is performed, only for Gaussian
allowed_diff = 0.1 # tolerance of comparison for logloss/prediction accuracy, okay to be loose. Condition
# to run the codes are different
duplicate_col_counts = 5 # maximum number of times to duplicate a column
duplicate_threshold = 0.2 # for each column, a coin is tossed to see if we duplicate that column or not
duplicate_max_scale = 2 # maximum scale factor for duplicated columns
nan_fraction = 0.2 # denote maximum fraction of NA's to be inserted into a column
# System parameters, do not change. Dire consequences may follow if you do
current_dir = os.path.dirname(os.path.realpath(sys.argv[1])) # directory of this test file
enum_col = 0 # set maximum number of categorical columns in predictor
enum_level_vec = [] # vector containing number of levels for each categorical column
noise_std = 0 # noise variance in Binomial noise generation added to response
train_row_count = 0 # training data row count, randomly generated later
train_col_count = 0 # training data column count, randomly generated later
class_number = 2 # actual number of classes existed in data set, randomly generated later
data_type = 2 # determine data type of data set and weight, 1: integers, 2: real
# parameters denoting filenames with absolute paths
training_data_file = os.path.join(current_dir, training_filename)
training_data_file_duplicate = os.path.join(current_dir, training_filename_duplicate)
training_data_file_nans = os.path.join(current_dir, training_filename_nans)
training_data_file_enum = os.path.join(current_dir, training_filename_enum)
training_data_file_enum_true_one_hot = os.path.join(current_dir, training_filename_enum_true_one_hot)
training_data_file_enum_nans = os.path.join(current_dir, training_filename_enum_nans)
training_data_file_enum_nans_true_one_hot = os.path.join(current_dir, training_filename_enum_nans_true_one_hot)
validation_data_file = os.path.join(current_dir, validation_filename)
validation_data_file_enum = os.path.join(current_dir, validation_filename_enum)
validation_data_file_enum_true_one_hot = os.path.join(current_dir, validation_filename_enum_true_one_hot)
validation_data_file_enum_nans = os.path.join(current_dir, validation_filename_enum_nans)
validation_data_file_enum_nans_true_one_hot = os.path.join(current_dir, validation_filename_enum_nans_true_one_hot)
test_data_file = os.path.join(current_dir, test_filename)
test_data_file_duplicate = os.path.join(current_dir, test_filename_duplicate)
test_data_file_nans = os.path.join(current_dir, test_filename_nans)
test_data_file_enum = os.path.join(current_dir, test_filename_enum)
test_data_file_enum_true_one_hot = os.path.join(current_dir, test_filename_enum_true_one_hot)
test_data_file_enum_nans = os.path.join(current_dir, test_filename_enum_nans)
test_data_file_enum_nans_true_one_hot = os.path.join(current_dir, test_filename_enum_nans_true_one_hot)
weight_data_file = os.path.join(current_dir, weight_filename)
weight_data_file_enum = os.path.join(current_dir, weight_filename_enum)
# store template model performance values for later comparison
test1_model = None # store template model for later comparison
test1_model_metrics = None # store template model test metrics for later comparison
best_lambda = 0.0 # store best lambda obtained using lambda search
test_name = "pyunit_glm_binomial.py" # name of this test
sandbox_dir = "" # sandbox directory where we are going to save our failed test data sets
# store information about training data set, validation and test data sets that are used
# by many tests. We do not want to keep loading them for each set in the hope of
# saving time. Trading off memory and speed here.
x_indices = [] # store predictor indices in the data set
y_index = [] # store response index in the data set
training_data = [] # store training data set
test_data = [] # store test data set
valid_data = [] # store validation data set
training_data_grid = [] # store combined training and validation data set for cross validation
best_alpha = 0.5 # store best alpha value found
best_grid_logloss = -1 # store lowest MSE found from grid search
test_failed_array = [0]*total_test_number # denote test results for all tests run. 1 error, 0 pass
test_num = 0 # index representing which test is being run
duplicate_col_indices = [] # denote column indices when column duplication is applied
duplicate_col_scales = [] # store scaling factor for all columns when duplication is applied
noise_var = noise_std*noise_std # Binomial noise variance
test_failed = 0 # count total number of tests that have failed
sklearn_class_weight = {} # used to make sure Sklearn will know the correct number of classes
def __init__(self):
self.setup()
def setup(self):
"""
This function performs all initializations necessary:
1. generates all the random values for our dynamic tests like the Binomial
noise std, column count and row count for training data set;
2. generate the training/validation/test data sets with only real values;
3. insert missing values into training/valid/test data sets.
4. taken the training/valid/test data sets, duplicate random certain columns,
each duplicated column is repeated for a random number of times and randomly scaled;
5. generate the training/validation/test data sets with predictors containing enum
and real values as well***.
6. insert missing values into the training/validation/test data sets with predictors
containing enum and real values as well
*** according to Tomas, when working with mixed predictors (contains both enum/real
value columns), the encoding used is different when regularization is enabled or disabled.
When regularization is enabled, true one hot encoding is enabled to encode the enum
values to binary bits. When regularization is disabled, a reference level plus one hot encoding
is enabled when encoding the enum values to binary bits. One data set is generated
when we work with mixed predictors.
"""
# clean out the sandbox directory first
self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)
# randomly set Binomial noise standard deviation as a fraction of actual predictor standard deviation
self.noise_std = random.uniform(0, math.sqrt(pow((self.max_p_value - self.min_p_value), 2) / 12))
self.noise_var = self.noise_std*self.noise_std
# randomly determine data set size in terms of column and row counts
self.train_col_count = random.randint(3, self.max_col_count) # account for enum columns later
self.train_row_count = int(round(self.train_col_count*random.uniform(self.min_col_count_ratio,
self.max_col_count_ratio)))
# # DEBUGGING setup_data, remember to comment them out once done.
# self.train_col_count = 3
# self.train_row_count = 500
# end DEBUGGING
# randomly set number of enum and real columns in the data set
self.enum_col = random.randint(1, self.train_col_count-1)
# randomly set number of levels for each categorical column
self.enum_level_vec = np.random.random_integers(2, self.enum_levels-1, [self.enum_col, 1])
# generate real value weight vector and training/validation/test data sets for GLM
pyunit_utils.write_syn_floating_point_dataset_glm(self.training_data_file,
self.validation_data_file,
self.test_data_file, self.weight_data_file,
self.train_row_count, self.train_col_count, self.data_type,
self.max_p_value, self.min_p_value, self.max_w_value,
self.min_w_value, self.noise_std, self.family,
self.train_row_count, self.train_row_count,
class_number=self.class_number,
class_method=[self.class_method, self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin,
self.test_class_margin])
# randomly generate the duplicated and scaled columns
(self.duplicate_col_indices, self.duplicate_col_scales) = \
pyunit_utils.random_col_duplication(self.train_col_count, self.duplicate_threshold,
self.duplicate_col_counts, True, self.duplicate_max_scale)
# apply the duplication and scaling to training and test set
# need to add the response column to the end of duplicated column indices and scale
dup_col_indices = self.duplicate_col_indices
dup_col_indices.append(self.train_col_count)
dup_col_scale = self.duplicate_col_scales
dup_col_scale.append(1.0)
# print out duplication information for easy debugging
print("duplication column and duplication scales are: ")
print(dup_col_indices)
print(dup_col_scale)
# print out duplication information for easy debugging
print("duplication column and duplication scales are: ")
print(dup_col_indices)
print(dup_col_scale)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.training_data_file,
self.training_data_file_duplicate)
pyunit_utils.duplicate_scale_cols(dup_col_indices, dup_col_scale, self.test_data_file,
self.test_data_file_duplicate)
# insert NAs into training/test data sets
pyunit_utils.insert_nan_in_data(self.training_data_file, self.training_data_file_nans, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file, self.test_data_file_nans, self.nan_fraction)
# generate data sets with enum as well as real values
pyunit_utils.write_syn_mixed_dataset_glm(self.training_data_file_enum,
self.training_data_file_enum_true_one_hot,
self.validation_data_file_enum,
self.validation_data_file_enum_true_one_hot,
self.test_data_file_enum, self.test_data_file_enum_true_one_hot,
self.weight_data_file_enum, self.train_row_count, self.train_col_count,
self.max_p_value, self.min_p_value, self.max_w_value, self.min_w_value,
self.noise_std, self.family, self.train_row_count,
self.train_row_count, self.enum_col, self.enum_level_vec,
class_number=self.class_number,
class_method=[self.class_method,
self.class_method,
self.test_class_method],
class_margin=[self.margin, self.margin, self.test_class_margin])
# insert NAs into data set with categorical columns
pyunit_utils.insert_nan_in_data(self.training_data_file_enum, self.training_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum, self.validation_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum, self.test_data_file_enum_nans,
self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.training_data_file_enum_true_one_hot,
self.training_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.validation_data_file_enum_true_one_hot,
self.validation_data_file_enum_nans_true_one_hot, self.nan_fraction)
pyunit_utils.insert_nan_in_data(self.test_data_file_enum_true_one_hot,
self.test_data_file_enum_nans_true_one_hot,
self.nan_fraction)
# only preload data sets that will be used for multiple tests and change the response to enums
self.training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file))
# set indices for response and predictor columns in data set for H2O GLM model to use
self.y_index = self.training_data.ncol-1
self.x_indices = list(range(self.y_index))
# added the round() so that this will work on win8.
self.training_data[self.y_index] = self.training_data[self.y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if self.training_data[self.y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
self.valid_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file))
self.valid_data[self.y_index] = self.valid_data[self.y_index].round().asfactor()
self.test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file))
self.test_data[self.y_index] = self.test_data[self.y_index].round().asfactor()
# make a bigger training set for grid search by combining data from validation data set
self.training_data_grid = self.training_data.rbind(self.valid_data)
# setup_data sklearn class weight of all ones. Used only to make sure sklearn know the correct number of classes
for ind in range(self.class_number):
self.sklearn_class_weight[ind] = 1.0
# save the training data files just in case the code crashed.
pyunit_utils.remove_csv_files(self.current_dir, ".csv", action='copy', new_dir_path=self.sandbox_dir)
    def teardown(self):
        """
        This function performs teardown after the dynamic test is completed. If all tests
        passed, it will delete all data sets generated since they can be quite large. It
        will move the training/validation/test data sets into a Rsandbox directory so that
        we can re-run the failed test.
        """
        # Files queued here are deleted at the end; files needed to reproduce
        # a failed test are moved into the sandbox instead.
        remove_files = []

        # create Rsandbox directory to keep data sets and weight information
        self.sandbox_dir = pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, True)

        # Do not want to save all data sets. Only save data sets that are needed for failed tests
        # (test_failed_array indices correspond to test1..test7).
        if sum(self.test_failed_array[0:4]):
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file, self.validation_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
        else:   # remove those files instead of moving them
            remove_files.append(self.training_data_file)
            remove_files.append(self.validation_data_file)
            remove_files.append(self.test_data_file)

        if sum(self.test_failed_array[0:6]):
            pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file, self.weight_filename)
        else:
            remove_files.append(self.weight_data_file)

        # test4 (remove_collinear_columns) needs the duplicated data sets
        if self.test_failed_array[3]:
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_duplicate, self.test_filename_duplicate)
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_duplicate,
                                    self.training_filename_duplicate)
        else:
            remove_files.append(self.training_data_file_duplicate)
            remove_files.append(self.test_data_file_duplicate)

        # test5 (missing values) needs the NA-injected real-valued data sets
        if self.test_failed_array[4]:
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file, self.training_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file, self.test_filename)
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_nans, self.training_filename_nans)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_nans, self.test_filename_nans)
        else:
            remove_files.append(self.training_data_file_nans)
            remove_files.append(self.test_data_file_nans)

        # test6 (enum missing values) needs the NA-injected mixed data sets
        if self.test_failed_array[5]:
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans,
                                    self.training_filename_enum_nans)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans, self.test_filename_enum_nans)
            pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
        else:
            remove_files.append(self.training_data_file_enum_nans)
            remove_files.append(self.training_data_file_enum)
            remove_files.append(self.test_data_file_enum_nans)
            remove_files.append(self.test_data_file_enum)
            remove_files.append(self.validation_data_file_enum_nans)
            remove_files.append(self.validation_data_file_enum)
            remove_files.append(self.weight_data_file_enum)

        # test7 (lambda search with enum NAs) needs the true-one-hot variants
        if self.test_failed_array[6]:
            pyunit_utils.move_files(self.sandbox_dir, self.training_data_file_enum_nans_true_one_hot,
                                    self.training_filename_enum_nans_true_one_hot)
            pyunit_utils.move_files(self.sandbox_dir, self.validation_data_file_enum_nans_true_one_hot,
                                    self.validation_filename_enum_nans_true_one_hot)
            pyunit_utils.move_files(self.sandbox_dir, self.test_data_file_enum_nans_true_one_hot,
                                    self.test_filename_enum_nans_true_one_hot)
            pyunit_utils.move_files(self.sandbox_dir, self.weight_data_file_enum, self.weight_filename_enum)
        else:
            remove_files.append(self.training_data_file_enum_nans_true_one_hot)
            remove_files.append(self.training_data_file_enum_true_one_hot)
            remove_files.append(self.validation_data_file_enum_nans_true_one_hot)
            remove_files.append(self.validation_data_file_enum_true_one_hot)
            remove_files.append(self.test_data_file_enum_nans_true_one_hot)
            remove_files.append(self.test_data_file_enum_true_one_hot)

        if not(self.test_failed):   # all tests have passed.  Delete sandbox if if was not wiped before
            pyunit_utils.make_Rsandbox_dir(self.current_dir, self.test_name, False)

        # delete only the files queued above; other csv files in the shared
        # test directory belong to concurrent runs and must be left alone
        if len(remove_files) > 0:
            for file in remove_files:
                pyunit_utils.remove_files(file)
    def test1_glm_no_regularization(self):
        """
        In this test, a sklearn logistic regression model and a H2O GLM are built for Binomial family with the same
        random data sets. We observe the weights, confusion matrices from the two models. We compare the logloss,
        prediction accuracy from the two models to determine if H2O GLM model shall pass the test.

        Side effects: stores the trained model in self.test1_model and its test metrics in
        self.test1_model_metrics (both are reused as comparison templates by tests 2-4), updates
        self.test_failed / self.test_failed_array[self.test_num], and increments self.test_num.
        """
        print("*******************************************************************************************")
        print("Test1: build H2O GLM with Binomial with no regularization.")
        h2o.cluster_info()
        # training result from python Sklearn logistic regression model
        (p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
            self.sklearn_binomial_result(self.training_data_file, self.test_data_file, False, False)
        # build our H2O model; Lambda=0 disables regularization so weights are comparable to sklearn's
        self.test1_model = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0)
        self.test1_model.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data)
        # calculate test metrics
        self.test1_model_metrics = self.test1_model.model_performance(test_data=self.test_data)
        num_test_failed = self.test_failed  # used to determine if the current test has failed
        # print out comparison results for weight/logloss/prediction accuracy
        self.test_failed = \
            pyunit_utils.extract_comparison_attributes_and_print_multinomial(self.test1_model,
                                                                             self.test1_model_metrics,
                                                                             self.family, "\nTest1 Done!",
                                                                             compare_att_str=[
                                                                                 "\nComparing intercept and "
                                                                                 "weights ....",
                                                                                 "\nComparing logloss from training "
                                                                                 "dataset ....",
                                                                                 "\nComparing logloss from"
                                                                                 " test dataset ....",
                                                                                 "\nComparing confusion matrices from "
                                                                                 "training dataset ....",
                                                                                 "\nComparing confusion matrices from "
                                                                                 "test dataset ...",
                                                                                 "\nComparing accuracy from training "
                                                                                 "dataset ....",
                                                                                 "\nComparing accuracy from test "
                                                                                 "dataset ...."],
                                                                             h2o_att_str=[
                                                                                 "H2O intercept and weights: \n",
                                                                                 "H2O logloss from training dataset: ",
                                                                                 "H2O logloss from test dataset",
                                                                                 "H2O confusion matrix from training "
                                                                                 "dataset: \n",
                                                                                 "H2O confusion matrix from test"
                                                                                 " dataset: \n",
                                                                                 "H2O accuracy from training dataset: ",
                                                                                 "H2O accuracy from test dataset: "],
                                                                             template_att_str=[
                                                                                 "Sklearn intercept and weights: \n",
                                                                                 "Sklearn logloss from training "
                                                                                 "dataset: ",
                                                                                 "Sklearn logloss from test dataset: ",
                                                                                 "Sklearn confusion matrix from"
                                                                                 " training dataset: \n",
                                                                                 "Sklearn confusion matrix from test "
                                                                                 "dataset: \n",
                                                                                 "Sklearn accuracy from training "
                                                                                 "dataset: ",
                                                                                 "Sklearn accuracy from test "
                                                                                 "dataset: "],
                                                                             att_str_fail=[
                                                                                 "Intercept and weights are not equal!",
                                                                                 "Logloss from training dataset differ "
                                                                                 "too much!",
                                                                                 "Logloss from test dataset differ too "
                                                                                 "much!", "", "",
                                                                                 "Accuracies from training dataset "
                                                                                 "differ too much!",
                                                                                 "Accuracies from test dataset differ "
                                                                                 "too much!"],
                                                                             att_str_success=[
                                                                                 "Intercept and weights are close"
                                                                                 " enough!",
                                                                                 "Logloss from training dataset are "
                                                                                 "close enough!",
                                                                                 "Logloss from test dataset are close "
                                                                                 "enough!", "", "",
                                                                                 "Accuracies from training dataset are "
                                                                                 "close enough!",
                                                                                 "Accuracies from test dataset are"
                                                                                 " close enough!"],
                                                                             can_be_better_than_template=[
                                                                                 True, True, True, True, True,
                                                                                 True, True],
                                                                             just_print=[True, True, True, True, True,
                                                                                         True, False],
                                                                             failed_test_number=self.test_failed,
                                                                             template_params=[
                                                                                 p_weights, p_logloss_train, p_cm_train,
                                                                                 p_accuracy_training, p_logloss_test,
                                                                                 p_cm_test, p_accuracy_test],
                                                                             ignored_eps=self.ignored_eps,
                                                                             allowed_diff=self.allowed_diff)
        # print out test results and update test_failed_array status to reflect if this test has failed
        self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test1_glm_no_regularization",
                                                                                num_test_failed, self.test_failed)
        self.test_num += 1  # update test index
    def test2_glm_lambda_search(self):
        """
        This test is used to test the lambda search. Recall that lambda search enables efficient and
        automatic search for the optimal value of the lambda parameter. When lambda search is enabled,
        GLM will first fit a model with maximum regularization and then keep decreasing it until
        over-fitting occurs. The resulting model is based on the best lambda value. According to Tomas,
        set alpha = 0.5 and enable validation but not cross-validation.

        Side effects: stores the discovered best lambda in self.best_lambda (consumed by tests 3 and 4),
        updates self.test_failed / self.test_failed_array[self.test_num], and increments self.test_num.
        Relies on self.test1_model / self.test1_model_metrics produced by test1 as the comparison template.
        """
        print("*******************************************************************************************")
        print("Test2: tests the lambda search.")
        h2o.cluster_info()
        # generate H2O model with lambda search enabled
        model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
                                                      lambda_min_ratio=1e-20)
        model_h2o_0p5.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data,
                            validation_frame=self.valid_data)
        # get best lambda here
        self.best_lambda = pyunit_utils.get_train_glm_params(model_h2o_0p5, 'best_lambda')
        # get test performance here
        h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=self.test_data)
        num_test_failed = self.test_failed
        # print out comparison results for our H2O GLM and test1 H2O model
        self.test_failed = \
            pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
                                                                             self.family, "\nTest2 Done!",
                                                                             test_model=self.test1_model,
                                                                             test_model_metric=self.test1_model_metrics,
                                                                             compare_att_str=[
                                                                                 "\nComparing intercept and"
                                                                                 " weights ....",
                                                                                 "\nComparing logloss from training "
                                                                                 "dataset ....",
                                                                                 "\nComparing logloss from test"
                                                                                 " dataset ....",
                                                                                 "\nComparing confusion matrices from "
                                                                                 "training dataset ....",
                                                                                 "\nComparing confusion matrices from "
                                                                                 "test dataset ...",
                                                                                 "\nComparing accuracy from training "
                                                                                 "dataset ....",
                                                                                 "\nComparing accuracy from test"
                                                                                 " dataset ...."],
                                                                             h2o_att_str=[
                                                                                 "H2O lambda search intercept and "
                                                                                 "weights: \n",
                                                                                 "H2O lambda search logloss from"
                                                                                 " training dataset: ",
                                                                                 "H2O lambda search logloss from test "
                                                                                 "dataset",
                                                                                 "H2O lambda search confusion matrix "
                                                                                 "from training dataset: \n",
                                                                                 "H2O lambda search confusion matrix "
                                                                                 "from test dataset: \n",
                                                                                 "H2O lambda search accuracy from "
                                                                                 "training dataset: ",
                                                                                 "H2O lambda search accuracy from test"
                                                                                 " dataset: "],
                                                                             template_att_str=[
                                                                                 "H2O test1 template intercept and"
                                                                                 " weights: \n",
                                                                                 "H2O test1 template logloss from "
                                                                                 "training dataset: ",
                                                                                 "H2O test1 template logloss from "
                                                                                 "test dataset: ",
                                                                                 "H2O test1 template confusion"
                                                                                 " matrix from training dataset: \n",
                                                                                 "H2O test1 template confusion"
                                                                                 " matrix from test dataset: \n",
                                                                                 "H2O test1 template accuracy from "
                                                                                 "training dataset: ",
                                                                                 "H2O test1 template accuracy from"
                                                                                 " test dataset: "],
                                                                             att_str_fail=[
                                                                                 "Intercept and weights are not equal!",
                                                                                 "Logloss from training dataset differ "
                                                                                 "too much!",
                                                                                 "Logloss from test dataset differ too"
                                                                                 " much!", "", "",
                                                                                 "Accuracies from training dataset"
                                                                                 " differ too much!",
                                                                                 "Accuracies from test dataset differ"
                                                                                 " too much!"],
                                                                             att_str_success=[
                                                                                 "Intercept and weights are close "
                                                                                 "enough!",
                                                                                 "Logloss from training dataset are"
                                                                                 " close enough!",
                                                                                 "Logloss from test dataset are close"
                                                                                 " enough!", "", "",
                                                                                 "Accuracies from training dataset are"
                                                                                 " close enough!",
                                                                                 "Accuracies from test dataset are"
                                                                                 " close enough!"],
                                                                             can_be_better_than_template=[
                                                                                 True, False, True, True, True,
                                                                                 True, True],
                                                                             just_print=[True, False, False, True, True,
                                                                                         True, False],
                                                                             failed_test_number=self.test_failed,
                                                                             ignored_eps=self.ignored_eps,
                                                                             allowed_diff=self.allowed_diff)
        # print out test results and update test_failed_array status to reflect if this test has failed
        self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test2_glm_lambda_search",
                                                                                num_test_failed, self.test_failed)
        self.test_num += 1
def test3_glm_grid_search(self):
"""
This test is used to test GridSearch with the following parameters:
1. Lambda = best_lambda value from test2
2. alpha = [0 0.5 0.99]
3. cross-validation with k = 5, fold_assignment = "Random"
We will look at the best results from the grid search and compare it with H2O model built in test 1.
:return: None
"""
print("*******************************************************************************************")
print("Test3: explores various parameter settings in training the GLM using GridSearch using solver ")
h2o.cluster_info()
hyper_parameters = {'alpha': [0, 0.5, 0.99]} # set hyper_parameters for grid search
# train H2O GLM model with grid search
model_h2o_gridsearch = \
H2OGridSearch(H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, nfolds=5,
fold_assignment='Random'), hyper_parameters)
model_h2o_gridsearch.train(x=self.x_indices, y=self.y_index, training_frame=self.training_data_grid)
# print out the model sequence ordered by the best validation logloss values, thanks Ludi!
temp_model = model_h2o_gridsearch.sort_by("logloss(xval=True)")
# obtain the model ID of best model (with smallest MSE) and use that for our evaluation
best_model_id = temp_model['Model Id'][0]
self.best_grid_logloss = temp_model['logloss(xval=True)'][0]
self.best_alpha = model_h2o_gridsearch.get_hyperparams(best_model_id)
best_model = h2o.get_model(best_model_id)
best_model_test_metrics = best_model.model_performance(test_data=self.test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with H2O model from test 1
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(best_model, best_model_test_metrics,
self.family,
"\nTest3 " + " Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and"
" weights ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test dataset"
" ....",
"\nComparing confusion matrices from "
"training dataset ....",
"\nComparing confusion matrices from "
"test dataset ...",
"\nComparing accuracy from training "
"dataset ....",
"\nComparing accuracy from test "
" sdataset ...."],
h2o_att_str=[
"H2O grid search intercept and "
"weights: \n",
"H2O grid search logloss from training"
" dataset: ",
"H2O grid search logloss from test "
"dataset",
"H2O grid search confusion matrix from"
" training dataset: \n",
"H2O grid search confusion matrix from"
" test dataset: \n",
"H2O grid search accuracy from"
" training dataset: ",
"H2O grid search accuracy from test "
"dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test_glm_grid_search_over_params",
num_test_failed, self.test_failed)
self.test_num += 1
def test4_glm_remove_collinear_columns(self):
"""
With the best parameters obtained from test 3 grid search, we will trained GLM
with duplicated columns and enable remove_collinear_columns and see if the
algorithm catches the duplicated columns. We will compare the results with test
1 results.
"""
print("*******************************************************************************************")
print("Test4: test the GLM remove_collinear_columns.")
h2o.cluster_info()
# read in training data sets with duplicated columns
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_duplicate))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_duplicate))
y_index = training_data.ncol-1
x_indices = list(range(y_index))
# change response variable to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model with remove_collinear_columns=True
model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=self.best_lambda, alpha=self.best_alpha,
remove_collinear_columns=True)
model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
print("Best lambda is {0}, best alpha is {1}".format(self.best_lambda, self.best_alpha))
# evaluate model over test data set
model_h2o_metrics = model_h2o.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results our H2O GLM and test1 H2O model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, model_h2o_metrics, self.family,
"\nTest3 Done!",
test_model=self.test1_model,
test_model_metric=self.test1_model_metrics,
compare_att_str=[
"\nComparing intercept and weights"
" ....",
"\nComparing logloss from training "
"dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O remove_collinear_columns "
"intercept and weights: \n",
"H2O remove_collinear_columns"
" logloss from training dataset: ",
"H2O remove_collinear_columns"
" logloss from test dataset",
"H2O remove_collinear_columns"
" confusion matrix from "
"training dataset: \n",
"H2O remove_collinear_columns"
" confusion matrix from"
" test dataset: \n",
"H2O remove_collinear_columns"
" accuracy from"
" training dataset: ",
"H2O remove_collinear_columns"
" accuracy from test"
" dataset: "],
template_att_str=[
"H2O test1 template intercept and"
" weights: \n",
"H2O test1 template logloss from"
" training dataset: ",
"H2O test1 template logloss from"
" test dataset: ",
"H2O test1 template confusion"
" matrix from training dataset: \n",
"H2O test1 template confusion"
" matrix from test dataset: \n",
"H2O test1 template accuracy from"
" training dataset: ",
"H2O test1 template accuracy from"
" test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ"
" too much!",
"Logloss from test dataset differ too"
" much!", "", "",
"Accuracies from training dataset"
" differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close"
" enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test4_glm_remove_collinear_columns",
num_test_failed, self.test_failed)
self.test_num += 1
    def test5_missing_values(self):
        """
        Test parameter missing_values_handling="MeanImputation" with
        only real value predictors. The same data sets as before is used. However, we
        go into the predictor matrix and randomly decide to replace a value with
        nan and create missing values. Sklearn logistic regression model is built using the
        data set where we have imputed the missing values. This Sklearn model will be used to
        compare our H2O models with.

        Side effects: updates self.test_failed / self.test_failed_array[self.test_num] and
        increments self.test_num.
        """
        print("*******************************************************************************************")
        print("Test5: test the GLM with imputation of missing values with column averages.")
        h2o.cluster_info()
        # training result from python sklearn
        (p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
            self.sklearn_binomial_result(self.training_data_file_nans, self.test_data_file_nans, False, False)
        # import training set and test set
        training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_nans))
        test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_nans))
        # change the response columns to be categorical
        training_data[self.y_index] = training_data[self.y_index].round().asfactor()
        test_data[self.y_index] = test_data[self.y_index].round().asfactor()
        # train H2O models with missing_values_handling="MeanImputation"; Lambda=0 keeps it
        # regularization-free like the sklearn reference model
        model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
                                                  missing_values_handling="MeanImputation")
        model_h2o.train(x=self.x_indices, y=self.y_index, training_frame=training_data)
        # calculate H2O model performance with test data set
        h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
        num_test_failed = self.test_failed
        # print out comparison results our H2O GLM and Sklearn model
        self.test_failed = \
            pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
                                                                             self.family, "\nTest5 Done!",
                                                                             compare_att_str=[
                                                                                 "\nComparing intercept and weights"
                                                                                 " ....",
                                                                                 "\nComparing logloss from training"
                                                                                 " dataset ....",
                                                                                 "\nComparing logloss from test"
                                                                                 " dataset ....",
                                                                                 "\nComparing confusion matrices from"
                                                                                 " training dataset ....",
                                                                                 "\nComparing confusion matrices from"
                                                                                 " test dataset ...",
                                                                                 "\nComparing accuracy from training"
                                                                                 " dataset ....",
                                                                                 "\nComparing accuracy from test"
                                                                                 " dataset ...."],
                                                                             h2o_att_str=[
                                                                                 "H2O missing values intercept and"
                                                                                 " weights: \n",
                                                                                 "H2O missing values logloss from"
                                                                                 " training dataset: ",
                                                                                 "H2O missing values logloss from"
                                                                                 " test dataset",
                                                                                 "H2O missing values confusion matrix"
                                                                                 " from training dataset: \n",
                                                                                 "H2O missing values confusion matrix"
                                                                                 " from test dataset: \n",
                                                                                 "H2O missing values accuracy from"
                                                                                 " training dataset: ",
                                                                                 "H2O missing values accuracy from"
                                                                                 " test dataset: "],
                                                                             template_att_str=[
                                                                                 "Sklearn missing values intercept"
                                                                                 " and weights: \n",
                                                                                 "Sklearn missing values logloss from"
                                                                                 " training dataset: ",
                                                                                 "Sklearn missing values logloss from"
                                                                                 " test dataset: ",
                                                                                 "Sklearn missing values confusion"
                                                                                 " matrix from training dataset: \n",
                                                                                 "Sklearn missing values confusion"
                                                                                 " matrix from test dataset: \n",
                                                                                 "Sklearn missing values accuracy"
                                                                                 " from training dataset: ",
                                                                                 "Sklearn missing values accuracy"
                                                                                 " from test dataset: "],
                                                                             att_str_fail=[
                                                                                 "Intercept and weights are not equal!",
                                                                                 "Logloss from training dataset differ"
                                                                                 " too much!",
                                                                                 "Logloss from test dataset differ"
                                                                                 " too much!", "", "",
                                                                                 "Accuracies from training dataset"
                                                                                 " differ too much!",
                                                                                 "Accuracies from test dataset differ"
                                                                                 " too much!"],
                                                                             att_str_success=[
                                                                                 "Intercept and weights are close "
                                                                                 "enough!",
                                                                                 "Logloss from training dataset are"
                                                                                 " close enough!",
                                                                                 "Logloss from test dataset are close"
                                                                                 " enough!", "", "",
                                                                                 "Accuracies from training dataset are"
                                                                                 " close enough!",
                                                                                 "Accuracies from test dataset are"
                                                                                 " close enough!"],
                                                                             can_be_better_than_template=[
                                                                                 True, True, True, True, True,
                                                                                 True, True],
                                                                             just_print=[
                                                                                 True, True, True, True, True,
                                                                                 True, False],
                                                                             failed_test_number=self.test_failed,
                                                                             template_params=[
                                                                                 p_weights, p_logloss_train, p_cm_train,
                                                                                 p_accuracy_training, p_logloss_test,
                                                                                 p_cm_test, p_accuracy_test],
                                                                             ignored_eps=self.ignored_eps,
                                                                             allowed_diff=self.allowed_diff)
        # print out test results and update test_failed_array status to reflect if tests have failed
        self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test5_missing_values",
                                                                                num_test_failed, self.test_failed)
        self.test_num += 1
    def test6_enum_missing_values(self):
        """
        Test parameter missing_values_handling="MeanImputation" with
        mixed predictors (categorical/real value columns). We first generate a data set that
        contains a random number of columns of categorical and real value columns. Next, we
        encode the categorical columns. Then, we generate the random data set using the formula
        as before. Next, we go into the predictor matrix and randomly
        decide to change a value to be nan and create missing values. Again, we build a Sklearn
        logistic regression and compare our H2O models with it.

        Side effects: updates self.test_failed / self.test_failed_array[self.test_num] and
        increments self.test_num. Calls sys.exit(0) if the training response does not contain
        all expected classes.
        """
        # no regularization in this case, use reference level plus one-hot-encoding
        print("*******************************************************************************************")
        print("Test6: test the GLM with enum/real values.")
        h2o.cluster_info()
        # training result from python sklearn
        (p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
            self.sklearn_binomial_result(self.training_data_file_enum_nans, self.test_data_file_enum_nans, True, False)
        # import training set and test set with missing values
        training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans))
        test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans))
        # change the categorical data using .asfactor(); the first self.enum_col columns are enums
        for ind in range(self.enum_col):
            training_data[ind] = training_data[ind].round().asfactor()
            test_data[ind] = test_data[ind].round().asfactor()
        num_col = training_data.ncol
        y_index = num_col - 1
        x_indices = list(range(y_index))
        # change response variables to be categorical
        training_data[y_index] = training_data[y_index].round().asfactor()
        # check to make sure all response classes are represented, otherwise, quit
        if training_data[y_index].nlevels()[0] < self.class_number:
            print("Response classes are not represented in training dataset.")
            sys.exit(0)
        test_data[y_index] = test_data[y_index].round().asfactor()
        # generate H2O model
        model_h2o = H2OGeneralizedLinearEstimator(family=self.family, Lambda=0,
                                                  missing_values_handling="MeanImputation")
        model_h2o.train(x=x_indices, y=y_index, training_frame=training_data)
        h2o_model_test_metrics = model_h2o.model_performance(test_data=test_data)
        num_test_failed = self.test_failed
        # print out comparison results our H2O GLM with Sklearn model
        self.test_failed = \
            pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o, h2o_model_test_metrics,
                                                                             self.family, "\nTest6 Done!",
                                                                             compare_att_str=[
                                                                                 "\nComparing intercept and "
                                                                                 "weights ....",
                                                                                 "\nComparing logloss from training"
                                                                                 " dataset ....",
                                                                                 "\nComparing logloss from test"
                                                                                 " dataset ....",
                                                                                 "\nComparing confusion matrices from"
                                                                                 " training dataset ....",
                                                                                 "\nComparing confusion matrices from"
                                                                                 " test dataset ...",
                                                                                 "\nComparing accuracy from training"
                                                                                 " dataset ....",
                                                                                 "\nComparing accuracy from test"
                                                                                 " dataset ...."],
                                                                             h2o_att_str=[
                                                                                 "H2O with enum/real values, "
                                                                                 "no regularization and missing values"
                                                                                 " intercept and weights: \n",
                                                                                 "H2O with enum/real values, no "
                                                                                 "regularization and missing values"
                                                                                 " logloss from training dataset: ",
                                                                                 "H2O with enum/real values, no"
                                                                                 " regularization and missing values"
                                                                                 " logloss from test dataset",
                                                                                 "H2O with enum/real values, no"
                                                                                 " regularization and missing values"
                                                                                 " confusion matrix from training"
                                                                                 " dataset: \n",
                                                                                 "H2O with enum/real values, no"
                                                                                 " regularization and missing values"
                                                                                 " confusion matrix from test"
                                                                                 " dataset: \n",
                                                                                 "H2O with enum/real values, no"
                                                                                 " regularization and missing values "
                                                                                 "accuracy from training dataset: ",
                                                                                 "H2O with enum/real values, no "
                                                                                 "regularization and missing values"
                                                                                 " accuracy from test dataset: "],
                                                                             template_att_str=[
                                                                                 "Sklearn missing values intercept "
                                                                                 "and weights: \n",
                                                                                 "Sklearn with enum/real values, no"
                                                                                 " regularization and missing values"
                                                                                 " logloss from training dataset: ",
                                                                                 "Sklearn with enum/real values, no "
                                                                                 "regularization and missing values"
                                                                                 " logloss from test dataset: ",
                                                                                 "Sklearn with enum/real values, no "
                                                                                 "regularization and missing values "
                                                                                 "confusion matrix from training"
                                                                                 " dataset: \n",
                                                                                 "Sklearn with enum/real values, no "
                                                                                 "regularization and missing values "
                                                                                 "confusion matrix from test "
                                                                                 "dataset: \n",
                                                                                 "Sklearn with enum/real values, no "
                                                                                 "regularization and missing values "
                                                                                 "accuracy from training dataset: ",
                                                                                 "Sklearn with enum/real values, no "
                                                                                 "regularization and missing values "
                                                                                 "accuracy from test dataset: "],
                                                                             att_str_fail=[
                                                                                 "Intercept and weights are not equal!",
                                                                                 "Logloss from training dataset differ"
                                                                                 " too much!",
                                                                                 "Logloss from test dataset differ too"
                                                                                 " much!", "", "",
                                                                                 "Accuracies from training dataset"
                                                                                 " differ too much!",
                                                                                 "Accuracies from test dataset differ"
                                                                                 " too much!"],
                                                                             att_str_success=[
                                                                                 "Intercept and weights are close"
                                                                                 " enough!",
                                                                                 "Logloss from training dataset are"
                                                                                 " close enough!",
                                                                                 "Logloss from test dataset are close"
                                                                                 " enough!", "", "",
                                                                                 "Accuracies from training dataset are"
                                                                                 " close enough!",
                                                                                 "Accuracies from test dataset are"
                                                                                 " close enough!"],
                                                                             can_be_better_than_template=[
                                                                                 True, True, True, True, True,
                                                                                 True, True],
                                                                             just_print=[
                                                                                 True, True, True, True, True,
                                                                                 True, False],
                                                                             failed_test_number=self.test_failed,
                                                                             template_params=[
                                                                                 p_weights, p_logloss_train, p_cm_train,
                                                                                 p_accuracy_training, p_logloss_test,
                                                                                 p_cm_test, p_accuracy_test],
                                                                             ignored_eps=self.ignored_eps,
                                                                             allowed_diff=self.allowed_diff)
        h2o.cluster_info()
        # print out test results and update test_failed_array status to reflect if this test has failed
        self.test_failed_array[self.test_num] += pyunit_utils.show_test_results("test6_enum_missing_values",
                                                                                num_test_failed, self.test_failed)
        self.test_num += 1
def test7_missing_enum_values_lambda_search(self):
"""
Test parameter
missing_values_handling="MeanImputation" with mixed predictors (categorical/real value columns).
Test parameter missing_values_handling="MeanImputation" with
mixed predictors (categorical/real value columns) and setting lambda search to be True.
We use the same predictors with missing values from test6. Next, we encode the categorical columns using
true one hot encoding since Lambda-search will be enabled with alpha set to 0.5. Since the encoding
is different in this case from test6, we will build a brand new Sklearn logistic regression model and
compare the best H2O model logloss/prediction accuracy with it.
"""
# perform lambda_search, regularization and one hot encoding.
print("*******************************************************************************************")
print("Test7: test the GLM with imputation of missing enum/real values under lambda search.")
h2o.cluster_info()
# training result from python sklearn
(p_weights, p_logloss_train, p_cm_train, p_accuracy_training, p_logloss_test, p_cm_test, p_accuracy_test) = \
self.sklearn_binomial_result(self.training_data_file_enum_nans,
self.test_data_file_enum_nans_true_one_hot, True, True,
validation_data_file=self.validation_data_file_enum_nans_true_one_hot)
# import training set and test set with missing values and true one hot encoding
training_data = h2o.import_file(pyunit_utils.locate(self.training_data_file_enum_nans_true_one_hot))
validation_data = h2o.import_file(pyunit_utils.locate(self.validation_data_file_enum_nans_true_one_hot))
test_data = h2o.import_file(pyunit_utils.locate(self.test_data_file_enum_nans_true_one_hot))
# change the categorical data using .asfactor()
for ind in range(self.enum_col):
training_data[ind] = training_data[ind].round().asfactor()
validation_data[ind] = validation_data[ind].round().asfactor()
test_data[ind] = test_data[ind].round().asfactor()
num_col = training_data.ncol
y_index = num_col - 1
x_indices = list(range(y_index))
# change response column to be categorical
training_data[y_index] = training_data[y_index].round().asfactor()
# check to make sure all response classes are represented, otherwise, quit
if training_data[y_index].nlevels()[0] < self.class_number:
print("Response classes are not represented in training dataset.")
sys.exit(0)
validation_data[y_index] = validation_data[y_index].round().asfactor()
test_data[y_index] = test_data[y_index].round().asfactor()
# train H2O model
model_h2o_0p5 = H2OGeneralizedLinearEstimator(family=self.family, lambda_search=True, alpha=0.5,
lambda_min_ratio=1e-20, missing_values_handling="MeanImputation")
model_h2o_0p5.train(x=x_indices, y=y_index, training_frame=training_data, validation_frame=validation_data)
h2o_model_0p5_test_metrics = model_h2o_0p5.model_performance(test_data=test_data)
num_test_failed = self.test_failed
# print out comparison results for our H2O GLM with Sklearn model
self.test_failed = \
pyunit_utils.extract_comparison_attributes_and_print_multinomial(model_h2o_0p5, h2o_model_0p5_test_metrics,
self.family, "\nTest7 Done!",
compare_att_str=[
"\nComparing intercept and "
"weights ....",
"\nComparing logloss from training"
" dataset ....",
"\nComparing logloss from test"
" dataset ....",
"\nComparing confusion matrices from"
" training dataset ....",
"\nComparing confusion matrices from"
" test dataset ...",
"\nComparing accuracy from training"
" dataset ....",
"\nComparing accuracy from test"
" dataset ...."],
h2o_att_str=[
"H2O with enum/real values, lamba "
"search and missing values intercept"
" and weights: \n",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values logloss "
"from test dataset",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from training dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values confusion "
"matrix from test dataset: \n",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from training dataset: ",
"H2O with enum/real values, lamba "
"search and missing values accuracy "
"from test dataset: "],
template_att_str=[
"Sklearn with enum/real values, lamba"
" search and missing values intercept"
" and weights: \n",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values logloss "
"from test dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from training dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values confusion"
" matrix from test dataset: \n",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from training dataset: ",
"Sklearn with enum/real values, lamba"
" search and missing values accuracy"
" from test dataset: "],
att_str_fail=[
"Intercept and weights are not equal!",
"Logloss from training dataset differ "
"too much!",
"Logloss from test dataset differ too"
" much!", "", "", "Accuracies from"
" training dataset differ too much!",
"Accuracies from test dataset differ"
" too much!"],
att_str_success=[
"Intercept and weights are close "
"enough!",
"Logloss from training dataset are"
" close enough!",
"Logloss from test dataset are close"
" enough!", "", "",
"Accuracies from training dataset are"
" close enough!",
"Accuracies from test dataset are"
" close enough!"],
can_be_better_than_template=[
True, True, True, True, True,
True, True],
just_print=[
True, True, True, True, True,
True, False],
failed_test_number=self.test_failed,
template_params=[
p_weights, p_logloss_train, p_cm_train,
p_accuracy_training, p_logloss_test,
p_cm_test, p_accuracy_test],
ignored_eps=self.ignored_eps,
allowed_diff=self.allowed_diff)
# print out test results and update test_failed_array status to reflect if this test has failed
self.test_failed_array[self.test_num] += \
pyunit_utils.show_test_results("test7_missing_enum_values_lambda_search", num_test_failed, self.test_failed)
self.test_num += 1
def sklearn_binomial_result(self, training_data_file, test_data_file, has_categorical, true_one_hot,
                            validation_data_file=""):
    """
    Generate a Sklearn logistic model using the same set of data sets we have used to build
    our H2O models.  The purpose here is to be able to compare the performance of H2O
    models with the Sklearn model built here.  This is useful in cases where theoretical solutions
    do not exist.  If the data contains missing values, mean imputation is applied to the data set before
    a Sklearn model is built.  In addition, if there are enum columns in predictors and also missing values,
    the same encoding and missing value imputation method used by H2O is applied to the data set before we
    build the Sklearn model.

    :param training_data_file: string storing training data set filename with directory path.
    :param test_data_file: string storing test data set filename with directory path.
    :param has_categorical: bool indicating if the data set contains mixed predictors (both enum and real)
    :param true_one_hot: bool True: true one hot encoding is used.  False: reference level plus one hot
        encoding is used
    :param validation_data_file: optional string, denoting validation file so that we can concatenate
        training and validation data sets into a big training set since H2O model is using a training
        and a validation data set.

    :return: a tuple containing the weights, logloss, confusion matrix, prediction accuracy calculated on
        training data set and test data set respectively.
    """
    # read in the training/test data into matrices
    training_data_xy = np.asmatrix(np.genfromtxt(training_data_file, delimiter=',', dtype=None))
    test_data_xy = np.asmatrix(np.genfromtxt(test_data_file, delimiter=',', dtype=None))

    if len(validation_data_file) > 0:   # validation data set exists; fold it into the training data
        temp_data_xy = np.asmatrix(np.genfromtxt(validation_data_file, delimiter=',', dtype=None))
        training_data_xy = np.concatenate((training_data_xy, temp_data_xy), axis=0)

    # if predictor contains categorical data, perform encoding of enums to binary bits
    # for missing categorical enums, a new level is created for the nans
    if has_categorical:
        training_data_xy = pyunit_utils.encode_enum_dataset(training_data_xy, self.enum_level_vec, self.enum_col,
                                                            true_one_hot, np.any(training_data_xy))
        test_data_xy = pyunit_utils.encode_enum_dataset(test_data_xy, self.enum_level_vec, self.enum_col,
                                                        true_one_hot, np.any(training_data_xy))

    # replace missing values for real value columns with training column means before proceeding.
    # BUG FIX: previously col_means was computed only inside the training-set branch, so a
    # NaN-free training set combined with a NaN-carrying test set raised a NameError on the
    # col_means reference below.  Compute it whenever either data set needs imputation.
    if np.isnan(training_data_xy).any() or np.isnan(test_data_xy).any():
        col_means = np.asarray(np.nanmean(training_data_xy, axis=0))[0]

    if np.isnan(training_data_xy).any():
        inds = np.where(np.isnan(training_data_xy))
        training_data_xy[inds] = np.take(col_means, inds[1])

    if np.isnan(test_data_xy).any():
        # replace the missing values with column means from training
        inds = np.where(np.isnan(test_data_xy))
        test_data_xy = pyunit_utils.replace_nan_with_mean(test_data_xy, inds, col_means)

    # now data is ready to be massaged into format that sklearn can use
    (response_y, x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(training_data_xy)
    (t_response_y, t_x_mat) = pyunit_utils.prepare_data_sklearn_multinomial(test_data_xy)

    # train the sklearn Model
    sklearn_model = LogisticRegression(class_weight=self.sklearn_class_weight)
    sklearn_model = sklearn_model.fit(x_mat, response_y)

    # grab the performance metrics on training data set
    accuracy_training = sklearn_model.score(x_mat, response_y)
    weights = sklearn_model.coef_
    p_response_y = sklearn_model.predict(x_mat)
    log_prob = sklearn_model.predict_log_proba(x_mat)
    logloss_training = self.logloss_sklearn(response_y, log_prob)
    cm_train = metrics.confusion_matrix(response_y, p_response_y)

    # grab the performance metrics on the test data set
    p_response_y = sklearn_model.predict(t_x_mat)
    log_prob = sklearn_model.predict_log_proba(t_x_mat)
    logloss_test = self.logloss_sklearn(t_response_y, log_prob)
    cm_test = metrics.confusion_matrix(t_response_y, p_response_y)
    accuracy_test = metrics.accuracy_score(t_response_y, p_response_y)

    return weights, logloss_training, cm_train, accuracy_training, logloss_test, cm_test, accuracy_test
def logloss_sklearn(self, true_y, log_prob):
    """
    Compute the average logloss of a Sklearn model given the true response labels (true_y)
    and the matrix of predicted log probabilities (log_prob).

    :param true_y: array denoting the true class label
    :param log_prob: matrix containing the log of Prob(Y=0) and Prob(Y=1)

    :return: average logloss.
    """
    num_row = log_prob.shape[0]
    # for each sample, pick out the log probability assigned to its true class and sum them up
    total_log_prob = sum(log_prob[row_index, int(true_y[row_index])]
                         for row_index in range(num_row))
    return -1.0 * total_log_prob / num_row
def test_glm_binomial():
    """
    Create and instantiate TestGLMBinomial class and perform tests specified for GLM
    Binomial family.

    :return: None
    """
    test_glm_binomial = TestGLMBinomial()
    # run every GLM Binomial test scenario in sequence; each scenario records its
    # pass/fail status on the shared test object (test_failed / test_failed_array)
    test_glm_binomial.test1_glm_no_regularization()
    test_glm_binomial.test2_glm_lambda_search()
    test_glm_binomial.test3_glm_grid_search()
    test_glm_binomial.test4_glm_remove_collinear_columns()
    test_glm_binomial.test5_missing_values()
    test_glm_binomial.test6_enum_missing_values()
    test_glm_binomial.test7_missing_enum_values_lambda_search()
    test_glm_binomial.teardown()
    sys.stdout.flush()
    if test_glm_binomial.test_failed:  # exit with error if any tests have failed
        sys.exit(1)
if __name__ == "__main__":
    # run under the H2O pyunit harness when executed as a script
    pyunit_utils.standalone_test(test_glm_binomial)
else:
    # NOTE(review): the tests also run when this module is merely imported; this
    # mirrors the H2O pyunit convention but is surprising -- confirm intended.
    test_glm_binomial()
| apache-2.0 |
galaxy001/libtorrent | python_BTL_BitTorrent-5.3-GPL/lsprof/lsprof.py | 5 | 3598 | #! /usr/bin/env python
import sys
from _lsprof import Profiler, profiler_entry, profiler_subentry
__all__ = ['profile', 'Stats']
def profile(f, *args, **kwds):
    """Run ``f(*args, **kwds)`` under the low-level profiler and return a Stats
    object wrapping the collected entries (sub-calls and builtins included)."""
    prof = Profiler()
    prof.enable(subcalls=True, builtins=True)
    try:
        f(*args, **kwds)
    finally:
        # always stop profiling, even if the profiled call raises
        prof.disable()
    return Stats(prof.getstats())
class Stats(object):
"""XXX docstring"""
def __init__(self, data):
self.data = data
def sort(self, crit="inlinetime"):
"""XXX docstring"""
if crit not in profiler_entry.__dict__:
raise ValueError, "Can't sort by %s" % crit
self.data.sort(lambda b, a: cmp(getattr(a, crit),
getattr(b, crit)))
for e in self.data:
if e.calls:
e.calls.sort(lambda b, a: cmp(getattr(a, crit),
getattr(b, crit)))
def pprint(self, top=None, file=None):
"""XXX docstring"""
if file is None:
file = sys.stdout
d = self.data
if top is not None:
d = d[:top]
cols = "% 12s %12s %11.4f %11.4f %s\n"
hcols = "% 12s %12s %12s %12s %s\n"
cols2 = "+%12s %12s %11.4f %11.4f + %s\n"
file.write(hcols % ("CallCount", "Recursive", "Total(ms)",
"Inline(ms)", "module:lineno(function)"))
for e in d:
file.write(cols % (e.callcount, e.reccallcount, e.totaltime,
e.inlinetime, label(e.code)))
if e.calls:
for se in e.calls:
file.write(cols % ("+%s" % se.callcount, se.reccallcount,
se.totaltime, se.inlinetime,
"+%s" % label(se.code)))
def freeze(self):
"""Replace all references to code objects with string
descriptions; this makes it possible to pickle the instance."""
# this code is probably rather ickier than it needs to be!
for i in range(len(self.data)):
e = self.data[i]
if not isinstance(e.code, str):
self.data[i] = type(e)((label(e.code),) + e[1:])
if e.calls:
for j in range(len(e.calls)):
se = e.calls[j]
if not isinstance(se.code, str):
e.calls[j] = type(se)((label(se.code),) + se[1:])
# cache mapping a code object's filename to the module name it belongs to
_fn2mod = {}

def label(code):
    """Return a 'module:lineno(function)' description for *code*, or *code*
    itself when it is already a string (i.e. a frozen entry)."""
    if isinstance(code, str):
        return code
    filename = code.co_filename
    if filename not in _fn2mod:
        # find the loaded module whose file matches this code object's filename
        for modname, module in sys.modules.items():
            if module is None:
                continue
            if not hasattr(module, '__file__'):
                continue
            if not isinstance(module.__file__, str):
                continue
            if module.__file__.startswith(filename):
                _fn2mod[filename] = modname
                break
        else:
            # no module found: fall back to the bare filename in angle brackets
            _fn2mod[filename] = '<%s>' % filename
    return '%s:%d(%s)' % (_fn2mod[filename], code.co_firstlineno, code.co_name)
if __name__ == '__main__':
    import os
    # drop our own name from argv so the profiled script sees its own arguments
    sys.argv = sys.argv[1:]
    if not sys.argv:
        # Python 2 print-to-stream syntax
        print >> sys.stderr, "usage: lsprof.py <script> <arguments...>"
        sys.exit(2)
    # make imports resolve relative to the profiled script's own directory
    sys.path.insert(0, os.path.abspath(os.path.dirname(sys.argv[0])))
    # execfile (Python 2) runs the target script inside the profiler
    stats = profile(execfile, sys.argv[0], globals(), locals())
    stats.sort()
    stats.pprint()
| mit |
ChanderG/scipy | scipy/integrate/quadpack.py | 42 | 30898 | # Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
from __future__ import division, print_function, absolute_import
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
    """
    Warning on issues during integration (e.g. the subdivision limit was
    reached or roundoff error was detected); emitted by `quad` when
    ``full_output`` is not requested.
    """
    pass
def quad_explain(output=None):
    """
    Print extra information about integrate.quad() parameters and returns.

    Parameters
    ----------
    output : instance with "write" method, optional
        Information about `quad` is passed to ``output.write()``.
        Default is ``sys.stdout``.

    Returns
    -------
    None

    """
    # Resolve the default lazily so that a rebinding of sys.stdout after import
    # (e.g. by output-capturing test harnesses) is honored; the previous
    # ``output=sys.stdout`` default froze the stream object at import time.
    if output is None:
        output = sys.stdout
    output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
         limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
         limlst=50):
    """
    Compute a definite integral.

    Integrate func from `a` to `b` (possibly infinite interval) using a
    technique from the Fortran library QUADPACK.

    Parameters
    ----------
    func : function
        A Python function or method to integrate. If `func` takes many
        arguments, it is integrated along the axis corresponding to the
        first argument.
        If the user desires improved integration performance, then f may
        instead be a ``ctypes`` function of the form:

            f(int n, double args[n]),

        where ``args`` is an array of function arguments and ``n`` is the
        length of ``args``. ``f.argtypes`` should be set to
        ``(c_int, c_double)``, and ``f.restype`` should be ``(c_double,)``.
    a : float
        Lower limit of integration (use -numpy.inf for -infinity).
    b : float
        Upper limit of integration (use numpy.inf for +infinity).
    args : tuple, optional
        Extra arguments to pass to `func`.
    full_output : int, optional
        Non-zero to return a dictionary of integration information.
        If non-zero, warning messages are also suppressed and the
        message is appended to the output tuple.

    Returns
    -------
    y : float
        The integral of func from `a` to `b`.
    abserr : float
        An estimate of the absolute error in the result.
    infodict : dict
        A dictionary containing additional information.
        Run scipy.integrate.quad_explain() for more information.
    message :
        A convergence message.
    explain :
        Appended only with 'cos' or 'sin' weighting and infinite
        integration limits, it contains an explanation of the codes in
        infodict['ierlst']

    Other Parameters
    ----------------
    epsabs : float or int, optional
        Absolute error tolerance.
    epsrel : float or int, optional
        Relative error tolerance.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    points : (sequence of floats,ints), optional
        A sequence of break points in the bounded integration interval
        where local difficulties of the integrand may occur (e.g.,
        singularities, discontinuities). The sequence does not have
        to be sorted.
    weight : float or int, optional
        String indicating weighting function. Full explanation for this
        and the remaining arguments can be found below.
    wvar : optional
        Variables for use with weighting functions.
    wopts : optional
        Optional input for reusing Chebyshev moments.
    maxp1 : float or int, optional
        An upper bound on the number of Chebyshev moments.
    limlst : int, optional
        Upper bound on the number of cycles (>=3) for use with a sinusoidal
        weighting and an infinite end-point.

    See Also
    --------
    dblquad : double integral
    tplquad : triple integral
    nquad : n-dimensional integrals (uses `quad` recursively)
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Notes
    -----
    **Extra information for quad() inputs and outputs**

    If full_output is non-zero, then the third output argument
    (infodict) is a dictionary with entries as tabulated below. For
    infinite limits, the range is transformed to (0,1) and the
    optional outputs are given with respect to this transformed range.
    Let M be the input argument limit and let K be infodict['last'].
    The entries are:

    'neval'
        The number of function evaluations.
    'last'
        The number, K, of subintervals produced in the subdivision process.
    'alist'
        A rank-1 array of length M, the first K elements of which are the
        left end points of the subintervals in the partition of the
        integration range.
    'blist'
        A rank-1 array of length M, the first K elements of which are the
        right end points of the subintervals.
    'rlist'
        A rank-1 array of length M, the first K elements of which are the
        integral approximations on the subintervals.
    'elist'
        A rank-1 array of length M, the first K elements of which are the
        moduli of the absolute error estimates on the subintervals.
    'iord'
        A rank-1 integer array of length M, the first L elements of
        which are pointers to the error estimates over the subintervals
        with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
        sequence ``infodict['iord']`` and let E be the sequence
        ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
        decreasing sequence.

    If the input argument points is provided (i.e. it is not None),
    the following additional outputs are placed in the output
    dictionary. Assume the points sequence is of length P.

    'pts'
        A rank-1 array of length P+2 containing the integration limits
        and the break points of the intervals in ascending order.
        This is an array giving the subintervals over which integration
        will occur.
    'level'
        A rank-1 integer array of length M (=limit), containing the
        subdivision levels of the subintervals, i.e., if (aa,bb) is a
        subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
        are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
        if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
    'ndin'
        A rank-1 integer array of length P+2. After the first integration
        over the intervals (pts[1], pts[2]), the error estimates over some
        of the intervals may have been increased artificially in order to
        put their subdivision forward. This array has ones in slots
        corresponding to the subintervals for which this happens.

    **Weighting the integrand**

    The input variables, *weight* and *wvar*, are used to weight the
    integrand by a select list of functions. Different integration
    methods are used to compute the integral with these weighting
    functions. The possible values of weight and the corresponding
    weighting functions are.

    ========== =================================== =====================
    ``weight`` Weight function used                ``wvar``
    ========== =================================== =====================
    'cos'      cos(w*x)                            wvar = w
    'sin'      sin(w*x)                            wvar = w
    'alg'      g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
    'alg-loga' g(x)*log(x-a)                       wvar = (alpha, beta)
    'alg-logb' g(x)*log(b-x)                       wvar = (alpha, beta)
    'alg-log'  g(x)*log(x-a)*log(b-x)              wvar = (alpha, beta)
    'cauchy'   1/(x-c)                             wvar = c
    ========== =================================== =====================

    wvar holds the parameter w, (alpha, beta), or c depending on the weight
    selected. In these expressions, a and b are the integration limits.

    For the 'cos' and 'sin' weighting, additional inputs and outputs are
    available.

    For finite integration limits, the integration is performed using a
    Clenshaw-Curtis method which uses Chebyshev moments. For repeated
    calculations, these moments are saved in the output dictionary:

    'momcom'
        The maximum level of Chebyshev moments that have been computed,
        i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
        computed for intervals of length ``|b-a| * 2**(-l)``,
        ``l=0,1,...,M_c``.
    'nnlog'
        A rank-1 integer array of length M(=limit), containing the
        subdivision levels of the subintervals, i.e., an element of this
        array is equal to l if the corresponding subinterval is
        ``|b-a|* 2**(-l)``.
    'chebmo'
        A rank-2 array of shape (25, maxp1) containing the computed
        Chebyshev moments. These can be passed on to an integration
        over the same interval by passing this array as the second
        element of the sequence wopts and passing infodict['momcom'] as
        the first element.

    If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming w neq 0). If full_output is 1 and a numerical error
    is encountered, besides the error message attached to the output tuple,
    a dictionary is also appended to the output tuple which translates the
    error codes in the array ``info['ierlst']`` to English messages. The
    output information dictionary contains the following entries instead of
    'last', 'alist', 'blist', 'rlist', and 'elist':

    'lst'
        The number of subintervals needed for the integration (call it ``K_f``).
    'rslst'
        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
        contain the integral contribution over the interval
        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
        and ``k=1,2,...,K_f``.
    'erlst'
        A rank-1 array of length ``M_f`` containing the error estimate
        corresponding to the interval in the same position in
        ``infodict['rslist']``.
    'ierlst'
        A rank-1 integer array of length ``M_f`` containing an error flag
        corresponding to the interval in the same position in
        ``infodict['rslist']``. See the explanation dictionary (last entry
        in the output tuple) for the meaning of the codes.

    Examples
    --------
    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result

    >>> from scipy import integrate
    >>> x2 = lambda x: x**2
    >>> integrate.quad(x2, 0, 4)
    (21.333333333333332, 2.3684757858670003e-13)
    >>> print(4**3 / 3.)  # analytical result
    21.3333333333

    Calculate :math:`\\int^\\infty_0 e^{-x} dx`

    >>> invexp = lambda x: np.exp(-x)
    >>> integrate.quad(invexp, 0, np.inf)
    (1.0, 5.842605999138044e-11)

    >>> f = lambda x,a : a*x
    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
    >>> y
    0.5
    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
    >>> y
    1.5

    Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
    y parameter as 1::

        testlib.c =>
            double func(int n, double args[n]){
                return args[0]*args[0] + args[1]*args[1];}
        compile to library testlib.*

    ::

       from scipy import integrate
       import ctypes
       lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
       lib.func.restype = ctypes.c_double
       lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
       integrate.quad(lib.func,0,1,(1))
       #(1.3333333333333333, 1.4802973661668752e-14)
       print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
       # 1.3333333333333333

    """
    if not isinstance(args, tuple):
        args = (args,)
    # dispatch: plain adaptive quadrature vs. the weighted-integrand routines
    if (weight is None):
        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
                       points)
    else:
        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
                              limlst, limit, maxp1, weight, wvar, wopts)

    # last element of every QUADPACK return tuple is the error flag
    ier = retval[-1]
    if ier == 0:
        return retval[:-1]

    msgs = {80: "A Python error occurred possibly while calling the function.",
             1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
             2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
             3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
             4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
             5: "The integral is probably divergent, or slowly convergent.",
             6: "The input is invalid.",
             7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
             'unknown': "Unknown error."}

    if weight in ['cos','sin'] and (b == Inf or a == -Inf):
        msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
        msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
        msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
        explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
                   2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
                   3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
                   4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
                   5: "The integral over this cycle is probably divergent or slowly convergent."}

    try:
        msg = msgs[ier]
    except KeyError:
        msg = msgs['unknown']

    if ier in [1,2,3,4,5,7]:
        if full_output:
            # BUG FIX: the Fourier-integral case is a lower limit at *minus*
            # infinity (a == -Inf), matching the condition that defines the
            # `explain` dict above; the old test used ``a == Inf``, which is
            # unreachable here, so `explain` was never returned.
            if weight in ['cos','sin'] and (b == Inf or a == -Inf):
                return retval[:-1] + (msg, explain)
            else:
                return retval[:-1] + (msg,)
        else:
            warnings.warn(msg, IntegrationWarning)
            return retval[:-1]
    else:
        raise ValueError(msg)
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
    # Classify the interval for QUADPACK: 0 = both limits finite,
    # 1 = (a, +inf), 2 = (-inf, +inf), -1 = (-inf, b).
    infbounds = 0
    if (b != Inf and a != -Inf):
        pass   # standard integration
    elif (b == Inf and a != -Inf):
        infbounds = 1
        bound = a
    elif (b == Inf and a == -Inf):
        infbounds = 2
        bound = 0     # ignored
    elif (b != Inf and a == -Inf):
        infbounds = -1
        bound = b
    else:
        raise RuntimeError("Infinity comparisons don't work for you.")

    if points is None:
        if infbounds == 0:
            # finite interval: adaptive quadrature with extrapolation (QAGSE)
            return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
        else:
            # (semi-)infinite interval: QAGIE transforms to (0, 1) internally
            return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
    else:
        if infbounds != 0:
            raise ValueError("Infinity inputs cannot be used with break points.")
        else:
            nl = len(points)
            # QAGPE expects the break-point array padded with two extra slots
            # (reserved for the integration limits a and b)
            the_points = numpy.zeros((nl+2,), float)
            the_points[:nl] = points
            return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
    # Dispatch to the appropriate QUADPACK weighted-integration routine.
    if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
        raise ValueError("%s not a recognized weighting function." % weight)

    # integer codes QUADPACK uses for each weight function; note 'cos'/'sin'
    # and the 'alg*' family use separate code spaces (QAWOE/QAWFE vs QAWSE)
    strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}

    if weight in ['cos','sin']:
        integr = strdict[weight]
        if (b != Inf and a != -Inf):  # finite limits
            if wopts is None:         # no precomputed chebyshev moments
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1,1)
            else:                     # precomputed chebyshev moments
                momcom = wopts[0]
                chebcom = wopts[1]
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
        elif (b == Inf and a != -Inf):
            # Fourier integral over [a, inf)
            return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
                                    epsabs,limlst,limit,maxp1)
        elif (b != Inf and a == -Inf):  # remap function and interval
            # mirror the integrand so (-inf, b] becomes [-b, inf); cos is even
            # while sin is odd, hence the sign flip in the sin branch
            if weight == 'cos':
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return func(*myargs)
            else:
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return -func(*myargs)
            # the original integrand rides along as the first extra argument
            args = (func,) + args
            return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
                                    full_output, epsabs, limlst, limit, maxp1)
        else:
            raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
    else:
        if a in [-Inf,Inf] or b in [-Inf,Inf]:
            raise ValueError("Cannot integrate with this weight over an infinite interval.")

        if weight[:3] == 'alg':
            # algebraic(-logarithmic) end-point singularity weights (QAWSE)
            integr = strdict[weight]
            return _quadpack._qawse(func, a, b, wvar, integr, args,
                                    full_output, epsabs, epsrel, limit)
        else:  # weight == 'cauchy': principal-value integral (QAWCE)
            return _quadpack._qawce(func, a, b, wvar, args, full_output,
                                    epsabs, epsrel, limit)
def _infunc(x, func, gfun, hfun, more_args):
    # Inner integrand for dblquad: at fixed x, integrate func over y
    # between gfun(x) and hfun(x); discard the error estimate.
    return quad(func, gfun(x), hfun(x), args=(x,) + more_args)[0]
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
    """
    Compute a double integral.

    Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
    and ``y = gfun(x)..hfun(x)``.

    Parameters
    ----------
    func : callable
        A Python function or method of at least two variables: y must be the
        first argument and x the second argument.
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : callable
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result: a
        lambda function can be useful here.
    hfun : callable
        The upper boundary curve in y (same requirements as `gfun`).
    args : sequence, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the inner 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See also
    --------
    quad : single integral
    tplquad : triple integral
    nquad : N-dimensional integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature
    odeint : ODE integrator
    ode : ODE integrator
    simps : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    """
    # outer integral over x; _infunc performs the inner y-integration with
    # limits gfun(x)..hfun(x) at every outer evaluation point
    return quad(_infunc, a, b, (func, gfun, hfun, args),
                epsabs=epsabs, epsrel=epsrel)
def _infunc2(y, x, func, qfun, rfun, more_args):
    # Inner integrand for tplquad: at fixed (x, y), integrate func over z
    # between qfun(x, y) and rfun(x, y); discard the error estimate.
    return quad(func, qfun(x, y), rfun(x, y), args=(y, x) + more_args)[0]
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`
    gfun : function
        The lower boundary curve in y which is a function taking a single
        floating point argument (x) and returning a floating point result:
        a lambda function can be useful here.
    hfun : function
        The upper boundary curve in y (same requirements as `gfun`).
    qfun : function
        The lower boundary surface in z. It must be a function that takes
        two floats in the order (x, y) and returns a float.
    rfun : function
        The upper boundary surface in z. (Same requirements as `qfun`.)
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad: Adaptive quadrature using QUADPACK
    quadrature: Adaptive Gaussian quadrature
    fixed_quad: Fixed-order Gaussian quadrature
    dblquad: Double integrals
    nquad : N-dimensional integrals
    romb: Integrators for sampled data
    simps: Integrators for sampled data
    ode: ODE integrators
    odeint: ODE integrators
    scipy.special: For coefficients and roots of orthogonal polynomials

    """
    # reduce to a double integral over (x, y); _infunc2 performs the
    # innermost z-integration at each (x, y) evaluation point
    return dblquad(_infunc2, a, b, gfun, hfun, (func, qfun, rfun, args),
                   epsabs=epsabs, epsrel=epsrel)
def nquad(func, ranges, args=None, opts=None):
    """
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables.
    Various options allow improved integration of discontinuous functions, as
    well as the use of weighted integration, and generally finer control of the
    integration process.

    Parameters
    ----------
    func : callable
        The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, tm``, where integration is carried out over ``x0, ... xn``, which
        must be floats. Function signature should be
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
        in order. That is, integration over ``x0`` is the innermost integral,
        and ``xn`` is the outermost.
        If performance is a concern, this function may be a ctypes function of
        the form::

            f(int n, double args[n])

        where ``n`` is the number of extra parameters and args is an array
        of doubles of the additional parameters. This function may then
        be compiled to a dynamic/shared library then imported through
        ``ctypes``, setting the function's argtypes to ``(c_int, c_double)``,
        and the function's restype to ``(c_double)``. Its pointer may then be
        passed into `nquad` normally.
        This allows the underlying Fortran library to evaluate the function in
        the innermost integration calls without callbacks to Python, and also
        speeds up the evaluation of the function itself.
    ranges : iterable object
        Each element of ranges may be either a sequence of 2 numbers, or else
        a callable that returns such a sequence. ``ranges[0]`` corresponds to
        integration over x0, and so on. If an element of ranges is a callable,
        then it will be called with all of the integration arguments available.
        e.g. if ``func = f(x0, x1, x2)``, then ``ranges[0]`` may be defined as
        either ``(a, b)`` or else as ``(a, b) = range0(x1, x2)``.
    args : iterable object, optional
        Additional arguments ``t0, ..., tn``, required by `func`.
    opts : iterable object or dict, optional
        Options to be passed to `quad`. May be empty, a dict, or
        a sequence of dicts or functions that return a dict. If empty, the
        default options from scipy.integrate.quad are used. If a dict, the same
        options are used for all levels of integration. If a sequence, then each
        element of the sequence corresponds to a particular integration. e.g.
        opts[0] corresponds to integration over x0, and so on. The available
        options together with their default values are:

          - epsabs = 1.49e-08
          - epsrel = 1.49e-08
          - limit  = 50
          - points = None
          - weight = None
          - wvar   = None
          - wopts  = None

        The ``full_output`` option from `quad` is unavailable, due to the
        complexity of handling the large amount of data such an option would
        return for this kind of nested integration. For more information on
        these options, see `quad` and `quad_explain`.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.

    See Also
    --------
    quad : 1-dimensional numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature
    quadrature : adaptive Gaussian quadrature

    Examples
    --------
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                             1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}])
    (1.5267454070738633, 2.9437360001402324e-14)

    >>> scale = .1
    >>> def func2(x0, x1, x2, x3, t0, t1):
    ...     return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
    >>> def lim0(x1, x2, x3, t0, t1):
    ...     return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
    ...             scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
    >>> def lim1(x2, x3, t0, t1):
    ...     return [scale * (t0*x2 + t1*x3) - 1,
    ...             scale * (t0*x2 + t1*x3) + 1]
    >>> def lim2(x3, t0, t1):
    ...     return [scale * (x3 + t0**2*t1**3) - 1,
    ...             scale * (x3 + t0**2*t1**3) + 1]
    >>> def lim3(t0, t1):
    ...     return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
    >>> def opts0(x1, x2, x3, t0, t1):
    ...     return {'points' : [t0 - t1*x1]}
    >>> def opts1(x2, x3, t0, t1):
    ...     return {}
    >>> def opts2(x3, t0, t1):
    ...     return {}
    >>> def opts3(t0, t1):
    ...     return {}
    >>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
    ...                 opts=[opts0, opts1, opts2, opts3])
    (25.066666666666666, 2.7829590483937256e-13)

    """
    depth = len(ranges)
    # normalize every range to a callable; constant (a, b) pairs are wrapped
    ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [dict([])] * depth

    # likewise normalize opts: one callable per integration level
    if isinstance(opts, dict):
        opts = [_OptFunc(opts)] * depth
    else:
        opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]

    return _NQuad(func, ranges, opts).integrate(*args)
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
    # Recursive driver for nquad: peels off one integration variable per
    # depth level and delegates the innermost call to the user function.
    def __init__(self, func, ranges, opts):
        self.abserr = 0            # running maximum of per-level error estimates
        self.func = func
        self.ranges = ranges       # callables, outermost variable last
        self.opts = opts           # callables returning quad() option dicts
        self.maxdepth = len(ranges)

    def integrate(self, *args, **kwargs):
        # depth counts how many outer variables have been fixed so far
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')

        # Get the integration range and options for this depth.
        ind = -(depth + 1)   # index from the end: depth 0 is the outermost
        fn_range = self.ranges[ind]
        low, high = fn_range(*args)
        fn_opt = self.opts[ind]
        opt = dict(fn_opt(*args))

        if 'points' in opt:
            # drop break points that fall outside the current interval
            opt['points'] = [x for x in opt['points'] if low <= x <= high]
        if depth + 1 == self.maxdepth:
            # innermost level: integrate the user's function directly
            f = self.func
        else:
            # otherwise recurse one level deeper, with this variable fixed
            f = partial(self.integrate, depth=depth+1)
        value, abserr = quad(f, low, high, args=args, **opt)
        self.abserr = max(self.abserr, abserr)
        if depth > 0:
            return value
        else:
            # Final result of n-D integration with error
            return value, self.abserr
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.