| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
cpyou/odoo | addons/resource/tests/test_resource.py | 243 | 32181 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from openerp.addons.resource.tests.common import TestResourceCommon
class TestResource(TestResourceCommon):
def test_00_intervals(self):
intervals = [
(
datetime.strptime('2013-02-04 09:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 12:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 11:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S')
), (
datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'),
datetime.strptime('2013-02-04 19:00:00', '%Y-%m-%d %H:%M:%S')
)
]
# Test: interval cleaning
cleaned_intervals = self.resource_calendar.interval_clean(intervals)
self.assertEqual(len(cleaned_intervals), 3, 'resource_calendar: wrong interval cleaning')
# First interval: 03, unchanged
self.assertEqual(cleaned_intervals[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Second interval: 04, 08-14, combining 08-12 and 11-14, 09-11 being inside 08-12
self.assertEqual(cleaned_intervals[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Third interval: 04, 17-21, 18-19 being inside 17-21
self.assertEqual(cleaned_intervals[2][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
self.assertEqual(cleaned_intervals[2][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong interval cleaning')
# Test: disjoint removal
working_interval = (datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), datetime.strptime('2013-02-04 18:00:00', '%Y-%m-%d %H:%M:%S'))
result = self.resource_calendar.interval_remove_leaves(working_interval, intervals)
self.assertEqual(len(result), 1, 'resource_calendar: wrong leave removal from interval')
# First interval: 04, 14-17
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval: 03, 08-10, untouched
self.assertEqual(result[0][0], datetime.strptime('2013-02-03 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-03 10:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Second interval: 04, 08-11:30
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 08:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 11:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Test: schedule hours on intervals, backwards
cleaned_intervals.reverse()
result = self.resource_calendar.interval_schedule_hours(cleaned_intervals, 5.5, remove_at_end=False)
self.assertEqual(len(result), 2, 'resource_calendar: wrong hours scheduling in interval')
# First interval: 04, 17-21, untouched (scheduled backwards)
self.assertEqual(result[0][0], datetime.strptime('2013-02-04 17:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[0][1], datetime.strptime('2013-02-04 21:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
# Second interval: 04, 12:30-14:00
self.assertEqual(result[1][0], datetime.strptime('2013-02-04 12:30:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
self.assertEqual(result[1][1], datetime.strptime('2013-02-04 14:00:00', '%Y-%m-%d %H:%M:%S'), 'resource_calendar: wrong leave removal from interval')
def test_10_calendar_basics(self):
""" Testing basic method of resource.calendar """
cr, uid = self.cr, self.uid
# --------------------------------------------------
# Test1: get_next_day
# --------------------------------------------------
# Test: next day: next day after day1 is day4
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day4+1 is (day1+7)
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date1.date() + relativedelta(days=7), 'resource_calendar: wrong next day computing')
# Test: next day: next day after day1-1 is day1
date = self.resource_calendar.get_next_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong next day computing')
# --------------------------------------------------
# Test2: get_previous_day
# --------------------------------------------------
# Test: previous day: previous day before day1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date())
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4 is day1
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date())
self.assertEqual(date, self.date1.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day4+1 is day4
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date2.date() + relativedelta(days=1))
self.assertEqual(date, self.date2.date(), 'resource_calendar: wrong previous day computing')
# Test: previous day: previous day before day1-1 is (day4-7)
date = self.resource_calendar.get_previous_day(cr, uid, self.calendar_id, day_date=self.date1.date() + relativedelta(days=-1))
self.assertEqual(date, self.date2.date() + relativedelta(days=-7), 'resource_calendar: wrong previous day computing')
# --------------------------------------------------
# Test3: misc
# --------------------------------------------------
weekdays = self.resource_calendar.get_weekdays(cr, uid, self.calendar_id)
self.assertEqual(weekdays, [1, 4], 'resource_calendar: wrong weekdays computing')
attendances = self.resource_calendar.get_attendances_for_weekdays(cr, uid, self.calendar_id, [2, 3, 4, 5])
self.assertEqual(set([att.id for att in attendances]), set([self.att2_id, self.att3_id]),
'resource_calendar: wrong attendances filtering by weekdays computing')
def test_20_calendar_working_intervals(self):
""" Testing working intervals computing method of resource.calendar """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day0 without leaves: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 09:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day3 without leaves: 2 intervals
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date2)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-15 10:11:12', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves outside range: 1 interval
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=0), compute_leaves=True)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working intervals')
# Test: day0 with leaves: 2 intervals because of leave between 9 and 12, ending at 15:45:30
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=8) + relativedelta(days=7),
end_dt=self.date1.replace(hour=15, minute=45, second=30) + relativedelta(days=7),
compute_leaves=True)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:08:07', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working intervals')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 15:45:30', _format), 'resource_calendar: wrong working intervals')
def test_30_calendar_working_days(self):
""" Testing calendar hours computation on a working day """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# Test: day1, beginning at 10:30 -> work from 10:30 (arrival) until 16:00
intervals = self.resource_calendar.get_working_intervals_of_day(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-12 10:30:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: hour computation for same interval, should give 5.5
wh = self.resource_calendar.get_working_hours_of_date(cr, uid, self.calendar_id, start_dt=self.date1.replace(hour=10, minute=30, second=0))
self.assertEqual(wh, 5.5, 'resource_calendar: wrong working interval / day time computing')
# Test: day1+7 on leave, without leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7)
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+7 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=7),
compute_leaves=True
)
# Result: day1 (08->09 + 12->16)
self.assertEqual(len(intervals), 2, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[1][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with generic leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True
)
# Result: day1 (08->16)
self.assertEqual(len(intervals), 1, 'resource_calendar: wrong working interval/day computing')
self.assertEqual(intervals[0][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong working interval / day computing')
self.assertEqual(intervals[0][1], datetime.strptime('2013-02-26 16:00:00', _format), 'resource_calendar: wrong working interval / day computing')
# Test: day1+14 on leave, with resource leave computation
intervals = self.resource_calendar.get_working_intervals_of_day(
cr, uid, self.calendar_id,
start_dt=self.date1.replace(hour=7, minute=0, second=0) + relativedelta(days=14),
compute_leaves=True,
resource_id=self.resource1_id
)
# Result: nothing, because on leave
self.assertEqual(len(intervals), 0, 'resource_calendar: wrong working interval/day computing')
def test_40_calendar_hours_scheduling(self):
""" Testing calendar hours scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test0: schedule hours backwards (old interval_min_get)
# Done without calendar
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, None, self.date1, 40, resource=False)
# res: (datetime.datetime(2013, 2, 7, 9, 8, 7), datetime.datetime(2013, 2, 12, 9, 8, 7))
# --------------------------------------------------
# Test1: schedule hours backwards (old interval_min_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_min_get(cr, uid, self.calendar_id, self.date1, 40, resource=False)
# (datetime.datetime(2013, 1, 29, 9, 0), datetime.datetime(2013, 1, 29, 16, 0))
# (datetime.datetime(2013, 2, 1, 8, 0), datetime.datetime(2013, 2, 1, 13, 0))
# (datetime.datetime(2013, 2, 1, 16, 0), datetime.datetime(2013, 2, 1, 23, 0))
# (datetime.datetime(2013, 2, 5, 8, 0), datetime.datetime(2013, 2, 5, 16, 0))
# (datetime.datetime(2013, 2, 8, 8, 0), datetime.datetime(2013, 2, 8, 13, 0))
# (datetime.datetime(2013, 2, 8, 16, 0), datetime.datetime(2013, 2, 8, 23, 0))
# (datetime.datetime(2013, 2, 12, 8, 0), datetime.datetime(2013, 2, 12, 9, 0))
res = self.resource_calendar.schedule_hours(cr, uid, self.calendar_id, -40, day_dt=self.date1.replace(minute=0, second=0))
# current day, limited at 09:00 because of day_dt specified -> 1 hour
self.assertEqual(res[-1][0], datetime.strptime('2013-02-12 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-1][1], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
# previous days: 5+7 hours / 8 hours / 5+7 hours -> 32 hours
self.assertEqual(res[-2][0], datetime.strptime('2013-02-08 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-2][1], datetime.strptime('2013-02-08 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][0], datetime.strptime('2013-02-08 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-3][1], datetime.strptime('2013-02-08 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][0], datetime.strptime('2013-02-05 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-4][1], datetime.strptime('2013-02-05 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][0], datetime.strptime('2013-02-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-5][1], datetime.strptime('2013-02-01 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][0], datetime.strptime('2013-02-01 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-6][1], datetime.strptime('2013-02-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
# 7 hours remaining
self.assertEqual(res[-7][0], datetime.strptime('2013-01-29 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[-7][1], datetime.strptime('2013-01-29 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
# Compute scheduled hours
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test2: schedule hours forward (old interval_get)
# --------------------------------------------------
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=False, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 2, 22, 8, 0), datetime.datetime(2013, 2, 22, 13, 0))
# (datetime.datetime(2013, 2, 22, 16, 0), datetime.datetime(2013, 2, 22, 23, 0))
# (datetime.datetime(2013, 2, 26, 8, 0), datetime.datetime(2013, 2, 26, 16, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0)
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-22 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-26 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-26 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# res = self.resource_calendar.interval_get(cr, uid, self.calendar_id, self.date1, 40, resource=self.resource1_id, byday=True)
# (datetime.datetime(2013, 2, 12, 9, 0), datetime.datetime(2013, 2, 12, 16, 0))
# (datetime.datetime(2013, 2, 15, 8, 0), datetime.datetime(2013, 2, 15, 13, 0))
# (datetime.datetime(2013, 2, 15, 16, 0), datetime.datetime(2013, 2, 15, 23, 0))
# (datetime.datetime(2013, 3, 1, 8, 0), datetime.datetime(2013, 3, 1, 13, 0))
# (datetime.datetime(2013, 3, 1, 16, 0), datetime.datetime(2013, 3, 1, 23, 0))
# (datetime.datetime(2013, 3, 5, 8, 0), datetime.datetime(2013, 3, 5, 16, 0))
# (datetime.datetime(2013, 3, 8, 8, 0), datetime.datetime(2013, 3, 8, 9, 0))
res = self.resource_calendar.schedule_hours(
cr, uid, self.calendar_id, 40,
day_dt=self.date1.replace(minute=0, second=0),
compute_leaves=True,
resource_id=self.resource1_id
)
self.assertEqual(res[0][0], datetime.strptime('2013-02-12 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[0][1], datetime.strptime('2013-02-12 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][0], datetime.strptime('2013-02-15 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[1][1], datetime.strptime('2013-02-15 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][0], datetime.strptime('2013-02-15 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[2][1], datetime.strptime('2013-02-15 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][0], datetime.strptime('2013-02-19 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[3][1], datetime.strptime('2013-02-19 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][0], datetime.strptime('2013-02-19 12:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[4][1], datetime.strptime('2013-02-19 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][0], datetime.strptime('2013-02-22 08:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[5][1], datetime.strptime('2013-02-22 09:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][0], datetime.strptime('2013-02-22 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[6][1], datetime.strptime('2013-02-22 23:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][0], datetime.strptime('2013-03-01 11:30:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[7][1], datetime.strptime('2013-03-01 13:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][0], datetime.strptime('2013-03-01 16:00:00', _format), 'resource_calendar: wrong hours scheduling')
self.assertEqual(res[8][1], datetime.strptime('2013-03-01 22:30:00', _format), 'resource_calendar: wrong hours scheduling')
td = timedelta()
for item in res:
td += item[1] - item[0]
self.assertEqual(seconds(td) / 3600.0, 40.0, 'resource_calendar: wrong hours scheduling')
# --------------------------------------------------
# Test3: working hours (old _interval_hours_get)
# --------------------------------------------------
# old API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=True)
self.assertEqual(res, 40.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource without leaves
# res: 2 weeks -> 40 hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=False, resource_id=self.resource1_id)
self.assertEqual(res, 40.0, 'resource_calendar: wrong get_working_hours computation')
# old API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar._interval_hours_get(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
resource_id=self.resource1_id, exclude_leaves=False)
self.assertEqual(res, 33.0, 'resource_calendar: wrong _interval_hours_get compatibility computation')
# new API: resource and leaves
# res: 2 weeks -> 40 hours - (3+4) leave hours
res = self.resource_calendar.get_working_hours(
cr, uid, self.calendar_id,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0) + relativedelta(days=7),
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res, 33.0, 'resource_calendar: wrong get_working_hours computation')
# --------------------------------------------------
# Test4: misc
# --------------------------------------------------
# Test without calendar and default_interval
res = self.resource_calendar.get_working_hours(
cr, uid, None,
self.date1.replace(hour=6, minute=0),
self.date2.replace(hour=23, minute=0),
compute_leaves=True, resource_id=self.resource1_id,
default_interval=(8, 16))
self.assertEqual(res, 32.0, 'resource_calendar: wrong get_working_hours computation')
def test_50_calendar_schedule_days(self):
""" Testing calendar days scheduling """
cr, uid = self.cr, self.uid
_format = '%Y-%m-%d %H:%M:%S'
# --------------------------------------------------
# Test1: with calendar
# --------------------------------------------------
res = self.resource_calendar.schedule_days_get_date(cr, uid, self.calendar_id, 5, day_date=self.date1)
self.assertEqual(res.date(), datetime.strptime('2013-02-26 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
res = self.resource_calendar.schedule_days_get_date(
cr, uid, self.calendar_id, 5, day_date=self.date1,
compute_leaves=True, resource_id=self.resource1_id)
self.assertEqual(res.date(), datetime.strptime('2013-03-01 00:00:00', _format).date(), 'resource_calendar: wrong days scheduling')
# --------------------------------------------------
# Test2: misc
# --------------------------------------------------
# Without calendar, should only count days -> 12 -> 16, 5 days with default intervals
res = self.resource_calendar.schedule_days_get_date(cr, uid, None, 5, day_date=self.date1, default_interval=(8, 16))
self.assertEqual(res, datetime.strptime('2013-02-16 16:00:00', _format), 'resource_calendar: wrong days scheduling')
def seconds(td):
assert isinstance(td, timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
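# Editor's addition (hedged sketch, not part of the original test file):
# seconds() above is a pre-Python-2.7 stand-in for timedelta.total_seconds(),
# and the interval cleaning exercised in test_00_intervals amounts to the
# classic sort-and-merge algorithm. The merge() below only mirrors the
# expected behaviour for illustration; it is not OpenERP's actual code.
if __name__ == '__main__':
    td = timedelta(days=1, seconds=30, microseconds=500000)
    assert seconds(td) == td.total_seconds() == 86430.5

    def merge(intervals):
        # sort by start, then coalesce any interval overlapping the
        # previous merged one
        merged = []
        for start, end in sorted(intervals):
            if merged and start <= merged[-1][1]:
                merged[-1] = (merged[-1][0], max(merged[-1][1], end))
            else:
                merged.append((start, end))
        return merged

    day = datetime(2013, 2, 4, 8, 0, 0)
    result = merge([(day, day.replace(hour=12)),
                    (day.replace(hour=11), day.replace(hour=14)),
                    (day.replace(hour=17), day.replace(hour=21))])
    assert result == [(day, day.replace(hour=14)),
                      (day.replace(hour=17), day.replace(hour=21))]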
| agpl-3.0 |
grengojbo/st2 | st2actions/st2actions/config.py | 1 | 3073 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options registration and useful routines.
"""
import sys
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
CONF = cfg.CONF
def parse_args(args=None):
CONF(args=args, version=VERSION_STRING)
def register_opts():
_register_common_opts()
_register_action_runner_opts()
def _register_common_opts():
common_config.register_opts()
def _register_action_runner_opts():
logging_opts = [
cfg.StrOpt('logging', default='conf/logging.conf',
help='location of the logging.conf file'),
cfg.StrOpt('python_binary', default=sys.executable,
help='Python binary which will be used by Python actions.')
]
CONF.register_opts(logging_opts, group='actionrunner')
db_opts = [
cfg.StrOpt('host', default='0.0.0.0', help='host of db server'),
cfg.IntOpt('port', default=27017, help='port of db server'),
cfg.StrOpt('db_name', default='st2', help='name of database')
]
CONF.register_opts(db_opts, group='database')
ssh_runner_opts = [
cfg.StrOpt('remote_dir',
default='/tmp',
help='Location of the script on the remote filesystem.'),
cfg.BoolOpt('allow_partial_failure',
default=False,
help='How partial success of actions run on multiple nodes should be treated.')
]
CONF.register_opts(ssh_runner_opts, group='ssh_runner')
mistral_opts = [
cfg.StrOpt('v2_base_url', default='http://localhost:8989/v2',
help='Mistral v2 API server root endpoint.'),
cfg.IntOpt('max_attempts', default=180,
help='Maximum no of attempts made to connect to Mistral.'),
cfg.IntOpt('retry_wait', default=5,
help='Time in seconds to wait before retrying connection to Mistral.')
]
CONF.register_opts(mistral_opts, group='mistral')
cloudslang_opts = [
cfg.StrOpt('home_dir', default='/opt/cslang',
help='CloudSlang home directory.'),
]
CONF.register_opts(cloudslang_opts, group='cloudslang')
def get_logging_config_path():
return CONF.actionrunner.logging
register_opts()
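# Editor's addition (hedged sketch, not part of the original module): how a
# service entry point would typically consume this module. parse_args([])
# resolves the options registered above against their defaults; the attribute
# paths below follow the groups registered in _register_action_runner_opts().
if __name__ == '__main__':
    parse_args(args=[])
    print('logging config: %s' % CONF.actionrunner.logging)
    print('database: %s:%d/%s' % (CONF.database.host, CONF.database.port,
                                  CONF.database.db_name))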
| apache-2.0 |
mikel-egana-aranguren/SADI-Galaxy-Docker | galaxy-dist/eggs/nose-0.11.1-py2.7.egg/nose/proxy.py | 1 | 5945 | """
Result Proxy
------------
The result proxy wraps the result instance given to each test. It
performs two functions: enabling extended error/failure reporting
and calling plugins.
As each result event is fired, plugins are called with the same event;
however, plugins are called with the nose.case.Test instance that
wraps the actual test. So when a test fails and calls
result.addFailure(self, err), the result proxy calls
addFailure(self.test, err) for each plugin. This allows plugins to
have a single stable interface for all test types, and also to
manipulate the test object itself by setting the `test` attribute of
the nose.case.Test that they receive.
"""
import logging
from nose.config import Config
log = logging.getLogger(__name__)
def proxied_attribute(local_attr, proxied_attr, doc):
"""Create a property that proxies attribute ``proxied_attr`` through
the local attribute ``local_attr``.
"""
def fget(self):
return getattr(getattr(self, local_attr), proxied_attr)
def fset(self, value):
setattr(getattr(self, local_attr), proxied_attr, value)
def fdel(self):
delattr(getattr(self, local_attr), proxied_attr)
return property(fget, fset, fdel, doc)
class ResultProxyFactory(object):
"""Factory for result proxies. Generates a ResultProxy bound to each test
and the result passed to the test.
"""
def __init__(self, config=None):
if config is None:
config = Config()
self.config = config
self.__prepared = False
self.__result = None
def __call__(self, result, test):
"""Return a ResultProxy for the current test.
On first call, plugins are given a chance to replace the
result used for the remaining tests. If a plugin returns a
value from prepareTestResult, that object will be used as the
result for all tests.
"""
if not self.__prepared:
self.__prepared = True
plug_result = self.config.plugins.prepareTestResult(result)
if plug_result is not None:
self.__result = result = plug_result
if self.__result is not None:
result = self.__result
return ResultProxy(result, test, config=self.config)
class ResultProxy(object):
"""Proxy to TestResults (or other results handler).
One ResultProxy is created for each nose.case.Test. The result
proxy calls plugins with the nose.case.Test instance (instead of
the wrapped test case) as each result call is made. Finally, the
real result method is called, also with the nose.case.Test
instance as the test parameter.
"""
def __init__(self, result, test, config=None):
if config is None:
config = Config()
self.config = config
self.plugins = config.plugins
self.result = result
self.test = test
def __repr__(self):
return repr(self.result)
def assertMyTest(self, test):
# The test I was called with must be my .test or my
# .test's .test. or my .test.test's .case
case = getattr(self.test, 'test', None)
assert (test is self.test
or test is case
or test is getattr(case, '_nose_case', None)), (
"ResultProxy for %r (%s) was called with test %r (%s)"
% (self.test, id(self.test), test, id(test)))
def afterTest(self, test):
self.assertMyTest(test)
self.plugins.afterTest(self.test)
if hasattr(self.result, "afterTest"):
self.result.afterTest(self.test)
def beforeTest(self, test):
self.assertMyTest(test)
self.plugins.beforeTest(self.test)
if hasattr(self.result, "beforeTest"):
self.result.beforeTest(self.test)
def addError(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleError(self.test, err)
if plugin_handled:
return
# test.passed is set in result, to account for error classes
formatted = plugins.formatError(self.test, err)
if formatted is not None:
err = formatted
plugins.addError(self.test, err)
self.result.addError(self.test, err)
if not self.result.wasSuccessful() and self.config.stopOnError:
self.shouldStop = True
def addFailure(self, test, err):
self.assertMyTest(test)
plugins = self.plugins
plugin_handled = plugins.handleFailure(self.test, err)
if plugin_handled:
return
self.test.passed = False
formatted = plugins.formatFailure(self.test, err)
if formatted is not None:
err = formatted
plugins.addFailure(self.test, err)
self.result.addFailure(self.test, err)
if self.config.stopOnError:
self.shouldStop = True
def addSuccess(self, test):
self.assertMyTest(test)
self.plugins.addSuccess(self.test)
self.result.addSuccess(self.test)
def startTest(self, test):
self.assertMyTest(test)
self.plugins.startTest(self.test)
self.result.startTest(self.test)
def stop(self):
self.result.stop()
def stopTest(self, test):
self.assertMyTest(test)
self.plugins.stopTest(self.test)
self.result.stopTest(self.test)
# proxied attributes
shouldStop = proxied_attribute('result', 'shouldStop',
"""Should the test run stop?""")
errors = proxied_attribute('result', 'errors',
"""Tests that raised an exception""")
failures = proxied_attribute('result', 'failures',
"""Tests that failed""")
testsRun = proxied_attribute('result', 'testsRun',
"""Number of tests run""")
| gpl-3.0 |
dummie999/android_kernel_htc_z4u | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
thomasem/nova | nova/api/openstack/compute/schemas/v3/console_output.py | 110 | 1393 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
get_console_output = {
'type': 'object',
'properties': {
'os-getConsoleOutput': {
'type': 'object',
'properties': {
'length': {
'type': ['integer', 'string', 'null'],
'pattern': '^-?[0-9]+$',
# NOTE: -1 means an unlimited length.
# TODO(cyeoh): None also means unlimited length
# and is supported for v2 backwards compatibility
# Should remove in the future with a microversion
'minimum': -1,
},
},
'additionalProperties': False,
},
},
'required': ['os-getConsoleOutput'],
'additionalProperties': False,
}
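# Editor's addition (hedged sketch, not part of the original module):
# exercising the schema above with the jsonschema package, which is what
# nova's request-body validation is built on. The payloads are made up.
if __name__ == '__main__':
    import jsonschema

    body = {'os-getConsoleOutput': {'length': '-1'}}   # '-1' == unlimited
    jsonschema.validate(body, get_console_output)      # passes silently

    bad = {'os-getConsoleOutput': {'length': 'ten'}}
    try:
        jsonschema.validate(bad, get_console_output)
    except jsonschema.ValidationError:
        print('rejected: length must match ^-?[0-9]+$')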
| apache-2.0 |
xxsergzzxx/python-for-android | python3-alpha/extra_modules/gdata/oauth/rsa.py | 102 | 4676 | #!/usr/bin/python
"""
requires tlslite - http://trevp.net/tlslite/
"""
import base64
import binascii
try:
from gdata.tlslite.utils import keyfactory
except ImportError:
from tlslite.tlslite.utils import keyfactory
try:
from gdata.tlslite.utils import cryptomath
except ImportError:
from tlslite.tlslite.utils import cryptomath
# XXX andy: ugly local import due to module name, oauth.oauth
import gdata.oauth as oauth
class OAuthSignatureMethod_RSA_SHA1(oauth.OAuthSignatureMethod):
def get_name(self):
return "RSA-SHA1"
def _fetch_public_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
# (2) fetch via http using a url provided by the requester
# (3) some sort of specific discovery code based on request
#
# either way should return a string representation of the certificate
raise NotImplementedError
def _fetch_private_cert(self, oauth_request):
# not implemented yet, ideas are:
# (1) do a lookup in a table of trusted certs keyed off of consumer
#
# either way should return a string representation of the certificate
raise NotImplementedError
def build_signature_base_string(self, oauth_request, consumer, token):
sig = (
oauth.escape(oauth_request.get_normalized_http_method()),
oauth.escape(oauth_request.get_normalized_http_url()),
oauth.escape(oauth_request.get_normalized_parameters()),
)
key = ''
raw = '&'.join(sig)
return key, raw
def build_signature(self, oauth_request, consumer, token):
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the private key cert based on the request
cert = self._fetch_private_cert(oauth_request)
# Pull the private key from the certificate
privatekey = keyfactory.parsePrivateKey(cert)
# Convert base_string to bytes
#base_string_bytes = cryptomath.createByteArraySequence(base_string)
# Sign using the key
signed = privatekey.hashAndSign(base_string)
return binascii.b2a_base64(signed)[:-1]
def check_signature(self, oauth_request, consumer, token, signature):
decoded_sig = base64.b64decode(signature)
key, base_string = self.build_signature_base_string(oauth_request,
consumer,
token)
# Fetch the public key cert based on the request
cert = self._fetch_public_cert(oauth_request)
# Pull the public key from the certificate
publickey = keyfactory.parsePEMKey(cert, public=True)
# Check the signature
ok = publickey.hashAndVerify(decoded_sig, base_string)
return ok
class TestOAuthSignatureMethod_RSA_SHA1(OAuthSignatureMethod_RSA_SHA1):
def _fetch_public_cert(self, oauth_request):
cert = """
-----BEGIN CERTIFICATE-----
MIIBpjCCAQ+gAwIBAgIBATANBgkqhkiG9w0BAQUFADAZMRcwFQYDVQQDDA5UZXN0
IFByaW5jaXBhbDAeFw03MDAxMDEwODAwMDBaFw0zODEyMzEwODAwMDBaMBkxFzAV
BgNVBAMMDlRlc3QgUHJpbmNpcGFsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKB
gQC0YjCwIfYoprq/FQO6lb3asXrxLlJFuCvtinTF5p0GxvQGu5O3gYytUvtC2JlY
zypSRjVxwxrsuRcP3e641SdASwfrmzyvIgP08N4S0IFzEURkV1wp/IpH7kH41Etb
mUmrXSwfNZsnQRE5SYSOhh+LcK2wyQkdgcMv11l4KoBkcwIDAQABMA0GCSqGSIb3
DQEBBQUAA4GBAGZLPEuJ5SiJ2ryq+CmEGOXfvlTtEL2nuGtr9PewxkgnOjZpUy+d
4TvuXJbNQc8f4AMWL/tO9w0Fk80rWKp9ea8/df4qMq5qlFWlx6yOLQxumNOmECKb
WpkUQDIDJEoFUzKMVuJf4KO/FJ345+BNLGgbJ6WujreoM1X/gYfdnJ/J
-----END CERTIFICATE-----
"""
return cert
def _fetch_private_cert(self, oauth_request):
cert = """
-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----
"""
return cert
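# Editor's addition (hedged sketch, not part of the original module): signing
# a request with the test subclass above, which carries baked-in demo certs
# (tlslite must be installed). The consumer key/secret and URL are made up.
if __name__ == '__main__':
    consumer = oauth.OAuthConsumer('dpf43f3p2l4k3l03', 'kd94hf93k423kf44')
    request = oauth.OAuthRequest.from_consumer_and_token(
        consumer, http_method='GET',
        http_url='http://photos.example.net/photos',
        parameters={'file': 'vacation.jpg', 'size': 'original'})
    method = TestOAuthSignatureMethod_RSA_SHA1()
    request.sign_request(method, consumer, None)
    print(request.parameters['oauth_signature'])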
| apache-2.0 |
apanju/GMIO_Odoo | addons/l10n_syscohada/__openerp__.py | 430 | 1940 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010-2011 BAAMTU SARL (<http://www.baamtu.sn>).
# contact: leadsn@baamtu.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'OHADA - Accounting',
'version' : '1.0',
'author' : 'Baamtu Senegal',
'category' : 'Localization/Account Charts',
'description': """
This module implements the accounting chart for OHADA area.
===========================================================
It allows any company or association to manage its financial accounting.
Countries that use OHADA are the following:
-------------------------------------------
Benin, Burkina Faso, Cameroon, Central African Republic, Comoros, Congo,
Ivory Coast, Gabon, Guinea, Guinea Bissau, Equatorial Guinea, Mali, Niger,
Democratic Republic of the Congo, Senegal, Chad, Togo.
""",
'website': 'http://www.baamtu.com',
'depends' : ['account', 'base_vat'],
'demo' : [],
'data' : ['l10n_syscohada_data.xml','l10n_syscohada_wizard.xml'],
'auto_install': False,
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
meowtec/page-navigator | example.py | 1 | 2364 | # coding:utf-8
html_tpl = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Document</title>
<style type="text/css">
.nav{
margin: 10px 0;
font-size: 12px;
font-family: "Helvetica", "Arial", sans-serif;
}
.nav a{
text-decoration: none;
color: #000;
}
.nav span{
color: #999;
}
.nav .item{
display: inline-block;
padding: 3px 8px;
margin: 0 3px;
}
.nav a.number:hover{
background: #99dddd;
color: #ffffff;
}
.nav span.current{
background: #9cc;
color: #fff;
}
.nav a.prev:hover, .nav a.next:hover{
color: #9cc;
}
h2{
margin-top: 2em;
}
</style>
</head>
<body>
<h2>Basic</h2>
<div class="nav">{{html_1_1}}</div>
<div class="nav">{{html_1_2}}</div>
<div class="nav">{{html_1_3}}</div>
<div class="nav">{{html_1_4}}</div>
<div class="nav">{{html_1_5}}</div>
<div class="nav">{{html_1_6}}</div>
<h2>Settings</h2>
<div class="nav">{{html_2_1}}</div>
<h2>Custom Helper</h2>
<div class="nav">{{html_3_1}}</div>
</body>
</html>
'''
from pagenavigator import PageNavigator
def string_replace(string, **data):
for key in data:
string = string.replace('{{' + key + '}}', str(data[key]))
return string
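# Editor's note (not part of the original script): string_replace() is a tiny
# mustache-style substitution helper, e.g.
# string_replace('hi {{name}}', name='world') returns 'hi world'.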
nav_1 = PageNavigator()
html_1_1 = nav_1.create(1, 5)
html_1_2 = nav_1.create(2, 5)
html_1_3 = nav_1.create(5, 5)
html_1_4 = nav_1.create(1, 6)
html_1_5 = nav_1.create(5, 6)
html_1_6 = nav_1.create(5, 10)
nav_2 = PageNavigator(link_helper='list.html?page={{page}}&from={{current}}&max={{max}}',
prev_text='←', next_text='→', more_text='……', size=9)
html_2_1 = nav_2.create(10, 20)
nav_3 = PageNavigator(number_helper='<button href="{{link}}" class="item number" data-page="{{page}}">{{page}}</button>',
current_helper='<button class="item number current" data-page="{{page}}" disabled="disabled">{{page}}</button>')
html_3_1 = nav_3.create(10, 20)
html = string_replace(html_tpl, html_1_1=html_1_1, html_1_2=html_1_2, html_1_3=html_1_3,
html_1_4=html_1_4, html_1_5=html_1_5, html_1_6=html_1_6,
html_2_1=html_2_1,
html_3_1=html_3_1
)
file_object = open('python_example.html', 'w')
file_object.write(html)
file_object.close()
| mit |
umberto1978/speedwizz_kernel_N2 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
ehovind/extensible-ebook-converter | eecon_fetcher.py | 1 | 3151 | #!/usr/bin/env python
"""
This file is part of Extensible eBook Converter (EeCon),
an advanced ebook analysis and conversion tool.
Copyright (C) 2012 Espen Hovind <espehov@ifi.uio.no>
EeCon is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Eeon is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with EeCon. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
from fetcher import fetcher as Fetcher
# ==============================================================================
# CONFIGURATION:
# ==============================================================================
WORKSPACE = "workspace/project_runeberg/"
VALID_DOMAINS = ("runeberg.org",)
# ==============================================================================
# FUNCTION:
# main()
# ==============================================================================
def main():
"""
DESCRIPTION:
PARAMETERS:
RETURN:
"""
# parse arguments
args = parse_command()
# fetch and initalize the workspace
fetcher = Fetcher.Fetcher(WORKSPACE, VALID_DOMAINS, args)
# process the arguments
fetcher.process()
# ==============================================================================
# FUNCTION:
# parse_command()
# ==============================================================================
def parse_command():
"""
DESCRIPTION:
Parse the user-provided command using argparse.
PARAMETERS:
None
RETURN:
Dictionary of command line options
"""
print "[STATUS] parsing arguments... ",
# create an ArgumentParser
parser = argparse.ArgumentParser()
# positional arguments
parser.add_argument("--auto-markup", action="store_true",
help="Automatic conversion from HTML to XHTML (best effort)")
parser.add_argument("--auto-populate", action="store_true",
help="Automatic population from Project Runeberg Pages files")
parser.add_argument("--auto-utf8", action="store_true",
help="auto convert publication files to UTF-8")
parser.add_argument("--patch", help="apply pre-made git patch")
parser.add_argument("--source",
help="fetch a ebook archive URL or filename")
parser.add_argument("--title",
help="title of publication")
# parse the command into a ArgumentParser object
args = parser.parse_args()
print "ok."
# return a dict with command line options
return vars(args)
# ==============================================================================
# MODULE:
# __name__
# ==============================================================================
if __name__ == "__main__":
main()
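# Editor's note (hedged, not part of the original script): a typical
# invocation, with a made-up Project Runeberg archive URL:
#
#   python eecon_fetcher.py --source http://runeberg.org/sometitle.zip \
#       --title "Some Title" --auto-utf8 --auto-populate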
| gpl-3.0 |
BlackSmith/selenium | py/test/selenium/webdriver/common/page_load_timeout_tests.py | 63 | 2276 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import pytest
from selenium.common.exceptions import TimeoutException
def not_available_on_remote(func):
def testMethod(self):
print(self.driver)
if type(self.driver) == 'remote':
return lambda x: None
else:
return func(self)
return testMethod
class PageLoadTimeoutTest(unittest.TestCase):
def testShouldTimeoutOnPageLoadTakingTooLong(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not implement page load timeouts")
self.driver.set_page_load_timeout(0.01)
try:
self._loadSimplePage()
self.fail("Expected a timeout on page load")
except TimeoutException as e:
pass
def testClickShouldTimeout(self):
if self.driver.capabilities['browserName'] == 'phantomjs':
pytest.xfail("phantomjs driver does not implement page load timeouts")
self._loadSimplePage()
self.driver.set_page_load_timeout(0.01)
try:
self.driver.find_element_by_id("multilinelink").click()
self.fail("Expected a timeout on page load after clicking")
except TimeoutException as e:
pass
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| apache-2.0 |
crazy-cat/incubator-mxnet | tests/python/unittest/test_io.py | 6 | 9265 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import mxnet as mx
from mxnet.test_utils import *
import numpy as np
import os, gzip
import pickle as pickle
import time
try:
import h5py
except ImportError:
h5py = None
import sys
from common import get_data
import unittest
def test_MNISTIter():
# prepare data
get_data.GetMNIST_ubyte()
batch_size = 100
train_dataiter = mx.io.MNISTIter(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data_shape=(784,),
batch_size=batch_size, shuffle=1, flat=1, silent=0, seed=10)
# test_loop
nbatch = 60000 // batch_size  # integer division keeps the comparison below exact
batch_count = 0
for batch in train_dataiter:
batch_count += 1
assert(nbatch == batch_count)
# test_reset
train_dataiter.reset()
train_dataiter.iter_next()
label_0 = train_dataiter.getlabel().asnumpy().flatten()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.iter_next()
train_dataiter.reset()
train_dataiter.iter_next()
label_1 = train_dataiter.getlabel().asnumpy().flatten()
assert((label_0 == label_1).all())  # elementwise check; a summed difference could cancel out
def test_Cifar10Rec():
get_data.GetCifar10()
dataiter = mx.io.ImageRecordIter(
path_imgrec="data/cifar/train.rec",
mean_img="data/cifar/cifar10_mean.bin",
rand_crop=False,
rand_mirror=False,
shuffle=False,
data_shape=(3,28,28),
batch_size=100,
preprocess_threads=4,
prefetch_buffer=1)
labelcount = [0 for i in range(10)]
batchcount = 0
for batch in dataiter:
npdata = batch.data[0].asnumpy().flatten().sum()
sys.stdout.flush()
batchcount += 1
nplabel = batch.label[0].asnumpy()
for i in range(nplabel.shape[0]):
labelcount[int(nplabel[i])] += 1
for i in range(10):
assert(labelcount[i] == 5000)
def test_NDArrayIter():
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
dataiter = mx.io.NDArrayIter(data, label, 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(data, label, 128, False, last_batch_handle='pad')
batchidx = 0
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_h5py():
if not h5py:
return
data = np.ones([1000, 2, 2])
label = np.ones([1000, 1])
for i in range(1000):
data[i] = i / 100
label[i] = i / 100
try:
os.remove("ndarraytest.h5")
except OSError:
pass
with h5py.File("ndarraytest.h5", "w") as f:  # explicit mode; h5py no longer defaults it
f.create_dataset("data", data=data)
f.create_dataset("label", data=label)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, True, last_batch_handle='pad')
batchidx = 0
for batch in dataiter:
batchidx += 1
assert(batchidx == 8)
dataiter = mx.io.NDArrayIter(f["data"], f["label"], 128, False, last_batch_handle='pad')
labelcount = [0 for i in range(10)]
for batch in dataiter:
label = batch.label[0].asnumpy().flatten()
assert((batch.data[0].asnumpy()[:,0,0] == label).all())
for i in range(label.shape[0]):
labelcount[int(label[i])] += 1
try:
os.remove("ndarraytest.h5")
except OSError:
pass
for i in range(10):
if i == 0:
assert(labelcount[i] == 124)
else:
assert(labelcount[i] == 100)
def test_NDArrayIter_csr():
# creating toy data
num_rows = rnd.randint(5, 15)
num_cols = rnd.randint(1, 20)
batch_size = rnd.randint(1, num_rows)
shape = (num_rows, num_cols)
csr, _ = rand_sparse_ndarray(shape, 'csr')
dns = csr.asnumpy()
# make iterators
csr_iter = iter(mx.io.NDArrayIter(csr, csr, batch_size, last_batch_handle='discard'))
begin = 0
for batch in csr_iter:
expected = np.zeros((batch_size, num_cols))
end = begin + batch_size
expected[:num_rows - begin] = dns[begin:end]
if end > num_rows:
expected[num_rows - begin:] = dns[0:end - num_rows]
assert_almost_equal(batch.data[0].asnumpy(), expected)
begin += batch_size
def test_LibSVMIter():
def check_libSVMIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
fout.write('1.0 0:0.5 2:1.2\n')
fout.write('-2.0\n')
fout.write('-3.0 0:0.6 1:2.4 2:1.2\n')
fout.write('4 2:-1.2\n')
with open(label_path, 'w') as fout:
fout.write('1.0\n')
fout.write('-2.0 0:0.125\n')
fout.write('-3.0 2:1.2\n')
fout.write('4 1:1.0 2:-1.2\n')
data_dir = os.path.join(cwd, 'data')
data_train = mx.io.LibSVMIter(data_libsvm=data_path, label_libsvm=label_path,
data_shape=(3, ), label_shape=(3, ), batch_size=3)
first = mx.nd.array([[ 0.5, 0., 1.2], [ 0., 0., 0.], [ 0.6, 2.4, 1.2]])
second = mx.nd.array([[ 0., 0., -1.2], [ 0.5, 0., 1.2], [ 0., 0., 0.]])
i = 0
for batch in iter(data_train):
expected = first.asnumpy() if i == 0 else second.asnumpy()
assert_almost_equal(data_train.getdata().asnumpy(), expected)
i += 1
def check_libSVMIter_news_data():
news_metadata = {
'name': 'news20.t',
'origin_name': 'news20.t.bz2',
'url': "http://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/news20.t.bz2",
'feature_dim': 62060,
'num_classes': 20,
'num_examples': 3993,
}
batch_size = 33
num_examples = news_metadata['num_examples']
data_dir = os.path.join(os.getcwd(), 'data')
get_bz2_data(data_dir, news_metadata['name'], news_metadata['url'],
news_metadata['origin_name'])
path = os.path.join(data_dir, news_metadata['name'])
data_train = mx.io.LibSVMIter(data_libsvm=path, data_shape=(news_metadata['feature_dim'],),
batch_size=batch_size)
for epoch in range(2):
num_batches = 0
for batch in data_train:
# check the range of labels
assert(np.sum(batch.label[0].asnumpy() > 20) == 0)
assert(np.sum(batch.label[0].asnumpy() <= 0) == 0)
num_batches += 1
expected_num_batches = num_examples / batch_size
assert(num_batches == int(expected_num_batches)), num_batches
data_train.reset()
check_libSVMIter_synthetic()
check_libSVMIter_news_data()
@unittest.skip("test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/7826")
def test_CSVIter():
def check_CSVIter_synthetic():
cwd = os.getcwd()
data_path = os.path.join(cwd, 'data.t')
label_path = os.path.join(cwd, 'label.t')
with open(data_path, 'w') as fout:
for i in range(1000):
fout.write(','.join(['1' for _ in range(8*8)]) + '\n')
with open(label_path, 'w') as fout:
for i in range(1000):
fout.write('0\n')
data_train = mx.io.CSVIter(data_csv=data_path, data_shape=(8,8),
label_csv=label_path, batch_size=100)
expected = mx.nd.ones((100, 8, 8))
for batch in iter(data_train):
assert_almost_equal(data_train.getdata().asnumpy(), expected.asnumpy())
check_CSVIter_synthetic()
if __name__ == "__main__":
test_NDArrayIter()
if h5py:
test_NDArrayIter_h5py()
test_MNISTIter()
test_Cifar10Rec()
test_LibSVMIter()
test_NDArrayIter_csr()
test_CSVIter()
| apache-2.0 |
slisson/intellij-community | python/lib/Lib/heapq.py | 91 | 14227 | # -*- coding: Latin-1 -*-
"""Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
an ordinary binary tournament, as seen in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs; the merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'nlargest',
'nsmallest']
from itertools import islice, repeat, count, imap, izip, tee
from operator import itemgetter, neg
import bisect
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heapify(x):
"""Transform list into a heap, in-place, in O(len(heap)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(xrange(n//2)):
_siftup(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heapreplace = heapreplace
sol = result[0] # sol --> smallest of the nlargest
for elem in it:
if elem <= sol:
continue
_heapreplace(result, elem)
sol = result[0]
result.sort(reverse=True)
return result
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if hasattr(iterable, '__len__') and n * 10 <= len(iterable):
# For smaller values of n, the bisect method is faster than a minheap.
# It is also memory efficient, consuming only n elements of space.
it = iter(iterable)
result = sorted(islice(it, 0, n))
if not result:
return result
insort = bisect.insort
pop = result.pop
los = result[-1] # los --> Largest of the nsmallest
for elem in it:
if los <= elem:
continue
insort(result, elem)
pop()
los = result[-1]
return result
# An alternative approach manifests the whole iterable in memory but
# saves comparisons by heapifying all at once. Also, saves time
# over bisect.insort() which has O(n) data movement time for every
# insertion. Finding the n smallest of an m length iterable requires
# O(m) + O(n log m) comparisons.
h = list(iterable)
heapify(h)
return map(heappop, repeat(h, min(n, len(h))))
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent <= newitem:
break
heap[pos] = parent
pos = parentpos
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom __cmp__ methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and heap[rightpos] <= heap[childpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
# If available, use C implementation
try:
from _heapq import heappush, heappop, heapify, heapreplace, nlargest, nsmallest
except ImportError:
pass
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
in1, in2 = tee(iterable)
it = izip(imap(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return map(itemgetter(2), result) # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
in1, in2 = tee(iterable)
it = izip(imap(key, in1), imap(neg, count()), in2) # decorate
result = _nlargest(n, it)
return map(itemgetter(2), result) # undecorate
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print sort
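# A second sketch, echoing the scheduler idea from the notes above:
# pushing (time, event) tuples makes heappop() yield events in time order.
schedule = []
for event in [(5, 'write'), (1, 'open'), (3, 'read')]:
heappush(schedule, event)
while schedule:
print heappop(schedule)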
| apache-2.0 |
beatrizjesus/my-first-blog | pasta/Lib/site-packages/pip/_vendor/ipaddress.py | 206 | 72089 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
__version__ = '1.0.7'
import struct
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
_compat_bytes_to_byte_vals = lambda byt: byt
else:
_compat_bytes_to_byte_vals = (lambda byt:
[struct.unpack(b'!B', b)[0] for b in byt])
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
if hasattr(int, 'bit_length'):
# Not int.bit_length, since that won't work in 2.7 where long exists
_compat_bit_length = lambda i: i.bit_length()
else:
_compat_bit_length = lambda i: len(bin(abs(i))) - 2
def _compat_range(start, end):
i = start
while i < end:
yield i
i += 1
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
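# A quick sketch of the three factory helpers above:
# ip_address('192.0.2.1') -> IPv4Address('192.0.2.1')
# ip_network('192.0.2.0/24') -> IPv4Network('192.0.2.0/24')
# ip_interface('192.0.2.1/24') -> IPv4Interface('192.0.2.1/24')
# Each tries the IPv4 class first, then IPv6, then raises ValueError.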
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Returns:
A tuple containing the first and last IP addresses in the sequence.
"""
first = last = addresses[0]
for ip in addresses[1:]:
if ip._ip == last._ip + 1:
last = ip
else:
break
return (first, last)
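# Worked example: given [IPv4Address('192.0.2.0'), IPv4Address('192.0.2.1'),
# IPv4Address('192.0.2.5')], the consecutive run ends at .1, so the
# function returns (IPv4Address('192.0.2.0'), IPv4Address('192.0.2.1')).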
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
for i in range(bits):
if (number >> i) & 1:
return i
# All bits of interest were zero, even if there are more in the number
return bits
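# Worked examples: _count_righthand_zero_bits(0b101000, 8) == 3, and
# _count_righthand_zero_bits(0, 8) == 8 via the all-zero fast path above.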
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" %
(first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip('%s/%d' % (first, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
first = first.__class__(first_int)
def _collapse_addresses_recursive(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_recursive([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
while True:
last_addr = None
ret_array = []
optimized = False
for cur_addr in addresses:
if not ret_array:
last_addr = cur_addr
ret_array.append(cur_addr)
elif (cur_addr.network_address >= last_addr.network_address and
cur_addr.broadcast_address <= last_addr.broadcast_address):
optimized = True
elif cur_addr == list(last_addr.supernet().subnets())[1]:
ret_array[-1] = last_addr = last_addr.supernet()
optimized = True
else:
last_addr = cur_addr
ret_array.append(cur_addr)
addresses = ret_array
if not optimized:
return addresses
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
i = 0
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
nets = sorted(set(nets))
while i < len(ips):
(first, last) = _find_address_range(ips[i:])
i = ips.index(last) + 1
addrs.extend(summarize_address_range(first, last))
return iter(_collapse_addresses_recursive(sorted(
addrs + nets, key=_BaseNetwork._get_networks_key)))
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
doesn't make any sense. There are some times however, where you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
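# Sketch: sorted(mixed, key=get_mixed_type_key) orders a mix of address
# and network objects, where a plain sorted(mixed) would raise TypeError
# from the __lt__ type checks below. (`mixed` is a hypothetical list.)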
class _TotalOrderingMixin(object):
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def version(self):
msg = '%s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = ("%r (len %d != %d) is not permitted as an IPv%d address "
"(did you pass in a bytes instead of a unicode object?)")
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
def _ip_int_from_prefix(self, prefixlen=None):
"""Turn the prefix length netmask into a int for comparison.
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
if prefixlen is None:
prefixlen = self._prefixlen
return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
def _prefix_from_ip_int(self, ip_int, mask=32):
"""Return prefix length from the decimal netmask.
Args:
ip_int: An integer, the IP address.
mask: The netmask. Defaults to 32.
Returns:
An integer, the prefix length.
"""
return mask - _count_righthand_zero_bits(ip_int, mask)
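# Worked example for the two prefix helpers above, on an IPv4 (/32) object:
# _ip_int_from_prefix(24) -> 0xFFFFFF00 (255.255.255.0)
# _prefix_from_ip_int(0xFFFFFF00) -> 32 - 8 trailing zero bits == 24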
def _ip_string_from_prefix(self, prefixlen=None):
"""Turn a prefix length into a dotted decimal string.
Args:
prefixlen: An integer, the netmask prefix length.
Returns:
A string, the dotted decimal netmask string.
"""
if prefixlen is None:
prefixlen = self._prefixlen
return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
def __init__(self, address):
if (not isinstance(address, bytes) and '/' in _compat_str(address)):
raise AddressValueError("Unexpected '/' in %r" % address)
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip
and self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError
return self._address_class(broadcast + n)
def __lt__(self, other):
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
addr1.address_exclude(addr2) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
addr1.address_exclude(addr2) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return  # PEP 479: raising StopIteration inside a generator is an error
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if (other.network_address >= s1.network_address and
other.broadcast_address <= s1.broadcast_address):
yield s2
s1, s2 = s1.subnets()
elif (other.network_address >= s2.network_address and
other.broadcast_address <= s2.broadcast_address):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
for IPv6), yield an iterator with just ourself.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
An iterator of IPv(4|6) objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if not self._is_valid_netmask(str(new_prefixlen)):
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
first = self.__class__('%s/%s' %
(self.network_address,
self._prefixlen + prefixlen_diff))
yield first
current = first
while True:
broadcast = current.broadcast_address
if broadcast == self.broadcast_address:
return
new_addr = self._address_class(int(broadcast) + 1)
current = self.__class__('%s/%s' % (new_addr,
new_prefixlen))
yield current
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
An IPv4 network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
if self.prefixlen - prefixlen_diff < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
# TODO (pmoody): optimize this.
t = self.__class__('%s/%d' % (self.network_address,
self.prefixlen - prefixlen_diff),
strict=False)
return t.__class__('%s/%d' % (t.network_address, t.prefixlen))
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset((255, 254, 252, 248, 240, 224, 192, 128, 0))
def __init__(self, address):
self._version = 4
self._max_prefixlen = IPV4LENGTH
def _explode_shorthand_ip_string(self):
return _compat_str(self)
def _ip_int_from_string(self, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
bvs = map(self._parse_octet, octets)
return _compat_int_from_byte_vals(bvs, 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
def _parse_octet(self, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
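# Examples of the rules above: _parse_octet('255') -> 255;
# _parse_octet('08') raises ValueError (ambiguous octal/decimal);
# _parse_octet('256') raises ValueError (octet > 255).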
def _string_from_ip_int(self, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_valid_netmask(self, netmask):
"""Verify that the netmask is valid.
Args:
netmask: A string, either a prefix or dotted decimal
netmask.
Returns:
A boolean, True if the prefix represents a valid IPv4
netmask.
"""
mask = netmask.split('.')
if len(mask) == 4:
try:
for x in mask:
if int(x) not in self._valid_mask_octets:
return False
except ValueError:
# Found something that isn't an integer or isn't valid
return False
for idx, y in enumerate(mask):
# compare octets numerically; string comparison only works by accident
if idx > 0 and int(y) > int(mask[idx - 1]):
return False
return True
try:
netmask = int(netmask)
except ValueError:
return False
return 0 <= netmask <= self._max_prefixlen
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
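# e.g. _is_hostmask('0.0.0.255') -> True (octets ascend), while
# _is_hostmask('255.255.255.0') -> False (that shape is a netmask).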
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
_BaseAddress.__init__(self, address)
_BaseV4.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
reserved_network = IPv4Network('240.0.0.0/4')
return self in reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 1918.
"""
private_10 = IPv4Network('10.0.0.0/8')
private_172 = IPv4Network('172.16.0.0/12')
private_192 = IPv4Network('192.168.0.0/16')
return (self in private_10 or
self in private_172 or
self in private_192)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
multicast_network = IPv4Network('224.0.0.0/4')
return self in multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
unspecified_address = IPv4Address('0.0.0.0')
return self == unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
loopback_network = IPv4Network('127.0.0.0/8')
return self in loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
linklocal_network = IPv4Network('169.254.0.0/16')
return self in linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes,) + _compat_int_types):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
.broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseV4.__init__(self, address)
_BaseNetwork.__init__(self, address)
# Constructing from a packed address
if isinstance(address, bytes):
self.network_address = IPv4Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
#fixme: address/network test here
return
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self.network_address = IPv4Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ALL_ONES)
#fixme: address/network test here.
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
mask = addr[1].split('.')
if len(mask) == 4:
# We have dotted decimal netmask.
if self._is_valid_netmask(addr[1]):
self.netmask = IPv4Address(self._ip_int_from_string(
addr[1]))
elif self._is_hostmask(addr[1]):
self.netmask = IPv4Address(
self._ip_int_from_string(addr[1]) ^ self._ALL_ONES)
else:
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
self._prefixlen = self._prefix_from_ip_int(int(self.netmask))
else:
# We have a netmask in prefix length form.
if not self._is_valid_netmask(addr[1]):
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
self._prefixlen = int(addr[1])
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv4Address(self._ip_int_from_prefix(
self._prefixlen))
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
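# Minimal usage sketch (added for illustration), restating the equivalent
# constructor forms documented above; with the default strict=True, host
# bits below the mask are rejected.
#     >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/255.255.255.0')
#     True
#     >>> IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/0.0.0.255')
#     True
#     >>> IPv4Network('192.0.2.1/24')
#     Traceback (most recent call last):
#         ...
#     ValueError: 192.0.2.1/24 has host bits set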
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
def __init__(self, address):
self._version = 6
self._max_prefixlen = IPV6LENGTH
def _ip_int_from_string(self, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = self._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = ("At most %d colons permitted in %r" %
(_max_parts - 1, ip_str))
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (self._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != self._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (self._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= self._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
def _parse_hextet(self, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not self._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
def _compress_hextets(self, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
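# Worked example (illustrative): ['0', '0', '0', '0', '0', '0', '0', '1']
# has its longest zero run (indices 0-6) collapsed to '', and a leading ''
# is prepended because the run starts at index 0, giving ['', '', '1'],
# which ':'.join renders as '::1'.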
def _string_from_ip_int(self, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(self._ip)
if ip_int > self._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = self._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Args:
ip_str: A string, the IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
_BaseAddress.__init__(self, address)
_BaseV6.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
multicast_network = IPv6Network('ff00::/8')
return self in multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
reserved_nets = [IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9')]
return any(self in x for x in reserved_nets)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
linklocal_network = IPv6Network('fe80::/10')
return self in linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
sitelocal_network = IPv6Network('fec0::/10')
return self in sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per RFC 4193.
"""
private_network = IPv6Network('fc00::/7')
return self in private_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
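# Spot checks for the embedded-IPv4 helpers above (illustrative):
#     >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
#     IPv4Address('192.0.2.1')
#     >>> IPv6Address('2002:c000:204::1').sixtofour
#     IPv4Address('192.0.2.4')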
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes,) + _compat_int_types):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnet mask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
a true network address, e.g., 2001:db8::1000/124 and not an
IP address on a network, e.g., 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseV6.__init__(self, address)
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self.network_address = IPv6Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ALL_ONES)
return
# Constructing from a packed address
if isinstance(address, bytes):
self.network_address = IPv6Address(address)
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ALL_ONES)
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
if self._is_valid_netmask(addr[1]):
self._prefixlen = int(addr[1])
else:
raise NetmaskValueError('%r is not a valid netmask'
% addr[1])
else:
self._prefixlen = self._max_prefixlen
self.netmask = IPv6Address(self._ip_int_from_prefix(self._prefixlen))
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def _is_valid_netmask(self, prefixlen):
"""Verify that the netmask/prefixlen is valid.
Args:
prefixlen: A string, the netmask in prefix length format.
Returns:
A boolean, True if the prefix represents a valid IPv6
netmask.
"""
try:
prefixlen = int(prefixlen)
except ValueError:
return False
return 0 <= prefixlen <= self._max_prefixlen
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
| mit |
albert12132/templar | templar/cli/templar.py | 1 | 2179 | """Command-line interface for templar."""
from templar.api import config
from templar.api import publish
from templar.exceptions import TemplarError
import templar
import argparse
import logging
import sys
LOGGING_FORMAT = '%(levelname)s %(filename)s:%(lineno)d> %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger('templar')
def flags(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source',
help='Path to a source file with Markdown content.')
parser.add_argument('-t', '--template',
help='Path to a Jinja template file')
parser.add_argument('-d', '--destination',
help='Path to the destination file.')
parser.add_argument('-c', '--config', default='config.py',
help='Path to a Templar configuration file.')
parser.add_argument('--print', action='store_true',
help='Forces printing of result to stdout, '
'even if --destination is specified')
parser.add_argument('--debug', action='store_true',
help='Enable debugging messages.')
parser.add_argument('--version', action='store_true',
help='Print the version number and exit')
if args is not None:
return parser.parse_args(args)
return parser.parse_args()
def run(args):
if args.version:
print('Templar version {}'.format(templar.__version__))
exit(0)
log.setLevel(logging.DEBUG if args.debug else logging.ERROR)
try:
configuration = config.import_config(args.config)
result = publish.publish(
configuration,
source=args.source,
template=args.template,
destination=args.destination,
no_write=args.print)
except TemplarError as e:
if args.debug:
raise
else:
print('{}: {}'.format(type(e).__name__, str(e)), file=sys.stderr)
exit(1)
else:
if not args.destination or args.print:
print(result)
def main():
run(flags())
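# Example invocation (hypothetical file paths; assumes a `templar`
# console script wired to main()):
#     templar -s content.md -t page.html -d out/index.html --print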
| mit |
apark263/tensorflow | tensorflow/python/keras/layers/noise.py | 5 | 6369 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layers that operate regularization via the addition of noise.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.layers.GaussianNoise')
class GaussianNoise(Layer):
"""Apply additive zero-centered Gaussian noise.
This is useful to mitigate overfitting
(you could see it as a form of random data augmentation).
Gaussian Noise (GN) is a natural choice as a corruption process
for real-valued inputs.
As it is a regularization layer, it is only active at training time.
Arguments:
stddev: float, standard deviation of the noise distribution.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, stddev, **kwargs):
super(GaussianNoise, self).__init__(**kwargs)
self.supports_masking = True
self.stddev = stddev
def call(self, inputs, training=None):
def noised():
return inputs + K.random_normal(
shape=array_ops.shape(inputs), mean=0., stddev=self.stddev)
return K.in_train_phase(noised, inputs, training=training)
def get_config(self):
config = {'stddev': self.stddev}
base_config = super(GaussianNoise, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
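# Usage sketch (illustrative; standard tf.keras Sequential API):
#     model = tf.keras.Sequential([
#         tf.keras.layers.GaussianNoise(0.1, input_shape=(20,)),
#         tf.keras.layers.Dense(10),
#     ])
# The noise is injected only in the training phase (e.g. inside fit()).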
@keras_export('keras.layers.GaussianDropout')
class GaussianDropout(Layer):
"""Apply multiplicative 1-centered Gaussian noise.
As it is a regularization layer, it is only active at training time.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, rate, **kwargs):
super(GaussianDropout, self).__init__(**kwargs)
self.supports_masking = True
self.rate = rate
def call(self, inputs, training=None):
if 0 < self.rate < 1:
def noised():
stddev = np.sqrt(self.rate / (1.0 - self.rate))
return inputs * K.random_normal(
shape=array_ops.shape(inputs), mean=1.0, stddev=stddev)
return K.in_train_phase(noised, inputs, training=training)
return inputs
def get_config(self):
config = {'rate': self.rate}
base_config = super(GaussianDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
@keras_export('keras.layers.AlphaDropout')
class AlphaDropout(Layer):
"""Applies Alpha Dropout to the input.
Alpha Dropout is a `Dropout` that keeps mean and variance of inputs
to their original values, in order to ensure the self-normalizing property
even after this dropout.
Alpha Dropout fits well to Scaled Exponential Linear Units
by randomly setting activations to the negative saturation value.
Arguments:
rate: float, drop probability (as with `Dropout`).
The multiplicative noise will have
standard deviation `sqrt(rate / (1 - rate))`.
seed: A Python integer to use as random seed.
Input shape:
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model.
Output shape:
Same shape as input.
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super(AlphaDropout, self).__init__(**kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
self.supports_masking = True
def _get_noise_shape(self, inputs):
return self.noise_shape if self.noise_shape else array_ops.shape(inputs)
def call(self, inputs, training=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(inputs)
def dropped_inputs(inputs=inputs, rate=self.rate, seed=self.seed): # pylint: disable=missing-docstring
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
alpha_p = -alpha * scale
kept_idx = math_ops.greater_equal(
K.random_uniform(noise_shape, seed=seed), rate)
kept_idx = math_ops.cast(kept_idx, K.floatx())
# Get affine transformation params
a = ((1 - rate) * (1 + rate * alpha_p**2))**-0.5
b = -a * alpha_p * rate
# Apply mask
x = inputs * kept_idx + alpha_p * (1 - kept_idx)
# Do affine transformation
return a * x + b
return K.in_train_phase(dropped_inputs, inputs, training=training)
return inputs
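# Worked numbers (illustrative, rate=0.1): alpha_p ~= -1.7581, so
# a ~= 0.921 and b ~= 0.162; kept activations pass through the affine
# map a*x + b while dropped ones are first set to alpha_p, preserving
# the self-normalizing mean and variance.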
def get_config(self):
config = {'rate': self.rate}
base_config = super(AlphaDropout, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
@tf_utils.shape_type_conversion
def compute_output_shape(self, input_shape):
return input_shape
| apache-2.0 |
RockySteveJobs/python-for-android | python3-alpha/python3-src/Lib/distutils/fancy_getopt.py | 207 | 17784 | """distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
import sys, string, re
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = str.maketrans('-', '_')
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__(self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have 3 or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
def _build_index(self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table(self, option_table):
self.option_table = option_table
self._build_index()
def add_option(self, long_option, short_option=None, help_string=None):
if long_option in self.option_index:
raise DistutilsGetoptError(
"option conflict: already an option '%s'" % long_option)
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option(self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return long_option in self.option_index
def get_attr_name(self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return long_option.translate(longopt_xlate)
def _check_alias_dict(self, aliases, what):
assert isinstance(aliases, dict)
for (alias, opt) in aliases.items():
if alias not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias))
if opt not in self.option_index:
raise DistutilsGetoptError(("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt))
def set_aliases(self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases(self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table(self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError("invalid option tuple: %r" % (option,))
# Type- and value-check the option names
if not isinstance(long, str) or len(long) < 2:
raise DistutilsGetoptError(("invalid long option '%s': "
"must be a string of length >= 2") % long)
if (not ((short is None) or
(isinstance(short, str) and len(short) == 1))):
raise DistutilsGetoptError("invalid short option '%s': "
"must a single character or None" % short)
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is option is a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid negative alias '%s': "
"aliased option '%s' takes a value"
% (long, alias_to))
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError(
"invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't"
% (long, alias_to))
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError(
"invalid long option name '%s' "
"(must be letters, numbers, hyphens only" % long)
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
def getopt(self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = True
else:
created_object = False
self._grok_option_table()
short_opts = ' '.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error as msg:
raise DistutilsArgError(msg)
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
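# Usage sketch (illustrative option table):
#     parser = FancyGetopt([('verbose', 'v', 'run verbosely', 1),
#                           ('output=', 'o', 'output file')])
#     args, opts = parser.getopt(['-v', '--output', 'out.txt', 'src'])
#     # args == ['src'], opts.verbose == 1, opts.output == 'out.txt'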
def get_option_order(self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError("'getopt()' hasn't been called yet")
else:
return self.option_order
def generate_help(self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % opt_names)
for l in text[1:]:
lines.append(big_indent + l)
return lines
def print_help(self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
def fancy_getopt(options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
def wrap_text(text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = text.expandtabs()
text = text.translate(WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big too fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(''.join(cur_line))
return lines
def translate_longopt(opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return opt.translate(longopt_xlate)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__(self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print("width: %d" % w)
print("\n".join(wrap_text(text, w)))
print()
| apache-2.0 |
iAmMrinal0/CouchPotatoServer | libs/requests/compat.py | 1039 | 1469 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
try:
import simplejson as json
except (ImportError, SyntaxError):
# simplejson does not support Python 3.2, it throws a SyntaxError
# because of u'...' Unicode literals.
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| gpl-3.0 |
hammerlab/immuno | immuno/immunogenicity.py | 1 | 5849 | # Copyright (c) 2014. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from os import environ, listdir
from os.path import exists, split, join
from mhc_common import compact_hla_allele_name
from peptide_binding_measure import IC50_FIELD_NAME
DEFAULT_PEPTIDE_DIR = environ.get(
"IMMUNO_THYMIC_PEPTIDES",
join(split(__file__)[0], "thymic_peptides"))
THYMIC_DELETION_FIELD_NAME = 'ThymicDeletion'
def _load_allele_mapping_dict(path):
"""
Since some alleles have identical peptide sets as others, we compress
the stored data by only retaining one allele from each equivalence class
and using a mappings file to figure out which allele is retained.
"""
result = {}
with open(path, 'r') as f:
for line in f.read().split("\n"):
if len(line) > 0:
k, v = line.split("\t")
result[k] = v
return result
class ImmunogenicityPredictor(object):
"""
Predict whether some T-cell in a person's circulating repertoire could
recognize a particular pattern. The subset of the 'self' proteome which
binds to an individual's HLA alleles tells us which T-cells were removed
by negative selection. T-cells inspect peptides more strongly along
interior residues (positions 3-8), so we restrict our query only to those
positions.
"""
def __init__(
self,
alleles,
data_path = DEFAULT_PEPTIDE_DIR,
binding_threshold = 500,
first_position = 3,
last_position = 8):
"""
Parameters
--------
alleles : list of strings
data_path : str, optional
first_position : int, optional
Start position for extracting substring of
query peptide (indexed starting from 1)
last_position : int, optional
Last position for extracting substring of
query peptide (indexed starting from 1)
"""
self.binding_threshold = binding_threshold
self.first_position = first_position
self.last_position = last_position
self.alleles = {
compact_hla_allele_name(allele) for allele in alleles
}
self.data_path = data_path
assert exists(self.data_path), \
"Directory with thymic peptides (%s) does not exist" % \
self.data_path
available_alleles = listdir(self.data_path)
mappings_file_path = join(self.data_path, 'mappings')
if exists(mappings_file_path):
self.allele_mappings = \
_load_allele_mapping_dict(mappings_file_path)
else:
self.allele_mappings = \
dict(zip(available_alleles, available_alleles))
self.peptide_sets = {}
for allele in self.alleles:
if allele not in self.allele_mappings:
logging.warn(
"No MHC peptide set available for HLA allele %s", allele)
continue
else:
logging.info(
"Loading thymic MHC peptide set for HLA allele %s", allele)
filename = self.allele_mappings[allele]
assert filename in available_alleles, \
"No MHC peptide set available for HLA allele %s (file = %s)" % \
(allele,filename)
with open(join(self.data_path, filename), 'r') as f:
peptide_set = {l for l in f.read().split("\n") if len(l) > 0}
self.peptide_sets[allele] = peptide_set
def predict(self, peptides_df):
"""
Determine whether 9-mer peptide is immunogenic by checking
1) that the epitope binds strongly to a particular MHC allele
2) the "core" of the peptide (positions 3-8) don't overlap with any
other peptides in the self/thymic MHC ligand sets of that HLA allele
Returns DataFrame with two extra columns:
- ThymicDeletion: Was this epitope deleted during thymic selection
(and thus can't be recognize by T-cells)?
- Immunogenic: Is this epitope a sufficiently strong binder that
wasn't deleted during thymic selection?
"""
thymic_peptide_sets = self.peptide_sets.values()
# assume a peptide is non-immunogenic unless not in thymic sets
# We do this in case some alleles are missing, resulting in all
# their associated ligands being considered non-immunogenic
peptides_df[THYMIC_DELETION_FIELD_NAME] = True
for i in xrange(len(peptides_df)):
row = peptides_df.ix[i]
peptide = row.Epitope
allele = compact_hla_allele_name(row.Allele)
if allele in self.peptide_sets:
# positions in the epitope are indexed starting from 1 to
# match immunology nomenclature
substring = \
peptide[self.first_position - 1 : self.last_position]
peptides_df[THYMIC_DELETION_FIELD_NAME].ix[i] = \
substring in self.peptide_sets[allele]
peptides_df["Immunogenic"] = \
~peptides_df[THYMIC_DELETION_FIELD_NAME] & \
(peptides_df[IC50_FIELD_NAME] <= self.binding_threshold)
return peptides_df
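# Usage sketch (hypothetical alleles and DataFrame; the thymic peptide
# sets are loaded from IMMUNO_THYMIC_PEPTIDES or the bundled
# thymic_peptides directory):
#     predictor = ImmunogenicityPredictor(alleles=['HLA-A*02:01'])
#     annotated = predictor.predict(peptides_df)
#     # adds ThymicDeletion and Immunogenic columns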
| apache-2.0 |
amenonsen/ansible | test/units/modules/network/fortimanager/test_fmgr_secprof_waf.py | 38 | 2632 | # Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
import pytest
try:
from ansible.modules.network.fortimanager import fmgr_secprof_waf
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
def load_fixtures():
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') + "/{filename}.json".format(
filename=os.path.splitext(os.path.basename(__file__))[0])
try:
with open(fixture_path, "r") as fixture_file:
fixture_data = json.load(fixture_file)
except IOError:
return []
return [fixture_data]
@pytest.fixture(autouse=True)
def module_mock(mocker):
connection_class_mock = mocker.patch('ansible.module_utils.basic.AnsibleModule')
return connection_class_mock
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortimanager.fmgr_secprof_waf.Connection')
return connection_class_mock
@pytest.fixture(scope="function", params=load_fixtures())
def fixture_data(request):
func_name = request.function.__name__.replace("test_", "")
return request.param.get(func_name, None)
fmg_instance = FortiManagerHandler(connection_mock, module_mock)
def test_fmgr_waf_profile_modify(fixture_data, mocker):
mocker.patch("ansible.module_utils.network.fortimanager.fortimanager.FortiManagerHandler.process_request",
side_effect=fixture_data)
output = fmgr_secprof_waf.fmgr_waf_profile_modify(fmg_instance, fixture_data[0]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
output = fmgr_secprof_waf.fmgr_waf_profile_modify(fmg_instance, fixture_data[1]['paramgram_used'])
assert output['raw_response']['status']['code'] == 0
| gpl-3.0 |
MrLoick/python-for-android | python3-alpha/extra_modules/atom/url.py | 47 | 4351 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'api.jscudder (Jeff Scudder)'
import urllib.parse
import urllib.request, urllib.parse, urllib.error
DEFAULT_PROTOCOL = 'http'
DEFAULT_PORT = 80
# Python 3 removed the built-in cmp(); a minimal shim (added here) keeps
# the __cmp__ logic below evaluable. Note that Python 3 does not call
# __cmp__ for comparison operators, so it must be invoked explicitly.
def cmp(a, b):
return (a > b) - (a < b)
def parse_url(url_string):
"""Creates a Url object which corresponds to the URL string.
This method can accept partial URLs, but it will leave missing
members of the Url unset.
"""
parts = urllib.parse.urlparse(url_string)
url = Url()
if parts[0]:
url.protocol = parts[0]
if parts[1]:
host_parts = parts[1].split(':')
if host_parts[0]:
url.host = host_parts[0]
if len(host_parts) > 1:
url.port = host_parts[1]
if parts[2]:
url.path = parts[2]
if parts[4]:
param_pairs = parts[4].split('&')
for pair in param_pairs:
pair_parts = pair.split('=')
if len(pair_parts) > 1:
url.params[urllib.parse.unquote_plus(pair_parts[0])] = (
urllib.parse.unquote_plus(pair_parts[1]))
elif len(pair_parts) == 1:
url.params[urllib.parse.unquote_plus(pair_parts[0])] = None
return url
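# Worked example (illustrative):
#     url = parse_url('http://example.com:8080/feed?alt=json')
#     # url.protocol == 'http', url.host == 'example.com',
#     # url.port == '8080', url.path == '/feed',
#     # url.params == {'alt': 'json'}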
class Url(object):
"""Represents a URL and implements comparison logic.
URL strings which are not identical can still be equivalent, so this object
provides a better interface for comparing and manipulating URLs than
strings. URL parameters are represented as a dictionary of strings, and
defaults are used for the protocol (http) and port (80) if not provided.
"""
def __init__(self, protocol=None, host=None, port=None, path=None,
params=None):
self.protocol = protocol
self.host = host
self.port = port
self.path = path
self.params = params or {}
def to_string(self):
url_parts = ['', '', '', '', '', '']
if self.protocol:
url_parts[0] = self.protocol
if self.host:
if self.port:
url_parts[1] = ':'.join((self.host, str(self.port)))
else:
url_parts[1] = self.host
if self.path:
url_parts[2] = self.path
if self.params:
url_parts[4] = self.get_param_string()
return urllib.parse.urlunparse(url_parts)
def get_param_string(self):
param_pairs = []
for key, value in self.params.items():
param_pairs.append('='.join((urllib.parse.quote_plus(key),
urllib.parse.quote_plus(str(value)))))
return '&'.join(param_pairs)
def get_request_uri(self):
"""Returns the path with the parameters escaped and appended."""
param_string = self.get_param_string()
if param_string:
return '?'.join([self.path, param_string])
else:
return self.path
def __cmp__(self, other):
if not isinstance(other, Url):
return cmp(self.to_string(), str(other))
difference = 0
# Compare the protocol
if self.protocol and other.protocol:
difference = cmp(self.protocol, other.protocol)
elif self.protocol and not other.protocol:
difference = cmp(self.protocol, DEFAULT_PROTOCOL)
elif not self.protocol and other.protocol:
difference = cmp(DEFAULT_PROTOCOL, other.protocol)
if difference != 0:
return difference
# Compare the host
difference = cmp(self.host, other.host)
if difference != 0:
return difference
# Compare the port
if self.port and other.port:
difference = cmp(self.port, other.port)
elif self.port and not other.port:
difference = cmp(self.port, DEFAULT_PORT)
elif not self.port and other.port:
difference = cmp(DEFAULT_PORT, other.port)
if difference != 0:
return difference
# Compare the path
difference = cmp(self.path, other.path)
if difference != 0:
return difference
# Compare the parameters
return cmp(self.params, other.params)
def __str__(self):
return self.to_string()
| apache-2.0 |
nox/skia | tools/skpdiff/generate_pmetric_tables.py | 179 | 4156 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from math import *
COPYRIGHT = '''/*
* Copyright 2013 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/'''
HELP = '// To regenerate SkPMetricUtil_generated.h, simply run ./generate_pmetric_tables.py'
# From Barten SPIE 1989
def contrast_sensitivity(cycles_per_degree, luminance):
a = 440.0 * pow(1.0 + 0.7 / luminance, -0.2)
b = 0.3 * pow(1 + 100.0 / luminance, 0.15)
return a * cycles_per_degree * exp(-b * cycles_per_degree) * sqrt(1.0 + 0.06 * exp(b * cycles_per_degree))
# From Ward Larson Siggraph 1997
def threshold_vs_intensity(adaptation_luminance):
log_lum = float('-inf') # Works in Python 2.6+
try:
log_lum = log10(adaptation_luminance)
except ValueError:
pass
x = 0.0
if log_lum < -3.94:
x = -2.86
elif log_lum < -1.44:
x = pow(0.405 * log_lum + 1.6, 2.18) - 2.86
elif log_lum < -0.0184:
x = log_lum - 0.395
elif log_lum < 1.9:
x = pow(0.249 * log_lum + 0.65, 2.7) - 0.72
else:
x = log_lum - 1.255
return pow(10.0, x)
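# Spot check (illustrative): for adaptation_luminance = 10.0 cd/m^2,
# log_lum = 1.0 falls in the log_lum < 1.9 branch, so
# x = (0.249 + 0.65)**2.7 - 0.72 ~= 0.030 and the function returns
# roughly 10**0.030 ~= 1.07.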
# From Daly 1993
def visual_mask(contrast):
x = pow(392.498 * contrast, 0.7)
x = pow(0.0153 * x, 4.0)
return pow(1.0 + x, 0.25)
# float gCubeRootTable[]
CUBE_ROOT_ACCESS_FUNCTION = '''
static float get_cube_root(float value) {
SkASSERT(value >= 0.0f);
SkASSERT(value * 1023.0f < 1024.0f);
return gCubeRootTable[(int)(value * 1023.0f)];
}
'''
def generate_cube_root_table(stream):
print('static float gCubeRootTable[] = {', end='', file=stream)
for i in range(1024):
if i % 6 == 0:
print('\n ', end='', file=stream)
print("%.10f" % pow(i / 1024.0, 1.0 / 3.0), end='f,', file=stream)
print('\n};', end='', file=stream)
print(CUBE_ROOT_ACCESS_FUNCTION, file=stream)
# float gGammaTable[]
GAMMA_ACCESS_FUNCTION = '''
static float get_gamma(unsigned char value) {
return gGammaTable[value];
}
'''
def generate_gamma_table(stream):
print('static float gGammaTable[] = {', end='', file=stream)
for i in range(256):
if i % 6 == 0:
print('\n ', end='', file=stream)
print("%.10f" % pow(i / 255.0, 2.2), end='f,', file=stream)
print('\n};', end='', file=stream)
print(GAMMA_ACCESS_FUNCTION, file=stream)
# float gTVITable[]
TVI_ACCESS_FUNCTION = '''
static float get_threshold_vs_intensity(float value) {
SkASSERT(value >= 0.0f);
SkASSERT(value < 100.0f);
return gTVITable[(int)(value * 100.0f)];
}
'''
def generate_tvi_table(stream):
print('static float gTVITable[] = {', end='', file=stream)
for i in range(10000):
if i % 6 == 0:
print('\n ', end='', file=stream)
print("%.10f" % threshold_vs_intensity(i / 100.0), end='f,', file=stream)
print('\n};', end='', file=stream)
print(TVI_ACCESS_FUNCTION, file=stream)
# float gVisualMaskTable[]
VISUAL_MASK_DOMAIN = 4000
VISUAL_MASK_ACCESS_FUNCTION = '''
static float get_visual_mask(float value) {{
SkASSERT(value >= 0.0f);
SkASSERT(value < {}.0f);
return gVisualMaskTable[(int)value];
}}'''
def generate_visual_mask_table(stream):
print('static float gVisualMaskTable[] = {', end='', file=stream)
for i in range(VISUAL_MASK_DOMAIN):
if i % 6 == 0:
print('\n ', end='', file=stream)
print("%.10f" % visual_mask(i), end='f,', file=stream)
print('\n};', end='', file=stream)
print(VISUAL_MASK_ACCESS_FUNCTION.format(VISUAL_MASK_DOMAIN), file=stream)
def generate_lookup_tables(stream):
print(COPYRIGHT, file=stream)
print(HELP, file=stream)
print('namespace SkPMetricUtil {', file=stream)
generate_cube_root_table(stream)
generate_gamma_table(stream)
generate_tvi_table(stream)
generate_visual_mask_table(stream)
print('}', file=stream)
def main():
pmetric_util_out = open('SkPMetricUtil_generated.h', 'wb')
generate_lookup_tables(pmetric_util_out)
pmetric_util_out.close()
if __name__ == '__main__':
main()
| bsd-3-clause |
lambday/shogun | applications/ocr/MatrixWidget.py | 39 | 2103 | # File : $HeadURL$
# Version: $Id$
import gtk
import numpy as np
import common as com
from QuadrWidget import QuadrWidget
class MatrixWidget(QuadrWidget):
def __init__(self, matrix_size):
QuadrWidget.__init__(self)
self.matrix = np.zeros((matrix_size, matrix_size),
dtype=np.bool)
self.connect("expose_event", MatrixWidget.on_redraw)
def on_redraw(self, event):
gc = self.style.fg_gc[self.state]
w = self.window
width = w.get_size()[0]
height = w.get_size()[1]
# Backup graphic context
self.default_fg = gc.foreground
# Background
gc.set_rgb_fg_color(com.COLOR_WHITE)
w.draw_rectangle(gc, True, 0, 0, width-1, height-1)
size_y = self.matrix.shape[0]
size_x = self.matrix.shape[1]
pixels_per_y = float(height)/size_y
pixels_per_x = float(width)/size_x
gc.set_rgb_fg_color(com.COLOR_GRAY)
for y in range(size_y):
w.draw_line(gc, 0, int(y*pixels_per_y),
width-1, int(y*pixels_per_y))
for x in range(size_x):
if y == 0:
w.draw_line(gc, int(x*pixels_per_x), 0,
int(x*pixels_per_x), height-1)
if self.matrix[y, x]:
gc.set_rgb_fg_color(com.COLOR_BLACK)
w.draw_rectangle(gc, self.matrix[y, x]
> com.NEAR_ZERO_POS,
int(x*pixels_per_x),
int(y*pixels_per_y),
int(pixels_per_x+1),
int(pixels_per_y+1))
gc.set_rgb_fg_color(com.COLOR_GRAY)
gc.set_rgb_fg_color(com.COLOR_BLACK)
w.draw_rectangle(gc, False, 0, 0, width-1, height-1)
gc.foreground = self.default_fg
return False
def set_image(self, image):
self.matrix = image
self.update()
def get_image(self):
return self.matrix
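# Hypothetical usage sketch (requires a PyGTK main loop elsewhere):
#   widget = MatrixWidget(16)
#   widget.set_image(np.zeros((16, 16), dtype=bool))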
| bsd-3-clause |
Microsoft/PTVS | Python/Templates/Web/ProjectTemplates/Python/Web/PollsFlask/models-mongodb.py | 15 | 2884 | """
Repository of polls that stores data in a MongoDB database.
"""
from bson.objectid import ObjectId, InvalidId
from pymongo import MongoClient
from . import Poll, Choice, PollNotFound
from . import _load_samples_json
def _poll_from_doc(doc):
"""Creates a poll object from the MongoDB poll document."""
return Poll(str(doc['_id']), doc['text'])
def _choice_from_doc(doc):
"""Creates a choice object from the MongoDB choice subdocument."""
return Choice(str(doc['id']), doc['text'], doc['votes'])
class Repository(object):
"""MongoDB repository."""
def __init__(self, settings):
"""Initializes the repository with the specified settings dict.
Required settings are:
- MONGODB_HOST
- MONGODB_DATABASE
- MONGODB_COLLECTION
"""
self.name = 'MongoDB'
self.host = settings['MONGODB_HOST']
self.client = MongoClient(self.host)
self.database = self.client[settings['MONGODB_DATABASE']]
self.collection = self.database[settings['MONGODB_COLLECTION']]
def get_polls(self):
"""Returns all the polls from the repository."""
docs = self.collection.find()
polls = [_poll_from_doc(doc) for doc in docs]
return polls
def get_poll(self, poll_key):
"""Returns a poll from the repository."""
try:
doc = self.collection.find_one({"_id": ObjectId(poll_key)})
if doc is None:
raise PollNotFound()
poll = _poll_from_doc(doc)
poll.choices = [_choice_from_doc(choice_doc)
for choice_doc in doc['choices']]
return poll
except InvalidId:
raise PollNotFound()
def increment_vote(self, poll_key, choice_key):
"""Increment the choice vote count for the specified poll."""
try:
self.collection.update(
{
"_id": ObjectId(poll_key),
"choices.id": int(choice_key),
},
{
"$inc": {"choices.$.votes": 1}
}
)
        except (InvalidId, ValueError):
raise PollNotFound()
def add_sample_polls(self):
"""Adds a set of polls from data stored in a samples.json file."""
for sample_poll in _load_samples_json():
choices = []
choice_id = 0
for sample_choice in sample_poll['choices']:
choice_doc = {
'id': choice_id,
'text': sample_choice,
'votes': 0,
}
choice_id += 1
choices.append(choice_doc)
poll_doc = {
'text': sample_poll['text'],
'choices': choices,
}
self.collection.insert(poll_doc)
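# A minimal usage sketch (hypothetical settings; assumes a reachable MongoDB
# server and the Poll/Choice classes imported above):
#   repo = Repository({'MONGODB_HOST': 'mongodb://localhost:27017',
#                      'MONGODB_DATABASE': 'polls',
#                      'MONGODB_COLLECTION': 'polls'})
#   repo.add_sample_polls()
#   polls = repo.get_polls()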
| apache-2.0 |
Gabriel-p/mcs_rot_angles | aux_modules/validation_set.py | 1 | 10176 |
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
"""
This function generates a uniform spread of vertical distances, in the
range (-z_dist, +z_dist).
"""
# Define maximum vertical distance (in parsec)
z_dist = 5000.
# Generate N random z' vertical distances, in parsec.
# To generate the *same* values each time the code is executed, fix the
# random seed to any integer value.
# np.random.seed(12345)
z_prime = np.random.uniform(-z_dist, z_dist, N)
return z_prime
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
"""
Inverted distance in parsecs (D) from Eq (7) in
van der Marel & Cioni (2001) using Eqs (1), (2), (3).
"""
# Express everything in radians.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
ra_0, dec_0, ra, dec = ra_0.rad, dec_0.rad, np.deg2rad(ra), np.deg2rad(dec)
# cos(rho)
A = np.cos(dec) * np.cos(dec_0) * np.cos(ra - ra_0) +\
np.sin(dec) * np.sin(dec_0)
# sin(rho) * cos(phi)
B = -np.cos(dec) * np.sin(ra - ra_0)
# sin(rho) * sin(phi)
C = np.sin(dec) * np.cos(dec_0) -\
np.cos(dec) * np.sin(dec_0) * np.cos(ra - ra_0)
# Eq (7)
D = (z_prime - D_0.value * np.cos(incl)) /\
(np.sin(incl) * (C * np.cos(theta) - B * np.sin(theta)) -
A * np.cos(incl))
return D
def rho_phi(ra, dec, glx_ctr):
"""
Obtain the angular distance between (ra, dec) coordinates and the center
of the galaxy (rho), and its position angle (phi).
"""
# Store clusters' (ra, dec) coordinates in degrees.
coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
rho = coords.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coords)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001),
Eq (5).
Values (x, y,z) returned in Kpc.
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0.kpc * u.kpc - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
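# Sanity check on the modulus-to-distance step in xyz_coords (hypothetical
# value): a distance modulus of 18.49 gives 10**(0.2*(18.49+5))/1000, i.e.
# about 49.9 kpc, the canonical LMC distance.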
def outData(gal, gal_data, dist_mod, e_dm):
"""
Write data to output 'xxx_input_synth.dat' file ('xxx' stands for the
processed galaxy.)
"""
data = Table(
[gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod, e_dm,
gal_data['log(age)']],
names=['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)'])
with open(gal.lower() + "_input_synth.dat", 'w') as f:
ascii.write(data, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta) - y_p * np.cos(inc) * np.sin(theta) -\
z_p * np.sin(inc) * np.sin(theta)
y = x_p * np.sin(theta) + y_p * np.cos(inc) * np.cos(theta) +\
z_p * np.sin(inc) * np.cos(theta)
z = -1. * y_p * np.sin(inc) + z_p * np.cos(inc)
return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Placement 0, 0 is the bottom left, 1, 1 is the top right.
ax.text2D(
0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
gal_name, theta - 90., incl),
transform=ax.transAxes, fontsize=15, color='red')
# Express in radians for calculations.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
# Plot clusters.
x_cl, y_cl, z_cl = cl_xyz
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
length=.1, color='k')
ax.text(max_X, 0., -.5, 'x', 'x')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
length=.1, color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
ax.text(-.5, 0., max_Y, 'y', 'y')
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
np.cos(theta) * np.sin(incl), np.cos(incl), 0.
# print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.7)
ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.9, color='g')
ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
# # z' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
# x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
# # Arrow head pointing in the positive z' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
# arrow_length_ratio=.9, color='y')
# ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# This controls the initial orientation of the displayed 3D plot.
# ‘elev’ stores the elevation angle in the z plane. ‘azim’ stores the
# azimuth angle in the x,y plane.
ax.view_init(elev=0., azim=-90.)
plt.show()
# plt.savefig()
def main():
    """
    Generate synthetic cluster distance data for the SMC and LMC, store it
    in per-galaxy 'xxx_input_synth.dat' files, and plot the geometry.
    """
# Define inclination angles (i, Theta) (SMC first, LMC second).
# 'Theta' is the PA (position angle) measured from the North (positive
# y axis in van der Marel et al. 2002, Fig 3)
rot_angles = ((60, 150.), (30, 140.))
# Root path.
r_path = os.path.realpath(__file__)[:-30]
# Read input data for both galaxies from file (smc_data, lmc_data)
gal_data = readData.main(r_path)
for gal, gal_name in enumerate(['SMC', 'LMC']):
print("Generating data for {}".format(gal_name))
incl, Theta = rot_angles[gal]
# 'theta' is the position angle measured from the West (positive
# x axis), used by Eq (7) in van der Marel & Cioni (2001).
theta = Theta + 90.
# Center coordinates and distance for this galaxy.
gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
ra_0, dec_0 = gal_center.ra, gal_center.dec
# Center coordinates for observed clusters in this galaxy.
ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
# Generate N random vertical distances (z'), in parsec.
z_prime = zDist(len(ra))
# Distance to clusters in parsecs.
D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
# Convert to distance moduli.
dist_mod = np.round(-5. + 5. * np.log10(D), 2)
# This line below uses the actual distance moduli found by ASteCA.
# dist_mod = gal_data[gal]['dist_mod']
# Random errors for distance moduli.
e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
# Store data in output file.
outData(gal_name, gal_data[gal], dist_mod, e_dm)
print("Output data stored")
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
rho, phi = rho_phi(ra, dec, gal_center)
cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
print("Plot saved.")
if __name__ == '__main__':
main()
| gpl-3.0 |
elingg/tensorflow | tensorflow/python/kernel_tests/fractional_avg_pool_op_test.py | 107 | 21031 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for fractional average pool operation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class FractionalAvgTest(test.TestCase):
# Random number generate with seed.
_PRNG = np.random.RandomState(341261000)
_SEED = 341261001
_SEED2 = 341261002
def _AvgPoolAlongRows(self, input_matrix, row_seq, overlapping):
"""Perform average pool along row of a 2-D matrix based on row_seq.
Args:
input_matrix: A 2-D matrix.
row_seq: Cumulative pooling sequence along row.
      overlapping: Whether or not to use overlapping pooling.
Returns:
A 2-D matrix, with
* num_rows = len(row_seq)-1
* num_cols = input_matrix.num_cols.
"""
output_image = np.zeros(input_matrix.shape[1])
row_max = row_seq[-1]
for i in range(row_seq.shape[0] - 1):
row_start = row_seq[i]
row_end = row_seq[i + 1] + 1 if overlapping else row_seq[i + 1]
row_end = min(row_end, row_max)
output_image = np.vstack((output_image, np.mean(
input_matrix[row_start:row_end, :], axis=0))) # axis 0 is along row
# remove the sentinel row
return output_image[1:, :]
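  # Worked example (hypothetical): with row_seq = [0, 2, 4] and
  # overlapping=False, rows 0-1 average into output row 0 and rows 2-3 into
  # output row 1; with overlapping=True each pool also includes the boundary
  # row row_seq[i+1], clipped at the last row.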
def _AvgPoolAlongCols(self, input_matrix, col_seq, overlapping):
"""Perform average pool along column of a 2-D matrix based on col_seq.
Args:
input_matrix: A 2-D matrix.
col_seq: Cumulative pooling sequence along column.
      overlapping: Whether or not to use overlapping pooling.
Returns:
A 2-D matrix, with
* num_rows = input_matrix.num_rows
* num_cols = len(col_seq)-1.
"""
input_matrix = input_matrix.transpose()
output_matrix = self._AvgPoolAlongRows(input_matrix, col_seq, overlapping)
return output_matrix.transpose()
def _GetExpectedFractionalAvgPoolResult(self, input_tensor, row_seq, col_seq,
overlapping):
"""Get expected fractional average pooling result.
row_seq and col_seq together defines the fractional pooling region.
Args:
input_tensor: Original input tensor, assuming it is a 4-D tensor, with
dimension as [batch, height/row, width/column, channels/depth].
row_seq: Cumulative pooling sequence along row.
col_seq: Cumulative pooling sequence along column.
overlapping: Use overlapping when doing pooling.
Returns:
A 4-D tensor that is the result of average pooling on input_tensor based
on pooling region defined by row_seq and col_seq, conditioned on whether
or not overlapping is used.
"""
input_shape = input_tensor.shape
output_shape = (input_shape[0], len(row_seq) - 1, len(col_seq) - 1,
input_shape[3])
output_tensor = np.zeros(shape=output_shape, dtype=input_tensor.dtype)
for batch in range(input_shape[0]):
for channel in range(input_shape[3]):
two_dim_slice = input_tensor[batch, :, :, channel]
tmp = self._AvgPoolAlongRows(two_dim_slice, row_seq, overlapping)
output_tensor[batch, :, :, channel] = self._AvgPoolAlongCols(
tmp, col_seq, overlapping)
return output_tensor
def _ValidateFractionalAvgPoolResult(self, input_tensor, pooling_ratio,
pseudo_random, overlapping):
"""Validate FractionalAvgPool's result against expected.
Expected result is computed given input_tensor, and pooling region defined
by row_seq and col_seq.
Args:
input_tensor: A tensor or numpy ndarray.
pooling_ratio: A list or tuple of length 4, first and last element be 1.
pseudo_random: Use pseudo random method to generate pooling sequence.
overlapping: Use overlapping when pooling.
Returns:
None
"""
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual, row_seq, col_seq = sess.run([p, r, c])
expected = self._GetExpectedFractionalAvgPoolResult(input_tensor, row_seq,
col_seq, overlapping)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def _testVisually(self):
"""Manual test by printing out intermediate result of a small random tensor.
Since _GetExpectedFractionalAvgPoolResult is 'automated', it feels safer to
have a test case that you can see what's happening.
This test will generate a small, random, int 2D matrix, and feed it to
FractionalAvgPool and _GetExpectedFractionalAvgPoolResult.
"""
num_rows = 6
num_cols = 6
tensor_shape = (1, num_rows, num_cols, 1)
pseudo_random = False
for overlapping in True, False:
print("-" * 70)
print("Testing FractionalAvgPool with overlapping = {}".format(
overlapping))
rand_mat = self._PRNG.randint(10, size=tensor_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(2), 1]
with self.test_session() as sess:
p, r, c = nn_ops.fractional_avg_pool(
rand_mat.astype(np.float32),
pooling_ratio,
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
tensor_output, row_seq, col_seq = sess.run([p, r, c])
expected_result = self._GetExpectedFractionalAvgPoolResult(
rand_mat.astype(np.float32), row_seq, col_seq, overlapping)
print("row sequence:")
print(row_seq)
print("column sequence:")
print(col_seq)
print("Input:")
# Print input with pooling region marked.
for i in range(num_rows):
row_to_print = []
for j in range(num_cols):
if j in col_seq:
row_to_print.append("|")
row_to_print.append(str(rand_mat[0, i, j, 0]))
row_to_print.append("|")
if i in row_seq:
print("-" * 2 * len(row_to_print))
print(" ".join(row_to_print))
print("-" * 2 * len(row_to_print))
print("Output from FractionalAvgPool:")
print(tensor_output[0, :, :, 0])
print("Expected result:")
print(expected_result[0, :, :, 0])
def testAllInputOptions(self):
"""Try all possible input options for fractional_avg_pool.
"""
num_batches = 5
num_channels = 3
num_rows = 20
num_cols = 30
for pseudo_random in True, False:
for overlapping in True, False:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testIntegerTensorInput(self):
"""Test FractionalAvgPool works fine when input tensor is integer type.
    The _ValidateFractionalAvgPoolResult function would normally automate this
    process, but there is a rounding issue: numpy.mean casts integer input to
    numpy.float64 for intermediate use, while fractional_avg_pool's mean
    operation is integer division (truncated). So this test case hard-codes a
    simple matrix instead.
"""
pseudo_random = True
overlapping = True
tensor_shape = (1, 6, 6, 1)
# pyformat: disable
mat = np.array([
[2, 6, 4, 1, 3, 6],
[8, 9, 1, 6, 6, 8],
[3, 9, 8, 2, 5, 6],
[2, 7, 9, 5, 4, 5],
[8, 5, 0, 5, 7, 4],
[4, 4, 5, 9, 7, 2]
])
# pyformat: enable
with self.test_session() as sess:
      # Since deterministic = True, seed and seed2 are fixed. Therefore r and c
      # are the same each time, so the expected result can be precomputed.
# r = [0, 2, 4, 6]
# c = [0, 1, 3, 4, 6]
# pyformat: disable
expected = np.array([
[6, 5, 3, 5],
[5, 5, 4, 5],
[5, 4, 7, 5]
]).reshape((1, 3, 4, 1))
# pyformat: enable
p, unused_r, unused_c = nn_ops.fractional_avg_pool(
mat.reshape(tensor_shape), [1, math.sqrt(3), math.sqrt(2), 1],
pseudo_random,
overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
actual = sess.run(p)
self.assertShapeEqual(expected, p)
self.assertAllClose(expected, actual)
def testDifferentTensorShapes(self):
"""Test different shapes of input tensor.
Mainly test different combinations of num_rows and num_cols.
"""
pseudo_random = True
overlapping = True
for num_batches in [1, 3]:
for num_channels in [1, 3]:
for num_rows in [10, 20, 50]:
for num_cols in [10, 20, 50]:
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(
rand_mat, [1, math.sqrt(3), math.sqrt(2), 1], pseudo_random,
overlapping)
def testLargePoolingRatio(self):
"""Test when pooling ratio is not within [1, 2).
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
for row_ratio in [math.sqrt(11), math.sqrt(37)]:
for col_ratio in [math.sqrt(11), math.sqrt(27)]:
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat,
[1, row_ratio, col_ratio, 1],
pseudo_random, overlapping)
def testDivisiblePoolingRatio(self):
"""Test when num of rows/cols can evenly divide pooling ratio.
    This is a case that regular average pooling can handle, and fractional
    pooling should handle it as well.
"""
pseudo_random = True
overlapping = True
num_batches = 3
num_channels = 3
num_rows = 30
num_cols = 50
tensor_shape = (num_batches, num_rows, num_cols, num_channels)
# random tensor with value in [-500.0, 500.0)
rand_mat = self._PRNG.random_sample(tensor_shape) * 1000 - 500
self._ValidateFractionalAvgPoolResult(rand_mat, [1, 2, 2, 1], pseudo_random,
overlapping)
class FractionalAvgPoolGradTest(test.TestCase):
"""Tests for FractionalAvgPoolGrad.
Two types of tests for FractionalAvgPoolGrad.
1) Test fractional_avg_pool_grad() directly.
  This type of test relies on gen_nn_ops._avg_pool_grad() returning the
  correct result. For example:
* input_tensor_shape = (1, 10, 10, 1)
* window_size = (1, 2, 2, 1)
* stride_size = (1, 2, 2, 1)
* padding: not really important, since 10/2 is divisible
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 6, 8, 10]
* col_sequence = [0, 2, 4, 6, 8, 10]
* overlapping = False
This also means their gradients in such case will be the same.
Similarly, when
* input_tensor_shape = (1, 7, 7, 1)
* window_size = (1, 3, 3, 1)
* stride_size = (1, 2, 2, 1)
* padding: not important
avg pooling should generate the same result as fractional avg pooling with:
* row_sequence = [0, 2, 4, 7]
* col_sequence = [0, 2, 4, 7]
* overlapping = True
2) Test through compute_gradient_error()
"""
_PRNG = np.random.RandomState(341261004)
_SEED = 341261005
_SEED2 = 341261006
def _GenerateRandomInputTensor(self, shape):
num_elements = 1
for dim_size in shape:
num_elements *= dim_size
x = self._PRNG.rand(num_elements) * 1000
return x.reshape(shape)
def testDirectNotUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = row_window_size * 5
num_cols = col_window_size * 7
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size, col_window_size, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops._avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows + 1, row_window_size))
col_seq = list(range(0, num_cols + 1, col_window_size))
fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=False)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testDirectUseOverlapping(self):
for num_batches in [1, 3]:
for row_window_size in [2, 5]:
for col_window_size in [2, 4]:
num_rows = (row_window_size - 1) * 5 + 1
num_cols = (col_window_size - 1) * 7 + 1
for num_channels in [1, 2]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
with self.test_session() as _:
input_tensor = constant_op.constant(
self._GenerateRandomInputTensor(input_shape).astype(
np.float32))
window_size = [1, row_window_size, col_window_size, 1]
stride_size = [1, row_window_size - 1, col_window_size - 1, 1]
padding = "VALID"
output_tensor = nn_ops.avg_pool(input_tensor, window_size,
stride_size, padding)
output_data = output_tensor.eval()
num_elements = 1
for dim_size in output_data.shape:
num_elements *= dim_size
output_backprop = (self._PRNG.rand(num_elements) *
1000).reshape(output_data.shape)
input_backprop_tensor = gen_nn_ops._avg_pool_grad(
input_tensor.get_shape(), output_backprop, window_size,
stride_size, padding)
input_backprop = input_backprop_tensor.eval()
row_seq = list(range(0, num_rows, row_window_size - 1))
col_seq = list(range(0, num_cols, col_window_size - 1))
row_seq[-1] += 1
col_seq[-1] += 1
fap_input_backprop_tensor = gen_nn_ops._fractional_avg_pool_grad(
input_tensor.get_shape(),
output_backprop,
row_seq,
col_seq,
overlapping=True)
fap_input_backprop = fap_input_backprop_tensor.eval()
self.assertShapeEqual(input_backprop, fap_input_backprop_tensor)
self.assertAllClose(input_backprop, fap_input_backprop)
def testAllInputOptionsThroughGradientError(self):
input_shape = (1, 7, 13, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = [1, math.sqrt(2), math.sqrt(3), 1]
for pseudo_random in True, False:
for overlapping in True, False:
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testDifferentTensorShapesThroughGradientError(self):
pseudo_random = True
overlapping = True
pooling_ratio = [1, math.sqrt(3), math.sqrt(2), 1]
for num_batches in [1, 2]:
for num_rows in [5, 13]:
for num_cols in [5, 11]:
for num_channels in [1, 3]:
input_shape = (num_batches, num_rows, num_cols, num_channels)
input_data = self._GenerateRandomInputTensor(input_shape)
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
output_data = output_tensor.eval()
output_shape = output_data.shape
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
def testLargePoolingRatioThroughGradientError(self):
input_shape = (1, 17, 23, 1)
input_data = self._GenerateRandomInputTensor(input_shape)
pooling_ratio = (1, math.sqrt(13), math.sqrt(7), 1)
output_shape = [int(a / b) for a, b in zip(input_shape, pooling_ratio)]
overlapping = True
pseudo_random = False
with self.test_session() as _:
input_tensor = constant_op.constant(input_data, shape=input_shape)
output_tensor, unused_a, unused_b = nn_ops.fractional_avg_pool(
input_tensor,
pooling_ratio,
pseudo_random=pseudo_random,
overlapping=overlapping,
deterministic=True,
seed=self._SEED,
seed2=self._SEED2)
# error_margin and delta setting is similar to avg_pool_grad.
error_margin = 1e-4
gradient_error = gradient_checker.compute_gradient_error(
input_tensor,
input_shape,
output_tensor,
output_shape,
x_init_value=input_data.reshape(input_shape),
delta=1e-2)
self.assertLess(gradient_error, error_margin)
if __name__ == "__main__":
test.main()
| apache-2.0 |
nearbycoder/tulsawebdevs.org | events/models/mixins.py | 3 | 1419 | import arrow
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_extensions.db.models import TimeStampedModel, TitleSlugDescriptionModel
from .meta import Location
class EventModelMixin(TimeStampedModel, TitleSlugDescriptionModel):
"""
Abstract base class to prevent code duplication.
    `title`, `description`, and `slug` fields are inherited from django-extensions' TitleSlugDescriptionModel.
:start: The start date of the event.
:end: The end date of the event.
:title: The title of the event.
:slug: The url slug of the event.
:description: The description of the event.
"""
start = models.DateTimeField(
verbose_name=_('Start time'),
)
end = models.DateTimeField(
verbose_name=_('End time'),
)
location = models.ForeignKey(
Location,
null=True, blank=True)
def __str__(self):
return "{title} ({start})".format(title=self.title, start=arrow.get(self.start).format('MMM D, YYYY'))
def clean(self):
super().clean()
if (self.end and self.start) and self.end < self.start:
# REVIEW: would be nice if this was a part of the field validators
raise ValidationError("Start time must be earlier than end time.")
class Meta:
abstract = True
ordering = ('start', )
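# A hypothetical concrete model built on the mixin (Django makes subclasses
# of an abstract model concrete unless they redeclare abstract=True):
#   class Meetup(EventModelMixin):
#       pass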
| gpl-3.0 |
Messaoud-Boudjada/dipy | doc/examples/reconst_csd_parallel.py | 16 | 3740 | """
=================================
Parallel reconstruction using CSD
=================================
This example shows how to use parallelism (multiprocessing) with
``peaks_from_model`` in order to speed up the signal reconstruction
process. For this example we will use the same initial steps
as in :ref:`example_reconst_csd`.
Import modules, fetch and read data, apply the mask and calculate the response
function.
"""
import multiprocessing
from dipy.data import fetch_stanford_hardi, read_stanford_hardi
fetch_stanford_hardi()
img, gtab = read_stanford_hardi()
data = img.get_data()
from dipy.segment.mask import median_otsu
maskdata, mask = median_otsu(data, 3, 1, False,
vol_idx=range(10, 50), dilate=2)
from dipy.reconst.csdeconv import auto_response
response, ratio = auto_response(gtab, maskdata, roi_radius=10, fa_thr=0.7)
data = maskdata[:, :, 33:37]
mask = mask[:, :, 33:37]
"""
Now we are ready to import the CSD model and fit the datasets.
"""
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
"""
Compute the CSD-based ODFs using ``peaks_from_model``. This function has a
parameter called ``parallel`` which allows for the voxels to be processed in
parallel. If ``nbr_processes`` is None, the number of CPUs available on
your system is determined automatically. Alternatively, you can set
``nbr_processes`` manually. Here, we show an example where we compare the
duration of execution with or without parallelism.
"""
import time
from dipy.direction import peaks_from_model
start_time = time.time()
csd_peaks_parallel = peaks_from_model(model=csd_model,
data=data,
sphere=sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask,
return_sh=True,
return_odf=False,
normalize_peaks=True,
npeaks=5,
parallel=True,
nbr_processes=None)
time_parallel = time.time() - start_time
print("peaks_from_model using " + str(multiprocessing.cpu_count())
+ " process ran in :" + str(time_parallel) + " seconds")
"""
``peaks_from_model`` using 8 processes ran in :114.425682068 seconds
"""
start_time = time.time()
csd_peaks = peaks_from_model(model=csd_model,
data=data,
sphere=sphere,
relative_peak_threshold=.5,
min_separation_angle=25,
mask=mask,
return_sh=True,
return_odf=False,
normalize_peaks=True,
npeaks=5,
parallel=False,
nbr_processes=None)
time_single = time.time() - start_time
print("peaks_from_model ran in :" + str(time_single) + " seconds")
"""
``peaks_from_model`` ran in :242.772505999 seconds
"""
print("Speedup factor : " + str(time_single / time_parallel))
"""
Speedup factor : 2.12166099088
On Windows, if you get a runtime error about a frozen executable, wrap the
code above in a ``main`` function and use:
if __name__ == '__main__':
import multiprocessing
multiprocessing.freeze_support()
main()
"""
| bsd-3-clause |
wxgeo/geophar | wxgeometrie/sympy/polys/tests/test_numberfields.py | 4 | 28618 | """Tests for computational algebraic number field theory. """
from sympy import (S, Rational, Symbol, Poly, sqrt, I, oo, Tuple, expand,
pi, cos, sin, exp)
from sympy.utilities.pytest import raises, slow
from sympy.core.compatibility import range
from sympy.polys.numberfields import (
minimal_polynomial,
primitive_element,
is_isomorphism_possible,
field_isomorphism_pslq,
field_isomorphism,
to_number_field,
AlgebraicNumber,
isolate, IntervalPrinter,
)
from sympy.polys.polyerrors import (
IsomorphismFailed,
NotAlgebraic,
GeneratorsError,
)
from sympy.polys.polyclasses import DMP
from sympy.polys.domains import QQ
from sympy.polys.rootoftools import rootof
from sympy.polys.polytools import degree
from sympy.abc import x, y, z
Q = Rational
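# Orientation for the tests below: minimal_polynomial(ex, x) returns the
# integer-coefficient polynomial of least degree having `ex` as a root
# (primitive, with positive leading coefficient), e.g. x**2 - 2 for sqrt(2).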
def test_minimal_polynomial():
assert minimal_polynomial(-7, x) == x + 7
assert minimal_polynomial(-1, x) == x + 1
assert minimal_polynomial( 0, x) == x
assert minimal_polynomial( 1, x) == x - 1
assert minimal_polynomial( 7, x) == x - 7
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(5), x) == x**2 - 5
assert minimal_polynomial(sqrt(6), x) == x**2 - 6
assert minimal_polynomial(2*sqrt(2), x) == x**2 - 8
assert minimal_polynomial(3*sqrt(5), x) == x**2 - 45
assert minimal_polynomial(4*sqrt(6), x) == x**2 - 96
assert minimal_polynomial(2*sqrt(2) + 3, x) == x**2 - 6*x + 1
assert minimal_polynomial(3*sqrt(5) + 6, x) == x**2 - 12*x - 9
assert minimal_polynomial(4*sqrt(6) + 7, x) == x**2 - 14*x - 47
assert minimal_polynomial(2*sqrt(2) - 3, x) == x**2 + 6*x + 1
assert minimal_polynomial(3*sqrt(5) - 6, x) == x**2 + 12*x - 9
assert minimal_polynomial(4*sqrt(6) - 7, x) == x**2 + 14*x - 47
assert minimal_polynomial(sqrt(1 + sqrt(6)), x) == x**4 - 2*x**2 - 5
assert minimal_polynomial(sqrt(I + sqrt(6)), x) == x**8 - 10*x**4 + 49
assert minimal_polynomial(2*I + sqrt(2 + I), x) == x**4 + 4*x**2 + 8*x + 37
assert minimal_polynomial(sqrt(2) + sqrt(3), x) == x**4 - 10*x**2 + 1
assert minimal_polynomial(
sqrt(2) + sqrt(3) + sqrt(6), x) == x**4 - 22*x**2 - 48*x - 23
a = 1 - 9*sqrt(2) + 7*sqrt(3)
assert minimal_polynomial(
1/a, x) == 392*x**4 - 1232*x**3 + 612*x**2 + 4*x - 1
assert minimal_polynomial(
1/sqrt(a), x) == 392*x**8 - 1232*x**6 + 612*x**4 + 4*x**2 - 1
raises(NotAlgebraic, lambda: minimal_polynomial(oo, x))
raises(NotAlgebraic, lambda: minimal_polynomial(2**y, x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(1), x))
assert minimal_polynomial(sqrt(2)).dummy_eq(x**2 - 2)
assert minimal_polynomial(sqrt(2), x) == x**2 - 2
assert minimal_polynomial(sqrt(2), polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(sqrt(2), x, polys=True, compose=False) == Poly(x**2 - 2)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
assert minimal_polynomial(a, x) == x**2 - 2
assert minimal_polynomial(b, x) == x**2 - 3
assert minimal_polynomial(a, x, polys=True) == Poly(x**2 - 2)
assert minimal_polynomial(b, x, polys=True) == Poly(x**2 - 3)
assert minimal_polynomial(sqrt(a/2 + 17), x) == 2*x**4 - 68*x**2 + 577
assert minimal_polynomial(sqrt(b/2 + 17), x) == 4*x**4 - 136*x**2 + 1153
a, b = sqrt(2)/3 + 7, AlgebraicNumber(sqrt(2)/3 + 7)
f = 81*x**8 - 2268*x**6 - 4536*x**5 + 22644*x**4 + 63216*x**3 - \
31608*x**2 - 189648*x + 141358
assert minimal_polynomial(sqrt(a) + sqrt(sqrt(a)), x) == f
assert minimal_polynomial(sqrt(b) + sqrt(sqrt(b)), x) == f
assert minimal_polynomial(
a**Q(3, 2), x) == 729*x**4 - 506898*x**2 + 84604519
# issue 5994
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
assert minimal_polynomial(eq, x) == 8000*x**2 - 1
ex = 1 + sqrt(2) + sqrt(3)
mp = minimal_polynomial(ex, x)
assert mp == x**4 - 4*x**3 - 4*x**2 + 16*x - 8
ex = 1/(1 + sqrt(2) + sqrt(3))
mp = minimal_polynomial(ex, x)
assert mp == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = (expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3))**Rational(1, 3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = expand((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p, x)
assert mp == x**8 - 512*x**7 - 118208*x**6 + 31131136*x**5 + 647362560*x**4 - 56026611712*x**3 + 116994310144*x**2 + 404854931456*x - 27216576512
assert minimal_polynomial(S("-sqrt(5)/2 - 1/2 + (-sqrt(5)/2 - 1/2)**2"), x) == x - 1
a = 1 + sqrt(2)
assert minimal_polynomial((a*sqrt(2) + a)**3, x) == x**2 - 198*x + 1
p = 1/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == 8*x**4 - 16*x**3 + 4*x**2 + 4*x - 1
p = 2/(1 + sqrt(2) + sqrt(3))
assert minimal_polynomial(p, x, compose=False) == x**4 - 4*x**3 + 2*x**2 + 4*x - 2
assert minimal_polynomial(1 + sqrt(2)*I, x, compose=False) == x**2 - 2*x + 3
assert minimal_polynomial(1/(1 + sqrt(2)) + 1, x, compose=False) == x**2 - 2
assert minimal_polynomial(sqrt(2)*I + I*(1 + sqrt(2)), x,
compose=False) == x**4 + 18*x**2 + 49
# minimal polynomial of I
assert minimal_polynomial(I, x, domain=QQ.algebraic_field(I)) == x - I
K = QQ.algebraic_field(I*(sqrt(2) + 1))
assert minimal_polynomial(I, x, domain=K) == x - I
assert minimal_polynomial(I, x, domain=QQ) == x**2 + 1
assert minimal_polynomial(I, x, domain='QQ(y)') == x**2 + 1
def test_minimal_polynomial_hi_prec():
p = 1/sqrt(1 - 9*sqrt(2) + 7*sqrt(3) + S(1)/10**30)
mp = minimal_polynomial(p, x)
# checked with Wolfram Alpha
assert mp.coeff(x**6) == -1232000000000000000000000000001223999999999999999999999999999987999999999999999999999999999996000000000000000000000000000000
def test_minimal_polynomial_sq():
from sympy import Add, expand_multinomial
p = expand_multinomial((1 + 5*sqrt(2) + 2*sqrt(3))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**4 - 4*x**3 - 118*x**2 + 244*x + 1321
p = expand_multinomial((1 + sqrt(2) - 2*sqrt(3) + sqrt(7))**3)
mp = minimal_polynomial(p**Rational(1, 3), x)
assert mp == x**8 - 8*x**7 - 56*x**6 + 448*x**5 + 480*x**4 - 5056*x**3 + 1984*x**2 + 7424*x - 3008
p = Add(*[sqrt(i) for i in range(1, 12)])
mp = minimal_polynomial(p, x)
assert mp.subs({x: 0}) == -71965773323122507776
def test_minpoly_compose():
# issue 6868
eq = S('''
-1/(800*sqrt(-1/240 + 1/(18000*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)) + 2*(-1/17280000 +
sqrt(15)*I/28800000)**(1/3)))''')
mp = minimal_polynomial(eq + 3, x)
assert mp == 8000*x**2 - 48000*x + 71999
# issue 5888
assert minimal_polynomial(exp(I*pi/8), x) == x**8 + 1
mp = minimal_polynomial(sin(pi/7) + sqrt(2), x)
assert mp == 4096*x**12 - 63488*x**10 + 351488*x**8 - 826496*x**6 + \
770912*x**4 - 268432*x**2 + 28561
mp = minimal_polynomial(cos(pi/7) + sqrt(2), x)
assert mp == 64*x**6 - 64*x**5 - 432*x**4 + 304*x**3 + 712*x**2 - \
232*x - 239
mp = minimal_polynomial(exp(I*pi/7) + sqrt(2), x)
assert mp == x**12 - 2*x**11 - 9*x**10 + 16*x**9 + 43*x**8 - 70*x**7 - 97*x**6 + 126*x**5 + 211*x**4 - 212*x**3 - 37*x**2 + 142*x + 127
mp = minimal_polynomial(exp(2*I*pi/7), x)
assert mp == x**6 + x**5 + x**4 + x**3 + x**2 + x + 1
mp = minimal_polynomial(exp(2*I*pi/15), x)
assert mp == x**8 - x**7 + x**5 - x**4 + x**3 - x + 1
mp = minimal_polynomial(cos(2*pi/7), x)
assert mp == 8*x**3 + 4*x**2 - 4*x - 1
mp = minimal_polynomial(sin(2*pi/7), x)
ex = (5*cos(2*pi/7) - 7)/(9*cos(pi/7) - 5*cos(3*pi/7))
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(-1/(2*cos(pi/7)), x) == x**3 + 2*x**2 - x - 1
assert minimal_polynomial(sin(2*pi/15), x) == \
256*x**8 - 448*x**6 + 224*x**4 - 32*x**2 + 1
assert minimal_polynomial(sin(5*pi/14), x) == 8*x**3 - 4*x**2 - 4*x + 1
assert minimal_polynomial(cos(pi/15), x) == 16*x**4 + 8*x**3 - 16*x**2 - 8*x + 1
    ex = rootof(x**3 + 4*x + 1, 0)
mp = minimal_polynomial(ex, x)
assert mp == x**3 + 4*x + 1
mp = minimal_polynomial(ex + 1, x)
assert mp == x**3 - 3*x**2 + 7*x - 4
assert minimal_polynomial(exp(I*pi/3), x) == x**2 - x + 1
assert minimal_polynomial(exp(I*pi/4), x) == x**4 + 1
assert minimal_polynomial(exp(I*pi/6), x) == x**4 - x**2 + 1
assert minimal_polynomial(exp(I*pi/9), x) == x**6 - x**3 + 1
assert minimal_polynomial(exp(I*pi/10), x) == x**8 - x**6 + x**4 - x**2 + 1
assert minimal_polynomial(sin(pi/9), x) == 64*x**6 - 96*x**4 + 36*x**2 - 3
assert minimal_polynomial(sin(pi/11), x) == 1024*x**10 - 2816*x**8 + \
2816*x**6 - 1232*x**4 + 220*x**2 - 11
ex = 2**Rational(1, 3)*exp(Rational(2, 3)*I*pi)
assert minimal_polynomial(ex, x) == x**3 - 2
raises(NotAlgebraic, lambda: minimal_polynomial(cos(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(sin(pi*sqrt(2)), x))
raises(NotAlgebraic, lambda: minimal_polynomial(exp(I*pi*sqrt(2)), x))
# issue 5934
ex = 1/(-36000 - 7200*sqrt(5) + (12*sqrt(10)*sqrt(sqrt(5) + 5) +
24*sqrt(10)*sqrt(-sqrt(5) + 5))**2) + 1
raises(ZeroDivisionError, lambda: minimal_polynomial(ex, x))
ex = sqrt(1 + 2**Rational(1,3)) + sqrt(1 + 2**Rational(1,4)) + sqrt(2)
mp = minimal_polynomial(ex, x)
assert degree(mp) == 48 and mp.subs({x:0}) == -16630256576
def test_minpoly_issue_7113():
# see discussion in https://github.com/sympy/sympy/pull/2234
from sympy.simplify.simplify import nsimplify
r = nsimplify(pi, tolerance=0.000000001)
mp = minimal_polynomial(r, x)
assert mp == 1768292677839237920489538677417507171630859375*x**109 - \
2734577732179183863586489182929671773182898498218854181690460140337930774573792597743853652058046464
def test_minpoly_issue_7574():
ex = -(-1)**Rational(1, 3) + (-1)**Rational(2,3)
assert minimal_polynomial(ex, x) == x + 1
def test_primitive_element():
assert primitive_element([sqrt(2)], x) == (x**2 - 2, [1])
assert primitive_element(
[sqrt(2), sqrt(3)], x) == (x**4 - 10*x**2 + 1, [1, 1])
assert primitive_element([sqrt(2)], x, polys=True) == (Poly(x**2 - 2), [1])
assert primitive_element([sqrt(
2), sqrt(3)], x, polys=True) == (Poly(x**4 - 10*x**2 + 1), [1, 1])
assert primitive_element(
[sqrt(2)], x, ex=True) == (x**2 - 2, [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True) == \
(x**4 - 10*x**2 + 1, [1, 1], [[Q(1, 2), 0, -Q(9, 2), 0], [-
Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element(
[sqrt(2)], x, ex=True, polys=True) == (Poly(x**2 - 2), [1], [[1, 0]])
assert primitive_element([sqrt(2), sqrt(3)], x, ex=True, polys=True) == \
(Poly(x**4 - 10*x**2 + 1), [1, 1], [[Q(1, 2), 0, -Q(9, 2),
0], [-Q(1, 2), 0, Q(11, 2), 0]])
assert primitive_element([sqrt(2)], polys=True) == (Poly(x**2 - 2), [1])
raises(ValueError, lambda: primitive_element([], x, ex=False))
raises(ValueError, lambda: primitive_element([], x, ex=True))
# Issue 14117
a, b = I*sqrt(2*sqrt(2) + 3), I*sqrt(-2*sqrt(2) + 3)
assert primitive_element([a, b, I], x) == (x**4 + 6*x**2 + 1, [1, 0, 0])
def test_field_isomorphism_pslq():
a = AlgebraicNumber(I)
b = AlgebraicNumber(I*sqrt(3))
raises(NotImplementedError, lambda: field_isomorphism_pslq(a, b))
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
d = AlgebraicNumber(sqrt(2) + sqrt(3))
e = AlgebraicNumber(sqrt(2) + sqrt(3) + sqrt(7))
assert field_isomorphism_pslq(a, a) == [1, 0]
assert field_isomorphism_pslq(a, b) is None
assert field_isomorphism_pslq(a, c) is None
assert field_isomorphism_pslq(a, d) == [Q(1, 2), 0, -Q(9, 2), 0]
assert field_isomorphism_pslq(
a, e) == [Q(1, 80), 0, -Q(1, 2), 0, Q(59, 20), 0]
assert field_isomorphism_pslq(b, a) is None
assert field_isomorphism_pslq(b, b) == [1, 0]
assert field_isomorphism_pslq(b, c) is None
assert field_isomorphism_pslq(b, d) == [-Q(1, 2), 0, Q(11, 2), 0]
assert field_isomorphism_pslq(b, e) == [-Q(
3, 640), 0, Q(67, 320), 0, -Q(297, 160), 0, Q(313, 80), 0]
assert field_isomorphism_pslq(c, a) is None
assert field_isomorphism_pslq(c, b) is None
assert field_isomorphism_pslq(c, c) == [1, 0]
assert field_isomorphism_pslq(c, d) is None
assert field_isomorphism_pslq(c, e) == [Q(
3, 640), 0, -Q(71, 320), 0, Q(377, 160), 0, -Q(469, 80), 0]
assert field_isomorphism_pslq(d, a) is None
assert field_isomorphism_pslq(d, b) is None
assert field_isomorphism_pslq(d, c) is None
assert field_isomorphism_pslq(d, d) == [1, 0]
assert field_isomorphism_pslq(d, e) == [-Q(
3, 640), 0, Q(71, 320), 0, -Q(377, 160), 0, Q(549, 80), 0]
assert field_isomorphism_pslq(e, a) is None
assert field_isomorphism_pslq(e, b) is None
assert field_isomorphism_pslq(e, c) is None
assert field_isomorphism_pslq(e, d) is None
assert field_isomorphism_pslq(e, e) == [1, 0]
f = AlgebraicNumber(3*sqrt(2) + 8*sqrt(7) - 5)
assert field_isomorphism_pslq(
f, e) == [Q(3, 80), 0, -Q(139, 80), 0, Q(347, 20), 0, -Q(761, 20), -5]
def test_field_isomorphism():
assert field_isomorphism(3, sqrt(2)) == [3]
assert field_isomorphism( I*sqrt(3), I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism(-I*sqrt(3), I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism( I*sqrt(3), -I*sqrt(3)/2) == [-2, 0]
assert field_isomorphism(-I*sqrt(3), -I*sqrt(3)/2) == [ 2, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, 5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism( 2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [-S(6)/35, 0]
assert field_isomorphism(-2*I*sqrt(3)/7, -5*I*sqrt(3)/3) == [ S(6)/35, 0]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [ S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, 5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [-S(6)/35, 27]
assert field_isomorphism(
-2*I*sqrt(3)/7 + 27, -5*I*sqrt(3)/3) == [ S(6)/35, 27]
p = AlgebraicNumber( sqrt(2) + sqrt(3))
q = AlgebraicNumber(-sqrt(2) + sqrt(3))
r = AlgebraicNumber( sqrt(2) - sqrt(3))
s = AlgebraicNumber(-sqrt(2) - sqrt(3))
pos_coeffs = [ S(1)/2, S(0), -S(9)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(9)/2, S(0)]
a = AlgebraicNumber(sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
a = AlgebraicNumber(-sqrt(2))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
pos_coeffs = [ S(1)/2, S(0), -S(11)/2, S(0)]
neg_coeffs = [-S(1)/2, S(0), S(11)/2, S(0)]
a = AlgebraicNumber(sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(-sqrt(3))
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_coeffs
assert field_isomorphism(a, q, fast=True) == pos_coeffs
assert field_isomorphism(a, r, fast=True) == neg_coeffs
assert field_isomorphism(a, s, fast=True) == neg_coeffs
assert field_isomorphism(a, p, fast=False) == pos_coeffs
assert field_isomorphism(a, q, fast=False) == pos_coeffs
assert field_isomorphism(a, r, fast=False) == neg_coeffs
assert field_isomorphism(a, s, fast=False) == neg_coeffs
pos_coeffs = [ S(3)/2, S(0), -S(33)/2, -S(8)]
neg_coeffs = [-S(3)/2, S(0), S(33)/2, -S(8)]
a = AlgebraicNumber(3*sqrt(3) - 8)
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == neg_coeffs
assert field_isomorphism(a, q, fast=True) == neg_coeffs
assert field_isomorphism(a, r, fast=True) == pos_coeffs
assert field_isomorphism(a, s, fast=True) == pos_coeffs
assert field_isomorphism(a, p, fast=False) == neg_coeffs
assert field_isomorphism(a, q, fast=False) == neg_coeffs
assert field_isomorphism(a, r, fast=False) == pos_coeffs
assert field_isomorphism(a, s, fast=False) == pos_coeffs
a = AlgebraicNumber(3*sqrt(2) + 2*sqrt(3) + 1)
pos_1_coeffs = [ S(1)/2, S(0), -S(5)/2, S(1)]
neg_5_coeffs = [-S(5)/2, S(0), S(49)/2, S(1)]
pos_5_coeffs = [ S(5)/2, S(0), -S(49)/2, S(1)]
neg_1_coeffs = [-S(1)/2, S(0), S(5)/2, S(1)]
assert is_isomorphism_possible(a, p) is True
assert is_isomorphism_possible(a, q) is True
assert is_isomorphism_possible(a, r) is True
assert is_isomorphism_possible(a, s) is True
assert field_isomorphism(a, p, fast=True) == pos_1_coeffs
assert field_isomorphism(a, q, fast=True) == neg_5_coeffs
assert field_isomorphism(a, r, fast=True) == pos_5_coeffs
assert field_isomorphism(a, s, fast=True) == neg_1_coeffs
assert field_isomorphism(a, p, fast=False) == pos_1_coeffs
assert field_isomorphism(a, q, fast=False) == neg_5_coeffs
assert field_isomorphism(a, r, fast=False) == pos_5_coeffs
assert field_isomorphism(a, s, fast=False) == neg_1_coeffs
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(3))
c = AlgebraicNumber(sqrt(7))
assert is_isomorphism_possible(a, b) is True
assert is_isomorphism_possible(b, a) is True
assert is_isomorphism_possible(c, p) is False
assert field_isomorphism(sqrt(2), sqrt(3), fast=True) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=True) is None
assert field_isomorphism(sqrt(2), sqrt(3), fast=False) is None
assert field_isomorphism(sqrt(3), sqrt(2), fast=False) is None
def test_to_number_field():
assert to_number_field(sqrt(2)) == AlgebraicNumber(sqrt(2))
assert to_number_field(
[sqrt(2), sqrt(3)]) == AlgebraicNumber(sqrt(2) + sqrt(3))
a = AlgebraicNumber(sqrt(2) + sqrt(3), [S(1)/2, S(0), -S(9)/2, S(0)])
assert to_number_field(sqrt(2), sqrt(2) + sqrt(3)) == a
assert to_number_field(sqrt(2), AlgebraicNumber(sqrt(2) + sqrt(3))) == a
raises(IsomorphismFailed, lambda: to_number_field(sqrt(2), sqrt(3)))
def test_AlgebraicNumber():
minpoly, root = x**2 - 2, sqrt(2)
a = AlgebraicNumber(root, gen=x)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(0)]
assert a.native_coeffs() == [QQ(1), QQ(0)]
a = AlgebraicNumber(root, gen=x, alias='y')
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
a = AlgebraicNumber(root, gen=x, alias=Symbol('y'))
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
assert a.root == root
assert a.alias == Symbol('y')
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is True
assert AlgebraicNumber(sqrt(2), []).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), ()).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), (0, 0)).rep == DMP([], QQ)
assert AlgebraicNumber(sqrt(2), [8]).rep == DMP([QQ(8)], QQ)
assert AlgebraicNumber(sqrt(2), [S(8)/3]).rep == DMP([QQ(8, 3)], QQ)
assert AlgebraicNumber(sqrt(2), [7, 3]).rep == DMP([QQ(7), QQ(3)], QQ)
assert AlgebraicNumber(
sqrt(2), [S(7)/9, S(3)/2]).rep == DMP([QQ(7, 9), QQ(3, 2)], QQ)
assert AlgebraicNumber(sqrt(2), [1, 2, 3]).rep == DMP([QQ(2), QQ(5)], QQ)
a = AlgebraicNumber(AlgebraicNumber(root, gen=x), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert a.coeffs() == [S(1), S(2)]
assert a.native_coeffs() == [QQ(1), QQ(2)]
a = AlgebraicNumber((minpoly, root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
a = AlgebraicNumber((Poly(minpoly), root), [1, 2])
assert a.rep == DMP([QQ(1), QQ(2)], QQ)
assert a.root == root
assert a.alias is None
assert a.minpoly == minpoly
assert a.is_number
assert a.is_aliased is False
assert AlgebraicNumber( sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
assert AlgebraicNumber(-sqrt(3)).rep == DMP([ QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(2))
b = AlgebraicNumber(sqrt(2))
assert a == b
c = AlgebraicNumber(sqrt(2), gen=x)
d = AlgebraicNumber(sqrt(2), gen=x)
assert a == b
assert a == c
a = AlgebraicNumber(sqrt(2), [1, 2])
b = AlgebraicNumber(sqrt(2), [1, 3])
assert a != b and a != sqrt(2) + 3
assert (a == x) is False and (a != x) is True
a = AlgebraicNumber(sqrt(2), [1, 0])
b = AlgebraicNumber(sqrt(2), [1, 0], alias=y)
assert a.as_poly(x) == Poly(x)
assert b.as_poly() == Poly(y)
assert a.as_expr() == sqrt(2)
assert a.as_expr(x) == x
assert b.as_expr() == sqrt(2)
assert b.as_expr(x) == x
a = AlgebraicNumber(sqrt(2), [2, 3])
b = AlgebraicNumber(sqrt(2), [2, 3], alias=y)
p = a.as_poly()
assert p == Poly(2*p.gen + 3)
assert a.as_poly(x) == Poly(2*x + 3)
assert b.as_poly() == Poly(2*y + 3)
assert a.as_expr() == 2*sqrt(2) + 3
assert a.as_expr(x) == 2*x + 3
assert b.as_expr() == 2*sqrt(2) + 3
assert b.as_expr(x) == 2*x + 3
a = AlgebraicNumber(sqrt(2))
b = to_number_field(sqrt(2))
assert a.args == b.args == (sqrt(2), Tuple(1, 0))
b = AlgebraicNumber(sqrt(2), alias='alpha')
assert b.args == (sqrt(2), Tuple(1, 0), Symbol('alpha'))
a = AlgebraicNumber(sqrt(2), [1, 2, 3])
assert a.args == (sqrt(2), Tuple(1, 2, 3))
def test_to_algebraic_integer():
a = AlgebraicNumber(sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 3
assert a.root == sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(2*sqrt(3), gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(1), QQ(0)], QQ)
a = AlgebraicNumber(sqrt(3)/2, [S(7)/19, 3], gen=x).to_algebraic_integer()
assert a.minpoly == x**2 - 12
assert a.root == 2*sqrt(3)
assert a.rep == DMP([QQ(7, 19), QQ(3)], QQ)
def test_IntervalPrinter():
ip = IntervalPrinter()
assert ip.doprint(x**Q(1, 3)) == "x**(mpi('1/3'))"
assert ip.doprint(sqrt(x)) == "x**(mpi('1/2'))"
def test_isolate():
assert isolate(1) == (1, 1)
assert isolate(S(1)/2) == (S(1)/2, S(1)/2)
assert isolate(sqrt(2)) == (1, 2)
assert isolate(-sqrt(2)) == (-2, -1)
assert isolate(sqrt(2), eps=S(1)/100) == (S(24)/17, S(17)/12)
assert isolate(-sqrt(2), eps=S(1)/100) == (-S(17)/12, -S(24)/17)
raises(NotImplementedError, lambda: isolate(I))
def test_minpoly_fraction_field():
assert minimal_polynomial(1/x, y) == -x*y + 1
assert minimal_polynomial(1 / (x + 1), y) == (x + 1)*y - 1
assert minimal_polynomial(sqrt(x), y) == y**2 - x
assert minimal_polynomial(sqrt(x + 1), y) == y**2 - x - 1
assert minimal_polynomial(sqrt(x) / x, y) == x*y**2 - 1
assert minimal_polynomial(sqrt(2) * sqrt(x), y) == y**2 - 2 * x
assert minimal_polynomial(sqrt(2) + sqrt(x), y) == \
y**4 + (-2*x - 4)*y**2 + x**2 - 4*x + 4
assert minimal_polynomial(x**Rational(1,3), y) == y**3 - x
assert minimal_polynomial(x**Rational(1,3) + sqrt(x), y) == \
y**6 - 3*x*y**4 - 2*x*y**3 + 3*x**2*y**2 - 6*x**2*y - x**3 + x**2
assert minimal_polynomial(sqrt(x) / z, y) == z**2*y**2 - x
assert minimal_polynomial(sqrt(x) / (z + 1), y) == (z**2 + 2*z + 1)*y**2 - x
assert minimal_polynomial(1/x, y, polys=True) == Poly(-x*y + 1, y)
assert minimal_polynomial(1 / (x + 1), y, polys=True) == \
Poly((x + 1)*y - 1, y)
assert minimal_polynomial(sqrt(x), y, polys=True) == Poly(y**2 - x, y)
assert minimal_polynomial(sqrt(x) / z, y, polys=True) == \
Poly(z**2*y**2 - x, y)
# this is (sqrt(1 + x**3)/x).integrate(x).diff(x) - sqrt(1 + x**3)/x
a = sqrt(x)/sqrt(1 + x**(-3)) - sqrt(x**3 + 1)/x + 1/(x**(S(5)/2)* \
(1 + x**(-3))**(S(3)/2)) + 1/(x**(S(11)/2)*(1 + x**(-3))**(S(3)/2))
assert minimal_polynomial(a, y) == y
raises(NotAlgebraic, lambda: minimal_polynomial(exp(x), y))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x), x))
raises(GeneratorsError, lambda: minimal_polynomial(sqrt(x) - y, x))
raises(NotImplementedError, lambda: minimal_polynomial(sqrt(x), y, compose=False))
@slow
def test_minpoly_fraction_field_slow():
assert minimal_polynomial(minimal_polynomial(sqrt(x**Rational(1,5) - 1),
y).subs(y, sqrt(x**Rational(1,5) - 1)), z) == z
def test_minpoly_domain():
assert minimal_polynomial(sqrt(2), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - sqrt(2)
assert minimal_polynomial(sqrt(8), x, domain=QQ.algebraic_field(sqrt(2))) == \
x - 2*sqrt(2)
assert minimal_polynomial(sqrt(Rational(3,2)), x,
domain=QQ.algebraic_field(sqrt(2))) == 2*x**2 - 3
raises(NotAlgebraic, lambda: minimal_polynomial(y, x, domain=QQ))
| gpl-2.0 |
superdesk/Live-Blog | plugins/media-archive/superdesk/media_archive/core/impl/db_search.py | 2 | 19008 | '''
Created on Aug 21, 2012
@package: superdesk media archive
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Ioan v. Pocol
The implementation of database server based search API.
'''
from ally.api.type import typeFor
from ally.container.ioc import injected
from ally.support.api.util_service import namesForQuery
from ally.support.sqlalchemy.util_service import buildLimits, buildQuery
from superdesk.media_archive.api.meta_data import QMetaData
from superdesk.media_archive.api.meta_info import QMetaInfo
from superdesk.media_archive.meta.meta_data import MetaDataMapped
from superdesk.media_archive.meta.meta_info import MetaInfoMapped
from sqlalchemy.sql.expression import or_, and_, not_
from ally.support.sqlalchemy.mapper import mappingFor
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.properties import ColumnProperty
from superdesk.media_archive.api.criteria import AsLikeExpressionOrdered, AsLikeExpression
from superdesk.media_archive.meta.meta_type import MetaTypeMapped
from superdesk.media_archive.api.meta_data_info import QMetaDataInfo
from superdesk.media_archive.core.impl.query_service_creator import ISearchProvider
# --------------------------------------------------------------------
@injected
class SqlSearchProvider(ISearchProvider):
'''
Implementation @see: ISearchProvider
'''
def update(self, metaInfo, metaData):
'''
@see: ISearchProvider.update()
'''
# do nothing because all search indexes are automatically managed by database server
pass
# ----------------------------------------------------------------
def delete(self, idMetaInfo, metaType):
'''
@see: ISearchProvider.delete()
'''
# do nothing because all search indexes are automatically managed by database server
pass
# ----------------------------------------------------------------
def buildQuery(self, session, scheme, offset=None, limit=1000, qa=None, qi=None, qd=None):
'''
@see: ISearchProvider.buildQuery()
'''
metaInfos = set()
metaDatas = set()
sqlUnion = None
sqlList = list()
types = list(self.queryIndexer.typesByMetaData.values())
if qa is not None:
assert isinstance(qa, QMetaDataInfo), 'Invalid query %s' % qa
if QMetaDataInfo.type in qa:
types = qa.type.values
for name, criteria in self.queryIndexer.infoCriterias.items():
if criteria is AsLikeExpression or criteria is AsLikeExpressionOrdered:
criteriaMetaInfos = self.queryIndexer.metaInfoByCriteria.get(name)
# if MetaInfo is present, add only MetaInfo
if MetaInfoMapped not in criteriaMetaInfos:
for metaInfo in criteriaMetaInfos:
if self.queryIndexer.typesByMetaInfo[metaInfo.__name__] in types: metaInfos.add(metaInfo)
elif self.queryIndexer.typesByMetaInfo[getattr(MetaInfoMapped, '__name__')] in types:
metaInfos.add(MetaInfoMapped)
for name, criteria in self.queryIndexer.dataCriterias.items():
if criteria is AsLikeExpression or criteria is AsLikeExpressionOrdered:
criteriaMetaDatas = self.queryIndexer.metaDataByCriteria.get(name)
# if MetaData is present, add only MetaData
if MetaDataMapped not in criteriaMetaDatas:
for metaData in criteriaMetaDatas:
if self.queryIndexer.typesByMetaData[metaData.__name__] in types: metaDatas.add(metaData)
elif self.queryIndexer.typesByMetaData[getattr(MetaDataMapped, '__name__')] in types:
metaDatas.add(MetaDataMapped)
if qi is not None:
assert isinstance(qi, self.QMetaInfo), 'Invalid query %s' % qi
for name in namesForQuery(qi):
if getattr(self.QMetaInfo, name) not in qi: continue
criteriaMetaInfos = self.queryIndexer.metaInfoByCriteria.get(name)
assert criteriaMetaInfos, 'No model class available for %s' % name
# if MetaInfo is present, add only MetaInfo
if MetaInfoMapped not in criteriaMetaInfos:
for metaInfo in criteriaMetaInfos:
if self.queryIndexer.typesByMetaInfo[metaInfo.__name__] in types: metaInfos.add(metaInfo)
elif self.queryIndexer.typesByMetaInfo[getattr(MetaInfoMapped, '__name__')] in types:
metaInfos.add(MetaInfoMapped)
if qd is not None:
assert isinstance(qd, self.QMetaData), 'Invalid query %s' % qd
for name in namesForQuery(qd):
if getattr(self.QMetaData, name) not in qd: continue
criteriaMetaDatas = self.queryIndexer.metaDataByCriteria.get(name)
assert criteriaMetaDatas, 'No model class available for %s' % name
# if MetaData is present, add only MetaData
if MetaDataMapped not in criteriaMetaDatas:
for metaData in criteriaMetaDatas:
if self.queryIndexer.typesByMetaData[metaData.__name__] in types: metaDatas.add(metaData)
elif self.queryIndexer.typesByMetaData[getattr(MetaDataMapped, '__name__')] in types:
metaDatas.add(MetaDataMapped)
if not metaInfos and not metaDatas:
pass
elif metaInfos and not metaDatas:
for metaInfo in metaInfos:
sql = self.buildSubquery(session, metaInfo, MetaDataMapped, qa, qi, qd, types)
if sql: sqlList.append(sql)
elif not metaInfos and metaDatas:
for metaData in metaDatas:
sql = self.buildSubquery(session, MetaInfoMapped, metaData, qa, qi, qd, types)
if sql: sqlList.append(sql)
else:
for metaInfo in metaInfos:
metaData = self.queryIndexer.metaDatasByInfo[metaInfo.__name__]
if metaData in metaDatas:
sql = self.buildSubquery(session, metaInfo, metaData, qa, qi, qd, types)
if sql: sqlList.append(sql)
else:
sql = self.buildSubquery(session, metaInfo, MetaDataMapped, qa, qi, qd, types)
if sql: sqlList.append(sql)
for metaData in metaDatas:
if metaData is MetaDataMapped: continue
if self.queryIndexer.metaInfosByData[metaData.__name__] not in metaInfos:
sql = self.buildSubquery(session, MetaInfoMapped, metaData, qa, qi, qd, types)
if sql: sqlList.append(sql)
sqlLength = len(sqlList)
if sqlLength == 0:
sqlUnion = self.buildSubquery(session, MetaInfoMapped, MetaDataMapped, qa, qi, qd, types)
elif sqlLength == 1:
sqlUnion = sqlList[0]
else:
sqlUnion = sqlList.pop()
sqlUnion = sqlUnion.union(*sqlList)
count = sqlUnion.count()
sqlUnion = buildLimits(sqlUnion, offset, limit)
return (sqlUnion, count)
# ----------------------------------------------------------------
def buildSubquery(self, session, metaInfo, metaData, qa, qi, qd, types):
sql = session.query(MetaDataMapped)
if metaInfo == MetaInfoMapped and metaData == MetaDataMapped:
if types:
sql = sql.join(MetaTypeMapped, MetaTypeMapped.Id == MetaDataMapped.typeId)
sql = sql.filter(MetaTypeMapped.Type.in_(types))
elif metaInfo != MetaInfoMapped:
sql = sql.join(MetaTypeMapped, and_(MetaTypeMapped.Id == MetaDataMapped.typeId, MetaTypeMapped.Type == self.queryIndexer.typesByMetaInfo[metaInfo.__name__]))
elif metaData != MetaDataMapped:
sql = sql.join(MetaTypeMapped, and_(MetaTypeMapped.Id == MetaDataMapped.typeId, MetaTypeMapped.Type == self.queryIndexer.typesByMetaData[metaData.__name__]))
sql = sql.join(MetaInfoMapped, MetaDataMapped.Id == MetaInfoMapped.MetaData)
sql = sql.add_entity(MetaInfoMapped)
if qi: sql = buildQuery(sql, qi, metaInfo)
if qd: sql = buildQuery(sql, qd, metaData)
if qi and metaInfo != MetaInfoMapped:
sql = buildQuery(sql, qi, MetaInfoMapped)
if qd and metaData != MetaDataMapped:
sql = buildQuery(sql, qd, MetaDataMapped)
if qi: sql = buildExpressionQuery(sql, qi, metaInfo, qa)
if qd: sql = buildExpressionQuery(sql, qd, metaData, qa)
if qi and metaInfo != MetaInfoMapped:
sql = buildExpressionQuery(sql, qi, MetaInfoMapped, qa)
if qd and metaData != MetaDataMapped:
sql = buildExpressionQuery(sql, qd, MetaDataMapped, qa)
if qa and qa.all:
assert isinstance(qa, QMetaDataInfo), 'Invalid query %s' % qa
sql = buildAllQuery(sql, qa.all, self.queryIndexer.queryByInfo[metaInfo.__name__], metaInfo,
self.queryIndexer.queryByData[metaData.__name__], metaData)
return sql
# ----------------------------------------------------------------
def buildExpressionQuery(sql, query, mapped, qa):
'''
Builds the query on the SQL alchemy query.
@param sql: SQL alchemy
The sql alchemy query to use.
@param query: query
The REST query object to provide filtering on.
@param mapped: class
The mapped model class to use the query on.
@param qa: query
The 'all' criteria query used for global include/exclude filtering.
'''
assert query is not None, 'A query object is required'
clazz = query.__class__
mapper = mappingFor(mapped)
assert isinstance(mapper, Mapper)
all = None
if qa: all = qa.all
columns = {cp.key.lower(): getattr(mapper.c, cp.key)
for cp in mapper.iterate_properties if isinstance(cp, ColumnProperty)}
columns = {criteria:columns.get(criteria.lower()) for criteria in namesForQuery(clazz)}
for criteria, column in columns.items():
if column is None or getattr(clazz, criteria) not in query: continue
crt = getattr(query, criteria)
if isinstance(crt, AsLikeExpression) or isinstance(crt, AsLikeExpressionOrdered):
# include
if AsLikeExpression.inc in crt:
for value in crt.inc:
sql = sql.filter(column.like(processLike(value)))
if all and AsLikeExpression.inc in all:
for value in all.inc:
sql = sql.filter(column.like(processLike(value)))
# extend
clauses = list()
if AsLikeExpression.ext in crt:
for value in crt.ext:
clauses.append(column.like(processLike(value)))
if all and AsLikeExpression.ext in all:
for value in all.ext:
clauses.append(column.like(processLike(value)))
length = len(clauses)
if length == 1: sql = sql.filter(clauses[0])
elif length > 1: sql = sql.filter(or_(*clauses))
# exclude
if AsLikeExpression.exc in crt:
for value in crt.exc:
sql = sql.filter(not_(column.like(processLike(value))))
if all and AsLikeExpression.exc in all:
for value in all.exc:
sql = sql.filter(not_(column.like(processLike(value))))
return sql
# ----------------------------------------------------------------
def buildAllQuery(sql, all, qMetaInfo, metaInfo, qMetaData, metaData):
'''
Builds the query for all criteria.
@param sql: SQL alchemy
The sql alchemy query to use.
@param qMetaInfo: query
The REST query object to provide filtering on for meta info.
@param metaInfo: class
The meta info mapped model class to use the query on.
@param qMetaData: query
The REST query object to provide filtering on for meta data
@param metaData: class
The meta data mapped model class to use the query on.
'''
infoMapper = mappingFor(metaInfo)
assert isinstance(infoMapper, Mapper)
dataMapper = mappingFor(metaData)
assert isinstance(dataMapper, Mapper)
baseInfoMapper = mappingFor(MetaInfoMapped)
assert isinstance(baseInfoMapper, Mapper)
baseDataMapper = mappingFor(MetaDataMapped)
assert isinstance(baseDataMapper, Mapper)
infoProperties = {cp.key.lower(): getattr(infoMapper.c, cp.key)
for cp in infoMapper.iterate_properties if isinstance(cp, ColumnProperty)}
dataProperties = {cp.key.lower(): getattr(dataMapper.c, cp.key)
for cp in dataMapper.iterate_properties if isinstance(cp, ColumnProperty)}
baseInfoProperties = {cp.key.lower(): getattr(baseInfoMapper.c, cp.key)
for cp in baseInfoMapper.iterate_properties if isinstance(cp, ColumnProperty)}
baseDataProperties = {cp.key.lower(): getattr(baseDataMapper.c, cp.key)
for cp in baseDataMapper.iterate_properties if isinstance(cp, ColumnProperty)}
infoQueryType = typeFor(qMetaInfo)
dataQueryType = typeFor(qMetaData)
baseInfoQueryType = typeFor(QMetaInfo)
baseDataQueryType = typeFor(QMetaData)
if all.inc:
for value in all.inc:
clauses = list()
for criteria, crtClass in infoQueryType.query.criterias.items():
column = infoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
for criteria, crtClass in dataQueryType.query.criterias.items():
column = dataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
if metaInfo != MetaInfoMapped:
for criteria, crtClass in baseInfoQueryType.query.criterias.items():
column = baseInfoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
if metaData != MetaDataMapped:
for criteria, crtClass in baseDataQueryType.query.criterias.items():
column = baseDataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
length = len(clauses)
if length == 1: sql = sql.filter(clauses[0])
elif length > 1: sql = sql.filter(or_(*clauses))
if all.ext:
clauses = list()
for value in all.ext:
for criteria, crtClass in infoQueryType.query.criterias.items():
column = infoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
for criteria, crtClass in dataQueryType.query.criterias.items():
column = dataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
if metaInfo != MetaInfoMapped:
for criteria, crtClass in baseInfoQueryType.query.criterias.items():
column = baseInfoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
if metaData != MetaDataMapped:
for criteria, crtClass in baseDataQueryType.query.criterias.items():
column = baseDataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(column.like(processLike(value)))
length = len(clauses)
if length == 1: sql = sql.filter(clauses[0])
elif length > 1: sql = sql.filter(or_(*clauses))
if all.exc:
clauses = list()
for value in all.exc:
for criteria, crtClass in infoQueryType.query.criterias.items():
column = infoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(not_(column.like(processLike(value))))
for criteria, crtClass in dataQueryType.query.criterias.items():
column = dataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(not_(column.like(processLike(value))))
if metaInfo != MetaInfoMapped:
for criteria, crtClass in baseInfoQueryType.query.criterias.items():
column = baseInfoProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(not_(column.like(processLike(value))))
if metaData != MetaDataMapped:
for criteria, crtClass in baseDataQueryType.query.criterias.items():
column = baseDataProperties.get(criteria.lower())
if column is None: continue
if crtClass == AsLikeExpression or crtClass == AsLikeExpressionOrdered:
clauses.append(not_(column.like(processLike(value))))
length = len(clauses)
if length == 1: sql = sql.filter(clauses[0])
elif length > 1: sql = sql.filter(and_(*clauses))
return sql
# ----------------------------------------------------------------
def processLike(value):
assert isinstance(value, str), 'Invalid like value %s' % value
if not value:
return '%'
if not value.endswith('%'):
value = value + '%'
if not value.startswith('%'):
value = '%' + value
return value
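# Illustrative sketch (added for clarity; not part of the original module).
# It exercises the wildcard handling of processLike() above: an empty value
# matches everything, and '%' is only added where it is missing.
def _process_like_examples():
    assert processLike('') == '%'
    assert processLike('foo') == '%foo%'
    assert processLike('%foo') == '%foo%'
    assert processLike('foo%') == '%foo%'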
| agpl-3.0 |
twoerner/bitbake | lib/bb/cache_extra.py | 14 | 3113 | # ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
#
# Extra RecipeInfo will be all defined in this file. Currently,
# Only Hob (Image Creator) Requests some extra fields. So
# HobRecipeInfo is defined. It's named HobRecipeInfo because it
# is introduced by 'hob'. Users could also introduce other
# RecipeInfo or simply use those already defined RecipeInfo.
# In the following patch, this newly defined extra RecipeInfo
# will be dynamically loaded and used for loading/saving the extra
# cache fields
# Copyright (C) 2011, Intel Corporation. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from bb.cache import RecipeInfoCommon
class HobRecipeInfo(RecipeInfoCommon):
__slots__ = ()
classname = "HobRecipeInfo"
# please override this member with the correct data cache file
# such as (bb_cache.dat, bb_extracache_hob.dat)
cachefile = "bb_extracache_" + classname +".dat"
# override this member with the list of extra cache fields
# that this class will provide
cachefields = ['summary', 'license', 'section',
'description', 'homepage', 'bugtracker',
'prevision', 'files_info']
def __init__(self, filename, metadata):
self.summary = self.getvar('SUMMARY', metadata)
self.license = self.getvar('LICENSE', metadata)
self.section = self.getvar('SECTION', metadata)
self.description = self.getvar('DESCRIPTION', metadata)
self.homepage = self.getvar('HOMEPAGE', metadata)
self.bugtracker = self.getvar('BUGTRACKER', metadata)
self.prevision = self.getvar('PR', metadata)
self.files_info = self.getvar('FILES_INFO', metadata)
@classmethod
def init_cacheData(cls, cachedata):
# CacheData in Hob RecipeInfo Class
cachedata.summary = {}
cachedata.license = {}
cachedata.section = {}
cachedata.description = {}
cachedata.homepage = {}
cachedata.bugtracker = {}
cachedata.prevision = {}
cachedata.files_info = {}
def add_cacheData(self, cachedata, fn):
cachedata.summary[fn] = self.summary
cachedata.license[fn] = self.license
cachedata.section[fn] = self.section
cachedata.description[fn] = self.description
cachedata.homepage[fn] = self.homepage
cachedata.bugtracker[fn] = self.bugtracker
cachedata.prevision[fn] = self.prevision
cachedata.files_info[fn] = self.files_info
| gpl-2.0 |
jhbradley/moose | framework/scripts/find_hung_process.py | 28 | 5808 | #!/usr/bin/env python
# This script can be used to figure out if a job on a cluster is hung. If all goes well, it'll print the unique
# stack traces out.
import sys, os, re, subprocess
from tempfile import TemporaryFile
from optparse import OptionParser, OptionGroup, Values
##################################################################
# Modify the following variable(s) for your cluster or use one of the versions below
### FISSION
#node_name_pattern = re.compile("(fission-\d{4})")
#pstack_binary = 'pstack'
### BECHLER
#node_name_pattern = re.compile("(b\d{2})")
#pstack_binary = 'pstack'
### FALCON
node_name_pattern = re.compile("(r\di\dn\d{1,2})")
pstack_binary = 'gstack'
##################################################################
def generateTraces(job_num, application_name, num_hosts):
command = "qstat -n " + job_num
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = p.communicate()[0]
# The lists of hosts
hosts = []
# The array of jobs
jobs = []
# The machine name should go here!
host_strs = node_name_pattern.findall(output)
for i in host_strs:
hosts.append(i)
# Launch all the jobs
if num_hosts == 0:
num_hosts = len(hosts)
for i in range(len(hosts)):
if i >= num_hosts:
continue
#command = "ssh " + host + " \"ps -e | grep " + application_name + " | awk '{print \$1}' | xargs -I {} gdb --batch --pid={} -ex bt 2>&1 | grep '^#' \""
command = "ssh " + hosts[i] + " \"ps -e | grep " + application_name + " | awk '{print \$1}' | xargs -I '{}' sh -c 'echo Host: " + hosts[i] + " PID: {}; " + pstack_binary + " {}; printf '*%.0s' {1..80}; echo' \""
f = TemporaryFile()
p = subprocess.Popen(command, stdout=f, close_fds=False, shell=True)
jobs.append((p, f))
# Now process the output from each of the jobs
traces = []
for (p, f) in jobs:
p.wait()
f.seek(0)
output = f.read()
f.close()
# strip blank lines
output = os.linesep.join([s for s in output.splitlines() if s])
traces.extend(splitTraces(output))
return traces
def readTracesFromFile(filename):
f = open(filename)
data = f.read()
return splitTraces(data)
def splitTraces(trace_string):
trace_regex = re.compile("^\**\n", re.M)
traces = trace_regex.split(trace_string)
# # Only keep lines beginning with a #
# throw_away = re.compile("^[^#].*", re.M)
# traces = [throw_away.sub("", trace) for trace in traces]
return traces
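# Illustrative sketch (added; the sample hosts and PIDs are made up).
# splitTraces() cuts the combined output on separator lines made solely of
# asterisks, as written by generateTraces() above.
def _split_traces_example():
    sample = ("Host: r1i0n1 PID: 100\n#0 poll ()\n"
              + "*" * 80 + "\n"
              + "Host: r1i0n2 PID: 101\n#0 poll ()\n")
    assert len(splitTraces(sample)) == 2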
# Process the individual traces
def processTraces(traces, num_lines_to_keep):
unique_stack_traces = {}
last_lines_regex = re.compile("(?:.*\n){" + str(num_lines_to_keep) + "}\Z", re.M)
host_regex = re.compile("^(Host.*)", re.M)
for trace in traces:
if len(trace) == 0:
continue
# Grab the host and PID
m = host_regex.search(trace)
if m:
host_pid = m.group(1)
# If the user requested to save only the last few lines, do that here
if num_lines_to_keep:
m = last_lines_regex.search(trace)
if m:
trace = m.group(0)
unique = ''
for bt in unique_stack_traces:
if compareTraces(trace, bt):
unique = bt
if unique == '':
unique_stack_traces[trace] = [host_pid]
else:
unique_stack_traces[unique].append(host_pid)
return unique_stack_traces
def compareTraces(trace1, trace2):
lines1 = trace1.split("\n")
lines2 = trace2.split("\n")
if len(lines1) != len(lines2):
return False
# Only compare the stack trace part - not the memory addresses
# Note this subroutine may need tweaking if the stack trace is different
# on the current machine
memory_re = re.compile("0x[0-9a-f]*")
for i in xrange(len(lines1)):
line1 = lines1[i].split()[2:]
line2 = lines2[i].split()[2:]
# Let's strip out all the memory addresses too
line1 = [memory_re.sub("0x...", line) for line in line1]
line2 = [memory_re.sub("0x...", line) for line in line2]
if line1 != line2:
return False
return True
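# Illustrative sketch (added; the frames are made up). compareTraces()
# treats two stacks as equal when they differ only in memory addresses and
# the first two tokens of each frame line.
def _compare_traces_example():
    t1 = "#0  0x2aaab1 in poll () from /lib64/libc.so.6\n"
    t2 = "#0  0x2aaab2 in poll () from /lib64/libc.so.6\n"
    assert compareTraces(t1, t2)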
def main():
parser = OptionParser(usage='Usage: %prog [options] <PBS Job num> <Application>')
parser.add_option('-s', '--stacks', action='store', dest='stacks', type='int', default=0, help="The number of stack frames to keep and compare for uniqueness (Default: ALL)")
parser.add_option('-n', '--hosts', action='store', dest='hosts', type='int', default=0, help="The number of hosts to visit (Default: ALL)")
parser.add_option('-f', '--force', action='store_true', dest='force', default=False, help="Whether or not to force a regen if a cache file exists")
(options, args) = parser.parse_args()
if len(args) != 2:
parser.print_help()
sys.exit(1)
# The PBS job number and the application should be passed on the command line
# Additionally, an optional argument of the number of frames to keep (compare) may be passed
job_num = args[0]
application = args[1]
num_to_keep = options.stacks
num_hosts = options.hosts
# first see if there is a cache file available
cache_filename = application + '.' + job_num + '.cache'
traces = []
if not os.path.exists(cache_filename) or options.force:
traces = generateTraces(job_num, application, options.hosts)
# Cache the results to a file
cache_file = open(cache_filename, 'w')
for trace in traces:
cache_file.write(trace + "*"*80 + "\n")
cache_file.write("\n")
cache_file.close()
# Process the traces to collapse them into unique stacks
traces = readTracesFromFile(cache_filename)
unique_stack_traces = processTraces(traces, num_to_keep)
print "Unique Stack Traces"
for trace, count in unique_stack_traces.iteritems():
print "*"*80 + "\nCount: " + str(len(count)) + "\n"
if len(count) < 10:
print "\n".join(count)
print "\n" + trace
if __name__ == '__main__':
main()
| lgpl-2.1 |
nioinnovation/safepickle | safepickle/encoding.py | 1 | 1118 | from .types import TypesManager
def encode(obj):
""" Encodes an item preparing it to be json serializable
Encode relies on defined custom types to provide encoding, which in turn
are responsible of using the 'encode' function parameter passed to them
to recursively encoded contained items.
Args:
obj: item to encode
Returns:
encoded item
"""
# handle basic types separately
if obj is None or isinstance(obj, (bool, int, float, str)):
return obj
for type_ in TypesManager.get_types():
if type_.can_encode(obj):
return type_.encode(obj, encode)
raise TypeError("Type: '{}' is not supported".format(type(obj)))
def decode(dct):
""" Object hook to use to decode object literals.
This function is called from within json loading mechanism
for every literal
Args:
dct (dict): literal to decode
Returns:
decoded literal
"""
for type_ in TypesManager.get_types():
if type_.can_decode(dct):
return type_.decode(dct)
return dct
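# Illustrative sketch (added; not part of the original module). Basic types
# pass straight through encode(); anything else must be claimed by a type
# registered with TypesManager, otherwise the TypeError above is raised.
def _encode_examples():
    assert encode(None) is None
    assert encode(42) == 42
    assert encode("abc") == "abc"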
| apache-2.0 |
yencarnacion/jaikuengine | .google_appengine/lib/jinja2-2.6/jinja2/testsuite/inheritance.py | 90 | 7682 | # -*- coding: utf-8 -*-
"""
jinja2.testsuite.inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the template inheritance feature.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import unittest
from jinja2.testsuite import JinjaTestCase
from jinja2 import Environment, DictLoader
LAYOUTTEMPLATE = '''\
|{% block block1 %}block 1 from layout{% endblock %}
|{% block block2 %}block 2 from layout{% endblock %}
|{% block block3 %}
{% block block4 %}nested block 4 from layout{% endblock %}
{% endblock %}|'''
LEVEL1TEMPLATE = '''\
{% extends "layout" %}
{% block block1 %}block 1 from level1{% endblock %}'''
LEVEL2TEMPLATE = '''\
{% extends "level1" %}
{% block block2 %}{% block block5 %}nested block 5 from level2{%
endblock %}{% endblock %}'''
LEVEL3TEMPLATE = '''\
{% extends "level2" %}
{% block block5 %}block 5 from level3{% endblock %}
{% block block4 %}block 4 from level3{% endblock %}
'''
LEVEL4TEMPLATE = '''\
{% extends "level3" %}
{% block block3 %}block 3 from level4{% endblock %}
'''
WORKINGTEMPLATE = '''\
{% extends "layout" %}
{% block block1 %}
{% if false %}
{% block block2 %}
this should work
{% endblock %}
{% endif %}
{% endblock %}
'''
env = Environment(loader=DictLoader({
'layout': LAYOUTTEMPLATE,
'level1': LEVEL1TEMPLATE,
'level2': LEVEL2TEMPLATE,
'level3': LEVEL3TEMPLATE,
'level4': LEVEL4TEMPLATE,
'working': WORKINGTEMPLATE
}), trim_blocks=True)
class InheritanceTestCase(JinjaTestCase):
def test_layout(self):
tmpl = env.get_template('layout')
assert tmpl.render() == ('|block 1 from layout|block 2 from '
'layout|nested block 4 from layout|')
def test_level1(self):
tmpl = env.get_template('level1')
assert tmpl.render() == ('|block 1 from level1|block 2 from '
'layout|nested block 4 from layout|')
def test_level2(self):
tmpl = env.get_template('level2')
assert tmpl.render() == ('|block 1 from level1|nested block 5 from '
'level2|nested block 4 from layout|')
def test_level3(self):
tmpl = env.get_template('level3')
assert tmpl.render() == ('|block 1 from level1|block 5 from level3|'
'block 4 from level3|')
def test_level4(self):
tmpl = env.get_template('level4')
assert tmpl.render() == ('|block 1 from level1|block 5 from '
'level3|block 3 from level4|')
def test_super(self):
env = Environment(loader=DictLoader({
'a': '{% block intro %}INTRO{% endblock %}|'
'BEFORE|{% block data %}INNER{% endblock %}|AFTER',
'b': '{% extends "a" %}{% block data %}({{ '
'super() }}){% endblock %}',
'c': '{% extends "b" %}{% block intro %}--{{ '
'super() }}--{% endblock %}\n{% block data '
'%}[{{ super() }}]{% endblock %}'
}))
tmpl = env.get_template('c')
assert tmpl.render() == '--INTRO--|BEFORE|[(INNER)]|AFTER'
def test_working(self):
tmpl = env.get_template('working')
def test_reuse_blocks(self):
tmpl = env.from_string('{{ self.foo() }}|{% block foo %}42'
'{% endblock %}|{{ self.foo() }}')
assert tmpl.render() == '42|42|42'
def test_preserve_blocks(self):
env = Environment(loader=DictLoader({
'a': '{% if false %}{% block x %}A{% endblock %}{% endif %}{{ self.x() }}',
'b': '{% extends "a" %}{% block x %}B{{ super() }}{% endblock %}'
}))
tmpl = env.get_template('b')
assert tmpl.render() == 'BA'
def test_dynamic_inheritance(self):
env = Environment(loader=DictLoader({
'master1': 'MASTER1{% block x %}{% endblock %}',
'master2': 'MASTER2{% block x %}{% endblock %}',
'child': '{% extends master %}{% block x %}CHILD{% endblock %}'
}))
tmpl = env.get_template('child')
for m in range(1, 3):
assert tmpl.render(master='master%d' % m) == 'MASTER%dCHILD' % m
def test_multi_inheritance(self):
env = Environment(loader=DictLoader({
'master1': 'MASTER1{% block x %}{% endblock %}',
'master2': 'MASTER2{% block x %}{% endblock %}',
'child': '''{% if master %}{% extends master %}{% else %}{% extends
'master1' %}{% endif %}{% block x %}CHILD{% endblock %}'''
}))
tmpl = env.get_template('child')
assert tmpl.render(master='master2') == 'MASTER2CHILD'
assert tmpl.render(master='master1') == 'MASTER1CHILD'
assert tmpl.render() == 'MASTER1CHILD'
def test_scoped_block(self):
env = Environment(loader=DictLoader({
'master.html': '{% for item in seq %}[{% block item scoped %}'
'{% endblock %}]{% endfor %}'
}))
t = env.from_string('{% extends "master.html" %}{% block item %}'
'{{ item }}{% endblock %}')
assert t.render(seq=range(5)) == '[0][1][2][3][4]'
def test_super_in_scoped_block(self):
env = Environment(loader=DictLoader({
'master.html': '{% for item in seq %}[{% block item scoped %}'
'{{ item }}{% endblock %}]{% endfor %}'
}))
t = env.from_string('{% extends "master.html" %}{% block item %}'
'{{ super() }}|{{ item * 2 }}{% endblock %}')
assert t.render(seq=range(5)) == '[0|0][1|2][2|4][3|6][4|8]'
def test_scoped_block_after_inheritance(self):
env = Environment(loader=DictLoader({
'layout.html': '''
{% block useless %}{% endblock %}
''',
'index.html': '''
{%- extends 'layout.html' %}
{% from 'helpers.html' import foo with context %}
{% block useless %}
{% for x in [1, 2, 3] %}
{% block testing scoped %}
{{ foo(x) }}
{% endblock %}
{% endfor %}
{% endblock %}
''',
'helpers.html': '''
{% macro foo(x) %}{{ the_foo + x }}{% endmacro %}
'''
}))
rv = env.get_template('index.html').render(the_foo=42).split()
assert rv == ['43', '44', '45']
class BugFixTestCase(JinjaTestCase):
def test_fixed_macro_scoping_bug(self):
assert Environment(loader=DictLoader({
'test.html': '''\
{% extends 'details.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block inner_box %}
{{ my_macro() }}
{% endblock %}
''',
'details.html': '''\
{% extends 'standard.html' %}
{% macro my_macro() %}
my_macro
{% endmacro %}
{% block content %}
{% block outer_box %}
outer_box
{% block inner_box %}
inner_box
{% endblock %}
{% endblock %}
{% endblock %}
''',
'standard.html': '''
{% block content %} {% endblock %}
'''
})).get_template("test.html").render().split() == [u'outer_box', u'my_macro']
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(InheritanceTestCase))
suite.addTest(unittest.makeSuite(BugFixTestCase))
return suite
| apache-2.0 |
pligor/predicting-future-product-prices | 04_time_series_prediction/gp_opt/price_history_27_gp_opt.py | 1 | 6918 | from models.model_21_price_history_seq2seq_dyn_dec_ins import PriceHistorySeq2SeqDynDecIns
import pickle
import dill
from os import path, remove
import numpy as np
from skopt.space.space import Integer, Real, Categorical
from skopt import gp_minimize
import tensorflow as tf
from mylibs.jupyter_notebook_helper import MyOptimizeResult
class PriceHistoryGpOpt(object):
NUM_GPUS = 1
LAMDA2_COUNT = 3
# (silly?) idea: use random instances instead of the full training dataset on every Bayesian optimization run (not sure about this)
def __init__(self, model, stats_npy_filename, cv_score_dict_npy_filename, res_gp_filename, bayes_opt_dir,
random_state=None, plotter=None, **kwargs):
super(PriceHistoryGpOpt, self).__init__()
self.model = model
self.static_params = kwargs
self.plotter = plotter
self.random_state = random_state
self.stats_filepath = bayes_opt_dir + '/' + stats_npy_filename + '.npy'
self.cv_score_dict_filepath = bayes_opt_dir + '/' + cv_score_dict_npy_filename + '.npy'
self.res_gp_filepath = bayes_opt_dir + '/{}.pickle'.format(res_gp_filename)
def run_opt(self, n_random_starts, n_calls):
if path.isfile(self.res_gp_filepath):
with open(self.res_gp_filepath) as fp: # Python 3: open(..., 'rb')
opt_res = pickle.load(fp)
else:
res_gp = self.gpOptimization(n_random_starts=n_random_starts, n_calls=n_calls)
opt_res = MyOptimizeResult(res_gp=res_gp)
with open(self.res_gp_filepath, 'w') as fp: # Python 3: open(..., 'wb')
pickle.dump(opt_res, fp)
return opt_res
def objective(self, params): # Here we define the metric we want to minimise
params_str = "params: {}".format(params)
print 'num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim, lambdas'
print params_str
# try:
cv_score, stats_list = self.get_or_calc(params=params)
# save everytime in case it crashes
self.__save_dictionary(filepath=self.stats_filepath, key=params, val=stats_list)
self.__save_dictionary(filepath=self.cv_score_dict_filepath, key=params, val=cv_score)
if self.plotter is not None:
self.plotter(stats_list=stats_list, label_text=params_str)
# except AssertionError:
# cv_score = None
#
# return None
return cv_score # minimize validation error
def get_or_calc(self, params):
params = tuple(params)
if path.isfile(self.cv_score_dict_filepath):
cv_score_dict = np.load(self.cv_score_dict_filepath)[()]
if params in cv_score_dict:
stats_dic = np.load(self.stats_filepath)[()]
assert params in stats_dic, 'if you have created a cv score you must have saved the stats list before'
cv_score, stats_list = cv_score_dict[params], stats_dic[params]
else:
cv_score, stats_list = self.calc(params=params)
else:
cv_score, stats_list = self.calc(params=params)
return cv_score, stats_list
def calc(self, params):
num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim, mobile_attrs_dim = params[
:-self.LAMDA2_COUNT]
lamda2_list = params[-self.LAMDA2_COUNT:]
cv_score, stats_list = self.model.get_cross_validation_score(
enc_num_units=num_units,
dec_num_units=num_units,
keep_prob_rnn_out=keep_prob_rnn_out,
keep_prob_readout=keep_prob_readout,
learning_rate=learning_rate,
rnn_hidden_dim=rnn_hidden_dim,
mobile_attrs_dim=mobile_attrs_dim,
lamda2=lamda2_list,
# DO NOT TEST
decoder_first_input=PriceHistorySeq2SeqDynDecIns.DECODER_FIRST_INPUT.ZEROS,
batch_norm_enabled=True,
**self.static_params
)
return cv_score, stats_list
@staticmethod
def __save_dictionary(filepath, key, val):
if filepath is not None:
stats_dic = np.load(filepath)[()] if path.isfile(filepath) else dict()
stats_dic[tuple(key)] = val
np.save(filepath, stats_dic)
# def __clear_previously_saved_files(self):
# #filepaths = [self.stats_filepath, self.cv_score_dict_filepath]
# filepaths = [self.stats_filepath,]
# for filepath in filepaths:
# if path.isfile(filepath):
# remove(self.stats_filepath) # delete previously saved file
def gpOptimization(self, n_random_starts, n_calls):
# self.__clear_previously_saved_files()
# here we will exploit the information from the previous experiment to calibrate what we think are the best parameters
# best_params = [500, #50 was obviously small so we are going to range it from 300 to 700
# tf.nn.tanh, #activation we are not going to try and guess via gp opt, but just use this one
# 0.0001, #since we had as optimal the smallest one we are going to try and allow also smaller values
# 0.62488034788862112,
# 0.001]
num_units = Integer(300, 600) # the upper limit is mostly because of computational resources
rnn_hidden_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
mobile_attrs_dim = Integer(100, 300) # the upper limit is mostly because of computational resources
keep_prob_rnn_out = Real(0.5, 1.0, prior='uniform') # uniform or log-uniform
keep_prob_readout = Real(0.5, 1.0, prior='uniform')
learning_rate = Real(1e-6, 1e-2, prior='log-uniform') # uniform or log-uniform
lamda2_list = [Real(1e-5, 1e0, prior='log-uniform')] * self.LAMDA2_COUNT # uniform or log-uniform
space = [num_units, keep_prob_rnn_out, keep_prob_readout, learning_rate, rnn_hidden_dim,
mobile_attrs_dim] + lamda2_list
return gp_minimize(
func=self.objective, # function that we wish to minimise
dimensions=space, # the search space for the hyper-parameters
# x0=x0, #inital values for the hyper-parameters
n_calls=n_calls, # number of times the function will be evaluated
random_state=self.random_state, # random seed
n_random_starts=n_random_starts, # before we start modelling the optimised function with a GP Regression
# model, we want to try a few random choices for the hyper-parameters.
# kappa=1.9, # trade-off between exploration vs. exploitation.
n_jobs=self.NUM_GPUS
)
| agpl-3.0 |
shreyasva/tensorflow | tensorflow/tensorboard/backend/float_wrapper_test.py | 4 | 1963 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
from tensorflow.python.platform import googletest
from tensorflow.tensorboard.backend import float_wrapper
_INFINITY = float('inf')
class FloatWrapperTest(googletest.TestCase):
def _assertWrapsAs(self, to_wrap, expected):
"""Asserts that |to_wrap| becomes |expected| when wrapped."""
actual = float_wrapper.WrapSpecialFloats(to_wrap)
for a, e in zip(actual, expected):
self.assertEqual(e, a)
def testWrapsPrimitives(self):
self._assertWrapsAs(_INFINITY, 'Infinity')
self._assertWrapsAs(-_INFINITY, '-Infinity')
self._assertWrapsAs(float('nan'), 'NaN')
def testWrapsObjectValues(self):
self._assertWrapsAs({'x': _INFINITY}, {'x': 'Infinity'})
def testWrapsObjectKeys(self):
self._assertWrapsAs({_INFINITY: 'foo'}, {'Infinity': 'foo'})
def testWrapsInListsAndTuples(self):
self._assertWrapsAs([_INFINITY], ['Infinity'])
# map() returns a list even if the argument is a tuple.
self._assertWrapsAs((_INFINITY,), ['Infinity',])
def testWrapsRecursively(self):
self._assertWrapsAs({'x': [_INFINITY]}, {'x': ['Infinity']})
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
ifduyue/sentry | src/sentry/api/endpoints/system_health.py | 2 | 1205 | from __future__ import absolute_import
import itertools
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from sentry import status_checks
from sentry.status_checks import sort_by_severity
from sentry.api.base import Endpoint
from sentry.auth.superuser import is_active_superuser
from sentry.utils.hashlib import md5_text
class SystemHealthEndpoint(Endpoint):
permission_classes = (IsAuthenticated, )
def get(self, request):
if not is_active_superuser(request):
return Response()
results = status_checks.check_all()
return Response(
{
'problems': [
{
'id': md5_text(problem.message).hexdigest(),
'message': problem.message,
'severity': problem.severity,
'url': problem.url,
} for problem in
sort_by_severity(itertools.chain.from_iterable(results.values()))
],
'healthy':
{type(check).__name__: not problems for check, problems in results.items()},
}
)
| bsd-3-clause |
marcomaccio/python-docs-samples | scripts/auto_link_to_docs.py | 1 | 3864 | #!/usr/bin/env python
# Copyright (C) 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Process docs-links.json and updates all READMEs and replaces
<!-- auto-doc-link --><!-- end-auto-doc-link -->
With a generated list of documentation backlinks.
"""
from collections import defaultdict
import json
import os
import re
REPO_ROOT = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..'))
DOC_SITE_ROOT = 'https://cloud.google.com'
AUTO_DOC_LINK_EXP = re.compile(
r'<!-- auto-doc-link -->.*?<!-- end-auto-doc-link -->\n',
re.DOTALL)
def invert_docs_link_map(docs_links):
"""
The docs links map is in this format:
{
"doc_path": [
"file_path",
]
}
This transforms it to:
{
"file_path": [
"doc_path",
]
}
"""
files_to_docs = defaultdict(list)
for doc, files in docs_links.iteritems():
for file in files:
files_to_docs[file].append(doc)
files_to_docs[file] = list(set(files_to_docs[file]))
return files_to_docs
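# Illustrative sketch (added; the paths are made up) of the inversion
# described in the docstring above.
def _invert_example():
    links = {'/appengine/docs': ['a.py', 'b.py'], '/storage/docs': ['a.py']}
    inverted = invert_docs_link_map(links)
    assert sorted(inverted['a.py']) == ['/appengine/docs', '/storage/docs']
    assert inverted['b.py'] == ['/appengine/docs']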
def collect_docs_for_readmes(files_to_docs):
"""
There's a one-to-many relationship between readmes and files. This method
finds the readme for each file and consolidates all docs references.
"""
readmes_to_docs = defaultdict(list)
for file, docs in files_to_docs.iteritems():
readme = get_readme_path(file)
readmes_to_docs[readme].extend(docs)
readmes_to_docs[readme] = list(set(readmes_to_docs[readme]))
return readmes_to_docs
def linkify(docs):
"""Adds the documentation site root to doc paths, creating a full URL."""
return [DOC_SITE_ROOT + x for x in docs]
def replace_contents(file_path, regex, new_content):
with open(file_path, 'r+') as f:
content = f.read()
content = regex.sub(new_content, content)
f.seek(0)
f.write(content)
def get_readme_path(file_path):
"""Gets the readme for an associated sample file, basically just the
README.md in the same directory."""
dir = os.path.dirname(file_path)
readme = os.path.join(
REPO_ROOT, dir, 'README.md')
return readme
def generate_doc_link_statement(docs):
links = linkify(docs)
if len(links) == 1:
return """<!-- auto-doc-link -->
These samples are used on the following documentation page:
> {}
<!-- end-auto-doc-link -->
""".format(links.pop())
else:
return """<!-- auto-doc-link -->
These samples are used on the following documentation pages:
>
{}
<!-- end-auto-doc-link -->
""".format('\n'.join(['* {}'.format(x) for x in links]))
def update_readme(readme_path, docs):
if not os.path.exists(readme_path):
print('{} doesn\'t exist'.format(readme_path))
return
replace_contents(
readme_path,
AUTO_DOC_LINK_EXP,
generate_doc_link_statement(docs))
print('Updated {}'.format(readme_path))
def main():
docs_links = json.load(open(
os.path.join(REPO_ROOT, 'scripts', 'docs-links.json'), 'r'))
files_to_docs = invert_docs_link_map(docs_links)
readmes_to_docs = collect_docs_for_readmes(files_to_docs)
for readme, docs in readmes_to_docs.iteritems():
update_readme(readme, docs)
if __name__ == '__main__':
main()
| apache-2.0 |
jdgwartney/boundary-plugin-shell | metric_item_test.py | 3 | 1923 | #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from metric_item import MetricItem
class TestMetricItem(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
m = MetricItem();
def tearDown(self):
unittest.TestCase.tearDown(self)
def testConstructor(self):
m = MetricItem()
def testName(self):
m = MetricItem();
m.setName("foo")
self.assertEquals(m.getName(),"foo","Names not equal")
def testNameType(self):
m = MetricItem();
m.setName("foo")
self.assertTrue(type(m.getName()) == str,"Name is not a string")
def testPollInterval(self):
m = MetricItem();
m.setPollingInterval(100)
self.assertEquals(m.getPollingInterval(),100,"Poll intervals not equal")
def testPollIntervalType(self):
m = MetricItem();
m.setPollingInterval(1000)
self.assertTrue(type(m.getPollingInterval()) == int)
def testDebug(self):
m = MetricItem();
m.setDebug(True)
self.assertEquals(m.getDebug(),True,"Debug not equal")
def testCommand(self):
m = MetricItem()
m.setCommand("snafu")
self.assertEquals(m.getCommand(),"snafu","Commands not equal")
if __name__ == '__main__':
unittest.main() | apache-2.0 |
varunagrawal/azure-services | varunagrawal/site-packages/django/utils/unittest/main.py | 332 | 9388 | """Unittest main program"""
import sys
import os
import types
from django.utils.unittest import loader, runner
try:
from django.utils.unittest.signals import installHandler
except ImportError:
installHandler = None
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s test_module.TestClass - run tests from
test_module.TestClass
%(progName)s test_module.TestClass.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (defaults to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = USAGE_FROM_MODULE
# defaults for testing
failfast = catchbreak = buffer = progName = None
def __init__(self, module='__main__', defaultTest=None,
argv=None, testRunner=None,
testLoader=loader.defaultTestLoader, exit=True,
verbosity=1, failfast=None, catchbreak=None, buffer=None):
if isinstance(module, basestring):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.buffer = buffer
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
if msg:
print msg
usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
'buffer': ''}
if self.failfast != False:
usage['failfast'] = FAILFAST
if self.catchbreak != False and installHandler is not None:
usage['catchbreak'] = CATCHBREAK
if self.buffer != False:
usage['buffer'] = BUFFEROUTPUT
print self.USAGE % usage
sys.exit(2)
def parseArgs(self, argv):
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
import getopt
long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
try:
options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-f','--failfast'):
if self.failfast is None:
self.failfast = True
# Should this raise an exception if -f is not valid?
if opt in ('-c','--catch'):
if self.catchbreak is None and installHandler is not None:
self.catchbreak = True
# Should this raise an exception if -c is not valid?
if opt in ('-b','--buffer'):
if self.buffer is None:
self.buffer = True
# Should this raise an exception if -b is not valid?
if len(args) == 0 and self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif len(args) > 0:
self.testNames = args
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
else:
self.testNames = (self.defaultTest,)
self.createTests()
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
if self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _do_discovery(self, argv, Loader=loader.TestLoader):
# handle command line args for test discovery
self.progName = '%s discover' % self.progName
import optparse
parser = optparse.OptionParser()
parser.prog = self.progName
parser.add_option('-v', '--verbose', dest='verbose', default=False,
help='Verbose output', action='store_true')
if self.failfast != False:
parser.add_option('-f', '--failfast', dest='failfast', default=False,
help='Stop on first fail or error',
action='store_true')
if self.catchbreak != False and installHandler is not None:
parser.add_option('-c', '--catch', dest='catchbreak', default=False,
help='Catch ctrl-C and display results so far',
action='store_true')
if self.buffer != False:
parser.add_option('-b', '--buffer', dest='buffer', default=False,
help='Buffer stdout and stderr during tests',
action='store_true')
parser.add_option('-s', '--start-directory', dest='start', default='.',
help="Directory to start discovery ('.' default)")
parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
help="Pattern to match tests ('test*.py' default)")
parser.add_option('-t', '--top-level-directory', dest='top', default=None,
help='Top level directory of project (defaults to start directory)')
options, args = parser.parse_args(argv)
if len(args) > 3:
self.usageExit()
for name, value in zip(('start', 'pattern', 'top'), args):
setattr(options, name, value)
# only set options from the parsing here
# if they weren't set explicitly in the constructor
if self.failfast is None:
self.failfast = options.failfast
if self.catchbreak is None and installHandler is not None:
self.catchbreak = options.catchbreak
if self.buffer is None:
self.buffer = options.buffer
if options.verbose:
self.verbosity = 2
start_dir = options.start
pattern = options.pattern
top_level_dir = options.top
loader = Loader()
self.test = loader.discover(start_dir, pattern, top_level_dir)
def runTests(self):
if self.catchbreak:
installHandler()
if self.testRunner is None:
self.testRunner = runner.TextTestRunner
if isinstance(self.testRunner, (type, types.ClassType)):
try:
testRunner = self.testRunner(verbosity=self.verbosity,
failfast=self.failfast,
buffer=self.buffer)
except TypeError:
# didn't accept the verbosity, buffer or failfast arguments
testRunner = self.testRunner()
else:
# it is assumed to be a TestRunner instance
testRunner = self.testRunner
self.result = testRunner.run(self.test)
if self.exit:
sys.exit(not self.result.wasSuccessful())
main = TestProgram
def main_():
TestProgram.USAGE = USAGE_AS_MAIN
main(module=None)
| gpl-2.0 |
mlgruby/mining | mining/utils/__init__.py | 1 | 1603 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import re
import os
import unicodedata
import ConfigParser
from bson import ObjectId
from datetime import datetime
from mining.settings import PROJECT_PATH
def slugfy(text):
slug = unicodedata.normalize("NFKD", text).encode("UTF-8", "ignore")
slug = re.sub(r"[^\w]+", " ", slug)
slug = "-".join(slug.lower().strip().split())
if not slug:
return None
return slug
def conf(section, ini="mining.ini"):
config = ConfigParser.ConfigParser()
config.read(os.path.join(PROJECT_PATH, ini))
_dict = {}
options = config.options(section)
for option in options:
try:
_dict[option] = config.get(section, option)
except:
_dict[option] = None
if 'sql_conn_params' in options:
import ast
_dict['sql_conn_params'] = ast.literal_eval(_dict['sql_conn_params'])
else:
_dict['sql_conn_params'] = {}
return _dict
def log_it(s, name=u"core"):
with open("/tmp/openmining-{}.log".format(name), "a") as log:
msg = u"{} => {}\n".format(datetime.now(), s)
log.write(msg.encode('utf-8'))
def parse_dumps(obj):
if isinstance(obj, datetime):
return str(obj.strftime("%Y-%m-%d %H:%M:%S"))
if isinstance(obj, ObjectId):
return str(obj)
return json.JSONEncoder().default(obj)  # raises TypeError for unsupported types
def __from__(path):
try:
_import = path.split('.')[-1]
_from = u".".join(path.split('.')[:-1])
return getattr(__import__(_from, fromlist=[_import]), _import)
except:
return object
| mit |
nopjmp/SickRage | lib/twilio/access_token.py | 21 | 4382 | import time
from twilio import jwt
class IpMessagingGrant(object):
""" Grant to access Twilio IP Messaging """
def __init__(self, service_sid=None, endpoint_id=None,
deployment_role_sid=None, push_credential_sid=None):
self.service_sid = service_sid
self.endpoint_id = endpoint_id
self.deployment_role_sid = deployment_role_sid
self.push_credential_sid = push_credential_sid
@property
def key(self):
return "ip_messaging"
def to_payload(self):
grant = {}
if self.service_sid:
grant['service_sid'] = self.service_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
if self.deployment_role_sid:
grant['deployment_role_sid'] = self.deployment_role_sid
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
return grant
class ConversationsGrant(object):
""" Grant to access Twilio Conversations """
def __init__(self, configuration_profile_sid=None):
self.configuration_profile_sid = configuration_profile_sid
@property
def key(self):
return "rtc"
def to_payload(self):
grant = {}
if self.configuration_profile_sid:
grant['configuration_profile_sid'] = self.configuration_profile_sid
return grant
class VoiceGrant(object):
""" Grant to access Twilio Programmable Voice"""
def __init__(self,
outgoing_application_sid=None,
outgoing_application_params=None,
push_credential_sid=None,
endpoint_id=None):
self.outgoing_application_sid = outgoing_application_sid
""" :type : str """
self.outgoing_application_params = outgoing_application_params
""" :type : dict """
self.push_credential_sid = push_credential_sid
""" :type : str """
self.endpoint_id = endpoint_id
""" :type : str """
@property
def key(self):
return "voice"
def to_payload(self):
grant = {}
if self.outgoing_application_sid:
grant['outgoing'] = {}
grant['outgoing']['application_sid'] = \
self.outgoing_application_sid
if self.outgoing_application_params:
grant['outgoing']['params'] = self.outgoing_application_params
if self.push_credential_sid:
grant['push_credential_sid'] = self.push_credential_sid
if self.endpoint_id:
grant['endpoint_id'] = self.endpoint_id
return grant
class VideoGrant(object):
""" Grant to access Twilio Video """
def __init__(self, configuration_profile_sid=None):
self.configuration_profile_sid = configuration_profile_sid
@property
def key(self):
return "video"
def to_payload(self):
grant = {}
if self.configuration_profile_sid:
grant['configuration_profile_sid'] = self.configuration_profile_sid
return grant
class AccessToken(object):
""" Access Token used to access Twilio Resources """
def __init__(self, account_sid, signing_key_sid, secret,
identity=None, ttl=3600, nbf=None):
self.account_sid = account_sid
self.signing_key_sid = signing_key_sid
self.secret = secret
self.identity = identity
self.ttl = ttl
self.nbf = nbf
self.grants = []
def add_grant(self, grant):
self.grants.append(grant)
def to_jwt(self, algorithm='HS256'):
now = int(time.time())
headers = {
"typ": "JWT",
"cty": "twilio-fpa;v=1"
}
grants = {}
if self.identity:
grants["identity"] = self.identity
for grant in self.grants:
grants[grant.key] = grant.to_payload()
payload = {
"jti": '{0}-{1}'.format(self.signing_key_sid, now),
"iss": self.signing_key_sid,
"sub": self.account_sid,
"exp": now + self.ttl,
"grants": grants
}
if self.nbf is not None:
payload['nbf'] = self.nbf
return jwt.encode(payload, self.secret, headers=headers,
algorithm=algorithm)
def __str__(self):
return self.to_jwt()
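# Minimal usage sketch (placeholder SIDs/secret, not real credentials):
#
#   token = AccessToken('ACxxxx', 'SKxxxx', 'secret', identity='alice')
#   token.add_grant(IpMessagingGrant(service_sid='ISxxxx'))
#   signed = token.to_jwt()  # HS256-signed JWT carrying the grants payload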
| gpl-3.0 |
inspirehep/inspire-schemas | tests/unit/test_job_builder.py | 1 | 16050 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2019 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import pytest
from inspire_schemas.builders.jobs import JobBuilder
def test_no_data():
expected = {
'_collections': ['Jobs'],
'status': 'pending'
}
builder = JobBuilder()
assert builder.record == expected
def test_data_in_init():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'some_key': 'some_value',
'some_key_with_list': ['some', 'list'],
}
builder = JobBuilder(expected)
assert builder.record == expected
def test_ensure_field_no_field():
builder = JobBuilder()
assert 'test_field' not in builder.record
builder._ensure_field('test_field', default_value='test_value')
assert 'test_field' in builder.record
assert builder.record['test_field'] == 'test_value'
def test_ensure_field_existing_field():
builder = JobBuilder()
assert 'status' in builder.record
builder._ensure_field('status', 'other_status')
assert builder.record['status'] == 'pending'
def test_ensure_field_separate():
builder = JobBuilder()
obj = {'field_one': 'value'}
builder._ensure_field('test_field', default_value='test_value', obj=obj)
builder._ensure_field('field_one', 'wrong_value', obj=obj)
assert 'test_field' in obj
assert obj['test_field'] == 'test_value'
assert obj['field_one'] == 'value'
def test_ensure_list_field_missing():
builder = JobBuilder()
assert 'list_field' not in builder.record
builder._ensure_list_field('list_field')
assert 'list_field' in builder.record
assert builder.record['list_field'] == []
def test_prepare_url():
expected1 = {'value': 'http://url1.com'}
expected2 = {'description': 'Url description', 'value': 'http://url2.com'}
builder = JobBuilder()
url1 = builder._prepare_url('http://url1.com')
url2 = builder._prepare_url('http://url2.com', 'Url description')
with pytest.raises(TypeError):
builder._prepare_url(description='only description')
assert url1 == expected1
assert url2 == expected2
def test_ensure_list_on_existing():
builder = JobBuilder()
builder._ensure_list_field('_collections')
assert builder.record['_collections'] == ['Jobs']
def test_ensure_dict_field_missing():
builder = JobBuilder()
builder.record['existing_dict'] = {'some_dict': 'some_value'}
assert 'dict_field' not in builder.record
builder._ensure_dict_field('dict_field')
assert 'dict_field' in builder.record
assert builder.record['dict_field'] == {}
def test_ensure_dict_field_existing():
builder = JobBuilder()
builder.record['existing_dict'] = {'some_dict': 'some_value'}
builder._ensure_dict_field('existing_dict')
assert builder.record['existing_dict'] == {'some_dict': 'some_value'}
def test_sourced_dict_local_source():
builder = JobBuilder(source='global')
expected = {
'source': 'local',
'value': 'foo'
}
result = builder._sourced_dict('local', value='foo')
assert result == expected
def test_sourced_dict_global_source():
builder = JobBuilder(source='global')
expected = {
'source': 'global',
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_sourced_dict_no_source():
builder = JobBuilder()
expected = {
'value': 'foo'
}
result = builder._sourced_dict(None, value='foo')
assert result == expected
def test_append_to_field_some_simple_data():
builder = JobBuilder()
builder._append_to('test_field', 'first_element')
assert 'test_field' in builder.record
assert builder.record['test_field'] == ['first_element']
builder._append_to('test_field', 'second_element')
assert builder.record['test_field'] == ['first_element', 'second_element']
def test_append_to_field_duplicated_simple_data():
builder = JobBuilder()
builder._append_to('test_field', 'first_element')
builder._append_to('test_field', 'second_element')
builder._append_to('test_field', 'first_element')
builder._append_to('test_field', 'second_element')
assert builder.record['test_field'] == ['first_element', 'second_element']
def test_append_to_field_complex_data():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', element_one)
assert builder.record['some_field'] == [element_one]
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_append_to_field_duplicated_complex_data():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', element_one)
builder._append_to('some_field', element_two)
builder._append_to('some_field', element_one)
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_append_to_field_from_kwargs():
element_one = {
'key': 'value',
'list_key': ['some', 'values'],
'dict_key': {
'key': 'another_value',
'something': 'else'
}
}
element_two = {
'key': 'value2',
'other_list_key': ['some', 'values'],
}
builder = JobBuilder()
builder._append_to('some_field', **element_one)
assert builder.record['some_field'] == [element_one]
builder._append_to('some_field', element_two)
assert builder.record['some_field'] == [element_one, element_two]
def test_add_private_note_with_source():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'_private_notes': [{'source': 'http://some/source', 'value': 'Note'}]
}
builder = JobBuilder()
builder.add_private_note("Note", "http://some/source")
assert builder.record == expected
def test_add_private_note_without_source():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'_private_notes': [{'value': 'Note'}]
}
builder = JobBuilder()
builder.add_private_note("Note", "")
assert builder.record == expected
def test_add_accelerator_experiment():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'accelerator_experiments': [{
'accelerator': 'accelerator',
'curated_relation': False,
'experiment': 'test1',
'institution': 'test2',
'legacy_name': 'test3',
'record': {'$ref': 'http://something'}
}]
}
builder = JobBuilder()
builder.add_accelerator_experiment(
'accelerator', False, 'test1', 'test2', 'test3', 'http://something'
)
assert builder.record == expected
def test_add_acquisition_source():
expected = {
'_collections': ['Jobs'],
'status': 'pending',
'acquisition_source': {
'source': 'source',
'submission_number': '12345',
'datetime': '1999-02-01',
'email': 'email@email.com',
'method': 'method',
'orcid': 'orcid',
'internal_uid': 'uuid'
}
}
expected2 = {
'_collections': ['Jobs'],
'status': 'pending',
'acquisition_source': {'submission_number': 'None', 'email': 'blah@email.gov'}
}
builder = JobBuilder()
builder.add_acquisition_source(
'1999-02-01', 'email@email.com', 'uuid', 'method', 'orcid', 'source', 12345
)
assert builder.record == expected
builder.add_acquisition_source(email='blah@email.gov')
assert builder.record == expected2
def test_add_arxiv_category():
expected = {
'_collections': ['Jobs'], 'status': 'pending',
'arxiv_categories': ['cat1', 'cat2']
}
builder = JobBuilder()
builder.add_arxiv_category('cat1')
builder.add_arxiv_category('cat2')
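    # 'other' is not accepted as an arXiv category, so neither spelling of the
    # same value below should end up in the record (see `expected` above)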
builder.add_arxiv_category('other')
builder.add_arxiv_category(''.join(list('other')))
assert builder.record == expected
def test_add_contact():
expected = [
{
'name': 'name',
'email': 'email',
'curated_relation': True,
'record': {'$ref': 'http://nothing'}
},
{
'name': 'name2',
'email': 'email2'
},
{
'name': 'name3',
},
{
'email': 'email3'
}
]
builder = JobBuilder()
builder.add_contact(
name='name', email='email', curated_relation=True, record='http://nothing'
)
builder.add_contact(
name='name2',
email='email2'
)
builder.add_contact(name='name3')
builder.add_contact(email='email3')
assert builder.record['contact_details'] == expected
def test_add_external_system_identifiers():
expected = [
{'schema': 'schema1', 'value': 'value1'},
{'schema': 'schema2', 'value': 'value2'}
]
builder = JobBuilder()
builder.add_external_system_identifiers('value1', 'schema1')
builder.add_external_system_identifiers(schema='schema2', value='value2')
with pytest.raises(TypeError):
builder.add_external_system_identifiers('aaaaa')
assert builder.record['external_system_identifiers'] == expected
def test_add_institution():
expected = [
{
'value': 'value',
'curated_relation': False,
'record': {'$ref': 'http://xyz'}
},
{'value': 'value2'}
]
builder = JobBuilder()
builder.add_institution(
value='value',
curated_relation=False,
record={'$ref': 'http://xyz'}
)
builder.add_institution('value2')
with pytest.raises(TypeError):
builder.add_institution(record='blah')
assert builder.record['institutions'] == expected
def test_add_rank():
expected = ['Rank1', 'Rank2']
builder = JobBuilder()
builder.add_rank('Rank1')
builder.add_rank('Rank2')
assert builder.record['ranks'] == expected
def test_add_reference_emails():
expected = {'emails': ['email@domain.xxx', 'other@cern.ch']}
builder = JobBuilder()
builder.add_reference_email('email@domain.xxx')
builder.add_reference_email('other@cern.ch')
builder.add_reference_email('')
assert builder.record['reference_letters'] == expected
def test_reference_urls():
expected = {
'urls': [
{'value': 'http://some_url.ch'},
{'value': 'http://other.url.com', 'description': 'url description'}
]
}
builder = JobBuilder()
builder.add_reference_url('http://some_url.ch')
builder.add_reference_url('http://other.url.com', "url description")
builder.add_reference_url('')
assert builder.record['reference_letters'] == expected
def test_add_reference_both():
expected = {
'emails': ['poczta@domena.pl', 'postane@domain.tr'],
'urls': [
{'value': 'https://jakas_strona.pl'},
{'value': 'http://xyz.uk', 'description': 'Some description'}
]
}
builder = JobBuilder()
builder.add_reference_email('poczta@domena.pl')
builder.add_reference_email('postane@domain.tr')
builder.add_reference_url("https://jakas_strona.pl")
builder.add_reference_url('http://xyz.uk', 'Some description')
assert builder.record['reference_letters'] == expected
def test_add_region():
expected = ['Region1', 'Region2']
builder = JobBuilder()
builder.add_region('Region1')
builder.add_region('Region2')
assert builder.record['regions'] == expected
def test_add_url():
expected = [
{'value': 'http://url.com'},
{'value': 'https://url2.ch', 'description': 'Description for this url'}
]
builder = JobBuilder()
builder.add_url('http://url.com')
builder.add_url('https://url2.ch', 'Description for this url')
with pytest.raises(TypeError):
builder.add_url(description="some description")
assert builder.record['urls'] == expected
def test_set_deadline():
expected1 = "2099-02-15"
expected2 = "1099-09-20"
builder = JobBuilder()
builder.set_deadline(expected1)
assert builder.record['deadline_date'] == expected1
builder.set_deadline(expected2)
assert builder.record['deadline_date'] == expected2
def test_set_external_job_identifier():
expected1 = 'Identifier1'
expected2 = 'Other Identifier'
builder = JobBuilder()
builder.set_external_job_identifier(expected1)
assert builder.record['external_job_identifier'] == expected1
builder.set_external_job_identifier(expected2)
assert builder.record['external_job_identifier'] == expected2
def test_set_description():
    expected1 = "Description"
    expected2 = "Other Description"
    builder = JobBuilder()
    builder.set_description(expected1)
    assert builder.record['description'] == expected1
    builder.set_description(expected2)
    assert builder.record['description'] == expected2
def test_set_status():
expected1 = 'pending'
expected2 = 'closed'
builder = JobBuilder()
builder.set_status(expected1)
assert builder.record['status'] == expected1
builder.set_status(expected2)
assert builder.record['status'] == expected2
def test_set_title():
expected1 = 'TITLE1'
expected2 = 'TITLE2'
builder = JobBuilder()
builder.set_title(expected1)
assert builder.record['position'] == expected1
builder.set_title(expected2)
assert builder.record['position'] == expected2
def test_process_reference_contact_list():
contacts = [
"some.email@cern.ch",
"http://some-url.com/other/?url=1&part=2",
"other@email.com"
]
builder = JobBuilder()
builder.add_reference_contacts(contacts)
expected_data = {
'emails': [
'some.email@cern.ch',
'other@email.com'
],
'urls': [
{'value': 'http://some-url.com/other/?url=1&part=2'}
]
}
assert builder.record['reference_letters'] == expected_data
def test_sanitization_of_description():
expected = '<div>Some text <em>emphasized</em> linking to <a href="http://example.com">'\
'http://example.com</a></div>'
description = '<div>Some <span>text</span> <em class="shiny">emphasized</em> linking to '\
'http://example.com</div>'
builder = JobBuilder()
builder.set_description(description)
assert builder.record['description'] == expected
| gpl-2.0 |
FireBladeNooT/Medusa_1_6 | lib/github/tests/IssueComment.py | 1 | 3235 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. #
# http://pygithub.github.io/PyGithub/v1/index.html #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import Framework
import datetime
class IssueComment(Framework.TestCase):
def setUp(self):
Framework.TestCase.setUp(self)
self.comment = self.g.get_user().get_repo("PyGithub").get_issue(28).get_comment(5808311)
def testAttributes(self):
self.assertEqual(self.comment.body, "Comment created by PyGithub")
self.assertEqual(self.comment.created_at, datetime.datetime(2012, 5, 20, 11, 46, 42))
self.assertEqual(self.comment.id, 5808311)
self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 20, 11, 46, 42))
self.assertEqual(self.comment.url, "https://api.github.com/repos/jacquev6/PyGithub/issues/comments/5808311")
self.assertEqual(self.comment.user.login, "jacquev6")
self.assertEqual(self.comment.html_url, "https://github.com/jacquev6/PyGithub/issues/28#issuecomment-5808311")
# test __repr__() based on this attributes
self.assertEqual(self.comment.__repr__(), 'IssueComment(user=NamedUser(login="jacquev6"), id=5808311)')
def testEdit(self):
self.comment.edit("Comment edited by PyGithub")
self.assertEqual(self.comment.body, "Comment edited by PyGithub")
self.assertEqual(self.comment.updated_at, datetime.datetime(2012, 5, 20, 11, 53, 59))
def testDelete(self):
self.comment.delete()
| gpl-3.0 |
Trust-Code/server-tools | auth_admin_passkey/tests/test_auth_admin_passkey.py | 61 | 3838 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Admin Passkey module for OpenERP
# Copyright (C) 2013-2014 GRAP (http://www.grap.coop)
# @author Sylvain LE GAL (https://twitter.com/legalsylvain)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import threading
from openerp.tests.common import TransactionCase
class TestAuthAdminPasskey(TransactionCase):
"""Tests for 'Auth Admin Passkey' Module"""
# Overload Section
def setUp(self):
super(TestAuthAdminPasskey, self).setUp()
# Get Registries
self.imd_obj = self.registry('ir.model.data')
self.ru_obj = self.registry('res.users')
# Get Database name
self.db = threading.current_thread().dbname
# Get ids from xml_ids
self.admin_user_id = self.imd_obj.get_object_reference(
self.cr, self.uid, 'base', 'user_root')[1]
self.demo_user_id = self.imd_obj.get_object_reference(
self.cr, self.uid, 'base', 'user_demo')[1]
# Test Section
def test_01_normal_login_admin_succeed(self):
"""[Regression Test]
Test the succeed of login with 'admin' / 'admin'"""
res = self.ru_obj.authenticate(self.db, 'admin', 'admin', {})
self.assertEqual(
res, self.admin_user_id,
"'admin' / 'admin' login must succeed.")
def test_02_normal_login_admin_fail(self):
"""[Regression Test]
Test the fail of login with 'admin' / 'bad_password'"""
res = self.ru_obj.authenticate(self.db, 'admin', 'bad_password', {})
self.assertEqual(
res, False,
"'admin' / 'bad_password' login must fail.")
def test_03_normal_login_demo_succeed(self):
"""[Regression Test]
Test the succeed of login with 'demo' / 'demo'"""
res = self.ru_obj.authenticate(self.db, 'demo', 'demo', {})
self.assertEqual(
res, self.demo_user_id,
"'demo' / 'demo' login must succeed.")
def test_04_normal_login_demo_fail(self):
"""[Regression Test]
Test the fail of login with 'demo' / 'bad_password'"""
res = self.ru_obj.authenticate(self.db, 'demo', 'bad_password', {})
self.assertEqual(
res, False,
"'demo' / 'bad_password' login must fail.")
def test_05_passkey_login_demo_succeed(self):
"""[New Feature]
Test the succeed of login with 'demo' / 'admin'"""
res = self.ru_obj.authenticate(self.db, 'demo', 'admin', {})
self.assertEqual(
res, self.demo_user_id,
"'demo' / 'admin' login must succeed.")
def test_06_passkey_login_demo_succeed(self):
"""[Bug #1319391]
Test the correct behaviour of login with 'bad_login' / 'admin'"""
exception_raised = False
try:
self.ru_obj.authenticate(self.db, 'bad_login', 'admin', {})
except:
exception_raised = True
self.assertEqual(
exception_raised, False,
"'bad_login' / 'admin' musn't raise Error.")
| agpl-3.0 |
dominicelse/scipy | benchmarks/benchmarks/sparse.py | 1 | 10059 | """
Simple benchmarks for the sparse module
"""
from __future__ import division, print_function, absolute_import
import warnings
import time
import timeit
import numpy
import numpy as np
from numpy import ones, array, asarray, empty, random, zeros
try:
from scipy import sparse
from scipy.sparse import (csr_matrix, coo_matrix, dia_matrix, lil_matrix,
dok_matrix, rand, SparseEfficiencyWarning)
except ImportError:
pass
from .common import Benchmark
def random_sparse(m, n, nnz_per_row):
rows = numpy.arange(m).repeat(nnz_per_row)
cols = numpy.random.randint(0, n, size=nnz_per_row*m)
vals = numpy.random.random_sample(m*nnz_per_row)
return coo_matrix((vals, (rows, cols)), (m, n)).tocsr()
# TODO move this to a matrix gallery and add unittests
def poisson2d(N, dtype='d', format=None):
"""
Return a sparse matrix for the 2D Poisson problem
with standard 5-point finite difference stencil on a
square N-by-N grid.
"""
if N == 1:
diags = asarray([[4]], dtype=dtype)
return dia_matrix((diags, [0]), shape=(1, 1)).asformat(format)
offsets = array([0, -N, N, -1, 1])
diags = empty((5, N**2), dtype=dtype)
diags[0] = 4 # main diagonal
diags[1:] = -1 # all offdiagonals
diags[3, N-1::N] = 0 # first lower diagonal
diags[4, N::N] = 0 # first upper diagonal
return dia_matrix((diags, offsets), shape=(N**2, N**2)).asformat(format)
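# Illustrative sketch (not part of the benchmarks): poisson2d(2).toarray() is
#     [[ 4., -1., -1.,  0.],
#      [-1.,  4.,  0., -1.],
#      [-1.,  0.,  4., -1.],
#      [ 0., -1., -1.,  4.]]
# i.e. the 5-point stencil on a 2x2 grid, with the entries of the +/-1
# diagonals zeroed where they would wrap across a grid-row boundary.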
class Arithmetic(Benchmark):
param_names = ['format', 'XY', 'op']
params = [
['csr'],
['AA', 'AB', 'BA', 'BB'],
['__add__', '__sub__', 'multiply', '__mul__']
]
def setup(self, format, XY, op):
matrices = dict(A=poisson2d(250, format=format),
B=poisson2d(250, format=format)**2)
x = matrices[XY[0]]
self.y = matrices[XY[1]]
self.fn = getattr(x, op)
self.fn(self.y) # warmup
def time_arithmetic(self, format, XY, op):
self.fn(self.y)
class Sort(Benchmark):
params = ['Rand10', 'Rand25', 'Rand50', 'Rand100', 'Rand200']
param_names = ['matrix']
def setup(self, matrix):
n = 10000
if matrix.startswith('Rand'):
k = int(matrix[4:])
self.A = random_sparse(n, n, k)
self.A.has_sorted_indices = False
self.A.indices[:2] = 2, 1
else:
raise NotImplementedError()
def time_sort(self, matrix):
"""sort CSR column indices"""
self.A.sort_indices()
class Matvec(Benchmark):
params = [
['Identity', 'Poisson5pt', 'Block2x2', 'Block3x3'],
['dia', 'csr', 'csc', 'dok', 'lil', 'coo', 'bsr']
]
param_names = ['matrix', 'format']
def setup(self, matrix, format):
if matrix == 'Identity':
if format in ('lil', 'dok'):
raise NotImplementedError()
self.A = sparse.eye(10000, 10000, format=format)
elif matrix == 'Poisson5pt':
self.A = poisson2d(300, format=format)
elif matrix == 'Block2x2':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (2, 2)
self.A = sparse.kron(poisson2d(150),
ones(b)).tobsr(blocksize=b).asformat(format)
elif matrix == 'Block3x3':
if format not in ('csr', 'bsr'):
raise NotImplementedError()
b = (3, 3)
self.A = sparse.kron(poisson2d(100),
ones(b)).tobsr(blocksize=b).asformat(format)
else:
raise NotImplementedError()
self.x = ones(self.A.shape[1], dtype=float)
def time_matvec(self, matrix, format):
self.A * self.x
class Matvecs(Benchmark):
params = ['dia', 'coo', 'csr', 'csc', 'bsr']
param_names = ["format"]
def setup(self, format):
self.A = poisson2d(300, format=format)
self.x = ones((self.A.shape[1], 10), dtype=self.A.dtype)
def time_matvecs(self, format):
self.A * self.x
class Matmul(Benchmark):
def setup(self):
H1, W1 = 1, 100000
H2, W2 = W1, 1000
C1 = 10
C2 = 1000000
random.seed(0)
matrix1 = lil_matrix(zeros((H1, W1)))
matrix2 = lil_matrix(zeros((H2, W2)))
for i in range(C1):
matrix1[random.randint(H1), random.randint(W1)] = random.rand()
for i in range(C2):
matrix2[random.randint(H2), random.randint(W2)] = random.rand()
self.matrix1 = matrix1.tocsr()
self.matrix2 = matrix2.tocsr()
def time_large(self):
for i in range(100):
self.matrix1 * self.matrix2
class Construction(Benchmark):
params = [
['Empty', 'Identity', 'Poisson5pt'],
['lil', 'dok']
]
param_names = ['matrix', 'format']
def setup(self, name, format):
if name == 'Empty':
self.A = coo_matrix((10000, 10000))
elif name == 'Identity':
self.A = sparse.eye(10000, format='coo')
else:
self.A = poisson2d(100, format='coo')
formats = {'lil': lil_matrix, 'dok': dok_matrix}
self.cls = formats[format]
def time_construction(self, name, format):
T = self.cls(self.A.shape)
for i, j, v in zip(self.A.row, self.A.col, self.A.data):
T[i, j] = v
class Conversion(Benchmark):
params = [
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
['csr', 'csc', 'coo', 'dia', 'lil', 'dok'],
]
param_names = ['from_format', 'to_format']
def setup(self, fromfmt, tofmt):
base = poisson2d(100, format=fromfmt)
try:
self.fn = getattr(base, 'to' + tofmt)
except:
def fn():
raise RuntimeError()
self.fn = fn
def time_conversion(self, fromfmt, tofmt):
self.fn()
class Getset(Benchmark):
params = [
[1, 10, 100, 1000, 10000],
['different', 'same'],
['csr', 'csc', 'lil', 'dok']
]
param_names = ['N', 'sparsity pattern', 'format']
unit = "seconds"
def setup(self, N, sparsity_pattern, format):
if format == 'dok' and N > 500:
raise NotImplementedError()
self.A = rand(1000, 1000, density=1e-5)
A = self.A
N = int(N)
# indices to assign to
i, j = [], []
while len(i) < N:
n = N - len(i)
ip = numpy.random.randint(0, A.shape[0], size=n)
jp = numpy.random.randint(0, A.shape[1], size=n)
i = numpy.r_[i, ip]
j = numpy.r_[j, jp]
v = numpy.random.rand(n)
if N == 1:
i = int(i)
j = int(j)
v = float(v)
base = A.asformat(format)
self.m = base.copy()
self.i = i
self.j = j
self.v = v
def _timeit(self, kernel, recopy):
min_time = 1e99
if not recopy:
kernel(self.m, self.i, self.j, self.v)
number = 1
start = time.time()
while time.time() - start < 0.1:
if recopy:
m = self.m.copy()
else:
m = self.m
while True:
duration = timeit.timeit(
lambda: kernel(m, self.i, self.j, self.v), number=number)
if duration > 1e-5:
break
else:
number *= 10
min_time = min(min_time, duration/number)
return min_time
def track_fancy_setitem(self, N, sparsity_pattern, format):
def kernel(A, i, j, v):
A[i, j] = v
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
return self._timeit(kernel, sparsity_pattern == 'different')
def time_fancy_getitem(self, N, sparsity_pattern, format):
self.m[self.i, self.j]
class NullSlice(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 100000
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_getrow(self, density, format):
self.X.getrow(100)
def time_getcol(self, density, format):
self.X.getcol(100)
def time_3_rows(self, density, format):
self.X[[0, 100, 105], :]
def time_10000_rows(self, density, format):
self.X[np.arange(10000), :]
def time_3_cols(self, density, format):
self.X[:, [0, 100, 105]]
def time_100_cols(self, density, format):
self.X[:, np.arange(100)]
class Diagonal(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_diagonal(self, density, format):
self.X.diagonal()
class Sum(Benchmark):
params = [[0.01, 0.1, 0.5], ['csr', 'csc', 'coo', 'lil', 'dok', 'dia']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 1000
if format == 'dok' and n * density >= 500:
raise NotImplementedError()
self.X = sparse.rand(n, n, format=format, density=density)
def time_sum(self, density, format):
self.X.sum()
def time_sum_axis0(self, density, format):
self.X.sum(axis=0)
def time_sum_axis1(self, density, format):
self.X.sum(axis=1)
class Iteration(Benchmark):
params = [[0.05, 0.01], ['csr', 'csc', 'lil']]
param_names = ['density', 'format']
def setup(self, density, format):
n = 500
k = 1000
self.X = sparse.rand(n, k, format=format, density=density)
def time_iteration(self, density, format):
for row in self.X:
pass
| bsd-3-clause |
0x7E/ubuntu-tweak | ubuntutweak/factory.py | 4 | 4248 | # Ubuntu Tweak - Ubuntu Configuration Tool
#
# Copyright (C) 2007-2011 Tualatrix Chou <tualatrix@gmail.com>
#
# Ubuntu Tweak is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Ubuntu Tweak is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ubuntu Tweak; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
import logging
from gi.repository import Gtk
from ubuntutweak.gui.dialogs import *
from ubuntutweak.gui.widgets import *
from ubuntutweak.gui.containers import *
from ubuntutweak.common.debug import run_traceback
log = logging.getLogger('factory')
def on_reset_button_clicked(widget, reset_target):
if hasattr(reset_target, 'reset'):
log.debug("Reset value to %s by %s" % \
(widget.get_default_value(), reset_target))
reset_target.reset()
else:
log.debug("No reset function to call for: %s" % reset_target)
class WidgetFactory:
composite_capable = ('SpinButton', 'Entry', 'ComboBox',
'Scale', 'FontButton', 'ColorButton', 'Switch')
@classmethod
def create(cls, widget, **kwargs):
if widget in cls.composite_capable and kwargs.has_key('label'):
return getattr(cls, 'do_composite_create')(widget, **kwargs)
else:
return getattr(cls, 'do_create')(widget, **kwargs)
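    # Sketch of the resulting contract (hedged, derived from the code below):
    # passing a 'label' kwarg for a composite-capable widget yields
    # (label, widget) or (label, widget, reset_button); do_create otherwise
    # returns the bare widget, optionally paired with a reset button and/or
    # a blank label.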
@classmethod
def do_composite_create(cls, widget, **kwargs):
label = Gtk.Label(label=kwargs.pop('label'))
signal_dict = kwargs.pop('signal_dict', None)
reverse = kwargs.get('reverse', False)
enable_reset = kwargs.has_key('enable_reset')
if enable_reset:
enable_reset = kwargs.pop('enable_reset')
try:
new_widget = globals().get(widget)(**kwargs)
except Exception, e:
log.error(run_traceback('error', text_only=True))
if enable_reset:
return [None, None, None]
else:
return [None, None]
if signal_dict:
for signal, method in signal_dict.items():
new_widget.connect(signal, method)
if enable_reset:
try:
reset_button = ResetButton(new_widget.get_setting(),
reverse=reverse)
reset_button.connect('clicked', on_reset_button_clicked, new_widget)
except Exception, e:
log.error(run_traceback('error', text_only=True))
reset_button = None
finally:
return label, new_widget, reset_button
return label, new_widget
@classmethod
def do_create(cls, widget, **kwargs):
signal_dict = kwargs.pop('signal_dict', None)
blank_label = kwargs.pop('blank_label', None)
reverse = kwargs.get('reverse', False)
enable_reset = kwargs.has_key('enable_reset')
if enable_reset:
kwargs.pop('enable_reset')
new_widget = globals().get(widget)(**kwargs)
if signal_dict:
for signal, method in signal_dict.items():
new_widget.connect(signal, method)
if enable_reset:
try:
reset_button = ResetButton(new_widget.get_setting(),
reverse=reverse)
reset_button.connect('clicked', on_reset_button_clicked, new_widget)
if blank_label:
return Gtk.Label(), new_widget, reset_button
else:
return new_widget, reset_button
except Exception, e:
log.error(run_traceback('error', text_only=True))
if blank_label:
return Gtk.Label(), new_widget
else:
return new_widget
| gpl-2.0 |
dpetzold/django | tests/urlpatterns_reverse/views.py | 67 | 1466 | from functools import partial, update_wrapper
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse
from django.views.generic import RedirectView
def empty_view(request, *args, **kwargs):
return HttpResponse('')
def absolute_kwargs_view(request, arg1=1, arg2=2):
return HttpResponse('')
def defaults_view(request, arg1, arg2):
pass
def nested_view(request):
pass
def erroneous_view(request):
import non_existent # NOQA
def pass_resolver_match_view(request, *args, **kwargs):
response = HttpResponse('')
response.resolver_match = request.resolver_match
return response
uncallable = None # neither a callable nor a string
class ViewClass(object):
def __call__(self, request, *args, **kwargs):
return HttpResponse('')
view_class_instance = ViewClass()
class LazyRedirectView(RedirectView):
url = reverse_lazy('named-lazy-url-redirected-to')
@user_passes_test(lambda u: u.is_authenticated(), login_url=reverse_lazy('some-login-page'))
def login_required_view(request):
return HttpResponse('Hello you')
def bad_view(request, *args, **kwargs):
raise ValueError("I don't think I'm getting good value for this view")
empty_view_partial = partial(empty_view, template_name="template.html")
empty_view_wrapped = update_wrapper(
partial(empty_view, template_name="template.html"), empty_view,
)
| bsd-3-clause |
chatcannon/numpy | numpy/doc/basics.py | 39 | 7870 | """
============
Array basics
============
Array types and conversions between types
=========================================
NumPy supports a much greater variety of numerical types than Python does.
This section shows which are available, and how to modify an array's data-type.
========== ==========================================================
Data type Description
========== ==========================================================
bool_ Boolean (True or False) stored as a byte
int_ Default integer type (same as C ``long``; normally either
``int64`` or ``int32``)
intc Identical to C ``int`` (normally ``int32`` or ``int64``)
intp Integer used for indexing (same as C ``ssize_t``; normally
either ``int32`` or ``int64``)
int8 Byte (-128 to 127)
int16 Integer (-32768 to 32767)
int32 Integer (-2147483648 to 2147483647)
int64 Integer (-9223372036854775808 to 9223372036854775807)
uint8 Unsigned integer (0 to 255)
uint16 Unsigned integer (0 to 65535)
uint32 Unsigned integer (0 to 4294967295)
uint64 Unsigned integer (0 to 18446744073709551615)
float_ Shorthand for ``float64``.
float16 Half precision float: sign bit, 5 bits exponent,
10 bits mantissa
float32 Single precision float: sign bit, 8 bits exponent,
23 bits mantissa
float64 Double precision float: sign bit, 11 bits exponent,
52 bits mantissa
complex_ Shorthand for ``complex128``.
complex64 Complex number, represented by two 32-bit floats (real
and imaginary components)
complex128 Complex number, represented by two 64-bit floats (real
and imaginary components)
========== ==========================================================
In addition to ``intc``, the platform-dependent C integer types ``short``,
``long``, ``longlong`` and their unsigned versions are defined.
NumPy numerical types are instances of ``dtype`` (data-type) objects, each
having unique characteristics. Once you have imported NumPy using
::
>>> import numpy as np
the dtypes are available as ``np.bool_``, ``np.float32``, etc.
Advanced types, not listed in the table above, are explored in
section :ref:`structured_arrays`.
There are 5 basic numerical types representing booleans (bool), integers (int),
unsigned integers (uint), floating point (float) and complex. Those with numbers
in their name indicate the bitsize of the type (i.e. how many bits are needed
to represent a single value in memory). Some types, such as ``int`` and
``intp``, have differing bitsizes, dependent on the platforms (e.g. 32-bit
vs. 64-bit machines). This should be taken into account when interfacing
with low-level code (such as C or Fortran) where the raw memory is addressed.
Data-types can be used as functions to convert python numbers to array scalars
(see the array scalar section for an explanation), python sequences of numbers
to arrays of that type, or as arguments to the dtype keyword that many numpy
functions or methods accept. Some examples::
>>> import numpy as np
>>> x = np.float32(1.0)
>>> x
1.0
>>> y = np.int_([1,2,4])
>>> y
array([1, 2, 4])
>>> z = np.arange(3, dtype=np.uint8)
>>> z
array([0, 1, 2], dtype=uint8)
Array types can also be referred to by character codes, mostly to retain
backward compatibility with older packages such as Numeric. Some
documentation may still refer to these, for example::
>>> np.array([1, 2, 3], dtype='f')
array([ 1., 2., 3.], dtype=float32)
We recommend using dtype objects instead.
To convert the type of an array, use the .astype() method (preferred) or
the type itself as a function. For example: ::
>>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
array([ 0., 1., 2.])
>>> np.int8(z)
array([0, 1, 2], dtype=int8)
Note that, above, we use the *Python* float object as a dtype. NumPy knows
that ``int`` refers to ``np.int_``, ``bool`` means ``np.bool_``,
that ``float`` is ``np.float_`` and ``complex`` is ``np.complex_``.
The other data-types do not have Python equivalents.
To determine the type of an array, look at the dtype attribute::
>>> z.dtype
dtype('uint8')
dtype objects also contain information about the type, such as its bit-width
and its byte-order. The data type can also be used indirectly to query
properties of the type, such as whether it is an integer::
>>> d = np.dtype(int)
>>> d
dtype('int32')
>>> np.issubdtype(d, int)
True
>>> np.issubdtype(d, float)
False
Array Scalars
=============
NumPy generally returns elements of arrays as array scalars (a scalar
with an associated dtype). Array scalars differ from Python scalars, but
for the most part they can be used interchangeably (the primary
exception is for versions of Python older than v2.x, where integer array
scalars cannot act as indices for lists and tuples). There are some
exceptions, such as when code requires very specific attributes of a scalar
or when it checks specifically whether a value is a Python scalar. Generally,
problems are easily fixed by explicitly converting array scalars
to Python scalars, using the corresponding Python type function
(e.g., ``int``, ``float``, ``complex``, ``str``, ``unicode``).
The primary advantage of using array scalars is that
they preserve the array type (Python may not have a matching scalar type
available, e.g. ``int16``). Therefore, the use of array scalars ensures
identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
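For instance (a small illustrative sketch)::
    >>> x = np.int16(7)
    >>> x + 1  # still an array scalar; the dtype is preserved
    8
    >>> int(x)  # explicit conversion to a plain Python int
    7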
Extended Precision
==================
Python's floating-point numbers are usually 64-bit floating-point numbers,
nearly equivalent to ``np.float64``. In some unusual situations it may be
useful to use floating-point numbers with more precision. Whether this
is possible in numpy depends on the hardware and on the development
environment: specifically, x86 machines provide hardware floating-point
with 80-bit precision, and while most C compilers provide this as their
``long double`` type, MSVC (standard for Windows builds) makes
``long double`` identical to ``double`` (64 bits). NumPy makes the
compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
NumPy does not provide a dtype with more precision than C
``long double``s; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
padded with zero bits, either to 96 or 128 bits. Which is more efficient
depends on hardware and development environment; typically on 32-bit
systems they are padded to 96 bits, while on 64-bit systems they are
typically padded to 128 bits. ``np.longdouble`` is padded to the system
default; ``np.float96`` and ``np.float128`` are provided for users who
want specific padding. In spite of the names, ``np.float96`` and
``np.float128`` provide only as much precision as ``np.longdouble``,
that is, 80 bits on most x86 machines and 64 bits in standard
Windows builds.
Be warned that even if ``np.longdouble`` offers more precision than
python ``float``, it is easy to lose that extra precision, since
python often forces values to pass through ``float``. For example,
the ``%`` formatting operator requires its arguments to be converted
to standard python types, and it is therefore impossible to preserve
extended precision even if many decimal places are requested. It can
be useful to test your code with the value
``1 + np.finfo(np.longdouble).eps``.
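For example, on a platform where ``np.longdouble`` really is wider than
``float`` (hedged: on builds where ``long double`` is 64 bits, both
comparisons behave the same)::
    >>> x = np.longdouble(1) + np.finfo(np.longdouble).eps
    >>> x == 1  # the extra bits are still present
    False
    >>> float(x) == 1.0  # forcing the value through Python float loses them
    True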
"""
from __future__ import division, absolute_import, print_function
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/sdb/db/manager/sdbmanager.py | 153 | 27253 | # Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import boto
import re
from boto.utils import find_class
import uuid
from boto.sdb.db.key import Key
from boto.sdb.db.blob import Blob
from boto.sdb.db.property import ListProperty, MapProperty
from datetime import datetime, date, time
from boto.exception import SDBPersistenceError, S3ResponseError
from boto.compat import map, six, long_type
ISO8601 = '%Y-%m-%dT%H:%M:%SZ'
class TimeDecodeError(Exception):
pass
class SDBConverter(object):
"""
Responsible for converting base Python types to format compatible
with underlying database. For SimpleDB, that means everything
needs to be converted to a string when stored in SimpleDB and from
a string when retrieved.
To convert a value, pass it to the encode or decode method. The
encode method will take a Python native value and convert to DB
format. The decode method will take a DB format value and convert
it to Python native format. To find the appropriate method to
call, the generic encode/decode methods will look for the
type-specific method by searching for a method
called"encode_<type name>" or "decode_<type name>".
"""
def __init__(self, manager):
# Do a delayed import to prevent possible circular import errors.
from boto.sdb.db.model import Model
self.model_class = Model
self.manager = manager
self.type_map = {bool: (self.encode_bool, self.decode_bool),
int: (self.encode_int, self.decode_int),
float: (self.encode_float, self.decode_float),
self.model_class: (
self.encode_reference, self.decode_reference
),
Key: (self.encode_reference, self.decode_reference),
datetime: (self.encode_datetime, self.decode_datetime),
date: (self.encode_date, self.decode_date),
time: (self.encode_time, self.decode_time),
Blob: (self.encode_blob, self.decode_blob),
str: (self.encode_string, self.decode_string),
}
if six.PY2:
self.type_map[long] = (self.encode_long, self.decode_long)
def encode(self, item_type, value):
try:
if self.model_class in item_type.mro():
item_type = self.model_class
except:
pass
if item_type in self.type_map:
encode = self.type_map[item_type][0]
return encode(value)
return value
def decode(self, item_type, value):
if item_type in self.type_map:
decode = self.type_map[item_type][1]
return decode(value)
return value
def encode_list(self, prop, value):
if value in (None, []):
return []
if not isinstance(value, list):
# This is a little trick to avoid encoding when it's just a single value,
# since that most likely means it's from a query
item_type = getattr(prop, "item_type")
return self.encode(item_type, value)
# Just enumerate(value) won't work here because
# we need to add in some zero padding
# We support lists up to 1,000 attributes, since
# SDB technically only supports 1024 attributes anyway
values = {}
for k, v in enumerate(value):
values["%03d" % k] = v
return self.encode_map(prop, values)
def encode_map(self, prop, value):
import urllib
if value is None:
return None
if not isinstance(value, dict):
raise ValueError('Expected a dict value, got %s' % type(value))
new_value = []
for key in value:
item_type = getattr(prop, "item_type")
if self.model_class in item_type.mro():
item_type = self.model_class
encoded_value = self.encode(item_type, value[key])
if encoded_value is not None:
new_value.append('%s:%s' % (urllib.quote(key), encoded_value))
return new_value
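    # Hedged example: with an int item_type, {'a key': 1} becomes the single
    # attribute string 'a%20key:2147483649' (URL-quoted key, encoded value).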
def encode_prop(self, prop, value):
if isinstance(prop, ListProperty):
return self.encode_list(prop, value)
elif isinstance(prop, MapProperty):
return self.encode_map(prop, value)
else:
return self.encode(prop.data_type, value)
def decode_list(self, prop, value):
if not isinstance(value, list):
value = [value]
if hasattr(prop, 'item_type'):
item_type = getattr(prop, "item_type")
dec_val = {}
for val in value:
if val is not None:
k, v = self.decode_map_element(item_type, val)
try:
k = int(k)
except:
k = v
dec_val[k] = v
value = dec_val.values()
return value
def decode_map(self, prop, value):
if not isinstance(value, list):
value = [value]
ret_value = {}
item_type = getattr(prop, "item_type")
for val in value:
k, v = self.decode_map_element(item_type, val)
ret_value[k] = v
return ret_value
def decode_map_element(self, item_type, value):
"""Decode a single element for a map"""
import urllib
key = value
if ":" in value:
key, value = value.split(':', 1)
key = urllib.unquote(key)
if self.model_class in item_type.mro():
value = item_type(id=value)
else:
value = self.decode(item_type, value)
return (key, value)
def decode_prop(self, prop, value):
if isinstance(prop, ListProperty):
return self.decode_list(prop, value)
elif isinstance(prop, MapProperty):
return self.decode_map(prop, value)
else:
return self.decode(prop.data_type, value)
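    # Integers are offset-shifted into the non-negative range and zero-padded
    # to a fixed width so that SimpleDB's lexicographic string comparisons
    # agree with numeric order, e.g. -1 -> '2147483647', 0 -> '2147483648'.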
def encode_int(self, value):
value = int(value)
value += 2147483648
return '%010d' % value
def decode_int(self, value):
try:
value = int(value)
except:
boto.log.error("Error, %s is not an integer" % value)
value = 0
value = int(value)
value -= 2147483648
return int(value)
def encode_long(self, value):
value = long_type(value)
value += 9223372036854775808
return '%020d' % value
def decode_long(self, value):
value = long_type(value)
value -= 9223372036854775808
return value
def encode_bool(self, value):
if value == True or str(value).lower() in ("true", "yes"):
return 'true'
else:
return 'false'
def decode_bool(self, value):
if value.lower() == 'true':
return True
else:
return False
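    # Floats use the sortable string encoding of the LDAP float draft cited
    # below: a case digit, a three-digit exponent and an 18-character
    # mantissa, again so that string order matches numeric order.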
def encode_float(self, value):
"""
See http://tools.ietf.org/html/draft-wood-ldapext-float-00.
"""
s = '%e' % value
l = s.split('e')
mantissa = l[0].ljust(18, '0')
exponent = l[1]
if value == 0.0:
case = '3'
exponent = '000'
elif mantissa[0] != '-' and exponent[0] == '+':
case = '5'
exponent = exponent[1:].rjust(3, '0')
elif mantissa[0] != '-' and exponent[0] == '-':
case = '4'
exponent = 999 + int(exponent)
exponent = '%03d' % exponent
elif mantissa[0] == '-' and exponent[0] == '-':
case = '2'
mantissa = '%f' % (10 + float(mantissa))
mantissa = mantissa.ljust(18, '0')
exponent = exponent[1:].rjust(3, '0')
else:
case = '1'
mantissa = '%f' % (10 + float(mantissa))
mantissa = mantissa.ljust(18, '0')
exponent = 999 - int(exponent)
exponent = '%03d' % exponent
return '%s %s %s' % (case, exponent, mantissa)
def decode_float(self, value):
case = value[0]
exponent = value[2:5]
mantissa = value[6:]
if case == '3':
return 0.0
elif case == '5':
pass
elif case == '4':
exponent = '%03d' % (int(exponent) - 999)
elif case == '2':
mantissa = '%f' % (float(mantissa) - 10)
exponent = '-' + exponent
else:
mantissa = '%f' % (float(mantissa) - 10)
exponent = '%03d' % abs((int(exponent) - 999))
return float(mantissa + 'e' + exponent)
def encode_datetime(self, value):
if isinstance(value, six.string_types):
return value
if isinstance(value, datetime):
return value.strftime(ISO8601)
else:
return value.isoformat()
def decode_datetime(self, value):
"""Handles both Dates and DateTime objects"""
if value is None:
return value
try:
if "T" in value:
if "." in value:
# Handle true "isoformat()" dates, which may have a microsecond on at the end of them
return datetime.strptime(value.split(".")[0], "%Y-%m-%dT%H:%M:%S")
else:
return datetime.strptime(value, ISO8601)
else:
value = value.split("-")
return date(int(value[0]), int(value[1]), int(value[2]))
except Exception:
return None
def encode_date(self, value):
if isinstance(value, six.string_types):
return value
return value.isoformat()
def decode_date(self, value):
try:
value = value.split("-")
return date(int(value[0]), int(value[1]), int(value[2]))
except:
return None
encode_time = encode_date
def decode_time(self, value):
""" converts strings in the form of HH:MM:SS.mmmmmm
(created by datetime.time.isoformat()) to
datetime.time objects.
Timzone-aware strings ("HH:MM:SS.mmmmmm+HH:MM") won't
be handled right now and will raise TimeDecodeError.
"""
if '-' in value or '+' in value:
# TODO: Handle tzinfo
raise TimeDecodeError("Can't handle timezone aware objects: %r" % value)
tmp = value.split('.')
        # list() because boto.compat's map is an iterator under Python 3
        arg = list(map(int, tmp[0].split(':')))
if len(tmp) == 2:
arg.append(int(tmp[1]))
return time(*arg)
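    # e.g. decode_time('13:30:00.123456') -> datetime.time(13, 30, 0, 123456)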
def encode_reference(self, value):
if value in (None, 'None', '', ' '):
return None
if isinstance(value, six.string_types):
return value
else:
return value.id
def decode_reference(self, value):
if not value or value == "None":
return None
return value
def encode_blob(self, value):
if not value:
return None
if isinstance(value, six.string_types):
return value
if not value.id:
bucket = self.manager.get_blob_bucket()
key = bucket.new_key(str(uuid.uuid4()))
value.id = "s3://%s/%s" % (key.bucket.name, key.name)
else:
match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value.id)
if match:
s3 = self.manager.get_s3_connection()
bucket = s3.get_bucket(match.group(1), validate=False)
key = bucket.get_key(match.group(2))
else:
raise SDBPersistenceError("Invalid Blob ID: %s" % value.id)
if value.value is not None:
key.set_contents_from_string(value.value)
return value.id
def decode_blob(self, value):
if not value:
return None
match = re.match("^s3:\/\/([^\/]*)\/(.*)$", value)
if match:
s3 = self.manager.get_s3_connection()
bucket = s3.get_bucket(match.group(1), validate=False)
try:
key = bucket.get_key(match.group(2))
except S3ResponseError as e:
if e.reason != "Forbidden":
raise
return None
else:
return None
if key:
return Blob(file=key, id="s3://%s/%s" % (key.bucket.name, key.name))
else:
return None
def encode_string(self, value):
"""Convert ASCII, Latin-1 or UTF-8 to pure Unicode"""
if not isinstance(value, str):
return value
try:
return six.text_type(value, 'utf-8')
except:
# really, this should throw an exception.
# in the interest of not breaking current
# systems, however:
arr = []
for ch in value:
arr.append(six.unichr(ord(ch)))
return u"".join(arr)
def decode_string(self, value):
"""Decoding a string is really nothing, just
return the value as-is"""
return value
class SDBManager(object):
def __init__(self, cls, db_name, db_user, db_passwd,
db_host, db_port, db_table, ddl_dir, enable_ssl,
consistent=None):
self.cls = cls
self.db_name = db_name
self.db_user = db_user
self.db_passwd = db_passwd
self.db_host = db_host
self.db_port = db_port
self.db_table = db_table
self.ddl_dir = ddl_dir
self.enable_ssl = enable_ssl
self.s3 = None
self.bucket = None
self.converter = SDBConverter(self)
self._sdb = None
self._domain = None
if consistent is None and hasattr(cls, "__consistent__"):
consistent = cls.__consistent__
self.consistent = consistent
@property
def sdb(self):
if self._sdb is None:
self._connect()
return self._sdb
@property
def domain(self):
if self._domain is None:
self._connect()
return self._domain
def _connect(self):
args = dict(aws_access_key_id=self.db_user,
aws_secret_access_key=self.db_passwd,
is_secure=self.enable_ssl)
try:
region = [x for x in boto.sdb.regions() if x.endpoint == self.db_host][0]
args['region'] = region
except IndexError:
pass
self._sdb = boto.connect_sdb(**args)
# This assumes that the domain has already been created
# It's much more efficient to do it this way rather than
# having this make a roundtrip each time to validate.
# The downside is that if the domain doesn't exist, it breaks
self._domain = self._sdb.lookup(self.db_name, validate=False)
if not self._domain:
self._domain = self._sdb.create_domain(self.db_name)
def _object_lister(self, cls, query_lister):
for item in query_lister:
obj = self.get_object(cls, item.name, item)
if obj:
yield obj
def encode_value(self, prop, value):
if value is None:
return None
if not prop:
return str(value)
return self.converter.encode_prop(prop, value)
def decode_value(self, prop, value):
return self.converter.decode_prop(prop, value)
def get_s3_connection(self):
if not self.s3:
self.s3 = boto.connect_s3(self.db_user, self.db_passwd)
return self.s3
def get_blob_bucket(self, bucket_name=None):
s3 = self.get_s3_connection()
bucket_name = "%s-%s" % (s3.aws_access_key_id, self.domain.name)
bucket_name = bucket_name.lower()
try:
self.bucket = s3.get_bucket(bucket_name)
        except S3ResponseError:
self.bucket = s3.create_bucket(bucket_name)
return self.bucket
def load_object(self, obj):
if not obj._loaded:
a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)
if '__type__' in a:
for prop in obj.properties(hidden=False):
if prop.name in a:
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
try:
setattr(obj, prop.name, value)
except Exception as e:
boto.log.exception(e)
obj._loaded = True
def get_object(self, cls, id, a=None):
obj = None
if not a:
a = self.domain.get_attributes(id, consistent_read=self.consistent)
if '__type__' in a:
if not cls or a['__type__'] != cls.__name__:
cls = find_class(a['__module__'], a['__type__'])
if cls:
params = {}
for prop in cls.properties(hidden=False):
if prop.name in a:
value = self.decode_value(prop, a[prop.name])
value = prop.make_value_from_datastore(value)
params[prop.name] = value
obj = cls(id, **params)
obj._loaded = True
else:
s = '(%s) class %s.%s not found' % (id, a['__module__'], a['__type__'])
boto.log.info('sdbmanager: %s' % s)
return obj
def get_object_from_id(self, id):
return self.get_object(None, id)
def query(self, query):
query_str = "select * from `%s` %s" % (self.domain.name, self._build_filter_part(query.model_class, query.filters, query.sort_by, query.select))
if query.limit:
query_str += " limit %s" % query.limit
rs = self.domain.select(query_str, max_items=query.limit, next_token=query.next_token)
query.rs = rs
return self._object_lister(query.model_class, rs)
def count(self, cls, filters, quick=True, sort_by=None, select=None):
"""
Get the number of results that would
be returned in this query
"""
query = "select count(*) from `%s` %s" % (self.domain.name, self._build_filter_part(cls, filters, sort_by, select))
count = 0
for row in self.domain.select(query):
count += int(row['Count'])
if quick:
return count
return count
def _build_filter(self, property, name, op, val):
if name == "__id__":
name = 'itemName()'
if name != "itemName()":
name = '`%s`' % name
if val is None:
if op in ('is', '='):
return "%(name)s is null" % {"name": name}
elif op in ('is not', '!='):
return "%s is not null" % name
else:
val = ""
if property.__class__ == ListProperty:
if op in ("is", "="):
op = "like"
elif op in ("!=", "not"):
op = "not like"
if not(op in ["like", "not like"] and val.startswith("%")):
val = "%%:%s" % val
return "%s %s '%s'" % (name, op, val.replace("'", "''"))
def _build_filter_part(self, cls, filters, order_by=None, select=None):
"""
Build the filter part
"""
import types
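# Example output (hypothetical model Task): filters=[("name =", "foo")]
# with order_by="-name" yields roughly:
#   WHERE (`name` = 'foo') AND (`__type__` = 'Task') ORDER BY `name` DESC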
query_parts = []
order_by_filtered = False
if order_by:
if order_by[0] == "-":
order_by_method = "DESC"
order_by = order_by[1:]
else:
order_by_method = "ASC"
if select:
if order_by and order_by in select:
order_by_filtered = True
query_parts.append("(%s)" % select)
if isinstance(filters, six.string_types):
query = "WHERE %s AND `__type__` = '%s'" % (filters, cls.__name__)
if order_by in ["__id__", "itemName()"]:
query += " ORDER BY itemName() %s" % order_by_method
elif order_by is not None:
query += " ORDER BY `%s` %s" % (order_by, order_by_method)
return query
for filter in filters:
filter_parts = []
filter_props = filter[0]
if not isinstance(filter_props, list):
filter_props = [filter_props]
for filter_prop in filter_props:
(name, op) = filter_prop.strip().split(" ", 1)
value = filter[1]
property = cls.find_property(name)
if name == order_by:
order_by_filtered = True
if isinstance(value, list):
filter_parts_sub = []
for val in value:
val = self.encode_value(property, val)
if isinstance(val, list):
for v in val:
filter_parts_sub.append(self._build_filter(property, name, op, v))
else:
filter_parts_sub.append(self._build_filter(property, name, op, val))
filter_parts.append("(%s)" % (" OR ".join(filter_parts_sub)))
else:
val = self.encode_value(property, value)
if isinstance(val, list):
for v in val:
filter_parts.append(self._build_filter(property, name, op, v))
else:
filter_parts.append(self._build_filter(property, name, op, val))
query_parts.append("(%s)" % (" or ".join(filter_parts)))
type_query = "(`__type__` = '%s'" % cls.__name__
for subclass in self._get_all_decendents(cls).keys():
type_query += " or `__type__` = '%s'" % subclass
type_query += ")"
query_parts.append(type_query)
order_by_query = ""
if order_by:
if not order_by_filtered:
query_parts.append("`%s` LIKE '%%'" % order_by)
if order_by in ["__id__", "itemName()"]:
order_by_query = " ORDER BY itemName() %s" % order_by_method
else:
order_by_query = " ORDER BY `%s` %s" % (order_by, order_by_method)
if len(query_parts) > 0:
return "WHERE %s %s" % (" AND ".join(query_parts), order_by_query)
else:
return ""
def _get_all_decendents(self, cls):
"""Get all decendents for a given class"""
decendents = {}
for sc in cls.__sub_classes__:
decendents[sc.__name__] = sc
decendents.update(self._get_all_decendents(sc))
return decendents
def query_gql(self, query_string, *args, **kwds):
raise NotImplementedError("GQL queries not supported in SimpleDB")
def save_object(self, obj, expected_value=None):
if not obj.id:
obj.id = str(uuid.uuid4())
attrs = {'__type__': obj.__class__.__name__,
'__module__': obj.__class__.__module__,
'__lineage__': obj.get_lineage()}
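# Illustrative item layout (names hypothetical): a Task instance is stored
# as {'__type__': 'Task', '__module__': 'app.models', '__lineage__': ...,
#  '<property name>': <encoded value>, ...}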
del_attrs = []
for property in obj.properties(hidden=False):
value = property.get_value_for_datastore(obj)
if value is not None:
value = self.encode_value(property, value)
if value == []:
value = None
if value is None:
del_attrs.append(property.name)
continue
attrs[property.name] = value
if property.unique:
try:
args = {property.name: value}
obj2 = next(obj.find(**args))
if obj2.id != obj.id:
raise SDBPersistenceError("Error: %s must be unique!" % property.name)
except StopIteration:
pass
# Convert the Expected value to SDB format
if expected_value:
prop = obj.find_property(expected_value[0])
v = expected_value[1]
if v is not None and not isinstance(v, bool):
v = self.encode_value(prop, v)
expected_value[1] = v
self.domain.put_attributes(obj.id, attrs, replace=True, expected_value=expected_value)
if len(del_attrs) > 0:
self.domain.delete_attributes(obj.id, del_attrs)
return obj
def delete_object(self, obj):
self.domain.delete_attributes(obj.id)
def set_property(self, prop, obj, name, value):
setattr(obj, name, value)
value = prop.get_value_for_datastore(obj)
value = self.encode_value(prop, value)
if prop.unique:
try:
args = {prop.name: value}
obj2 = next(obj.find(**args))
if obj2.id != obj.id:
raise SDBPersistenceError("Error: %s must be unique!" % prop.name)
except StopIteration:
pass
self.domain.put_attributes(obj.id, {name: value}, replace=True)
def get_property(self, prop, obj, name):
a = self.domain.get_attributes(obj.id, consistent_read=self.consistent)
# try to get the attribute value from SDB
if name in a:
value = self.decode_value(prop, a[name])
value = prop.make_value_from_datastore(value)
setattr(obj, prop.name, value)
return value
raise AttributeError('%s not found' % name)
def set_key_value(self, obj, name, value):
self.domain.put_attributes(obj.id, {name: value}, replace=True)
def delete_key_value(self, obj, name):
self.domain.delete_attributes(obj.id, name)
def get_key_value(self, obj, name):
a = self.domain.get_attributes(obj.id, name, consistent_read=self.consistent)
if name in a:
return a[name]
else:
return None
def get_raw_item(self, obj):
return self.domain.get_item(obj.id)
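# Hedged usage sketch (not part of this module; the model and property names
# are assumptions, but Model, StringProperty, put() and get_by_id() are the
# standard boto.sdb.db entry points that drive this manager):
#
#   from boto.sdb.db.model import Model
#   from boto.sdb.db.property import StringProperty
#
#   class Task(Model):
#       name = StringProperty()
#
#   t = Task()
#   t.name = 'demo'
#   t.put()                      # persists via SDBManager.save_object
#   same = Task.get_by_id(t.id)  # round-trips via SDBManager.get_object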
| bsd-3-clause |
ianyh/heroku-buildpack-python-opencv | vendor/.heroku/lib/python2.7/test/test_MimeWriter.py | 138 | 7593 | """Test program for MimeWriter module.
The test program was too big to comfortably fit in the MimeWriter
class, so it's here in its own file.
This should generate Barry's example, modulo some quotes and newlines.
"""
import unittest, StringIO
from test.test_support import run_unittest, import_module
import_module("MimeWriter", deprecated=True)
from MimeWriter import MimeWriter
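# Minimal MimeWriter sketch (Python 2 only; the module was removed in
# Python 3). Given an open file object fp:
#   w = MimeWriter(fp)
#   w.addheader('MIME-Version', '1.0')
#   body = w.startbody('text/plain')
#   body.write('hello')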
SELLER = '''\
INTERFACE Seller-1;
TYPE Seller = OBJECT
DOCUMENTATION "A simple Seller interface to test ILU"
METHODS
price():INTEGER,
END;
'''
BUYER = '''\
class Buyer:
def __setup__(self, maxprice):
self._maxprice = maxprice
def __main__(self, kos):
"""Entry point upon arrival at a new KOS."""
broker = kos.broker()
# B4 == Barry's Big Bass Business :-)
seller = broker.lookup('Seller_1.Seller', 'B4')
if seller:
price = seller.price()
print 'Seller wants $', price, '... '
if price > self._maxprice:
print 'too much!'
else:
print "I'll take it!"
else:
print 'no seller found here'
''' # Don't ask why this comment is here
STATE = '''\
# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)
'''
SIMPLE_METADATA = [
("Interpreter", "python"),
("Interpreter-Version", "1.3"),
("Owner-Name", "Barry Warsaw"),
("Owner-Rendezvous", "bwarsaw@cnri.reston.va.us"),
("Home-KSS", "kss.cnri.reston.va.us"),
("Identifier", "hdl://cnri.kss/my_first_knowbot"),
("Launch-Date", "Mon Feb 12 16:39:03 EST 1996"),
]
COMPLEX_METADATA = [
("Metadata-Type", "complex"),
("Metadata-Key", "connection"),
("Access", "read-only"),
("Connection-Description", "Barry's Big Bass Business"),
("Connection-Id", "B4"),
("Connection-Direction", "client"),
]
EXTERNAL_METADATA = [
("Metadata-Type", "complex"),
("Metadata-Key", "generic-interface"),
("Access", "read-only"),
("Connection-Description", "Generic Interface for All Knowbots"),
("Connection-Id", "generic-kp"),
("Connection-Direction", "client"),
]
OUTPUT = '''\
From: bwarsaw@cnri.reston.va.us
Date: Mon Feb 12 17:21:48 EST 1996
To: kss-submit@cnri.reston.va.us
MIME-Version: 1.0
Content-Type: multipart/knowbot;
boundary="801spam999";
version="0.1"
This is a multi-part message in MIME format.
--801spam999
Content-Type: multipart/knowbot-metadata;
boundary="802spam999"
--802spam999
Content-Type: message/rfc822
KP-Metadata-Type: simple
KP-Access: read-only
KPMD-Interpreter: python
KPMD-Interpreter-Version: 1.3
KPMD-Owner-Name: Barry Warsaw
KPMD-Owner-Rendezvous: bwarsaw@cnri.reston.va.us
KPMD-Home-KSS: kss.cnri.reston.va.us
KPMD-Identifier: hdl://cnri.kss/my_first_knowbot
KPMD-Launch-Date: Mon Feb 12 16:39:03 EST 1996
--802spam999
Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: connection
KP-Access: read-only
KP-Connection-Description: Barry's Big Bass Business
KP-Connection-Id: B4
KP-Connection-Direction: client
INTERFACE Seller-1;
TYPE Seller = OBJECT
DOCUMENTATION "A simple Seller interface to test ILU"
METHODS
price():INTEGER,
END;
--802spam999
Content-Type: message/external-body;
access-type="URL";
URL="hdl://cnri.kss/generic-knowbot"
Content-Type: text/isl
KP-Metadata-Type: complex
KP-Metadata-Key: generic-interface
KP-Access: read-only
KP-Connection-Description: Generic Interface for All Knowbots
KP-Connection-Id: generic-kp
KP-Connection-Direction: client
--802spam999--
--801spam999
Content-Type: multipart/knowbot-code;
boundary="803spam999"
--803spam999
Content-Type: text/plain
KP-Module-Name: BuyerKP
class Buyer:
def __setup__(self, maxprice):
self._maxprice = maxprice
def __main__(self, kos):
"""Entry point upon arrival at a new KOS."""
broker = kos.broker()
# B4 == Barry's Big Bass Business :-)
seller = broker.lookup('Seller_1.Seller', 'B4')
if seller:
price = seller.price()
print 'Seller wants $', price, '... '
if price > self._maxprice:
print 'too much!'
else:
print "I'll take it!"
else:
print 'no seller found here'
--803spam999--
--801spam999
Content-Type: multipart/knowbot-state;
boundary="804spam999"
KP-Main-Module: main
--804spam999
Content-Type: text/plain
KP-Module-Name: main
# instantiate a buyer instance and put it in a magic place for the KOS
# to find.
__kp__ = Buyer()
__kp__.__setup__(500)
--804spam999--
--801spam999--
'''
class MimewriterTest(unittest.TestCase):
def test(self):
buf = StringIO.StringIO()
# Toplevel headers
toplevel = MimeWriter(buf)
toplevel.addheader("From", "bwarsaw@cnri.reston.va.us")
toplevel.addheader("Date", "Mon Feb 12 17:21:48 EST 1996")
toplevel.addheader("To", "kss-submit@cnri.reston.va.us")
toplevel.addheader("MIME-Version", "1.0")
# Toplevel body parts
f = toplevel.startmultipartbody("knowbot", "801spam999",
[("version", "0.1")], prefix=0)
f.write("This is a multi-part message in MIME format.\n")
# First toplevel body part: metadata
md = toplevel.nextpart()
md.startmultipartbody("knowbot-metadata", "802spam999")
# Metadata part 1
md1 = md.nextpart()
md1.addheader("KP-Metadata-Type", "simple")
md1.addheader("KP-Access", "read-only")
m = MimeWriter(md1.startbody("message/rfc822"))
for key, value in SIMPLE_METADATA:
m.addheader("KPMD-" + key, value)
m.flushheaders()
del md1
# Metadata part 2
md2 = md.nextpart()
for key, value in COMPLEX_METADATA:
md2.addheader("KP-" + key, value)
f = md2.startbody("text/isl")
f.write(SELLER)
del md2
# Metadata part 3
md3 = md.nextpart()
f = md3.startbody("message/external-body",
[("access-type", "URL"),
("URL", "hdl://cnri.kss/generic-knowbot")])
m = MimeWriter(f)
for key, value in EXTERNAL_METADATA:
md3.addheader("KP-" + key, value)
md3.startbody("text/isl")
# Phantom body doesn't need to be written
md.lastpart()
# Second toplevel body part: code
code = toplevel.nextpart()
code.startmultipartbody("knowbot-code", "803spam999")
# Code: buyer program source
buyer = code.nextpart()
buyer.addheader("KP-Module-Name", "BuyerKP")
f = buyer.startbody("text/plain")
f.write(BUYER)
code.lastpart()
# Third toplevel body part: state
state = toplevel.nextpart()
state.addheader("KP-Main-Module", "main")
state.startmultipartbody("knowbot-state", "804spam999")
# State: a bunch of assignments
st = state.nextpart()
st.addheader("KP-Module-Name", "main")
f = st.startbody("text/plain")
f.write(STATE)
state.lastpart()
# End toplevel body parts
toplevel.lastpart()
self.assertEqual(buf.getvalue(), OUTPUT)
def test_main():
run_unittest(MimewriterTest)
if __name__ == '__main__':
test_main()
| mit |
koobonil/Boss2D | Boss2D/addon/tensorflow-1.2.1_for_boss/tensorflow/contrib/learn/python/learn/__init__.py | 80 | 2585 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level API for learning with TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.contrib.learn.python.learn import basic_session_run_hooks
from tensorflow.contrib.learn.python.learn import datasets
from tensorflow.contrib.learn.python.learn import estimators
from tensorflow.contrib.learn.python.learn import graph_actions
from tensorflow.contrib.learn.python.learn import learn_io as io
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors
from tensorflow.contrib.learn.python.learn import ops
from tensorflow.contrib.learn.python.learn import preprocessing
from tensorflow.contrib.learn.python.learn import utils
from tensorflow.contrib.learn.python.learn.dataframe import *
from tensorflow.contrib.learn.python.learn.estimators import *
from tensorflow.contrib.learn.python.learn.evaluable import Evaluable
from tensorflow.contrib.learn.python.learn.experiment import Experiment
from tensorflow.contrib.learn.python.learn.export_strategy import ExportStrategy
from tensorflow.contrib.learn.python.learn.graph_actions import evaluate
from tensorflow.contrib.learn.python.learn.graph_actions import infer
from tensorflow.contrib.learn.python.learn.graph_actions import run_feeds
from tensorflow.contrib.learn.python.learn.graph_actions import run_n
from tensorflow.contrib.learn.python.learn.graph_actions import train
from tensorflow.contrib.learn.python.learn.learn_io import *
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.learn.python.learn.monitors import NanLossDuringTrainingError
from tensorflow.contrib.learn.python.learn.trainable import Trainable
from tensorflow.contrib.learn.python.learn.utils import *
# pylint: enable=wildcard-import
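# Hedged usage sketch (tf.contrib.learn is deprecated; the estimator name
# comes from the wildcard estimator imports above, and feature_columns is
# assumed to be defined by the caller):
#   from tensorflow.contrib import learn
#   classifier = learn.DNNClassifier(hidden_units=[10, 10],
#                                    feature_columns=feature_columns)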
| mit |
googleapis/googleapis-gen | google/ads/googleads/v8/googleads-py/tests/unit/gapic/googleads.v8/services/test_custom_audience_service.py | 1 | 35288 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import mock
import grpc
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from google.ads.googleads.v8.enums.types import custom_audience_member_type
from google.ads.googleads.v8.enums.types import custom_audience_status
from google.ads.googleads.v8.enums.types import custom_audience_type
from google.ads.googleads.v8.resources.types import custom_audience
from google.ads.googleads.v8.services.services.custom_audience_service import CustomAudienceServiceClient
from google.ads.googleads.v8.services.services.custom_audience_service import transports
from google.ads.googleads.v8.services.types import custom_audience_service
from google.api_core import client_options
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert CustomAudienceServiceClient._get_default_mtls_endpoint(None) is None
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint
assert CustomAudienceServiceClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
def test_custom_audience_service_client_from_service_account_info():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory:
factory.return_value = creds
info = {"valid": True}
client = CustomAudienceServiceClient.from_service_account_info(info)
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_from_service_account_file():
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory:
factory.return_value = creds
client = CustomAudienceServiceClient.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
client = CustomAudienceServiceClient.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_client_get_transport_class():
transport = CustomAudienceServiceClient.get_transport_class()
assert transport == transports.CustomAudienceServiceGrpcTransport
transport = CustomAudienceServiceClient.get_transport_class("grpc")
assert transport == transports.CustomAudienceServiceGrpcTransport
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
def test_custom_audience_service_client_client_options():
# Check that if channel is provided we won't create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials()
)
client = CustomAudienceServiceClient(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.CustomAudienceServiceClient.get_transport_class') as gtc:
client = CustomAudienceServiceClient(transport="grpc")
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT
# is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_MTLS_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = CustomAudienceServiceClient()
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}):
with pytest.raises(ValueError):
client = CustomAudienceServiceClient()
@mock.patch.object(CustomAudienceServiceClient, "DEFAULT_ENDPOINT", modify_default_endpoint(CustomAudienceServiceClient))
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
@pytest.mark.parametrize("use_client_cert_env", ["true", "false"])
def test_custom_audience_service_client_mtls_env_auto(use_client_cert_env):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
options = client_options.ClientOptions(client_cert_source=client_cert_source_callback)
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
ssl_channel_creds = mock.Mock()
with mock.patch('grpc.ssl_channel_credentials', return_value=ssl_channel_creds):
grpc_transport.return_value = None
client = CustomAudienceServiceClient(client_options=options)
if use_client_cert_env == "false":
expected_ssl_channel_creds = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_ssl_channel_creds = ssl_channel_creds
expected_host = client.DEFAULT_MTLS_ENDPOINT
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
with mock.patch('google.auth.transport.grpc.SslCredentials.ssl_credentials', new_callable=mock.PropertyMock) as ssl_credentials_mock:
if use_client_cert_env == "false":
is_mtls_mock.return_value = False
ssl_credentials_mock.return_value = None
expected_host = client.DEFAULT_ENDPOINT
expected_ssl_channel_creds = None
else:
is_mtls_mock.return_value = True
ssl_credentials_mock.return_value = mock.Mock()
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_ssl_channel_creds = ssl_credentials_mock.return_value
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=expected_ssl_channel_creds,
credentials=None,
host=expected_host,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}):
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
with mock.patch('google.auth.transport.grpc.SslCredentials.__init__', return_value=None):
with mock.patch('google.auth.transport.grpc.SslCredentials.is_mtls', new_callable=mock.PropertyMock) as is_mtls_mock:
is_mtls_mock.return_value = False
grpc_transport.return_value = None
client = CustomAudienceServiceClient()
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host=client.DEFAULT_ENDPOINT,
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_custom_audience_service_client_client_options_from_dict():
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceGrpcTransport.__init__') as grpc_transport:
grpc_transport.return_value = None
client = CustomAudienceServiceClient(
client_options={'api_endpoint': 'squid.clam.whelk'}
)
grpc_transport.assert_called_once_with(
ssl_channel_credentials=None,
credentials=None,
host="squid.clam.whelk",
client_info=transports.base.DEFAULT_CLIENT_INFO,
)
def test_get_custom_audience(transport: str = 'grpc', request_type=custom_audience_service.GetCustomAudienceRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience(
resource_name='resource_name_value',
id=205,
status=custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN,
name='name_value',
type_=custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN,
description='description_value',
)
response = client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.GetCustomAudienceRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience.CustomAudience)
assert response.resource_name == 'resource_name_value'
assert response.id == 205
assert response.status == custom_audience_status.CustomAudienceStatusEnum.CustomAudienceStatus.UNKNOWN
assert response.name == 'name_value'
assert response.type_ == custom_audience_type.CustomAudienceTypeEnum.CustomAudienceType.UNKNOWN
assert response.description == 'description_value'
def test_get_custom_audience_from_dict():
test_get_custom_audience(request_type=dict)
def test_get_custom_audience_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.GetCustomAudienceRequest()
request.resource_name = 'resource_name/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
call.return_value = custom_audience.CustomAudience()
client.get_custom_audience(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'resource_name=resource_name/value',
) in kw['metadata']
def test_get_custom_audience_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.get_custom_audience),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience.CustomAudience()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.get_custom_audience(
resource_name='resource_name_value',
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].resource_name == 'resource_name_value'
def test_get_custom_audience_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get_custom_audience(
custom_audience_service.GetCustomAudienceRequest(),
resource_name='resource_name_value',
)
def test_mutate_custom_audiences(transport: str = 'grpc', request_type=custom_audience_service.MutateCustomAudiencesRequest):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse(
)
response = client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == custom_audience_service.MutateCustomAudiencesRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, custom_audience_service.MutateCustomAudiencesResponse)
def test_mutate_custom_audiences_from_dict():
test_mutate_custom_audiences(request_type=dict)
def test_mutate_custom_audiences_field_headers():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = custom_audience_service.MutateCustomAudiencesRequest()
request.customer_id = 'customer_id/value'
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
client.mutate_custom_audiences(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
'x-goog-request-params',
'customer_id=customer_id/value',
) in kw['metadata']
def test_mutate_custom_audiences_flattened():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.mutate_custom_audiences),
'__call__') as call:
# Designate an appropriate return value for the call.
call.return_value = custom_audience_service.MutateCustomAudiencesResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.mutate_custom_audiences(
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0].customer_id == 'customer_id_value'
assert args[0].operations == [custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))]
def test_mutate_custom_audiences_flattened_error():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.mutate_custom_audiences(
custom_audience_service.MutateCustomAudiencesRequest(),
customer_id='customer_id_value',
operations=[custom_audience_service.CustomAudienceOperation(update_mask=field_mask_pb2.FieldMask(paths=['paths_value']))],
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = CustomAudienceServiceClient(transport=transport)
assert client.transport is transport
def test_transport_get_channel():
# A client may be instantiated with a custom transport instance.
transport = transports.CustomAudienceServiceGrpcTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
channel = transport.grpc_channel
assert channel
def test_transport_grpc_default():
# A client should use the gRPC transport by default.
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
)
assert isinstance(
client.transport,
transports.CustomAudienceServiceGrpcTransport,
)
@pytest.mark.parametrize("transport_class", [
transports.CustomAudienceServiceGrpcTransport,
])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_custom_audience_service_base_transport():
# Instantiate the base transport.
with mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport.__init__') as Transport:
Transport.return_value = None
transport = transports.CustomAudienceServiceTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
'get_custom_audience',
'mutate_custom_audiences',
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
def test_custom_audience_service_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, 'default') as adc, mock.patch('google.ads.googleads.v8.services.services.custom_audience_service.transports.CustomAudienceServiceTransport._prep_wrapped_messages') as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.CustomAudienceServiceTransport()
adc.assert_called_once()
def test_custom_audience_service_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
CustomAudienceServiceClient()
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_transport_auth_adc():
# If credentials and host are not provided, the transport class should use
# ADC credentials.
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transports.CustomAudienceServiceGrpcTransport(host="squid.clam.whelk")
adc.assert_called_once_with(scopes=(
'https://www.googleapis.com/auth/adwords',
))
def test_custom_audience_service_host_no_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com'),
)
assert client.transport._host == 'googleads.googleapis.com:443'
def test_custom_audience_service_host_with_port():
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(api_endpoint='googleads.googleapis.com:8000'),
)
assert client.transport._host == 'googleads.googleapis.com:8000'
def test_custom_audience_service_grpc_transport_channel():
channel = grpc.insecure_channel('http://localhost/')
# Check that channel is used if provided.
transport = transports.CustomAudienceServiceGrpcTransport(
host="squid.clam.whelk",
channel=channel,
)
assert transport.grpc_channel == channel
assert transport._host == "squid.clam.whelk:443"
assert transport._ssl_channel_credentials is None
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport])
def test_custom_audience_service_transport_channel_mtls_with_client_cert_source(
transport_class
):
with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_ssl_cred = mock.Mock()
grpc_ssl_channel_cred.return_value = mock_ssl_cred
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
cred = ga_credentials.AnonymousCredentials()
with pytest.warns(DeprecationWarning):
with mock.patch.object(google.auth, 'default') as adc:
adc.return_value = (cred, None)
transport = transport_class(
host="squid.clam.whelk",
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=client_cert_source_callback,
)
adc.assert_called_once()
grpc_ssl_channel_cred.assert_called_once_with(
certificate_chain=b"cert bytes", private_key=b"key bytes"
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
assert transport._ssl_channel_credentials == mock_ssl_cred
@pytest.mark.parametrize("transport_class", [transports.CustomAudienceServiceGrpcTransport,])
def test_custom_audience_service_transport_channel_mtls_with_adc(
transport_class
):
mock_ssl_cred = mock.Mock()
with mock.patch.multiple(
"google.auth.transport.grpc.SslCredentials",
__init__=mock.Mock(return_value=None),
ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
):
with mock.patch.object(transport_class, "create_channel", autospec=True) as grpc_create_channel:
mock_grpc_channel = mock.Mock()
grpc_create_channel.return_value = mock_grpc_channel
mock_cred = mock.Mock()
with pytest.warns(DeprecationWarning):
transport = transport_class(
host="squid.clam.whelk",
credentials=mock_cred,
api_mtls_endpoint="mtls.squid.clam.whelk",
client_cert_source=None,
)
grpc_create_channel.assert_called_once_with(
"mtls.squid.clam.whelk:443",
credentials=mock_cred,
credentials_file=None,
scopes=(
'https://www.googleapis.com/auth/adwords',
),
ssl_credentials=mock_ssl_cred,
quota_project_id=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
assert transport.grpc_channel == mock_grpc_channel
def test_custom_audience_path():
customer_id = "squid"
custom_audience_id = "clam"
expected = "customers/{customer_id}/customAudiences/{custom_audience_id}".format(customer_id=customer_id, custom_audience_id=custom_audience_id, )
actual = CustomAudienceServiceClient.custom_audience_path(customer_id, custom_audience_id)
assert expected == actual
def test_parse_custom_audience_path():
expected = {
"customer_id": "whelk",
"custom_audience_id": "octopus",
}
path = CustomAudienceServiceClient.custom_audience_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_custom_audience_path(path)
assert expected == actual
def test_common_billing_account_path():
billing_account = "oyster"
expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, )
actual = CustomAudienceServiceClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "nudibranch",
}
path = CustomAudienceServiceClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "cuttlefish"
expected = "folders/{folder}".format(folder=folder, )
actual = CustomAudienceServiceClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "mussel",
}
path = CustomAudienceServiceClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "winkle"
expected = "organizations/{organization}".format(organization=organization, )
actual = CustomAudienceServiceClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nautilus",
}
path = CustomAudienceServiceClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "scallop"
expected = "projects/{project}".format(project=project, )
actual = CustomAudienceServiceClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "abalone",
}
path = CustomAudienceServiceClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "squid"
location = "clam"
expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
actual = CustomAudienceServiceClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "whelk",
"location": "octopus",
}
path = CustomAudienceServiceClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = CustomAudienceServiceClient.parse_common_location_path(path)
assert expected == actual
def test_client_withDEFAULT_CLIENT_INFO():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
client = CustomAudienceServiceClient(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(transports.CustomAudienceServiceTransport, '_prep_wrapped_messages') as prep:
transport_class = CustomAudienceServiceClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(),
client_info=client_info,
)
prep.assert_called_once_with(client_info)
| apache-2.0 |
HyperBaton/ansible | test/units/modules/network/fortios/test_fortios_application_group.py | 21 | 8298 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_application_group
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_application_group.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
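# Shared handler used by all tests below; the Connection class itself is
# patched by the autouse fixture above, so no live FortiOS device is reached.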
def test_application_group_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_group': {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
expected_data = {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
}
set_method_mock.assert_called_with('application', 'group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_application_group_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_group': {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
expected_data = {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
}
set_method_mock.assert_called_with('application', 'group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_application_group_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'application_group': {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
delete_method_mock.assert_called_with('application', 'group', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_application_group_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'application_group': {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
delete_method_mock.assert_called_with('application', 'group', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_application_group_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_group': {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
expected_data = {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
}
set_method_mock.assert_called_with('application', 'group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_application_group_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'application_group': {
'random_attribute_not_valid': 'tag', 'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
},
'vdom': 'root'}
is_error, changed, response = fortios_application_group.fortios_application(input_data, fos_instance)
expected_data = {'comment': 'Comment',
'name': 'default_name_4',
'type': 'application'
}
set_method_mock.assert_called_with('application', 'group', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
coronary/RandomEpisode | depends/Lib/site-packages/tmdbsimple/people.py | 1 | 7235 | # -*- coding: utf-8 -*-
"""
tmdbsimple.people
~~~~~~~~~~~~~~~~~
This module implements the People, Credits, and Jobs functionality
of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2017 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class People(TMDB):
"""
People functionality.
See: http://docs.themoviedb.apiary.io/#people
"""
BASE_PATH = 'person'
URLS = {
'info': '/{id}',
'movie_credits': '/{id}/movie_credits',
'tv_credits': '/{id}/tv_credits',
'combined_credits': '/{id}/combined_credits',
'external_ids': '/{id}/external_ids',
'images': '/{id}/images',
'changes': '/{id}/changes',
'popular': '/popular',
'latest': '/latest',
}
def __init__(self, id=0):
super(People, self).__init__()
self.id = id
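# Hedged usage sketch (requires a configured TMDb API key; the id and
# append_to_response values are examples):
#   import tmdbsimple as tmdb
#   tmdb.API_KEY = '...'
#   person = tmdb.People(287)
#   info = person.info(append_to_response='movie_credits')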
def info(self, **kwargs):
"""
Get the general person information for a specific id.
Args:
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def movie_credits(self, **kwargs):
"""
Get the movie credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('movie_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv_credits(self, **kwargs):
"""
Get the TV credits for a specific person id.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('tv_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def combined_credits(self, **kwargs):
"""
Get the combined (movie and TV) credits for a specific person id.
To get the expanded details for each TV record, call the /credit method
with the provided credit_id. This will provide details about which
episode and/or season the credit is for.
Args:
language: (optional) ISO 639-1 code.
append_to_response: (optional) Comma separated, any person method.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('combined_credits')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def external_ids(self, **kwargs):
"""
Get the external ids for a specific person id.
Returns:
A dict respresentation of the JSON returned from the API.
"""
path = self._get_id_path('external_ids')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def images(self, **kwargs):
"""
Get the images for a specific person id.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('images')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def changes(self, **kwargs):
"""
Get the changes for a specific person id.
Changes are grouped by key, and ordered by date in descending order.
By default, only the last 24 hours of changes are returned. The maximum
number of days that can be returned in a single request is 14. The
language is present on fields that are translatable.
Args:
start_date: (optional) Expected format is 'YYYY-MM-DD'.
end_date: (optional) Expected format is 'YYYY-MM-DD'.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_id_path('changes')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def popular(self, **kwargs):
"""
Get the list of popular people on The Movie Database. This list
refreshes every day.
Args:
page: (optional) Minimum 1, maximum 1000.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('popular')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def latest(self, **kwargs):
"""
Get the latest person id.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('latest')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
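# Illustrative usage sketch (not part of the original module; the API key and
# person id below are made-up placeholders, not values from this file):
#   import tmdbsimple as tmdb
#   tmdb.API_KEY = 'YOUR_TMDB_API_KEY'
#   person = tmdb.People(287)
#   response = person.info(append_to_response='movie_credits')
#   print(response['name'])  # _set_attrs_to_values() also exposes person.name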
class Credits(TMDB):
"""
Credits functionality.
See: http://docs.themoviedb.apiary.io/#credits
"""
BASE_PATH = 'credit'
URLS = {
'info': '/{credit_id}',
}
def __init__(self, credit_id):
super(Credits, self).__init__()
self.credit_id = credit_id
def info(self, **kwargs):
"""
Get the detailed information about a particular credit record. This is
currently only supported with the new credit model found in TV. These
ids can be found from any TV credit response as well as the tv_credits
and combined_credits methods for people.
The episodes object returns a list of episodes and are generally going
to be guest stars. The season array will return a list of season
numbers. Season credits are credits that were marked with the
"add to every season" option in the editing interface and are
assumed to be "season regulars".
Args:
language: (optional) ISO 639-1 code.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_credit_id_path('info')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
class Jobs(TMDB):
"""
Jobs functionality.
See: http://docs.themoviedb.apiary.io/#jobs
"""
BASE_PATH = 'job'
URLS = {
'list': '/list',
}
def list(self, **kwargs):
"""
Get a list of valid jobs.
Returns:
            A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
| mit |
ruslanloman/nova | nova/virt/libvirt/volume/volume.py | 1 | 16814 | # Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import os
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
                    'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
help='Mount options passed to the NFS client. See section '
'of the nfs man page for details'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
                    'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
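# Illustrative nova.conf fragment (not part of this module; the values,
# including the offload interface address, are made-up examples) showing how
# the options registered above are typically set:
#   [libvirt]
#   iscsi_use_multipath = True
#   num_iscsi_scan_tries = 5
#   iscsi_iface = bnx2i.00:05:b5:d2:a0:c2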
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
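# Illustrative connection_info (made-up values) that get_config() above
# understands; 'qos_specs' entries become disk_* tunables on the guest disk
# config and 'access_mode' toggles the read-only flag:
#   connection_info = {
#       'driver_volume_type': 'iscsi',
#       'serial': 'vol-0001',
#       'data': {
#           'logical_block_size': '4096',
#           'qos_specs': {'total_bytes_sec': '1048576', 'read_iops_sec': '200'},
#           'access_mode': 'ro',
#       },
#   }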
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
                raise exception.NovaException(_("Invalid target_portal"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
self.connector.disconnect_volume(connection_info['data'], None)
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISER', utils._get_root_helper(),
use_multipath=CONF.libvirt.iser_use_multipath,
device_scan_attempts=CONF.libvirt.num_iser_scan_tries,
transport=self._get_transport())
def _get_transport(self):
return 'iser'
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
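# For example (illustrative paths and options), with nfs_mount_options set to
# 'vers=3,timeo=600' and options '-o intr', _mount_nfs() above runs roughly:
#   mount -t nfs -o vers=3,timeo=600 -o intr host:/export <nfs_mount_point_base>/<hash-of-export>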
| apache-2.0 |
Quihico/repository.spartacus | script.trtv/streaming.py | 1 | 24691 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Sean Poyser and Richard Dean (write2dixie@gmail.com) - With acknowledgement to some original code by twinther (Tommy Winther)
#
import xbmc
from xml.etree import ElementTree
from xml.parsers.expat import ExpatError
import ConfigParser
import os
import re
import xbmcaddon
import urllib
import requests
import json
import dixie
ADDON = dixie.ADDON
SF_METALLIQ = dixie.GetSetting('SF_METALLIQ')
autoplay = dixie.GetSetting('autoplay')
LOCAL = dixie.GetSetting('local.ini') == 'true'
FTVINI = dixie.GetSetting('ftv.ini')
datapath = dixie.PROFILE
class StreamsService(object):
def __init__(self):
self.addonsParser = ConfigParser.ConfigParser(dict_type=OrderedDict)
self.addonsParser.optionxform = lambda option: option
iniFiles = self.getIniFiles()
for file in iniFiles:
try: self.addonsParser.read(file)
except: pass
def getIniFiles(self):
files = []
import glob
ini = os.path.join(datapath, 'ini', '*.*')
files = glob.glob(ini)
for i in range(10):
file = dixie.GetSetting('INI_%d' % i)
if len(file) > 0:
if file not in files:
files.append(file)
if os.path.exists(os.path.join(datapath,'addons.ini')):
files.append(os.path.join(datapath, 'addons.ini'))
return files
def loadFavourites(self):
entries = list()
path = xbmc.translatePath('special://profile/favourites.xml')
if os.path.exists(path):
f = open(path)
xml = f.read()
f.close()
try:
doc = ElementTree.fromstring(xml)
for node in doc.findall('favourite'):
                    value = node.text.replace(',return','')
if value[0:11] == 'PlayMedia("':
value = value[11:-2]
elif value[0:10] == 'PlayMedia(':
value = value[10:-1]
elif value[0:22] == 'ActivateWindow(10025,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10025,':
value = value[22:-1]
elif value[0:22] == 'ActivateWindow(10001,"':
value = value[22:-2]
elif value[0:21] == 'ActivateWindow(10001,':
value = value[22:-1]
else:
continue
entries.append((node.get('name'), value))
except ExpatError:
pass
return entries
def loadPlaylist(self):
iptv_type = dixie.GetSetting('playlist.type')
IPTV_URL = '0'
IPTV_FILE = '1'
entries = list()
label = ''
value = ''
if iptv_type == IPTV_FILE:
path = os.path.join(dixie.GetSetting('playlist.file'))
else:
url = dixie.GetSetting('playlist.url')
path = os.path.join(datapath, 'playlist.m3u')
try:
request = requests.get(url)
playlist = request.content
with open(path, 'wb') as f:
f.write(playlist)
except: pass
if os.path.exists(path):
f = open(path)
playlist = f.readlines()
f.close()
for line in playlist:
if line.startswith('#EXTINF:'):
label = line.split(',')[-1].strip()
elif line.startswith('rtmp') or line.startswith('rtmpe') or line.startswith('rtsp') or line.startswith('http'):
value = line.replace('rtmp://$OPT:rtmp-raw=', '').replace('\n', '')
entries.append((label, value))
return entries
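# Illustrative M3U fragment (made-up channel and URL) that loadPlaylist()
# above parses:
#   #EXTINF:-1,Channel One HD
#   http://example.com/stream1.m3u8
# producing entries == [('Channel One HD', 'http://example.com/stream1.m3u8')]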
def locateSuperFavourites(self, title):
SUPERFAVES = 'plugin.program.super.favourites'
SF_INSTALLED = xbmc.getCondVisibility('System.HasAddon(%s)' % SUPERFAVES) == 1
if not SF_INSTALLED:
return None
sfAddon = xbmcaddon.Addon(id = SUPERFAVES)
# Detect the root folder for SF items, set to default if not already set
ROOT = sfAddon.getSetting('FOLDER')
if not ROOT:
ROOT = 'special://profile/addon_data/plugin.program.super.favourites'
folder = os.path.join(ROOT, 'Super Favourites')
items = []
self._locateSuperFavourites(title.upper(), folder, items)
return items
def _locateSuperFavourites(self, title, folder, items):
import sfile
import settings
import urllib
current, dirs, files = sfile.walk(folder)
for dir in dirs:
folder = os.path.join(current, dir)
# check against SF list, if it exists then match up
if dir.upper() == title:
# cfg = os.path.join(folder, 'folder.cfg')
# autoplay = settings.get('AUTOPLAY', cfg)
if autoplay == 'true':
uTitle = urllib.quote_plus(title)
mode = 5400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
else:
uTitle = urllib.quote_plus(title)
mode = 400
uFolder = urllib.quote_plus(folder)
toAdd = 'plugin://plugin.program.super.favourites/?label=%s&mode=%d&path=%s' % (uTitle, mode, uFolder)
toAdd = '__SF__ActivateWindow(10025,"%s",return)' % toAdd
xbmc.log('##### FOLDER: %s' % folder)
if os.path.exists(xbmc.translatePath(os.path.join(folder,'favourites.xml'))):
items.append(['SF_'+folder, toAdd])
self._locateSuperFavourites(title, folder, items)
def getAddons(self):
return self.addonsParser.sections()
def getAddonStreams(self, id):
return self.addonsParser.items(id)
def detectStream(self, channel, catchup=''):
"""
@param channel:
@type channel: source.Channel
"""
matches = list()
xbmc.log('CATCHUP: %s'%catchup)
# If user chooses to watch via catchup then call meta addons
if catchup != '':
catchup = catchup.replace(' ','+')
stream = ('plugin://plugin.video.metalliq/%s' % (catchup))
matches.append(('plugin.video.metalliq', 'Catchup', [str(stream)]))
# For a live tv selection grab valid ini files and present options
else:
# Get any Super Favourites with channel name
superFaves = self.locateSuperFavourites(channel.id)
xbmc.log('### SF: %s' % superFaves)
if superFaves:
if len(superFaves) == 1 and not '-metalliq' in superFaves[0][0]:
matches.append((superFaves[0][0], 'Social Share', superFaves[0][1]))
elif len(superFaves) == 1 and '-metalliq' in superFaves[0][0] and SF_METALLIQ == 'true':
matches.append((superFaves[0][0], 'MetalliQ', superFaves[0][1]))
else:
index = 0
for superFave in superFaves:
if '-metalliq' in superFave[0] and SF_METALLIQ == 'true':
label = 'MetalliQ'
matches.append((superFave[0], label, superFave[1]))
elif not '-metalliq' in superFave[0]:
if len(superFaves) == 2 and ('-metalliq' in superFaves[0][0] or '-metalliq' in superFaves[1][0]):
label = 'Social Share'
else:
index += 1
label = 'Social Share (%d)' % index
matches.append((superFave[0], label, superFave[1]))
# Get any Add-ons with channel name
for id in self.getAddons():
try:
xbmcaddon.Addon(id)
except Exception:
pass # ignore addons that are not installed
for (label, stream) in self.getAddonStreams(id):
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper().replace('_',' ')
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
# If meta is chosen we clean the name up a bit more
if SF_METALLIQ == 'false':
if id == "plugin.video.metalliq" or id == "plugin.video.meta":
label = channel.title
chanx = channel.title.replace(" ","+").replace("_","+")
if chanx.endswith("%20HDTV"):
chanx = chanx.replace("%20HDTV","")
if chanx.endswith("%20HD"):
chanx = chanx.replace("%20HD","")
if chanx.endswith("%20PLUS1"):
chanx = chanx.replace("%20PLUS1","")
stream = str(stream.replace("<channel>",'live/%s/None/en'% chanx))
xbmc.log('STREAM: %s'%stream)
if type(stream) is list:
stream = stream[0]
if (channel_temp in label_temp) or (label_temp in channel_temp):
# Workaround for getting clean id if ini contains badly formatted items
if stream.startswith('plugin://') and not 'plugin.program.super.favourites' in stream:
idtemp = stream.split('plugin://')[1]
xbmc.log('idtemp: %s' % idtemp)
id = idtemp.split('/')[0]
# Clean up badly formatted labels in the ini files
label = re.sub('[:\\/?\<>|"]', '', label)
label = label.strip()
try:
label = label.encode('ascii', 'ignore')
except:
try:
label = label.decode('utf-8').encode('ascii', 'ignore')
except:
label = label
matches.append((id, label, stream))
# Get any Kodi Favourites with channel name
kodiFaves = self.loadFavourites()
if kodiFaves:
id = 'kodi-favourite'
for (label, stream) in kodiFaves:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get any Playlist entries with channel name
iptvPlaylist = self.loadPlaylist()
if iptvPlaylist:
id = 'iptv-playlist'
for (label, stream) in iptvPlaylist:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
# Get entries from PVRchannels with channel name
import pvr
PVRchannels = pvr.getPVRChannels()
if PVRchannels:
id = 'xbmc.pvr'
for (label, stream) in PVRchannels:
label = label.upper()
label_temp = label.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(label_temp) > 9:
label_temp = label_temp.replace('CINEMA','').replace('MOVIES','')
channel.title = channel.title.upper()
channel_temp = channel.title.replace(' ','').replace('_','').replace('HD','').replace('1','ONE').replace('2','TWO').replace('3','THREE').replace('4','FOUR').replace('5','FIVE').replace('6','SIX').replace('7','SEVEN').replace('8','EIGHT').replace('9','NINE').replace('0','ZERO').replace('SPORTS','SPORT').replace('|','').replace(':','').replace('(','').replace(')','').replace('=','')
if len(channel_temp) > 9:
channel_temp = channel_temp.replace('CINEMA','').replace('MOVIES','')
if (channel_temp in label_temp) or (label_temp in channel_temp):
matches.append((id, label, stream))
xbmc.log('### matches length: %s' % len(matches))
# if len(matches) == 1:
# return [matches[0][0],matches[0][1],str(matches[0][2])]
# else:
return matches
try: # needed by OrderedDict.__repr__ below (Python 2)
    from thread import get_ident as _get_ident
except ImportError:
    from dummy_thread import get_ident as _get_ident
class OrderedDict(dict):
# From: http://code.activestate.com/recipes/576693/
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
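# Quick usage sketch (illustrative, not part of the recipe):
#   od = OrderedDict()
#   od['b'] = 2
#   od['a'] = 1
#   od.keys()              # -> ['b', 'a'], insertion order preserved
#   od.popitem(last=False) # -> ('b', 2), FIFO removal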
| gpl-2.0 |
joker946/nova | nova/tests/unit/console/test_serial.py | 60 | 4892 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Serial Console."""
import socket
import mock
import six.moves
from nova.console import serial
from nova import exception
from nova import test
class SerialTestCase(test.NoDBTestCase):
def setUp(self):
super(SerialTestCase, self).setUp()
serial.ALLOCATED_PORTS = set()
def test_get_port_range(self):
start, stop = serial._get_port_range()
self.assertEqual(10000, start)
self.assertEqual(20000, stop)
def test_get_port_range_customized(self):
self.flags(port_range='30000:40000', group='serial_console')
start, stop = serial._get_port_range()
self.assertEqual(30000, start)
self.assertEqual(40000, stop)
def test_get_port_range_bad_range(self):
self.flags(port_range='40000:30000', group='serial_console')
start, stop = serial._get_port_range()
self.assertEqual(10000, start)
self.assertEqual(20000, stop)
def test_get_port_range_not_numeric(self):
self.flags(port_range='xxx:yyy', group='serial_console')
start, stop = serial._get_port_range()
self.assertEqual(10000, start)
self.assertEqual(20000, stop)
def test_get_port_range_invalid_syntax(self):
self.flags(port_range='10:20:30', group='serial_console')
start, stop = serial._get_port_range()
self.assertEqual(10000, start)
self.assertEqual(20000, stop)
@mock.patch('socket.socket')
def test_verify_port(self, fake_socket):
s = mock.MagicMock()
fake_socket.return_value = s
serial._verify_port('127.0.0.1', 10)
s.bind.assert_called_once_with(('127.0.0.1', 10))
@mock.patch('socket.socket')
def test_verify_port_in_use(self, fake_socket):
s = mock.MagicMock()
s.bind.side_effect = socket.error()
fake_socket.return_value = s
self.assertRaises(
exception.SocketPortInUseException,
serial._verify_port, '127.0.0.1', 10)
s.bind.assert_called_once_with(('127.0.0.1', 10))
@mock.patch('nova.console.serial._verify_port', lambda x, y: None)
def test_acquire_port(self):
start, stop = 15, 20
self.flags(
port_range='%d:%d' % (start, stop),
group='serial_console')
for port in six.moves.range(start, stop):
self.assertEqual(port, serial.acquire_port('127.0.0.1'))
for port in six.moves.range(start, stop):
self.assertEqual(port, serial.acquire_port('127.0.0.2'))
        self.assertEqual(10, len(serial.ALLOCATED_PORTS))
@mock.patch('nova.console.serial._verify_port')
def test_acquire_port_in_use(self, fake_verify_port):
def port_10000_already_used(host, port):
if port == 10000 and host == '127.0.0.1':
raise exception.SocketPortInUseException(
port=port,
host=host,
error="already in use")
fake_verify_port.side_effect = port_10000_already_used
self.assertEqual(10001, serial.acquire_port('127.0.0.1'))
self.assertEqual(10000, serial.acquire_port('127.0.0.2'))
self.assertNotIn(('127.0.0.1', 10000), serial.ALLOCATED_PORTS)
self.assertIn(('127.0.0.1', 10001), serial.ALLOCATED_PORTS)
self.assertIn(('127.0.0.2', 10000), serial.ALLOCATED_PORTS)
@mock.patch('nova.console.serial._verify_port')
    def test_acquire_port_not_able_to_bind_at_any_port(self, fake_verify_port):
start, stop = 15, 20
self.flags(
port_range='%d:%d' % (start, stop),
group='serial_console')
fake_verify_port.side_effect = (
exception.SocketPortRangeExhaustedException(host='127.0.0.1'))
self.assertRaises(
exception.SocketPortRangeExhaustedException,
serial.acquire_port, '127.0.0.1')
def test_release_port(self):
serial.ALLOCATED_PORTS.add(('127.0.0.1', 100))
serial.ALLOCATED_PORTS.add(('127.0.0.2', 100))
self.assertEqual(2, len(serial.ALLOCATED_PORTS))
serial.release_port('127.0.0.1', 100)
self.assertEqual(1, len(serial.ALLOCATED_PORTS))
serial.release_port('127.0.0.2', 100)
self.assertEqual(0, len(serial.ALLOCATED_PORTS))
| apache-2.0 |
moorecoin/MooreCoinMiningAlgorithm | contrib/devtools/update-translations.py | 1 | 6779 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'moorecoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
def check_at_repository_root():
    if not os.path.exists('.git'):
        print('No .git directory found')
        print('Execute this script at the root of the repository', file=sys.stderr)
        exit(1)
def fetch_all_translations():
    if subprocess.call([TX, 'pull', '-f']):
        print('Error while fetching translations', file=sys.stderr)
        exit(1)
def find_format_specifiers(s):
    '''Find all format specifiers in a string.'''
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        specifiers.append(s[percent+1])
        pos = percent+2
    return specifiers
def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    numeric = []
    other = []
    for s in specifiers:
        if s in {'1','2','3','4','5','6','7','8','9'}:
            numeric.append(s)
        else:
            other.append(s)
    # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric),other
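# Illustrative example (not part of the original script) of what the two
# helpers above return for a mixed message:
#   find_format_specifiers('Sent %1 (%s)')  -> ['1', 's']
#   split_format_specifiers(['1', 's'])     -> ({'1'}, ['s'])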
def sanitize_string(s):
    '''Sanitize string for printing'''
    return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors):
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation '%s'" % sanitize_string(translation))
        return False
    else:
        if source_f != translation_f:
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True
def all_ts_files(suffix=''):
    for filename in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
            continue
        if suffix: # remove provided suffix
            filename = filename[0:-len(suffix)]
        filepath = os.path.join(LOCALE_DIR, filename)
        yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Remove invalid characters from translation string'''
    return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
    text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
    return text
def postprocess_translations(reduce_diff_hacks=False):
    print('Checking and postprocessing...')
    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata
    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')
    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8') # need to override encoding because 'utf8' is not understood, only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)
        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]
                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors)
                    for error in errors:
                        print('%s: %s' % (filename, error))
                    if not valid: # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True
                # remove location tags
                for location in message.findall('location'):
                    message.remove(location)
                # remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)
        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to Qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors
if __name__ == '__main__':
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
| mit |
kssim/efp | making_decisions/python/bmi_calculator.py | 1 | 1254 | # Practice 19. BMI Calculator
# Output:
# Your BMI is 19.5.
# You are within the ideal weight range.
# Or
# Your BMI is 32.5.
# You are overweight. You should see your doctor.
# Formula:
# bmi = (weight / (height x height)) x 703
# Standard:
# BMI 18.5 ~ 25 is normal weight.
# Constraint:
# - Ensure your program takes only numeric data.
# Don't let the user continue unless the data is valid.
#!/usr/bin/env python
from __future__ import division
import sys
def input_process(in_question):
return input(in_question) if sys.version_info >= (3,0) else raw_input(in_question)
if __name__ == '__main__':
try:
weight = int(input_process('What is your weight(pound)? '))
height = int(input_process('What is your height(inch)? '))
except:
print ('You must input only numbers.')
else:
bmi_convert_value = 703
bmi_raw_data = float(weight / (height * height))
bmi = bmi_raw_data * bmi_convert_value
print ('Your BMI is %s' % bmi)
if bmi < 18.5:
print ('You are within the ideal weight range.')
elif bmi > 25:
print ('You are overweight. You should see your doctor.')
else:
        print ('You are normal weight.')
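# Worked example (illustrative inputs): weight = 150 lb, height = 65 in
#   bmi = (150 / (65 * 65)) * 703 = 24.96  -> within the normal range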
| mit |
jamielennox/keystone | keystone/token/persistence/backends/memcache.py | 18 | 1147 | # Copyright 2013 Metacloud, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from keystone.token.persistence.backends import kvs
CONF = cfg.CONF
class Token(kvs.Token):
kvs_backend = 'openstack.kvs.Memcached'
memcached_backend = 'memcached'
def __init__(self, *args, **kwargs):
kwargs['memcached_backend'] = self.memcached_backend
kwargs['no_expiry_keys'] = [self.revocation_key]
kwargs['memcached_expire_time'] = CONF.token.expiration
kwargs['url'] = CONF.memcache.servers
super(Token, self).__init__(*args, **kwargs)
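# Illustrative keystone.conf fragment (example values) for the two options
# the constructor above reads:
#   [token]
#   expiration = 3600
#   [memcache]
#   servers = localhost:11211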
| apache-2.0 |
hagabbar/pycbc_copy | pycbc/psd/analytical.py | 2 | 4927 | #!/usr/bin/python
# Copyright (C) 2012-2016 Alex Nitz, Tito Dal Canton, Leo Singer
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides reference PSDs from LALSimulation.
"""
from pycbc.types import FrequencySeries, zeros
import lal
import lalsimulation
import numpy
# build a list of usable PSD functions from lalsimulation
_name_prefix = 'SimNoisePSD'
_name_suffix = 'Ptr'
_name_blacklist = ('FromFile', 'MirrorTherm', 'Quantum', 'Seismic', 'Shot', 'SuspTherm')
_psd_list = []
for _name in lalsimulation.__dict__:
if _name != _name_prefix and _name.startswith(_name_prefix) and not _name.endswith(_name_suffix):
_name = _name[len(_name_prefix):]
if _name not in _name_blacklist:
_psd_list.append(_name)
_psd_list = sorted(_psd_list)
# add functions wrapping lalsimulation PSDs
for _name in _psd_list:
exec("""
def %s(length, delta_f, low_freq_cutoff):
\"\"\"Return a FrequencySeries containing the %s PSD from LALSimulation.
\"\"\"
return from_string("%s", length, delta_f, low_freq_cutoff)
""" % (_name, _name, _name))
def get_psd_model_list():
""" Returns a list of available reference PSD functions.
Returns
-------
list
Returns a list of names of reference PSD functions.
"""
return get_lalsim_psd_list() + get_pycbc_psd_list()
def get_lalsim_psd_list():
"""Return a list of available reference PSD functions from LALSimulation.
"""
return _psd_list
def get_pycbc_psd_list():
""" Return a list of available reference PSD functions coded in PyCBC.
Returns
-------
list
Returns a list of names of all reference PSD functions coded in PyCBC.
"""
    pycbc_analytical_psd_list = sorted(pycbc_analytical_psds.keys())
return pycbc_analytical_psd_list
def from_string(psd_name, length, delta_f, low_freq_cutoff):
"""Generate a frequency series containing a LALSimulation PSD specified
by name.
Parameters
----------
psd_name : string
PSD name as found in LALSimulation, minus the SimNoisePSD prefix.
length : int
Length of the frequency series in samples.
delta_f : float
Frequency resolution of the frequency series.
low_freq_cutoff : float
Frequencies below this value are set to zero.
Returns
-------
psd : FrequencySeries
The generated frequency series.
"""
# check if valid PSD model
if psd_name not in get_psd_model_list():
raise ValueError(psd_name + ' not found among analytical '
'PSD functions.')
# if PSD model is in LALSimulation
if psd_name in get_lalsim_psd_list():
lalseries = lal.CreateREAL8FrequencySeries(
'', lal.LIGOTimeGPS(0), 0, delta_f, lal.DimensionlessUnit, length)
try:
func = lalsimulation.__dict__[
_name_prefix + psd_name + _name_suffix]
except KeyError:
func = lalsimulation.__dict__[_name_prefix + psd_name]
func(lalseries, low_freq_cutoff)
else:
lalsimulation.SimNoisePSD(lalseries, 0, func)
psd = FrequencySeries(lalseries.data.data, delta_f=delta_f)
# if PSD model is coded in PyCBC
else:
func = pycbc_analytical_psds[psd_name]
psd = func(length, delta_f, low_freq_cutoff)
# zero-out content below low-frequency cutoff
kmin = int(low_freq_cutoff / delta_f)
psd.data[:kmin] = 0
return psd
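# Illustrative usage (which PSD names are available depends on the installed
# lalsimulation build):
#   psd = from_string('aLIGOZeroDetHighPower', length=1025, delta_f=1.0,
#                     low_freq_cutoff=20.0)
#   # psd[k] == 0 for k < int(20.0 / 1.0)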
def flat_unity(length, delta_f, low_freq_cutoff):
""" Returns a FrequencySeries of ones above the low_frequency_cutoff.
Parameters
----------
length : int
Length of output Frequencyseries.
delta_f : float
Frequency step for output FrequencySeries.
low_freq_cutoff : int
Low-frequency cutoff for output FrequencySeries.
Returns
-------
FrequencySeries
Returns a FrequencySeries containing the unity PSD model.
"""
fseries = FrequencySeries(numpy.ones(length), delta_f=delta_f)
kmin = int(low_freq_cutoff / fseries.delta_f)
fseries.data[:kmin] = 0
return fseries
# dict of analytical PSDs coded in PyCBC
pycbc_analytical_psds = {
'flat_unity' : flat_unity,
}
| gpl-3.0 |
tomtor/QGIS | tests/src/python/test_db_manager_spatialite.py | 15 | 26811 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the DBManager SPATIALITE plugin
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-10-17'
__copyright__ = 'Copyright 2016, Even Rouault'
import qgis # NOQA
import os
import tempfile
import shutil
from osgeo import gdal, ogr, osr
from qgis.core import QgsDataSourceUri, QgsSettings
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
from plugins.db_manager.db_plugins import supportedDbTypes, createDbPlugin
from plugins.db_manager.db_plugins.plugin import TableField
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
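# For example (illustrative): GDAL_COMPUTE_VERSION(2, 3, 0) == 2030000,
# matching GDAL's own version-encoding macro.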
class TestPyQgsDBManagerSpatialite(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsDBManagerSpatialite.com")
QCoreApplication.setApplicationName("TestPyQgsDBManagerSpatialite")
QgsSettings().clear()
start_app()
cls.basetestpath = tempfile.mkdtemp()
cls.test_spatialite = os.path.join(cls.basetestpath, 'TestPyQgsDBManagerSpatialite.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(cls.test_spatialite)
lyr = ds.CreateLayer('testlayer', geom_type=ogr.wkbLineString, options=['SPATIAL_INDEX=NO'])
cls.supportsAlterFieldDefn = lyr.TestCapability(ogr.OLCAlterFieldDefn) == 1
lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['text_field'] = 'foo'
f.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING(1 2,3 4)'))
lyr.CreateFeature(f)
f = None
ds = None
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
QgsSettings().clear()
shutil.rmtree(cls.basetestpath, True)
def testSupportedDbTypes(self):
self.assertIn('spatialite', supportedDbTypes())
def testCreateDbPlugin(self):
plugin = createDbPlugin('spatialite')
self.assertIsNotNone(plugin)
def testConnect(self):
connection_name = 'testConnect'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connections = plugin.connections()
self.assertEqual(len(connections), 1)
connection = createDbPlugin('spatialite', connection_name + '_does_not_exist')
connection_succeeded = False
try:
connection.connect()
connection_succeeded = True
except:
pass
self.assertFalse(connection_succeeded, 'exception should have been raised')
connection = connections[0]
connection.connect()
connection.reconnect()
connection.remove()
self.assertEqual(len(plugin.connections()), 0)
connection = createDbPlugin('spatialite', connection_name)
connection_succeeded = False
try:
connection.connect()
connection_succeeded = True
except:
pass
self.assertFalse(connection_succeeded, 'exception should have been raised')
def testExecuteRegExp(self):
"""This test checks for REGEXP syntax support, which is enabled in Qgis.utils' spatialite_connection()"""
connection_name = 'testListLayer'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
db.connector._execute(None, 'SELECT \'ABC\' REGEXP \'[CBA]\'')
def testListLayer(self):
connection_name = 'testListLayer'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testlayer')
info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
# # GDAL 2.2.0
# expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testlayer (<a href="action:trigger/trigger_insert_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testlayer" AFTER INSERT ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testlayer'; END </td></tr><tr><td>trigger_delete_feature_count_testlayer (<a href="action:trigger/trigger_delete_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testlayer" AFTER DELETE ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testlayer'; END </td></tr></table></div></div>"""
# # GDAL 2.3.0
# expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td>geom </td></tr><tr><td>Geometry: </td><td>LINESTRING </td></tr><tr><td>Dimension: </td><td>XY </td></tr><tr><td>Spatial ref: </td><td>Undefined (-1) </td></tr><tr><td>Extent: </td><td>1.00000, 2.00000 - 3.00000, 4.00000 </td></tr></table><p><warning> No spatial index defined (<a href="action:spatialindex/create">create it</a>)</p></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>geom </td><td>LINESTRING </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testlayer (<a href="action:trigger/trigger_insert_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testlayer" AFTER INSERT ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testlayer'); END </td></tr><tr><td>trigger_delete_feature_count_testlayer (<a href="action:trigger/trigger_delete_feature_count_testlayer/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testlayer" AFTER DELETE ON "testlayer" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testlayer'); END </td></tr></table></div></div>"""
# GDAL 2.3.0
expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">ogc_fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>GEOMETRY </td><td>BLOB </td><td>Y </td><td> </td></tr><tr><td>2 </td><td>text_field </td><td>VARCHAR </td><td>Y </td><td> </td></tr></table></div></div>"""
self.assertIn(info.toHtml(), [expected_html])
connection.remove()
def testCreateRenameDeleteTable(self):
connection_name = 'testCreateRenameDeleteTable'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteTable.spatialite')
shutil.copy(self.test_spatialite, test_spatialite_new)
uri.setDatabase(test_spatialite_new)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertTrue(table.rename('newName'))
self.assertEqual(table.name, 'newName')
connection.reconnect()
db = connection.database()
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'newName')
fields = []
geom = ['geometry', 'POINT', 4326, 3]
field1 = TableField(table)
field1.name = 'fid'
field1.dataType = 'INTEGER'
field1.notNull = True
field1.primaryKey = True
field2 = TableField(table)
field2.name = 'str_field'
field2.dataType = 'TEXT'
field2.modifier = 20
fields = [field1, field2]
self.assertTrue(db.createVectorTable('newName2', fields, geom))
tables = db.tables()
self.assertEqual(len(tables), 2)
new_table = tables[1]
self.assertEqual(new_table.name, 'newName2')
fields = new_table.fields()
self.assertEqual(len(fields), 2)
# self.assertFalse(new_table.hasSpatialIndex())
# self.assertTrue(new_table.createSpatialIndex())
# self.assertTrue(new_table.hasSpatialIndex())
self.assertTrue(new_table.delete())
tables = db.tables()
self.assertEqual(len(tables), 1)
connection.remove()
def testCreateRenameDeleteFields(self):
if not self.supportsAlterFieldDefn:
return
connection_name = 'testCreateRenameDeleteFields'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite_new = os.path.join(self.basetestpath, 'testCreateRenameDeleteFields.spatialite')
shutil.copy(self.test_spatialite, test_spatialite_new)
uri.setDatabase(test_spatialite_new)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
field_before_count = len(table.fields())
field = TableField(table)
field.name = 'real_field'
field.dataType = 'DOUBLE'
self.assertTrue(table.addField(field))
self.assertEqual(len(table.fields()), field_before_count + 1)
# not supported in spatialite
# self.assertTrue(field.update('real_field2', new_type_str='TEXT (30)', new_not_null=True, new_default_str='foo'))
field = table.fields()[field_before_count]
self.assertEqual(field.name, 'real_field')
self.assertEqual(field.dataType, 'DOUBLE')
# self.assertEqual(field.notNull, 1)
# self.assertEqual(field.default, "'foo'")
# self.assertTrue(table.deleteField(field))
# self.assertEqual(len(table.fields()), field_before_count)
connection.remove()
def testTableDataModel(self):
connection_name = 'testTableDataModel'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
uri.setDatabase(self.test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testlayer')
model = table.tableDataModel(None)
self.assertEqual(model.rowCount(), 1)
self.assertEqual(model.getData(0, 0), 1) # fid
wkb = model.getData(0, 1)
geometry = ogr.CreateGeometryFromWkb(wkb)
self.assertEqual(geometry.ExportToWkt(), 'LINESTRING (1 2,3 4)')
self.assertEqual(model.getData(0, 2), 'foo')
connection.remove()
# def testRaster(self):
# if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
# return
# connection_name = 'testRaster'
# plugin = createDbPlugin('spatialite')
# uri = QgsDataSourceUri()
# test_spatialite_new = os.path.join(self.basetestpath, 'testRaster.spatialite')
# shutil.copy(self.test_spatialite, test_spatialite_new)
# mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
# mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
# sr = osr.SpatialReference()
# sr.ImportFromEPSG(4326)
# mem_ds.SetProjection(sr.ExportToWkt())
# mem_ds.GetRasterBand(1).Fill(255)
# gdal.GetDriverByName('SQLite').CreateCopy(test_spatialite_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table'])
# mem_ds = None
# uri.setDatabase(test_spatialite_new)
# self.assertTrue(plugin.addConnection(connection_name, uri))
# connection = createDbPlugin('spatialite', connection_name)
# connection.connect()
# db = connection.database()
# self.assertIsNotNone(db)
# tables = db.tables()
# self.assertEqual(len(tables), 2)
# table = None
# for i in range(2):
# if tables[i].name == 'raster_table':
# table = tables[i]
# break
# self.assertIsNotNone(table)
# info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>Unknown (<a href="action:rows/count">find out</a>) </td></tr></table></div></div><div class="section"><h2>GeoPackage</h2><div><table><tr><td>Column: </td><td> </td></tr><tr><td>Geometry: </td><td>RASTER </td></tr><tr><td>Spatial ref: </td><td>WGS 84 geodetic (4326) </td></tr><tr><td>Extent: </td><td>2.00000, 48.80000 - 2.20000, 49.00000 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">id </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>zoom_level </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>2 </td><td>tile_column </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>3 </td><td>tile_row </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>4 </td><td>tile_data </td><td>BLOB </td><td>N </td><td> </td></tr></table></div></div><div class="section"><h2>Indexes</h2><div><table class="header"><tr><th>Name </th><th>Column(s) </th></tr><tr><td>sqlite_autoindex_raster_table_1 </td><td>zoom_level<br>tile_column<br>tile_row </td></tr></table></div></div>"""
# self.assertEqual(info.toHtml(), expected_html)
# connection.remove()
# def testTwoRaster(self):
# if int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 0, 2):
# return
# connection_name = 'testTwoRaster'
# plugin = createDbPlugin('spatialite')
# uri = QgsDataSourceUri()
# test_spatialite_new = os.path.join(self.basetestpath, 'testTwoRaster.spatialite')
# shutil.copy(self.test_spatialite, test_spatialite_new)
# mem_ds = gdal.GetDriverByName('MEM').Create('', 20, 20)
# mem_ds.SetGeoTransform([2, 0.01, 0, 49, 0, -0.01])
# sr = osr.SpatialReference()
# sr.ImportFromEPSG(4326)
# mem_ds.SetProjection(sr.ExportToWkt())
# mem_ds.GetRasterBand(1).Fill(255)
# for i in range(2):
# gdal.GetDriverByName('SQLite').CreateCopy(test_spatialite_new, mem_ds, options=['APPEND_SUBDATASET=YES', 'RASTER_TABLE=raster_table%d' % (i + 1)])
# mem_ds = None
# uri.setDatabase(test_spatialite_new)
# self.assertTrue(plugin.addConnection(connection_name, uri))
# connection = createDbPlugin('spatialite', connection_name)
# connection.connect()
# db = connection.database()
# self.assertIsNotNone(db)
# tables = db.tables()
# self.assertEqual(len(tables), 3)
# table = None
# for i in range(2):
# if tables[i].name.startswith('raster_table'):
# table = tables[i]
# info = table.info()
# info.toHtml()
# connection.remove()
def testNonSpatial(self):
connection_name = 'testnonspatial'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite = os.path.join(self.basetestpath, 'testnonspatial.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(test_spatialite)
lyr = ds.CreateLayer('testnonspatial', geom_type=ogr.wkbNone)
lyr.CreateField(ogr.FieldDefn('text_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['text_field'] = 'foo'
lyr.CreateFeature(f)
f = None
ds = None
uri.setDatabase(test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
tables = db.tables()
self.assertEqual(len(tables), 1)
table = tables[0]
self.assertEqual(table.name, 'testnonspatial')
info = table.info()
# expected_html = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>Y </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div>"""
# # GDAL 2.2.0
# expected_html_2 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testnonspatial (<a href="action:trigger/trigger_insert_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testnonspatial" AFTER INSERT ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE table_name = 'testnonspatial'; END </td></tr><tr><td>trigger_delete_feature_count_testnonspatial (<a href="action:trigger/trigger_delete_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testnonspatial" AFTER DELETE ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE table_name = 'testnonspatial'; END </td></tr></table></div></div>"""
# # GDAL 2.3.0
# expected_html_3 = """<div class="section"><h2>General info</h2><div><table><tr><td>Relation type: </td><td>Table </td></tr><tr><td>Rows: </td><td>1 </td></tr></table></div></div><div class="section"><h2>Fields</h2><div><table class="header"><tr><th># </th><th>Name </th><th>Type </th><th>Null </th><th>Default </th></tr><tr><td>0 </td><td class="underline">fid </td><td>INTEGER </td><td>N </td><td> </td></tr><tr><td>1 </td><td>text_field </td><td>TEXT </td><td>Y </td><td> </td></tr></table></div></div><div class="section"><h2>Triggers</h2><div><table class="header"><tr><th>Name </th><th>Function </th></tr><tr><td>trigger_insert_feature_count_testnonspatial (<a href="action:trigger/trigger_insert_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_insert_feature_count_testnonspatial" AFTER INSERT ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count + 1 WHERE lower(table_name) = lower('testnonspatial'); END </td></tr><tr><td>trigger_delete_feature_count_testnonspatial (<a href="action:trigger/trigger_delete_feature_count_testnonspatial/delete">delete</a>) </td><td>CREATE TRIGGER "trigger_delete_feature_count_testnonspatial" AFTER DELETE ON "testnonspatial" BEGIN UPDATE spatialite_ogr_contents SET feature_count = feature_count - 1 WHERE lower(table_name) = lower('testnonspatial'); END </td></tr></table></div></div>"""
# self.assertIn(info.toHtml(), [expected_html, expected_html_2, expected_html_3], info.toHtml())
connection.remove()
def testAllGeometryTypes(self):
connection_name = 'testAllGeometryTypes'
plugin = createDbPlugin('spatialite')
uri = QgsDataSourceUri()
test_spatialite = os.path.join(self.basetestpath, 'testAllGeometryTypes.spatialite')
ds = ogr.GetDriverByName('SQLite').CreateDataSource(test_spatialite)
ds.CreateLayer('testPoint', geom_type=ogr.wkbPoint)
ds.CreateLayer('testLineString', geom_type=ogr.wkbLineString)
ds.CreateLayer('testPolygon', geom_type=ogr.wkbPolygon)
ds.CreateLayer('testMultiPoint', geom_type=ogr.wkbMultiPoint)
ds.CreateLayer('testMultiLineString', geom_type=ogr.wkbMultiLineString)
ds.CreateLayer('testMultiPolygon', geom_type=ogr.wkbMultiPolygon)
ds.CreateLayer('testGeometryCollection', geom_type=ogr.wkbGeometryCollection)
ds.CreateLayer('testCircularString', geom_type=ogr.wkbCircularString)
ds.CreateLayer('testCompoundCurve', geom_type=ogr.wkbCompoundCurve)
ds.CreateLayer('testCurvePolygon', geom_type=ogr.wkbCurvePolygon)
ds.CreateLayer('testMultiCurve', geom_type=ogr.wkbMultiCurve)
ds.CreateLayer('testMultiSurface', geom_type=ogr.wkbMultiSurface)
ds = None
uri.setDatabase(test_spatialite)
self.assertTrue(plugin.addConnection(connection_name, uri))
connection = createDbPlugin('spatialite', connection_name)
connection.connect()
db = connection.database()
self.assertIsNotNone(db)
# tables = db.tables()
# for i in range(len(tables)):
# table = tables[i]
# info = table.info()
connection.remove()
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
yhpeng-git/mxnet | example/rcnn/rcnn/tools/reeval.py | 14 | 1487 | import argparse
import cPickle
import os
import mxnet as mx
from ..logger import logger
from ..config import config, default, generate_config
from ..dataset import *
def reeval(args):
# load imdb
imdb = eval(args.dataset)(args.image_set, args.root_path, args.dataset_path)
# load detection results
cache_file = os.path.join(imdb.cache_path, imdb.name, 'detections.pkl')
with open(cache_file) as f:
detections = cPickle.load(f)
# eval
imdb.evaluate_detections(detections)
def parse_args():
parser = argparse.ArgumentParser(description='imdb test')
# general
parser.add_argument('--network', help='network name', default=default.network, type=str)
parser.add_argument('--dataset', help='dataset name', default=default.dataset, type=str)
args, rest = parser.parse_known_args()
generate_config(args.network, args.dataset)
parser.add_argument('--image_set', help='image_set name', default=default.image_set, type=str)
parser.add_argument('--root_path', help='output data folder', default=default.root_path, type=str)
parser.add_argument('--dataset_path', help='dataset path', default=default.dataset_path, type=str)
# other
parser.add_argument('--no_shuffle', help='disable random shuffle', action='store_true')
args = parser.parse_args()
return args
def main():
args = parse_args()
logger.info('Called with argument: %s' % args)
reeval(args)
if __name__ == '__main__':
main()
| apache-2.0 |
barsnadcat/evegant | Process.py | 1 | 1289 |
from unittest import TestCase
from unittest.mock import Mock
from Schemes import Blueprint
from ItemStack import ItemStack
class TestProcess(TestCase):
def test_InitProcess(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 1))
process = Process(scheme)
assert process.inputs[0].ammount == 1
def test_SetRuns(self):
scheme = Blueprint(0, "Name", 0, [ItemStack(0, 1)], ItemStack(0, 2))
process = Process(scheme)
process.SetRuns(2)
assert process.inputs[0].ammount == 2
assert process.outputs[0].ammount == 4
from copy import copy
class Process:
def __init__(self, aScheme):
self.scheme = aScheme
self.runs = 1
self.inputs = [copy(inp) for inp in aScheme.GetInputs()]
self.outputs = [copy(out) for out in aScheme.GetOutputs()]
self.runsChangedCallback = None
self.manual = False
def SetRuns(self, aRuns):
if self.runs == aRuns:
return
self.runs = aRuns
schemeInputs = self.scheme.GetInputs()
for i in range(len(self.inputs)):
self.inputs[i].ammount = schemeInputs[i].ammount * aRuns
schemeOutputs = self.scheme.GetOutputs()
for i in range(len(self.outputs)):
self.outputs[i].ammount = schemeOutputs[i].ammount * aRuns
if self.manual and self.runsChangedCallback:
self.runsChangedCallback()
| gpl-3.0 |
hippich/p2pool | p2pool/networks/bitcoin.py | 5 | 2145 | from p2pool.bitcoin import networks
# CHAIN_LENGTH = number of shares back client keeps
# REAL_CHAIN_LENGTH = maximum number of shares back client uses to compute payout
# REAL_CHAIN_LENGTH must always be <= CHAIN_LENGTH
# REAL_CHAIN_LENGTH must be changed in sync with all other clients
# changes can be done by changing one, then the other
PARENT = networks.nets['bitcoin']
SHARE_PERIOD = 30 # seconds
CHAIN_LENGTH = 24*60*60//10 # shares
REAL_CHAIN_LENGTH = 24*60*60//10 # shares
TARGET_LOOKBEHIND = 200 # shares
SPREAD = 3 # blocks
IDENTIFIER = 'fc70035c7a81bc6f'.decode('hex')
PREFIX = '2472ef181efcd37b'.decode('hex')
P2P_PORT = 9333
MIN_TARGET = 0
MAX_TARGET = 2**256//2**32 - 1
PERSIST = True
WORKER_PORT = 9332
BOOTSTRAP_ADDRS = 'forre.st vps.forre.st portals94.ns01.us 54.227.25.14 119.1.96.99 204.10.105.113 76.104.150.248 89.71.151.9 76.114.13.54 72.201.24.106 79.160.2.128 207.244.175.195 168.7.116.243 94.23.215.27 218.54.45.177 5.9.157.150 78.155.217.76 91.154.90.163 173.52.43.124 78.225.49.209 220.135.57.230 169.237.101.193:8335 98.236.74.28 204.19.23.19 98.122.165.84:8338 71.90.88.222 67.168.132.228 193.6.148.18 80.218.174.253 50.43.56.102 68.13.4.106 24.246.31.2 176.31.208.222 1.202.128.218 86.155.135.31 204.237.15.51 5.12.158.126:38007 202.60.68.242 94.19.53.147 65.130.126.82 184.56.21.182 213.112.114.73 218.242.51.246 86.173.200.160 204.15.85.157 37.59.15.50 62.217.124.203 80.87.240.47 198.61.137.12 108.161.134.32 198.154.60.183:10333 71.39.52.34:9335 46.23.72.52:9343 83.143.42.177 192.95.61.149 144.76.17.34 46.65.68.119 188.227.176.66:9336 75.142.155.245:9336 213.67.135.99 76.115.224.177 50.148.193.245 64.53.185.79 80.65.30.137 109.126.14.42 76.84.63.146 62.213.58.41 61.219.119.37 209.195.4.74 114.32.105.215 221.15.35.2 78.46.88.136 211.100.23.119 84.75.252.230 123.243.155.184:9350 68.193.128.182'.split(' ')
ANNOUNCE_CHANNEL = '#p2pool'
VERSION_CHECK = lambda v: None if 100000 <= v else 'Bitcoin version too old. Upgrade to 0.11.2 or newer!' # not a bug. BIP65 support is ensured by SOFTFORKS_REQUIRED
VERSION_WARNING = lambda v: None
SOFTFORKS_REQUIRED = set(['bip65'])
MINIMUM_PROTOCOL_VERSION = 1500
| gpl-3.0 |
jianC/kernel_htc_lexikon-3.0 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
redsolution/django-menu-proxy | menuproxy/utils.py | 1 | 10086 | # -*- coding: utf-8 -*-
from django import conf
from django.core.cache import cache
from importpath import importpath
METHODS = (
'replace', # Indicates that the point object should be replaced with object
'insert', # Indicates that object should be added to the list of children of the inside rule
'children', # Indicates that the children of object should be added to the list of children of the inside rule
)
def get_title(menu_proxy, object):
"""Correct value returned by menu_proxy.title function"""
result = menu_proxy.title(object)
if result is None:
return u''
return unicode(result)
def get_url(menu_proxy, object):
"""Correct value returned by menu_proxy.url function"""
result = menu_proxy.url(object)
if result is None:
return u''
return unicode(result)
def get_ancestors(menu_proxy, object):
"""Correct value returned by menu_proxy.ancestors function"""
result = menu_proxy.ancestors(object)
if result is None:
return []
return [value for value in result]
def get_children(menu_proxy, object, lasy):
"""
Call the ``children`` or ``lasy_children`` function of ``menu_proxy``,
passing ``object`` to it, and correct the result.
"""
if lasy:
result = menu_proxy.lasy_children(object)
else:
result = menu_proxy.children(object)
if result is None:
return []
return [value for value in result]
class DoesNotDefined(object):
"""
Class to indicate that a value was not present in a rule.
"""
pass
def try_to_import(value, exception_text):
"""
If ``value`` is not None and is not DoesNotDefined
then try to import the path specified by ``value``.
"""
if value is not DoesNotDefined and value is not None:
return importpath(value, exception_text)
return value
def get_rules():
"""Return dictionary of rules with settings"""
rules = cache.get('menuproxy.rules', None)
if rules is not None:
return rules
rules = {}
sequence = {None: []}
def add_to_sequence(rule, value):
if rule not in sequence:
sequence[rule] = []
sequence[rule].append(value)
rules[None] = MenuRule(name=None, method='replace', proxy=None, rules=rules)
for kwargs in getattr(conf.settings, 'MENU_PROXY_RULES', []):
rule = MenuRule(rules=rules, **kwargs)
rules[rule.name] = rule
add_to_sequence(rule.name, rule.name)
add_to_sequence(rule.inside, rule.name)
for name, rule in rules.iteritems():
rule.sequence = [rules[item] for item in sequence[name]]
cache.set('menuproxy.rules', rules)
return rules
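# Illustrative sketch (hypothetical, not part of this project): a
# MENU_PROXY_RULES settings entry as consumed by get_rules() above; the keys
# mirror the MenuRule constructor arguments and the proxy path is a made-up
# placeholder.
# MENU_PROXY_RULES = [
#     {'name': 'pages', 'method': 'children', 'inside': None,
#      'proxy': 'menuproxy.proxies.FlatProxy'},
# ]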
def get_front_page(rules):
"""If MENU_PROXY_FRONT_PAGED is True and there is front page return MenuItem for it"""
front_page = cache.get('menuproxy.front_page', DoesNotDefined)
if front_page is not DoesNotDefined:
return front_page
front_page = None
if getattr(conf.settings, 'MENU_PROXY_FRONT_PAGED', True):
root = MenuItem(None, DoesNotDefined)
children = root.children(False)
if children:
front_page = children[0]
cache.set('menuproxy.front_page', front_page)
return front_page
class MenuRule(object):
"""Rule"""
def __init__(self, name, method, proxy, rules, inside=None,
model=DoesNotDefined, point=DoesNotDefined, object=DoesNotDefined,
point_function=DoesNotDefined, object_function=DoesNotDefined, **other):
self.name = name
self.method = method
assert self.method in METHODS, 'menuproxy does`t support method: %s' % self.method
self.inside = inside
self.model = try_to_import(model, 'model class')
self.point = try_to_import(point, 'mount point')
if callable(self.point) and self.point is not DoesNotDefined:
self.point = self.point()
if self.point is DoesNotDefined:
self.point_function = try_to_import(point_function, 'mount point function')
else:
self.point_function = DoesNotDefined
self.object = try_to_import(object, 'mount object')
if callable(self.object) and self.object is not DoesNotDefined:
self.object = self.object()
if self.object is DoesNotDefined:
self.object_function = try_to_import(object_function, 'mount object function')
else:
self.object_function = DoesNotDefined
self.proxy = try_to_import(proxy, 'MenuProxy class')
other.update(self.__dict__)
if callable(self.proxy) and self.proxy is not DoesNotDefined:
self.proxy = self.proxy(**other)
self.rules = rules
self.sequence = []
def _get_point(self, object, forward):
if self.point is not DoesNotDefined:
return self.point
elif self.point_function is not DoesNotDefined:
return self.point_function(object, forward)
else:
return DoesNotDefined
def _get_object(self, object, forward):
if self.object is not DoesNotDefined:
return self.object
elif self.object_function is not DoesNotDefined:
return self.object_function(object, forward)
else:
return DoesNotDefined
def forward_point(self, object):
return self._get_point(object, True)
def backward_point(self, object):
return self._get_point(object, False)
def forward_object(self, object):
return self._get_object(object, True)
def backward_object(self, object):
return self._get_object(object, False)
class MenuItem(object):
"""Objects of this class will be send to templates. Class provide to walk through nested rules"""
active = False
current = False
def __init__(self, name=None, object=None):
if isinstance(object, MenuItem):
self.rules = object.rules
self.name, self.object = object.name, object.object
else:
self.rules = get_rules()
for rule in self.rules[name].sequence:
if rule.name != name and rule.method == 'replace':
point = rule.forward_point(object)
if point is DoesNotDefined or point == object:
self.name, self.object = rule.name, rule.forward_object(object)
break
else:
self.name, self.object = name, object
self.front_paged_ancestors = False
def title(self):
"""Returns title for object"""
if hasattr(self, '_title'):
return getattr(self, '_title')
title = get_title(self.rules[self.name].proxy, self.object)
setattr(self, '_title', title)
return title
def url(self):
"""Returns url for object"""
if hasattr(self, '_url'):
return getattr(self, '_url')
url = get_url(self.rules[self.name].proxy, self.object)
setattr(self, '_url', url)
return url
def ancestors(self):
"""Returns ancestors for object, started from top level"""
if hasattr(self, '_ancestors'):
return getattr(self, '_ancestors')
ancestors = []
name = self.name
object = self.object
while True:
items = get_ancestors(self.rules[name].proxy, object)
until = self.rules[name].backward_object(object)
items.reverse()
for item in items:
ancestors.insert(0, MenuItem(name, item))
if item == until:
break
method, object, name = self.rules[name].method, self.rules[name].backward_point(object), self.rules[name].inside
if name is None:
break
if method != 'replace':
ancestors.insert(0, MenuItem(name, object))
front_page = get_front_page(self.rules)
if front_page is not None:
if not ancestors or ancestors[0].object != front_page.object:
if (front_page.name, front_page.object) != (self.name, self.object):
self.front_paged_ancestors = True
ancestors.insert(0, front_page)
setattr(self, '_ancestors', ancestors)
return ancestors
def ancestors_for_menu(self):
"""
Returns ancestors for show_menu tags.
The result will not contain the front page but will contain the object itself.
"""
ancestors = self.ancestors()
if self.front_paged_ancestors:
ancestors = ancestors[1:]
else:
ancestors = ancestors[:]
ancestors.append(self)
return ancestors
def children(self, lasy=False):
"""Returns children for object"""
if lasy:
field_name = '_children_lasy'
else:
field_name = '_children'
if hasattr(self, field_name):
return getattr(self, field_name)
children = []
for rule in self.rules[self.name].sequence:
point = rule.forward_point(self.object)
if rule.name == self.name:
children += [MenuItem(self.name, item) for item in get_children(
self.rules[self.name].proxy, self.object, lasy)
]
elif point is DoesNotDefined or point == self.object:
object = rule.forward_object(self.object)
if rule.method == 'insert' and not lasy:
children += [MenuItem(rule.name, object)]
elif rule.method == 'children':
children += [MenuItem(rule.name, item) for item in get_children(
rule.proxy, object, lasy)
]
setattr(self, field_name, children)
return children
| gpl-3.0 |
monkeysecurity/zerotodocker | genie/2.2.1/example/setup.py | 11 | 4427 | #!/usr/bin/python2.7
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#######################################################################################################################
# This script assumes an empty Genie database and that Genie is running on the same host where the script
# is executed. If not, change localhost below to the hostname where Genie is running.
#######################################################################################################################
import genie2.client.wrapper
import genie2.model.Command
import genie2.model.Cluster
import sys
from subprocess import call
# Create a Genie client which proxies API calls through wrapper which retries failures based on various return codes
genie = genie2.client.wrapper.Genie2("http://localhost:8080/genie",
genie2.client.wrapper.RetryPolicy(
tries=8, none_on_404=True, no_retry_http_codes=range(400, 500)
))
commands = list()
# Create a new command instance and set the required fields
hadoop_command = genie2.model.Command.Command()
hadoop_command.name = "hadoop"
hadoop_command.user = "root"
hadoop_command.version = "2.6.0"
hadoop_command.jobType = "hadoop"
hadoop_command.tags = list()
hadoop_command.tags.append("type:hadoop")
hadoop_command.tags.append("ver:2.6.0")
hadoop_command.tags.append("misc:mr2")
hadoop_command.status = "ACTIVE"
hadoop_command.executable = "/apps/hadoop/2.6.0/bin/hadoop"
# Could set command id here or let it be set automatically
# cmd.id = "hadoop240"
hadoop_command = genie.createCommand(hadoop_command)
print >>sys.stderr, "Successfully registered the Hadoop command with Genie. Command id =", hadoop_command.id
commands.append(hadoop_command)
# Create a new command instance and set the required fields
pig_command = genie2.model.Command.Command()
pig_command.name = "pig"
pig_command.user = "root"
pig_command.version = "0.14.0"
pig_command.jobType = "pig"
pig_command.tags = list()
pig_command.tags.append("type:pig")
pig_command.tags.append("ver:0.14.0")
pig_command.tags.append("misc:mr2")
pig_command.status = "ACTIVE"
pig_command.executable = "/apps/pig/0.14.0/bin/pig"
# Could set command id here or let it be set automatically
# cmd.id = "pig14"
pig_command = genie.createCommand(pig_command)
print >>sys.stderr, "Successfully registered the Pig command with Genie. Command id =", pig_command.id
commands.append(pig_command)
cluster = genie2.model.Cluster.Cluster()
cluster.name = "h2query"
cluster.version = "2.6.0"
cluster.user = "root"
cluster.status = "UP"
cluster.clusterType = "yarn"
cluster.tags = list()
cluster.tags.append("sched:adhoc")
cluster.tags.append("type:yarn")
cluster.tags.append("ver:2.4.0")
cluster.configs = list()
cluster.configs.append("file:///apps/genie/hadoop/2.6.0/conf/core-site.xml")
cluster.configs.append("file:///apps/genie/hadoop/2.6.0/conf/mapred-site.xml")
cluster.configs.append("file:///apps/genie/hadoop/2.6.0/conf/yarn-site.xml")
# Could set cluster id here or let it be set automatically
# cluster.id = "h2prod"
cluster = genie.createCluster(cluster)
print >>sys.stderr, "Successfully registered the Hadoop cluster with Genie. Cluster id =", cluster.id
# Add the commands to the cluster
commands = genie.addCommandsForCluster(cluster.id, commands)
print >>sys.stderr, "Successfully linked the Hadoop and Pig commands to the cluster"
try:
return_code = call("hadoop fs -put /apps/genie/pig/0.14.0/tutorial/excite.log.bz2", shell=True)
if return_code == 0:
print >>sys.stderr, "Successfully put /apps/genie/pig/0.14.0/tutorial/excite.log.bz2 in HDFS"
else:
print >>sys.stderr, "Failed to put /apps/genie/pig/0.14.0/tutorial/excite.log.bz2 in HDFS:", return_code
except OSError as e:
print >>sys.stderr, "Execution failed:", e
| apache-2.0 |
vpalatin/libsigrokdecode | decoders/arm_tpiu/__init__.py | 10 | 1146 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2015 Petteri Aimonen <jpa@sigrok.mail.kapsi.fi>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
'''
This decoder stacks on top of the 'uart' decoder and decodes the frame format
of ARMv7m Trace Port Interface Unit. It filters the data coming from various
trace sources (such as ARMv7m ITM and ETM blocks) into separate streams that
can be further decoded by other PDs.
'''
from .pd import Decoder
| gpl-3.0 |
clbarnes/bctpy | test/basic_test.py | 2 | 1469 | from .load_samples import load_sample, load_directed_sample
import bct
import numpy as np
def test_threshold_proportional():
x = load_sample()
x = bct.threshold_proportional(x, .5, copy=True)
assert np.allclose(np.sum(x), 22548.51206965)
def test_threshold_proportional_nocopy():
x = load_sample()
bct.threshold_proportional(x, .3, copy=False)
assert np.allclose(np.sum(x), 15253.75425406)
def test_threshold_proportional_directed():
x = load_directed_sample()
bct.threshold_proportional(x, .28, copy=False)
assert np.sum(x) == 3410
# assert np.allclose( np.sum(x), 32852.72485433 )
def test_threshold_absolute():
x = load_sample()
x = bct.threshold_absolute(x, 2.1)
assert np.allclose(np.sum(x), 13280.17768104)
def test_strengths_und():
x = load_sample()
s = bct.strengths_und(x)
assert np.allclose(np.sum(x), 38967.38702018)
def test_degrees_und():
x = load_sample()
s = bct.degrees_und(bct.threshold_proportional(x, .26))
assert np.sum(s) == 4916
def test_binarize():
x = load_sample()
s = bct.binarize(bct.threshold_proportional(x, .41))
assert np.sum(s) == 7752
def test_normalize():
x = load_sample()
s = bct.normalize(bct.threshold_proportional(x, .79))
assert np.allclose(np.sum(s), 3327.96285964)
def test_invert():
x = load_sample()
s = bct.invert(bct.threshold_proportional(x, .13))
assert np.allclose(np.sum(s), 790.43107587)
| gpl-3.0 |
irisfeng/CodeScanner | SZQRCodeViewController/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/pylib/gyp/generator/msvs.py | 37 | 125839 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import ntpath
import os
import posixpath
import re
import subprocess
import sys
import gyp.common
import gyp.easy_xml as easy_xml
import gyp.MSVSNew as MSVSNew
import gyp.MSVSProject as MSVSProject
import gyp.MSVSSettings as MSVSSettings
import gyp.MSVSToolFile as MSVSToolFile
import gyp.MSVSUserFile as MSVSUserFile
import gyp.MSVSUtil as MSVSUtil
import gyp.MSVSVersion as MSVSVersion
from gyp.common import GypError
# TODO: Remove once bots are on 2.7, http://crbug.com/241769
def _import_OrderedDict():
import collections
try:
return collections.OrderedDict
except AttributeError:
import gyp.ordered_dict
return gyp.ordered_dict.OrderedDict
OrderedDict = _import_OrderedDict()
# Regular expression for validating Visual Studio GUIDs. If the GUID
# contains lowercase hex letters, MSVS will be fine. However,
# IncrediBuild BuildConsole will parse the solution file, but then
# silently skip building the target causing hard to track down errors.
# Note that this only happens with the BuildConsole, and does not occur
# if IncrediBuild is executed from inside Visual Studio. This regex
# validates that the string looks like a GUID with all uppercase hex
# letters.
VALID_MSVS_GUID_CHARS = re.compile('^[A-F0-9\-]+$')
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '.exe',
'STATIC_LIB_PREFIX': '',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.lib',
'SHARED_LIB_SUFFIX': '.dll',
'INTERMEDIATE_DIR': '$(IntDir)',
'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
'OS': 'win',
'PRODUCT_DIR': '$(OutDir)',
'LIB_DIR': '$(OutDir)lib',
'RULE_INPUT_ROOT': '$(InputName)',
'RULE_INPUT_DIRNAME': '$(InputDir)',
'RULE_INPUT_EXT': '$(InputExt)',
'RULE_INPUT_NAME': '$(InputFileName)',
'RULE_INPUT_PATH': '$(InputPath)',
'CONFIGURATION_NAME': '$(ConfigurationName)',
}
# The msvs specific sections that hold paths
generator_additional_path_sections = [
'msvs_cygwin_dirs',
'msvs_props',
]
generator_additional_non_configuration_keys = [
'msvs_cygwin_dirs',
'msvs_cygwin_shell',
'msvs_large_pdb',
'msvs_shard',
'msvs_external_builder',
'msvs_external_builder_out_dir',
'msvs_external_builder_build_cmd',
'msvs_external_builder_clean_cmd',
'msvs_external_builder_clcompile_cmd',
]
# List of precompiled header related keys.
precomp_keys = [
'msvs_precompiled_header',
'msvs_precompiled_source',
]
cached_username = None
cached_domain = None
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def discard(self, key):
if key in self.map:
key, prev, next = self.map.pop(key)
prev[2] = next
next[1] = prev
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
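# Usage sketch (illustrative): OrderedSet deduplicates while preserving
# first-insertion order, e.g. list(OrderedSet(['b', 'a', 'b', 'c'])) yields
# ['b', 'a', 'c'].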
# TODO(gspencer): Switch the os.environ calls to be
# win32api.GetDomainName() and win32api.GetUserName() once the
# python version in depot_tools has been updated to work on Vista
# 64-bit.
def _GetDomainAndUserName():
if sys.platform not in ('win32', 'cygwin'):
return ('DOMAIN', 'USERNAME')
global cached_username
global cached_domain
if not cached_domain or not cached_username:
domain = os.environ.get('USERDOMAIN')
username = os.environ.get('USERNAME')
if not domain or not username:
call = subprocess.Popen(['net', 'config', 'Workstation'],
stdout=subprocess.PIPE)
config = call.communicate()[0]
username_re = re.compile('^User name\s+(\S+)', re.MULTILINE)
username_match = username_re.search(config)
if username_match:
username = username_match.group(1)
domain_re = re.compile('^Logon domain\s+(\S+)', re.MULTILINE)
domain_match = domain_re.search(config)
if domain_match:
domain = domain_match.group(1)
cached_domain = domain
cached_username = username
return (cached_domain, cached_username)
fixpath_prefix = None
def _NormalizedSource(source):
"""Normalize the path.
But not if that gets rid of a variable, as this may expand to something
larger than one directory.
Arguments:
source: The path to be normalized.
Returns:
The normalized path.
"""
normalized = os.path.normpath(source)
if source.count('$') == normalized.count('$'):
source = normalized
return source
def _FixPath(path):
"""Convert paths to a form that will make sense in a vcproj file.
Arguments:
path: The path to convert, may contain / etc.
Returns:
The path with all slashes made into backslashes.
"""
if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
path = os.path.join(fixpath_prefix, path)
path = path.replace('/', '\\')
path = _NormalizedSource(path)
if path and path[-1] == '\\':
path = path[:-1]
return path
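# For example (illustrative, assuming fixpath_prefix is unset):
# _FixPath('a/b/') returns 'a\\b', while a variable-only path such as
# '$(OutDir)' passes through unchanged.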
def _FixPaths(paths):
"""Fix each of the paths of the list."""
return [_FixPath(i) for i in paths]
def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
list_excluded=True, msvs_version=None):
"""Converts a list split source file paths into a vcproj folder hierarchy.
Arguments:
sources: A list of source file paths split.
prefix: A list of source file path layers meant to apply to each of sources.
excluded: A set of excluded files.
msvs_version: A MSVSVersion object.
Returns:
A hierarchy of filenames and MSVSProject.Filter objects that matches the
layout of the source tree.
For example:
_ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
prefix=['joe'])
-->
[MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
"""
if not prefix: prefix = []
result = []
excluded_result = []
folders = OrderedDict()
# Gather files into the final result, excluded, or folders.
for s in sources:
if len(s) == 1:
filename = _NormalizedSource('\\'.join(prefix + s))
if filename in excluded:
excluded_result.append(filename)
else:
result.append(filename)
elif msvs_version and not msvs_version.UsesVcxproj():
# For MSVS 2008 and earlier, we need to process all files before walking
# the sub folders.
if not folders.get(s[0]):
folders[s[0]] = []
folders[s[0]].append(s[1:])
else:
contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(s[0], contents=contents)
result.append(contents)
# Add a folder for excluded files.
if excluded_result and list_excluded:
excluded_folder = MSVSProject.Filter('_excluded_files',
contents=excluded_result)
result.append(excluded_folder)
if msvs_version and msvs_version.UsesVcxproj():
return result
# Populate all the folders.
for f in folders:
contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
excluded=excluded,
list_excluded=list_excluded,
msvs_version=msvs_version)
contents = MSVSProject.Filter(f, contents=contents)
result.append(contents)
return result
def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
if not value: return
_ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
# TODO(bradnelson): ugly hack, fix this more generally!!!
if 'Directories' in setting or 'Dependencies' in setting:
if type(value) == str:
value = value.replace('/', '\\')
else:
value = [i.replace('/', '\\') for i in value]
if not tools.get(tool_name):
tools[tool_name] = dict()
tool = tools[tool_name]
if tool.get(setting):
if only_if_unset: return
if type(tool[setting]) == list and type(value) == list:
tool[setting] += value
else:
raise TypeError(
'Appending "%s" to a non-list setting "%s" for tool "%s" is '
'not allowed, previous value: %s' % (
value, setting, tool_name, str(tool[setting])))
else:
tool[setting] = value
def _ConfigPlatform(config_data):
return config_data.get('msvs_configuration_platform', 'Win32')
def _ConfigBaseName(config_name, platform_name):
if config_name.endswith('_' + platform_name):
return config_name[0:-len(platform_name) - 1]
else:
return config_name
def _ConfigFullName(config_name, config_data):
platform_name = _ConfigPlatform(config_data)
return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
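# For example (illustrative): _ConfigFullName('Debug_Win32',
# {'msvs_configuration_platform': 'Win32'}) returns 'Debug|Win32', the
# 'Configuration|Platform' form that MSVS solution/project files expect.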
def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
quote_cmd, do_setup_env):
if [x for x in cmd if '$(InputDir)' in x]:
input_dir_preamble = (
'set INPUTDIR=$(InputDir)\n'
'set INPUTDIR=%INPUTDIR:$(ProjectDir)=%\n'
'set INPUTDIR=%INPUTDIR:~0,-1%\n'
)
else:
input_dir_preamble = ''
if cygwin_shell:
# Find path to cygwin.
cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
# Prepare command.
direct_cmd = cmd
direct_cmd = [i.replace('$(IntDir)',
'`cygpath -m "${INTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(OutDir)',
'`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
direct_cmd = [i.replace('$(InputDir)',
'`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
if has_input_path:
direct_cmd = [i.replace('$(InputPath)',
'`cygpath -m "${INPUTPATH}"`')
for i in direct_cmd]
direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
# direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
direct_cmd = ' '.join(direct_cmd)
# TODO(quote): regularize quoting path names throughout the module
cmd = ''
if do_setup_env:
cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
cmd += 'set CYGWIN=nontsec&& '
if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
if direct_cmd.find('INTDIR') >= 0:
cmd += 'set INTDIR=$(IntDir)&& '
if direct_cmd.find('OUTDIR') >= 0:
cmd += 'set OUTDIR=$(OutDir)&& '
if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
cmd += 'set INPUTPATH=$(InputPath) && '
cmd += 'bash -c "%(cmd)s"'
cmd = cmd % {'cygwin_dir': cygwin_dir,
'cmd': direct_cmd}
return input_dir_preamble + cmd
else:
# Convert cat --> type to mimic unix.
if cmd[0] == 'cat':
command = ['type']
else:
command = [cmd[0].replace('/', '\\')]
# Add call before command to ensure that commands can be tied together one
# after the other without aborting in Incredibuild, since IB makes a bat
# file out of the raw command string, and some commands (like python) are
# actually batch files themselves.
command.insert(0, 'call')
# Fix the paths
# TODO(quote): This is a really ugly heuristic, and will miss path fixing
# for arguments like "--arg=path" or "/opt:path".
# If the argument starts with a slash or dash, it's probably a command line
# switch
arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
if quote_cmd:
# Support a mode for using cmd directly.
# Convert any paths to native form (first element is used directly).
# TODO(quote): regularize quoting path names throughout the module
arguments = ['"%s"' % i for i in arguments]
# Collapse into a single command.
return input_dir_preamble + ' '.join(command + arguments)
def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
# Currently this weird argument munging is used to duplicate the way a
# python script would need to be run as part of the chrome tree.
# Eventually we should add some sort of rule_default option to set this
# per project. For now the behavior chrome needs is the default.
mcs = rule.get('msvs_cygwin_shell')
if mcs is None:
mcs = int(spec.get('msvs_cygwin_shell', 1))
elif isinstance(mcs, str):
mcs = int(mcs)
quote_cmd = int(rule.get('msvs_quote_cmd', 1))
return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
quote_cmd, do_setup_env=do_setup_env)
def _AddActionStep(actions_dict, inputs, outputs, description, command):
"""Merge action into an existing list of actions.
Care must be taken so that actions which have overlapping inputs either don't
get assigned to the same input, or get collapsed into one.
Arguments:
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
inputs: list of inputs
outputs: list of outputs
description: description of the action
command: command line to execute
"""
# Require there to be at least one input (call sites will ensure this).
assert inputs
action = {
'inputs': inputs,
'outputs': outputs,
'description': description,
'command': command,
}
# Pick where to stick this action.
# While less than optimal in terms of build time, attach them to the first
# input for now.
chosen_input = inputs[0]
# Add it there.
if chosen_input not in actions_dict:
actions_dict[chosen_input] = []
actions_dict[chosen_input].append(action)
def _AddCustomBuildToolForMSVS(p, spec, primary_input,
inputs, outputs, description, cmd):
"""Add a custom build tool to execute something.
Arguments:
p: the target project
spec: the target project dict
primary_input: input file to attach the build tool to
inputs: list of inputs
outputs: list of outputs
description: description of the action
cmd: command line to execute
"""
inputs = _FixPaths(inputs)
outputs = _FixPaths(outputs)
tool = MSVSProject.Tool(
'VCCustomBuildTool',
{'Description': description,
'AdditionalDependencies': ';'.join(inputs),
'Outputs': ';'.join(outputs),
'CommandLine': cmd,
})
# Add to the properties of primary input for each config.
for config_name, c_data in spec['configurations'].iteritems():
p.AddFileConfig(_FixPath(primary_input),
_ConfigFullName(config_name, c_data), tools=[tool])
def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
"""Add actions accumulated into an actions_dict, merging as needed.
Arguments:
p: the target project
spec: the target project dict
actions_dict: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
"""
for primary_input in actions_dict:
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions_dict[primary_input]:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
commands.append(action['command'])
# Add the custom build step for one input file.
description = ', and also '.join(descriptions)
command = '\r\n'.join(commands)
_AddCustomBuildToolForMSVS(p, spec,
primary_input=primary_input,
inputs=inputs,
outputs=outputs,
description=description,
cmd=command)
def _RuleExpandPath(path, input_file):
"""Given the input file to which a rule applied, string substitute a path.
Arguments:
path: a path to string expand
input_file: the file to which the rule applied.
Returns:
The string substituted path.
"""
path = path.replace('$(InputName)',
os.path.splitext(os.path.split(input_file)[1])[0])
path = path.replace('$(InputDir)', os.path.dirname(input_file))
path = path.replace('$(InputExt)',
os.path.splitext(os.path.split(input_file)[1])[1])
path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
path = path.replace('$(InputPath)', input_file)
return path
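# For example (illustrative): with input_file 'dir/foo.idl',
# _RuleExpandPath('$(InputName)_h.h', 'dir/foo.idl') returns 'foo_h.h'
# and _RuleExpandPath('$(InputDir)', 'dir/foo.idl') returns 'dir'.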
def _FindRuleTriggerFiles(rule, sources):
"""Find the list of files which a particular rule applies to.
Arguments:
rule: the rule in question
sources: the set of all known source files for this project
Returns:
The list of sources that trigger a particular rule.
"""
return rule.get('rule_sources', [])
def _RuleInputsAndOutputs(rule, trigger_file):
"""Find the inputs and outputs generated by a rule.
Arguments:
rule: the rule in question.
trigger_file: the main trigger for this rule.
Returns:
The pair of (inputs, outputs) involved in this rule.
"""
raw_inputs = _FixPaths(rule.get('inputs', []))
raw_outputs = _FixPaths(rule.get('outputs', []))
inputs = OrderedSet()
outputs = OrderedSet()
inputs.add(trigger_file)
for i in raw_inputs:
inputs.add(_RuleExpandPath(i, trigger_file))
for o in raw_outputs:
outputs.add(_RuleExpandPath(o, trigger_file))
return (inputs, outputs)
def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
"""Generate a native rules file.
Arguments:
p: the target project
rules: the set of rules to include
output_dir: the directory in which the project/gyp resides
spec: the project dict
options: global generator options
"""
rules_filename = '%s%s.rules' % (spec['target_name'],
options.suffix)
rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
spec['target_name'])
# Add each rule.
for r in rules:
rule_name = r['rule_name']
rule_ext = r['extension']
inputs = _FixPaths(r.get('inputs', []))
outputs = _FixPaths(r.get('outputs', []))
# Skip a rule with no action and no inputs.
if 'action' not in r and not r.get('rule_sources', []):
continue
cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
do_setup_env=True)
rules_file.AddCustomBuildRule(name=rule_name,
description=r.get('message', rule_name),
extensions=[rule_ext],
additional_dependencies=inputs,
outputs=outputs,
cmd=cmd)
# Write out rules file.
rules_file.WriteIfChanged()
# Add rules file to project.
p.AddToolFile(rules_filename)
def _Cygwinify(path):
path = path.replace('$(OutDir)', '$(OutDirCygwin)')
path = path.replace('$(IntDir)', '$(IntDirCygwin)')
return path
def _GenerateExternalRules(rules, output_dir, spec,
sources, options, actions_to_add):
"""Generate an external makefile to do a set of rules.
Arguments:
rules: the list of rules to include
output_dir: path containing project and gyp files
spec: project specification data
sources: set of sources known
options: global generator options
actions_to_add: The list of actions we will add to.
"""
filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
# Find cygwin style versions of some paths.
mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
# Gather stuff needed to emit all: target.
all_inputs = OrderedSet()
all_outputs = OrderedSet()
all_output_dirs = OrderedSet()
first_outputs = []
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
all_inputs.update(OrderedSet(inputs))
all_outputs.update(OrderedSet(outputs))
# Only use one target from each rule as the dependency for
# 'all' so we don't try to build each rule multiple times.
first_outputs.append(list(outputs)[0])
# Get the unique output directories for this rule.
output_dirs = [os.path.split(i)[0] for i in outputs]
for od in output_dirs:
all_output_dirs.add(od)
first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
# Write out all: target, including mkdir for each output directory.
mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
for od in all_output_dirs:
if od:
mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
mk_file.write('\n')
# Define how each output is generated.
for rule in rules:
trigger_files = _FindRuleTriggerFiles(rule, sources)
for tf in trigger_files:
# Get all the inputs and outputs for this rule for this trigger file.
inputs, outputs = _RuleInputsAndOutputs(rule, tf)
inputs = [_Cygwinify(i) for i in inputs]
outputs = [_Cygwinify(i) for i in outputs]
# Prepare the command line for this rule.
cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
cmd = ['"%s"' % i for i in cmd]
cmd = ' '.join(cmd)
# Add it to the makefile.
mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
mk_file.write('\t%s\n\n' % cmd)
# Close up the file.
mk_file.close()
# Add makefile to list of sources.
sources.add(filename)
# Add a build action to call makefile.
cmd = ['make',
'OutDir=$(OutDir)',
'IntDir=$(IntDir)',
'-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
'-f', filename]
cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
# Insert makefile as 0'th input, so it gets the action attached there,
# as this is easier to understand in the IDE.
all_inputs = list(all_inputs)
all_inputs.insert(0, filename)
_AddActionStep(actions_to_add,
inputs=_FixPaths(all_inputs),
outputs=_FixPaths(all_outputs),
description='Running external rules for %s' %
spec['target_name'],
command=cmd)
def _EscapeEnvironmentVariableExpansion(s):
"""Escapes % characters.
Escapes any % characters so that Windows-style environment variable
expansions will leave them alone.
See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
to understand why we have to do this.
Args:
s: The string to be escaped.
Returns:
The escaped string.
"""
s = s.replace('%', '%%')
return s
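# Illustrative example (hypothetical value): doubling turns a literal percent
# into an escaped one, e.g. _EscapeEnvironmentVariableExpansion('100%')
# returns '100%%'.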
quote_replacer_regex = re.compile(r'(\\*)"')
def _EscapeCommandLineArgumentForMSVS(s):
"""Escapes a Windows command-line argument.
So that the Win32 CommandLineToArgv function will turn the escaped result back
into the original string.
See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
("Parsing C++ Command-Line Arguments") to understand why we have to do
this.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a literal quote, CommandLineToArgv requires an odd number of
# backslashes preceding it, and it produces half as many literal backslashes
# (rounded down). So we need to produce 2n+1 backslashes.
return 2 * match.group(1) + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex.sub(_Replace, s)
# Now add unescaped quotes so that any whitespace is interpreted literally.
s = '"' + s + '"'
return s
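# Illustrative example (hypothetical input): each quote gains an escaping
# backslash and the whole argument is then wrapped in quotes, so
#   _EscapeCommandLineArgumentForMSVS('say "hi"')
# returns the literal text  "say \"hi\""  (outer quotes included).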
delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
def _EscapeVCProjCommandLineArgListItem(s):
"""Escapes command line arguments for MSVS.
The VCProj format stores string lists in a single string using commas and
semi-colons as separators, which must be quoted if they are to be
interpreted literally. However, command-line arguments may already have
quotes, and the VCProj parser is ignorant of the backslash escaping
convention used by CommandLineToArgv, so the command-line quotes and the
VCProj quotes may not be the same quotes. So to store a general
command-line argument in a VCProj list, we need to parse the existing
quoting according to VCProj's convention and quote any delimiters that are
not already quoted by that convention. The quotes that we add will also be
seen by CommandLineToArgv, so if backslashes precede them then we also have
to escape those backslashes according to the CommandLineToArgv
convention.
Args:
s: the string to be escaped.
Returns:
the escaped string.
"""
def _Replace(match):
# For a non-literal quote, CommandLineToArgv requires an even number of
# backslashes preceding it, and it produces half as many literal
# backslashes. So we need to produce 2n backslashes.
return 2 * match.group(1) + '"' + match.group(2) + '"'
segments = s.split('"')
# The unquoted segments are at the even-numbered indices.
for i in range(0, len(segments), 2):
segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
# Concatenate back into a single string
s = '"'.join(segments)
if len(segments) % 2 == 0:
# String ends while still quoted according to VCProj's convention. This
# means the delimiter and the next list item that follow this one in the
# .vcproj file will be misinterpreted as part of this item. There is nothing
# we can do about this. Adding an extra quote would correct the problem in
# the VCProj but cause the same problem on the final command-line. Moving
the item to the end of the list does work, but that's only possible if
# there's only one such item. Let's just warn the user.
print >> sys.stderr, ('Warning: MSVS may misinterpret the odd number of ' +
'quotes in ' + s)
return s
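# Illustrative example (hypothetical input): an unquoted delimiter gets
# wrapped in VCProj-style quotes, so
#   _EscapeVCProjCommandLineArgListItem('/opt:a,b')
# returns the literal text  /opt:a","b  -- the comma is now quoted according
# to the VCProj list convention.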
def _EscapeCppDefineForMSVS(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSVS(s)
s = _EscapeVCProjCommandLineArgListItem(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
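# Illustrative example (hypothetical define): the escaping passes wrap the
# define in quotes, then '#' is octal-encoded (ord('#') == 35 == 043), so
#   _EscapeCppDefineForMSVS('A#')  ->  the literal text  "A\043"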
quote_replacer_regex2 = re.compile(r'(\\+)"')
def _EscapeCommandLineArgumentForMSBuild(s):
"""Escapes a Windows command-line argument for use by MSBuild."""
def _Replace(match):
return (len(match.group(1)) / 2 * 4) * '\\' + '\\"'
# Escape all quotes so that they are interpreted literally.
s = quote_replacer_regex2.sub(_Replace, s)
return s
def _EscapeMSBuildSpecialCharacters(s):
escape_dictionary = {
'%': '%25',
'$': '%24',
'@': '%40',
"'": '%27',
';': '%3B',
'?': '%3F',
'*': '%2A'
}
result = ''.join([escape_dictionary.get(c, c) for c in s])
return result
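# Illustrative example (hypothetical value): each special character maps to
# its %-encoded form, e.g. _EscapeMSBuildSpecialCharacters('50%$') returns
# '50%25%24'.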
def _EscapeCppDefineForMSBuild(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = _EscapeEnvironmentVariableExpansion(s)
s = _EscapeCommandLineArgumentForMSBuild(s)
s = _EscapeMSBuildSpecialCharacters(s)
# cl.exe replaces literal # characters with = in preprocessor definitions for
# some reason. Octal-encode to work around that.
s = s.replace('#', '\\%03o' % ord('#'))
return s
def _GenerateRulesForMSVS(p, output_dir, options, spec,
sources, excluded_sources,
actions_to_add):
"""Generate all the rules for a particular project.
Arguments:
p: the project
output_dir: directory to emit rules to
options: global options passed to the generator
spec: the specification for this project
sources: the set of all known source files in this project
excluded_sources: the set of sources excluded from normal processing
actions_to_add: deferred list of actions to add in
"""
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
# Handle rules that use a native rules file.
if rules_native:
_GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
# Handle external rules (non-native rules).
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
def _AdjustSourcesForRules(spec, rules, sources, excluded_sources):
# Add outputs generated by each rule (if applicable).
for rule in rules:
# Only process rules whose outputs should be treated as sources.
if int(rule.get('process_outputs_as_sources', False)):
# Add in the outputs from this rule.
trigger_files = _FindRuleTriggerFiles(rule, sources)
for trigger_file in trigger_files:
inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
inputs = OrderedSet(_FixPaths(inputs))
outputs = OrderedSet(_FixPaths(outputs))
inputs.remove(_FixPath(trigger_file))
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
sources.update(outputs)
def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
"""Take inputs with actions attached out of the list of exclusions.
Arguments:
excluded_sources: list of source files not to be built.
actions_to_add: dict of actions keyed on source file they're attached to.
Returns:
excluded_sources with files that have actions attached removed.
"""
must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
return [s for s in excluded_sources if s not in must_keep]
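# Illustrative example (hypothetical file names, assuming _FixPaths leaves
# them unchanged): with actions_to_add keyed on 'a.idl',
#   _FilterActionsFromExcluded(['a.idl', 'b.idl'], actions_to_add)
# returns ['b.idl'].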
def _GetDefaultConfiguration(spec):
return spec['configurations'][spec['default_configuration']]
def _GetGuidOfProject(proj_path, spec):
"""Get the guid for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
Returns:
the guid.
Raises:
ValueError: if the specified GUID is invalid.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
# Decide the guid of the project.
guid = default_config.get('msvs_guid')
if guid:
if VALID_MSVS_GUID_CHARS.match(guid) is None:
raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
(guid, VALID_MSVS_GUID_CHARS.pattern))
guid = '{%s}' % guid
guid = guid or MSVSNew.MakeGuid(proj_path)
return guid
def _GetMsbuildToolsetOfProject(proj_path, spec, version):
"""Get the platform toolset for the project.
Arguments:
proj_path: Path of the vcproj or vcxproj file to generate.
spec: The target dictionary containing the properties of the target.
version: The MSVSVersion object.
Returns:
the platform toolset string or None.
"""
# Pluck out the default configuration.
default_config = _GetDefaultConfiguration(spec)
toolset = default_config.get('msbuild_toolset')
if not toolset and version.DefaultToolset():
toolset = version.DefaultToolset()
return toolset
def _GenerateProject(project, options, version, generator_flags):
"""Generates a vcproj file.
Arguments:
project: the MSVSProject object.
options: global generator options.
version: the MSVSVersion object.
generator_flags: dict of generator-specific flags.
Returns:
A list of source files that cannot be found on disk.
"""
default_config = _GetDefaultConfiguration(project.spec)
# Skip emitting anything if told to with msvs_existing_vcproj option.
if default_config.get('msvs_existing_vcproj'):
return []
if version.UsesVcxproj():
return _GenerateMSBuildProject(project, options, version, generator_flags)
else:
return _GenerateMSVSProject(project, options, version, generator_flags)
def _GenerateMSVSProject(project, options, version, generator_flags):
"""Generates a .vcproj file. It may create .rules and .user files too.
Arguments:
project: The project object we will generate the file for.
options: Global options passed to the generator.
version: The VisualStudioVersion object.
generator_flags: dict of generator-specific flags.
"""
spec = project.spec
gyp.common.EnsureDirExists(project.path)
platforms = _GetUniquePlatforms(spec)
p = MSVSProject.Writer(project.path, version, spec['target_name'],
project.guid, platforms)
# Get directory project file is in.
project_dir = os.path.split(project.path)[0]
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
config_type = _GetMSVSConfigurationType(spec, project.build_file)
for config_name, config in spec['configurations'].iteritems():
_AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
# Prepare list of sources and excluded sources.
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
_GenerateRulesForMSVS(p, project_dir, options, spec,
sources, excluded_sources,
actions_to_add)
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
sources, excluded_sources,
list_excluded, version))
# Add in files.
missing_sources = _VerifySourcesExist(sources, project_dir)
p.AddFiles(sources)
_AddToolFilesToMSVS(p, spec)
_HandlePreCompiledHeaders(p, sources, spec)
_AddActions(actions_to_add, spec, relative_path_of_gyp_file)
_AddCopies(actions_to_add, spec)
_WriteMSVSUserFile(project.path, version, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
_ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded)
_AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
# Write it out.
p.WriteIfChanged()
return missing_sources
def _GetUniquePlatforms(spec):
"""Returns the list of unique platforms for this spec, e.g ['win32', ...].
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of unique platform strings.
"""
# Gather list of unique platforms.
platforms = OrderedSet()
for configuration in spec['configurations']:
platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
platforms = list(platforms)
return platforms
def _CreateMSVSUserFile(proj_path, version, spec):
"""Generates a .user file for the user running this Gyp program.
Arguments:
proj_path: The path of the project file being created. The .user file
shares the same path (with an appropriate suffix).
version: The VisualStudioVersion object.
spec: The target dictionary containing the properties of the target.
Returns:
The MSVSUserFile object created.
"""
(domain, username) = _GetDomainAndUserName()
vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
user_file = MSVSUserFile.Writer(vcuser_filename, version,
spec['target_name'])
return user_file
def _GetMSVSConfigurationType(spec, build_file):
"""Returns the configuration type for this project.
It's a number defined by Microsoft. Raises GypError if the target type is
missing or invalid.
Args:
spec: The target dictionary containing the properties of the target.
build_file: The path of the gyp file.
Returns:
The configuration type, as a string holding the Microsoft-defined number.
"""
try:
config_type = {
'executable': '1', # .exe
'shared_library': '2', # .dll
'loadable_module': '2', # .dll
'static_library': '4', # .lib
'none': '10', # Utility type
}[spec['type']]
except KeyError:
if spec.get('type'):
raise GypError('Target type %s is not a valid target type for '
'target %s in %s.' %
(spec['type'], spec['target_name'], build_file))
else:
raise GypError('Missing type field for target %s in %s.' %
(spec['target_name'], build_file))
return config_type
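# Illustrative example (hypothetical spec): a DLL target maps to the
# Microsoft-defined configuration type '2', e.g.
#   _GetMSVSConfigurationType({'type': 'shared_library',
#                              'target_name': 't'}, 'a.gyp')  ->  '2'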
def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
"""Adds a configuration to the MSVS project.
Many settings in a vcproj file are specific to a configuration. This
function generates the main part of the vcproj file that is configuration
specific.
Arguments:
p: The target project being generated.
spec: The target dictionary containing the properties of the target.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
config: The dictionary that defines the special processing to be done
for this configuration.
"""
# Get the information for this configuration
include_dirs, resource_include_dirs = _GetIncludeDirs(config)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(config)
out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
defines = _GetDefines(config)
defines = [_EscapeCppDefineForMSVS(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(config)
prebuild = config.get('msvs_prebuild')
postbuild = config.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = config.get('msvs_precompiled_header')
# Prepare the list of tools as a dictionary.
tools = dict()
# Add in user specified msvs_settings.
msvs_settings = config.get('msvs_settings', {})
MSVSSettings.ValidateMSVSSettings(msvs_settings)
# Prevent default library inheritance from the environment.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
for tool in msvs_settings:
settings = config['msvs_settings'][tool]
for setting in settings:
_ToolAppend(tools, tool, setting, settings[setting])
# Add the information to the appropriate tool
_ToolAppend(tools, 'VCCLCompilerTool',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(tools, 'VCResourceCompilerTool',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries.
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
_ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
# Add defines.
_ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
_ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
defines)
# Change program database directory to prevent collisions.
_ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
'$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
# Add disabled warnings.
_ToolAppend(tools, 'VCCLCompilerTool',
'DisableSpecificWarnings', disabled_warnings)
# Add Pre-build.
_ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
# Add Post-build.
_ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
_ToolAppend(tools, 'VCCLCompilerTool',
'PrecompiledHeaderThrough', precompiled_header)
_ToolAppend(tools, 'VCCLCompilerTool',
'ForcedIncludeFiles', precompiled_header)
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
_AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
def _GetIncludeDirs(config):
"""Returns the list of directories to be used for #include directives.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
# TODO(bradnelson): include_dirs should really be flexible enough not to
# require this sort of thing.
include_dirs = (
config.get('include_dirs', []) +
config.get('msvs_system_include_dirs', []))
resource_include_dirs = config.get('resource_include_dirs', include_dirs)
include_dirs = _FixPaths(include_dirs)
resource_include_dirs = _FixPaths(resource_include_dirs)
return include_dirs, resource_include_dirs
def _GetLibraryDirs(config):
"""Returns the list of directories to be used for library search paths.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of directory paths.
"""
library_dirs = config.get('library_dirs', [])
library_dirs = _FixPaths(library_dirs)
return library_dirs
def _GetLibraries(spec):
"""Returns the list of libraries for this configuration.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
The list of library names and paths.
"""
libraries = spec.get('libraries', [])
# Strip out -l, as it is not used on Windows (but is needed so we can pass
# in libraries that are assumed to be in the default library path).
# Also remove duplicate entries, keeping only the last occurrence of each
# library, while preserving order.
found = OrderedSet()
unique_libraries_list = []
for entry in reversed(libraries):
library = re.sub(r'^-l', '', entry)
if not os.path.splitext(library)[1]:
library += '.lib'
if library not in found:
found.add(library)
unique_libraries_list.append(library)
unique_libraries_list.reverse()
return unique_libraries_list
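# Illustrative example (hypothetical spec): '-l' prefixes are stripped, a
# '.lib' suffix is added where missing, and only the last duplicate survives:
#   _GetLibraries({'libraries': ['-lfoo', 'bar.lib', 'foo']})
#   ->  ['bar.lib', 'foo.lib']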
def _GetOutputFilePathAndTool(spec, msbuild):
"""Returns the path and tool to use for this target.
Figures out the path of the file this spec will create and the name of
the VC tool that will create it.
Arguments:
spec: The target dictionary containing the properties of the target.
msbuild: True when generating for MSBuild; affects the default suffix.
Returns:
A triple of (file path, name of the vc tool, name of the msbuild tool)
"""
# Select a name for the output file.
out_file = ''
vc_tool = ''
msbuild_tool = ''
output_file_map = {
'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
}
output_file_props = output_file_map.get(spec['type'])
if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
vc_tool, msbuild_tool, out_dir, suffix = output_file_props
if spec.get('standalone_static_library', 0):
out_dir = '$(OutDir)'
out_dir = spec.get('product_dir', out_dir)
product_extension = spec.get('product_extension')
if product_extension:
suffix = '.' + product_extension
elif msbuild:
suffix = '$(TargetExt)'
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
out_file = ntpath.join(out_dir, prefix + product_name + suffix)
return out_file, vc_tool, msbuild_tool
def _GetOutputTargetExt(spec):
"""Returns the extension for this target, including the dot
If product_extension is specified, set target_extension to this to avoid
MSB8012, returns None otherwise. Ignores any target_extension settings in
the input files.
Arguments:
spec: The target dictionary containing the properties of the target.
Returns:
A string with the extension, or None
"""
target_extension = spec.get('product_extension')
if target_extension:
return '.' + target_extension
return None
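# Illustrative example (hypothetical spec):
#   _GetOutputTargetExt({'product_extension': 'so'})  ->  '.so'
#   _GetOutputTargetExt({})                           ->  None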
def _GetDefines(config):
"""Returns the list of preprocessor definitions for this configuation.
Arguments:
config: The dictionary that defines the special processing to be done
for this configuration.
Returns:
The list of preprocessor definitions.
"""
defines = []
for d in config.get('defines', []):
if type(d) == list:
fd = '='.join([str(dpart) for dpart in d])
else:
fd = str(d)
defines.append(fd)
return defines
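# Illustrative example (hypothetical config): list-valued defines are joined
# with '=', scalars are stringified:
#   _GetDefines({'defines': ['NDEBUG', ['FOO', 42]]})  ->  ['NDEBUG', 'FOO=42']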
def _GetDisabledWarnings(config):
return [str(i) for i in config.get('msvs_disabled_warnings', [])]
def _GetModuleDefinition(spec):
def_file = ''
if spec['type'] in ['shared_library', 'loadable_module', 'executable']:
def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
if len(def_files) == 1:
def_file = _FixPath(def_files[0])
elif def_files:
raise ValueError(
'Multiple module definition files in one target, target %s lists '
'multiple .def files: %s' % (
spec['target_name'], ' '.join(def_files)))
return def_file
def _ConvertToolsToExpectedForm(tools):
"""Convert tools to a form expected by Visual Studio.
Arguments:
tools: A dictionary of settings; the tool name is the key.
Returns:
A list of Tool objects.
"""
tool_list = []
for tool, settings in tools.iteritems():
# Collapse settings with lists.
settings_fixed = {}
for setting, value in settings.iteritems():
if type(value) == list:
if ((tool == 'VCLinkerTool' and
setting == 'AdditionalDependencies') or
setting == 'AdditionalOptions'):
settings_fixed[setting] = ' '.join(value)
else:
settings_fixed[setting] = ';'.join(value)
else:
settings_fixed[setting] = value
# Add in this tool.
tool_list.append(MSVSProject.Tool(tool, settings_fixed))
return tool_list
def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
"""Add to the project file the configuration specified by config.
Arguments:
p: The target project being generated.
spec: the target project dict.
tools: A dictionary of settings; the tool name is the key.
config: The dictionary that defines the special processing to be done
for this configuration.
config_type: The configuration type, a number as defined by Microsoft.
config_name: The name of the configuration.
"""
attributes = _GetMSVSAttributes(spec, config, config_type)
# Add in this configuration.
tool_list = _ConvertToolsToExpectedForm(tools)
p.AddConfig(_ConfigFullName(config_name, config),
attrs=attributes, tools=tool_list)
def _GetMSVSAttributes(spec, config, config_type):
# Prepare configuration attributes.
prepared_attrs = {}
source_attrs = config.get('msvs_configuration_attributes', {})
for a in source_attrs:
prepared_attrs[a] = source_attrs[a]
# Add props files.
vsprops_dirs = config.get('msvs_props', [])
vsprops_dirs = _FixPaths(vsprops_dirs)
if vsprops_dirs:
prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
# Set configuration type.
prepared_attrs['ConfigurationType'] = config_type
output_dir = prepared_attrs.get('OutputDirectory',
'$(SolutionDir)$(ConfigurationName)')
prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in prepared_attrs:
intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
else:
intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
prepared_attrs['IntermediateDirectory'] = intermediate
return prepared_attrs
def _AddNormalizedSources(sources_set, sources_array):
sources_set.update(_NormalizedSource(s) for s in sources_array)
def _PrepareListOfSources(spec, generator_flags, gyp_file):
"""Prepare list of sources and excluded sources.
Besides the sources specified directly in the spec, adds the gyp file so
that a change to it will cause a re-compile. Also adds appropriate sources
for actions and copies. Assumes a later stage will un-exclude files which
have custom build steps attached.
Arguments:
spec: The target dictionary containing the properties of the target.
generator_flags: Dict of generator-specific flags.
gyp_file: The name of the gyp file.
Returns:
A pair of (list of sources, list of excluded sources).
The sources will be relative to the gyp file.
"""
sources = OrderedSet()
_AddNormalizedSources(sources, spec.get('sources', []))
excluded_sources = OrderedSet()
# Add in the gyp file.
if not generator_flags.get('standalone'):
sources.add(gyp_file)
# Add in 'action' inputs and outputs.
for a in spec.get('actions', []):
inputs = a['inputs']
inputs = [_NormalizedSource(i) for i in inputs]
# Add all inputs to sources and excluded sources.
inputs = OrderedSet(inputs)
sources.update(inputs)
if not spec.get('msvs_external_builder'):
excluded_sources.update(inputs)
if int(a.get('process_outputs_as_sources', False)):
_AddNormalizedSources(sources, a.get('outputs', []))
# Add in 'copies' inputs and outputs.
for cpy in spec.get('copies', []):
_AddNormalizedSources(sources, cpy.get('files', []))
return (sources, excluded_sources)
def _AdjustSourcesAndConvertToFilterHierarchy(
spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
"""Adjusts the list of sources and excluded sources.
Also converts the sets to lists.
Arguments:
spec: The target dictionary containing the properties of the target.
options: Global generator options.
gyp_dir: The directory containing the gyp file being processed.
sources: A set of sources to be included for this project.
excluded_sources: A set of sources to be excluded for this project.
list_excluded: Whether excluded files should be listed in the project.
version: A MSVSVersion object.
Returns:
A trio of (list of sources, list of excluded sources,
path of excluded IDL file)
"""
# Exclude excluded sources coming into the generator.
excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
# Add excluded sources into sources for good measure.
sources.update(excluded_sources)
# Convert to proper Windows form.
# NOTE: sources and excluded_sources go from being sets to lists here.
sources = _FixPaths(sources)
excluded_sources = _FixPaths(excluded_sources)
excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
precompiled_related = _GetPrecompileRelatedFiles(spec)
# Find the excluded ones, minus the precompiled header related ones.
fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
# Convert to folders and the right slashes.
sources = [i.split('\\') for i in sources]
sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
list_excluded=list_excluded,
msvs_version=version)
# Prune filters with a single child to flatten ugly directory structures
# such as ../../src/modules/module1 etc.
if version.UsesVcxproj():
while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
and len(set([s.name for s in sources])) == 1:
assert all([len(s.contents) == 1 for s in sources])
sources = [s.contents[0] for s in sources]
else:
while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
sources = sources[0].contents
return sources, excluded_sources, excluded_idl
def _IdlFilesHandledNonNatively(spec, sources):
# If any non-native rules use 'idl' as an extension, exclude idl files.
# Gather a list here to use later.
using_idl = False
for rule in spec.get('rules', []):
if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
using_idl = True
break
if using_idl:
excluded_idl = [i for i in sources if i.endswith('.idl')]
else:
excluded_idl = []
return excluded_idl
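# Illustrative example (hypothetical spec and sources): with an external rule
# whose extension is 'idl',
#   _IdlFilesHandledNonNatively(
#       {'rules': [{'extension': 'idl', 'msvs_external_rule': 1}]},
#       ['a.idl', 'b.cc'])
# returns ['a.idl'].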
def _GetPrecompileRelatedFiles(spec):
# Gather a list of precompiled header related sources.
precompiled_related = []
for _, config in spec['configurations'].iteritems():
for k in precomp_keys:
f = config.get(k)
if f:
precompiled_related.append(_FixPath(f))
return precompiled_related
def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
list_excluded):
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
for file_name, excluded_configs in exclusions.iteritems():
if (not list_excluded and
len(excluded_configs) == len(spec['configurations'])):
# If we're not listing excluded files, then they won't appear in the
# project, so don't try to configure them to be excluded.
pass
else:
for config_name, config in excluded_configs:
p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
{'ExcludedFromBuild': 'true'})
def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
exclusions = {}
# Exclude excluded sources from being built.
for f in excluded_sources:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
# Don't do this for ones that are precompiled header related.
if f not in precomped:
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
# If any non-native rules use 'idl' as an extension, exclude idl files.
# Exclude them now.
for f in excluded_idl:
excluded_configs = []
for config_name, config in spec['configurations'].iteritems():
excluded_configs.append((config_name, config))
exclusions[f] = excluded_configs
return exclusions
def _AddToolFilesToMSVS(p, spec):
# Add in tool files (rules).
tool_files = OrderedSet()
for _, config in spec['configurations'].iteritems():
for f in config.get('msvs_tool_files', []):
tool_files.add(f)
for f in tool_files:
p.AddToolFile(f)
def _HandlePreCompiledHeaders(p, sources, spec):
# Pre-compiled header source stubs need a different compiler flag
# (generate precompiled header) and any source file not of the same
# kind (i.e. C vs. C++) as the precompiled header source stub needs
# to have use of precompiled headers disabled.
extensions_excluded_from_precompile = []
for config_name, config in spec['configurations'].iteritems():
source = config.get('msvs_precompiled_source')
if source:
source = _FixPath(source)
# UsePrecompiledHeader=1 tells MSVS to create the precompiled header from
# this stub (/Yc).
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '1'})
p.AddFileConfig(source, _ConfigFullName(config_name, config),
{}, tools=[tool])
basename, extension = os.path.splitext(source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
def DisableForSourceTree(source_tree):
for source in source_tree:
if isinstance(source, MSVSProject.Filter):
DisableForSourceTree(source.contents)
else:
basename, extension = os.path.splitext(source)
if extension in extensions_excluded_from_precompile:
for config_name, config in spec['configurations'].iteritems():
tool = MSVSProject.Tool('VCCLCompilerTool',
{'UsePrecompiledHeader': '0',
'ForcedIncludeFiles': '$(NOINHERIT)'})
p.AddFileConfig(_FixPath(source),
_ConfigFullName(config_name, config),
{}, tools=[tool])
# Do nothing if there was no precompiled source.
if extensions_excluded_from_precompile:
DisableForSourceTree(sources)
def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
# Add actions.
actions = spec.get('actions', [])
# Don't setup_env every time. When all the actions are run together in one
# batch file in VS, the PATH will grow too long.
# Membership in this set means that the cygwin environment has been set up,
# and does not need to be set up again.
have_setup_env = set()
for a in actions:
# Attach actions to the gyp file if nothing else is there.
inputs = a.get('inputs') or [relative_path_of_gyp_file]
attached_to = inputs[0]
need_setup_env = attached_to not in have_setup_env
cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
do_setup_env=need_setup_env)
have_setup_env.add(attached_to)
# Add the action.
_AddActionStep(actions_to_add,
inputs=inputs,
outputs=a.get('outputs', []),
description=a.get('message', a['action_name']),
command=cmd)
def _WriteMSVSUserFile(project_path, version, spec):
# Add run_as and test targets.
if 'run_as' in spec:
run_as = spec['run_as']
action = run_as.get('action', [])
environment = run_as.get('environment', [])
working_directory = run_as.get('working_directory', '.')
elif int(spec.get('test', 0)):
action = ['$(TargetPath)', '--gtest_print_time']
environment = []
working_directory = '.'
else:
return # Nothing to add
# Write out the user file.
user_file = _CreateMSVSUserFile(project_path, version, spec)
for config_name, c_data in spec['configurations'].iteritems():
user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
action, environment, working_directory)
user_file.WriteIfChanged()
def _AddCopies(actions_to_add, spec):
copies = _GetCopies(spec)
for inputs, outputs, cmd, description in copies:
_AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
description=description, command=cmd)
def _GetCopies(spec):
copies = []
# Add copies.
for cpy in spec.get('copies', []):
for src in cpy.get('files', []):
dst = os.path.join(cpy['destination'], os.path.basename(src))
# _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
# outputs, so do the same for our generated command line.
if src.endswith('/'):
src_bare = src[:-1]
base_dir = posixpath.split(src_bare)[0]
outer_dir = posixpath.split(src_bare)[1]
cmd = 'cd "%s" && xcopy /e /f /y "%s" "%s\\%s\\"' % (
_FixPath(base_dir), outer_dir, _FixPath(dst), outer_dir)
copies.append(([src], ['dummy_copies', dst], cmd,
'Copying %s to %s' % (src, dst)))
else:
cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
_FixPath(cpy['destination']), _FixPath(src), _FixPath(dst))
copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, dst)))
return copies
def _GetPathDict(root, path):
# |path| will eventually be empty (in the recursive calls) if it was initially
# relative; otherwise it will eventually end up as '\', 'D:\', etc.
if not path or path.endswith(os.sep):
return root
parent, folder = os.path.split(path)
parent_dict = _GetPathDict(root, parent)
if folder not in parent_dict:
parent_dict[folder] = dict()
return parent_dict[folder]
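# Illustrative example (hypothetical, POSIX-style separators): starting from
# an empty root,
#   root = {}
#   _GetPathDict(root, 'a/b')
# returns the innermost dict and leaves root == {'a': {'b': {}}}.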
def _DictsToFolders(base_path, bucket, flat):
# Convert to folders recursively.
children = []
for folder, contents in bucket.iteritems():
if type(contents) == dict:
folder_children = _DictsToFolders(os.path.join(base_path, folder),
contents, flat)
if flat:
children += folder_children
else:
folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
name='(' + folder + ')',
entries=folder_children)
children.append(folder_children)
else:
children.append(contents)
return children
def _CollapseSingles(parent, node):
# Recursively explore the tree of dicts looking for projects which are
# the sole item in a folder which has the same name as the project. Bring
# such projects up one level.
if (type(node) == dict and
len(node) == 1 and
node.keys()[0] == parent + '.vcproj'):
return node[node.keys()[0]]
if type(node) != dict:
return node
for child in node:
node[child] = _CollapseSingles(child, node[child])
return node
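# Illustrative example (hypothetical tree): a project that is the sole entry
# of a same-named folder is hoisted one level:
#   _CollapseSingles('', {'base': {'base.vcproj': project_obj}})
#   ->  {'base': project_obj}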
def _GatherSolutionFolders(sln_projects, project_objects, flat):
root = {}
# Convert into a tree of dicts on path.
for p in sln_projects:
gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
gyp_dir = os.path.dirname(gyp_file)
path_dict = _GetPathDict(root, gyp_dir)
path_dict[target + '.vcproj'] = project_objects[p]
# Walk down from the top until we hit a folder that has more than one entry.
# In practice, this strips the top-level "src/" dir from the hierarchy in
# the solution.
while len(root) == 1 and type(root[root.keys()[0]]) == dict:
root = root[root.keys()[0]]
# Collapse singles.
root = _CollapseSingles('', root)
# Merge buckets until everything is a root entry.
return _DictsToFolders('', root, flat)
def _GetPathOfProject(qualified_target, spec, options, msvs_version):
default_config = _GetDefaultConfiguration(spec)
proj_filename = default_config.get('msvs_existing_vcproj')
if not proj_filename:
proj_filename = (spec['target_name'] + options.suffix +
msvs_version.ProjectExtension())
build_file = gyp.common.BuildFile(qualified_target)
proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
fix_prefix = None
if options.generator_output:
project_dir_path = os.path.dirname(os.path.abspath(proj_path))
proj_path = os.path.join(options.generator_output, proj_path)
fix_prefix = gyp.common.RelativePath(project_dir_path,
os.path.dirname(proj_path))
return proj_path, fix_prefix
def _GetPlatformOverridesOfProject(spec):
# Prepare a dict indicating which project configurations are used for which
# solution configurations for this target.
config_platform_overrides = {}
for config_name, c in spec['configurations'].iteritems():
config_fullname = _ConfigFullName(config_name, c)
platform = c.get('msvs_target_platform', _ConfigPlatform(c))
fixed_config_fullname = '%s|%s' % (
_ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
config_platform_overrides[config_fullname] = fixed_config_fullname
return config_platform_overrides
def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
"""Create a MSVSProject object for the targets found in target list.
Arguments:
target_list: the list of targets to generate project objects for.
target_dicts: the dictionary of specifications.
options: global generator options.
msvs_version: the MSVSVersion object.
Returns:
A set of created projects, keyed by target.
"""
global fixpath_prefix
# Generate each project.
projects = {}
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise GypError(
'Multiple toolsets not supported in msvs build (target %s)' %
qualified_target)
proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
options, msvs_version)
guid = _GetGuidOfProject(proj_path, spec)
overrides = _GetPlatformOverridesOfProject(spec)
build_file = gyp.common.BuildFile(qualified_target)
# Create object for this project.
obj = MSVSNew.MSVSProject(
proj_path,
name=spec['target_name'],
guid=guid,
spec=spec,
build_file=build_file,
config_platform_overrides=overrides,
fixpath_prefix=fixpath_prefix)
# Set project toolset if any (MS build only)
if msvs_version.UsesVcxproj():
obj.set_msbuild_toolset(
_GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
projects[qualified_target] = obj
# Set all the dependencies, but not if we are using an external builder like
# ninja.
for project in projects.values():
if not project.spec.get('msvs_external_builder'):
deps = project.spec.get('dependencies', [])
deps = [projects[d] for d in deps]
project.set_dependencies(deps)
return projects
def _InitNinjaFlavor(options, target_list, target_dicts):
"""Initialize targets for the ninja flavor.
This sets up the necessary variables in the targets to generate msvs projects
that use ninja as an external builder. The variables in the spec are only set
if they have not been set. This allows individual specs to override the
default values initialized here.
Arguments:
options: Options provided to the generator.
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
"""
for qualified_target in target_list:
spec = target_dicts[qualified_target]
if spec.get('msvs_external_builder'):
# The spec explicitly defined an external builder, so don't change it.
continue
path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
spec['msvs_external_builder'] = 'ninja'
if not spec.get('msvs_external_builder_out_dir'):
spec['msvs_external_builder_out_dir'] = \
options.depth + '/out/$(Configuration)'
if not spec.get('msvs_external_builder_build_cmd'):
spec['msvs_external_builder_build_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clean_cmd'):
spec['msvs_external_builder_clean_cmd'] = [
path_to_ninja,
'-C',
'$(OutDir)',
'-t',
'clean',
'$(ProjectName)',
]
if not spec.get('msvs_external_builder_clcompile_cmd'):
spec['msvs_external_builder_clcompile_cmd'] = [
sys.executable,
'$(OutDir)/gyp-win-tool',
'cl-compile',
'$(ProjectDir)',
'$(SelectedFiles)',
]
def CalculateVariables(default_variables, params):
"""Generated variables that require params to be known."""
generator_flags = params.get('generator_flags', {})
# Select project file format version (if unset, default to auto detecting).
msvs_version = MSVSVersion.SelectVisualStudioVersion(
generator_flags.get('msvs_version', 'auto'))
# Stash msvs_version for later (so we don't have to probe the system twice).
params['msvs_version'] = msvs_version
# Set a variable so conditions can be based on msvs_version.
default_variables['MSVS_VERSION'] = msvs_version.ShortName()
# To determine processor word size on Windows, in addition to checking
# PROCESSOR_ARCHITECTURE (which reflects the word size of the current
# process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
# contains the actual word size of the system when running through WOW64).
if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
default_variables['MSVS_OS_BITS'] = 64
else:
default_variables['MSVS_OS_BITS'] = 32
if gyp.common.GetFlavor(params) == 'ninja':
default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
def PerformBuild(data, configurations, params):
options = params['options']
msvs_version = params['msvs_version']
devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
sln_path = build_file_root + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
for config in configurations:
arguments = [devenv, sln_path, '/Build', config]
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
"""Generate .sln and .vcproj files.
This is the entry point for this generator.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dictionary containing per .gyp data.
params: Dict of global generator parameters (options, flags, etc.).
"""
global fixpath_prefix
options = params['options']
# Get the project file format version back out of where we stashed it in
# GeneratorCalculatedVariables.
msvs_version = params['msvs_version']
generator_flags = params.get('generator_flags', {})
# Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
(target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
# Optionally use the large PDB workaround for targets marked with
# 'msvs_large_pdb': 1.
(target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
target_list, target_dicts, generator_default_variables)
# Optionally configure each spec to use ninja as the external builder.
if params.get('flavor') == 'ninja':
_InitNinjaFlavor(options, target_list, target_dicts)
# Prepare the set of configurations.
configs = set()
for qualified_target in target_list:
spec = target_dicts[qualified_target]
for config_name, config in spec['configurations'].iteritems():
configs.add(_ConfigFullName(config_name, config))
configs = list(configs)
# Figure out all the projects that will be generated and their guids
project_objects = _CreateProjectObjects(target_list, target_dicts, options,
msvs_version)
# Generate each project.
missing_sources = []
for project in project_objects.values():
fixpath_prefix = project.fixpath_prefix
missing_sources.extend(_GenerateProject(project, options, msvs_version,
generator_flags))
fixpath_prefix = None
for build_file in data:
# Validate build_file extension
if not build_file.endswith('.gyp'):
continue
sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
if options.generator_output:
sln_path = os.path.join(options.generator_output, sln_path)
# Get projects in the solution, and their dependents.
sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
# Create folder hierarchy.
root_entries = _GatherSolutionFolders(
sln_projects, project_objects, flat=msvs_version.FlatSolution())
# Create solution.
sln = MSVSNew.MSVSSolution(sln_path,
entries=root_entries,
variants=configs,
websiteProperties=False,
version=msvs_version)
sln.Write()
if missing_sources:
error_message = "Missing input files:\n" + \
'\n'.join(set(missing_sources))
if generator_flags.get('msvs_error_on_missing_sources', False):
raise GypError(error_message)
else:
print >> sys.stdout, "Warning: " + error_message
def _GenerateMSBuildFiltersFile(filters_path, source_files,
extension_to_rule_name):
"""Generate the filters file.
This file is used by Visual Studio to organize the presentation of source
files into folders.
Arguments:
filters_path: The path of the file to be created.
source_files: The hierarchical structure of all the sources.
extension_to_rule_name: A dictionary mapping file extensions to rules.
"""
filter_group = []
source_group = []
_AppendFiltersForMSBuild('', source_files, extension_to_rule_name,
filter_group, source_group)
if filter_group:
content = ['Project',
{'ToolsVersion': '4.0',
'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
},
['ItemGroup'] + filter_group,
['ItemGroup'] + source_group
]
easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
elif os.path.exists(filters_path):
# We don't need this filter anymore. Delete the old filter file.
os.unlink(filters_path)
def _AppendFiltersForMSBuild(parent_filter_name, sources,
extension_to_rule_name,
filter_group, source_group):
"""Creates the list of filters and sources to be added in the filter file.
Args:
parent_filter_name: The name of the filter under which the sources are
found.
sources: The hierarchy of filters and sources to process.
extension_to_rule_name: A dictionary mapping file extensions to rules.
filter_group: The list to which filter entries will be appended.
source_group: The list to which source entries will be appended.
"""
for source in sources:
if isinstance(source, MSVSProject.Filter):
# We have a sub-filter. Create the name of that sub-filter.
if not parent_filter_name:
filter_name = source.name
else:
filter_name = '%s\\%s' % (parent_filter_name, source.name)
# Add the filter to the group.
filter_group.append(
['Filter', {'Include': filter_name},
['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
# Recurse and add its dependents.
_AppendFiltersForMSBuild(filter_name, source.contents,
extension_to_rule_name,
filter_group, source_group)
else:
# It's a source. Create a source entry.
_, element = _MapFileToMsBuildSourceType(source, extension_to_rule_name)
source_entry = [element, {'Include': source}]
# Specify the filter it is part of, if any.
if parent_filter_name:
source_entry.append(['Filter', parent_filter_name])
source_group.append(source_entry)
def _MapFileToMsBuildSourceType(source, extension_to_rule_name):
"""Returns the group and element type of the source file.
Arguments:
source: The source file name.
extension_to_rule_name: A dictionary mapping file extensions to rules.
Returns:
A pair of (group this file should be part of, the label of element)
"""
_, ext = os.path.splitext(source)
if ext in extension_to_rule_name:
group = 'rule'
element = extension_to_rule_name[ext]
elif ext in ['.cc', '.cpp', '.c', '.cxx']:
group = 'compile'
element = 'ClCompile'
elif ext in ['.h', '.hxx']:
group = 'include'
element = 'ClInclude'
elif ext == '.rc':
group = 'resource'
element = 'ResourceCompile'
elif ext == '.idl':
group = 'midl'
element = 'Midl'
else:
group = 'none'
element = 'None'
return (group, element)
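# Illustrative examples (hypothetical file names):
#   _MapFileToMsBuildSourceType('foo.cc', {})   ->  ('compile', 'ClCompile')
#   _MapFileToMsBuildSourceType('bar.idl', {})  ->  ('midl', 'Midl')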
def _GenerateRulesForMSBuild(output_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name):
# MSBuild rules are implemented using three files: an XML file, a .targets
# file and a .props file.
# See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
# for more details.
rules = spec.get('rules', [])
rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
msbuild_rules = []
for rule in rules_native:
# Skip a rule with no action and no inputs.
if 'action' not in rule and not rule.get('rule_sources', []):
continue
msbuild_rule = MSBuildRule(rule, spec)
msbuild_rules.append(msbuild_rule)
extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
if msbuild_rules:
base = spec['target_name'] + options.suffix
props_name = base + '.props'
targets_name = base + '.targets'
xml_name = base + '.xml'
props_files_of_rules.add(props_name)
targets_files_of_rules.add(targets_name)
props_path = os.path.join(output_dir, props_name)
targets_path = os.path.join(output_dir, targets_name)
xml_path = os.path.join(output_dir, xml_name)
_GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
_GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
_GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
if rules_external:
_GenerateExternalRules(rules_external, output_dir, spec,
sources, options, actions_to_add)
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
class MSBuildRule(object):
"""Used to store information used to generate an MSBuild rule.
Attributes:
rule_name: The rule name, sanitized to use in XML.
target_name: The name of the target.
after_targets: The name of the AfterTargets element.
before_targets: The name of the BeforeTargets element.
depends_on: The name of the DependsOn element.
compute_output: The name of the ComputeOutput element.
dirs_to_make: The name of the DirsToMake element.
inputs: The name of the _inputs element.
tlog: The name of the _tlog element.
extension: The extension this rule applies to.
description: The message displayed when this rule is invoked.
additional_dependencies: A string listing additional dependencies.
outputs: The outputs of this rule.
command: The command used to run the rule.
"""
def __init__(self, rule, spec):
self.display_name = rule['rule_name']
# Ensure the rule name contains only word characters (anything else is
# replaced with an underscore).
self.rule_name = re.sub(r'\W', '_', self.display_name)
# Create the various element names, following the example set by the
# Visual Studio 2008 to 2010 conversion. I don't know if VS2010
# is sensitive to the exact names.
self.target_name = '_' + self.rule_name
self.after_targets = self.rule_name + 'AfterTargets'
self.before_targets = self.rule_name + 'BeforeTargets'
self.depends_on = self.rule_name + 'DependsOn'
self.compute_output = 'Compute%sOutput' % self.rule_name
self.dirs_to_make = self.rule_name + 'DirsToMake'
self.inputs = self.rule_name + '_inputs'
self.tlog = self.rule_name + '_tlog'
self.extension = rule['extension']
if not self.extension.startswith('.'):
self.extension = '.' + self.extension
self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
rule.get('message', self.rule_name))
old_additional_dependencies = _FixPaths(rule.get('inputs', []))
self.additional_dependencies = (
';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_additional_dependencies]))
old_outputs = _FixPaths(rule.get('outputs', []))
self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
for i in old_outputs])
old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
do_setup_env=True)
self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
"""Generate the .props file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
for rule in msbuild_rules:
content.extend([
['PropertyGroup',
{'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
"'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
rule.after_targets)
},
[rule.before_targets, 'Midl'],
[rule.after_targets, 'CustomBuild'],
],
['PropertyGroup',
[rule.depends_on,
{'Condition': "'$(ConfigurationType)' != 'Makefile'"},
'_SelectedFiles;$(%s)' % rule.depends_on
],
],
['ItemDefinitionGroup',
[rule.rule_name,
['CommandLineTemplate', rule.command],
['Outputs', rule.outputs],
['ExecutionDescription', rule.description],
['AdditionalDependencies', rule.additional_dependencies],
],
]
])
easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
"""Generate the .targets file."""
content = ['Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
}
]
item_group = [
'ItemGroup',
['PropertyPageSchema',
{'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
]
]
for rule in msbuild_rules:
item_group.append(
['AvailableItemName',
{'Include': rule.rule_name},
['Targets', rule.target_name],
])
content.append(item_group)
for rule in msbuild_rules:
content.append(
['UsingTask',
{'TaskName': rule.rule_name,
'TaskFactory': 'XamlTaskFactory',
'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
},
['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
])
for rule in msbuild_rules:
rule_name = rule.rule_name
target_outputs = '%%(%s.Outputs)' % rule_name
target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
'$(MSBuildProjectFile)') % (rule_name, rule_name)
rule_inputs = '%%(%s.Identity)' % rule_name
extension_condition = ("'%(Extension)'=='.obj' or "
"'%(Extension)'=='.res' or "
"'%(Extension)'=='.rsc' or "
"'%(Extension)'=='.lib'")
remove_section = [
'ItemGroup',
{'Condition': "'@(SelectedFiles)' != ''"},
[rule_name,
{'Remove': '@(%s)' % rule_name,
'Condition': "'%(Identity)' != '@(SelectedFiles)'"
}
]
]
inputs_section = [
'ItemGroup',
[rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
]
logging_section = [
'ItemGroup',
[rule.tlog,
{'Include': '%%(%s.Outputs)' % rule_name,
'Condition': ("'%%(%s.Outputs)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" %
(rule_name, rule_name))
},
['Source', "@(%s, '|')" % rule_name],
['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
],
]
message_section = [
'Message',
{'Importance': 'High',
'Text': '%%(%s.ExecutionDescription)' % rule_name
}
]
write_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).write.1.tlog',
'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
rule.tlog)
}
]
read_tlog_section = [
'WriteLinesToFile',
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule.tlog, rule.tlog),
'File': '$(IntDir)$(ProjectName).read.1.tlog',
'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
}
]
command_and_input_section = [
rule_name,
{'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
"'true'" % (rule_name, rule_name),
'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
'Inputs': rule_inputs
}
]
content.extend([
['Target',
{'Name': rule.target_name,
'BeforeTargets': '$(%s)' % rule.before_targets,
'AfterTargets': '$(%s)' % rule.after_targets,
'Condition': "'@(%s)' != ''" % rule_name,
'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
rule.compute_output),
'Outputs': target_outputs,
'Inputs': target_inputs
},
remove_section,
inputs_section,
logging_section,
message_section,
write_tlog_section,
read_tlog_section,
command_and_input_section,
],
['PropertyGroup',
['ComputeLinkInputsTargets',
'$(ComputeLinkInputsTargets);',
'%s;' % rule.compute_output
],
['ComputeLibInputsTargets',
'$(ComputeLibInputsTargets);',
'%s;' % rule.compute_output
],
],
['Target',
{'Name': rule.compute_output,
'Condition': "'@(%s)' != ''" % rule_name
},
['ItemGroup',
[rule.dirs_to_make,
{'Condition': "'@(%s)' != '' and "
"'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
'Include': '%%(%s.Outputs)' % rule_name
}
],
['Link',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['Lib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
['ImpLib',
{'Include': '%%(%s.Identity)' % rule.dirs_to_make,
'Condition': extension_condition
}
],
],
['MakeDir',
{'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
rule.dirs_to_make)
}
]
],
])
easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
"""Generate the .xml file."""
content = [
'ProjectSchemaDefinitions',
{'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
'assembly=Microsoft.Build.Framework'),
'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
'xmlns:transformCallback':
'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
}
]
for rule in msbuild_rules:
content.extend([
['Rule',
{'Name': rule.rule_name,
'PageTemplate': 'tool',
'DisplayName': rule.display_name,
'Order': '200'
},
['Rule.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name
}
]
],
['Rule.Categories',
['Category',
{'Name': 'General'},
['Category.DisplayName',
['sys:String', 'General'],
],
],
['Category',
{'Name': 'Command Line',
'Subtype': 'CommandLine'
},
['Category.DisplayName',
['sys:String', 'Command Line'],
],
],
],
['StringListProperty',
{'Name': 'Inputs',
'Category': 'Command Line',
'IsRequired': 'true',
'Switch': ' '
},
['StringListProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': rule.rule_name,
'SourceType': 'Item'
}
]
],
],
['StringProperty',
{'Name': 'CommandLineTemplate',
'DisplayName': 'Command Line',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['DynamicEnumProperty',
{'Name': rule.before_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute Before'],
],
['DynamicEnumProperty.Description',
['sys:String', 'Specifies the targets for the build customization'
' to run before.'
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.before_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'HasConfigurationCondition': 'true'
}
]
],
],
['DynamicEnumProperty',
{'Name': rule.after_targets,
'Category': 'General',
'EnumProvider': 'Targets',
'IncludeInCommandLine': 'False'
},
['DynamicEnumProperty.DisplayName',
['sys:String', 'Execute After'],
],
['DynamicEnumProperty.Description',
['sys:String', ('Specifies the targets for the build customization'
' to run after.')
],
],
['DynamicEnumProperty.ProviderSettings',
['NameValuePair',
{'Name': 'Exclude',
'Value': '^%s|^Compute' % rule.after_targets
}
]
],
['DynamicEnumProperty.DataSource',
['DataSource',
{'Persistence': 'ProjectFile',
'ItemType': '',
'HasConfigurationCondition': 'true'
}
]
],
],
['StringListProperty',
{'Name': 'Outputs',
'DisplayName': 'Outputs',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringProperty',
{'Name': 'ExecutionDescription',
'DisplayName': 'Execution Description',
'Visible': 'False',
'IncludeInCommandLine': 'False'
}
],
['StringListProperty',
{'Name': 'AdditionalDependencies',
'DisplayName': 'Additional Dependencies',
'IncludeInCommandLine': 'False',
'Visible': 'false'
}
],
['StringProperty',
{'Subtype': 'AdditionalOptions',
'Name': 'AdditionalOptions',
'Category': 'Command Line'
},
['StringProperty.DisplayName',
['sys:String', 'Additional Options'],
],
['StringProperty.Description',
['sys:String', 'Additional Options'],
],
],
],
['ItemType',
{'Name': rule.rule_name,
'DisplayName': rule.display_name
}
],
['FileExtension',
{'Name': '*' + rule.extension,
'ContentType': rule.rule_name
}
],
['ContentType',
{'Name': rule.rule_name,
'DisplayName': '',
'ItemType': rule.rule_name
}
]
])
easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
def _GetConfigurationAndPlatform(name, settings):
configuration = name.rsplit('_', 1)[0]
platform = settings.get('msvs_configuration_platform', 'Win32')
return (configuration, platform)
def _GetConfigurationCondition(name, settings):
return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
_GetConfigurationAndPlatform(name, settings))
def _GetMSBuildProjectConfigurations(configurations):
group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
for (name, settings) in sorted(configurations.iteritems()):
configuration, platform = _GetConfigurationAndPlatform(name, settings)
designation = '%s|%s' % (configuration, platform)
group.append(
['ProjectConfiguration', {'Include': designation},
['Configuration', configuration],
['Platform', platform]])
return [group]
def _GetMSBuildGlobalProperties(spec, guid, gyp_file_name):
namespace = os.path.splitext(gyp_file_name)[0]
return [
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', guid],
['Keyword', 'Win32Proj'],
['RootNamespace', namespace],
['IgnoreWarnCompileDuplicatedFilename', 'true'],
]
]
def _GetMSBuildConfigurationDetails(spec, build_file):
properties = {}
for name, settings in spec['configurations'].iteritems():
msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
condition = _GetConfigurationCondition(name, settings)
character_set = msbuild_attributes.get('CharacterSet')
_AddConditionalProperty(properties, condition, 'ConfigurationType',
msbuild_attributes['ConfigurationType'])
if character_set:
_AddConditionalProperty(properties, condition, 'CharacterSet',
character_set)
return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
def _GetMSBuildLocalProperties(msbuild_toolset):
# Currently the only local property we support is PlatformToolset
properties = {}
if msbuild_toolset:
properties = [
['PropertyGroup', {'Label': 'Locals'},
['PlatformToolset', msbuild_toolset],
]
]
return properties
def _GetMSBuildPropertySheets(configurations):
user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
additional_props = {}
props_specified = False
for name, settings in sorted(configurations.iteritems()):
configuration = _GetConfigurationCondition(name, settings)
if 'msbuild_props' in settings:
additional_props[configuration] = _FixPaths(settings['msbuild_props'])
props_specified = True
else:
additional_props[configuration] = ''
if not props_specified:
return [
['ImportGroup',
{'Label': 'PropertySheets'},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
]
else:
sheets = []
for condition, props in additional_props.iteritems():
import_group = [
'ImportGroup',
{'Label': 'PropertySheets',
'Condition': condition
},
['Import',
{'Project': user_props,
'Condition': "exists('%s')" % user_props,
'Label': 'LocalAppDataPlatform'
}
]
]
for props_file in props:
import_group.append(['Import', {'Project':props_file}])
sheets.append(import_group)
return sheets
def _ConvertMSVSBuildAttributes(spec, config, build_file):
config_type = _GetMSVSConfigurationType(spec, build_file)
msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
msbuild_attributes = {}
for a in msvs_attributes:
if a in ['IntermediateDirectory', 'OutputDirectory']:
directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
if not directory.endswith('\\'):
directory += '\\'
msbuild_attributes[a] = directory
elif a == 'CharacterSet':
msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
elif a == 'ConfigurationType':
msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
else:
print 'Warning: Do not know how to convert MSVS attribute ' + a
return msbuild_attributes
def _ConvertMSVSCharacterSet(char_set):
if char_set.isdigit():
char_set = {
'0': 'MultiByte',
'1': 'Unicode',
'2': 'MultiByte',
}[char_set]
return char_set
def _ConvertMSVSConfigurationType(config_type):
if config_type.isdigit():
config_type = {
'1': 'Application',
'2': 'DynamicLibrary',
'4': 'StaticLibrary',
'10': 'Utility'
}[config_type]
return config_type
def _GetMSBuildAttributes(spec, config, build_file):
if 'msbuild_configuration_attributes' not in config:
msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
else:
config_type = _GetMSVSConfigurationType(spec, build_file)
config_type = _ConvertMSVSConfigurationType(config_type)
msbuild_attributes = config.get('msbuild_configuration_attributes', {})
msbuild_attributes.setdefault('ConfigurationType', config_type)
output_dir = msbuild_attributes.get('OutputDirectory',
'$(SolutionDir)$(Configuration)')
msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
if 'IntermediateDirectory' not in msbuild_attributes:
intermediate = _FixPath('$(Configuration)') + '\\'
msbuild_attributes['IntermediateDirectory'] = intermediate
if 'CharacterSet' in msbuild_attributes:
msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
msbuild_attributes['CharacterSet'])
if 'TargetName' not in msbuild_attributes:
prefix = spec.get('product_prefix', '')
product_name = spec.get('product_name', '$(ProjectName)')
target_name = prefix + product_name
msbuild_attributes['TargetName'] = target_name
if spec.get('msvs_external_builder'):
external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
# Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
# (depending on the tool used) to avoid MSB8012 warning.
msbuild_tool_map = {
'executable': 'Link',
'shared_library': 'Link',
'loadable_module': 'Link',
'static_library': 'Lib',
}
msbuild_tool = msbuild_tool_map.get(spec['type'])
if msbuild_tool:
msbuild_settings = config['finalized_msbuild_settings']
out_file = msbuild_settings[msbuild_tool].get('OutputFile')
if out_file:
msbuild_attributes['TargetPath'] = _FixPath(out_file)
target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
if target_ext:
msbuild_attributes['TargetExt'] = target_ext
return msbuild_attributes
def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
# TODO(jeanluc) We could optimize out the following and do it only if
# there are actions.
# TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
new_paths = []
cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
if cygwin_dirs:
cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
new_paths.append(cyg_path)
# TODO(jeanluc) Change the convention to have both a cygwin_dir and a
# python_dir.
python_path = cyg_path.replace('cygwin\\bin', 'python_26')
new_paths.append(python_path)
if new_paths:
new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
properties = {}
for (name, configuration) in sorted(configurations.iteritems()):
condition = _GetConfigurationCondition(name, configuration)
attributes = _GetMSBuildAttributes(spec, configuration, build_file)
msbuild_settings = configuration['finalized_msbuild_settings']
_AddConditionalProperty(properties, condition, 'IntDir',
attributes['IntermediateDirectory'])
_AddConditionalProperty(properties, condition, 'OutDir',
attributes['OutputDirectory'])
_AddConditionalProperty(properties, condition, 'TargetName',
attributes['TargetName'])
if attributes.get('TargetPath'):
_AddConditionalProperty(properties, condition, 'TargetPath',
attributes['TargetPath'])
if attributes.get('TargetExt'):
_AddConditionalProperty(properties, condition, 'TargetExt',
attributes['TargetExt'])
if new_paths:
_AddConditionalProperty(properties, condition, 'ExecutablePath',
new_paths)
tool_settings = msbuild_settings.get('', {})
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild('', name, value)
_AddConditionalProperty(properties, condition, name, formatted_value)
return _GetMSBuildPropertyGroup(spec, None, properties)
def _AddConditionalProperty(properties, condition, name, value):
"""Adds a property / conditional value pair to a dictionary.
Arguments:
properties: The dictionary to be modified. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
condition: The condition under which the named property has the value.
name: The name of the property.
value: The value of the property.
"""
if name not in properties:
properties[name] = {}
values = properties[name]
if value not in values:
values[value] = []
conditions = values[value]
conditions.append(condition)
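# Illustrative sketch (not part of the original gyp source): two calls that
# share a value accumulate their conditions under that value, e.g.
#   props = {}
#   _AddConditionalProperty(props, "'$(Configuration)'=='Debug'", 'IntDir', 'obj\\')
#   _AddConditionalProperty(props, "'$(Configuration)'=='Release'", 'IntDir', 'obj\\')
# leaves props == {'IntDir': {'obj\\': ["'$(Configuration)'=='Debug'",
#                                       "'$(Configuration)'=='Release'"]}}.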
# Regex for MSVS variable references (i.e. $(FOO)).
MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
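# For example, MSVS_VARIABLE_REFERENCE.findall('$(OutDir)lib\\$(TargetName)')
# returns ['OutDir', 'TargetName'].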
def _GetMSBuildPropertyGroup(spec, label, properties):
"""Returns a PropertyGroup definition for the specified properties.
Arguments:
spec: The target project dict.
label: An optional label for the PropertyGroup.
properties: The dictionary to be converted. The key is the name of the
property. The value is itself a dictionary; its key is the value and
the value a list of condition for which this value is true.
"""
group = ['PropertyGroup']
if label:
group.append({'Label': label})
num_configurations = len(spec['configurations'])
def GetEdges(node):
# Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
edges = set()
for value in sorted(properties[node].keys()):
# Add to edges all $(...) references to variables.
#
# Variable references that refer to names not in properties are excluded.
# These can exist, for instance, to refer to built-in definitions like
# $(SolutionDir).
#
# Self references are ignored. Self reference is used in a few places to
# append to the default value. I.e. PATH=$(PATH);other_path
edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
if v in properties and v != node]))
return edges
properties_ordered = gyp.common.TopologicallySorted(
properties.keys(), GetEdges)
# Walk properties in the reverse of a topological sort on
# user_of_variable -> used_variable as this ensures variables are
# defined before they are used.
# NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
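# Hypothetical example: with properties PATH='$(ExecutablePath);$(QtDir)\\bin'
# and QtDir='c:\\qt', GetEdges('PATH') yields {'QtDir'} ($(ExecutablePath) is
# not a local property, so it is skipped), and the reversed order emits QtDir
# before PATH.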
for name in reversed(properties_ordered):
values = properties[name]
for value, conditions in sorted(values.iteritems()):
if len(conditions) == num_configurations:
# If the value is the same for all configurations,
# just add one unconditional entry.
group.append([name, value])
else:
for condition in conditions:
group.append([name, {'Condition': condition}, value])
return [group]
def _GetMSBuildToolSettingsSections(spec, configurations):
groups = []
for (name, configuration) in sorted(configurations.iteritems()):
msbuild_settings = configuration['finalized_msbuild_settings']
group = ['ItemDefinitionGroup',
{'Condition': _GetConfigurationCondition(name, configuration)}
]
for tool_name, tool_settings in sorted(msbuild_settings.iteritems()):
# Skip the tool named '' which is a holder of global settings handled
# by _GetMSBuildConfigurationGlobalProperties.
if tool_name:
if tool_settings:
tool = [tool_name]
for name, value in sorted(tool_settings.iteritems()):
formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
value)
tool.append([name, formatted_value])
group.append(tool)
groups.append(group)
return groups
def _FinalizeMSBuildSettings(spec, configuration):
if 'msbuild_settings' in configuration:
converted = False
msbuild_settings = configuration['msbuild_settings']
MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
else:
converted = True
msvs_settings = configuration.get('msvs_settings', {})
msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
include_dirs, resource_include_dirs = _GetIncludeDirs(configuration)
libraries = _GetLibraries(spec)
library_dirs = _GetLibraryDirs(configuration)
out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
target_ext = _GetOutputTargetExt(spec)
defines = _GetDefines(configuration)
if converted:
# Visual Studio 2010 has TR1
defines = [d for d in defines if d != '_HAS_TR1=0']
# Warn of ignored settings
ignored_settings = ['msvs_tool_files']
for ignored_setting in ignored_settings:
value = configuration.get(ignored_setting)
if value:
print ('Warning: The automatic conversion to MSBuild does not handle '
'%s. Ignoring setting of %s' % (ignored_setting, str(value)))
defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
disabled_warnings = _GetDisabledWarnings(configuration)
prebuild = configuration.get('msvs_prebuild')
postbuild = configuration.get('msvs_postbuild')
def_file = _GetModuleDefinition(spec)
precompiled_header = configuration.get('msvs_precompiled_header')
# Add the information to the appropriate tool
# TODO(jeanluc) We could optimize and generate these settings only if
# the corresponding files are found, e.g. don't generate ResourceCompile
# if you don't have any resources.
_ToolAppend(msbuild_settings, 'ClCompile',
'AdditionalIncludeDirectories', include_dirs)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'AdditionalIncludeDirectories', resource_include_dirs)
# Add in libraries, note that even for empty libraries, we want this
# set, to prevent inheriting default libraries from the environment.
_ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
libraries)
_ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
library_dirs)
if out_file:
_ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
only_if_unset=True)
if target_ext:
_ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
only_if_unset=True)
# Add defines.
_ToolAppend(msbuild_settings, 'ClCompile',
'PreprocessorDefinitions', defines)
_ToolAppend(msbuild_settings, 'ResourceCompile',
'PreprocessorDefinitions', defines)
# Add disabled warnings.
_ToolAppend(msbuild_settings, 'ClCompile',
'DisableSpecificWarnings', disabled_warnings)
# Turn on precompiled headers if appropriate.
if precompiled_header:
precompiled_header = os.path.split(precompiled_header)[1]
_ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
_ToolAppend(msbuild_settings, 'ClCompile',
'PrecompiledHeaderFile', precompiled_header)
_ToolAppend(msbuild_settings, 'ClCompile',
'ForcedIncludeFiles', [precompiled_header])
# Loadable modules don't generate import libraries;
# tell dependent projects to not expect one.
if spec['type'] == 'loadable_module':
_ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
# Set the module definition file if any.
if def_file:
_ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
configuration['finalized_msbuild_settings'] = msbuild_settings
if prebuild:
_ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
if postbuild:
_ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
def _GetValueFormattedForMSBuild(tool_name, name, value):
if type(value) == list:
# For some settings, VS2010 does not automatically extend the settings.
# TODO(jeanluc) Is this what we want?
if name in ['AdditionalIncludeDirectories',
'AdditionalLibraryDirectories',
'AdditionalOptions',
'DelayLoadDLLs',
'DisableSpecificWarnings',
'PreprocessorDefinitions']:
value.append('%%(%s)' % name)
# For most tools, entries in a list should be separated with ';' but some
# settings use a space. Check for those first.
exceptions = {
'ClCompile': ['AdditionalOptions'],
'Link': ['AdditionalOptions'],
'Lib': ['AdditionalOptions']}
if tool_name in exceptions and name in exceptions[tool_name]:
char = ' '
else:
char = ';'
formatted_value = char.join(
[MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
else:
formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
return formatted_value
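# Illustrative example (values assumed): for ('ClCompile',
# 'PreprocessorDefinitions', ['DEBUG', 'UNICODE']) the inherited
# '%(PreprocessorDefinitions)' entry is appended and the list is joined with
# ';', giving 'DEBUG;UNICODE;%(PreprocessorDefinitions)'; AdditionalOptions
# for ClCompile/Link/Lib would be joined with spaces instead.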
def _VerifySourcesExist(sources, root_dir):
"""Verifies that all source files exist on disk.
Checks that all regular source files, i.e. not created at run time,
exist on disk. Missing files cause needless recompilation but otherwise
produce no visible errors.
Arguments:
sources: A recursive list of Filter/file names.
root_dir: The root directory for the relative path names.
Returns:
A list of source files that cannot be found on disk.
"""
missing_sources = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
else:
if '$' not in source:
full_path = os.path.join(root_dir, source)
if not os.path.exists(full_path):
missing_sources.append(full_path)
return missing_sources
def _GetMSBuildSources(spec, sources, exclusions, extension_to_rule_name,
actions_spec, sources_handled_by_action, list_excluded):
groups = ['none', 'midl', 'include', 'compile', 'resource', 'rule']
grouped_sources = {}
for g in groups:
grouped_sources[g] = []
_AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action, list_excluded)
sources = []
for g in groups:
if grouped_sources[g]:
sources.append(['ItemGroup'] + grouped_sources[g])
if actions_spec:
sources.append(['ItemGroup'] + actions_spec)
return sources
def _AddSources2(spec, sources, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded):
extensions_excluded_from_precompile = []
for source in sources:
if isinstance(source, MSVSProject.Filter):
_AddSources2(spec, source.contents, exclusions, grouped_sources,
extension_to_rule_name, sources_handled_by_action,
list_excluded)
else:
if source not in sources_handled_by_action:
detail = []
excluded_configurations = exclusions.get(source, [])
if len(excluded_configurations) == len(spec['configurations']):
detail.append(['ExcludedFromBuild', 'true'])
else:
for config_name, configuration in sorted(excluded_configurations):
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['ExcludedFromBuild',
{'Condition': condition},
'true'])
# Add precompile if needed
for config_name, configuration in spec['configurations'].iteritems():
precompiled_source = configuration.get('msvs_precompiled_source', '')
if precompiled_source != '':
precompiled_source = _FixPath(precompiled_source)
if not extensions_excluded_from_precompile:
# If the precompiled header is generated by a C source, we must
# not try to use it for C++ sources, and vice versa.
basename, extension = os.path.splitext(precompiled_source)
if extension == '.c':
extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
else:
extensions_excluded_from_precompile = ['.c']
if precompiled_source == source:
condition = _GetConfigurationCondition(config_name, configuration)
detail.append(['PrecompiledHeader',
{'Condition': condition},
'Create'
])
else:
# Turn off precompiled header usage for source files of a
# different type than the file that generated the
# precompiled header.
for extension in extensions_excluded_from_precompile:
if source.endswith(extension):
detail.append(['PrecompiledHeader', ''])
detail.append(['ForcedIncludeFiles', ''])
group, element = _MapFileToMsBuildSourceType(source,
extension_to_rule_name)
grouped_sources[group].append([element, {'Include': source}] + detail)
def _GetMSBuildProjectReferences(project):
references = []
if project.dependencies:
group = ['ItemGroup']
for dependency in project.dependencies:
guid = dependency.guid
project_dir = os.path.split(project.path)[0]
relative_path = gyp.common.RelativePath(dependency.path, project_dir)
project_ref = ['ProjectReference',
{'Include': relative_path},
['Project', guid],
['ReferenceOutputAssembly', 'false']
]
for config in dependency.spec.get('configurations', {}).itervalues():
# If it's disabled in any config, turn it off in the reference.
if config.get('msvs_2010_disable_uldi_when_referenced', 0):
project_ref.append(['UseLibraryDependencyInputs', 'false'])
break
group.append(project_ref)
references.append(group)
return references
def _GenerateMSBuildProject(project, options, version, generator_flags):
spec = project.spec
configurations = spec['configurations']
project_dir, project_file_name = os.path.split(project.path)
gyp.common.EnsureDirExists(project.path)
# Prepare list of sources and excluded sources.
gyp_path = _NormalizedSource(project.build_file)
relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
gyp_file = os.path.split(project.build_file)[1]
sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
gyp_file)
# Add rules.
actions_to_add = {}
props_files_of_rules = set()
targets_files_of_rules = set()
extension_to_rule_name = {}
list_excluded = generator_flags.get('msvs_list_excluded_files', True)
# Don't generate rules if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_GenerateRulesForMSBuild(project_dir, options, spec,
sources, excluded_sources,
props_files_of_rules, targets_files_of_rules,
actions_to_add, extension_to_rule_name)
else:
rules = spec.get('rules', [])
_AdjustSourcesForRules(spec, rules, sources, excluded_sources)
sources, excluded_sources, excluded_idl = (
_AdjustSourcesAndConvertToFilterHierarchy(spec, options,
project_dir, sources,
excluded_sources,
list_excluded, version))
# Don't add actions if we are using an external builder like ninja.
if not spec.get('msvs_external_builder'):
_AddActions(actions_to_add, spec, project.build_file)
_AddCopies(actions_to_add, spec)
# NOTE: this stanza must appear after all actions have been decided.
# Don't exclude sources with actions attached, or they won't run.
excluded_sources = _FilterActionsFromExcluded(
excluded_sources, actions_to_add)
exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
spec, actions_to_add)
_GenerateMSBuildFiltersFile(project.path + '.filters', sources,
extension_to_rule_name)
missing_sources = _VerifySourcesExist(sources, project_dir)
for configuration in configurations.itervalues():
_FinalizeMSBuildSettings(spec, configuration)
# Add attributes to root element
import_default_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
import_cpp_props_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
import_cpp_targets_section = [
['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
content = [
'Project',
{'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
'ToolsVersion': version.ProjectVersion(),
'DefaultTargets': 'Build'
}]
content += _GetMSBuildProjectConfigurations(configurations)
content += _GetMSBuildGlobalProperties(spec, project.guid, project_file_name)
content += import_default_section
content += _GetMSBuildConfigurationDetails(spec, project.build_file)
content += _GetMSBuildLocalProperties(project.msbuild_toolset)
content += import_cpp_props_section
content += _GetMSBuildExtensions(props_files_of_rules)
content += _GetMSBuildPropertySheets(configurations)
content += macro_section
content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
project.build_file)
content += _GetMSBuildToolSettingsSections(spec, configurations)
content += _GetMSBuildSources(
spec, sources, exclusions, extension_to_rule_name, actions_spec,
sources_handled_by_action, list_excluded)
content += _GetMSBuildProjectReferences(project)
content += import_cpp_targets_section
content += _GetMSBuildExtensionTargets(targets_files_of_rules)
if spec.get('msvs_external_builder'):
content += _GetMSBuildExternalBuilderTargets(spec)
# TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
# has_run_as = _WriteMSVSUserFile(project.path, version, spec)
easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
return missing_sources
def _GetMSBuildExternalBuilderTargets(spec):
"""Return a list of MSBuild targets for external builders.
The "Build" and "Clean" targets are always generated. If the spec contains
'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
be generated, to support building selected C/C++ files.
Arguments:
spec: The gyp target spec.
Returns:
List of MSBuild 'Target' specs.
"""
build_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_build_cmd'],
False, False, False, False)
build_target = ['Target', {'Name': 'Build'}]
build_target.append(['Exec', {'Command': build_cmd}])
clean_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clean_cmd'],
False, False, False, False)
clean_target = ['Target', {'Name': 'Clean'}]
clean_target.append(['Exec', {'Command': clean_cmd}])
targets = [build_target, clean_target]
if spec.get('msvs_external_builder_clcompile_cmd'):
clcompile_cmd = _BuildCommandLineForRuleRaw(
spec, spec['msvs_external_builder_clcompile_cmd'],
False, False, False, False)
clcompile_target = ['Target', {'Name': 'ClCompile'}]
clcompile_target.append(['Exec', {'Command': clcompile_cmd}])
targets.append(clcompile_target)
return targets
def _GetMSBuildExtensions(props_files_of_rules):
extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
for props_file in props_files_of_rules:
extensions.append(['Import', {'Project': props_file}])
return [extensions]
def _GetMSBuildExtensionTargets(targets_files_of_rules):
targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
for targets_file in sorted(targets_files_of_rules):
targets_node.append(['Import', {'Project': targets_file}])
return [targets_node]
def _GenerateActionsForMSBuild(spec, actions_to_add):
"""Add actions accumulated into an actions_to_add, merging as needed.
Arguments:
spec: the target project dict
actions_to_add: dictionary keyed on input name, which maps to a list of
dicts describing the actions attached to that input file.
Returns:
A pair of (action specification, the sources handled by this action).
"""
sources_handled_by_action = OrderedSet()
actions_spec = []
for primary_input, actions in actions_to_add.iteritems():
inputs = OrderedSet()
outputs = OrderedSet()
descriptions = []
commands = []
for action in actions:
inputs.update(OrderedSet(action['inputs']))
outputs.update(OrderedSet(action['outputs']))
descriptions.append(action['description'])
cmd = action['command']
# For most actions, add 'call' so that actions that invoke batch files
# return and continue executing. msbuild_use_call provides a way to
# disable this but I have not seen any adverse effect from doing that
# for everything.
if action.get('msbuild_use_call', True):
cmd = 'call ' + cmd
commands.append(cmd)
# Add the custom build action for one input file.
description = ', and also '.join(descriptions)
# We can't join the commands simply with && because the command line will
# get too long. See also _AddActions: cygwin's setup_env mustn't be called
# for every invocation or the command that sets the PATH will grow too
# long.
command = (
'\r\nif %errorlevel% neq 0 exit /b %errorlevel%\r\n'.join(commands))
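# For instance (assumed inputs), commands ['call gen_a.bat', 'call gen_b.bat']
# join into a single multi-line CustomBuild command:
#   call gen_a.bat
#   if %errorlevel% neq 0 exit /b %errorlevel%
#   call gen_b.bat
# so the chain aborts on the first failure without building one huge line.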
_AddMSBuildAction(spec,
primary_input,
inputs,
outputs,
command,
description,
sources_handled_by_action,
actions_spec)
return actions_spec, sources_handled_by_action
def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
sources_handled_by_action, actions_spec):
command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
primary_input = _FixPath(primary_input)
inputs_array = _FixPaths(inputs)
outputs_array = _FixPaths(outputs)
additional_inputs = ';'.join([i for i in inputs_array
if i != primary_input])
outputs = ';'.join(outputs_array)
sources_handled_by_action.add(primary_input)
action_spec = ['CustomBuild', {'Include': primary_input}]
action_spec.extend(
# TODO(jeanluc) 'Document' for all or just if as_sources?
[['FileType', 'Document'],
['Command', command],
['Message', description],
['Outputs', outputs]
])
if additional_inputs:
action_spec.append(['AdditionalInputs', additional_inputs])
actions_spec.append(action_spec)
| mit |
nirvn/QGIS | tests/src/python/test_provider_ogr_gpkg.py | 1 | 28594 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for the OGR/GPKG provider.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Even Rouault'
__date__ = '2016-04-21'
__copyright__ = 'Copyright 2016, Even Rouault'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import shutil
import sys
import tempfile
import time
import qgis # NOQA
from osgeo import gdal, ogr
from qgis.core import (QgsFeature, QgsFieldConstraints, QgsGeometry,
QgsRectangle, QgsSettings, QgsVectorLayer,
QgsVectorLayerExporter, QgsPointXY)
from qgis.PyQt.QtCore import QCoreApplication
from qgis.testing import start_app, unittest
def GDAL_COMPUTE_VERSION(maj, min, rev):
return ((maj) * 1000000 + (min) * 10000 + (rev) * 100)
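# For example, GDAL_COMPUTE_VERSION(2, 1, 2) == 2010200, the packed form
# returned by int(gdal.VersionInfo('VERSION_NUM')) for GDAL 2.1.2.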
class ErrorReceiver():
def __init__(self):
self.msg = None
def receiveError(self, msg):
self.msg = msg
def count_opened_filedescriptors(filename_to_test):
count = -1
if sys.platform.startswith('linux'):
count = 0
open_files_dirname = '/proc/%d/fd' % os.getpid()
filenames = os.listdir(open_files_dirname)
for filename in filenames:
full_filename = open_files_dirname + '/' + filename
if os.path.exists(full_filename):
link = os.readlink(full_filename)
if os.path.basename(link) == os.path.basename(filename_to_test):
count += 1
return count
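# Note: on Linux this counts the /proc/<pid>/fd entries pointing at the given
# file; elsewhere it returns -1, which is why the tests below guard their
# assertions with 'if count > 0'.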
class TestPyQgsOGRProviderGpkg(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("TestPyQgsOGRProviderGpkg.com")
QCoreApplication.setApplicationName("TestPyQgsOGRProviderGpkg")
QgsSettings().clear()
start_app()
# Create test layer
cls.basetestpath = tempfile.mkdtemp()
@classmethod
def tearDownClass(cls):
"""Run after all tests"""
shutil.rmtree(cls.basetestpath, True)
QgsSettings().clear()
def testSingleToMultiPolygonPromotion(self):
tmpfile = os.path.join(self.basetestpath, 'testSingleToMultiPolygonPromotion.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('MultiPolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testCurveGeometryType(self):
tmpfile = os.path.join(self.basetestpath, 'testCurveGeometryType.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbCurvePolygon)
ds = None
vl = QgsVectorLayer('{}'.format(tmpfile), 'test', 'ogr')
self.assertEqual(vl.dataProvider().subLayers(), ['0:test:0:CurvePolygon:geom'])
f = QgsFeature()
f.setGeometry(QgsGeometry.fromWkt('POLYGON ((0 0,0 1,1 1,0 0))'))
vl.dataProvider().addFeatures([f])
got = [feat for feat in vl.getFeatures()][0]
got_geom = got.geometry()
reference = QgsGeometry.fromWkt('CurvePolygon (((0 0, 0 1, 1 1, 0 0)))')
# The geometries must be binarily identical
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def internalTestBug15351(self, orderClosing):
tmpfile = os.path.join(self.basetestpath, 'testBug15351.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (3 50)')))
# Iterate over features (will open a new OGR connection), but do not
# close the iterator for now
it = vl.getFeatures()
f = QgsFeature()
it.nextFeature(f)
if orderClosing == 'closeIter_commit_closeProvider':
it = None
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
# Close layer and iterator in different orders
if orderClosing == 'closeIter_commit_closeProvider':
vl = None
elif orderClosing == 'commit_closeProvider_closeIter':
vl = None
it = None
else:
assert orderClosing == 'commit_closeIter_closeProvider'
it = None
vl = None
# Test that we succeeded in restoring the default journal mode, and that
# we are not left in WAL mode.
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
# We need GDAL 2.0 to issue PRAGMA journal_mode
# Note: for that case, we don't strictly need to turn on WAL
def testBug15351_closeIter_commit_closeProvider(self):
self.internalTestBug15351('closeIter_commit_closeProvider')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeProvider_closeIter(self):
self.internalTestBug15351('commit_closeProvider_closeIter')
# We need GDAL 2.0 to issue PRAGMA journal_mode
def testBug15351_commit_closeIter_closeProvider(self):
self.internalTestBug15351('commit_closeIter_closeProvider')
@unittest.skipIf(int(gdal.VersionInfo('VERSION_NUM')) < GDAL_COMPUTE_VERSION(2, 1, 2), 'requires GDAL >= 2.1.2')
def testGeopackageExtentUpdate(self):
''' test https://issues.qgis.org/issues/15273 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageExtentUpdate.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 0.5)'))
lyr.CreateFeature(f)
f = None
gdal.ErrorReset()
ds.ExecuteSQL('RECOMPUTE EXTENT ON test')
has_error = gdal.GetLastErrorMsg() != ''
ds = None
if has_error:
print('Too old GDAL trunk version. Please update')
return
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test moving a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1, QgsGeometry.fromWkt('Point (0.5 0)')))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 1.0))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
# Test deleting a geometry that touches the bbox
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(2))
self.assertTrue(vl.commitChanges())
reference = QgsGeometry.fromRect(QgsRectangle(0.5, 0.0, 1.0, 0.5))
provider_extent = QgsGeometry.fromRect(vl.extent())
self.assertTrue(QgsGeometry.compare(provider_extent.asPolygon()[0], reference.asPolygon()[0], 0.00001),
provider_extent.asPolygon()[0])
def testSelectSubsetString(self):
tmpfile = os.path.join(self.basetestpath, 'testSelectSubsetString.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'baz'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layerid=0'.format(tmpfile), 'test', 'ogr')
vl.setSubsetString("SELECT fid, foo FROM test WHERE foo = 'baz'")
got = [feat for feat in vl.getFeatures()]
self.assertEqual(len(got), 1)
def testStyle(self):
# First test with invalid URI
vl = QgsVectorLayer('/idont/exist.gpkg', 'test', 'ogr')
self.assertFalse(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, -1)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('/idont/exist.gpkg')
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertNotEqual(errorMsg, "")
# Now with valid URI
tmpfile = os.path.join(self.basetestpath, 'testStyle.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('test2', geom_type=ogr.wkbMultiPolygon)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f['foo'] = 'bar'
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer('{}|layername=test'.format(tmpfile), 'test', 'ogr')
self.assertTrue(vl.isValid())
vl2 = QgsVectorLayer('{}|layername=test2'.format(tmpfile), 'test2', 'ogr')
self.assertTrue(vl2.isValid())
self.assertTrue(vl.dataProvider().isSaveAndLoadStyleToDatabaseSupported())
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 0)
self.assertEqual(idlist, [])
self.assertEqual(namelist, [])
self.assertEqual(desclist, [])
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, success = vl.loadNamedStyle('{}|layerid=0'.format(tmpfile))
self.assertFalse(success)
errorMsg = vl.saveStyleToDatabase("name", "description", False, "")
self.assertEqual(errorMsg, "")
qml, errmsg = vl.getStyleFromDatabase("not_existing")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
qml, errmsg = vl.getStyleFromDatabase("100")
self.assertEqual(qml, "")
self.assertNotEqual(errmsg, "")
qml, errmsg = vl.getStyleFromDatabase("1")
self.assertTrue(qml.startswith('<!DOCTYPE qgis'), qml)
self.assertEqual(errmsg, "")
# Try overwrite it but simulate answer no
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", False)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertNotEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description'])
# Try overwrite it and simulate answer yes
settings = QgsSettings()
settings.setValue("/qgis/overwriteStyle", True)
errorMsg = vl.saveStyleToDatabase("name", "description_bis", False, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 1)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1'])
self.assertEqual(namelist, ['name'])
self.assertEqual(desclist, ['description_bis'])
errorMsg = vl2.saveStyleToDatabase("name_test2", "description_test2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name2", "description2", True, "")
self.assertEqual(errorMsg, "")
errorMsg = vl.saveStyleToDatabase("name3", "description3", True, "")
self.assertEqual(errorMsg, "")
related_count, idlist, namelist, desclist, errmsg = vl.listStylesInDatabase()
self.assertEqual(related_count, 3)
self.assertEqual(errmsg, "")
self.assertEqual(idlist, ['1', '3', '4', '2'])
self.assertEqual(namelist, ['name', 'name2', 'name3', 'name_test2'])
self.assertEqual(desclist, ['description_bis', 'description2', 'description3', 'name_test2'])
# Check that the layer_styles table is not listed in subLayers()
vl = QgsVectorLayer(tmpfile, 'test', 'ogr')
sublayers = vl.dataProvider().subLayers()
self.assertEqual(len(sublayers), 2, sublayers)
def testDisablewalForSqlite3(self):
''' Test disabling walForSqlite3 setting '''
QgsSettings().setValue("/qgis/walForSqlite3", False)
tmpfile = os.path.join(self.basetestpath, 'testDisablewalForSqlite3.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr0', ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn('attr1', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile), u'test', u'ogr')
# Test that we are using default delete mode and not WAL
ds = ogr.Open(tmpfile)
lyr = ds.ExecuteSQL('PRAGMA journal_mode')
f = lyr.GetNextFeature()
res = f.GetField(0)
ds.ReleaseResultSet(lyr)
ds = None
self.assertEqual(res, 'delete')
self.assertTrue(vl.startEditing())
feature = next(vl.getFeatures())
self.assertTrue(vl.changeAttributeValue(feature.id(), 1, 1001))
# Commit changes
cbk = ErrorReceiver()
vl.dataProvider().raiseError.connect(cbk.receiveError)
self.assertTrue(vl.commitChanges())
self.assertIsNone(cbk.msg)
vl = None
QgsSettings().setValue("/qgis/walForSqlite3", None)
def testSimulatedDBManagerImport(self):
uri = 'point?field=f1:int'
uri += '&field=f2:double(6,4)'
uri += '&field=f3:string(20)'
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 1
f['f2'] = 123.456
f['f3'] = '12345678.90123456789'
f2 = QgsFeature(lyr.fields())
f2['f1'] = 2
lyr.dataProvider().addFeatures([f, f2])
tmpfile = os.path.join(self.basetestpath, 'testSimulatedDBManagerImport.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds = None
options = {}
options['update'] = True
options['driverName'] = 'GPKG'
options['layerName'] = 'my_out_table'
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 1)
self.assertEqual(f['f2'], 123.456)
self.assertEqual(f['f3'], '12345678.90123456789')
f = next(features)
self.assertEqual(f['f1'], 2)
features = None
# Test overwriting without overwrite option
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.ErrCreateDataSource)
# Test overwriting
lyr = QgsVectorLayer(uri, "x", "memory")
self.assertTrue(lyr.isValid())
f = QgsFeature(lyr.fields())
f['f1'] = 3
lyr.dataProvider().addFeatures([f])
options['overwrite'] = True
err = QgsVectorLayerExporter.exportLayer(lyr, tmpfile, "ogr", lyr.crs(), False, options)
self.assertEqual(err[0], QgsVectorLayerExporter.NoError,
'unexpected import error {0}'.format(err))
lyr = QgsVectorLayer(tmpfile + "|layername=my_out_table", "y", "ogr")
self.assertTrue(lyr.isValid())
features = lyr.getFeatures()
f = next(features)
self.assertEqual(f['f1'], 3)
features = None
def testGeopackageTwoLayerEdition(self):
''' test https://issues.qgis.org/issues/17034 '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageTwoLayerEdition.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('layer1', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 0)'))
lyr.CreateFeature(f)
f = None
lyr = ds.CreateLayer('layer2', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('attr', ogr.OFTInteger))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(1 1)'))
lyr.CreateFeature(f)
f = None
ds = None
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
# Edit vl1, vl2 multiple times
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (2 2)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (3 3)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeAttributeValue(1, 1, 100))
self.assertTrue(vl2.changeAttributeValue(1, 1, 101))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
self.assertTrue(vl1.startEditing())
self.assertTrue(vl2.startEditing())
self.assertTrue(vl1.changeGeometry(1, QgsGeometry.fromWkt('Point (4 4)')))
self.assertTrue(vl2.changeGeometry(1, QgsGeometry.fromWkt('Point (5 5)')))
self.assertTrue(vl1.commitChanges())
self.assertTrue(vl2.commitChanges())
vl1 = None
vl2 = None
# Check everything is as expected after re-opening
vl1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer1", u'layer1', u'ogr')
vl2 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=layer2", u'layer2', u'ogr')
got = [feat for feat in vl1.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 100)
reference = QgsGeometry.fromWkt('Point (4 4)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
got = [feat for feat in vl2.getFeatures()][0]
got_geom = got.geometry()
self.assertEqual(got['attr'], 101)
reference = QgsGeometry.fromWkt('Point (5 5)')
self.assertEqual(got_geom.asWkb(), reference.asWkb(), 'Expected {}, got {}'.format(reference.asWkt(), got_geom.asWkt()))
def testGeopackageManyLayers(self):
''' test opening more than 64 layers without running out of Spatialite connections '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageManyLayers.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
for i in range(70):
lyr = ds.CreateLayer('layer%d' % i, geom_type=ogr.wkbPoint)
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POINT(%d 0)' % i))
lyr.CreateFeature(f)
f = None
ds = None
vl_tab = []
for i in range(70):
layername = 'layer%d' % i
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl.isValid())
vl_tab += [vl]
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 1)
for i in range(70):
got = [feat for feat in vl.getFeatures()]
self.assertTrue(len(got) == 1)
# We shouldn't have more than 2 file handles opened:
# one shared by the QgsOgrProvider object
# one shared by the feature iterators
count = count_opened_filedescriptors(tmpfile)
if count > 0:
self.assertEqual(count, 2)
# Re-open already opened layers. We should get a new handle
layername = 'layer%d' % 0
vl_extra0 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra0.isValid())
countNew = count_opened_filedescriptors(tmpfile)
if countNew > 0:
self.assertLessEqual(countNew, 4) # for some reason we get 4 and not 3
layername = 'layer%d' % 1
vl_extra1 = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + layername, layername, u'ogr')
self.assertTrue(vl_extra1.isValid())
countNew2 = count_opened_filedescriptors(tmpfile)
self.assertEqual(countNew2, countNew)
def testGeopackageRefreshIfTableListUpdated(self):
''' test that creating/deleting a layer is reflected when opening a new layer '''
tmpfile = os.path.join(self.basetestpath, 'testGeopackageRefreshIfTableListUpdated.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
ds.CreateLayer('test', geom_type=ogr.wkbPoint)
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(vl.extent().isNull())
time.sleep(1) # so timestamp gets updated
ds = ogr.Open(tmpfile, update=1)
ds.CreateLayer('test2', geom_type=ogr.wkbPoint)
ds = None
vl2 = QgsVectorLayer(u'{}'.format(tmpfile), 'test', u'ogr')
vl2.subLayers()
self.assertEqual(vl2.dataProvider().subLayers(), ['0:test:0:Point:geom', '1:test2:0:Point:geom'])
def testGeopackageLargeFID(self):
tmpfile = os.path.join(self.basetestpath, 'testGeopackageLargeFID.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPoint)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
vl = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
f = QgsFeature()
f.setAttributes([1234567890123, None])
self.assertTrue(vl.startEditing())
self.assertTrue(vl.dataProvider().addFeatures([f]))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['fid'], 1234567890123)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.changeGeometry(1234567890123, QgsGeometry.fromWkt('Point (3 50)')))
self.assertTrue(vl.changeAttributeValue(1234567890123, 1, 'foo'))
self.assertTrue(vl.commitChanges())
got = [feat for feat in vl.getFeatures()][0]
self.assertEqual(got['str_field'], 'foo')
got_geom = got.geometry()
self.assertIsNotNone(got_geom)
self.assertTrue(vl.startEditing())
self.assertTrue(vl.deleteFeature(1234567890123))
self.assertTrue(vl.commitChanges())
def test_AddFeatureNullFid(self):
"""Test gpkg feature with NULL fid can be added"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
ds = None
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
# Check that pk field has unique constraint
fields = layer.fields()
pkfield = fields.at(0)
self.assertTrue(pkfield.constraints().constraints() & QgsFieldConstraints.ConstraintUnique)
# Test add feature with default Fid (NULL)
layer.startEditing()
f = QgsFeature()
feat = QgsFeature(layer.fields())
feat.setGeometry(QgsGeometry.fromWkt('Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))'))
feat.setAttribute(1, 'test_value')
layer.addFeature(feat)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 1)
def test_SplitFeature(self):
"""Test gpkg feature can be split"""
tmpfile = os.path.join(self.basetestpath, 'testGeopackageSplitFeatures.gpkg')
ds = ogr.GetDriverByName('GPKG').CreateDataSource(tmpfile)
lyr = ds.CreateLayer('test', geom_type=ogr.wkbPolygon)
lyr.CreateField(ogr.FieldDefn('str_field', ogr.OFTString))
f = ogr.Feature(lyr.GetLayerDefn())
f.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0,0 1,1 1,1 0,0 0))'))
lyr.CreateFeature(f)
f = None
ds = None
# Split features
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertTrue(layer.isValid())
self.assertTrue(layer.isSpatial())
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0 0, 0 1, 1 1, 1 0, 0 0))')
layer.startEditing()
self.assertEqual(layer.splitFeatures([QgsPointXY(0.5, 0), QgsPointXY(0.5, 1)], 0), 0)
self.assertTrue(layer.commitChanges())
self.assertEqual(layer.featureCount(), 2)
layer = QgsVectorLayer(u'{}'.format(tmpfile) + "|layername=" + "test", 'test', u'ogr')
self.assertEqual(layer.featureCount(), 2)
self.assertEqual([f for f in layer.getFeatures()][0].geometry().asWkt(), 'Polygon ((0.5 0, 0.5 1, 1 1, 1 0, 0.5 0))')
self.assertEqual([f for f in layer.getFeatures()][1].geometry().asWkt(), 'Polygon ((0.5 1, 0.5 0, 0 0, 0 1, 0.5 1))')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
z1gm4/desarrollo_web_udp | env/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/__init__.py | 499 | 5766 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
from __future__ import absolute_import, division, unicode_literals
__all__ = ["getTreeWalker", "pprint", "dom", "etree", "genshistream", "lxmletree",
"pulldom"]
import sys
from .. import constants
from ..utils import default_etree
treeWalkerCache = {}
def getTreeWalker(treeType, implementation=None, **kwargs):
"""Get a TreeWalker class for various types of tree with built-in support
treeType - the name of the tree type required (case-insensitive). Supported
values are:
"dom" - The xml.dom.minidom DOM implementation
"pulldom" - The xml.dom.pulldom event stream
"etree" - A generic walker for tree implementations exposing an
elementtree-like interface (known to work with
ElementTree, cElementTree and lxml.etree).
"lxml" - Optimized walker for lxml.etree
"genshi" - a Genshi stream
implementation - (Currently applies to the "etree" tree type only). A module
implementing the tree type e.g. xml.etree.ElementTree or
cElementTree."""
treeType = treeType.lower()
if treeType not in treeWalkerCache:
if treeType in ("dom", "pulldom"):
name = "%s.%s" % (__name__, treeType)
__import__(name)
mod = sys.modules[name]
treeWalkerCache[treeType] = mod.TreeWalker
elif treeType == "genshi":
from . import genshistream
treeWalkerCache[treeType] = genshistream.TreeWalker
elif treeType == "lxml":
from . import lxmletree
treeWalkerCache[treeType] = lxmletree.TreeWalker
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# XXX: NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeWalker
return treeWalkerCache.get(treeType)
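# Hedged usage sketch (assumption, not part of the original module): the
# "etree" type accepts a concrete implementation module, as described in the
# docstring above. Instances of the returned class are token iterators.
def _exampleEtreeWalker(tree):
    import xml.etree.ElementTree as ElementTree
    walker_class = getTreeWalker("etree", implementation=ElementTree)
    return list(walker_class(tree))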
def concatenateCharacterTokens(tokens):
pendingCharacters = []
for token in tokens:
type = token["type"]
if type in ("Characters", "SpaceCharacters"):
pendingCharacters.append(token["data"])
else:
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
pendingCharacters = []
yield token
if pendingCharacters:
yield {"type": "Characters", "data": "".join(pendingCharacters)}
def pprint(walker):
"""Pretty printer for tree walkers"""
output = []
indent = 0
for token in concatenateCharacterTokens(walker):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
# tag name
if token["namespace"] and token["namespace"] != constants.namespaces["html"]:
if token["namespace"] in constants.prefixes:
ns = constants.prefixes[token["namespace"]]
else:
ns = token["namespace"]
name = "%s %s" % (ns, token["name"])
else:
name = token["name"]
output.append("%s<%s>" % (" " * indent, name))
indent += 2
# attributes (sorted for consistent ordering)
attrs = token["data"]
for (namespace, localname), value in sorted(attrs.items()):
if namespace:
if namespace in constants.prefixes:
ns = constants.prefixes[namespace]
else:
ns = namespace
name = "%s %s" % (ns, localname)
else:
name = localname
output.append("%s%s=\"%s\"" % (" " * indent, name, value))
# self-closing
if type == "EmptyTag":
indent -= 2
elif type == "EndTag":
indent -= 2
elif type == "Comment":
output.append("%s<!-- %s -->" % (" " * indent, token["data"]))
elif type == "Doctype":
if token["name"]:
if token["publicId"]:
output.append("""%s<!DOCTYPE %s "%s" "%s">""" %
(" " * indent,
token["name"],
token["publicId"],
token["systemId"] if token["systemId"] else ""))
elif token["systemId"]:
output.append("""%s<!DOCTYPE %s "" "%s">""" %
(" " * indent,
token["name"],
token["systemId"]))
else:
output.append("%s<!DOCTYPE %s>" % (" " * indent,
token["name"]))
else:
output.append("%s<!DOCTYPE >" % (" " * indent,))
elif type == "Characters":
output.append("%s\"%s\"" % (" " * indent, token["data"]))
elif type == "SpaceCharacters":
assert False, "concatenateCharacterTokens should have got rid of all Space tokens"
else:
raise ValueError("Unknown token type, %s" % type)
return "\n".join(output)
| gpl-3.0 |
demarle/VTK | Rendering/Core/Testing/Python/ImageActorStressed.py | 20 | 4147 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Create the RenderWindow, Renderer and both Actors
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# First one tests the changing display extent without
# changing the size of the display extent (so it
# reuses a texture, but not a contiguous one)
gsOne = vtk.vtkImageEllipsoidSource()
gsOne.SetWholeExtent(0,999,0,999,0,0)
gsOne.SetCenter(500,500,0)
gsOne.SetRadius(300,400,0)
gsOne.SetInValue(0)
gsOne.SetOutValue(255)
gsOne.SetOutputScalarTypeToUnsignedChar()
ssOne = vtk.vtkImageShiftScale()
ssOne.SetInputConnection(gsOne.GetOutputPort())
ssOne.SetOutputScalarTypeToUnsignedChar()
ssOne.SetShift(0)
ssOne.SetScale(1)
ssOne.UpdateWholeExtent()
iaOne = vtk.vtkImageActor()
iaOne.GetMapper().SetInputConnection(ssOne.GetOutputPort())
ren1.AddActor(iaOne)
# The second one tests a really large texture
gsTwo = vtk.vtkImageEllipsoidSource()
gsTwo.SetWholeExtent(1000,8999,1000,8999,0,0)
gsTwo.SetCenter(4000,4000,0)
gsTwo.SetRadius(1800,1800,0)
gsTwo.SetInValue(250)
gsTwo.SetOutValue(150)
gsTwo.SetOutputScalarTypeToUnsignedChar()
ssTwo = vtk.vtkImageShiftScale()
ssTwo.SetInputConnection(gsTwo.GetOutputPort())
ssTwo.SetOutputScalarTypeToUnsignedChar()
ssTwo.SetShift(0)
ssTwo.SetScale(1)
ssTwo.UpdateWholeExtent()
iaTwo = vtk.vtkImageActor()
iaTwo.GetMapper().SetInputConnection(ssTwo.GetOutputPort())
iaTwo.SetScale(0.1,0.1,1.0)
iaTwo.AddPosition(1000,1000,0)
ren1.AddActor(iaTwo)
# The third one will test changing input and a
# power of two texture
gsThree = vtk.vtkImageEllipsoidSource()
gsThree.SetWholeExtent(0,511,2000,2511,0,0)
gsThree.SetCenter(255,2255,0)
gsThree.SetRadius(100,200,0)
gsThree.SetInValue(250)
gsThree.SetOutValue(0)
gsThree.SetOutputScalarTypeToUnsignedChar()
ssThree = vtk.vtkImageShiftScale()
ssThree.SetInputConnection(gsThree.GetOutputPort())
ssThree.SetOutputScalarTypeToUnsignedChar()
ssThree.SetShift(0)
ssThree.SetScale(1)
ssThree.UpdateWholeExtent()
iaThree = vtk.vtkImageActor()
iaThree.GetMapper().SetInputConnection(ssThree.GetOutputPort())
ren1.AddActor(iaThree)
# Same as first one, but the display extents
# represent contiguous section of memory that
# are powers of two
gsFour = vtk.vtkImageEllipsoidSource()
gsFour.SetWholeExtent(2000,2511,0,511,0,0)
gsFour.SetCenter(2255,255,0)
gsFour.SetRadius(130,130,0)
gsFour.SetInValue(40)
gsFour.SetOutValue(190)
gsFour.SetOutputScalarTypeToUnsignedChar()
ssFour = vtk.vtkImageShiftScale()
ssFour.SetInputConnection(gsFour.GetOutputPort())
ssFour.SetOutputScalarTypeToUnsignedChar()
ssFour.SetShift(0)
ssFour.SetScale(1)
ssFour.UpdateWholeExtent()
iaFour = vtk.vtkImageActor()
iaFour.GetMapper().SetInputConnection(ssFour.GetOutputPort())
ren1.AddActor(iaFour)
# Same as previous one, but the display extents
# represent contiguous section of memory that
# are not powers of two
gsFive = vtk.vtkImageEllipsoidSource()
gsFive.SetWholeExtent(1200,1712,0,512,0,0)
gsFive.SetCenter(1456,256,0)
gsFive.SetRadius(130,180,0)
gsFive.SetInValue(190)
gsFive.SetOutValue(100)
gsFive.SetOutputScalarTypeToUnsignedChar()
ssFive = vtk.vtkImageShiftScale()
ssFive.SetInputConnection(gsFive.GetOutputPort())
ssFive.SetOutputScalarTypeToUnsignedChar()
ssFive.SetShift(0)
ssFive.SetScale(1)
ssFive.UpdateWholeExtent()
iaFive = vtk.vtkImageActor()
iaFive.GetMapper().SetInputConnection(ssFive.GetOutputPort())
ren1.AddActor(iaFive)
ren1.SetBackground(0.1,0.2,0.4)
renWin.SetSize(400,400)
# render the image
ren1.ResetCamera()
ren1.GetActiveCamera().Zoom(1.5)
renWin.Render()
# render again, then exercise changing display extents and source parameters
renWin.Render()
iaOne.SetDisplayExtent(200,999,200,999,0,0)
iaFour.SetDisplayExtent(2000,2511,0,300,0,0)
iaFive.SetDisplayExtent(1200,1712,0,300,0,0)
gsThree.SetRadius(120,120,0)
renWin.Render()
iaOne.SetDisplayExtent(0,799,0,799,0,0)
iaFour.SetDisplayExtent(2000,2511,200,500,0,0)
iaFive.SetDisplayExtent(1200,1712,200,500,0,0)
gsThree.SetRadius(150,150,0)
renWin.Render()
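# Hedged sketch (assumption, not part of the original test): capturing the
# final frame to disk with vtkWindowToImageFilter for offline comparison.
def capture_frame(ren_win, path='image_actor_stress.png'):
    w2i = vtk.vtkWindowToImageFilter()
    w2i.SetInput(ren_win)
    w2i.Update()
    writer = vtk.vtkPNGWriter()
    writer.SetFileName(path)
    writer.SetInputConnection(w2i.GetOutputPort())
    writer.Write()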
# --- end of script --
| bsd-3-clause |
o-schneider/selenium | py/selenium/webdriver/support/expected_conditions.py | 69 | 9915 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import NoAlertPresentException
"""
* Canned "Expected Conditions" which are generally useful within webdriver
* tests.
"""
class title_is(object):
"""An expectation for checking the title of a page.
title is the expected title, which must be an exact match
    returns True if the title matches, False otherwise."""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title == driver.title
class title_contains(object):
""" An expectation for checking that the title contains a case-sensitive
substring. title is the fragment of title expected
returns True when the title matches, False otherwise
"""
def __init__(self, title):
self.title = title
def __call__(self, driver):
return self.title in driver.title
class presence_of_element_located(object):
""" An expectation for checking that an element is present on the DOM
of a page. This does not necessarily mean that the element is visible.
locator - used to find the element
returns the WebElement once it is located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator)
class visibility_of_element_located(object):
""" An expectation for checking that an element is present on the DOM of a
page and visible. Visibility means that the element is not only displayed
but also has a height and width that is greater than 0.
locator - used to find the element
returns the WebElement once it is located and visible
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator))
except StaleElementReferenceException:
return False
class visibility_of(object):
""" An expectation for checking that an element, known to be present on the
DOM of a page, is visible. Visibility means that the element is not only
displayed but also has a height and width that is greater than 0.
element is the WebElement
returns the (same) WebElement once it is visible
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return _element_if_visible(self.element)
def _element_if_visible(element, visibility=True):
return element if element.is_displayed() == visibility else False
class presence_of_all_elements_located(object):
""" An expectation for checking that there is at least one element present
on a web page.
locator is used to find the element
returns the list of WebElements once they are located
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_elements(driver, self.locator)
class text_to_be_present_in_element(object):
""" An expectation for checking if the given text is present in the
specified element.
locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
        try:
element_text = _find_element(driver, self.locator).text
return self.text in element_text
except StaleElementReferenceException:
return False
class text_to_be_present_in_element_value(object):
"""
    An expectation for checking if the given text is present in the element's
    value.
    locator, text
"""
def __init__(self, locator, text_):
self.locator = locator
self.text = text_
def __call__(self, driver):
try:
element_text = _find_element(driver,
self.locator).get_attribute("value")
if element_text:
return self.text in element_text
else:
return False
except StaleElementReferenceException:
return False
class frame_to_be_available_and_switch_to_it(object):
""" An expectation for checking whether the given frame is available to
switch to. If the frame is available it switches the given driver to the
specified frame.
"""
def __init__(self, locator):
self.frame_locator = locator
def __call__(self, driver):
try:
if isinstance(self.frame_locator, tuple):
driver.switch_to.frame(_find_element(driver,
self.frame_locator))
else:
driver.switch_to.frame(self.frame_locator)
return True
except NoSuchFrameException:
return False
class invisibility_of_element_located(object):
""" An Expectation for checking that an element is either invisible or not
present on the DOM.
locator used to find the element
"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
try:
return _element_if_visible(_find_element(driver, self.locator), False)
except (NoSuchElementException, StaleElementReferenceException):
# In the case of NoSuchElement, returns true because the element is
# not present in DOM. The try block checks if the element is present
# but is invisible.
# In the case of StaleElementReference, returns true because stale
# element reference implies that element is no longer visible.
return True
class element_to_be_clickable(object):
""" An Expectation for checking an element is visible and enabled such that
you can click it."""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
element = visibility_of_element_located(self.locator)(driver)
if element and element.is_enabled():
return element
else:
return False
class staleness_of(object):
""" Wait until an element is no longer attached to the DOM.
element is the element to wait for.
    returns False if the element is still attached to the DOM, True otherwise.
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
try:
# Calling any method forces a staleness check
self.element.is_enabled()
return False
except StaleElementReferenceException as expected:
return True
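# Hedged usage sketch (assumption, not part of the original module): waiting
# for a page reload by polling staleness of an element captured beforehand.
def _example_wait_for_refresh(driver, old_element, timeout=10):
    from selenium.webdriver.support.ui import WebDriverWait
    return WebDriverWait(driver, timeout).until(staleness_of(old_element))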
class element_to_be_selected(object):
""" An expectation for checking the selection is selected.
element is WebElement object
"""
def __init__(self, element):
self.element = element
def __call__(self, ignored):
return self.element.is_selected()
class element_located_to_be_selected(object):
"""An expectation for the element to be located is selected.
locator is a tuple of (by, path)"""
def __init__(self, locator):
self.locator = locator
def __call__(self, driver):
return _find_element(driver, self.locator).is_selected()
class element_selection_state_to_be(object):
""" An expectation for checking if the given element is selected.
element is WebElement object
is_selected is a Boolean."
"""
def __init__(self, element, is_selected):
self.element = element
self.is_selected = is_selected
def __call__(self, ignored):
return self.element.is_selected() == self.is_selected
class element_located_selection_state_to_be(object):
""" An expectation to locate an element and check if the selection state
specified is in that state.
locator is a tuple of (by, path)
is_selected is a boolean
"""
def __init__(self, locator, is_selected):
self.locator = locator
self.is_selected = is_selected
def __call__(self, driver):
try:
element = _find_element(driver, self.locator)
return element.is_selected() == self.is_selected
except StaleElementReferenceException:
return False
class alert_is_present(object):
""" Expect an alert to be present."""
def __init__(self):
pass
def __call__(self, driver):
try:
alert = driver.switch_to.alert
alert.text
return alert
except NoAlertPresentException:
return False
def _find_element(driver, by):
"""Looks up an element. Logs and re-raises ``WebDriverException``
if thrown."""
try :
return driver.find_element(*by)
except NoSuchElementException as e:
raise e
except WebDriverException as e:
raise e
def _find_elements(driver, by):
    try:
return driver.find_elements(*by)
except WebDriverException as e:
raise e
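# Hedged usage sketch (assumption, not part of the original module): these
# condition objects are normally consumed through WebDriverWait.until().
def _example_wait_for_clickable(driver, locator, timeout=10):
    from selenium.webdriver.support.ui import WebDriverWait
    return WebDriverWait(driver, timeout).until(element_to_be_clickable(locator))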
| apache-2.0 |
SamYaple/neutron | neutron/context.py | 21 | 4591 | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Context: context for security/db session."""
import copy
import datetime
from debtcollector import removals
from oslo_context import context as oslo_context
from oslo_log import log as logging
from neutron.db import api as db_api
from neutron import policy
LOG = logging.getLogger(__name__)
class ContextBase(oslo_context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
@removals.removed_kwarg('read_deleted')
def __init__(self, user_id, tenant_id, is_admin=None, roles=None,
timestamp=None, request_id=None, tenant_name=None,
user_name=None, overwrite=True, auth_token=None,
is_advsvc=None, **kwargs):
"""Object initialization.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
super(ContextBase, self).__init__(auth_token=auth_token,
user=user_id, tenant=tenant_id,
is_admin=is_admin,
request_id=request_id,
overwrite=overwrite)
self.user_name = user_name
self.tenant_name = tenant_name
if not timestamp:
timestamp = datetime.datetime.utcnow()
self.timestamp = timestamp
self.roles = roles or []
self.is_advsvc = is_advsvc
if self.is_advsvc is None:
self.is_advsvc = self.is_admin or policy.check_is_advsvc(self)
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
@property
def project_id(self):
return self.tenant
@property
def tenant_id(self):
return self.tenant
@tenant_id.setter
def tenant_id(self, tenant_id):
self.tenant = tenant_id
@property
def user_id(self):
return self.user
@user_id.setter
def user_id(self, user_id):
self.user = user_id
def to_dict(self):
context = super(ContextBase, self).to_dict()
context.update({
'user_id': self.user_id,
'tenant_id': self.tenant_id,
'project_id': self.project_id,
'roles': self.roles,
'timestamp': str(self.timestamp),
'tenant_name': self.tenant_name,
'project_name': self.tenant_name,
'user_name': self.user_name,
})
return context
@classmethod
def from_dict(cls, values):
return cls(**values)
@removals.removed_kwarg('read_deleted')
def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
context.is_admin = True
if 'admin' not in [x.lower() for x in context.roles]:
context.roles = context.roles + ["admin"]
return context
class Context(ContextBase):
def __init__(self, *args, **kwargs):
super(Context, self).__init__(*args, **kwargs)
self._session = None
@property
def session(self):
if self._session is None:
self._session = db_api.get_session()
return self._session
@removals.removed_kwarg('read_deleted')
@removals.removed_kwarg('load_admin_roles')
def get_admin_context(read_deleted="no", load_admin_roles=True):
return Context(user_id=None,
tenant_id=None,
is_admin=True,
overwrite=False)
@removals.removed_kwarg('read_deleted')
def get_admin_context_without_session(read_deleted="no"):
return ContextBase(user_id=None,
tenant_id=None,
is_admin=True)
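# Hedged usage sketch (assumption, not part of the original module): elevating
# a tenant-scoped context before a privileged DB query.
def _example_elevate(context):
    admin_ctx = context.elevated()
    assert admin_ctx.is_admin
    assert 'admin' in [r.lower() for r in admin_ctx.roles]
    return admin_ctx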
| apache-2.0 |
shakamunyi/neutron | neutron/tests/unit/plugins/ml2/drivers/ext_test.py | 45 | 8660 | # Copyright 2015 Intel Corporation.
# Copyright 2015 Isaku Yamahata <isaku.yamahata at intel com>
# <isaku.yamahata at gmail com>
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
import oslo_db.sqlalchemy.session
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.db import model_base
from neutron.db import models_v2
from neutron.plugins.ml2 import driver_api
from neutron.tests.unit.plugins.ml2 import extensions as test_extensions
class TestExtensionDriverBase(driver_api.ExtensionDriver):
_supported_extension_aliases = 'fake_extension'
def initialize(self):
extensions.append_api_extensions_path(test_extensions.__path__)
@property
def extension_alias(self):
return self._supported_extension_aliases
class TestExtensionDriver(TestExtensionDriverBase):
def initialize(self):
super(TestExtensionDriver, self).initialize()
self.network_extension = 'Test_Network_Extension'
self.subnet_extension = 'Test_Subnet_Extension'
self.port_extension = 'Test_Port_Extension'
def _check_create(self, session, data, result):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(data, dict))
assert('id' not in data)
assert(isinstance(result, dict))
assert(result['id'] is not None)
def _check_update(self, session, data, result):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(data, dict))
assert(isinstance(result, dict))
assert(result['id'] is not None)
def _check_extend(self, session, result, db_entry,
expected_db_entry_class):
assert(isinstance(session, oslo_db.sqlalchemy.session.Session))
assert(isinstance(result, dict))
assert(result['id'] is not None)
assert(isinstance(db_entry, expected_db_entry_class))
assert(db_entry.id == result['id'])
def process_create_network(self, plugin_context, data, result):
session = plugin_context.session
self._check_create(session, data, result)
result['network_extension'] = self.network_extension + '_create'
def process_update_network(self, plugin_context, data, result):
session = plugin_context.session
self._check_update(session, data, result)
self.network_extension = data['network_extension']
result['network_extension'] = self.network_extension + '_update'
def extend_network_dict(self, session, net_db, result):
self._check_extend(session, result, net_db, models_v2.Network)
result['network_extension'] = self.network_extension + '_extend'
def process_create_subnet(self, plugin_context, data, result):
session = plugin_context.session
self._check_create(session, data, result)
result['subnet_extension'] = self.subnet_extension + '_create'
def process_update_subnet(self, plugin_context, data, result):
session = plugin_context.session
self._check_update(session, data, result)
self.subnet_extension = data['subnet_extension']
result['subnet_extension'] = self.subnet_extension + '_update'
def extend_subnet_dict(self, session, subnet_db, result):
self._check_extend(session, result, subnet_db, models_v2.Subnet)
result['subnet_extension'] = self.subnet_extension + '_extend'
def process_create_port(self, plugin_context, data, result):
session = plugin_context.session
self._check_create(session, data, result)
result['port_extension'] = self.port_extension + '_create'
def process_update_port(self, plugin_context, data, result):
session = plugin_context.session
self._check_update(session, data, result)
self.port_extension = data['port_extension']
result['port_extension'] = self.port_extension + '_update'
def extend_port_dict(self, session, port_db, result):
self._check_extend(session, result, port_db, models_v2.Port)
result['port_extension'] = self.port_extension + '_extend'
class TestNetworkExtension(model_base.BASEV2):
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
value = sa.Column(sa.String(64))
network = orm.relationship(
models_v2.Network,
backref=orm.backref('extension', cascade='delete', uselist=False))
class TestSubnetExtension(model_base.BASEV2):
subnet_id = sa.Column(sa.String(36),
sa.ForeignKey('subnets.id', ondelete="CASCADE"),
primary_key=True)
value = sa.Column(sa.String(64))
subnet = orm.relationship(
models_v2.Subnet,
backref=orm.backref('extension', cascade='delete', uselist=False))
class TestPortExtension(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
value = sa.Column(sa.String(64))
port = orm.relationship(
models_v2.Port,
backref=orm.backref('extension', cascade='delete', uselist=False))
class TestDBExtensionDriver(TestExtensionDriverBase):
def _get_value(self, data, key):
value = data[key]
if not attributes.is_attr_set(value):
value = ''
return value
def process_create_network(self, plugin_context, data, result):
session = plugin_context.session
value = self._get_value(data, 'network_extension')
record = TestNetworkExtension(network_id=result['id'], value=value)
session.add(record)
result['network_extension'] = value
def process_update_network(self, plugin_context, data, result):
session = plugin_context.session
record = (session.query(TestNetworkExtension).
filter_by(network_id=result['id']).one())
value = data.get('network_extension')
if value and value != record.value:
record.value = value
result['network_extension'] = record.value
def extend_network_dict(self, session, net_db, result):
result['network_extension'] = net_db.extension.value
def process_create_subnet(self, plugin_context, data, result):
session = plugin_context.session
value = self._get_value(data, 'subnet_extension')
record = TestSubnetExtension(subnet_id=result['id'], value=value)
session.add(record)
result['subnet_extension'] = value
def process_update_subnet(self, plugin_context, data, result):
session = plugin_context.session
record = (session.query(TestSubnetExtension).
filter_by(subnet_id=result['id']).one())
value = data.get('subnet_extension')
if value and value != record.value:
record.value = value
result['subnet_extension'] = record.value
def extend_subnet_dict(self, session, subnet_db, result):
value = subnet_db.extension.value if subnet_db.extension else ''
result['subnet_extension'] = value
def process_create_port(self, plugin_context, data, result):
session = plugin_context.session
value = self._get_value(data, 'port_extension')
record = TestPortExtension(port_id=result['id'], value=value)
session.add(record)
result['port_extension'] = value
def process_update_port(self, plugin_context, data, result):
session = plugin_context.session
record = (session.query(TestPortExtension).
filter_by(port_id=result['id']).one())
value = data.get('port_extension')
if value and value != record.value:
record.value = value
result['port_extension'] = record.value
def extend_port_dict(self, session, port_db, result):
value = port_db.extension.value if port_db.extension else ''
result['port_extension'] = value
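# Hedged sketch (assumption, not part of the original module): the ml2 plugin
# drives these hooks roughly in this order for a create call.
def _example_driver_sequence(driver, plugin_context, data, result):
    driver.process_create_network(plugin_context, data, result)
    # After commit, the dict-extension hook runs on reads, e.g.:
    # driver.extend_network_dict(session, net_db, result)
    return result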
| apache-2.0 |
Chemcy/vnpy | vn.tutorial/tick2trade/vn.trader_t2t/uiBasicWidget.py | 9 | 44280 | # encoding: UTF-8
import json
import csv
import os
from collections import OrderedDict
from PyQt4 import QtGui, QtCore
from eventEngine import *
from vtFunction import *
from vtGateway import *
#----------------------------------------------------------------------
def loadFont():
"""载入字体设置"""
fileName = 'VT_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
fileName = os.path.join(path, fileName)
    try:
        with open(fileName) as f:
            setting = json.load(f)
family = setting['fontFamily']
size = setting['fontSize']
font = QtGui.QFont(family, size)
except:
font = QtGui.QFont(u'微软雅黑', 12)
return font
BASIC_FONT = loadFont()
########################################################################
class BasicCell(QtGui.QTableWidgetItem):
"""基础的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(BasicCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if text == '0' or text == '0.0':
self.setText('')
else:
self.setText(text)
########################################################################
class NumCell(QtGui.QTableWidgetItem):
"""用来显示数字的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(NumCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
# 考虑到NumCell主要用来显示OrderID和TradeID之类的整数字段,
# 这里的数据转化方式使用int类型。但是由于部分交易接口的委托
# 号和成交号可能不是纯数字的形式,因此补充了一个try...except
try:
num = int(text)
self.setData(QtCore.Qt.DisplayRole, num)
except ValueError:
self.setText(text)
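# Hedged sketch (assumption, not part of the original module): storing ints
# via Qt.DisplayRole is what lets QTableWidget sort NumCell numerically, so
# order ID 9 sorts before 10 (plain text sorting would put '10' before '9').
def _demoNumCellSorting():
    # Assumes a QApplication has been created elsewhere.
    a, b = NumCell('9'), NumCell('10')
    return a < b  # True under numeric comparison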
########################################################################
class DirectionCell(QtGui.QTableWidgetItem):
"""用来显示买卖方向的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(DirectionCell, self).__init__()
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if text == DIRECTION_LONG or text == DIRECTION_NET:
self.setForeground(QtGui.QColor('red'))
elif text == DIRECTION_SHORT:
self.setForeground(QtGui.QColor('green'))
self.setText(text)
########################################################################
class NameCell(QtGui.QTableWidgetItem):
"""用来显示合约中文的单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(NameCell, self).__init__()
self.mainEngine = mainEngine
self.data = None
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
if self.mainEngine:
# 首先尝试正常获取合约对象
contract = self.mainEngine.getContract(text)
# 如果能读取合约信息
if contract:
self.setText(contract.name)
########################################################################
class BidCell(QtGui.QTableWidgetItem):
"""买价单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(BidCell, self).__init__()
self.data = None
self.setForeground(QtGui.QColor('black'))
self.setBackground(QtGui.QColor(255,174,201))
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
self.setText(text)
########################################################################
class AskCell(QtGui.QTableWidgetItem):
"""买价单元格"""
#----------------------------------------------------------------------
def __init__(self, text=None, mainEngine=None):
"""Constructor"""
super(AskCell, self).__init__()
self.data = None
self.setForeground(QtGui.QColor('black'))
self.setBackground(QtGui.QColor(160,255,160))
if text:
self.setContent(text)
#----------------------------------------------------------------------
def setContent(self, text):
"""设置内容"""
self.setText(text)
########################################################################
class BasicMonitor(QtGui.QTableWidget):
"""
    Basic monitor table.
    Each value in headerDict is a config dict of the form
    {'chinese': u'中文名', 'cellType': BasicCell}
"""
signal = QtCore.pyqtSignal(type(Event()))
#----------------------------------------------------------------------
def __init__(self, mainEngine=None, eventEngine=None, parent=None):
"""Constructor"""
super(BasicMonitor, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Header labels
        self.headerDict = OrderedDict()  # ordered dict: key = English field name, value = config dict
        self.headerList = []             # mirrors self.headerDict.keys()
        # Data storage
        self.dataDict = {}  # dict: key = value of the data field, value = dict of that row's cells
        self.dataKey = ''   # name of the data field used as the dict key
        # Event type to monitor
        self.eventType = ''
        # Font
        self.font = None
        # Whether to store the data object on each cell
        self.saveData = False
        # Sorting by header is disabled by default; widgets that need it can enable it
        self.sorting = False
        # Initialize the context (right-click) menu
        self.initMenu()
#----------------------------------------------------------------------
def setHeaderDict(self, headerDict):
"""设置表头有序字典"""
self.headerDict = headerDict
self.headerList = headerDict.keys()
#----------------------------------------------------------------------
def setDataKey(self, dataKey):
"""设置数据字典的键"""
self.dataKey = dataKey
#----------------------------------------------------------------------
def setEventType(self, eventType):
"""设置监控的事件类型"""
self.eventType = eventType
#----------------------------------------------------------------------
def setFont(self, font):
"""设置字体"""
self.font = font
#----------------------------------------------------------------------
def setSaveData(self, saveData):
"""设置是否要保存数据到单元格"""
self.saveData = saveData
#----------------------------------------------------------------------
def initTable(self):
"""初始化表格"""
# 设置表格的列数
col = len(self.headerDict)
self.setColumnCount(col)
# 设置列表头
labels = [d['chinese'] for d in self.headerDict.values()]
self.setHorizontalHeaderLabels(labels)
# 关闭左边的垂直表头
self.verticalHeader().setVisible(False)
# 设为不可编辑
self.setEditTriggers(self.NoEditTriggers)
# 设为行交替颜色
self.setAlternatingRowColors(True)
# 设置允许排序
self.setSortingEnabled(self.sorting)
#----------------------------------------------------------------------
def registerEvent(self):
"""注册GUI更新相关的事件监听"""
self.signal.connect(self.updateEvent)
self.eventEngine.register(self.eventType, self.signal.emit)
#----------------------------------------------------------------------
def updateEvent(self, event):
"""收到事件更新"""
data = event.dict_['data']
self.updateData(data)
#----------------------------------------------------------------------
def updateData(self, data):
"""将数据更新到表格中"""
# 如果允许了排序功能,则插入数据前必须关闭,否则插入新的数据会变乱
if self.sorting:
self.setSortingEnabled(False)
# 如果设置了dataKey,则采用存量更新模式
if self.dataKey:
key = data.__getattribute__(self.dataKey)
# 如果键在数据字典中不存在,则先插入新的一行,并创建对应单元格
if key not in self.dataDict:
self.insertRow(0)
d = {}
for n, header in enumerate(self.headerList):
content = safeUnicode(data.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content, self.mainEngine)
if self.font:
cell.setFont(self.font) # 如果设置了特殊字体,则进行单元格设置
if self.saveData: # 如果设置了保存数据对象,则进行对象保存
cell.data = data
self.setItem(0, n, cell)
d[header] = cell
self.dataDict[key] = d
# 否则如果已经存在,则直接更新相关单元格
else:
d = self.dataDict[key]
for header in self.headerList:
content = safeUnicode(data.__getattribute__(header))
cell = d[header]
cell.setContent(content)
if self.saveData: # 如果设置了保存数据对象,则进行对象保存
cell.data = data
# 否则采用增量更新模式
else:
self.insertRow(0)
for n, header in enumerate(self.headerList):
content = safeUnicode(data.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content, self.mainEngine)
if self.font:
cell.setFont(self.font)
if self.saveData:
cell.data = data
self.setItem(0, n, cell)
# 调整列宽
self.resizeColumns()
# 重新打开排序
if self.sorting:
self.setSortingEnabled(True)
#----------------------------------------------------------------------
def resizeColumns(self):
"""调整各列的大小"""
self.horizontalHeader().resizeSections(QtGui.QHeaderView.ResizeToContents)
#----------------------------------------------------------------------
def setSorting(self, sorting):
"""设置是否允许根据表头排序"""
self.sorting = sorting
#----------------------------------------------------------------------
def saveToCsv(self):
"""保存表格内容到CSV文件"""
# 先隐藏右键菜单
self.menu.close()
# 获取想要保存的文件名
path = QtGui.QFileDialog.getSaveFileName(self, '保存数据', '', 'CSV(*.csv)')
try:
if not path.isEmpty():
with open(unicode(path), 'wb') as f:
writer = csv.writer(f)
                    # Write the header row
headers = [header.encode('gbk') for header in self.headerList]
writer.writerow(headers)
                    # Write each row
for row in range(self.rowCount()):
rowdata = []
for column in range(self.columnCount()):
item = self.item(row, column)
if item is not None:
rowdata.append(
unicode(item.text()).encode('gbk'))
else:
rowdata.append('')
writer.writerow(rowdata)
except IOError:
pass
#----------------------------------------------------------------------
def initMenu(self):
"""初始化右键菜单"""
self.menu = QtGui.QMenu(self)
saveAction = QtGui.QAction(u'保存内容', self)
saveAction.triggered.connect(self.saveToCsv)
self.menu.addAction(saveAction)
#----------------------------------------------------------------------
def contextMenuEvent(self, event):
"""右键点击事件"""
self.menu.popup(QtGui.QCursor.pos())
########################################################################
class MarketMonitor(BasicMonitor):
"""市场监控组件"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(MarketMonitor, self).__init__(mainEngine, eventEngine, parent)
        # Set up the ordered header dict
d = OrderedDict()
d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
d['lastPrice'] = {'chinese':u'最新价', 'cellType':BasicCell}
d['preClosePrice'] = {'chinese':u'昨收盘价', 'cellType':BasicCell}
d['volume'] = {'chinese':u'成交量', 'cellType':BasicCell}
d['openInterest'] = {'chinese':u'持仓量', 'cellType':BasicCell}
d['openPrice'] = {'chinese':u'开盘价', 'cellType':BasicCell}
d['highPrice'] = {'chinese':u'最高价', 'cellType':BasicCell}
d['lowPrice'] = {'chinese':u'最低价', 'cellType':BasicCell}
d['bidPrice1'] = {'chinese':u'买一价', 'cellType':BidCell}
d['bidVolume1'] = {'chinese':u'买一量', 'cellType':BidCell}
d['askPrice1'] = {'chinese':u'卖一价', 'cellType':AskCell}
d['askVolume1'] = {'chinese':u'卖一量', 'cellType':AskCell}
d['time'] = {'chinese':u'时间', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
        # Set the data key
        self.setDataKey('vtSymbol')
        # Set the event type to monitor
        self.setEventType(EVENT_TICK)
        # Set the font
        self.setFont(BASIC_FONT)
        # Allow sorting
        self.setSorting(True)
        # Initialize the table
        self.initTable()
        # Register the event listener
self.registerEvent()
########################################################################
class LogMonitor(BasicMonitor):
"""日志监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(LogMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['logTime'] = {'chinese':u'时间', 'cellType':BasicCell}
d['logContent'] = {'chinese':u'内容', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_LOG)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class ErrorMonitor(BasicMonitor):
"""错误监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(ErrorMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['errorTime'] = {'chinese':u'错误时间', 'cellType':BasicCell}
d['errorID'] = {'chinese':u'错误代码', 'cellType':BasicCell}
d['errorMsg'] = {'chinese':u'错误信息', 'cellType':BasicCell}
d['additionalInfo'] = {'chinese':u'补充信息', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_ERROR)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class TradeMonitor(BasicMonitor):
"""成交监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(TradeMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['tradeID'] = {'chinese':u'成交编号', 'cellType':NumCell}
d['orderID'] = {'chinese':u'委托编号', 'cellType':NumCell}
d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
d['direction'] = {'chinese':u'方向', 'cellType':DirectionCell}
d['offset'] = {'chinese':u'开平', 'cellType':BasicCell}
d['price'] = {'chinese':u'价格', 'cellType':BasicCell}
d['volume'] = {'chinese':u'数量', 'cellType':BasicCell}
d['tradeTime'] = {'chinese':u'成交时间', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setEventType(EVENT_TRADE)
self.setFont(BASIC_FONT)
self.setSorting(True)
self.initTable()
self.registerEvent()
########################################################################
class OrderMonitor(BasicMonitor):
"""委托监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(OrderMonitor, self).__init__(mainEngine, eventEngine, parent)
self.mainEngine = mainEngine
d = OrderedDict()
d['orderID'] = {'chinese':u'委托编号', 'cellType':NumCell}
d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
d['direction'] = {'chinese':u'方向', 'cellType':DirectionCell}
d['offset'] = {'chinese':u'开平', 'cellType':BasicCell}
d['price'] = {'chinese':u'价格', 'cellType':BasicCell}
d['totalVolume'] = {'chinese':u'委托数量', 'cellType':BasicCell}
d['tradedVolume'] = {'chinese':u'成交数量', 'cellType':BasicCell}
d['status'] = {'chinese':u'状态', 'cellType':BasicCell}
d['orderTime'] = {'chinese':u'委托时间', 'cellType':BasicCell}
d['cancelTime'] = {'chinese':u'撤销时间', 'cellType':BasicCell}
d['frontID'] = {'chinese':u'前置编号', 'cellType':BasicCell}
d['sessionID'] = {'chinese':u'会话编号', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtOrderID')
self.setEventType(EVENT_ORDER)
self.setFont(BASIC_FONT)
self.setSaveData(True)
self.setSorting(True)
self.initTable()
self.registerEvent()
self.connectSignal()
#----------------------------------------------------------------------
def connectSignal(self):
"""连接信号"""
# 双击单元格撤单
self.itemDoubleClicked.connect(self.cancelOrder)
#----------------------------------------------------------------------
def cancelOrder(self, cell):
"""根据单元格的数据撤单"""
order = cell.data
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
########################################################################
class PositionMonitor(BasicMonitor):
"""持仓监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(PositionMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':u'名称', 'cellType':NameCell}
d['direction'] = {'chinese':u'方向', 'cellType':DirectionCell}
d['position'] = {'chinese':u'持仓量', 'cellType':BasicCell}
d['ydPosition'] = {'chinese':u'昨持仓', 'cellType':BasicCell}
d['frozen'] = {'chinese':u'冻结量', 'cellType':BasicCell}
d['price'] = {'chinese':u'价格', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtPositionName')
self.setEventType(EVENT_POSITION)
self.setFont(BASIC_FONT)
self.setSaveData(True)
self.initTable()
self.registerEvent()
########################################################################
class AccountMonitor(BasicMonitor):
"""账户监控"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(AccountMonitor, self).__init__(mainEngine, eventEngine, parent)
d = OrderedDict()
d['accountID'] = {'chinese':u'账户', 'cellType':BasicCell}
d['preBalance'] = {'chinese':u'昨结', 'cellType':BasicCell}
d['balance'] = {'chinese':u'净值', 'cellType':BasicCell}
d['available'] = {'chinese':u'可用', 'cellType':BasicCell}
d['commission'] = {'chinese':u'手续费', 'cellType':BasicCell}
d['margin'] = {'chinese':u'保证金', 'cellType':BasicCell}
d['closeProfit'] = {'chinese':u'平仓盈亏', 'cellType':BasicCell}
d['positionProfit'] = {'chinese':u'持仓盈亏', 'cellType':BasicCell}
d['gatewayName'] = {'chinese':u'接口', 'cellType':BasicCell}
self.setHeaderDict(d)
self.setDataKey('vtAccountID')
self.setEventType(EVENT_ACCOUNT)
self.setFont(BASIC_FONT)
self.initTable()
self.registerEvent()
########################################################################
class TradingWidget(QtGui.QFrame):
"""简单交易组件"""
signal = QtCore.pyqtSignal(type(Event()))
directionList = [DIRECTION_LONG,
DIRECTION_SHORT]
offsetList = [OFFSET_OPEN,
OFFSET_CLOSE,
OFFSET_CLOSEYESTERDAY,
OFFSET_CLOSETODAY]
priceTypeList = [PRICETYPE_LIMITPRICE,
PRICETYPE_MARKETPRICE,
PRICETYPE_FAK,
PRICETYPE_FOK]
exchangeList = [EXCHANGE_NONE,
EXCHANGE_CFFEX,
EXCHANGE_SHFE,
EXCHANGE_DCE,
EXCHANGE_CZCE,
EXCHANGE_SSE,
EXCHANGE_SZSE,
EXCHANGE_SGE,
EXCHANGE_HKEX,
EXCHANGE_SMART,
EXCHANGE_ICE,
EXCHANGE_CME,
EXCHANGE_NYMEX,
EXCHANGE_GLOBEX,
EXCHANGE_IDEALPRO]
currencyList = [CURRENCY_NONE,
CURRENCY_CNY,
CURRENCY_USD]
productClassList = [PRODUCT_NONE,
PRODUCT_EQUITY,
PRODUCT_FUTURES,
PRODUCT_OPTION,
PRODUCT_FOREX]
gatewayList = ['']
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, parent=None):
"""Constructor"""
super(TradingWidget, self).__init__(parent)
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.symbol = ''
        # Add the trading gateways
self.gatewayList.extend(mainEngine.gatewayDict.keys())
self.initUi()
self.connectSignal()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'交易')
self.setMaximumWidth(400)
self.setFrameShape(self.Box) # 设置边框
self.setLineWidth(1)
# 左边部分
labelSymbol = QtGui.QLabel(u'代码')
labelName = QtGui.QLabel(u'名称')
labelDirection = QtGui.QLabel(u'方向类型')
labelOffset = QtGui.QLabel(u'开平')
labelPrice = QtGui.QLabel(u'价格')
labelVolume = QtGui.QLabel(u'数量')
labelPriceType = QtGui.QLabel(u'价格类型')
labelExchange = QtGui.QLabel(u'交易所')
labelCurrency = QtGui.QLabel(u'货币')
labelProductClass = QtGui.QLabel(u'产品类型')
labelGateway = QtGui.QLabel(u'交易接口')
self.lineSymbol = QtGui.QLineEdit()
self.lineName = QtGui.QLineEdit()
self.comboDirection = QtGui.QComboBox()
self.comboDirection.addItems(self.directionList)
self.comboOffset = QtGui.QComboBox()
self.comboOffset.addItems(self.offsetList)
self.spinPrice = QtGui.QDoubleSpinBox()
self.spinPrice.setDecimals(4)
self.spinPrice.setMinimum(0)
self.spinPrice.setMaximum(100000)
self.spinVolume = QtGui.QSpinBox()
self.spinVolume.setMinimum(0)
self.spinVolume.setMaximum(1000000)
self.comboPriceType = QtGui.QComboBox()
self.comboPriceType.addItems(self.priceTypeList)
self.comboExchange = QtGui.QComboBox()
self.comboExchange.addItems(self.exchangeList)
self.comboCurrency = QtGui.QComboBox()
self.comboCurrency.addItems(self.currencyList)
self.comboProductClass = QtGui.QComboBox()
self.comboProductClass.addItems(self.productClassList)
self.comboGateway = QtGui.QComboBox()
self.comboGateway.addItems(self.gatewayList)
gridleft = QtGui.QGridLayout()
gridleft.addWidget(labelSymbol, 0, 0)
gridleft.addWidget(labelName, 1, 0)
gridleft.addWidget(labelDirection, 2, 0)
gridleft.addWidget(labelOffset, 3, 0)
gridleft.addWidget(labelPrice, 4, 0)
gridleft.addWidget(labelVolume, 5, 0)
gridleft.addWidget(labelPriceType, 6, 0)
gridleft.addWidget(labelExchange, 7, 0)
gridleft.addWidget(labelCurrency, 8, 0)
gridleft.addWidget(labelProductClass, 9, 0)
gridleft.addWidget(labelGateway, 10, 0)
gridleft.addWidget(self.lineSymbol, 0, 1)
gridleft.addWidget(self.lineName, 1, 1)
gridleft.addWidget(self.comboDirection, 2, 1)
gridleft.addWidget(self.comboOffset, 3, 1)
gridleft.addWidget(self.spinPrice, 4, 1)
gridleft.addWidget(self.spinVolume, 5, 1)
gridleft.addWidget(self.comboPriceType, 6, 1)
gridleft.addWidget(self.comboExchange, 7, 1)
gridleft.addWidget(self.comboCurrency, 8, 1)
gridleft.addWidget(self.comboProductClass, 9, 1)
gridleft.addWidget(self.comboGateway, 10, 1)
        # Right-hand side
labelBid1 = QtGui.QLabel(u'买一')
labelBid2 = QtGui.QLabel(u'买二')
labelBid3 = QtGui.QLabel(u'买三')
labelBid4 = QtGui.QLabel(u'买四')
labelBid5 = QtGui.QLabel(u'买五')
labelAsk1 = QtGui.QLabel(u'卖一')
labelAsk2 = QtGui.QLabel(u'卖二')
labelAsk3 = QtGui.QLabel(u'卖三')
labelAsk4 = QtGui.QLabel(u'卖四')
labelAsk5 = QtGui.QLabel(u'卖五')
self.labelBidPrice1 = QtGui.QLabel()
self.labelBidPrice2 = QtGui.QLabel()
self.labelBidPrice3 = QtGui.QLabel()
self.labelBidPrice4 = QtGui.QLabel()
self.labelBidPrice5 = QtGui.QLabel()
self.labelBidVolume1 = QtGui.QLabel()
self.labelBidVolume2 = QtGui.QLabel()
self.labelBidVolume3 = QtGui.QLabel()
self.labelBidVolume4 = QtGui.QLabel()
self.labelBidVolume5 = QtGui.QLabel()
self.labelAskPrice1 = QtGui.QLabel()
self.labelAskPrice2 = QtGui.QLabel()
self.labelAskPrice3 = QtGui.QLabel()
self.labelAskPrice4 = QtGui.QLabel()
self.labelAskPrice5 = QtGui.QLabel()
self.labelAskVolume1 = QtGui.QLabel()
self.labelAskVolume2 = QtGui.QLabel()
self.labelAskVolume3 = QtGui.QLabel()
self.labelAskVolume4 = QtGui.QLabel()
self.labelAskVolume5 = QtGui.QLabel()
labelLast = QtGui.QLabel(u'最新')
self.labelLastPrice = QtGui.QLabel()
self.labelReturn = QtGui.QLabel()
self.labelLastPrice.setMinimumWidth(60)
self.labelReturn.setMinimumWidth(60)
gridRight = QtGui.QGridLayout()
gridRight.addWidget(labelAsk5, 0, 0)
gridRight.addWidget(labelAsk4, 1, 0)
gridRight.addWidget(labelAsk3, 2, 0)
gridRight.addWidget(labelAsk2, 3, 0)
gridRight.addWidget(labelAsk1, 4, 0)
gridRight.addWidget(labelLast, 5, 0)
gridRight.addWidget(labelBid1, 6, 0)
gridRight.addWidget(labelBid2, 7, 0)
gridRight.addWidget(labelBid3, 8, 0)
gridRight.addWidget(labelBid4, 9, 0)
gridRight.addWidget(labelBid5, 10, 0)
gridRight.addWidget(self.labelAskPrice5, 0, 1)
gridRight.addWidget(self.labelAskPrice4, 1, 1)
gridRight.addWidget(self.labelAskPrice3, 2, 1)
gridRight.addWidget(self.labelAskPrice2, 3, 1)
gridRight.addWidget(self.labelAskPrice1, 4, 1)
gridRight.addWidget(self.labelLastPrice, 5, 1)
gridRight.addWidget(self.labelBidPrice1, 6, 1)
gridRight.addWidget(self.labelBidPrice2, 7, 1)
gridRight.addWidget(self.labelBidPrice3, 8, 1)
gridRight.addWidget(self.labelBidPrice4, 9, 1)
gridRight.addWidget(self.labelBidPrice5, 10, 1)
gridRight.addWidget(self.labelAskVolume5, 0, 2)
gridRight.addWidget(self.labelAskVolume4, 1, 2)
gridRight.addWidget(self.labelAskVolume3, 2, 2)
gridRight.addWidget(self.labelAskVolume2, 3, 2)
gridRight.addWidget(self.labelAskVolume1, 4, 2)
gridRight.addWidget(self.labelReturn, 5, 2)
gridRight.addWidget(self.labelBidVolume1, 6, 2)
gridRight.addWidget(self.labelBidVolume2, 7, 2)
gridRight.addWidget(self.labelBidVolume3, 8, 2)
gridRight.addWidget(self.labelBidVolume4, 9, 2)
gridRight.addWidget(self.labelBidVolume5, 10, 2)
        # Order buttons
buttonSendOrder = QtGui.QPushButton(u'发单')
buttonCancelAll = QtGui.QPushButton(u'全撤')
size = buttonSendOrder.sizeHint()
        buttonSendOrder.setMinimumHeight(size.height()*2)  # double the default button height
buttonCancelAll.setMinimumHeight(size.height()*2)
        # Assemble the layout
hbox = QtGui.QHBoxLayout()
hbox.addLayout(gridleft)
hbox.addLayout(gridRight)
vbox = QtGui.QVBoxLayout()
vbox.addLayout(hbox)
vbox.addWidget(buttonSendOrder)
vbox.addWidget(buttonCancelAll)
vbox.addStretch()
self.setLayout(vbox)
        # Wire up the updates
buttonSendOrder.clicked.connect(self.sendOrder)
buttonCancelAll.clicked.connect(self.cancelAll)
self.lineSymbol.returnPressed.connect(self.updateSymbol)
#----------------------------------------------------------------------
def updateSymbol(self):
"""合约变化"""
# 读取组件数据
symbol = str(self.lineSymbol.text())
exchange = unicode(self.comboExchange.currentText())
currency = unicode(self.comboCurrency.currentText())
productClass = unicode(self.comboProductClass.currentText())
gatewayName = unicode(self.comboGateway.currentText())
        # Look up the contract
if exchange:
vtSymbol = '.'.join([symbol, exchange])
contract = self.mainEngine.getContract(vtSymbol)
else:
vtSymbol = symbol
contract = self.mainEngine.getContract(symbol)
if contract:
vtSymbol = contract.vtSymbol
gatewayName = contract.gatewayName
self.lineName.setText(contract.name)
            exchange = contract.exchange  # make sure we have an exchange code
        # Clear price and volume
self.spinPrice.setValue(0)
self.spinVolume.setValue(0)
        # Clear the quote display
self.labelBidPrice1.setText('')
self.labelBidPrice2.setText('')
self.labelBidPrice3.setText('')
self.labelBidPrice4.setText('')
self.labelBidPrice5.setText('')
self.labelBidVolume1.setText('')
self.labelBidVolume2.setText('')
self.labelBidVolume3.setText('')
self.labelBidVolume4.setText('')
self.labelBidVolume5.setText('')
self.labelAskPrice1.setText('')
self.labelAskPrice2.setText('')
self.labelAskPrice3.setText('')
self.labelAskPrice4.setText('')
self.labelAskPrice5.setText('')
self.labelAskVolume1.setText('')
self.labelAskVolume2.setText('')
self.labelAskVolume3.setText('')
self.labelAskVolume4.setText('')
self.labelAskVolume5.setText('')
self.labelLastPrice.setText('')
self.labelReturn.setText('')
        # Re-register the event listener
self.eventEngine.unregister(EVENT_TICK + self.symbol, self.signal.emit)
self.eventEngine.register(EVENT_TICK + vtSymbol, self.signal.emit)
        # Subscribe to the contract
req = VtSubscribeReq()
req.symbol = symbol
req.exchange = exchange
req.currency = currency
req.productClass = productClass
self.mainEngine.subscribe(req, gatewayName)
        # Update the widget's current contract
self.symbol = vtSymbol
#----------------------------------------------------------------------
def updateTick(self, event):
"""更新行情"""
tick = event.dict_['data']
if tick.vtSymbol == self.symbol:
self.labelBidPrice1.setText(str(tick.bidPrice1))
self.labelAskPrice1.setText(str(tick.askPrice1))
self.labelBidVolume1.setText(str(tick.bidVolume1))
self.labelAskVolume1.setText(str(tick.askVolume1))
if tick.bidPrice2:
self.labelBidPrice2.setText(str(tick.bidPrice2))
self.labelBidPrice3.setText(str(tick.bidPrice3))
self.labelBidPrice4.setText(str(tick.bidPrice4))
self.labelBidPrice5.setText(str(tick.bidPrice5))
self.labelAskPrice2.setText(str(tick.askPrice2))
self.labelAskPrice3.setText(str(tick.askPrice3))
self.labelAskPrice4.setText(str(tick.askPrice4))
self.labelAskPrice5.setText(str(tick.askPrice5))
self.labelBidVolume2.setText(str(tick.bidVolume2))
self.labelBidVolume3.setText(str(tick.bidVolume3))
self.labelBidVolume4.setText(str(tick.bidVolume4))
self.labelBidVolume5.setText(str(tick.bidVolume5))
self.labelAskVolume2.setText(str(tick.askVolume2))
self.labelAskVolume3.setText(str(tick.askVolume3))
self.labelAskVolume4.setText(str(tick.askVolume4))
self.labelAskVolume5.setText(str(tick.askVolume5))
self.labelLastPrice.setText(str(tick.lastPrice))
if tick.preClosePrice:
rt = (tick.lastPrice/tick.preClosePrice)-1
self.labelReturn.setText(('%.2f' %(rt*100))+'%')
else:
self.labelReturn.setText('')
#----------------------------------------------------------------------
def connectSignal(self):
"""连接Signal"""
self.signal.connect(self.updateTick)
#----------------------------------------------------------------------
def sendOrder(self):
"""发单"""
symbol = str(self.lineSymbol.text())
exchange = unicode(self.comboExchange.currentText())
currency = unicode(self.comboCurrency.currentText())
productClass = unicode(self.comboProductClass.currentText())
gatewayName = unicode(self.comboGateway.currentText())
        # Look up the contract
if exchange:
vtSymbol = '.'.join([symbol, exchange])
contract = self.mainEngine.getContract(vtSymbol)
else:
vtSymbol = symbol
contract = self.mainEngine.getContract(symbol)
if contract:
gatewayName = contract.gatewayName
            exchange = contract.exchange  # make sure we have an exchange code
req = VtOrderReq()
req.symbol = symbol
req.exchange = exchange
req.price = self.spinPrice.value()
req.volume = self.spinVolume.value()
req.direction = unicode(self.comboDirection.currentText())
req.priceType = unicode(self.comboPriceType.currentText())
req.offset = unicode(self.comboOffset.currentText())
req.currency = currency
req.productClass = productClass
self.mainEngine.sendOrder(req, gatewayName)
#----------------------------------------------------------------------
def cancelAll(self):
"""一键撤销所有委托"""
l = self.mainEngine.getAllWorkingOrders()
for order in l:
req = VtCancelOrderReq()
req.symbol = order.symbol
req.exchange = order.exchange
req.frontID = order.frontID
req.sessionID = order.sessionID
req.orderID = order.orderID
self.mainEngine.cancelOrder(req, order.gatewayName)
#----------------------------------------------------------------------
def closePosition(self, cell):
"""根据持仓信息自动填写交易组件"""
# 读取持仓数据,cell是一个表格中的单元格对象
pos = cell.data
symbol = pos.symbol
# Update the contract displayed in the trading widget
self.lineSymbol.setText(symbol)
self.updateSymbol()
# Auto-fill the remaining fields
self.comboPriceType.setCurrentIndex(self.priceTypeList.index(PRICETYPE_LIMITPRICE))
self.comboOffset.setCurrentIndex(self.offsetList.index(OFFSET_CLOSE))
self.spinVolume.setValue(pos.position)
if pos.direction == DIRECTION_LONG or pos.direction == DIRECTION_NET:
self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_SHORT))
else:
self.comboDirection.setCurrentIndex(self.directionList.index(DIRECTION_LONG))
# Leave the price for the user to enter after the update, to prevent accidental orders
########################################################################
class ContractMonitor(BasicMonitor):
"""合约查询"""
#----------------------------------------------------------------------
def __init__(self, mainEngine, parent=None):
"""Constructor"""
super(ContractMonitor, self).__init__(parent=parent)
self.mainEngine = mainEngine
d = OrderedDict()
d['symbol'] = {'chinese':u'合约代码', 'cellType':BasicCell}
d['exchange'] = {'chinese':u'交易所', 'cellType':BasicCell}
d['vtSymbol'] = {'chinese':u'vt系统代码', 'cellType':BasicCell}
d['name'] = {'chinese':u'名称', 'cellType':BasicCell}
d['productClass'] = {'chinese':u'合约类型', 'cellType':BasicCell}
d['size'] = {'chinese':u'大小', 'cellType':BasicCell}
d['priceTick'] = {'chinese':u'最小价格变动', 'cellType':BasicCell}
#d['strikePrice'] = {'chinese':u'期权行权价', 'cellType':BasicCell}
#d['underlyingSymbol'] = {'chinese':u'期权标的物', 'cellType':BasicCell}
#d['optionType'] = {'chinese':u'期权类型', 'cellType':BasicCell}
self.setHeaderDict(d)
self.initUi()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(u'合约查询')
self.setMinimumSize(800, 800)
self.setFont(BASIC_FONT)
self.initTable()
self.addMenuAction()
#----------------------------------------------------------------------
def showAllContracts(self):
"""显示所有合约数据"""
l = self.mainEngine.getAllContracts()
d = {'.'.join([contract.exchange, contract.symbol]):contract for contract in l}
l2 = d.keys()
l2.sort(reverse=True)
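# Note: under Python 2, dict.keys() returns a list, so the in-place sort above is valid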
self.setRowCount(len(l2))
row = 0
for key in l2:
contract = d[key]
for n, header in enumerate(self.headerList):
content = safeUnicode(contract.__getattribute__(header))
cellType = self.headerDict[header]['cellType']
cell = cellType(content)
if self.font:
cell.setFont(self.font) # apply the custom font to the cell if one is configured
self.setItem(row, n, cell)
row = row + 1
#----------------------------------------------------------------------
def refresh(self):
"""刷新"""
self.menu.close() # 关闭菜单
self.clearContents()
self.setRowCount(0)
self.showAllContracts()
#----------------------------------------------------------------------
def addMenuAction(self):
"""增加右键菜单内容"""
refreshAction = QtGui.QAction(u'刷新', self)
refreshAction.triggered.connect(self.refresh)
self.menu.addAction(refreshAction)
#----------------------------------------------------------------------
def show(self):
"""显示"""
super(ContractMonitor, self).show()
self.refresh()
| mit |
mattt416/neutron | neutron/tests/retargetable/base.py | 37 | 2833 | # Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
This module defines a base test case that uses testscenarios to
parametrize the test methods of subclasses by varying the client
fixture used to target the Neutron API.
PluginClientFixture targets the Neutron API directly via the plugin
api, and will be executed by default. testscenarios will ensure that
each test is run against all plugins defined in plugin_configurations.
RestClientFixture targets a deployed Neutron daemon, and will be used
instead of PluginClientFixture only if OS_TEST_API_WITH_REST is set to 1.
Reference: https://pypi.python.org/pypi/testscenarios/
"""
import testscenarios
from neutron.tests import base as tests_base
from neutron.tests.retargetable import client_fixtures
from neutron.tests.unit.plugins.ml2 import test_plugin
# Each plugin must add a class to plugin_configurations that can configure the
# plugin for use with PluginClient. For a given plugin, the setup
# used for NeutronDbPluginV2TestCase can usually be reused. See the
# configuration classes listed below for examples of this reuse.
# TODO(marun) Discover plugin conf via a metaclass
plugin_configurations = [
test_plugin.Ml2ConfFixture(),
]
def rest_enabled():
return tests_base.bool_from_env('OS_TEST_API_WITH_REST')
def get_plugin_scenarios():
scenarios = []
for conf in plugin_configurations:
name = conf.plugin_name
class_name = name.rsplit('.', 1)[-1]
client = client_fixtures.PluginClientFixture(conf)
scenarios.append((class_name, {'client': client}))
return scenarios
def get_scenarios():
if rest_enabled():
# FIXME(marun) Remove local import once tempest config is safe
# to import alongside neutron config
from neutron.tests.retargetable import rest_fixture
return [('tempest', {'client': rest_fixture.RestClientFixture()})]
else:
return get_plugin_scenarios()
class RetargetableApiTest(testscenarios.WithScenarios,
tests_base.BaseTestCase):
scenarios = get_scenarios()
def setUp(self):
super(RetargetableApiTest, self).setUp()
if rest_enabled():
raise self.skipException(
'Tempest fixture requirements prevent this test from running')
self.useFixture(self.client)
| apache-2.0 |
zhaochengw/android_kernel_ef51lsk | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm and pid.
# If a [comm] or [pid] arg is given, only syscalls for that comm or pid
# are displayed.
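# Typical usage (record first, then replay the data through this script):
#   perf record -e raw_syscalls:sys_enter -a sleep 5
#   perf script -s syscall-counts-by-pid.py           # all comms/pids
#   perf script -s syscall-counts-by-pid.py 1234      # only pid 1234
#   perf script -s syscall-counts-by-pid.py firefox   # only comm 'firefox'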
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
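# autodict() auto-creates the nested comm/pid levels on access; incrementing a
# missing leaf raises TypeError, which is caught above to seed the count at 1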
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
follow99/django | django/contrib/sitemaps/views.py | 352 | 2953 | import datetime
from calendar import timegm
from functools import wraps
from django.contrib.sites.shortcuts import get_current_site
from django.core import urlresolvers
from django.core.paginator import EmptyPage, PageNotAnInteger
from django.http import Http404
from django.template.response import TemplateResponse
from django.utils.http import http_date
def x_robots_tag(func):
@wraps(func)
def inner(request, *args, **kwargs):
response = func(request, *args, **kwargs)
response['X-Robots-Tag'] = 'noindex, noodp, noarchive'
return response
return inner
@x_robots_tag
def index(request, sitemaps,
template_name='sitemap_index.xml', content_type='application/xml',
sitemap_url_name='django.contrib.sitemaps.views.sitemap'):
req_protocol = request.scheme
req_site = get_current_site(request)
sites = []
for section, site in sitemaps.items():
if callable(site):
site = site()
protocol = req_protocol if site.protocol is None else site.protocol
sitemap_url = urlresolvers.reverse(
sitemap_url_name, kwargs={'section': section})
absolute_url = '%s://%s%s' % (protocol, req_site.domain, sitemap_url)
sites.append(absolute_url)
for page in range(2, site.paginator.num_pages + 1):
sites.append('%s?p=%s' % (absolute_url, page))
return TemplateResponse(request, template_name, {'sitemaps': sites},
content_type=content_type)
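# A minimal URLconf wiring sketch (the sitemap class name is hypothetical); note
# the sitemap view must be reachable under the name passed as `sitemap_url_name`:
#
#     from django.conf.urls import url
#     from django.contrib.sitemaps.views import index, sitemap
#
#     sitemaps = {'blog': BlogSitemap}
#     urlpatterns = [
#         url(r'^sitemap\.xml$', index, {'sitemaps': sitemaps}),
#         url(r'^sitemap-(?P<section>.+)\.xml$', sitemap, {'sitemaps': sitemaps},
#             name='django.contrib.sitemaps.views.sitemap'),
#     ]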
@x_robots_tag
def sitemap(request, sitemaps, section=None,
template_name='sitemap.xml', content_type='application/xml'):
req_protocol = request.scheme
req_site = get_current_site(request)
if section is not None:
if section not in sitemaps:
raise Http404("No sitemap available for section: %r" % section)
maps = [sitemaps[section]]
else:
maps = sitemaps.values()
page = request.GET.get("p", 1)
urls = []
for site in maps:
try:
if callable(site):
site = site()
urls.extend(site.get_urls(page=page, site=req_site,
protocol=req_protocol))
except EmptyPage:
raise Http404("Page %s empty" % page)
except PageNotAnInteger:
raise Http404("No page '%s'" % page)
response = TemplateResponse(request, template_name, {'urlset': urls},
content_type=content_type)
if hasattr(site, 'latest_lastmod'):
# if latest_lastmod is defined for site, set header so as
# ConditionalGetMiddleware is able to send 304 NOT MODIFIED
lastmod = site.latest_lastmod
response['Last-Modified'] = http_date(
timegm(
lastmod.utctimetuple() if isinstance(lastmod, datetime.datetime)
else lastmod.timetuple()
)
)
return response
| bsd-3-clause |
tlakshman26/cinder-new-branch | cinder/tests/unit/api/contrib/test_availability_zones.py | 32 | 2932 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo_utils import timeutils
import cinder.api.contrib.availability_zones
import cinder.context
import cinder.test
import cinder.volume.api
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
current_time = timeutils.utcnow()
def list_availability_zones(self):
return (
{'name': 'ping', 'available': True},
{'name': 'pong', 'available': False},
)
class FakeRequest(object):
environ = {'cinder.context': cinder.context.get_admin_context()}
GET = {}
class ControllerTestCase(cinder.test.TestCase):
def setUp(self):
super(ControllerTestCase, self).setUp()
self.controller = cinder.api.contrib.availability_zones.Controller()
self.req = FakeRequest()
self.stubs.Set(cinder.volume.api.API,
'list_availability_zones',
list_availability_zones)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
actual = self.controller.index(self.req)
expected = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
self.assertEqual(expected, actual)
class XMLSerializerTest(cinder.test.TestCase):
def test_index_xml(self):
fixture = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
serializer = cinder.api.contrib.availability_zones.ListTemplate()
text = serializer.serialize(fixture)
tree = etree.fromstring(text)
self.assertEqual('availabilityZones', tree.tag)
self.assertEqual(2, len(tree))
self.assertEqual('availabilityZone', tree[0].tag)
self.assertEqual('ping', tree[0].get('name'))
self.assertEqual('zoneState', tree[0][0].tag)
self.assertEqual('True', tree[0][0].get('available'))
self.assertEqual('pong', tree[1].get('name'))
self.assertEqual('zoneState', tree[1][0].tag)
self.assertEqual('False', tree[1][0].get('available'))
| apache-2.0 |
brianwoo/django-tutorial | ENV/lib/python2.7/site-packages/django/contrib/gis/maps/google/overlays.py | 79 | 11934 | from __future__ import unicode_literals
from django.contrib.gis.geos import (
LinearRing, LineString, Point, Polygon, fromstr,
)
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import total_ordering
from django.utils.html import html_safe
@html_safe
@python_2_unicode_compatible
class GEvent(object):
"""
A Python wrapper for the Google GEvent object.
Events can be attached to any object derived from GOverlayBase with the
add_event() call.
For more information please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GEvent
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google import GoogleMap, GEvent, GPolyline
def sample_request(request):
polyline = GPolyline('LINESTRING(101 26, 112 26, 102 31)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
polyline.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(polylines=[polyline])})
"""
def __init__(self, event, action):
"""
Initializes a GEvent object.
Parameters:
event:
string for the event, such as 'click'. The event must be a valid
event for the object in the Google Maps API.
There is no validation of the event type within Django.
action:
string containing a Javascript function, such as
'function() { location.href = "newurl";}'
The string must be a valid Javascript function. Again there is no
validation of the function within Django.
"""
self.event = event
self.action = action
def __str__(self):
"Returns the parameter part of a GEvent."
return '"%s", %s' % (self.event, self.action)
@html_safe
@python_2_unicode_compatible
class GOverlayBase(object):
def __init__(self):
self.events = []
def latlng_from_coords(self, coords):
"Generates a JavaScript array of GLatLng objects for the given coordinates."
return '[%s]' % ','.join('new GLatLng(%s,%s)' % (y, x) for x, y in coords)
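# e.g. coords of (x, y) pairs [(2.35, 48.85), (2.29, 48.86)] yield
# '[new GLatLng(48.85,2.35),new GLatLng(48.86,2.29)]' -- note the (y, x) swap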
def add_event(self, event):
"Attaches a GEvent to the overlay object."
self.events.append(event)
def __str__(self):
"The string representation is the JavaScript API call."
return '%s(%s)' % (self.__class__.__name__, self.js_params)
class GPolygon(GOverlayBase):
"""
A Python wrapper for the Google GPolygon object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolygon
"""
def __init__(self, poly,
stroke_color='#0000ff', stroke_weight=2, stroke_opacity=1,
fill_color='#0000ff', fill_opacity=0.4):
"""
The GPolygon object initializes on a GEOS Polygon or a parameter that
may be instantiated into GEOS Polygon. Please note that this will not
depict a Polygon's internal rings.
Keyword Options:
stroke_color:
The color of the polygon outline. Defaults to '#0000ff' (blue).
stroke_weight:
The width of the polygon outline, in pixels. Defaults to 2.
stroke_opacity:
The opacity of the polygon outline, between 0 and 1. Defaults to 1.
fill_color:
The color of the polygon fill. Defaults to '#0000ff' (blue).
fill_opacity:
The opacity of the polygon fill. Defaults to 0.4.
"""
if isinstance(poly, six.string_types):
poly = fromstr(poly)
if isinstance(poly, (tuple, list)):
poly = Polygon(poly)
if not isinstance(poly, Polygon):
raise TypeError('GPolygon may only initialize on GEOS Polygons.')
# Getting the envelope of the input polygon (used for automatically
# determining the zoom level).
self.envelope = poly.envelope
# Translating the coordinates into a JavaScript array of
# Google `GLatLng` objects.
self.points = self.latlng_from_coords(poly.shell.coords)
# Stroke settings.
self.stroke_color, self.stroke_opacity, self.stroke_weight = stroke_color, stroke_opacity, stroke_weight
# Fill settings.
self.fill_color, self.fill_opacity = fill_color, fill_opacity
super(GPolygon, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s, "%s", %s' % (self.points, self.stroke_color, self.stroke_weight, self.stroke_opacity,
self.fill_color, self.fill_opacity)
class GPolyline(GOverlayBase):
"""
A Python wrapper for the Google GPolyline object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GPolyline
"""
def __init__(self, geom, color='#0000ff', weight=2, opacity=1):
"""
The GPolyline object may be initialized on GEOS LineString, LinearRing,
and Polygon objects (internal rings not supported) or a parameter that
may be instantiated into one of the above geometries.
Keyword Options:
color:
The color to use for the polyline. Defaults to '#0000ff' (blue).
weight:
The width of the polyline, in pixels. Defaults to 2.
opacity:
The opacity of the polyline, between 0 and 1. Defaults to 1.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Polygon(geom)
# Generating the lat/lng coordinate pairs.
if isinstance(geom, (LineString, LinearRing)):
self.latlngs = self.latlng_from_coords(geom.coords)
elif isinstance(geom, Polygon):
self.latlngs = self.latlng_from_coords(geom.shell.coords)
else:
raise TypeError('GPolyline may only initialize on GEOS LineString, LinearRing, and/or Polygon geometries.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
self.color, self.weight, self.opacity = color, weight, opacity
super(GPolyline, self).__init__()
@property
def js_params(self):
return '%s, "%s", %s, %s' % (self.latlngs, self.color, self.weight, self.opacity)
@total_ordering
class GIcon(object):
"""
Creates a GIcon object to pass into a Gmarker object.
The keyword arguments map to instance attributes of the same name. These,
in turn, correspond to a subset of the attributes of the official GIcon
javascript object:
http://code.google.com/apis/maps/documentation/reference.html#GIcon
Because a Google map often uses several different icons, a name field has
been added to the required arguments.
Required Arguments:
varname:
A string which will become the basis for the js variable name of
the marker; for this reason, your code should assign a unique
name for each GIcon you instantiate, otherwise there will be
name space collisions in your javascript.
Keyword Options:
image:
The url of the image to be used as the icon on the map. Defaults
to 'G_DEFAULT_ICON'.
iconsize:
a tuple representing the pixel size of the foreground (not the
shadow) image of the icon, in the format: (width, height) ex.:
GIcon('fast_food',
image="/media/icon/star.png",
iconsize=(15,10))
Would indicate your custom icon was 15px wide and 10px height.
shadow:
the url of the image of the icon's shadow
shadowsize:
a tuple representing the pixel size of the shadow image, format is
the same as ``iconsize``
iconanchor:
a tuple representing the pixel coordinate relative to the top left
corner of the icon image at which this icon is anchored to the map.
In (x, y) format. x increases to the right in the Google Maps
coordinate system and y increases downwards in the Google Maps
coordinate system.
infowindowanchor:
The pixel coordinate relative to the top left corner of the icon
image at which the info window is anchored to this icon.
"""
def __init__(self, varname, image=None, iconsize=None,
shadow=None, shadowsize=None, iconanchor=None,
infowindowanchor=None):
self.varname = varname
self.image = image
self.iconsize = iconsize
self.shadow = shadow
self.shadowsize = shadowsize
self.iconanchor = iconanchor
self.infowindowanchor = infowindowanchor
def __eq__(self, other):
return self.varname == other.varname
def __lt__(self, other):
return self.varname < other.varname
def __hash__(self):
# XOR with hash of GIcon type so that hash('varname') won't
# equal hash(GIcon('varname')).
return hash(self.__class__) ^ hash(self.varname)
class GMarker(GOverlayBase):
"""
A Python wrapper for the Google GMarker object. For more information
please see the Google Maps API Reference:
http://code.google.com/apis/maps/documentation/reference.html#GMarker
Example:
from django.shortcuts import render_to_response
from django.contrib.gis.maps.google.overlays import GMarker, GEvent
def sample_request(request):
marker = GMarker('POINT(101 26)')
event = GEvent('click',
'function() { location.href = "http://www.google.com"}')
marker.add_event(event)
return render_to_response('mytemplate.html',
{'google' : GoogleMap(markers=[marker])})
"""
def __init__(self, geom, title=None, draggable=False, icon=None):
"""
The GMarker object may initialize on GEOS Points or a parameter
that may be instantiated into a GEOS point. Keyword options map to
GMarkerOptions -- so far only the title option is supported.
Keyword Options:
title:
Title option for GMarker, will be displayed as a tooltip.
draggable:
Draggable option for GMarker, disabled by default.
"""
# If a GEOS geometry isn't passed in, try to construct one.
if isinstance(geom, six.string_types):
geom = fromstr(geom)
if isinstance(geom, (tuple, list)):
geom = Point(geom)
if isinstance(geom, Point):
self.latlng = self.latlng_from_coords(geom.coords)
else:
raise TypeError('GMarker may only initialize on GEOS Point geometry.')
# Getting the envelope for automatic zoom determination.
self.envelope = geom.envelope
# TODO: Add support for more GMarkerOptions
self.title = title
self.draggable = draggable
self.icon = icon
super(GMarker, self).__init__()
def latlng_from_coords(self, coords):
return 'new GLatLng(%s,%s)' % (coords[1], coords[0])
def options(self):
result = []
if self.title:
result.append('title: "%s"' % self.title)
if self.icon:
result.append('icon: %s' % self.icon.varname)
if self.draggable:
result.append('draggable: true')
return '{%s}' % ','.join(result)
@property
def js_params(self):
return '%s, %s' % (self.latlng, self.options())
| gpl-3.0 |
oihane/odoo-addons | product_pricelist_item_menu/models/res_partner.py | 2 | 1420 | # Copyright 2020 Alfredo de la fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from odoo import models, fields, api
from odoo.models import expression
from odoo.tools.safe_eval import safe_eval
class ResPartner(models.Model):
_inherit = 'res.partner'
count_pricelists_item = fields.Integer(
string='Count pricelist items',
compute='_compute_count_pricelists_item')
def _compute_count_pricelists_item(self):
for partner in self.filtered(lambda c: c.property_product_pricelist):
partner.count_pricelists_item = len(
partner.property_product_pricelist.item_ids)
@api.multi
def button_show_partner_pricelist_items(self):
self.ensure_one()
action = self.env.ref(
'product_pricelist_item_menu.product_pricelist_item_menu_action')
action_dict = action.read()[0] if action else {}
action_dict['context'] = safe_eval(action_dict.get('context', '{}'))
action_dict['context'].update({
'search_pricelits_id': self.property_product_pricelist.id,
'default_pricelist_id': self.property_product_pricelist.id,
})
domain = expression.AND([
[('pricelist_id', '=', self.property_product_pricelist.id)],
safe_eval(action.domain or '[]')])
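# expression.AND normalizes its operands and joins them in prefix notation, e.g.
# expression.AND([[('a', '=', 1)], [('b', '=', 2)]]) == ['&', ('a', '=', 1), ('b', '=', 2)]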
action_dict.update({'domain': domain})
return action_dict
| agpl-3.0 |
santiavenda2/griffith | lib/add.py | 3 | 36739 | # -*- coding: UTF-8 -*-
# vim: fdm=marker
__revision__ = '$Id$'
# Copyright (c) 2005-2011 Vasco Nunes, Piotr Ożarowski
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
# You may use and distribute this software under the terms of the
# GNU General Public License, version 2 or later
import logging
import os
import urllib2
import gtk
from sqlalchemy.exc import IntegrityError
import quick_filter
import db
import gutils
import initialize
import main_treeview
log = logging.getLogger("Griffith")
### widgets ###################################################
def clear(self):
"""clears all fields in dialog"""
set_details(self, {})
self.widgets['add']['cb_only_empty'].set_active(False)
def add_movie(self, details={}):
set_details(self, details)
self.active_plugin = ''
self.widgets['add']['add_button'].show()
self.widgets['add']['add_close_button'].show()
self.widgets['add']['clear_button'].show()
self.widgets['add']['save_button'].hide()
self.widgets['add']['window'].set_title(_('Add a new movie'))
self.widgets['add']['window'].show()
def edit_movie(self, details={}):
if not 'number' in details:
details['number'] = gutils.find_next_available(self.db)
self.selected_iter_edit = self.selected_iter
set_details(self, details)
self.widgets['add']['add_button'].hide()
self.widgets['add']['add_close_button'].hide()
self.widgets['add']['clear_button'].show()
self.widgets['add']['save_button'].show()
self.widgets['add']['window'].set_title(_('Edit movie'))
self.widgets['add']['window'].show()
def update_movie(self):
session = self.db.Session()
if self._am_movie_id is not None:
movie = session.query(db.Movie).filter_by(movie_id=self._am_movie_id).one()
else:
movie = session.query(db.Movie).filter_by(movie_id=self._movie_id).one()
if movie is None: # movie was deleted in the meantime
return add_movie_db(self, True)
details = get_details(self)
old_poster_md5 = movie.poster_md5
new_poster_md5 = old_poster_md5
if details['image']:
if old_poster_md5 != details['image']: # details["image"] can contain MD5 or file path
new_image_path = details['image']
if not os.path.isfile(new_image_path):
new_image_path = os.path.join(self.locations['temp'], "poster_%s.jpg" % details['image'])
if not os.path.isfile(new_image_path):
log.warn("cannot read temporary file: %s", new_image_path)
else:
new_poster_md5 = gutils.md5sum(file(new_image_path, 'rb'))
if session.query(db.Poster).filter_by(md5sum=new_poster_md5).count() == 0:
try:
data = file(new_image_path, 'rb').read()
except Exception, e:
log.warning("cannot read poster data")
old_poster_md5 = new_poster_md5
else:
poster = db.Poster(md5sum=new_poster_md5, data=data)
del details["image"]
details['poster_md5'] = new_poster_md5
session.add(poster)
else:
details['poster_md5'] = new_poster_md5
else:
details['poster_md5'] = None
update_movie_instance(movie, details, session)
session.add(movie)
# delete old image
if old_poster_md5 and old_poster_md5 != new_poster_md5:
import delete
old_poster = session.query(db.Poster).filter_by(md5sum=old_poster_md5).first()
if old_poster and len(old_poster.movies) == 1: # other movies are not using the same poster
session.delete(old_poster)
delete.delete_poster_from_cache(old_poster_md5, self.locations['posters'])
if commit(session):
main_treeview.setmovie(self, movie, self.selected_iter_edit[0], self.treemodel)
# close add window
self.widgets['add']['window'].hide()
# refresh
self.treeview_clicked()
self.update_statusbar(_('Movie information has been updated'))
def change_rating_from_slider(self):
rating = int(self.widgets['add']['rating_slider'].get_value())
self.widgets['add']['image_rating'].show()
try:
rimage = int(str(self.config.get('rating_image')))
except:
rimage = 0
if rimage:
prefix = ''
else:
prefix = "meter"
rating_file = "%s/%s0%d.png" % (self.locations['images'], prefix, rating)
handler = self.widgets['add']['image_rating'].set_from_pixbuf(gtk.gdk.pixbuf_new_from_file(rating_file))
def populate_with_results(self):
w = self.widgets['add']
m_id = None
if self.founded_results_id:
log.info("selected id: %s", self.founded_results_id)
m_id = self.founded_results_id
else:
self.founded_results_id = 0
treeselection = self.widgets['results']['treeview'].get_selection()
(tmp_model, tmp_iter) = treeselection.get_selected()
if tmp_iter is None:
return False
m_id = tmp_model.get_value(tmp_iter, 0)
self.treemodel_results.clear()
self.widgets['results']['window'].hide()
plugin_name = 'PluginMovie' + self.active_plugin
plugin = __import__(plugin_name)
self.movie = plugin.Plugin(m_id)
self.movie.locations = self.locations
self.movie.config = self.config
fields_to_fetch = ['o_title', 'title', 'director', 'plot', 'cast', 'country', 'genre',
'classification', 'studio', 'o_site', 'site', 'trailer', 'year',
'notes', 'runtime', 'image', 'rating', 'screenplay', 'cameraman',
'resolution', 'barcode']
# remove fields that user doesn't want to fetch: (see preferences window)
fields_to_fetch = [i for i in fields_to_fetch if self.config.get("s_%s" % i, True, section='add')]
if w['cb_only_empty'].get_active(): # only empty fields
details = get_details(self)
fields_to_fetch = [i for i in fields_to_fetch if details[i] is None or details[i] == 0.0]
self.movie.fields_to_fetch = fields_to_fetch
if not self.movie.get_movie(w['window']):
return None
self.movie.parse_movie()
if 'year' in fields_to_fetch:
w['year'].set_value(int(self.movie.year))
fields_to_fetch.pop(fields_to_fetch.index('year'))
if 'runtime' in fields_to_fetch:
w['runtime'].set_value(int(self.movie.runtime))
fields_to_fetch.pop(fields_to_fetch.index('runtime'))
if 'cast' in fields_to_fetch:
cast_buffer = w['cast'].get_buffer()
cast_buffer.set_text(gutils.convert_entities(self.movie.cast))
fields_to_fetch.pop(fields_to_fetch.index('cast'))
if 'plot' in fields_to_fetch:
plot_buffer = w['plot'].get_buffer()
plot_buffer.set_text(gutils.convert_entities(self.movie.plot))
fields_to_fetch.pop(fields_to_fetch.index('plot'))
if 'notes' in fields_to_fetch:
notes_buffer = w['notes'].get_buffer()
notes_buffer.set_text(gutils.convert_entities(self.movie.notes))
fields_to_fetch.pop(fields_to_fetch.index('notes'))
if 'rating' in fields_to_fetch:
if self.movie.rating:
w['rating_slider'].set_value(float(self.movie.rating))
fields_to_fetch.pop(fields_to_fetch.index('rating'))
if 'resolution' in fields_to_fetch:
w['resolution'].get_child().set_text(gutils.convert_entities(self.movie.resolution))
fields_to_fetch.pop(fields_to_fetch.index('resolution'))
# poster
if 'image' in fields_to_fetch:
w['image'].set_text('')
if self.movie.image:
image = os.path.join(self.locations['temp'], "poster_%s.jpg" % self.movie.image)
try:
handler = self.Image.set_from_file(image)
pixbuf = self.Image.get_pixbuf()
w['picture'].set_from_pixbuf(pixbuf.scale_simple(100, 140, 3))
w['image'].set_text(self.movie.image)
w['aremove_poster'].set_sensitive(True)
except:
image = gutils.get_defaultimage_fname(self)
handler = self.Image.set_from_file(image)
w['picture'].set_from_pixbuf(self.Image.get_pixbuf())
w['aremove_poster'].set_sensitive(False)
else:
image = gutils.get_defaultimage_fname(self)
handler = self.Image.set_from_file(image)
Pixbuf = self.Image.get_pixbuf()
w['picture'].set_from_pixbuf(Pixbuf)
w['aremove_poster'].set_sensitive(False)
fields_to_fetch.pop(fields_to_fetch.index('image'))
# other fields
for i in fields_to_fetch:
w[i].set_text(gutils.convert_entities(self.movie[i]))
def show_websearch_results(self):
total = self.founded_results_id = 0
for g in self.search_movie.ids:
if (str(g) != ''):
total += 1
if total > 1:
self.widgets['results']['window'].show()
self.widgets['results']['window'].set_keep_above(True)
movieslist = []
row = None
key = 0
for row in self.search_movie.ids:
if (str(row) != ''):
if isinstance(self.search_movie.titles[key], unicode):
title = self.search_movie.titles[key]
else:
title = str(self.search_movie.titles[key]).decode(self.search_movie.encode)
movieslist.append((row, title))
key += 1
movieslist = sorted(movieslist, key=lambda titel: titel[1])
self.treemodel_results.clear()
for entry in movieslist:
myiter = self.treemodel_results.insert_before(None, None)
self.treemodel_results.set_value(myiter, 0, str(entry[0]))
self.treemodel_results.set_value(myiter, 1, entry[1])
self.widgets['results']['treeview'].show()
elif total == 1:
self.widgets['results']['treeview'].set_cursor(total-1)
for row in self.search_movie.ids:
if (str(row) != ''):
self.founded_results_id = str(row)
populate_with_results(self)
else:
gutils.error(_("No results"), self.widgets['add']['window'])
def get_from_web(self):
"""search the movie in web using the active plugin"""
title = self.widgets['add']['title'].get_text()
o_title = self.widgets['add']['o_title'].get_text()
if o_title or title:
option = gutils.on_combo_box_entry_changed_name(self.widgets['add']['source'])
self.active_plugin = option
plugin_name = 'PluginMovie%s' % option
plugin = __import__(plugin_name)
if self.debug_mode:
log.debug('reloading %s', plugin_name)
import sys
reload(sys.modules[plugin_name])
self.search_movie = plugin.SearchPlugin()
self.search_movie.config = self.config
self.search_movie.locations = self.locations
if o_title:
self.search_movie.url = self.search_movie.original_url_search
if self.search_movie.remove_accents:
self.search_movie.title = gutils.remove_accents(o_title, 'utf-8')
else:
self.search_movie.title = unicode(o_title, 'utf-8')
elif title:
self.search_movie.url = self.search_movie.translated_url_search
if self.search_movie.remove_accents:
self.search_movie.title = gutils.remove_accents(title, 'utf-8')
else:
self.search_movie.title = unicode(title, 'utf-8')
# check if internet connection is available
try:
urllib2.urlopen("http://www.griffith.cc")
if self.search_movie.search_movies(self.widgets['add']['window']):
self.search_movie.get_searches()
if len(self.search_movie.ids) == 1 and o_title and title:
self.search_movie.url = self.search_movie.translated_url_search
if self.search_movie.remove_accents:
self.search_movie.title = gutils.remove_accents(title, 'utf-8')
else:
self.search_movie.title = unicode(title, 'utf-8')
if self.search_movie.search_movies(self.widgets['add']['window']):
self.search_movie.get_searches()
self.show_search_results(self.search_movie)
except:
log.exception('')
gutils.error(_("Connection failed."))
else:
gutils.error(_("You should fill the original title\nor the movie title."))
def source_changed(self):
option = gutils.on_combo_box_entry_changed_name(self.widgets['add']['source'])
self.active_plugin = option
plugin_name = 'PluginMovie' + option
plugin = __import__(plugin_name)
self.widgets['add']['plugin_desc'].set_text(plugin.plugin_name + "\n" \
+ plugin.plugin_description + "\n" + _('Url: ') \
+ plugin.plugin_url + "\n" + _('Language: ') + plugin.plugin_language)
image = os.path.join(self.locations['images'], plugin_name + ".png")
# if movie plugin logo exists lets use it
if os.path.exists(image):
handler = self.widgets['add']['plugin_image'].set_from_pixbuf(gtk.gdk.pixbuf_new_from_file(image))
def get_details(self): #{{{
w = self.widgets['add']
cast_buffer = w['cast'].get_buffer()
notes_buffer = w['notes'].get_buffer()
plot_buffer = w['plot'].get_buffer()
t_movies = {
'cameraman': w['cameraman'].get_text().decode('utf-8'),
'classification': w['classification'].get_text().decode('utf-8'),
'barcode': unicode(gutils.digits_only(w['barcode'].get_text().decode('utf-8'))),
'color': w['color'].get_active(),
'cond': w['condition'].get_active(),
'country': w['country'].get_text().decode('utf-8'),
'director': w['director'].get_text().decode('utf-8'),
'genre': w['genre'].get_text().decode('utf-8'),
'image': w['image'].get_text().decode('utf-8'),
'layers': w['layers'].get_active(),
'media_num': w['discs'].get_value(),
'number': w['number'].get_value(),
'o_site': w['o_site'].get_text().decode('utf-8'),
'o_title': w['o_title'].get_text().decode('utf-8'),
'rating': w['rating_slider'].get_value(),
'region': w['region'].get_active(),
'resolution': w['resolution'].get_child().get_text().strip().decode('utf-8'),
'runtime': w['runtime'].get_text().decode('utf-8'),
'screenplay': w['screenplay'].get_text().decode('utf-8'),
'site': w['site'].get_text().decode('utf-8'),
'studio': w['studio'].get_text().decode('utf-8'),
'title': w['title'].get_text().decode('utf-8'),
'trailer': w['trailer'].get_text().decode('utf-8'),
'year': w['year'].get_value(),
'collection_id': w['collection'].get_active(),
'medium_id': w['media'].get_active(),
'volume_id': w['volume'].get_active(),
'vcodec_id': w['vcodec'].get_active(),
'cast': cast_buffer.get_text(cast_buffer.get_start_iter(), cast_buffer.get_end_iter()).decode('utf-8'),
'notes': notes_buffer.get_text(notes_buffer.get_start_iter(), notes_buffer.get_end_iter()).decode('utf-8'),
'plot': plot_buffer.get_text(plot_buffer.get_start_iter(), plot_buffer.get_end_iter()).decode('utf-8'),
'created': None,
'updated': None
}
if self._am_movie_id is not None:
t_movies['movie_id'] = self._am_movie_id
if t_movies['collection_id'] > 0:
t_movies['collection_id'] = self.collection_combo_ids[t_movies['collection_id']]
else:
t_movies['collection_id'] = None
if t_movies['volume_id'] > 0:
t_movies['volume_id'] = self.volume_combo_ids[t_movies['volume_id']]
else:
t_movies['volume_id'] = None
if t_movies['medium_id'] > 0:
t_movies['medium_id'] = self.media_ids[t_movies['medium_id']]
else:
t_movies['medium_id'] = None
if t_movies['vcodec_id'] > 0:
t_movies['vcodec_id'] = self.vcodecs_ids[t_movies['vcodec_id']]
else:
t_movies['vcodec_id'] = None
if t_movies['barcode'] == '0':
t_movies['barcode'] = None
if w['seen'].get_active():
t_movies['seen'] = True
else:
t_movies['seen'] = False
if t_movies['year'] < 1900:
t_movies['year'] = None
def get_id(model, text):
for i in model:
if i[1] == text:
return i[0]
return None
# languages
t_movies['languages'] = set()
# Not ideal, but it works; without it the current selection of a language field is lost
w['lang_treeview'].child_focus(gtk.DIR_TAB_FORWARD)
for row in self.lang['model']:
lang_id = get_id(self.lang['lang'], row[0])
lang_type = get_id(self.lang['type'], row[1])
acodec = get_id(self.lang['acodec'], row[2])
achannel = get_id(self.lang['achannel'], row[3])
subformat = get_id(self.lang['subformat'], row[4])
t_movies['languages'].add((lang_id, lang_type, acodec, achannel, subformat))
# tags
t_movies['tags'] = {}
for i in self.tags_ids:
if self.am_tags[i].get_active() == True:
t_movies['tags'][self.tags_ids[i]] = 1
validate_details(t_movies)
return t_movies #}}}
def set_details(self, item=None):#{{{
if item is None:
item = {}
if 'movie_id' in item and item['movie_id']:
self._am_movie_id = item['movie_id']
else:
self._am_movie_id = None
w = self.widgets['add']
cast_buffer = w['cast'].get_buffer()
notes_buffer = w['notes'].get_buffer()
plot_buffer = w['plot'].get_buffer()
if 'o_title' in item and item['o_title']:
w['o_title'].set_text(item['o_title'])
else:
w['o_title'].set_text('')
if 'title' in item and item['title']:
w['title'].set_text(item['title'])
else:
w['title'].set_text('')
if 'number' in item and item['number']:
w['number'].set_value(int(item['number']))
else:
w['number'].set_value(int(gutils.find_next_available(self.db)))
if 'title' in item and item['title']:
w['title'].set_text(item['title'])
if 'year' in item and item['year']:
w['year'].set_value(gutils.digits_only(item['year'], 2100))
else:
w['year'].set_value(0)
if 'resolution' in item and item['resolution']:
if self.config.get('use_resolution_alias', True):
w['resolution'].get_child().set_text(item['resolution'])
elif 'height' in item and item['height'] and 'width' in item and item['width']:
w['resolution'].get_child().set_text("%dx%d" % (item['width'], item['height']))
else: # failback to 'resolution'
w['resolution'].get_child().set_text(item['resolution'])
else:
w['resolution'].get_child().set_text('')
if 'runtime' in item and item['runtime']:
w['runtime'].set_value(gutils.digits_only(item['runtime']))
else:
w['runtime'].set_value(0)
if 'barcode' in item and item['barcode']:
w['barcode'].set_text(item['barcode'])
else:
w['barcode'].set_text('')
if 'cameraman' in item and item['cameraman']:
w['cameraman'].set_text(item['cameraman'])
else:
w['cameraman'].set_text('')
if 'screenplay' in item and item['screenplay']:
w['screenplay'].set_text(item['screenplay'])
else:
w['screenplay'].set_text('')
if 'country' in item and item['country']:
w['country'].set_text(item['country'])
else:
w['country'].set_text('')
if 'classification' in item and item['classification']:
w['classification'].set_text(item['classification'])
else:
w['classification'].set_text('')
if 'studio' in item and item['studio']:
w['studio'].set_text(item['studio'])
else:
w['studio'].set_text('')
if 'o_site' in item and item['o_site']:
w['o_site'].set_text(item['o_site'])
else:
w['o_site'].set_text('')
if 'director' in item and item['director']:
w['director'].set_text(item['director'])
else:
w['director'].set_text('')
if 'site' in item and item['site']:
w['site'].set_text(item['site'])
else:
w['site'].set_text('')
if 'trailer' in item and item['trailer']:
w['trailer'].set_text(item['trailer'])
else:
w['trailer'].set_text('')
if 'genre' in item and item['genre']:
w['genre'].set_text(item['genre'])
else:
w['genre'].set_text('')
if 'color' in item and item['color']:
w['color'].set_active(gutils.digits_only(item['color'], 3))
else:
w['color'].set_active(gutils.digits_only(self.config.get('color', 0, section='defaults'), 3))
if 'layers' in item and item['layers']:
w['layers'].set_active(gutils.digits_only(item['layers'], 4))
else:
w['layers'].set_active(gutils.digits_only(self.config.get('layers', 0, section='defaults'), 4))
if 'region' in item and item['region'] >= 0:
w['region'].set_active(gutils.digits_only(item['region'], 11))
else:
w['region'].set_active(gutils.digits_only(self.config.get('region', 0, section='defaults'), 11))
if 'cond' in item and item['cond'] >= 0:
w['condition'].set_active(gutils.digits_only(item['cond'], 5))
else:
w['condition'].set_active(gutils.digits_only(self.config.get('condition', 0, section='defaults'), 5))
if 'media_num' in item and item['media_num']:
w['discs'].set_value(gutils.digits_only(item['media_num']))
else:
w['discs'].set_value(1)
if 'rating' in item and item['rating']:
w['rating_slider'].set_value(gutils.digits_only(item['rating'], 10))
else:
w['rating_slider'].set_value(0)
if 'seen' in item:
if item['seen'] is True:
w['seen'].set_active(True)
else:
w['seen'].set_active(False)
else:
w['seen'].set_active(bool(self.config.get('seen', True, section='defaults')))
if 'cast' in item and item['cast']:
cast_buffer.set_text(item['cast'])
else:
cast_buffer.set_text('')
if 'notes' in item and item['notes']:
notes_buffer.set_text(item['notes'])
else:
notes_buffer.set_text('')
if 'plot' in item and item['plot']:
plot_buffer.set_text(item['plot'])
else:
plot_buffer.set_text('')
pos = 0
if 'medium_id' in item and item['medium_id']:
pos = gutils.findKey(item['medium_id'], self.media_ids)
else:
pos = gutils.findKey(int(self.config.get('media', 0, section='defaults')), self.media_ids)
if pos is not None:
w['media'].set_active(int(pos))
else:
w['media'].set_active(0)
pos = 0
if 'vcodec_id' in item and item['vcodec_id']:
pos = gutils.findKey(item['vcodec_id'], self.vcodecs_ids)
else:
pos = gutils.findKey(int(self.config.get('vcodec', 0, section='defaults')), self.vcodecs_ids)
if pos is not None:
w['vcodec'].set_active(int(pos))
else:
w['vcodec'].set_active(0)
pos = 0
if 'volume_id' in item and item['volume_id']:
pos = gutils.findKey(item['volume_id'], self.volume_combo_ids)
if pos is not None:
w['volume'].set_active(int(pos))
else:
w['volume'].set_active(0)
pos = 0
if 'collection_id' in item and item['collection_id']:
pos = gutils.findKey(item['collection_id'], self.collection_combo_ids)
if pos is not None:
w['collection'].set_active(int(pos))
else:
w['collection'].set_active(0)
# tags
for tag in self.am_tags:
self.am_tags[tag].set_active(False)
if 'tags' in item:
for tag in item['tags']:
i = gutils.findKey(tag.tag_id, self.tags_ids)
self.am_tags[i].set_active(True)
# languages
w['lang_treeview'].get_model().clear()
if 'languages' in item and len(item['languages']) > 0:
for i in item['languages']:
self.create_language_row(i)
# poster
w['aremove_poster'].set_sensitive(True)
if 'poster_md5' in item and item['poster_md5']:
image_path = gutils.get_image_fname(item["poster_md5"], self.db, 'm')
if not image_path:
image_path = '' # isfile doesn't like bool
w['aremove_poster'].set_sensitive(False)
w['image'].set_text(item['poster_md5'])
elif 'image' in item and item['image']:
if len(item['image']) == 32: # md5
image_path = gutils.get_image_fname(item["image"], self.db, 'm')
if not image_path:
image_path = '' # isfile doesn't like bool
w['aremove_poster'].set_sensitive(False)
else:
w['image'].set_text(item['image'])
else:
image_path = os.path.join(self.locations['posters'], "m_%s.jpg" % item['image'])
log.warn("TODO: image=%s", item['image'])
else:
w['image'].set_text('')
image_path = gutils.get_defaultimage_fname(self)
w['aremove_poster'].set_sensitive(False)
if not os.path.isfile(image_path):
image_path = gutils.get_defaultimage_fname(self)
w['aremove_poster'].set_sensitive(False)
w['picture'].set_from_file(image_path)
w['notebook'].set_current_page(0)
w['o_title'].grab_focus()
#}}}
def validate_details(t_movies, allow_only=None):
for i in t_movies.keys():
if t_movies[i] == '':
t_movies[i] = None
for i in ('color', 'cond', 'layers', 'media', 'vcodec'):
if i in t_movies and t_movies[i] < 1:
t_movies[i] = None
for i in ('volume_id', 'collection_id', 'runtime'):
if i in t_movies and (t_movies[i] is None or int(t_movies[i]) == 0):
t_movies[i] = None
if allow_only is not None:
# iterate over a copy of keys of the dict because removing elements of a dict
# within a for enumeration of the same dict instance isn't supported
for i in t_movies.keys():
if not i in allow_only:
t_movies.pop(i)
### database part #############################################
def add_movie_db(self, close):
session = self.db.Session()
details = get_details(self)
if not details['o_title'] and not details['title']:
gutils.error(_("You should fill the original title\nor the movie title."),
parent=self.widgets['add']['window'])
return False
asked = False
if details['o_title']:
if session.query(db.Movie).filter_by(o_title=details['o_title']).count() > 0:
asked = True
if not gutils.question(_('Movie with that title already exists, are you sure you want to add?'), self.widgets['add']['window']):
return False
if not asked and details['title']:
if session.query(db.Movie).filter_by(title=details['title']).count() > 0:
if not gutils.question(_('Movie with that title already exists, are you sure you want to add?'), self.widgets['add']['window']):
return False
new_poster_md5 = None
if details['image']:
tmp_image_path = original_image_path = details['image']
if not os.path.isfile(tmp_image_path):
tmp_image_path = os.path.join(self.locations['temp'], "poster_%s.jpg" % details['image'])
if os.path.isfile(tmp_image_path):
file_object = file(tmp_image_path, 'rb')
try:
new_poster_md5 = gutils.md5sum(file_object)
if session.query(db.Poster).filter_by(md5sum=new_poster_md5).count() == 0:
try:
file_object.seek(0, 0)
data = file_object.read()
except Exception, e:
log.warning("cannot read poster data")
else:
poster = db.Poster(md5sum=new_poster_md5, data=data)
del details["image"]
details["poster_md5"] = new_poster_md5
session.add(poster)
else:
details["poster_md5"] = new_poster_md5
finally:
file_object.close()
try:
if not tmp_image_path == original_image_path:
os.remove(tmp_image_path)
except Exception, e:
log.warn("cannot remove temporary file %s", tmp_image_path)
else:
log.warn("cannot read temporary file: %s", tmp_image_path)
movie = update_movie_instance(None, details, session)
session.add(movie)
if not commit(session):
return False
# create new entry, unselect current movie and select new entry in main treelist
myiter = main_treeview.addmovie(self, movie)
main_treeview.select(self, None)
main_treeview.select(self, myiter)
# update statusbar
self.total += 1
self.count_statusbar()
clear(self)
if close:
self.hide_add_window()
def clone_movie(self):
session = self.db.Session()
if self.selected_iter[0] is None:
log.warn("cannot clone movie: no item selected")
return False
movie = session.query(db.Movie).filter_by(number=self.selected[0]).first()
if movie is None:
log.warn("cannot clone movie: Movie(%s) not found", number)
return False
next_number = gutils.find_next_available(self.db)
# integer problem workaround
if int(movie.seen) == 1:
seen = True
else:
seen = False
new_movie = db.Movie()
# TODO: WARNING: loan problems (don't copy volume/collection data until resolved)
new_movie.cast = movie.cast
new_movie.classification = movie.classification
new_movie.vcodec_id = movie.vcodec_id
new_movie.barcode = movie.barcode
new_movie.cameraman = movie.cameraman
new_movie.collection_id = movie.collection_id
new_movie.volume_id = movie.volume_id
new_movie.color = movie.color
new_movie.cond = movie.cond
new_movie.country = movie.country
new_movie.director = movie.director
new_movie.genre = movie.genre
new_movie.site = movie.site
new_movie.loaned = movie.loaned
new_movie.layers = movie.layers
new_movie.medium_id = movie.medium_id
new_movie.number = next_number
new_movie.media_num = movie.media_num
new_movie.notes = movie.notes
new_movie.o_title = movie.o_title
new_movie.plot = movie.plot
new_movie.poster_md5 = movie.poster_md5
new_movie.ratio_id = movie.ratio_id
new_movie.rating = movie.rating
new_movie.region = movie.region
new_movie.runtime = movie.runtime
new_movie.resolution = movie.resolution
new_movie.screenplay = movie.screenplay
new_movie.seen = seen
new_movie.o_site = movie.o_site
new_movie.studio = movie.studio
new_movie.title = movie.title
new_movie.trailer = movie.trailer
new_movie.year = movie.year
new_movie.tags = movie.tags
new_movie.languages = movie.languages
new_movie.loans = movie.loans
# save
session.add(new_movie)
if not commit(session):
return False
if movie.poster_md5:
image_path = gutils.get_image_fname(movie.poster_md5, self.db)
if not image_path or not os.path.isfile(image_path):
image_path = gutils.get_defaultimage_fname(self)
handler = self.Image.set_from_file(image_path)
# change_filter calls populate_treeview which updates the status bar
quick_filter.change_filter(self)
def update_movie_instance(movie, details, session):
if not movie:
movie = db.Movie()
if details is not None:
t_tags = t_languages = None
if 'tags' in details:
t_tags = details.pop('tags')
if 'languages' in details:
t_languages = details.pop('languages')
#for i in db.tables.movies.columns.keys():
for i in details:
if i not in ('created', 'updated') and hasattr(movie, i):
setattr(movie, i, details[i])
# clear previous data (in case of updates)
if movie.languages:
movie.languages = []
if movie.tags:
movie.tags = []
# languages
if t_languages is not None:
for lang in t_languages:
if lang[0] > 0:
ml = db.MovieLang(lang_id=lang[0], type=lang[1],
acodec_id=lang[2], achannel_id=lang[3], subformat_id=lang[4])
movie.languages.append(ml)
# tags
if t_tags is not None:
for tag in t_tags.keys():
dbTag = session.query(db.Tag).filter_by(tag_id=tag).one()
#movie.tags.append(db.MovieTag(tag_id=tag))
movie.tags.append(dbTag)
if hasattr(movie, 'image') and movie.image: # TODO: remove it once image will be removed from movies_table
movie.image = None # remove MD5 or link
return movie
def commit(session):
try:
session.commit()
except IntegrityError, e:
session.rollback()
log.warn("Cannot commit movie: %s", e.message)
gutils.warning(unicode(e.orig))
return False
except Exception, e:
log.error("Unexpected problem: %s", e)
return False
return True
def add_medium(self, name):
session = self.db.Session()
medium = db.Medium(name=name)
session.add(medium)
try:
session.commit()
except Exception, e:
session.rollback()
log.warn("Cannot add medium entry: %s", e.message)
else:
initialize.media_combos(self)
return medium.medium_id
def add_vcodec(self, name):
session = self.db.Session()
vcodec = db.VCodec(name=name)
session.add(vcodec)
try:
session.commit()
except Exception, e:
session.rollback()
log.warn("Cannot add video codec entry: %s", e.message)
else:
initialize.vcodec_combos(self)
return vcodec.vcodec_id
def add_volume(self, name):
session = self.db.Session()
vol = db.Volume(name=name)
session.add(vol)
try:
session.commit()
except Exception, e:
session.rollback()
log.warn("Cannot add volume: %s", e.message)
else:
initialize.update_volume_combo_ids(self)
initialize.fill_volumes_combo(self, vol.volume_id)
return vol.volume_id
def add_collection(self, name):
session = self.db.Session()
col = db.Collection(name=name)
session.add(col)
try:
session.commit()
except Exception, e:
session.rollback()
log.warn("Cannot add collection: %s", e.message)
else:
initialize.update_collection_combo_ids(self)
initialize.fill_collections_combo(self, col.collection_id)
return col.collection_id
def change_poster(self):
from edit import change_poster_select_file
if change_poster_select_file(self, -1, change_poster_new_movie):
self.widgets['add']['aremove_poster'].set_sensitive(True)
def change_poster_new_movie(self, number, filename):
try:
handler = self.Image.set_from_file(filename)
pixbuf = self.Image.get_pixbuf()
handler = self.widgets['add']['picture'].set_from_pixbuf(pixbuf.scale_simple(100, 140, 3))
gutils.garbage(handler)
self.widgets['add']['image'].set_text(filename)
return True
except:
image = gutils.get_defaultimage_fname(self)
handler = self.Image.set_from_file(image)
handler = self.widgets['add']['picture'].set_from_pixbuf(self.Image.get_pixbuf())
gutils.garbage(handler)
return False
def delete_poster(self):
w = self.widgets['add']
w['image'].set_text('')
image_path = gutils.get_defaultimage_fname(self)
w['picture'].set_from_file(image_path)
w['aremove_poster'].set_sensitive(False)
| gpl-2.0 |
laurentb/weboob | weboob/core/ouiboube.py | 1 | 19192 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2014 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
import os
from weboob.core.bcall import BackendsCall
from weboob.core.modules import ModulesLoader, RepositoryModulesLoader
from weboob.core.backendscfg import BackendsConfig
from weboob.core.requests import RequestsManager
from weboob.core.repositories import Repositories, PrintProgress
from weboob.core.scheduler import Scheduler
from weboob.tools.backend import Module
from weboob.tools.compat import basestring, unicode
from weboob.tools.config.iconfig import ConfigError
from weboob.tools.log import getLogger
from weboob.exceptions import ModuleLoadError
__all__ = ['WebNip', 'Weboob']
class VersionsMismatchError(ConfigError):
pass
class WebNip(object):
"""
Weboob in Non Integrated Programs
It provides methods to build backends or call methods on all loaded
backends.
You should use this class when you want to build an application
using Weboob as a library, without the standard modules or
the automatic module download and update mechanism. When using
WebNip, you have to explicitly provide module paths and deal
yourself with backend configuration.
:param modules_path: path to directory containing modules.
:type modules_path: :class:`basestring`
:param storage: provide a storage where backends can save data
:type storage: :class:`weboob.tools.storage.IStorage`
:param scheduler: what scheduler to use; default is :class:`weboob.core.scheduler.Scheduler`
:type scheduler: :class:`weboob.core.scheduler.IScheduler`
"""
VERSION = '2.1'
def __init__(self, modules_path=None, storage=None, scheduler=None):
self.logger = getLogger('weboob')
self.backend_instances = {}
self.requests = RequestsManager()
if modules_path is None:
import pkg_resources
# Package weboob_modules is provided by
# https://git.weboob.org/weboob/modules
# and should be pip-installed separately.
# Note that Weboob users should rather install Weboob modules
# through https://updates.weboob.org/.
modules_path = pkg_resources.resource_filename('weboob_modules', '')
if modules_path:
self.modules_loader = ModulesLoader(modules_path, self.VERSION)
if scheduler is None:
scheduler = Scheduler()
self.scheduler = scheduler
self.storage = storage
def __deinit__(self):
self.deinit()
def deinit(self):
"""
        Call this method when you stop using Weboob, to
        properly unload all backends.
"""
self.unload_backends()
def build_backend(self, module_name, params=None, storage=None, name=None, nofail=False, logger=None):
"""
Create a backend.
It does not load it into the Weboob object, so you are responsible for
deinitialization and calls.
:param module_name: name of module
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:param name: name of backend
:type name: :class:`basestring`
:rtype: :class:`weboob.tools.backend.Module`
:param nofail: if true, this call can't fail
:type nofail: :class:`bool`
"""
module = self.modules_loader.get_or_load_module(module_name)
backend_instance = module.create_instance(self, name or module_name, params or {}, storage, nofail, logger=logger or self.logger)
return backend_instance
class LoadError(Exception):
"""
        Raised when a backend is unable to load.
:param backend_name: name of backend we can't load
:param exception: exception object
"""
def __init__(self, backend_name, exception):
super(WebNip.LoadError, self).__init__(unicode(exception))
self.backend_name = backend_name
def load_backend(self, module_name, name, params=None, storage=None):
"""
Load a backend.
:param module_name: name of module to load
:type module_name: :class:`basestring`:
:param name: name of instance
:type name: :class:`basestring`
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:rtype: :class:`weboob.tools.backend.Module`
"""
if name is None:
name = module_name
if name in self.backend_instances:
            raise self.LoadError(name, 'A backend is already loaded with the name "%s"' % name)
backend = self.build_backend(module_name, params, storage, name)
self.backend_instances[name] = backend
return backend
def unload_backends(self, names=None):
"""
Unload backends.
        :param names: if specified, only unload those backends
:type names: :class:`list`
"""
unloaded = {}
if isinstance(names, basestring):
names = [names]
elif names is None:
names = list(self.backend_instances.keys())
for name in names:
backend = self.backend_instances.pop(name)
with backend:
backend.deinit()
unloaded[backend.name] = backend
return unloaded
def __getitem__(self, name):
"""
Alias for :func:`WebNip.get_backend`.
"""
return self.get_backend(name)
def get_backend(self, name, **kwargs):
"""
Get a backend from its name.
:param name: name of backend to get
:type name: str
:param default: if specified, get this value when the backend is not found
:type default: whatever you want
:raises: :class:`KeyError` if not found.
"""
try:
return self.backend_instances[name]
except KeyError:
if 'default' in kwargs:
return kwargs['default']
else:
raise
def count_backends(self):
"""
Get number of loaded backends.
"""
return len(self.backend_instances)
def iter_backends(self, caps=None, module=None):
"""
        Iterate over each backend.
Note: each backend is locked when it is returned.
:param caps: optional list of capabilities to select backends
:type caps: tuple[:class:`weboob.capabilities.base.Capability`]
:param module: optional name of module
:type module: :class:`basestring`
:rtype: iter[:class:`weboob.tools.backend.Module`]
"""
for _, backend in sorted(self.backend_instances.items()):
if (caps is None or backend.has_caps(caps)) and \
(module is None or backend.NAME == module):
with backend:
yield backend
def __getattr__(self, name):
def caller(*args, **kwargs):
return self.do(name, *args, **kwargs)
return caller
def do(self, function, *args, **kwargs):
r"""
Do calls on loaded backends with specified arguments, in separated
threads.
This function has two modes:
        - If *function* is a string, it calls the method with this name on
          each backend with the specified arguments;
- If *function* is a callable, it calls it in a separated thread with
the locked backend instance at first arguments, and \*args and
\*\*kwargs.
:param function: backend's method name, or a callable object
:type function: :class:`str`
:param backends: list of backends to iterate on
:type backends: list[:class:`str`]
:param caps: iterate on backends which implement this caps
:type caps: list[:class:`weboob.capabilities.base.Capability`]
:rtype: A :class:`weboob.core.bcall.BackendsCall` object (iterable)
"""
backends = list(self.backend_instances.values())
_backends = kwargs.pop('backends', None)
if _backends is not None:
if isinstance(_backends, Module):
backends = [_backends]
elif isinstance(_backends, basestring):
if len(_backends) > 0:
try:
backends = [self.backend_instances[_backends]]
except (ValueError, KeyError):
backends = []
elif isinstance(_backends, (list, tuple, set)):
backends = []
for backend in _backends:
if isinstance(backend, basestring):
try:
backends.append(self.backend_instances[backend])
except (ValueError, KeyError):
pass
else:
backends.append(backend)
else:
self.logger.warning(u'The "backends" value isn\'t supported: %r', _backends)
if 'caps' in kwargs:
caps = kwargs.pop('caps')
backends = [backend for backend in backends if backend.has_caps(caps)]
# The return value MUST BE the BackendsCall instance. Please never iterate
# here on this object, because caller might want to use other methods, like
        # wait() or callback_thread().
# Thanks a lot.
return BackendsCall(backends, function, *args, **kwargs)
def schedule(self, interval, function, *args):
"""
Schedule an event.
:param interval: delay before calling the function
:type interval: int
:param function: function to call
        :type function: callable
:param args: arguments to give to function
        :returns: an event identifier
"""
return self.scheduler.schedule(interval, function, *args)
def repeat(self, interval, function, *args):
"""
        Repeat a call to a function.
        :param interval: interval between two calls
        :type interval: int
        :param function: function to call
        :type function: callable
        :param args: arguments to give to function
        :returns: an event identifier
"""
return self.scheduler.repeat(interval, function, *args)
def cancel(self, ev):
"""
Cancel an event
:param ev: the event identificator
"""
return self.scheduler.cancel(ev)
def want_stop(self):
"""
Plan to stop the scheduler.
"""
return self.scheduler.want_stop()
def loop(self):
"""
Run the scheduler loop
"""
return self.scheduler.run()
def load_or_install_module(self, module_name):
""" Load a backend, but can't install it """
return self.modules_loader.get_or_load_module(module_name)
class Weboob(WebNip):
"""
The main class of Weboob, used to manage backends, modules repositories and
call methods on all loaded backends.
:param workdir: optional parameter to set path of the working directory
:type workdir: str
:param datadir: optional parameter to set path of the data directory
:type datadir: str
:param backends_filename: name of the *backends* file, where configuration of
backends is stored
:type backends_filename: str
:param storage: provide a storage where backends can save data
:type storage: :class:`weboob.tools.storage.IStorage`
"""
BACKENDS_FILENAME = 'backends'
def __init__(self, workdir=None, datadir=None, backends_filename=None, scheduler=None, storage=None):
super(Weboob, self).__init__(modules_path=False, scheduler=scheduler, storage=storage)
# Create WORKDIR
if workdir is None:
if 'WEBOOB_WORKDIR' in os.environ:
workdir = os.environ['WEBOOB_WORKDIR']
else:
workdir = os.path.join(os.environ.get('XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')), 'weboob')
self.workdir = os.path.realpath(workdir)
self._create_dir(workdir)
# Create DATADIR
if datadir is None:
if 'WEBOOB_DATADIR' in os.environ:
datadir = os.environ['WEBOOB_DATADIR']
elif 'WEBOOB_WORKDIR' in os.environ:
datadir = os.environ['WEBOOB_WORKDIR']
else:
datadir = os.path.join(os.environ.get('XDG_DATA_HOME', os.path.join(os.path.expanduser('~'), '.local', 'share')), 'weboob')
_datadir = os.path.realpath(datadir)
self._create_dir(_datadir)
# Modules management
self.repositories = Repositories(workdir, _datadir, self.VERSION)
self.modules_loader = RepositoryModulesLoader(self.repositories)
# Backend instances config
if not backends_filename:
backends_filename = os.environ.get('WEBOOB_BACKENDS', os.path.join(self.workdir, self.BACKENDS_FILENAME))
elif not backends_filename.startswith('/'):
backends_filename = os.path.join(self.workdir, backends_filename)
self.backends_config = BackendsConfig(backends_filename)
def _create_dir(self, name):
if not os.path.exists(name):
os.makedirs(name)
elif not os.path.isdir(name):
self.logger.error(u'"%s" is not a directory', name)
def update(self, progress=PrintProgress()):
"""
Update modules from repositories.
"""
self.repositories.update(progress)
modules_to_check = set([module_name for _, module_name, _ in self.backends_config.iter_backends()])
for module_name in modules_to_check:
minfo = self.repositories.get_module_info(module_name)
if minfo and not minfo.is_installed():
self.repositories.install(minfo, progress)
def build_backend(self, module_name, params=None, storage=None, name=None, nofail=False):
"""
Create a single backend which is not listed in configuration.
:param module_name: name of module
:param params: parameters to give to backend
:type params: :class:`dict`
:param storage: storage to use
:type storage: :class:`weboob.tools.storage.IStorage`
:param name: name of backend
:type name: :class:`basestring`
:rtype: :class:`weboob.tools.backend.Module`
:param nofail: if true, this call can't fail
:type nofail: :class:`bool`
"""
minfo = self.repositories.get_module_info(module_name)
if minfo is None:
raise ModuleLoadError(module_name, 'Module does not exist.')
if not minfo.is_installed():
self.repositories.install(minfo)
return super(Weboob, self).build_backend(module_name, params, storage, name, nofail)
def load_backends(self, caps=None, names=None, modules=None, exclude=None, storage=None, errors=None):
"""
Load backends listed in config file.
:param caps: load backends which implement all of specified caps
:type caps: tuple[:class:`weboob.capabilities.base.Capability`]
:param names: load backends in list
:type names: tuple[:class:`str`]
:param modules: load backends which module is in list
:type modules: tuple[:class:`str`]
:param exclude: do not load backends in list
:type exclude: tuple[:class:`str`]
:param storage: use this storage if specified
:type storage: :class:`weboob.tools.storage.IStorage`
:param errors: if specified, store every errors in this list
:type errors: list[:class:`LoadError`]
:returns: loaded backends
:rtype: dict[:class:`str`, :class:`weboob.tools.backend.Module`]
"""
loaded = {}
if storage is None:
storage = self.storage
if not self.repositories.check_repositories():
self.logger.error(u'Repositories are not consistent with the sources.list')
raise VersionsMismatchError(u'Versions mismatch, please run "weboob-config update"')
for backend_name, module_name, params in self.backends_config.iter_backends():
if '_enabled' in params and not params['_enabled'].lower() in ('1', 'y', 'true', 'on', 'yes') or \
names is not None and backend_name not in names or \
modules is not None and module_name not in modules or \
exclude is not None and backend_name in exclude:
continue
minfo = self.repositories.get_module_info(module_name)
if minfo is None:
self.logger.warning(u'Backend "%s" is referenced in %s but was not found. '
u'Perhaps a missing repository or a removed module?', module_name, self.backends_config.confpath)
continue
if caps is not None and not minfo.has_caps(caps):
continue
if not minfo.is_installed():
self.repositories.install(minfo)
module = None
try:
module = self.modules_loader.get_or_load_module(module_name)
except ModuleLoadError as e:
self.logger.error(u'Unable to load module "%s": %s', module_name, e)
continue
if backend_name in self.backend_instances:
self.logger.warning(u'Oops, the backend "%s" is already loaded. Unload it before reloading...', backend_name)
self.unload_backends(backend_name)
try:
backend_instance = module.create_instance(self, backend_name, params, storage)
except Module.ConfigError as e:
if errors is not None:
errors.append(self.LoadError(backend_name, e))
else:
self.backend_instances[backend_name] = loaded[backend_name] = backend_instance
return loaded
def load_or_install_module(self, module_name):
""" Load a backend, and install it if not done before """
try:
return self.modules_loader.get_or_load_module(module_name)
except ModuleLoadError:
self.repositories.install(module_name)
return self.modules_loader.get_or_load_module(module_name)
| lgpl-3.0 |
rghe/ansible | test/units/modules/network/f5/test_bigip_monitor_tcp_echo.py | 26 | 10042 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
import pytest
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_tcp_echo import Parameters
from library.modules.bigip_monitor_tcp_echo import ModuleManager
from library.modules.bigip_monitor_tcp_echo import ArgumentSpec
from library.modules.bigip_monitor_tcp_echo import HAS_F5SDK
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_monitor_tcp_echo import Parameters
from ansible.modules.network.f5.bigip_monitor_tcp_echo import ModuleManager
from ansible.modules.network.f5.bigip_monitor_tcp_echo import ArgumentSpec
from ansible.modules.network.f5.bigip_monitor_tcp_echo import HAS_F5SDK
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
parent='parent',
ip='10.10.10.10',
interval=20,
timeout=30,
time_until_up=60,
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp_echo'
assert p.destination == '10.10.10.10'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_module_parameters_ints_as_strings(self):
args = dict(
name='foo',
parent='parent',
ip='10.10.10.10',
interval='20',
timeout='30',
time_until_up='60',
partition='Common'
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp_echo'
assert p.destination == '10.10.10.10'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
def test_api_parameters(self):
args = dict(
name='foo',
defaultsFrom='/Common/parent',
destination='10.10.10.10',
interval=20,
timeout=30,
timeUntilUp=60
)
p = Parameters(params=args)
assert p.name == 'foo'
assert p.parent == '/Common/parent'
assert p.ip == '10.10.10.10'
assert p.type == 'tcp_echo'
assert p.destination == '10.10.10.10'
assert p.interval == 20
assert p.timeout == 30
assert p.time_until_up == 60
class TestManagerEcho(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
interval=20,
timeout=30,
time_until_up=60,
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_monitor_idempotent(self, *args):
set_module_args(dict(
name='foo',
ip='10.10.10.10',
interval=20,
timeout=30,
time_until_up=60,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is False
def test_update_interval(self, *args):
set_module_args(dict(
name='foo',
interval=10,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['interval'] == 10
def test_update_interval_larger_than_existing_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=30,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_interval_larger_than_new_timeout(self, *args):
set_module_args(dict(
name='foo',
interval=10,
timeout=5,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert "must be less than" in str(ex)
def test_update_timeout(self, *args):
set_module_args(dict(
name='foo',
timeout=300,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['timeout'] == 300
def test_update_time_until_up(self, *args):
set_module_args(dict(
name='foo',
time_until_up=300,
server='localhost',
password='password',
user='admin'
))
current = Parameters(params=load_fixture('load_ltm_monitor_tcp_echo.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
mm.update_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['time_until_up'] == 300
| gpl-3.0 |
ptisserand/ansible | lib/ansible/plugins/callback/default.py | 24 | 13770 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: default
type: stdout
short_description: default Ansible screen output
version_added: historical
description:
- This is the default output callback for ansible-playbook.
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
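# A sketch of how this callback is selected via ansible.cfg (it is already
# the built-in default, so this setting is usually implicit):
#
#   [defaults]
#   stdout_callback = default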
from ansible import constants as C
from ansible.playbook.task_include import TaskInclude
from ansible.plugins.callback import CallbackBase
from ansible.utils.color import colorize, hostcolor
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'default'
def __init__(self):
self._play = None
self._last_task_banner = None
super(CallbackModule, self).__init__()
def v2_runner_on_failed(self, result, ignore_errors=False):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
self._handle_exception(result._result)
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
if delegated_vars:
self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)), color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
if ignore_errors:
self._display.display("...ignoring", color=C.COLOR_SKIP)
def v2_runner_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
if delegated_vars:
msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "changed: [%s]" % result._host.get_name()
color = C.COLOR_CHANGED
else:
if delegated_vars:
msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg = "ok: [%s]" % result._host.get_name()
color = C.COLOR_OK
self._handle_warnings(result._result)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
self._clean_results(result._result, result._task.action)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % (self._dump_results(result._result),)
self._display.display(msg, color=color)
def v2_runner_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
if result._task.loop and 'results' in result._result:
self._process_items(result)
else:
msg = "skipping: [%s]" % result._host.get_name()
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result):
if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid:
self._print_task_banner(result._task)
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
self._dump_results(result._result)),
color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
def v2_playbook_on_no_hosts_matched(self):
self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP)
def v2_playbook_on_no_hosts_remaining(self):
self._display.banner("NO MORE HOSTS LEFT")
def v2_playbook_on_task_start(self, task, is_conditional):
if self._play.strategy != 'free':
self._print_task_banner(task)
def _print_task_banner(self, task):
        # args can be specified as no_log in several places: in the task or in
        # the argument spec. We can check whether the task is no_log, but the
        # argument spec can't be checked, because it is only run on the target
        # machine and we haven't run it there yet at this time.
#
# So we give people a config option to affect display of the args so
# that they can secure this if they feel that their stdout is insecure
# (shoulder surfing, logging stdout straight to a file, etc).
args = ''
if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
args = u', '.join(u'%s=%s' % a for a in task.args.items())
args = u' %s' % args
self._display.banner(u"TASK [%s%s]" % (task.get_name().strip(), args))
if self._display.verbosity >= 2:
path = task.get_path()
if path:
self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG)
self._last_task_banner = task._uuid
def v2_playbook_on_cleanup_task_start(self, task):
self._display.banner("CLEANUP TASK [%s]" % task.get_name().strip())
def v2_playbook_on_handler_task_start(self, task):
self._display.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
def v2_playbook_on_play_start(self, play):
name = play.get_name().strip()
if not name:
msg = u"PLAY"
else:
msg = u"PLAY [%s]" % name
self._play = play
self._display.banner(msg)
def v2_on_file_diff(self, result):
if result._task.loop and 'results' in result._result:
for res in result._result['results']:
if 'diff' in res and res['diff'] and res.get('changed', False):
diff = self._get_diff(res['diff'])
if diff:
self._display.display(diff)
elif 'diff' in result._result and result._result['diff'] and result._result.get('changed', False):
diff = self._get_diff(result._result['diff'])
if diff:
self._display.display(diff)
def v2_runner_item_on_ok(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
if isinstance(result._task, TaskInclude):
return
elif result._result.get('changed', False):
msg = 'changed'
color = C.COLOR_CHANGED
else:
msg = 'ok'
color = C.COLOR_OK
if delegated_vars:
msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += ": [%s]" % result._host.get_name()
msg += " => (item=%s)" % (self._get_item(result._result),)
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=color)
def v2_runner_item_on_failed(self, result):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
self._clean_results(result._result, result._task.action)
self._handle_exception(result._result)
msg = "failed: "
if delegated_vars:
msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host'])
else:
msg += "[%s]" % (result._host.get_name())
self._handle_warnings(result._result)
self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR)
def v2_runner_item_on_skipped(self, result):
if self._plugin_options.get('show_skipped_hosts', C.DISPLAY_SKIPPED_HOSTS): # fallback on constants for inherited plugins missing docs
self._clean_results(result._result, result._task.action)
msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result))
if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += " => %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_include(self, included_file):
msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts]))
self._display.display(msg, color=C.COLOR_SKIP)
def v2_playbook_on_stats(self, stats):
self._display.banner("PLAY RECAP")
hosts = sorted(stats.processed.keys())
for h in hosts:
t = stats.summarize(h)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t),
colorize(u'ok', t['ok'], C.COLOR_OK),
colorize(u'changed', t['changed'], C.COLOR_CHANGED),
colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE),
colorize(u'failed', t['failures'], C.COLOR_ERROR)),
screen_only=True
)
self._display.display(u"%s : %s %s %s %s" % (
hostcolor(h, t, False),
colorize(u'ok', t['ok'], None),
colorize(u'changed', t['changed'], None),
colorize(u'unreachable', t['unreachable'], None),
colorize(u'failed', t['failures'], None)),
log_only=True
)
self._display.display("", screen_only=True)
# print custom stats
if self._plugin_options.get('show_custom_stats', C.SHOW_CUSTOM_STATS) and stats.custom: # fallback on constants for inherited plugins missing docs
self._display.banner("CUSTOM STATS: ")
# per host
# TODO: come up with 'pretty format'
for k in sorted(stats.custom.keys()):
if k == '_run':
continue
self._display.display('\t%s: %s' % (k, self._dump_results(stats.custom[k], indent=1).replace('\n', '')))
# print per run custom stats
if '_run' in stats.custom:
self._display.display("", screen_only=True)
self._display.display('\tRUN: %s' % self._dump_results(stats.custom['_run'], indent=1).replace('\n', ''))
self._display.display("", screen_only=True)
def v2_playbook_on_start(self, playbook):
if self._display.verbosity > 1:
from os.path import basename
self._display.banner("PLAYBOOK: %s" % basename(playbook._file_name))
if self._display.verbosity > 3:
# show CLI options
if self._options is not None:
for option in dir(self._options):
if option.startswith('_') or option in ['read_file', 'ensure_value', 'read_module']:
continue
val = getattr(self._options, option)
if val and self._display.verbosity > 3:
self._display.display('%s: %s' % (option, val), color=C.COLOR_VERBOSE, screen_only=True)
def v2_runner_retry(self, result):
task_name = result.task_name or result._task
msg = "FAILED - RETRYING: %s (%d retries left)." % (task_name, result._result['retries'] - result._result['attempts'])
if (self._display.verbosity > 2 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result:
msg += "Result was: %s" % self._dump_results(result._result)
self._display.display(msg, color=C.COLOR_DEBUG)
def v2_playbook_on_notify(self, handler, host):
if self._display.verbosity > 1:
self._display.display("NOTIFIED HANDLER %s for %s" % (handler.get_name(), host), color=C.COLOR_VERBOSE, screen_only=True)
| gpl-3.0 |
rmfitzpatrick/ansible | lib/ansible/modules/network/avi/avi_cluster.py | 27 | 3776 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cluster
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of Cluster Avi RESTful Object
description:
- This module is used to configure Cluster object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
name:
description:
- Name of the object.
required: true
nodes:
description:
- List of clusternode.
rejoin_nodes_automatically:
description:
        - Re-join cluster nodes automatically in the event one of the nodes is reset to factory.
- Default value when not specified in API or module is interpreted by Avi Controller as True.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
virtual_ip:
description:
- A virtual ip address.
- This ip address will be dynamically reconfigured so that it always is the ip of the cluster leader.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create Cluster object
avi_cluster:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_cluster
"""
RETURN = '''
obj:
description: Cluster (api/cluster) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
name=dict(type='str', required=True),
nodes=dict(type='list',),
rejoin_nodes_automatically=dict(type='bool',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
virtual_ip=dict(type='dict',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cluster',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
goto-bus-stop/py3status | py3status/modules/taskwarrior.py | 8 | 1396 | # -*- coding: utf-8 -*-
"""
Display currently active (started) taskwarrior tasks.
Configuration parameters:
- cache_timeout : how often we refresh this module in seconds (5s default)
Requires
- task
@author James Smith http://jazmit.github.io/
@license BSD
"""
# import your useful libs here
from time import time
from subprocess import check_output
import json
import shlex
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 5
def taskWarrior(self, i3s_output_list, i3s_config):
command = 'task start.before:tomorrow status:pending export'
taskwarrior_output = check_output(shlex.split(command))
tasks_json = json.loads('[' + taskwarrior_output.decode('utf-8') + ']')
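        # The brackets are added around the output, presumably because some
        # taskwarrior versions export tasks as comma-separated JSON objects
        # without an enclosing array, e.g.:
        #   {"id": 1, "description": "buy milk"}, {"id": 2, "description": "..."}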
def describeTask(taskObj):
return str(taskObj['id']) + ' ' + taskObj['description']
result = ', '.join(map(describeTask, tasks_json))
response = {
'cached_until': time() + self.cache_timeout,
'full_text': result
}
return response
if __name__ == "__main__":
"""
Test this module by calling it directly.
"""
from time import sleep
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00'
}
while True:
print(x.taskWarrior([], config))
sleep(1)
| bsd-3-clause |
Integral-Technology-Solutions/ConfigNOW | Lib/tzparse.py | 27 | 3494 | """Parse a timezone specification."""
# XXX Unfinished.
# XXX Only the typical form "XXXhhYYY;ddd/hh,ddd/hh" is currently supported.
import warnings
warnings.warn(
"The tzparse module is obsolete and will disappear in the future",
DeprecationWarning)
tzpat = ('^([A-Z][A-Z][A-Z])([-+]?[0-9]+)([A-Z][A-Z][A-Z]);'
'([0-9]+)/([0-9]+),([0-9]+)/([0-9]+)$')
tzprog = None
def tzparse(tzstr):
"""Given a timezone spec, return a tuple of information
(tzname, delta, dstname, daystart, hourstart, dayend, hourend),
where 'tzname' is the name of the timezone, 'delta' is the offset
in hours from GMT, 'dstname' is the name of the daylight-saving
timezone, and 'daystart'/'hourstart' and 'dayend'/'hourend'
specify the starting and ending points for daylight saving time."""
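    # Example, derived from tzpat above:
    #   tzparse('EST5EDT;100/2,300/2')
    #   -> ('EST', 5, 'EDT', 100, 2, 300, 2)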
global tzprog
if tzprog is None:
import re
tzprog = re.compile(tzpat)
match = tzprog.match(tzstr)
if not match:
raise ValueError, 'not the TZ syntax I understand'
subs = []
for i in range(1, 8):
subs.append(match.group(i))
for i in (1, 3, 4, 5, 6):
subs[i] = eval(subs[i])
[tzname, delta, dstname, daystart, hourstart, dayend, hourend] = subs
return (tzname, delta, dstname, daystart, hourstart, dayend, hourend)
def tzlocaltime(secs, params):
"""Given a Unix time in seconds and a tuple of information about
a timezone as returned by tzparse(), return the local time in the
form (year, month, day, hour, min, sec, yday, wday, tzname)."""
import time
(tzname, delta, dstname, daystart, hourstart, dayend, hourend) = params
year, month, days, hours, mins, secs, yday, wday, isdst = \
time.gmtime(secs - delta*3600)
if (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend):
tzname = dstname
hours = hours + 1
return year, month, days, hours, mins, secs, yday, wday, tzname
def tzset():
"""Determine the current timezone from the "TZ" environment variable."""
global tzparams, timezone, altzone, daylight, tzname
import os
tzstr = os.environ['TZ']
tzparams = tzparse(tzstr)
timezone = tzparams[1] * 3600
altzone = timezone - 3600
daylight = 1
tzname = tzparams[0], tzparams[2]
def isdst(secs):
"""Return true if daylight-saving time is in effect for the given
Unix time in the current timezone."""
import time
(tzname, delta, dstname, daystart, hourstart, dayend, hourend) = \
tzparams
year, month, days, hours, mins, secs, yday, wday, isdst = \
time.gmtime(secs - delta*3600)
return (daystart, hourstart) <= (yday+1, hours) < (dayend, hourend)
tzset()
def localtime(secs):
"""Get the local time in the current timezone."""
return tzlocaltime(secs, tzparams)
def test():
from time import asctime, gmtime
import time, sys
now = time.time()
x = localtime(now)
tm = x[:-1] + (0,)
print 'now =', now, '=', asctime(tm), x[-1]
now = now - now % (24*3600)
if sys.argv[1:]: now = now + eval(sys.argv[1])
x = gmtime(now)
tm = x[:-1] + (0,)
print 'gmtime =', now, '=', asctime(tm), 'yday =', x[-2]
jan1 = now - x[-2]*24*3600
x = localtime(jan1)
tm = x[:-1] + (0,)
print 'jan1 =', jan1, '=', asctime(tm), x[-1]
for d in range(85, 95) + range(265, 275):
t = jan1 + d*24*3600
x = localtime(t)
tm = x[:-1] + (0,)
print 'd =', d, 't =', t, '=', asctime(tm), x[-1]
| mit |
RudoCris/horizon | openstack_dashboard/hooks.py | 89 | 1063 | # Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils.command import install
def setup_hook(config):
"""Filter config parsed from a setup.cfg to inject our defaults."""
# Tell distutils not to put the data_files in platform-specific
# installation locations. See here for an explanation:
# https://groups.google.com/forum/#!topic/comp.lang.python/Nex7L-026uw
for scheme in install.INSTALL_SCHEMES.values():
scheme['data'] = scheme['purelib']
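    # For example, under the default 'unix_prefix' scheme this makes 'data'
    # resolve to the site-packages directory (the 'purelib' location) instead
    # of the bare installation prefix.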
| apache-2.0 |
samuel-phan/mssh-copy-id | msshcopyid/cli.py | 1 | 9466 | from __future__ import unicode_literals
import argparse
import datetime
import logging
import os
import socket
import sys
import traceback
import paramiko
import msshcopyid
from msshcopyid.constants import DEFAULT_KNOWN_HOSTS
from msshcopyid.constants import DEFAULT_SSH_DSA
from msshcopyid.constants import DEFAULT_SSH_RSA
from msshcopyid.errors import CopySSHKeyError, CopySSHKeysError
from msshcopyid.log import format_exception, format_error
from msshcopyid import utils
logger = logging.getLogger(__name__)
def main():
start_dt = datetime.datetime.now()
mc = Main()
mc.init()
try:
mc.run()
rc = 0
except:
rc = 1
logger.debug('Elapsed time: %s', datetime.datetime.now() - start_dt)
sys.exit(rc)
class Main(object):
def __init__(self):
self.args = None
self.hosts = None
self.ssh_config = None
self.sshcopyid = None
def init(self, argv=sys.argv):
# Parse input arguments
parser = self.get_parser()
self.args = parser.parse_args(argv[1:])
# Init logging
self.init_log(self.args.verbose)
# Check input arguments
self.check_ssh_key_exists()
self.check_add_remove_options_exclusion()
# Get the password
default_password = self.args.password or utils.get_password(from_stdin_only=True)
# Load ~/.ssh/config if it exists
self.ssh_config = utils.load_ssh_config()
# Init `SSHCopyId` object
self.sshcopyid = msshcopyid.SSHCopyId(priv_key=self.args.identity, ssh_config=self.ssh_config,
default_password=default_password)
# Parse the hosts to extract the username if given
self.hosts = utils.parse_hosts(self.args.hosts, ssh_port=self.args.port, ssh_config=self.ssh_config)
def init_log(self, verbose):
root_logger = logging.getLogger()
sh = logging.StreamHandler()
root_logger.addHandler(sh)
if verbose:
sh.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] [%(name)s] %(message)s"))
root_logger.setLevel(logging.DEBUG)
else:
sh.setFormatter(logging.Formatter('%(message)s'))
root_logger.setLevel(logging.INFO)
paramiko_logger = logging.getLogger('paramiko')
paramiko_logger.setLevel(logging.ERROR)
def check_ssh_key_exists(self):
error_msg = None
if not self.args.identity:
if os.path.exists(DEFAULT_SSH_RSA):
self.args.identity = DEFAULT_SSH_RSA
elif os.path.exists(DEFAULT_SSH_DSA):
self.args.identity = DEFAULT_SSH_DSA
else:
error_msg = 'Cannot find any SSH keys "{0}" and "{1}".'.format(DEFAULT_SSH_RSA, DEFAULT_SSH_DSA)
elif not os.path.exists(self.args.identity):
error_msg = 'Cannot find the SSH key "{0}".'.format(self.args.identity)
if error_msg:
logger.error(format_error(error_msg))
sys.exit(1)
else:
logger.debug('Found SSH key: %s', self.args.identity)
def check_add_remove_options_exclusion(self):
if self.args.add and self.args.remove:
logger.error(format_error('argument -a/--add not allowed with argument -r/--remove.'))
sys.exit(1)
def get_parser(self):
parser = argparse.ArgumentParser(description='Copy SSH keys to multiple servers.')
parser.add_argument('hosts', metavar='host', nargs='+',
help='the remote hosts to copy the keys to. Syntax: [user@]hostname')
parser.add_argument('-k', '--known-hosts', default=DEFAULT_KNOWN_HOSTS,
help='the known_hosts file to use. Default: ~/.ssh/known_hosts')
parser.add_argument('-n', '--dry', action='store_true', help='do a dry run. Do not change anything')
parser.add_argument('-v', '--verbose', action='store_true', help='enable verbose mode.')
parser.add_argument('--version', action='version', version=msshcopyid.__version__)
copy_group = parser.add_argument_group('Copy SSH keys')
copy_group.add_argument('-A', '--no-add-host', action='store_true',
help='don\'t add automatically new hosts into "known_hosts" file')
copy_group.add_argument('-c', '--clear', action='store_true',
help='clear the hosts from the "known_hosts" file before copying the SSH keys')
copy_group.add_argument('-i', '--identity', help='the SSH identity file. Default: {0} or {1}'
.format(DEFAULT_SSH_RSA, DEFAULT_SSH_DSA))
copy_group.add_argument('-p', '--port', type=int, help='the SSH port for the remote hosts')
copy_group.add_argument('-P', '--password',
                                help='the password to log into the remote hosts. It is NOT SECURE to set the '
                                     'password this way, since it stays in the bash history. The password can '
                                     'also be sent on STDIN.')
known_host_group = parser.add_argument_group('Manage the "known_host" file only')
known_host_group.add_argument('-a', '--add', action='store_true',
help='don\'t copy the SSH keys, but instead, add the given hosts to the '
'"known_hosts" file')
known_host_group.add_argument('-r', '--remove', action='store_true',
help='don\'t copy the SSH keys, but instead, remove the given hosts from the '
'"known_hosts" file')
return parser
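    # Example invocations, as a sketch (the executable name and hosts are
    # illustrative):
    #   mssh-copy-id server1 admin@server2      # copy the default SSH key
    #   mssh-copy-id -i ~/.ssh/id_rsa server1   # use a specific identity file
    #   mssh-copy-id -r server1                 # only edit the known_hosts file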
def run(self):
# Check dry run
if self.args.dry:
logger.info('Dry run: nothing will be changed.')
# Check the action to perform
if self.args.add or self.args.remove:
# Action on the known_hosts file
# Check that known_hosts file exists
if not os.path.exists(self.args.known_hosts):
with open(self.args.known_hosts, 'w'):
pass
if self.args.add:
self.sshcopyid.add_to_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
else:
self.sshcopyid.remove_from_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
else:
# Copy the SSH keys to the hosts
if self.args.clear:
# Clear the hosts from the known_hosts file
self.sshcopyid.remove_from_known_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
# Read the public key
if not self.sshcopyid.pub_key_content:
self.sshcopyid.read_pub_key()
try:
self.copy_ssh_keys_to_hosts(self.hosts, known_hosts=self.args.known_hosts, dry=self.args.dry)
except CopySSHKeysError as ex:
logger.error(format_error(format_exception(ex)))
raise
def copy_ssh_keys_to_hosts(self, hosts, known_hosts=DEFAULT_KNOWN_HOSTS, dry=False):
"""
Copy the SSH keys to the given hosts.
:param hosts: the list of `Host` objects to copy the SSH keys to.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:param dry: perform a dry run.
:raise msshcopyid.errors.CopySSHKeysError:
"""
exceptions = [] # list of `CopySSHKeyError`
for host in hosts:
logger.info('[%s] Copy the SSH public key [%s]...', host.hostname, self.sshcopyid.pub_key)
if not dry:
try:
self.copy_ssh_keys_to_host(host, known_hosts=known_hosts)
except (paramiko.ssh_exception.SSHException, socket.error) as ex:
logger.error(format_error(format_exception(ex)))
logger.debug(traceback.format_exc())
exceptions.append(CopySSHKeyError(host=host, exception=ex))
if exceptions:
raise CopySSHKeysError(exceptions=exceptions)
def copy_ssh_keys_to_host(self, host, known_hosts=DEFAULT_KNOWN_HOSTS):
"""
Copy the SSH keys to the given host.
:param host: the `Host` object to copy the SSH keys to.
:param known_hosts: the `known_hosts` file to store the SSH public keys.
:raise paramiko.ssh_exception.AuthenticationException:
"""
password = host.password or self.sshcopyid.default_password
try:
self.sshcopyid.copy_ssh_keys_to_host(host, password=password, no_add_host=self.args.no_add_host,
known_hosts=known_hosts)
except paramiko.ssh_exception.AuthenticationException:
if password:
# A password was given, and it is wrong
raise
else:
# Ask for password
password = utils.get_password()
self.sshcopyid.default_password = password
# Try to connect again
self.sshcopyid.copy_ssh_keys_to_host(host, password=password, no_add_host=self.args.no_add_host,
known_hosts=known_hosts)
| mit |
cnsoft/kbengine-cocos2dx | kbe/src/lib/python/Lib/_osx_support.py | 8 | 18472 | """Shared OS X support functions."""
import os
import re
import sys
__all__ = [
'compiler_fixup',
'customize_config_vars',
'customize_compiler',
'get_platform_osx',
]
# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
'PY_CORE_CFLAGS')
# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')
# prefix added to original configuration variable names
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from succesful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
"""Find a build tool on current path or using xcrun"""
return (_find_executable(toolname)
or _read_output("/usr/bin/xcrun -find %s" % (toolname,))
or ''
)
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
"""Remove original unmodified values for testing"""
# This is needed for higher-level cross-platform tests of get_platform.
for k in list(_config_vars):
if k.startswith(_INITPRE):
del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
"""Save modified and original unmodified value of configuration var"""
oldvalue = _config_vars.get(cv, '')
if (oldvalue != newvalue) and (_INITPRE + cv not in _config_vars):
_config_vars[_INITPRE + cv] = oldvalue
_config_vars[cv] = newvalue
def _supports_universal_builds():
"""Returns True if universal builds are supported on this system"""
# As an approximation, we assume that if we are running on 10.4 or above,
# then we are running with an Xcode environment that supports universal
# builds, in particular -isysroot and -arch arguments to the compiler. This
# is in support of allowing 10.4 universal builds to run on 10.3.x systems.
osx_version = _get_system_version()
if osx_version:
try:
osx_version = tuple(int(i) for i in osx_version.split('.'))
except ValueError:
osx_version = ''
return bool(osx_version >= (10, 4)) if osx_version else False
def _find_appropriate_compiler(_config_vars):
"""Find appropriate C compiler for extension module builds"""
# Issue #13590:
# The OSX location for the compiler varies between OSX
    # (or rather Xcode) releases. With older releases (up to 10.5)
# the compiler is in /usr/bin, with newer releases the compiler
# can only be found inside Xcode.app if the "Command Line Tools"
# are not installed.
#
    # Furthermore, the compiler that can be used varies between
    # Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
# as the compiler, after that 'clang' should be used because
# gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
# miscompiles Python.
    # skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
# The CC config var might contain additional arguments.
# Ignore them while searching.
cc = oldcc = _config_vars['CC'].split()[0]
if not _find_executable(cc):
# Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command Line
# Tools have been installed in / or if the user has provided
# another location via CC). If not found, try using xcrun
# to find an uninstalled clang (within a selected Xcode).
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself (and os.popen is
# implemented on top of subprocess and is therefore not
# usable as well)
cc = _find_build_tool('clang')
elif os.path.basename(cc).startswith('gcc'):
# Compiler is GCC, check if it is LLVM-GCC
data = _read_output("'%s' --version"
% (cc.replace("'", "'\"'\"'"),))
if 'llvm-gcc' in data:
# Found LLVM-GCC, fall back to clang
cc = _find_build_tool('clang')
if not cc:
raise SystemError(
"Cannot locate working compiler")
if cc != oldcc:
# Found a replacement compiler.
        # Modify config vars using the new compiler, if not already explicitly
        # overridden by an env variable, preserving additional arguments.
for cv in _COMPILER_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
cv_split = _config_vars[cv].split()
cv_split[0] = cc if cv != 'CXX' else cc + '++'
_save_modified_value(_config_vars, cv, ' '.join(cv_split))
return _config_vars
def _remove_universal_flags(_config_vars):
"""Remove all universal build arguments from config vars"""
for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
            flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
            flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _remove_unsupported_archs(_config_vars):
"""Remove any unsupported archs from config vars"""
# Different Xcode releases support different sets for '-arch'
# flags. In particular, Xcode 4.x no longer supports the
# PPC architectures.
#
# This code automatically removes '-arch ppc' and '-arch ppc64'
# when these are not supported. That makes it possible to
# build extensions on OSX 10.7 and later with the prebuilt
# 32-bit installer on the python.org website.
    # skip checks if the compiler was overridden with a CC env variable
if 'CC' in os.environ:
return _config_vars
if re.search('-arch\s+ppc', _config_vars['CFLAGS']) is not None:
# NOTE: Cannot use subprocess here because of bootstrap
# issues when building Python itself
status = os.system("'%s' -arch ppc -x c /dev/null 2>/dev/null"%(
_config_vars['CC'].replace("'", "'\"'\"'"),))
        # The Apple compiler drivers return status 255 if PPC is not supported
if (status >> 8) == 255:
# Compiler doesn't support PPC, remove the related
# '-arch' flags if not explicitly overridden by an
# environment variable
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _override_all_archs(_config_vars):
"""Allow override of all archs with ARCHFLAGS env var"""
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
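# For example, running a build with ARCHFLAGS="-arch i386 -arch x86_64"
# replaces whatever '-arch' flags the config vars already carry with
# exactly those two architectures.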
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for cv in _UNIVERSAL_CONFIG_VARS:
if cv in _config_vars and '-arch' in _config_vars[cv]:
flags = _config_vars[cv]
flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def _check_for_unavailable_sdk(_config_vars):
"""Remove references to any SDKs not available"""
# If we're on OSX 10.5 or later and the user tries to
# compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail. This is particularly important with
# the standalone Command Line Tools alternative to a
# full-blown Xcode install since the CLT packages do not
# provide SDKs. If the SDK is not present, it is assumed
# that the header files and dev libs have been installed
# to /usr and /System/Library by either a standalone CLT
# package or the CLT component within Xcode.
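# A typical flag removed here looks like:
#   -isysroot /Developer/SDKs/MacOSX10.6.sdk
# (the SDK path is illustrative; any non-existent path is treated the same).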
cflags = _config_vars.get('CFLAGS', '')
m = re.search(r'-isysroot\s+(\S+)', cflags)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for cv in _UNIVERSAL_CONFIG_VARS:
# Do not alter a config var explicitly overridden by env var
if cv in _config_vars and cv not in os.environ:
flags = _config_vars[cv]
flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
_save_modified_value(_config_vars, cv, flags)
return _config_vars
def compiler_fixup(compiler_so, cc_args):
"""
This function will strip '-isysroot PATH' and '-arch ARCH' from the
compile flags if the user has specified one of them in extra_compile_flags.
This is needed because '-arch ARCH' adds another architecture to the
build, without a way to remove an architecture. Furthermore, GCC will
barf if multiple '-isysroot' arguments are present.
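For example, if cc_args contains ['-arch', 'i386', '-isysroot', '/SDK'],
any '-arch'/'-isysroot' pairs already present in compiler_so are dropped
so the caller-supplied values win.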
"""
stripArch = stripSysroot = False
compiler_so = list(compiler_so)
if not _supports_universal_builds():
# OSX releases before 10.4.0 don't support -arch and -isysroot
# at all.
stripArch = stripSysroot = True
else:
stripArch = '-arch' in cc_args
stripSysroot = '-isysroot' in cc_args
if stripArch or 'ARCHFLAGS' in os.environ:
while True:
try:
index = compiler_so.index('-arch')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
if 'ARCHFLAGS' in os.environ and not stripArch:
# User specified different -arch flags in the environ,
# see also distutils.sysconfig
compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()
if stripSysroot:
while True:
try:
index = compiler_so.index('-isysroot')
# Strip this argument and the next one:
del compiler_so[index:index+2]
except ValueError:
break
# Check if the SDK that is used during compilation actually exists;
# the universal build requires a universal SDK, and not all
# users have one installed by default.
sysroot = None
if '-isysroot' in cc_args:
idx = cc_args.index('-isysroot')
sysroot = cc_args[idx+1]
elif '-isysroot' in compiler_so:
idx = compiler_so.index('-isysroot')
sysroot = compiler_so[idx+1]
if sysroot and not os.path.isdir(sysroot):
from distutils import log
log.warn("Compiling with an SDK that doesn't seem to exist: %s",
sysroot)
log.warn("Please check your Xcode installation")
return compiler_so
def customize_config_vars(_config_vars):
"""Customize Python build configuration variables.
Called internally from sysconfig with a mutable mapping
containing name/value pairs parsed from the configured
makefile used to build this interpreter. Returns
the mapping updated as needed to reflect the environment
in which the interpreter is running; in the case of
a Python from a binary installer, the installed
environment may be very different from the build
environment, i.e. different OS levels, different
build tools, different available CPU architectures.
This customization is performed whenever
distutils.sysconfig.get_config_vars() is first
called. It may be used in environments where no
compilers are present, i.e. when installing pure
Python dists. Customization of compiler paths
and detection of unavailable archs is deferred
until the first extension module build is
requested (in distutils.sysconfig.customize_compiler).
Currently called from distutils.sysconfig
"""
if not _supports_universal_builds():
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
_remove_universal_flags(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
# Remove references to sdks that are not found
_check_for_unavailable_sdk(_config_vars)
return _config_vars
def customize_compiler(_config_vars):
"""Customize compiler path and configuration variables.
This customization is performed when the first
extension module build is requested
(in distutils.sysconfig.customize_compiler).
"""
# Find a compiler to use for extension module builds
_find_appropriate_compiler(_config_vars)
# Remove ppc arch flags if not supported here
_remove_unsupported_archs(_config_vars)
# Allow user to override all archs with ARCHFLAGS env var
_override_all_archs(_config_vars)
return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
"""Filter values for get_platform()"""
# called from get_platform() in sysconfig and distutils.util
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
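# For example, a python.org universal (i386 + x86_64) build with
# MACOSX_DEPLOYMENT_TARGET=10.6 ends up reporting
# ('macosx', '10.6', 'intel') from this function.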
macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
macrelease = _get_system_version() or macver
macver = macver or macrelease
if macver:
release = macver
osname = "macosx"
# Use the original CFLAGS value, if available, so that we
# return the same machine type for the platform string.
# Otherwise, distutils may consider this a cross-compiling
# case and disallow installs.
cflags = _config_vars.get(_INITPRE+'CFLAGS',
_config_vars.get('CFLAGS', ''))
if ((macrelease + '.') >= '10.4.' and
'-arch' in cflags.strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
machine = 'fat'
archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return (osname, release, machine)
| lgpl-3.0 |
fener06/pyload | module/plugins/accounts/DepositfilesCom.py | 2 | 1814 | # -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
@author: mkaay
"""
from module.plugins.Account import Account
import re
from time import strptime, mktime
class DepositfilesCom(Account):
__name__ = "DepositfilesCom"
__version__ = "0.1"
__type__ = "account"
__description__ = """depositfiles.com account plugin"""
__author_name__ = ("mkaay")
__author_mail__ = ("mkaay@mkaay.de")
def loadAccountInfo(self, user, req):
src = req.load("http://depositfiles.com/de/gold/")
validuntil = re.search("noch den Gold-Zugriff: <b>(.*?)</b></div>", src).group(1)
validuntil = int(mktime(strptime(validuntil, "%Y-%m-%d %H:%M:%S")))
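# pyload convention (assumed from the Account API): trafficleft == -1
# marks unlimited traffic for premium (gold) accounts.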
tmp = {"validuntil":validuntil, "trafficleft":-1}
return tmp
def login(self, user, data, req):
req.load("http://depositfiles.com/de/gold/payment.php")
src = req.load("http://depositfiles.com/de/login.php", get={"return": "/de/gold/payment.php"}, post={"login": user, "password": data["password"]})
if r'<div class="error_message">Sie haben eine falsche Benutzername-Passwort-Kombination verwendet.</div>' in src:
self.wrongPassword()
| gpl-3.0 |
woodscn/scipy | scipy/sparse/linalg/dsolve/_add_newdocs.py | 131 | 3801 | from numpy.lib import add_newdoc
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU',
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr * A * Pc = L * U
To construct these `SuperLU` objects, call the `splu` and `spilu`
functions.
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
Methods
-------
solve
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix, linalg as sla
>>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = sla.splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([0, 2, 1, 3], dtype=int32)
>>> lu.perm_c
array([2, 0, 1, 3], dtype=int32)
The L and U factors are sparse matrices in CSC format:
>>> lu.L.A
array([[ 1. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0.5, 0.5, 1. ]])
>>> lu.U.A
array([[ 2., 0., 1., 4.],
[ 0., 2., 1., 1.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., -5.]])
The permutation matrices can be constructed:
>>> Pr = csc_matrix((4, 4))
>>> Pr[lu.perm_r, np.arange(4)] = 1
>>> Pc = csc_matrix((4, 4))
>>> Pc[np.arange(4), lu.perm_c] = 1
We can reassemble the original matrix:
>>> (Pr.T * (lu.L * lu.U) * Pc.T).A
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
""")
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('solve',
"""
solve(rhs[, trans])
Solves linear system of equations with one or several right-hand sides.
Parameters
----------
rhs : ndarray, shape (n,) or (n, k)
Right hand side(s) of equation
trans : {'N', 'T', 'H'}, optional
Type of system to solve::
'N': A * x == rhs (default)
'T': A^T * x == rhs
'H': A^H * x == rhs
i.e., normal, transposed, and hermitian conjugate.
Returns
-------
x : ndarray, shape ``rhs.shape``
Solution vector(s)
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('L',
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('U',
"""
Upper triangular factor as a `scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('shape',
"""
Shape of the original matrix as a tuple of ints.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('nnz',
"""
Number of nonzero elements in the matrix.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_c',
"""
Permutation Pc represented as an array of indices.
The column permutation matrix can be reconstructed via:
>>> Pc = np.zeros((n, n))
>>> Pc[np.arange(n), perm_c] = 1
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_r',
"""
Permutation Pr represented as an array of indices.
The row permutation matrix can be reconstructed via:
>>> Pr = np.zeros((n, n))
>>> Pr[perm_r, np.arange(n)] = 1
"""))
| bsd-3-clause |
oneraghavan/portcache | setup.py | 1 | 2150 | from setuptools import setup
setup(name='portcache',
version='0.3',
description='A simple cache for port from remote service',
url='https://github.com/oneraghavan/portcache',
author='Raghavan',
author_email='oneraghavan@gmail.com',
license='MIT',
packages=['portcache'],
install_requires=[
'web.py', 'PyYAML' , 'requests'
],
zip_safe=False,
entry_points={
'console_scripts': ['portcache=portcache.command_line:main'],
})
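# Note: the console_scripts entry point above is what makes pip create a
# `portcache` executable on PATH, dispatching to portcache.command_line:main().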
print "___________________________________"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "| Succesfully installed portcache |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ |"
print "|_________________________________|"
print "\nportcache is a cache for remote calls . In microservices world, we have to work with lots of services which are needed to run our service and \n" \
"its a pain if the list of these service list grows big .portcache gives you the ability to point to a remote service instance and also cache \n" \
"the responses for you calls.\n\n" \
"To start : portcache <config yml file> \n\n" \
"The config file requires three params localport , remote , cache_file .\n" \
"localport - The port you want to run your cache service . you will point your dependent app/service to this port \n" \
"remote - The remote url with port that corresponds to the service you would like to cache \n" \
"cache_file - The location of the cache you want to save \n\n" \
"A sample config yml file looks like this \n\n" \
"localport: 9090 \n" \
"remote: http://myremoteserviceurl.com \n" \
"cache_file: \"/data/tmp/merch \n\n" \
"Starting with this config file, starts a server at port 9090.Whenever a request comes to the localhost:9090, it \n" \
"will check if this request has been already cached ,if yes then it will serve from cache file, else it will call \n" \
"the http://myremoteserviceurl.com with the request, cache and return the response"
| mit |
arcivanov/pybuilder | src/main/python/pybuilder/plugins/python/install_dependencies_plugin.py | 3 | 4525 | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from pybuilder import pip_utils
from pybuilder.core import (task,
description,
use_plugin,
depends,
init)
from pybuilder.install_utils import install_dependencies as install_dependency
from pybuilder.utils import mkdir, as_list
__author__ = "Alexander Metzner, Arcadiy Ivanov"
use_plugin("core")
@init
def initialize_install_dependencies_plugin(project):
project.set_property_if_unset("pip_verbose", 0)
project.set_property_if_unset("install_env", "system")
project.set_property_if_unset("dir_install_logs", "$dir_logs/install_dependencies")
project.set_property_if_unset("install_dependencies_index_url", None)
project.set_property_if_unset("install_dependencies_local_mapping", {})
project.set_property_if_unset("install_dependencies_extra_index_url", None)
project.set_property_if_unset("install_dependencies_trusted_host", None)
project.set_property_if_unset("install_dependencies_constraints", "constraints_file")
# Deprecated - has no effect
project.set_property_if_unset("install_dependencies_upgrade", False)
project.set_property_if_unset("install_dependencies_insecure_installation", [])
@task
@depends("prepare")
@description("Installs all (both runtime and build) dependencies specified in the build descriptor")
def install_dependencies(logger, project, reactor):
logger.info("Installing all dependencies")
install_dependency(logger, project, as_list(project.build_dependencies) + as_list(project.dependencies),
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@depends("prepare")
@description("Installs all build dependencies specified in the build descriptor")
def install_build_dependencies(logger, project, reactor):
logger.info("Installing build dependencies")
install_dependency(logger, project, project.build_dependencies,
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@depends("prepare")
@description("Installs all runtime dependencies specified in the build descriptor")
def install_runtime_dependencies(logger, project, reactor):
logger.info("Installing runtime dependencies")
install_dependency(logger, project, project.dependencies,
reactor.python_env_registry[project.get_property("install_env")],
project.expand_path("$dir_install_logs", "install_batch"),
project.get_property("install_dependencies_local_mapping"),
project.expand_path("$dir_target", "install_dependencies_constraints"))
@task
@description("Displays all dependencies the project requires")
def list_dependencies(project):
print("\n".join(
map(lambda d: "{0}".format(" ".join(pip_utils.as_pip_install_target(d))),
project.build_dependencies + project.dependencies)))
@task("prepare")
def create_install_log_directory(logger, project):
log_dir = project.expand_path("$dir_install_logs")
logger.debug("Creating log directory %r", log_dir)
mkdir(log_dir)
target_dir = project.expand_path("$dir_target")
logger.debug("Creating target directory %r", target_dir)
mkdir(target_dir)
| apache-2.0 |
mdhaber/scipy | scipy/stats/tests/test_tukeylambda_stats.py | 21 | 3232 | import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.stats._tukeylambda_stats import (tukeylambda_variance,
tukeylambda_kurtosis)
def test_tukeylambda_stats_known_exact():
"""Compare results with some known exact formulas."""
# Some exact values of the Tukey Lambda variance and kurtosis:
# lambda var kurtosis
# 0 pi**2/3 6/5 (logistic distribution)
# 0.5 4 - pi (5/3 - pi/2)/(pi/4 - 1)**2 - 3
# 1 1/3 -6/5 (uniform distribution on (-1,1))
# 2 1/12 -6/5 (uniform distribution on (-1/2, 1/2))
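# For reference (assumed from the distribution's known moments): for
# lam > -0.5 and lam != 0, the exact variance is
#   var = (2/lam**2) * (1/(1 + 2*lam) - beta(lam + 1, lam + 1))
# e.g. lam = 2 gives 0.5*(1/5 - 1/30) = 1/12, matching the table above.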
# lambda = 0
var = tukeylambda_variance(0)
assert_allclose(var, np.pi**2 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(0)
assert_allclose(kurt, 1.2, atol=1e-10)
# lambda = 0.5
var = tukeylambda_variance(0.5)
assert_allclose(var, 4 - np.pi, atol=1e-12)
kurt = tukeylambda_kurtosis(0.5)
desired = (5./3 - np.pi/2) / (np.pi/4 - 1)**2 - 3
assert_allclose(kurt, desired, atol=1e-10)
# lambda = 1
var = tukeylambda_variance(1)
assert_allclose(var, 1.0 / 3, atol=1e-12)
kurt = tukeylambda_kurtosis(1)
assert_allclose(kurt, -1.2, atol=1e-10)
# lambda = 2
var = tukeylambda_variance(2)
assert_allclose(var, 1.0 / 12, atol=1e-12)
kurt = tukeylambda_kurtosis(2)
assert_allclose(kurt, -1.2, atol=1e-10)
def test_tukeylambda_stats_mpmath():
"""Compare results with some values that were computed using mpmath."""
a10 = dict(atol=1e-10, rtol=0)
a12 = dict(atol=1e-12, rtol=0)
data = [
# lambda variance kurtosis
[-0.1, 4.78050217874253547, 3.78559520346454510],
[-0.0649, 4.16428023599895777, 2.52019675947435718],
[-0.05, 3.93672267890775277, 2.13129793057777277],
[-0.001, 3.30128380390964882, 1.21452460083542988],
[0.001, 3.27850775649572176, 1.18560634779287585],
[0.03125, 2.95927803254615800, 0.804487555161819980],
[0.05, 2.78281053405464501, 0.611604043886644327],
[0.0649, 2.65282386754100551, 0.476834119532774540],
[1.2, 0.242153920578588346, -1.23428047169049726],
[10.0, 0.00095237579757703597, 2.37810697355144933],
[20.0, 0.00012195121951131043, 7.37654321002709531],
]
for lam, var_expected, kurt_expected in data:
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
# Test with vector arguments (most of the other tests are for single
# values).
lam, var_expected, kurt_expected = zip(*data)
var = tukeylambda_variance(lam)
assert_allclose(var, var_expected, **a12)
kurt = tukeylambda_kurtosis(lam)
assert_allclose(kurt, kurt_expected, **a10)
def test_tukeylambda_stats_invalid():
"""Test values of lambda outside the domains of the functions."""
lam = [-1.0, -0.5]
var = tukeylambda_variance(lam)
assert_equal(var, np.array([np.nan, np.inf]))
lam = [-1.0, -0.25]
kurt = tukeylambda_kurtosis(lam)
assert_equal(kurt, np.array([np.nan, np.inf]))
| bsd-3-clause |