Dataset schema (six string columns per record):
  repo_name: stringlengths 5-100
  path: stringlengths 4-294
  copies: stringclasses (990 values)
  size: stringlengths 4-7
  content: stringlengths 666-1M
  license: stringclasses (15 values)
repo_name: Carmezim/tensorflow
path: tensorflow/contrib/learn/python/learn/learn_io/dask_io.py
copies: 138
size: 3844
content:
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""Methods to allow dask.DataFrame."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

try:
  # pylint: disable=g-import-not-at-top
  import dask.dataframe as dd
  allowed_classes = (dd.Series, dd.DataFrame)
  HAS_DASK = True
except ImportError:
  HAS_DASK = False


def _add_to_index(df, start):
  """New dask.dataframe with values added to index of each subdataframe."""
  df = df.copy()
  df.index += start
  return df


def _get_divisions(df):
  """Number of rows in each sub-dataframe."""
  lengths = df.map_partitions(len).compute()
  divisions = np.cumsum(lengths).tolist()
  divisions.insert(0, 0)
  return divisions


def _construct_dask_df_with_divisions(df):
  """Construct the new task graph and make a new dask.dataframe around it."""
  divisions = _get_divisions(df)
  # pylint: disable=protected-access
  name = 'csv-index' + df._name
  dsk = {(name, i): (_add_to_index, (df._name, i), divisions[i])
         for i in range(df.npartitions)}
  # pylint: enable=protected-access
  from toolz import merge  # pylint: disable=g-import-not-at-top
  if isinstance(df, dd.DataFrame):
    return dd.DataFrame(merge(dsk, df.dask), name, df.columns, divisions)
  elif isinstance(df, dd.Series):
    return dd.Series(merge(dsk, df.dask), name, df.name, divisions)


def extract_dask_data(data):
  """Extract data from dask.Series or dask.DataFrame for predictors.

  Given a distributed dask.DataFrame or dask.Series containing columns or
  names for one or more predictors, this operation returns a single
  dask.DataFrame or dask.Series that can be iterated over.

  Args:
    data: A distributed dask.DataFrame or dask.Series.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over.
    If the supplied argument is neither a dask.DataFrame nor a dask.Series
    this operation returns it without modification.
  """
  if isinstance(data, allowed_classes):
    return _construct_dask_df_with_divisions(data)
  else:
    return data


def extract_dask_labels(labels):
  """Extract data from dask.Series or dask.DataFrame for labels.

  Given a distributed dask.DataFrame or dask.Series containing exactly one
  column or name, this operation returns a single dask.DataFrame or
  dask.Series that can be iterated over.

  Args:
    labels: A distributed dask.DataFrame or dask.Series with exactly one
            column or name.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over.
    If the supplied argument is neither a dask.DataFrame nor a dask.Series
    this operation returns it without modification.

  Raises:
    ValueError: If the supplied dask.DataFrame contains more than one
                column or the supplied dask.Series contains more than
                one name.
  """
  if isinstance(labels, dd.DataFrame):
    ncol = labels.columns
  elif isinstance(labels, dd.Series):
    ncol = labels.name
  if isinstance(labels, allowed_classes):
    if len(ncol) > 1:
      raise ValueError('Only one column for labels is allowed.')
    return _construct_dask_df_with_divisions(labels)
  else:
    return labels
license: apache-2.0
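A minimal usage sketch for the record above, showing how extract_dask_data and extract_dask_labels might be called on a partitioned dask DataFrame. The column names, values, and partition count are invented for illustration; it assumes dask and pandas are installed alongside this contrib module.

import pandas as pd
import dask.dataframe as dd
from tensorflow.contrib.learn.python.learn.learn_io import dask_io

# Hypothetical frame: two feature columns and one label column,
# split into two partitions.
pdf = pd.DataFrame({'x1': range(10), 'x2': range(10), 'y': range(10)})
ddf = dd.from_pandas(pdf, npartitions=2)

features = dask_io.extract_dask_data(ddf[['x1', 'x2']])
labels = dask_io.extract_dask_labels(ddf[['y']])  # must be exactly one column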
repo_name: liyongyue/dnsspider
path: dns/rdtypes/ANY/SOA.py
copies: 2
size: 4906
content:
# Copyright (C) 2003-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

import struct

import dns.exception
import dns.rdata
import dns.name

class SOA(dns.rdata.Rdata):
    """SOA record

    @ivar mname: the SOA MNAME (master name) field
    @type mname: dns.name.Name object
    @ivar rname: the SOA RNAME (responsible name) field
    @type rname: dns.name.Name object
    @ivar serial: The zone's serial number
    @type serial: int
    @ivar refresh: The zone's refresh value (in seconds)
    @type refresh: int
    @ivar retry: The zone's retry value (in seconds)
    @type retry: int
    @ivar expire: The zone's expiration value (in seconds)
    @type expire: int
    @ivar minimum: The zone's negative caching time (in seconds, called
    "minimum" for historical reasons)
    @type minimum: int
    @see: RFC 1035"""

    __slots__ = ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire',
                 'minimum']

    def __init__(self, rdclass, rdtype, mname, rname, serial, refresh, retry,
                 expire, minimum):
        super(SOA, self).__init__(rdclass, rdtype)
        self.mname = mname
        self.rname = rname
        self.serial = serial
        self.refresh = refresh
        self.retry = retry
        self.expire = expire
        self.minimum = minimum

    def to_text(self, origin=None, relativize=True, **kw):
        mname = self.mname.choose_relativity(origin, relativize)
        rname = self.rname.choose_relativity(origin, relativize)
        return '%s %s %d %d %d %d %d' % (
            mname, rname, self.serial, self.refresh, self.retry,
            self.expire, self.minimum)

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        mname = tok.get_name()
        rname = tok.get_name()
        mname = mname.choose_relativity(origin, relativize)
        rname = rname.choose_relativity(origin, relativize)
        serial = tok.get_uint32()
        refresh = tok.get_ttl()
        retry = tok.get_ttl()
        expire = tok.get_ttl()
        minimum = tok.get_ttl()
        tok.get_eol()
        return cls(rdclass, rdtype, mname, rname, serial, refresh, retry,
                   expire, minimum)

    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        self.mname.to_wire(file, compress, origin)
        self.rname.to_wire(file, compress, origin)
        five_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                self.retry, self.expire, self.minimum)
        file.write(five_ints)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        (mname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        (rname, cused) = dns.name.from_wire(wire[: current + rdlen], current)
        current += cused
        rdlen -= cused
        if rdlen != 20:
            raise dns.exception.FormError
        five_ints = struct.unpack('!IIIII',
                                  wire[current : current + rdlen])
        if not origin is None:
            mname = mname.relativize(origin)
            rname = rname.relativize(origin)
        return cls(rdclass, rdtype, mname, rname,
                   five_ints[0], five_ints[1], five_ints[2], five_ints[3],
                   five_ints[4])

    from_wire = classmethod(from_wire)

    def choose_relativity(self, origin = None, relativize = True):
        self.mname = self.mname.choose_relativity(origin, relativize)
        self.rname = self.rname.choose_relativity(origin, relativize)

    def _cmp(self, other):
        v = cmp(self.mname, other.mname)
        if v == 0:
            v = cmp(self.rname, other.rname)
            if v == 0:
                self_ints = struct.pack('!IIIII', self.serial, self.refresh,
                                        self.retry, self.expire, self.minimum)
                other_ints = struct.pack('!IIIII', other.serial, other.refresh,
                                         other.retry, other.expire,
                                         other.minimum)
                v = cmp(self_ints, other_ints)
        return v
license: isc
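A short sketch of how this rdata class is typically reached through dnspython's public parsing API rather than instantiated directly; the zone values below are invented.

import dns.rdata
import dns.rdataclass
import dns.rdatatype

# Parse an SOA record from zone-file text; this dispatches to SOA.from_text().
soa = dns.rdata.from_text(
    dns.rdataclass.IN, dns.rdatatype.SOA,
    'ns1.example.com. hostmaster.example.com. 2016010100 7200 900 1209600 86400')
print(soa.serial, soa.refresh)  # 2016010100 7200
print(soa.to_text())            # round-trips back to the zone-file form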
repo_name: liangazhou/django-rdp
path: packages/Django-1.8.6/django/views/generic/dates.py
copies: 251
size: 25791
content:
from __future__ import unicode_literals

import datetime

from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils import timezone
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils.translation import ugettext as _
from django.views.generic.base import View
from django.views.generic.detail import (
    BaseDetailView, SingleObjectTemplateResponseMixin,
)
from django.views.generic.list import (
    MultipleObjectMixin, MultipleObjectTemplateResponseMixin,
)


class YearMixin(object):
    """
    Mixin for views manipulating year-based data.
    """
    year_format = '%Y'
    year = None

    def get_year_format(self):
        """
        Get a year format string in strptime syntax to be used to parse the
        year from url variables.
        """
        return self.year_format

    def get_year(self):
        """
        Return the year for which this view should display data.
        """
        year = self.year
        if year is None:
            try:
                year = self.kwargs['year']
            except KeyError:
                try:
                    year = self.request.GET['year']
                except KeyError:
                    raise Http404(_("No year specified"))
        return year

    def get_next_year(self, date):
        """
        Get the next valid year.
        """
        return _get_next_prev(self, date, is_previous=False, period='year')

    def get_previous_year(self, date):
        """
        Get the previous valid year.
        """
        return _get_next_prev(self, date, is_previous=True, period='year')

    def _get_next_year(self, date):
        """
        Return the start date of the next interval.

        The interval is defined by start date <= item date < next start date.
        """
        return date.replace(year=date.year + 1, month=1, day=1)

    def _get_current_year(self, date):
        """
        Return the start date of the current interval.
        """
        return date.replace(month=1, day=1)


class MonthMixin(object):
    """
    Mixin for views manipulating month-based data.
    """
    month_format = '%b'
    month = None

    def get_month_format(self):
        """
        Get a month format string in strptime syntax to be used to parse the
        month from url variables.
        """
        return self.month_format

    def get_month(self):
        """
        Return the month for which this view should display data.
        """
        month = self.month
        if month is None:
            try:
                month = self.kwargs['month']
            except KeyError:
                try:
                    month = self.request.GET['month']
                except KeyError:
                    raise Http404(_("No month specified"))
        return month

    def get_next_month(self, date):
        """
        Get the next valid month.
        """
        return _get_next_prev(self, date, is_previous=False, period='month')

    def get_previous_month(self, date):
        """
        Get the previous valid month.
        """
        return _get_next_prev(self, date, is_previous=True, period='month')

    def _get_next_month(self, date):
        """
        Return the start date of the next interval.

        The interval is defined by start date <= item date < next start date.
        """
        if date.month == 12:
            return date.replace(year=date.year + 1, month=1, day=1)
        else:
            return date.replace(month=date.month + 1, day=1)

    def _get_current_month(self, date):
        """
        Return the start date of the current interval.
        """
        return date.replace(day=1)


class DayMixin(object):
    """
    Mixin for views manipulating day-based data.
    """
    day_format = '%d'
    day = None

    def get_day_format(self):
        """
        Get a day format string in strptime syntax to be used to parse the day
        from url variables.
        """
        return self.day_format

    def get_day(self):
        """
        Return the day for which this view should display data.
        """
        day = self.day
        if day is None:
            try:
                day = self.kwargs['day']
            except KeyError:
                try:
                    day = self.request.GET['day']
                except KeyError:
                    raise Http404(_("No day specified"))
        return day

    def get_next_day(self, date):
        """
        Get the next valid day.
        """
        return _get_next_prev(self, date, is_previous=False, period='day')

    def get_previous_day(self, date):
        """
        Get the previous valid day.
        """
        return _get_next_prev(self, date, is_previous=True, period='day')

    def _get_next_day(self, date):
        """
        Return the start date of the next interval.

        The interval is defined by start date <= item date < next start date.
        """
        return date + datetime.timedelta(days=1)

    def _get_current_day(self, date):
        """
        Return the start date of the current interval.
        """
        return date


class WeekMixin(object):
    """
    Mixin for views manipulating week-based data.
    """
    week_format = '%U'
    week = None

    def get_week_format(self):
        """
        Get a week format string in strptime syntax to be used to parse the
        week from url variables.
        """
        return self.week_format

    def get_week(self):
        """
        Return the week for which this view should display data.
        """
        week = self.week
        if week is None:
            try:
                week = self.kwargs['week']
            except KeyError:
                try:
                    week = self.request.GET['week']
                except KeyError:
                    raise Http404(_("No week specified"))
        return week

    def get_next_week(self, date):
        """
        Get the next valid week.
        """
        return _get_next_prev(self, date, is_previous=False, period='week')

    def get_previous_week(self, date):
        """
        Get the previous valid week.
        """
        return _get_next_prev(self, date, is_previous=True, period='week')

    def _get_next_week(self, date):
        """
        Return the start date of the next interval.

        The interval is defined by start date <= item date < next start date.
        """
        return date + datetime.timedelta(days=7 - self._get_weekday(date))

    def _get_current_week(self, date):
        """
        Return the start date of the current interval.
        """
        return date - datetime.timedelta(self._get_weekday(date))

    def _get_weekday(self, date):
        """
        Return the weekday for a given date.

        The first day according to the week format is 0 and the last day is 6.
        """
        week_format = self.get_week_format()
        if week_format == '%W':                 # week starts on Monday
            return date.weekday()
        elif week_format == '%U':               # week starts on Sunday
            return (date.weekday() + 1) % 7
        else:
            raise ValueError("unknown week format: %s" % week_format)


class DateMixin(object):
    """
    Mixin class for views manipulating date-based data.
    """
    date_field = None
    allow_future = False

    def get_date_field(self):
        """
        Get the name of the date field to be used to filter by.
        """
        if self.date_field is None:
            raise ImproperlyConfigured("%s.date_field is required."
                                       % self.__class__.__name__)
        return self.date_field

    def get_allow_future(self):
        """
        Returns `True` if the view should be allowed to display objects from
        the future.
        """
        return self.allow_future

    # Note: the following three methods only work in subclasses that also
    # inherit SingleObjectMixin or MultipleObjectMixin.

    @cached_property
    def uses_datetime_field(self):
        """
        Return `True` if the date field is a `DateTimeField` and `False`
        if it's a `DateField`.
        """
        model = self.get_queryset().model if self.model is None else self.model
        field = model._meta.get_field(self.get_date_field())
        return isinstance(field, models.DateTimeField)

    def _make_date_lookup_arg(self, value):
        """
        Convert a date into a datetime when the date field is a DateTimeField.

        When time zone support is enabled, `date` is assumed to be in the
        current time zone, so that displayed items are consistent with the URL.
        """
        if self.uses_datetime_field:
            value = datetime.datetime.combine(value, datetime.time.min)
            if settings.USE_TZ:
                value = timezone.make_aware(value, timezone.get_current_timezone())
        return value

    def _make_single_date_lookup(self, date):
        """
        Get the lookup kwargs for filtering on a single date.

        If the date field is a DateTimeField, we can't just filter on
        date_field=date because that doesn't take the time into account.
        """
        date_field = self.get_date_field()
        if self.uses_datetime_field:
            since = self._make_date_lookup_arg(date)
            until = self._make_date_lookup_arg(date + datetime.timedelta(days=1))
            return {
                '%s__gte' % date_field: since,
                '%s__lt' % date_field: until,
            }
        else:
            # Skip self._make_date_lookup_arg, it's a no-op in this branch.
            return {date_field: date}


class BaseDateListView(MultipleObjectMixin, DateMixin, View):
    """
    Abstract base class for date-based views displaying a list of objects.
    """
    allow_empty = False
    date_list_period = 'year'

    def get(self, request, *args, **kwargs):
        self.date_list, self.object_list, extra_context = self.get_dated_items()
        context = self.get_context_data(object_list=self.object_list,
                                        date_list=self.date_list)
        context.update(extra_context)
        return self.render_to_response(context)

    def get_dated_items(self):
        """
        Obtain the list of dates and items.
        """
        raise NotImplementedError('A DateView must provide an implementation of get_dated_items()')

    def get_ordering(self):
        """
        Returns the field or fields to use for ordering the queryset; uses the
        date field by default.
        """
        return '-%s' % self.get_date_field() if self.ordering is None else self.ordering

    def get_dated_queryset(self, **lookup):
        """
        Get a queryset properly filtered according to `allow_future` and any
        extra lookup kwargs.
        """
        qs = self.get_queryset().filter(**lookup)
        date_field = self.get_date_field()
        allow_future = self.get_allow_future()
        allow_empty = self.get_allow_empty()
        paginate_by = self.get_paginate_by(qs)

        if not allow_future:
            now = timezone.now() if self.uses_datetime_field else timezone_today()
            qs = qs.filter(**{'%s__lte' % date_field: now})

        if not allow_empty:
            # When pagination is enabled, it's better to do a cheap query
            # than to load the unpaginated queryset in memory.
            is_empty = len(qs) == 0 if paginate_by is None else not qs.exists()
            if is_empty:
                raise Http404(_("No %(verbose_name_plural)s available") % {
                    'verbose_name_plural': force_text(qs.model._meta.verbose_name_plural)
                })

        return qs

    def get_date_list_period(self):
        """
        Get the aggregation period for the list of dates: 'year', 'month', or
        'day'.
        """
        return self.date_list_period

    def get_date_list(self, queryset, date_type=None, ordering='ASC'):
        """
        Get a date list by calling `queryset.dates/datetimes()`, checking
        along the way for empty lists that aren't allowed.
        """
        date_field = self.get_date_field()
        allow_empty = self.get_allow_empty()
        if date_type is None:
            date_type = self.get_date_list_period()

        if self.uses_datetime_field:
            date_list = queryset.datetimes(date_field, date_type, ordering)
        else:
            date_list = queryset.dates(date_field, date_type, ordering)
        if date_list is not None and not date_list and not allow_empty:
            name = force_text(queryset.model._meta.verbose_name_plural)
            raise Http404(_("No %(verbose_name_plural)s available") %
                          {'verbose_name_plural': name})

        return date_list


class BaseArchiveIndexView(BaseDateListView):
    """
    Base class for archives of date-based items. Requires a response mixin.
    """
    context_object_name = 'latest'

    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        qs = self.get_dated_queryset()
        date_list = self.get_date_list(qs, ordering='DESC')

        if not date_list:
            qs = qs.none()

        return (date_list, qs, {})


class ArchiveIndexView(MultipleObjectTemplateResponseMixin, BaseArchiveIndexView):
    """
    Top-level archive of date-based items.
    """
    template_name_suffix = '_archive'


class BaseYearArchiveView(YearMixin, BaseDateListView):
    """
    List of objects published in a given year.
    """
    date_list_period = 'month'
    make_object_list = False

    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()

        date_field = self.get_date_field()
        date = _date_from_string(year, self.get_year_format())

        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_year(date))
        lookup_kwargs = {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }

        qs = self.get_dated_queryset(**lookup_kwargs)
        date_list = self.get_date_list(qs)

        if not self.get_make_object_list():
            # We need this to be a queryset since parent classes introspect it
            # to find information about the model.
            qs = qs.none()

        return (date_list, qs, {
            'year': date,
            'next_year': self.get_next_year(date),
            'previous_year': self.get_previous_year(date),
        })

    def get_make_object_list(self):
        """
        Return `True` if this view should contain the full list of objects in
        the given year.
        """
        return self.make_object_list


class YearArchiveView(MultipleObjectTemplateResponseMixin, BaseYearArchiveView):
    """
    List of objects published in a given year.
    """
    template_name_suffix = '_archive_year'


class BaseMonthArchiveView(YearMixin, MonthMixin, BaseDateListView):
    """
    List of objects published in a given month.
    """
    date_list_period = 'day'

    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        month = self.get_month()

        date_field = self.get_date_field()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format())

        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_month(date))
        lookup_kwargs = {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }

        qs = self.get_dated_queryset(**lookup_kwargs)
        date_list = self.get_date_list(qs)

        return (date_list, qs, {
            'month': date,
            'next_month': self.get_next_month(date),
            'previous_month': self.get_previous_month(date),
        })


class MonthArchiveView(MultipleObjectTemplateResponseMixin, BaseMonthArchiveView):
    """
    List of objects published in a given month.
    """
    template_name_suffix = '_archive_month'


class BaseWeekArchiveView(YearMixin, WeekMixin, BaseDateListView):
    """
    List of objects published in a given week.
    """

    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        week = self.get_week()

        date_field = self.get_date_field()
        week_format = self.get_week_format()
        week_start = {
            '%W': '1',
            '%U': '0',
        }[week_format]
        date = _date_from_string(year, self.get_year_format(),
                                 week_start, '%w',
                                 week, week_format)

        since = self._make_date_lookup_arg(date)
        until = self._make_date_lookup_arg(self._get_next_week(date))
        lookup_kwargs = {
            '%s__gte' % date_field: since,
            '%s__lt' % date_field: until,
        }

        qs = self.get_dated_queryset(**lookup_kwargs)

        return (None, qs, {
            'week': date,
            'next_week': self.get_next_week(date),
            'previous_week': self.get_previous_week(date),
        })


class WeekArchiveView(MultipleObjectTemplateResponseMixin, BaseWeekArchiveView):
    """
    List of objects published in a given week.
    """
    template_name_suffix = '_archive_week'


class BaseDayArchiveView(YearMixin, MonthMixin, DayMixin, BaseDateListView):
    """
    List of objects published on a given day.
    """
    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()

        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())

        return self._get_dated_items(date)

    def _get_dated_items(self, date):
        """
        Do the actual heavy lifting of getting the dated items; this accepts a
        date object so that TodayArchiveView can be trivial.
        """
        lookup_kwargs = self._make_single_date_lookup(date)
        qs = self.get_dated_queryset(**lookup_kwargs)

        return (None, qs, {
            'day': date,
            'previous_day': self.get_previous_day(date),
            'next_day': self.get_next_day(date),
            'previous_month': self.get_previous_month(date),
            'next_month': self.get_next_month(date)
        })


class DayArchiveView(MultipleObjectTemplateResponseMixin, BaseDayArchiveView):
    """
    List of objects published on a given day.
    """
    template_name_suffix = "_archive_day"


class BaseTodayArchiveView(BaseDayArchiveView):
    """
    List of objects published today.
    """

    def get_dated_items(self):
        """
        Return (date_list, items, extra_context) for this request.
        """
        return self._get_dated_items(datetime.date.today())


class TodayArchiveView(MultipleObjectTemplateResponseMixin, BaseTodayArchiveView):
    """
    List of objects published today.
    """
    template_name_suffix = "_archive_day"


class BaseDateDetailView(YearMixin, MonthMixin, DayMixin, DateMixin, BaseDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    def get_object(self, queryset=None):
        """
        Get the object this request displays.
        """
        year = self.get_year()
        month = self.get_month()
        day = self.get_day()
        date = _date_from_string(year, self.get_year_format(),
                                 month, self.get_month_format(),
                                 day, self.get_day_format())

        # Use a custom queryset if provided
        qs = self.get_queryset() if queryset is None else queryset

        if not self.get_allow_future() and date > datetime.date.today():
            raise Http404(_(
                "Future %(verbose_name_plural)s not available because "
                "%(class_name)s.allow_future is False.") % {
                'verbose_name_plural': qs.model._meta.verbose_name_plural,
                'class_name': self.__class__.__name__,
            })

        # Filter down a queryset from self.queryset using the date from the
        # URL. This'll get passed as the queryset to DetailView.get_object,
        # which'll handle the 404
        lookup_kwargs = self._make_single_date_lookup(date)
        qs = qs.filter(**lookup_kwargs)

        return super(BaseDetailView, self).get_object(queryset=qs)


class DateDetailView(SingleObjectTemplateResponseMixin, BaseDateDetailView):
    """
    Detail view of a single object on a single date; this differs from the
    standard DetailView by accepting a year/month/day in the URL.
    """
    template_name_suffix = '_detail'


def _date_from_string(year, year_format, month='', month_format='',
                      day='', day_format='', delim='__'):
    """
    Helper: get a datetime.date object given a format string and a year,
    month, and day (only year is mandatory). Raise a 404 for an invalid date.
    """
    format = delim.join((year_format, month_format, day_format))
    datestr = delim.join((year, month, day))
    try:
        return datetime.datetime.strptime(force_str(datestr), format).date()
    except ValueError:
        raise Http404(_("Invalid date string '%(datestr)s' given format '%(format)s'") % {
            'datestr': datestr,
            'format': format,
        })


def _get_next_prev(generic_view, date, is_previous, period):
    """
    Helper: Get the next or the previous valid date. The idea is to allow
    links on month/day views to never be 404s by never providing a date
    that'll be invalid for the given view.

    This is a bit complicated since it handles different intervals of time,
    hence the coupling to generic_view.

    However in essence the logic comes down to:

        * If allow_empty and allow_future are both true, this is easy: just
          return the naive result (just the next/previous day/week/month,
          regardless of object existence.)

        * If allow_empty is true, allow_future is false, and the naive result
          isn't in the future, then return it; otherwise return None.

        * If allow_empty is false and allow_future is true, return the next
          date *that contains a valid object*, even if it's in the future. If
          there are no next objects, return None.

        * If allow_empty is false and allow_future is false, return the next
          date that contains a valid object. If that date is in the future, or
          if there are no next objects, return None.
    """
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()

    get_current = getattr(generic_view, '_get_current_%s' % period)
    get_next = getattr(generic_view, '_get_next_%s' % period)

    # Bounds of the current interval
    start, end = get_current(date), get_next(date)

    # If allow_empty is True, the naive result will be valid
    if allow_empty:
        if is_previous:
            result = get_current(start - datetime.timedelta(days=1))
        else:
            result = end

        if allow_future or result <= timezone_today():
            return result
        else:
            return None

    # Otherwise, we'll need to go to the database to look for an object
    # whose date_field is at least (greater than/less than) the given
    # naive result
    else:
        # Construct a lookup and an ordering depending on whether we're doing
        # a previous date or a next date lookup.
        if is_previous:
            lookup = {'%s__lt' % date_field: generic_view._make_date_lookup_arg(start)}
            ordering = '-%s' % date_field
        else:
            lookup = {'%s__gte' % date_field: generic_view._make_date_lookup_arg(end)}
            ordering = date_field

        # Filter out objects in the future if appropriate.
        if not allow_future:
            # Fortunately, to match the implementation of allow_future,
            # we need __lte, which doesn't conflict with __lt above.
            if generic_view.uses_datetime_field:
                now = timezone.now()
            else:
                now = timezone_today()
            lookup['%s__lte' % date_field] = now

        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)

        # Snag the first object from the queryset; if it doesn't exist that
        # means there's no next/previous link available.
        try:
            result = getattr(qs[0], date_field)
        except IndexError:
            return None

        # Convert datetimes to dates in the current time zone.
        if generic_view.uses_datetime_field:
            if settings.USE_TZ:
                result = timezone.localtime(result)
            result = result.date()

        # Return the first day of the period.
        return get_current(result)


def timezone_today():
    """
    Return the current date in the current time zone.
    """
    if settings.USE_TZ:
        return timezone.localtime(timezone.now()).date()
    else:
        return datetime.date.today()
license: apache-2.0
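A plausible wiring of the archive views above into a project, using Django 1.8 URL conventions; the Article model, its pub_date field, and the URL names are invented for illustration.

# urls.py (hypothetical project using the generic date views above)
from django.conf.urls import url
from django.views.generic.dates import ArchiveIndexView, YearArchiveView

from myapp.models import Article  # assumed model with a `pub_date` DateField

urlpatterns = [
    url(r'^archive/$',
        ArchiveIndexView.as_view(model=Article, date_field='pub_date'),
        name='article_archive'),
    url(r'^archive/(?P<year>[0-9]{4})/$',
        YearArchiveView.as_view(model=Article, date_field='pub_date',
                                make_object_list=True),
        name='article_year_archive'),
]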
repo_name: vlinhd11/vlinhd11-android-scripting
path: python/src/Lib/plat-mac/PixMapWrapper.py
copies: 73
size: 8133
content:
"""PixMapWrapper - defines the PixMapWrapper class, which wraps an opaque QuickDraw PixMap data structure in a handy Python class. Also provides methods to convert to/from pixel data (from, e.g., the img module) or a Python Imaging Library Image object. J. Strout <joe@strout.net> February 1999""" from warnings import warnpy3k warnpy3k("In 3.x, the PixMapWrapper module is removed.", stacklevel=2) from Carbon import Qd from Carbon import QuickDraw import struct import MacOS import img import imgformat # PixMap data structure element format (as used with struct) _pmElemFormat = { 'baseAddr':'l', # address of pixel data 'rowBytes':'H', # bytes per row, plus 0x8000 'bounds':'hhhh', # coordinates imposed over pixel data 'top':'h', 'left':'h', 'bottom':'h', 'right':'h', 'pmVersion':'h', # flags for Color QuickDraw 'packType':'h', # format of compression algorithm 'packSize':'l', # size after compression 'hRes':'l', # horizontal pixels per inch 'vRes':'l', # vertical pixels per inch 'pixelType':'h', # pixel format 'pixelSize':'h', # bits per pixel 'cmpCount':'h', # color components per pixel 'cmpSize':'h', # bits per component 'planeBytes':'l', # offset in bytes to next plane 'pmTable':'l', # handle to color table 'pmReserved':'l' # reserved for future use } # PixMap data structure element offset _pmElemOffset = { 'baseAddr':0, 'rowBytes':4, 'bounds':6, 'top':6, 'left':8, 'bottom':10, 'right':12, 'pmVersion':14, 'packType':16, 'packSize':18, 'hRes':22, 'vRes':26, 'pixelType':30, 'pixelSize':32, 'cmpCount':34, 'cmpSize':36, 'planeBytes':38, 'pmTable':42, 'pmReserved':46 } class PixMapWrapper: """PixMapWrapper -- wraps the QD PixMap object in a Python class, with methods to easily get/set various pixmap fields. Note: Use the PixMap() method when passing to QD calls.""" def __init__(self): self.__dict__['data'] = '' self._header = struct.pack("lhhhhhhhlllhhhhlll", id(self.data)+MacOS.string_id_to_buffer, 0, # rowBytes 0, 0, 0, 0, # bounds 0, # pmVersion 0, 0, # packType, packSize 72<<16, 72<<16, # hRes, vRes QuickDraw.RGBDirect, # pixelType 16, # pixelSize 2, 5, # cmpCount, cmpSize, 0, 0, 0) # planeBytes, pmTable, pmReserved self.__dict__['_pm'] = Qd.RawBitMap(self._header) def _stuff(self, element, bytes): offset = _pmElemOffset[element] fmt = _pmElemFormat[element] self._header = self._header[:offset] \ + struct.pack(fmt, bytes) \ + self._header[offset + struct.calcsize(fmt):] self.__dict__['_pm'] = None def _unstuff(self, element): offset = _pmElemOffset[element] fmt = _pmElemFormat[element] return struct.unpack(fmt, self._header[offset:offset+struct.calcsize(fmt)])[0] def __setattr__(self, attr, val): if attr == 'baseAddr': raise 'UseErr', "don't assign to .baseAddr -- assign to .data instead" elif attr == 'data': self.__dict__['data'] = val self._stuff('baseAddr', id(self.data) + MacOS.string_id_to_buffer) elif attr == 'rowBytes': # high bit is always set for some odd reason self._stuff('rowBytes', val | 0x8000) elif attr == 'bounds': # assume val is in official Left, Top, Right, Bottom order! 
self._stuff('left',val[0]) self._stuff('top',val[1]) self._stuff('right',val[2]) self._stuff('bottom',val[3]) elif attr == 'hRes' or attr == 'vRes': # 16.16 fixed format, so just shift 16 bits self._stuff(attr, int(val) << 16) elif attr in _pmElemFormat.keys(): # any other pm attribute -- just stuff self._stuff(attr, val) else: self.__dict__[attr] = val def __getattr__(self, attr): if attr == 'rowBytes': # high bit is always set for some odd reason return self._unstuff('rowBytes') & 0x7FFF elif attr == 'bounds': # return bounds in official Left, Top, Right, Bottom order! return ( \ self._unstuff('left'), self._unstuff('top'), self._unstuff('right'), self._unstuff('bottom') ) elif attr == 'hRes' or attr == 'vRes': # 16.16 fixed format, so just shift 16 bits return self._unstuff(attr) >> 16 elif attr in _pmElemFormat.keys(): # any other pm attribute -- just unstuff return self._unstuff(attr) else: return self.__dict__[attr] def PixMap(self): "Return a QuickDraw PixMap corresponding to this data." if not self.__dict__['_pm']: self.__dict__['_pm'] = Qd.RawBitMap(self._header) return self.__dict__['_pm'] def blit(self, x1=0,y1=0,x2=None,y2=None, port=None): """Draw this pixmap into the given (default current) grafport.""" src = self.bounds dest = [x1,y1,x2,y2] if x2 is None: dest[2] = x1 + src[2]-src[0] if y2 is None: dest[3] = y1 + src[3]-src[1] if not port: port = Qd.GetPort() Qd.CopyBits(self.PixMap(), port.GetPortBitMapForCopyBits(), src, tuple(dest), QuickDraw.srcCopy, None) def fromstring(self,s,width,height,format=imgformat.macrgb): """Stuff this pixmap with raw pixel data from a string. Supply width, height, and one of the imgformat specifiers.""" # we only support 16- and 32-bit mac rgb... # so convert if necessary if format != imgformat.macrgb and format != imgformat.macrgb16: # (LATER!) raise "NotImplementedError", "conversion to macrgb or macrgb16" self.data = s self.bounds = (0,0,width,height) self.cmpCount = 3 self.pixelType = QuickDraw.RGBDirect if format == imgformat.macrgb: self.pixelSize = 32 self.cmpSize = 8 else: self.pixelSize = 16 self.cmpSize = 5 self.rowBytes = width*self.pixelSize/8 def tostring(self, format=imgformat.macrgb): """Return raw data as a string in the specified format.""" # is the native format requested? if so, just return data if (format == imgformat.macrgb and self.pixelSize == 32) or \ (format == imgformat.macrgb16 and self.pixelsize == 16): return self.data # otherwise, convert to the requested format # (LATER!) raise "NotImplementedError", "data format conversion" def fromImage(self,im): """Initialize this PixMap from a PIL Image object.""" # We need data in ARGB format; PIL can't currently do that, # but it can do RGBA, which we can use by inserting one null # up frontpm = if im.mode != 'RGBA': im = im.convert('RGBA') data = chr(0) + im.tostring() self.fromstring(data, im.size[0], im.size[1]) def toImage(self): """Return the contents of this PixMap as a PIL Image object.""" import Image # our tostring() method returns data in ARGB format, # whereas Image uses RGBA; a bit of slicing fixes this... data = self.tostring()[1:] + chr(0) bounds = self.bounds return Image.fromstring('RGBA',(bounds[2]-bounds[0],bounds[3]-bounds[1]),data) def test(): import MacOS import EasyDialogs import Image path = EasyDialogs.AskFileForOpen("Image File:") if not path: return pm = PixMapWrapper() pm.fromImage( Image.open(path) ) pm.blit(20,20) return pm
license: apache-2.0
repo_name: Leemoonsoo/incubator-zeppelin
path: python/src/main/resources/python/zeppelin_ipython.py
copies: 21
size: 1531
content:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from py4j.java_gateway import java_import, JavaGateway, GatewayClient
import os

# start JVM gateway
# (note: ${JVM_GATEWAY_ADDRESS} and ${JVM_GATEWAY_PORT} are template
# placeholders substituted by the Zeppelin interpreter at runtime)
if "PY4J_GATEWAY_SECRET" in os.environ:
    from py4j.java_gateway import GatewayParameters
    gateway_secret = os.environ["PY4J_GATEWAY_SECRET"]
    gateway = JavaGateway(gateway_parameters=GatewayParameters(
        address="${JVM_GATEWAY_ADDRESS}", port=${JVM_GATEWAY_PORT},
        auth_token=gateway_secret, auto_convert=True))
    java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
    intp = gateway.entry_point
else:
    gateway = JavaGateway(GatewayClient(address="${JVM_GATEWAY_ADDRESS}",
                                        port=${JVM_GATEWAY_PORT}),
                          auto_convert=True)
    java_import(gateway.jvm, "org.apache.zeppelin.display.Input")
    intp = gateway.entry_point
license: apache-2.0
repo_name: pwaller/pgi
path: pgi/_compat.py
copies: 1
size: 1727
content:
# Copyright 2013 Christoph Reiter
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.

import sys

PY2 = sys.version_info[0] == 2
PY3 = not PY2

if PY3:
    string_types = (str,)
    text_type = str
    integer_types = (int,)
    long_type = int

    import builtins
    exec_ = getattr(builtins, "exec")

    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value

    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())

    xrange = range

    from io import StringIO
    StringIO = StringIO

    import builtins
    builtins = builtins
elif PY2:
    string_types = (str, unicode)
    text_type = unicode
    integer_types = (int, long)
    long_type = long

    def exec_(_code_, _globs_=None, _locs_=None):
        if _globs_ is None:
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")

    exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')

    iterkeys = lambda d: d.iterkeys()
    itervalues = lambda d: d.itervalues()
    iteritems = lambda d: d.iteritems()

    xrange = xrange

    from StringIO import StringIO
    StringIO = StringIO

    import __builtin__ as builtins
    builtins = builtins
else:
    assert 0
license: lgpl-2.1
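A small sketch of how such a compatibility shim is consumed, so that calling code stays identical on both Python 2 and Python 3; the describe() helper and its inputs are made up for illustration.

from pgi._compat import iteritems, string_types, xrange

def describe(mapping):
    # Iterates lazily on both Python 2 (d.iteritems) and 3 (iter(d.items())).
    for key, value in iteritems(mapping):
        if isinstance(key, string_types):
            print(key, value)

describe({'a': 1, 'b': 2})
for i in xrange(3):  # xrange on Py2, range on Py3
    print(i)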
repo_name: alexsavio/scikit-learn
path: sklearn/linear_model/sag.py
copies: 14
size: 11269
content:
"""Solvers for Ridge and LogisticRegression using SAG algorithm""" # Authors: Tom Dupre la Tour <tom.dupre-la-tour@m4x.org> # # License: BSD 3 clause import numpy as np import warnings from ..exceptions import ConvergenceWarning from ..utils import check_array from ..utils.extmath import row_norms from .base import make_dataset from .sag_fast import sag def get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept): """Compute automatic step size for SAG solver The step size is set to 1 / (alpha_scaled + L + fit_intercept) where L is the max sum of squares for over all samples. Parameters ---------- max_squared_sum : float Maximum squared sum of X over samples. alpha_scaled : float Constant that multiplies the regularization term, scaled by 1. / n_samples, the number of samples. loss : string, in {"log", "squared"} The loss function used in SAG solver. fit_intercept : bool Specifies if a constant (a.k.a. bias or intercept) will be added to the decision function. Returns ------- step_size : float Step size used in SAG solver. References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document """ if loss in ('log', 'multinomial'): # inverse Lipschitz constant for log loss return 4.0 / (max_squared_sum + int(fit_intercept) + 4.0 * alpha_scaled) elif loss == 'squared': # inverse Lipschitz constant for squared loss return 1.0 / (max_squared_sum + int(fit_intercept) + alpha_scaled) else: raise ValueError("Unknown loss function for SAG solver, got %s " "instead of 'log' or 'squared'" % loss) def sag_solver(X, y, sample_weight=None, loss='log', alpha=1., max_iter=1000, tol=0.001, verbose=0, random_state=None, check_input=True, max_squared_sum=None, warm_start_mem=None): """SAG solver for Ridge and LogisticRegression SAG stands for Stochastic Average Gradient: the gradient of the loss is estimated each sample at a time and the model is updated along the way with a constant learning rate. IMPORTANT NOTE: 'sag' solver converges faster on columns that are on the same scale. You can normalize the data by using sklearn.preprocessing.StandardScaler on your data before passing it to the fit method. This implementation works with data represented as dense numpy arrays or sparse scipy arrays of floating point values for the features. It will fit the data according to squared loss or log loss. The regularizer is a penalty added to the loss function that shrinks model parameters towards the zero vector using the squared euclidean norm L2. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data y : numpy array, shape (n_samples,) Target values. With loss='multinomial', y must be label encoded (see preprocessing.LabelEncoder). sample_weight : array-like, shape (n_samples,), optional Weights applied to individual samples (1. for unweighted). loss : 'log' | 'squared' | 'multinomial' Loss function that will be optimized: -'log' is the binary logistic loss, as used in LogisticRegression. -'squared' is the squared loss, as used in Ridge. -'multinomial' is the multinomial logistic loss, as used in LogisticRegression. .. versionadded:: 0.18 *loss='multinomial'* alpha : float, optional Constant that multiplies the regularization term. Defaults to 1. max_iter: int, optional The max number of passes over the training data if the stopping criteria is not reached. Defaults to 1000. tol: double, optional The stopping criteria for the weights. 
The iterations will stop when max(change in weights) / max(weights) < tol. Defaults to .001 verbose: integer, optional The verbosity level. random_state : int seed, RandomState instance, or None (default) The seed of the pseudo random number generator to use when shuffling the data. check_input : bool, default True If False, the input arrays X and y will not be checked. max_squared_sum : float, default None Maximum squared sum of X over samples. If None, it will be computed, going through all the samples. The value should be precomputed to speed up cross validation. warm_start_mem: dict, optional The initialization parameters used for warm starting. Warm starting is currently used in LogisticRegression but not in Ridge. It contains: - 'coef': the weight vector, with the intercept in last line if the intercept is fitted. - 'gradient_memory': the scalar gradient for all seen samples. - 'sum_gradient': the sum of gradient over all seen samples, for each feature. - 'intercept_sum_gradient': the sum of gradient over all seen samples, for the intercept. - 'seen': array of boolean describing the seen samples. - 'num_seen': the number of seen samples. Returns ------- coef_ : array, shape (n_features) Weight vector. n_iter_ : int The number of full pass on all samples. warm_start_mem : dict Contains a 'coef' key with the fitted result, and possibly the fitted intercept at the end of the array. Contains also other keys used for warm starting. Examples -------- >>> import numpy as np >>> from sklearn import linear_model >>> n_samples, n_features = 10, 5 >>> np.random.seed(0) >>> X = np.random.randn(n_samples, n_features) >>> y = np.random.randn(n_samples) >>> clf = linear_model.Ridge(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE Ridge(alpha=1.0, copy_X=True, fit_intercept=True, max_iter=None, normalize=False, random_state=None, solver='sag', tol=0.001) >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) >>> y = np.array([1, 1, 2, 2]) >>> clf = linear_model.LogisticRegression(solver='sag') >>> clf.fit(X, y) ... #doctest: +NORMALIZE_WHITESPACE LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True, intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1, penalty='l2', random_state=None, solver='sag', tol=0.0001, verbose=0, warm_start=False) References ---------- Schmidt, M., Roux, N. L., & Bach, F. (2013). Minimizing finite sums with the stochastic average gradient https://hal.inria.fr/hal-00860051/document See also -------- Ridge, SGDRegressor, ElasticNet, Lasso, SVR, and LogisticRegression, SGDClassifier, LinearSVC, Perceptron """ if warm_start_mem is None: warm_start_mem = {} # Ridge default max_iter is None if max_iter is None: max_iter = 1000 if check_input: X = check_array(X, dtype=np.float64, accept_sparse='csr', order='C') y = check_array(y, dtype=np.float64, ensure_2d=False, order='C') n_samples, n_features = X.shape[0], X.shape[1] # As in SGD, the alpha is scaled by n_samples. alpha_scaled = float(alpha) / n_samples # if loss == 'multinomial', y should be label encoded. n_classes = int(y.max()) + 1 if loss == 'multinomial' else 1 # initialization if sample_weight is None: sample_weight = np.ones(n_samples, dtype=np.float64, order='C') if 'coef' in warm_start_mem.keys(): coef_init = warm_start_mem['coef'] else: # assume fit_intercept is False coef_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') # coef_init contains possibly the intercept_init at the end. 
# Note that Ridge centers the data before fitting, so fit_intercept=False. fit_intercept = coef_init.shape[0] == (n_features + 1) if fit_intercept: intercept_init = coef_init[-1, :] coef_init = coef_init[:-1, :] else: intercept_init = np.zeros(n_classes, dtype=np.float64) if 'intercept_sum_gradient' in warm_start_mem.keys(): intercept_sum_gradient = warm_start_mem['intercept_sum_gradient'] else: intercept_sum_gradient = np.zeros(n_classes, dtype=np.float64) if 'gradient_memory' in warm_start_mem.keys(): gradient_memory_init = warm_start_mem['gradient_memory'] else: gradient_memory_init = np.zeros((n_samples, n_classes), dtype=np.float64, order='C') if 'sum_gradient' in warm_start_mem.keys(): sum_gradient_init = warm_start_mem['sum_gradient'] else: sum_gradient_init = np.zeros((n_features, n_classes), dtype=np.float64, order='C') if 'seen' in warm_start_mem.keys(): seen_init = warm_start_mem['seen'] else: seen_init = np.zeros(n_samples, dtype=np.int32, order='C') if 'num_seen' in warm_start_mem.keys(): num_seen_init = warm_start_mem['num_seen'] else: num_seen_init = 0 dataset, intercept_decay = make_dataset(X, y, sample_weight, random_state) if max_squared_sum is None: max_squared_sum = row_norms(X, squared=True).max() step_size = get_auto_step_size(max_squared_sum, alpha_scaled, loss, fit_intercept) if step_size * alpha_scaled == 1: raise ZeroDivisionError("Current sag implementation does not handle " "the case step_size * alpha_scaled == 1") num_seen, n_iter_ = sag(dataset, coef_init, intercept_init, n_samples, n_features, n_classes, tol, max_iter, loss, step_size, alpha_scaled, sum_gradient_init, gradient_memory_init, seen_init, num_seen_init, fit_intercept, intercept_sum_gradient, intercept_decay, verbose) if n_iter_ == max_iter: warnings.warn("The max_iter was reached which means " "the coef_ did not converge", ConvergenceWarning) if fit_intercept: coef_init = np.vstack((coef_init, intercept_init)) warm_start_mem = {'coef': coef_init, 'sum_gradient': sum_gradient_init, 'intercept_sum_gradient': intercept_sum_gradient, 'gradient_memory': gradient_memory_init, 'seen': seen_init, 'num_seen': num_seen} if loss == 'multinomial': coef_ = coef_init.T else: coef_ = coef_init[:, 0] return coef_, n_iter_, warm_start_mem
license: bsd-3-clause
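Beyond the doctest examples in the docstring, sag_solver can also be called directly. It is an internal helper in this version of scikit-learn, so the import path below may vary between releases; the data and alpha value are invented for illustration.

import numpy as np
from sklearn.linear_model.sag import sag_solver  # private module in this era

rng = np.random.RandomState(0)
X = rng.randn(50, 5)
y = rng.randn(50)

# Squared loss corresponds to the Ridge use of this solver.
coef, n_iter, warm_start_mem = sag_solver(X, y, loss='squared', alpha=1.0)
print(coef.shape, n_iter)

# The returned warm_start_mem can seed a second call to continue optimizing.
coef2, n_iter2, _ = sag_solver(X, y, loss='squared', alpha=1.0,
                               warm_start_mem=warm_start_mem)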
repo_name: ramadhane/odoo
path: addons/stock/stock.py
copies: 19
size: 270206
content:
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from datetime import date, datetime from dateutil import relativedelta import json import time from openerp.osv import fields, osv from openerp.tools.float_utils import float_compare, float_round from openerp.tools.translate import _ from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, DEFAULT_SERVER_DATE_FORMAT from openerp.exceptions import Warning from openerp import SUPERUSER_ID, api import openerp.addons.decimal_precision as dp from openerp.addons.procurement import procurement import logging _logger = logging.getLogger(__name__) #---------------------------------------------------------- # Incoterms #---------------------------------------------------------- class stock_incoterms(osv.osv): _name = "stock.incoterms" _description = "Incoterms" _columns = { 'name': fields.char('Name', required=True, help="Incoterms are series of sales terms. They are used to divide transaction costs and responsibilities between buyer and seller and reflect state-of-the-art transportation practices."), 'code': fields.char('Code', size=3, required=True, help="Incoterm Standard Code"), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide an INCOTERM you will not use."), } _defaults = { 'active': True, } #---------------------------------------------------------- # Stock Location #---------------------------------------------------------- class stock_location(osv.osv): _name = "stock.location" _description = "Inventory Locations" _parent_name = "location_id" _parent_store = True _parent_order = 'name' _order = 'parent_left' _rec_name = 'complete_name' def _location_owner(self, cr, uid, location, context=None): ''' Return the company owning the location if any ''' return location and (location.usage == 'internal') and location.company_id or False def _complete_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = m.name parent = m.location_id while parent: res[m.id] = parent.name + ' / ' + res[m.id] parent = parent.location_id return res def _get_sublocations(self, cr, uid, ids, context=None): """ return all sublocations of the given stock locations (included) """ if context is None: context = {} context_with_inactive = context.copy() context_with_inactive['active_test'] = False return self.search(cr, uid, [('id', 'child_of', ids)], context=context_with_inactive) def _name_get(self, cr, uid, location, context=None): name = location.name while location.location_id and location.usage != 'view': location = location.location_id name = location.name + '/' + name return name def name_get(self, cr, uid, ids, context=None): res = [] for location in self.browse(cr, uid, ids, context=context): res.append((location.id, self._name_get(cr, uid, location, context=context))) return res _columns = { 'name': fields.char('Location Name', required=True, translate=True), 'active': fields.boolean('Active', help="By unchecking the active field, you may hide a location without deleting it."), 'usage': fields.selection([ ('supplier', 'Supplier Location'), ('view', 'View'), ('internal', 'Internal Location'), ('customer', 'Customer Location'), ('inventory', 'Inventory'), ('procurement', 'Procurement'), ('production', 'Production'), ('transit', 'Transit Location')], 'Location Type', required=True, help="""* Supplier Location: Virtual location representing the source location for products coming from your suppliers \n* View: Virtual location used to create a hierarchical structures for your warehouse, aggregating its child locations ; can't directly contain products \n* Internal Location: Physical locations inside your own warehouses, \n* Customer Location: Virtual location representing the destination location for products sent to your customers \n* Inventory: Virtual location serving as counterpart for inventory operations used to correct stock levels (Physical inventories) \n* Procurement: Virtual location serving as temporary counterpart for procurement operations when the source (supplier or production) is not known yet. This location should be empty when the procurement scheduler has finished running. 
\n* Production: Virtual counterpart location for production operations: this location consumes the raw material and produces finished products \n* Transit Location: Counterpart location that should be used in inter-companies or inter-warehouses operations """, select=True), 'complete_name': fields.function(_complete_name, type='char', string="Location Name", store={'stock.location': (_get_sublocations, ['name', 'location_id', 'active'], 10)}), 'location_id': fields.many2one('stock.location', 'Parent Location', select=True, ondelete='cascade'), 'child_ids': fields.one2many('stock.location', 'location_id', 'Contains'), 'partner_id': fields.many2one('res.partner', 'Owner', help="Owner of the location if not internal"), 'comment': fields.text('Additional Information'), 'posx': fields.integer('Corridor (X)', help="Optional localization details, for information purpose only"), 'posy': fields.integer('Shelves (Y)', help="Optional localization details, for information purpose only"), 'posz': fields.integer('Height (Z)', help="Optional localization details, for information purpose only"), 'parent_left': fields.integer('Left Parent', select=1), 'parent_right': fields.integer('Right Parent', select=1), 'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this location is shared between companies'), 'scrap_location': fields.boolean('Is a Scrap Location?', help='Check this box to allow using this location to put scrapped/damaged goods.'), 'removal_strategy_id': fields.many2one('product.removal', 'Removal Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to take the products from, which lot etc. for this location. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'putaway_strategy_id': fields.many2one('product.putaway', 'Put Away Strategy', help="Defines the default method used for suggesting the exact location (shelf) where to store the products. This method can be enforced at the product category level, and a fallback is made on the parent locations if none is set here."), 'loc_barcode': fields.char('Location Barcode'), } _defaults = { 'active': True, 'usage': 'internal', 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location', context=c), 'posx': 0, 'posy': 0, 'posz': 0, 'scrap_location': False, } _sql_constraints = [('loc_barcode_company_uniq', 'unique (loc_barcode,company_id)', 'The barcode for a location must be unique per company !')] def create(self, cr, uid, default, context=None): if not default.get('loc_barcode', False): default.update({'loc_barcode': default.get('complete_name', False)}) return super(stock_location, self).create(cr, uid, default, context=context) def get_putaway_strategy(self, cr, uid, location, product, context=None): ''' Returns the location where the product has to be put, if any compliant putaway strategy is found. Otherwise returns None.''' putaway_obj = self.pool.get('product.putaway') loc = location while loc: if loc.putaway_strategy_id: res = putaway_obj.putaway_apply(cr, uid, loc.putaway_strategy_id, product, context=context) if res: return res loc = loc.location_id def _default_removal_strategy(self, cr, uid, context=None): return 'fifo' def get_removal_strategy(self, cr, uid, location, product, context=None): ''' Returns the removal strategy to consider for the given product and location. 
:param location: browse record (stock.location) :param product: browse record (product.product) :rtype: char ''' if product.categ_id.removal_strategy_id: return product.categ_id.removal_strategy_id.method loc = location while loc: if loc.removal_strategy_id: return loc.removal_strategy_id.method loc = loc.location_id return self._default_removal_strategy(cr, uid, context=context) def get_warehouse(self, cr, uid, location, context=None): """ Returns warehouse id of warehouse that contains location :param location: browse record (stock.location) """ wh_obj = self.pool.get("stock.warehouse") whs = wh_obj.search(cr, uid, [('view_location_id.parent_left', '<=', location.parent_left), ('view_location_id.parent_right', '>=', location.parent_left)], context=context) return whs and whs[0] or False #---------------------------------------------------------- # Routes #---------------------------------------------------------- class stock_location_route(osv.osv): _name = 'stock.location.route' _description = "Inventory Routes" _order = 'sequence' _columns = { 'name': fields.char('Route Name', required=True, translate=True), 'sequence': fields.integer('Sequence'), 'pull_ids': fields.one2many('procurement.rule', 'route_id', 'Pull Rules', copy=True), 'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the route without removing it."), 'push_ids': fields.one2many('stock.location.path', 'route_id', 'Push Rules', copy=True), 'product_selectable': fields.boolean('Applicable on Product'), 'product_categ_selectable': fields.boolean('Applicable on Product Category'), 'warehouse_selectable': fields.boolean('Applicable on Warehouse'), 'supplied_wh_id': fields.many2one('stock.warehouse', 'Supplied Warehouse'), 'supplier_wh_id': fields.many2one('stock.warehouse', 'Supplier Warehouse'), 'company_id': fields.many2one('res.company', 'Company', select=1, help='Let this field empty if this route is shared between all companies'), } _defaults = { 'sequence': lambda self, cr, uid, ctx: 0, 'active': True, 'product_selectable': True, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.location.route', context=c), } def write(self, cr, uid, ids, vals, context=None): '''when a route is deactivated, deactivate also its pull and push rules''' if isinstance(ids, (int, long)): ids = [ids] res = super(stock_location_route, self).write(cr, uid, ids, vals, context=context) if 'active' in vals: push_ids = [] pull_ids = [] for route in self.browse(cr, uid, ids, context=context): if route.push_ids: push_ids += [r.id for r in route.push_ids if r.active != vals['active']] if route.pull_ids: pull_ids += [r.id for r in route.pull_ids if r.active != vals['active']] if push_ids: self.pool.get('stock.location.path').write(cr, uid, push_ids, {'active': vals['active']}, context=context) if pull_ids: self.pool.get('procurement.rule').write(cr, uid, pull_ids, {'active': vals['active']}, context=context) return res #---------------------------------------------------------- # Quants #---------------------------------------------------------- class stock_quant(osv.osv): """ Quants are the smallest unit of stock physical instances """ _name = "stock.quant" _description = "Quants" def _get_quant_name(self, cr, uid, ids, name, args, context=None): """ Forms complete name of location from parent location to child location. 
@return: Dictionary of values """ res = {} for q in self.browse(cr, uid, ids, context=context): res[q.id] = q.product_id.code or '' if q.lot_id: res[q.id] = q.lot_id.name res[q.id] += ': ' + str(q.qty) + q.product_id.uom_id.name return res def _calc_inventory_value(self, cr, uid, ids, name, attr, context=None): context = dict(context or {}) res = {} uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id for quant in self.browse(cr, uid, ids, context=context): context.pop('force_company', None) if quant.company_id.id != uid_company_id: #if the company of the quant is different than the current user company, force the company in the context #then re-do a browse to read the property fields for the good company. context['force_company'] = quant.company_id.id quant = self.browse(cr, uid, quant.id, context=context) res[quant.id] = self._get_inventory_value(cr, uid, quant, context=context) return res def _get_inventory_value(self, cr, uid, quant, context=None): return quant.product_id.standard_price * quant.qty _columns = { 'name': fields.function(_get_quant_name, type='char', string='Identifier'), 'product_id': fields.many2one('product.product', 'Product', required=True, ondelete="restrict", readonly=True, select=True), 'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="restrict", readonly=True, select=True, auto_join=True), 'qty': fields.float('Quantity', required=True, help="Quantity of products in this quant, in the default unit of measure of the product", readonly=True, select=True), 'package_id': fields.many2one('stock.quant.package', string='Package', help="The package containing this quant", readonly=True, select=True), 'packaging_type_id': fields.related('package_id', 'packaging_id', type='many2one', relation='product.packaging', string='Type of packaging', readonly=True, store=True), 'reservation_id': fields.many2one('stock.move', 'Reserved for Move', help="The move the quant is reserved for", readonly=True, select=True), 'lot_id': fields.many2one('stock.production.lot', 'Lot', readonly=True, select=True, ondelete="restrict"), 'cost': fields.float('Unit Cost'), 'owner_id': fields.many2one('res.partner', 'Owner', help="This is the owner of the quant", readonly=True, select=True), 'create_date': fields.datetime('Creation Date', readonly=True), 'in_date': fields.datetime('Incoming Date', readonly=True, select=True), 'history_ids': fields.many2many('stock.move', 'stock_quant_move_rel', 'quant_id', 'move_id', 'Moves', help='Moves that operate(d) on this quant', copy=False), 'company_id': fields.many2one('res.company', 'Company', help="The company to which the quants belong", required=True, readonly=True, select=True), 'inventory_value': fields.function(_calc_inventory_value, string="Inventory Value", type='float', readonly=True), # Used for negative quants to reconcile after compensated by a new positive one 'propagated_from_id': fields.many2one('stock.quant', 'Linked Quant', help='The negative quant this is coming from', readonly=True, select=True), 'negative_move_id': fields.many2one('stock.move', 'Move Negative Quant', help='If this is a negative quant, this will be the move that caused this negative quant.', readonly=True), 'negative_dest_location_id': fields.related('negative_move_id', 'location_dest_id', type='many2one', relation='stock.location', string="Negative Destination Location", readonly=True, help="Technical field used to record the destination location of a move that created a negative quant"), } 
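# Illustration (not part of the original module): a minimal, ORM-free sketch of
# the two function fields defined above. The _demo_* names are hypothetical and
# added only for this example; they mirror the logic of _get_quant_name and
# _get_inventory_value on plain Python values.
def _demo_quant_name(code, lot_name, qty, uom_name):
    # the lot name takes precedence over the product code, then
    # ': <qty><uom>' is appended, as in _get_quant_name
    label = lot_name or code or ''
    return label + ': ' + str(qty) + uom_name

def _demo_inventory_value(standard_price, qty):
    # _get_inventory_value prices the quant at the product's standard price
    return standard_price * qty

assert _demo_quant_name('PROD-A', None, 5.0, 'Unit(s)') == 'PROD-A: 5.0Unit(s)'
assert _demo_inventory_value(2.5, 5.0) == 12.5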
_defaults = { 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.quant', context=c), } def init(self, cr): cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_quant_product_location_index',)) if not cr.fetchone(): cr.execute('CREATE INDEX stock_quant_product_location_index ON stock_quant (product_id, location_id, company_id, qty, in_date, reservation_id)') def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True): ''' Override read_group in order to sum the function field 'inventory_value' when grouping''' res = super(stock_quant, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy) if 'inventory_value' in fields: for line in res: if '__domain' in line: lines = self.search(cr, uid, line['__domain'], context=context) inv_value = 0.0 for line2 in self.browse(cr, uid, lines, context=context): inv_value += line2.inventory_value line['inventory_value'] = inv_value return res def action_view_quant_history(self, cr, uid, ids, context=None): ''' This function returns an action that displays the history of the quant, i.e. all the stock moves that led to the creation of this quant, with this quant's quantity. ''' mod_obj = self.pool.get('ir.model.data') act_obj = self.pool.get('ir.actions.act_window') result = mod_obj.get_object_reference(cr, uid, 'stock', 'action_move_form2') id = result and result[1] or False result = act_obj.read(cr, uid, [id], context={})[0] move_ids = [] for quant in self.browse(cr, uid, ids, context=context): move_ids += [move.id for move in quant.history_ids] result['domain'] = "[('id','in',[" + ','.join(map(str, move_ids)) + "])]" return result def quants_reserve(self, cr, uid, quants, move, link=False, context=None): '''This function reserves quants for the given move (and optionally the given link). If the total quantity reserved is enough, the move's state is also set to 'assigned'. :param quants: list of tuple(quant browse record or None, qty to reserve). If None is given as first tuple element, the item will be ignored.
Negative quants should not be passed as arguments. :param move: browse record :param link: browse record (stock.move.operation.link) ''' toreserve = [] reserved_availability = move.reserved_availability #split quants if needed for quant, qty in quants: if qty <= 0.0 or (quant and quant.qty <= 0.0): raise osv.except_osv(_('Error!'), _('You cannot reserve a negative quantity or a negative quant.')) if not quant: continue self._quant_split(cr, uid, quant, qty, context=context) toreserve.append(quant.id) reserved_availability += quant.qty #reserve quants if toreserve: self.write(cr, SUPERUSER_ID, toreserve, {'reservation_id': move.id}, context=context) #if the move has a picking_id, write on that picking that pack_operation might have changed and needs to be recomputed if move.picking_id: self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context) #check if the move's state needs to be set to 'assigned' rounding = move.product_id.uom_id.rounding if float_compare(reserved_availability, move.product_qty, precision_rounding=rounding) == 0 and move.state in ('confirmed', 'waiting'): self.pool.get('stock.move').write(cr, uid, [move.id], {'state': 'assigned'}, context=context) elif float_compare(reserved_availability, 0, precision_rounding=rounding) > 0 and not move.partially_available: self.pool.get('stock.move').write(cr, uid, [move.id], {'partially_available': True}, context=context) def quants_move(self, cr, uid, quants, move, location_to, location_from=False, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, context=None): """Moves all given stock.quants to the given destination location. Unreserves them from the current move. :param quants: list of tuple(browse record(stock.quant) or None, quantity to move) :param move: browse record (stock.move) :param location_to: browse record (stock.location) depicting where the quants have to be moved :param location_from: optional browse record (stock.location) explaining where the quant has to be taken (may differ from the move source location in case a removal strategy is applied).
This parameter is only used to pass to _quant_create if a negative quant must be created :param lot_id: ID of the lot that must be set on the quants to move :param owner_id: ID of the partner that must own the quants to move :param src_package_id: ID of the package that contains the quants to move :param dest_package_id: ID of the package that must be set on the moved quant """ quants_reconcile = [] to_move_quants = [] self._check_location(cr, uid, location_to, context=context) for quant, qty in quants: if not quant: #If quant is None, we will create a quant to move (and potentially a negative counterpart too) quant = self._quant_create(cr, uid, qty, move, lot_id=lot_id, owner_id=owner_id, src_package_id=src_package_id, dest_package_id=dest_package_id, force_location_from=location_from, force_location_to=location_to, context=context) else: self._quant_split(cr, uid, quant, qty, context=context) to_move_quants.append(quant) quants_reconcile.append(quant) if to_move_quants: to_recompute_move_ids = [x.reservation_id.id for x in to_move_quants if x.reservation_id and x.reservation_id.id != move.id] self.move_quants_write(cr, uid, to_move_quants, move, location_to, dest_package_id, context=context) self.pool.get('stock.move').recalculate_move_state(cr, uid, to_recompute_move_ids, context=context) if location_to.usage == 'internal': # Do manual search for quant to avoid full table scan (order by id) cr.execute(""" SELECT 0 FROM stock_quant, stock_location WHERE product_id = %s AND stock_location.id = stock_quant.location_id AND ((stock_location.parent_left >= %s AND stock_location.parent_left < %s) OR stock_location.id = %s) AND qty < 0.0 LIMIT 1 """, (move.product_id.id, location_to.parent_left, location_to.parent_right, location_to.id)) if cr.fetchone(): for quant in quants_reconcile: self._quant_reconcile_negative(cr, uid, quant, move, context=context) def move_quants_write(self, cr, uid, quants, move, location_dest_id, dest_package_id, context=None): context=context or {} vals = {'location_id': location_dest_id.id, 'history_ids': [(4, move.id)], 'reservation_id': False} if not context.get('entire_pack'): vals.update({'package_id': dest_package_id}) self.write(cr, SUPERUSER_ID, [q.id for q in quants], vals, context=context) def quants_get_prefered_domain(self, cr, uid, location, product, qty, domain=None, prefered_domain_list=[], restrict_lot_id=False, restrict_partner_id=False, context=None): ''' This function tries to find quants in the given location for the given domain, by trying to first limit the choice on the quants that match the first item of prefered_domain_list as well. But if the qty requested is not reached it tries to find the remaining quantity by looping on the prefered_domain_list (tries with the second item and so on). Make sure the quants aren't found twice => all the domains of prefered_domain_list should be orthogonal ''' if domain is None: domain = [] quants = [(None, qty)] #don't look for quants in location that are of type production, supplier or inventory. 
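#(these three usages are virtual counterpart locations with no real stock to
# reserve, so the (None, qty) placeholder built above is returned untouched)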
if location.usage in ['inventory', 'production', 'supplier']: return quants res_qty = qty if not prefered_domain_list: return self.quants_get(cr, uid, location, product, qty, domain=domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context) for prefered_domain in prefered_domain_list: res_qty_cmp = float_compare(res_qty, 0, precision_rounding=product.uom_id.rounding) if res_qty_cmp > 0: #try to replace the last tuple (None, res_qty) with something that wasn't chosen at first because of the prefered order quants.pop() tmp_quants = self.quants_get(cr, uid, location, product, res_qty, domain=domain + prefered_domain, restrict_lot_id=restrict_lot_id, restrict_partner_id=restrict_partner_id, context=context) for quant in tmp_quants: if quant[0]: res_qty -= quant[1] quants += tmp_quants return quants def quants_get(self, cr, uid, location, product, qty, domain=None, restrict_lot_id=False, restrict_partner_id=False, context=None): """ Use the removal strategies of product to search for the correct quants If you inherit, put the super at the end of your method. :location: browse record of the parent location where the quants have to be found :product: browse record of the product to find :qty in UoM of product """ result = [] domain = domain or [('qty', '>', 0.0)] if restrict_partner_id: domain += [('owner_id', '=', restrict_partner_id)] if restrict_lot_id: domain += [('lot_id', '=', restrict_lot_id)] if location: removal_strategy = self.pool.get('stock.location').get_removal_strategy(cr, uid, location, product, context=context) result += self.apply_removal_strategy(cr, uid, location, product, qty, domain, removal_strategy, context=context) return result def apply_removal_strategy(self, cr, uid, location, product, quantity, domain, removal_strategy, context=None): if removal_strategy == 'fifo': order = 'in_date, id' return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context) elif removal_strategy == 'lifo': order = 'in_date desc, id desc' return self._quants_get_order(cr, uid, location, product, quantity, domain, order, context=context) raise osv.except_osv(_('Error!'), _('Removal strategy %s not implemented.' % (removal_strategy,))) def _quant_create(self, cr, uid, qty, move, lot_id=False, owner_id=False, src_package_id=False, dest_package_id=False, force_location_from=False, force_location_to=False, context=None): '''Create a quant in the destination location and create a negative quant in the source location if it's an internal location. ''' if context is None: context = {} price_unit = self.pool.get('stock.move').get_price_unit(cr, uid, move, context=context) location = force_location_to or move.location_dest_id rounding = move.product_id.uom_id.rounding vals = { 'product_id': move.product_id.id, 'location_id': location.id, 'qty': float_round(qty, precision_rounding=rounding), 'cost': price_unit, 'history_ids': [(4, move.id)], 'in_date': datetime.now().strftime(DEFAULT_SERVER_DATETIME_FORMAT), 'company_id': move.company_id.id, 'lot_id': lot_id, 'owner_id': owner_id, 'package_id': dest_package_id, } if move.location_id.usage == 'internal': #if we were trying to move something from an internal location and reach here (quant creation), #it means that a negative quant has to be created as well. 
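#(e.g. moving 5 units out of an internal shelf that has nothing left yields a
# +5 quant at the destination and a -5 quant on the shelf; the two are linked
# through propagated_from_id and reconciled later by _quant_reconcile_negative)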
negative_vals = vals.copy() negative_vals['location_id'] = force_location_from and force_location_from.id or move.location_id.id negative_vals['qty'] = float_round(-qty, precision_rounding=rounding) negative_vals['cost'] = price_unit negative_vals['negative_move_id'] = move.id negative_vals['package_id'] = src_package_id negative_quant_id = self.create(cr, SUPERUSER_ID, negative_vals, context=context) vals.update({'propagated_from_id': negative_quant_id}) #create the quant as superuser, because we want to restrict the manual creation of quants: this method should always be used to create them quant_id = self.create(cr, SUPERUSER_ID, vals, context=context) return self.browse(cr, uid, quant_id, context=context) def _quant_split(self, cr, uid, quant, qty, context=None): context = context or {} rounding = quant.product_id.uom_id.rounding if float_compare(abs(quant.qty), abs(qty), precision_rounding=rounding) <= 0: # if quant <= qty in abs, take it entirely return False qty_round = float_round(qty, precision_rounding=rounding) new_qty_round = float_round(quant.qty - qty, precision_rounding=rounding) # Fetch the history_ids manually as it will not do a join with the stock moves then (=> a lot faster) cr.execute("""SELECT move_id FROM stock_quant_move_rel WHERE quant_id = %s""", (quant.id,)) res = cr.fetchall() new_quant = self.copy(cr, SUPERUSER_ID, quant.id, default={'qty': new_qty_round, 'history_ids': [(4, x[0]) for x in res]}, context=context) self.write(cr, SUPERUSER_ID, quant.id, {'qty': qty_round}, context=context) return self.browse(cr, uid, new_quant, context=context) def _get_latest_move(self, cr, uid, quant, context=None): move = False for m in quant.history_ids: if not move or m.date > move.date: move = m return move @api.cr_uid_ids_context def _quants_merge(self, cr, uid, solved_quant_ids, solving_quant, context=None): path = [] for move in solving_quant.history_ids: path.append((4, move.id)) self.write(cr, SUPERUSER_ID, solved_quant_ids, {'history_ids': path}, context=context) def _quant_reconcile_negative(self, cr, uid, quant, move, context=None): """ When a new quant arrives in a location, try to reconcile it with negative quants. If possible, apply the cost of the new quant to the counterpart of the negative quant.
""" solving_quant = quant dom = [('qty', '<', 0)] if quant.lot_id: dom += [('lot_id', '=', quant.lot_id.id)] dom += [('owner_id', '=', quant.owner_id.id)] dom += [('package_id', '=', quant.package_id.id)] dom += [('id', '!=', quant.propagated_from_id.id)] quants = self.quants_get(cr, uid, quant.location_id, quant.product_id, quant.qty, dom, context=context) product_uom_rounding = quant.product_id.uom_id.rounding for quant_neg, qty in quants: if not quant_neg or not solving_quant: continue to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id)], context=context) if not to_solve_quant_ids: continue solving_qty = qty solved_quant_ids = [] for to_solve_quant in self.browse(cr, uid, to_solve_quant_ids, context=context): if float_compare(solving_qty, 0, precision_rounding=product_uom_rounding) <= 0: continue solved_quant_ids.append(to_solve_quant.id) self._quant_split(cr, uid, to_solve_quant, min(solving_qty, to_solve_quant.qty), context=context) solving_qty -= min(solving_qty, to_solve_quant.qty) remaining_solving_quant = self._quant_split(cr, uid, solving_quant, qty, context=context) remaining_neg_quant = self._quant_split(cr, uid, quant_neg, -qty, context=context) #if the reconciliation was not complete, we need to link together the remaining parts if remaining_neg_quant: remaining_to_solve_quant_ids = self.search(cr, uid, [('propagated_from_id', '=', quant_neg.id), ('id', 'not in', solved_quant_ids)], context=context) if remaining_to_solve_quant_ids: self.write(cr, SUPERUSER_ID, remaining_to_solve_quant_ids, {'propagated_from_id': remaining_neg_quant.id}, context=context) if solving_quant.propagated_from_id and solved_quant_ids: self.write(cr, SUPERUSER_ID, solved_quant_ids, {'propagated_from_id': solving_quant.propagated_from_id.id}, context=context) #delete the reconciled quants, as it is replaced by the solved quants self.unlink(cr, SUPERUSER_ID, [quant_neg.id], context=context) if solved_quant_ids: #price update + accounting entries adjustments self._price_update(cr, uid, solved_quant_ids, solving_quant.cost, context=context) #merge history (and cost?) 
self._quants_merge(cr, uid, solved_quant_ids, solving_quant, context=context) self.unlink(cr, SUPERUSER_ID, [solving_quant.id], context=context) solving_quant = remaining_solving_quant def _price_update(self, cr, uid, ids, newprice, context=None): self.write(cr, SUPERUSER_ID, ids, {'cost': newprice}, context=context) def quants_unreserve(self, cr, uid, move, context=None): related_quants = [x.id for x in move.reserved_quant_ids] if related_quants: #if move has a picking_id, write on that picking that pack_operation might have changed and need to be recomputed if move.picking_id: self.pool.get('stock.picking').write(cr, uid, [move.picking_id.id], {'recompute_pack_op': True}, context=context) if move.partially_available: self.pool.get("stock.move").write(cr, uid, [move.id], {'partially_available': False}, context=context) self.write(cr, SUPERUSER_ID, related_quants, {'reservation_id': False}, context=context) def _quants_get_order(self, cr, uid, location, product, quantity, domain=[], orderby='in_date', context=None): ''' Implementation of removal strategies If it can not reserve, it will return a tuple (None, qty) ''' if context is None: context = {} domain += location and [('location_id', 'child_of', location.id)] or [] domain += [('product_id', '=', product.id)] if context.get('force_company'): domain += [('company_id', '=', context.get('force_company'))] else: domain += [('company_id', '=', self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id)] res = [] offset = 0 while float_compare(quantity, 0, precision_rounding=product.uom_id.rounding) > 0: quants = self.search(cr, uid, domain, order=orderby, limit=10, offset=offset, context=context) if not quants: res.append((None, quantity)) break for quant in self.browse(cr, uid, quants, context=context): rounding = product.uom_id.rounding if float_compare(quantity, abs(quant.qty), precision_rounding=rounding) >= 0: res += [(quant, abs(quant.qty))] quantity -= abs(quant.qty) elif float_compare(quantity, 0.0, precision_rounding=rounding) != 0: res += [(quant, quantity)] quantity = 0 break offset += 10 return res def _check_location(self, cr, uid, location, context=None): if location.usage == 'view': raise osv.except_osv(_('Error'), _('You cannot move to a location of type view %s.') % (location.name)) return True #---------------------------------------------------------- # Stock Picking #---------------------------------------------------------- class stock_picking(osv.osv): _name = "stock.picking" _inherit = ['mail.thread'] _description = "Picking List" _order = "priority desc, date asc, id desc" def _set_min_date(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'date_expected': value}, context=context) def _set_priority(self, cr, uid, id, field, value, arg, context=None): move_obj = self.pool.get("stock.move") if value: move_ids = [move.id for move in self.browse(cr, uid, id, context=context).move_lines] move_obj.write(cr, uid, move_ids, {'priority': value}, context=context) def get_min_max_date(self, cr, uid, ids, field_name, arg, context=None): """ Finds minimum and maximum dates for picking. 
@return: Dictionary of values """ res = {} for id in ids: res[id] = {'min_date': False, 'max_date': False, 'priority': '1'} if not ids: return res cr.execute("""select picking_id, min(date_expected), max(date_expected), max(priority) from stock_move where picking_id IN %s group by picking_id""", (tuple(ids),)) for pick, dt1, dt2, prio in cr.fetchall(): res[pick]['min_date'] = dt1 res[pick]['max_date'] = dt2 res[pick]['priority'] = prio return res def create(self, cr, user, vals, context=None): context = context or {} if ('name' not in vals) or (vals.get('name') in ('/', False)): ptype_id = vals.get('picking_type_id', context.get('default_picking_type_id', False)) sequence_id = self.pool.get('stock.picking.type').browse(cr, user, ptype_id, context=context).sequence_id.id vals['name'] = self.pool.get('ir.sequence').get_id(cr, user, sequence_id, 'id', context=context) return super(stock_picking, self).create(cr, user, vals, context) def _state_get(self, cr, uid, ids, field_name, arg, context=None): '''The state of a picking depends on the state of its related stock.move draft: the picking has no line or any one of the lines is draft done, draft, cancel: all lines are done / draft / cancel confirmed, waiting, assigned, partially_available depends on move_type (all at once or partial) ''' res = {} for pick in self.browse(cr, uid, ids, context=context): if (not pick.move_lines) or any([x.state == 'draft' for x in pick.move_lines]): res[pick.id] = 'draft' continue if all([x.state == 'cancel' for x in pick.move_lines]): res[pick.id] = 'cancel' continue if all([x.state in ('cancel', 'done') for x in pick.move_lines]): res[pick.id] = 'done' continue order = {'confirmed': 0, 'waiting': 1, 'assigned': 2} order_inv = {0: 'confirmed', 1: 'waiting', 2: 'assigned'} lst = [order[x.state] for x in pick.move_lines if x.state not in ('cancel', 'done')] if pick.move_type == 'one': res[pick.id] = order_inv[min(lst)] else: #we are in the case of partial delivery, so if all moves are assigned, the picking #should be assigned too; else if one of the moves is assigned or partially available, the picking should be #in the partially available state; otherwise the picking is in the waiting or confirmed state res[pick.id] = order_inv[max(lst)] if not all(x == 2 for x in lst): if any(x == 2 for x in lst): res[pick.id] = 'partially_available' else: #if no move is assigned, check if we have one product partially available for move in pick.move_lines: if move.partially_available: res[pick.id] = 'partially_available' break return res def _get_pickings(self, cr, uid, ids, context=None): res = set() for move in self.browse(cr, uid, ids, context=context): if move.picking_id: res.add(move.picking_id.id) return list(res) def _get_pickings_dates_priority(self, cr, uid, ids, context=None): res = set() for move in self.browse(cr, uid, ids, context=context): if move.picking_id and (not (move.picking_id.min_date < move.date_expected < move.picking_id.max_date) or move.priority > move.picking_id.priority): res.add(move.picking_id.id) return list(res) def _get_pack_operation_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False if pick.pack_operation_ids: res[pick.id] = True return res def _get_quant_reserved_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for pick in self.browse(cr, uid, ids, context=context): res[pick.id] = False for move in pick.move_lines: if move.reserved_quant_ids: res[pick.id] = True continue return res def
check_group_lot(self, cr, uid, context=None): """ This function will return true if we have the setting to use lots activated. """ return self.pool.get('res.users').has_group(cr, uid, 'stock.group_production_lot') def check_group_pack(self, cr, uid, context=None): """ This function will return true if we have the setting to use package activated. """ return self.pool.get('res.users').has_group(cr, uid, 'stock.group_tracking_lot') def action_assign_owner(self, cr, uid, ids, context=None): for picking in self.browse(cr, uid, ids, context=context): packop_ids = [op.id for op in picking.pack_operation_ids] self.pool.get('stock.pack.operation').write(cr, uid, packop_ids, {'owner_id': picking.owner_id.id}, context=context) _columns = { 'name': fields.char('Reference', select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False), 'origin': fields.char('Source Document', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Reference of the document", select=True), 'backorder_id': fields.many2one('stock.picking', 'Back Order of', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="If this shipment was split, then this field links to the shipment which contains the already processed part.", select=True, copy=False), 'note': fields.text('Notes'), 'move_type': fields.selection([('direct', 'Partial'), ('one', 'All at once')], 'Delivery Method', required=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="It specifies goods to be deliver partially or all at once"), 'state': fields.function(_state_get, type="selection", copy=False, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_type'], 20), 'stock.move': (_get_pickings, ['state', 'picking_id', 'partially_available'], 20)}, selection=[ ('draft', 'Draft'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Operation'), ('confirmed', 'Waiting Availability'), ('partially_available', 'Partially Available'), ('assigned', 'Ready to Transfer'), ('done', 'Transferred'), ], string='Status', readonly=True, select=True, track_visibility='onchange', help=""" * Draft: not confirmed yet and will not be scheduled until confirmed\n * Waiting Another Operation: waiting for another move to proceed before it becomes automatically available (e.g. in Make-To-Order flows)\n * Waiting Availability: still waiting for the availability of products\n * Partially Available: some products are available and reserved\n * Ready to Transfer: products reserved, simply waiting for confirmation.\n * Transferred: has been processed, can't be modified or cancelled anymore\n * Cancelled: has been cancelled, can't be confirmed anymore""" ), 'priority': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_priority, type='selection', selection=procurement.PROCUREMENT_PRIORITIES, string='Priority', store={'stock.move': (_get_pickings_dates_priority, ['priority', 'picking_id'], 20)}, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, select=1, help="Priority for this picking. 
Setting manually a value here would set it as priority for all the moves", track_visibility='onchange', required=True), 'min_date': fields.function(get_min_max_date, multi="min_max_date", fnct_inv=_set_min_date, store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Scheduled Date', select=1, help="Scheduled time for the first part of the shipment to be processed. Setting manually a value here would set it as expected date for all the stock moves.", track_visibility='onchange'), 'max_date': fields.function(get_min_max_date, multi="min_max_date", store={'stock.move': (_get_pickings_dates_priority, ['date_expected', 'picking_id'], 20)}, type='datetime', string='Max. Expected Date', select=2, help="Scheduled time for the last part of the shipment to be processed"), 'date': fields.datetime('Creation Date', help="Creation Date, usually the time of the order", select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, track_visibility='onchange'), 'date_done': fields.datetime('Date of Transfer', help="Date of Completion", states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=False), 'move_lines': fields.one2many('stock.move', 'picking_id', 'Internal Moves', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, copy=True), 'quant_reserved_exist': fields.function(_get_quant_reserved_exist, type='boolean', string='Quant already reserved ?', help='technical field used to know if there is already at least one quant reserved on moves of a given picking'), 'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'company_id': fields.many2one('res.company', 'Company', required=True, select=True, states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}), 'pack_operation_ids': fields.one2many('stock.pack.operation', 'picking_id', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, string='Related Packing Operations'), 'pack_operation_exist': fields.function(_get_pack_operation_exist, type='boolean', string='Pack Operation Exists?', help='technical field for attrs in view'), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, required=True), 'picking_type_code': fields.related('picking_type_id', 'code', type='char', string='Picking Type Code', help="Technical field used to display the correct label on print button in the picking view"), 'owner_id': fields.many2one('res.partner', 'Owner', states={'done': [('readonly', True)], 'cancel': [('readonly', True)]}, help="Default Owner"), # Used to search on pickings 'product_id': fields.related('move_lines', 'product_id', type='many2one', relation='product.product', string='Product'), 'recompute_pack_op': fields.boolean('Recompute pack operation?', help='True if reserved quants changed, which mean we might need to recompute the package operations', copy=False), 'location_id': fields.related('move_lines', 'location_id', type='many2one', relation='stock.location', string='Location', readonly=True), 'location_dest_id': fields.related('move_lines', 'location_dest_id', type='many2one', relation='stock.location', string='Destination Location', readonly=True), 'group_id': fields.related('move_lines', 'group_id', type='many2one', relation='procurement.group', string='Procurement Group', 
readonly=True, store={ 'stock.picking': (lambda self, cr, uid, ids, ctx: ids, ['move_lines'], 10), 'stock.move': (_get_pickings, ['group_id', 'picking_id'], 10), }), } _defaults = { 'name': '/', 'state': 'draft', 'move_type': 'direct', 'priority': '1', # normal 'date': fields.datetime.now, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.picking', context=c), 'recompute_pack_op': True, } _sql_constraints = [ ('name_uniq', 'unique(name, company_id)', 'Reference must be unique per company!'), ] def do_print_picking(self, cr, uid, ids, context=None): '''This function prints the picking list''' context = dict(context or {}, active_ids=ids) return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_picking', context=context) def action_confirm(self, cr, uid, ids, context=None): todo = [] todo_force_assign = [] for picking in self.browse(cr, uid, ids, context=context): if picking.location_id.usage in ('supplier', 'inventory', 'production'): todo_force_assign.append(picking.id) for r in picking.move_lines: if r.state == 'draft': todo.append(r.id) if len(todo): self.pool.get('stock.move').action_confirm(cr, uid, todo, context=context) if todo_force_assign: self.force_assign(cr, uid, todo_force_assign, context=context) return True def action_assign(self, cr, uid, ids, context=None): """ Check availability of picking moves. This has the effect of changing the state and reserve quants on available moves, and may also impact the state of the picking as it is computed based on move's states. @return: True """ for pick in self.browse(cr, uid, ids, context=context): if pick.state == 'draft': self.action_confirm(cr, uid, [pick.id], context=context) #skip the moves that don't need to be checked move_ids = [x.id for x in pick.move_lines if x.state not in ('draft', 'cancel', 'done')] if not move_ids: raise osv.except_osv(_('Warning!'), _('Nothing to check the availability for.')) self.pool.get('stock.move').action_assign(cr, uid, move_ids, context=context) return True def force_assign(self, cr, uid, ids, context=None): """ Changes state of picking to available if moves are confirmed or waiting. @return: True """ for pick in self.browse(cr, uid, ids, context=context): move_ids = [x.id for x in pick.move_lines if x.state in ['confirmed', 'waiting']] self.pool.get('stock.move').force_assign(cr, uid, move_ids, context=context) #pack_operation might have changed and need to be recomputed self.write(cr, uid, ids, {'recompute_pack_op': True}, context=context) return True def action_cancel(self, cr, uid, ids, context=None): for pick in self.browse(cr, uid, ids, context=context): ids2 = [move.id for move in pick.move_lines] self.pool.get('stock.move').action_cancel(cr, uid, ids2, context) return True def action_done(self, cr, uid, ids, context=None): """Changes picking state to done by processing the Stock Moves of the Picking Normally that happens when the button "Done" is pressed on a Picking view. 
@return: True """ for pick in self.browse(cr, uid, ids, context=context): todo = [] for move in pick.move_lines: if move.state == 'draft': todo.extend(self.pool.get('stock.move').action_confirm(cr, uid, [move.id], context=context)) elif move.state in ('assigned', 'confirmed'): todo.append(move.id) if len(todo): self.pool.get('stock.move').action_done(cr, uid, todo, context=context) return True def unlink(self, cr, uid, ids, context=None): #on picking deletion, cancel its moves, then unlink them too move_obj = self.pool.get('stock.move') context = context or {} for pick in self.browse(cr, uid, ids, context=context): move_ids = [move.id for move in pick.move_lines] move_obj.action_cancel(cr, uid, move_ids, context=context) move_obj.unlink(cr, uid, move_ids, context=context) return super(stock_picking, self).unlink(cr, uid, ids, context=context) def write(self, cr, uid, ids, vals, context=None): if vals.get('move_lines') and not vals.get('pack_operation_ids'): # pack operations depend directly on the move lines, so they need to be recomputed pack_operation_obj = self.pool['stock.pack.operation'] existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) res = super(stock_picking, self).write(cr, uid, ids, vals, context=context) #if we changed the move lines or the pack operations, we need to recompute the remaining quantities of both if 'move_lines' in vals or 'pack_operation_ids' in vals: self.do_recompute_remaining_quantities(cr, uid, ids, context=context) return res def _create_backorder(self, cr, uid, picking, backorder_moves=[], context=None): """ Move all non-done lines into a new backorder picking. If the key 'do_only_split' is given in the context, then move all lines not in context.get('split', []) instead of all non-done lines. """ if not backorder_moves: backorder_moves = picking.move_lines backorder_move_ids = [x.id for x in backorder_moves if x.state not in ('done', 'cancel')] if 'do_only_split' in context and context['do_only_split']: backorder_move_ids = [x.id for x in backorder_moves if x.id not in context.get('split', [])] if backorder_move_ids: backorder_id = self.copy(cr, uid, picking.id, { 'name': '/', 'move_lines': [], 'pack_operation_ids': [], 'backorder_id': picking.id, }) backorder = self.browse(cr, uid, backorder_id, context=context) self.message_post(cr, uid, picking.id, body=_("Back order <em>%s</em> <b>created</b>.") % (backorder.name), context=context) move_obj = self.pool.get("stock.move") move_obj.write(cr, uid, backorder_move_ids, {'picking_id': backorder_id}, context=context) if not picking.date_done: self.write(cr, uid, [picking.id], {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.action_confirm(cr, uid, [backorder_id], context=context) return backorder_id return False @api.cr_uid_ids_context def recheck_availability(self, cr, uid, picking_ids, context=None): self.action_assign(cr, uid, picking_ids, context=context) self.do_prepare_partial(cr, uid, picking_ids, context=context) def _get_top_level_packages(self, cr, uid, quants_suggested_locations, context=None): """This method searches for the highest-level packages that can be moved as a single operation, given a list of quants to move and their suggested destination, and returns the list of matching packages.
""" # Try to find as much as possible top-level packages that can be moved pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") top_lvl_packages = set() quants_to_compare = quants_suggested_locations.keys() for pack in list(set([x.package_id for x in quants_suggested_locations.keys() if x and x.package_id])): loop = True test_pack = pack good_pack = False pack_destination = False while loop: pack_quants = pack_obj.get_content(cr, uid, [test_pack.id], context=context) all_in = True for quant in quant_obj.browse(cr, uid, pack_quants, context=context): # If the quant is not in the quants to compare and not in the common location if not quant in quants_to_compare: all_in = False break else: #if putaway strat apply, the destination location of each quant may be different (and thus the package should not be taken as a single operation) if not pack_destination: pack_destination = quants_suggested_locations[quant] elif pack_destination != quants_suggested_locations[quant]: all_in = False break if all_in: good_pack = test_pack if test_pack.parent_id: test_pack = test_pack.parent_id else: #stop the loop when there's no parent package anymore loop = False else: #stop the loop when the package test_pack is not totally reserved for moves of this picking #(some quants may be reserved for other picking or not reserved at all) loop = False if good_pack: top_lvl_packages.add(good_pack) return list(top_lvl_packages) def _prepare_pack_ops(self, cr, uid, picking, quants, forced_qties, context=None): """ returns a list of dict, ready to be used in create() of stock.pack.operation. :param picking: browse record (stock.picking) :param quants: browse record list (stock.quant). List of quants associated to the picking :param forced_qties: dictionary showing for each product (keys) its corresponding quantity (value) that is not covered by the quants associated to the picking """ def _picking_putaway_apply(product): location = False # Search putaway strategy if product_putaway_strats.get(product.id): location = product_putaway_strats[product.id] else: location = self.pool.get('stock.location').get_putaway_strategy(cr, uid, picking.location_dest_id, product, context=context) product_putaway_strats[product.id] = location return location or picking.location_dest_id.id # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead. 
product_uom = {} # Determines UoM used in pack operations location_dest_id = None location_id = None for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if not product_uom.get(move.product_id.id): product_uom[move.product_id.id] = move.product_id.uom_id if move.product_uom.id != move.product_id.uom_id.id and move.product_uom.factor > product_uom[move.product_id.id].factor: product_uom[move.product_id.id] = move.product_uom if not move.scrapped: if location_dest_id and move.location_dest_id.id != location_dest_id: raise Warning(_('The destination location must be the same for all the moves of the picking.')) location_dest_id = move.location_dest_id.id if location_id and move.location_id.id != location_id: raise Warning(_('The source location must be the same for all the moves of the picking.')) location_id = move.location_id.id pack_obj = self.pool.get("stock.quant.package") quant_obj = self.pool.get("stock.quant") vals = [] qtys_grouped = {} #for each quant of the picking, find the suggested location quants_suggested_locations = {} product_putaway_strats = {} for quant in quants: if quant.qty <= 0: continue suggested_location_id = _picking_putaway_apply(quant.product_id) quants_suggested_locations[quant] = suggested_location_id #find the packages we can move as a whole top_lvl_packages = self._get_top_level_packages(cr, uid, quants_suggested_locations, context=context) # and then create pack operations for the top-level packages found for pack in top_lvl_packages: pack_quant_ids = pack_obj.get_content(cr, uid, [pack.id], context=context) pack_quants = quant_obj.browse(cr, uid, pack_quant_ids, context=context) vals.append({ 'picking_id': picking.id, 'package_id': pack.id, 'product_qty': 1.0, 'location_id': pack.location_id.id, 'location_dest_id': quants_suggested_locations[pack_quants[0]], 'owner_id': pack.owner_id.id, }) #remove the quants inside the package so that they are excluded from the rest of the computation for quant in pack_quants: del quants_suggested_locations[quant] # Go through all remaining reserved quants and group by product, package, lot, owner, source location and dest location for quant, dest_location_id in quants_suggested_locations.items(): key = (quant.product_id.id, quant.package_id.id, quant.lot_id.id, quant.owner_id.id, quant.location_id.id, dest_location_id) if qtys_grouped.get(key): qtys_grouped[key] += quant.qty else: qtys_grouped[key] = quant.qty # Do the same for the forced quantities (e.g. in case of force_assign or an incoming shipment) for product, qty in forced_qties.items(): if qty <= 0: continue suggested_location_id = _picking_putaway_apply(product) key = (product.id, False, False, picking.owner_id.id, picking.location_id.id, suggested_location_id) if qtys_grouped.get(key): qtys_grouped[key] += qty else: qtys_grouped[key] = qty # Create the necessary operations for the grouped quants and remaining qtys uom_obj = self.pool.get('product.uom') prevals = {} for key, qty in qtys_grouped.items(): product = self.pool.get("product.product").browse(cr, uid, key[0], context=context) uom_id = product.uom_id.id qty_uom = qty if product_uom.get(key[0]): uom_id = product_uom[key[0]].id qty_uom = uom_obj._compute_qty(cr, uid, product.uom_id.id, qty, uom_id) val_dict = { 'picking_id': picking.id, 'product_qty': qty_uom, 'product_id': key[0], 'package_id': key[1], 'lot_id': key[2], 'owner_id': key[3], 'location_id': key[4], 'location_dest_id': key[5], 'product_uom_id': uom_id, } if key[0] in prevals: prevals[key[0]].append(val_dict)
else: prevals[key[0]] = [val_dict] # prevals var holds the operations in order to create them in the same order than the picking stock moves if possible processed_products = set() for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if move.product_id.id not in processed_products: vals += prevals.get(move.product_id.id, []) processed_products.add(move.product_id.id) return vals @api.cr_uid_ids_context def open_barcode_interface(self, cr, uid, picking_ids, context=None): final_url="/barcode/web/#action=stock.ui&picking_id="+str(picking_ids[0]) return {'type': 'ir.actions.act_url', 'url':final_url, 'target': 'self',} @api.cr_uid_ids_context def do_partial_open_barcode(self, cr, uid, picking_ids, context=None): self.do_prepare_partial(cr, uid, picking_ids, context=context) return self.open_barcode_interface(cr, uid, picking_ids, context=context) @api.cr_uid_ids_context def do_prepare_partial(self, cr, uid, picking_ids, context=None): context = context or {} pack_operation_obj = self.pool.get('stock.pack.operation') #used to avoid recomputing the remaining quantities at each new pack operation created ctx = context.copy() ctx['no_recompute'] = True #get list of existing operations and delete them existing_package_ids = pack_operation_obj.search(cr, uid, [('picking_id', 'in', picking_ids)], context=context) if existing_package_ids: pack_operation_obj.unlink(cr, uid, existing_package_ids, context) for picking in self.browse(cr, uid, picking_ids, context=context): forced_qties = {} # Quantity remaining after calculating reserved quants picking_quants = [] #Calculate packages, reserved quants, qtys of this picking's moves for move in picking.move_lines: if move.state not in ('assigned', 'confirmed', 'waiting'): continue move_quants = move.reserved_quant_ids picking_quants += move_quants forced_qty = (move.state == 'assigned') and move.product_qty - sum([x.qty for x in move_quants]) or 0 #if we used force_assign() on the move, or if the move is incoming, forced_qty > 0 if float_compare(forced_qty, 0, precision_rounding=move.product_id.uom_id.rounding) > 0: if forced_qties.get(move.product_id): forced_qties[move.product_id] += forced_qty else: forced_qties[move.product_id] = forced_qty for vals in self._prepare_pack_ops(cr, uid, picking, picking_quants, forced_qties, context=context): pack_operation_obj.create(cr, uid, vals, context=ctx) #recompute the remaining quantities all at once self.do_recompute_remaining_quantities(cr, uid, picking_ids, context=context) self.write(cr, uid, picking_ids, {'recompute_pack_op': False}, context=context) @api.cr_uid_ids_context def do_unreserve(self, cr, uid, picking_ids, context=None): """ Will remove all quants for picking in picking_ids """ moves_to_unreserve = [] pack_line_to_unreserve = [] for picking in self.browse(cr, uid, picking_ids, context=context): moves_to_unreserve += [m.id for m in picking.move_lines if m.state not in ('done', 'cancel')] pack_line_to_unreserve += [p.id for p in picking.pack_operation_ids] if moves_to_unreserve: if pack_line_to_unreserve: self.pool.get('stock.pack.operation').unlink(cr, uid, pack_line_to_unreserve, context=context) self.pool.get('stock.move').do_unreserve(cr, uid, moves_to_unreserve, context=context) def recompute_remaining_qty(self, cr, uid, picking, context=None): def _create_link_for_index(operation_id, index, product_id, qty_to_assign, quant_id=False): move_dict = prod2move_ids[product_id][index] qty_on_link = min(move_dict['remaining_qty'], qty_to_assign) 
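# a link can never carry more than the move still has to do, nor more than the
# operation still needs; a fully consumed move is popped from the candidate
# list just below, otherwise its remaining quantity is decreased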
self.pool.get('stock.move.operation.link').create(cr, uid, {'move_id': move_dict['move'].id, 'operation_id': operation_id, 'qty': qty_on_link, 'reserved_quant_id': quant_id}, context=context) if move_dict['remaining_qty'] == qty_on_link: prod2move_ids[product_id].pop(index) else: move_dict['remaining_qty'] -= qty_on_link return qty_on_link def _create_link_for_quant(operation_id, quant, qty): """create a link for given operation and reserved move of given quant, for the max quantity possible, and returns this quantity""" if not quant.reservation_id.id: return _create_link_for_product(operation_id, quant.product_id.id, qty) qty_on_link = 0 for i in range(0, len(prod2move_ids[quant.product_id.id])): if prod2move_ids[quant.product_id.id][i]['move'].id != quant.reservation_id.id: continue qty_on_link = _create_link_for_index(operation_id, i, quant.product_id.id, qty, quant_id=quant.id) break return qty_on_link def _create_link_for_product(operation_id, product_id, qty): '''method that creates the link between a given operation and move(s) of given product, for the given quantity. Returns True if it was possible to create links for the requested quantity (False if there was not enough quantity on stock moves)''' qty_to_assign = qty prod_obj = self.pool.get("product.product") product = prod_obj.browse(cr, uid, product_id) rounding = product.uom_id.rounding qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) if prod2move_ids.get(product_id): while prod2move_ids[product_id] and qtyassign_cmp > 0: qty_on_link = _create_link_for_index(operation_id, 0, product_id, qty_to_assign, quant_id=False) qty_to_assign -= qty_on_link qtyassign_cmp = float_compare(qty_to_assign, 0.0, precision_rounding=rounding) return qtyassign_cmp == 0 uom_obj = self.pool.get('product.uom') package_obj = self.pool.get('stock.quant.package') quant_obj = self.pool.get('stock.quant') link_obj = self.pool.get('stock.move.operation.link') quants_in_package_done = set() prod2move_ids = {} still_to_do = [] #make a dictionary giving for each product, the moves and related quantity that can be used in operation links for move in [x for x in picking.move_lines if x.state not in ('done', 'cancel')]: if not prod2move_ids.get(move.product_id.id): prod2move_ids[move.product_id.id] = [{'move': move, 'remaining_qty': move.product_qty}] else: prod2move_ids[move.product_id.id].append({'move': move, 'remaining_qty': move.product_qty}) need_rereserve = False #sort the operations in order to give higher priority to those with a package, then a serial number operations = picking.pack_operation_ids operations = sorted(operations, key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) #delete existing operations to start again from scratch links = link_obj.search(cr, uid, [('operation_id', 'in', [x.id for x in operations])], context=context) if links: link_obj.unlink(cr, uid, links, context=context) #1) first, try to create links when quants can be identified without any doubt for ops in operations: #for each operation, create the links with the stock move by seeking on the matching reserved quants, #and deffer the operation if there is some ambiguity on the move to select if ops.package_id and not ops.product_id: #entire package quant_ids = package_obj.get_content(cr, uid, [ops.package_id.id], context=context) for quant in quant_obj.browse(cr, uid, quant_ids, context=context): remaining_qty_on_quant = quant.qty if quant.reservation_id: #avoid quants being 
counted twice quants_in_package_done.add(quant.id) qty_on_link = _create_link_for_quant(ops.id, quant, quant.qty) remaining_qty_on_quant -= qty_on_link if remaining_qty_on_quant: still_to_do.append((ops, quant.product_id.id, remaining_qty_on_quant)) need_rereserve = True elif ops.product_id.id: #Check moves with same product qty_to_assign = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context) for move_dict in prod2move_ids.get(ops.product_id.id, []): move = move_dict['move'] for quant in move.reserved_quant_ids: if not qty_to_assign > 0: break if quant.id in quants_in_package_done: continue #check if the quant is matching the operation details if ops.package_id: flag = quant.package_id and bool(package_obj.search(cr, uid, [('id', 'child_of', [ops.package_id.id])], context=context)) or False else: flag = not quant.package_id.id flag = flag and ((ops.lot_id and ops.lot_id.id == quant.lot_id.id) or not ops.lot_id) flag = flag and (ops.owner_id.id == quant.owner_id.id) if flag: max_qty_on_link = min(quant.qty, qty_to_assign) qty_on_link = _create_link_for_quant(ops.id, quant, max_qty_on_link) qty_to_assign -= qty_on_link qty_assign_cmp = float_compare(qty_to_assign, 0, precision_rounding=ops.product_id.uom_id.rounding) if qty_assign_cmp > 0: #qty reserved is less than qty put in operations. We need to create a link but it's deferred after we processed #all the quants (because they leave no choice on their related move and needs to be processed with higher priority) still_to_do += [(ops, ops.product_id.id, qty_to_assign)] need_rereserve = True #2) then, process the remaining part all_op_processed = True for ops, product_id, remaining_qty in still_to_do: all_op_processed = _create_link_for_product(ops.id, product_id, remaining_qty) and all_op_processed return (need_rereserve, all_op_processed) def picking_recompute_remaining_quantities(self, cr, uid, picking, context=None): need_rereserve = False all_op_processed = True if picking.pack_operation_ids: need_rereserve, all_op_processed = self.recompute_remaining_qty(cr, uid, picking, context=context) return need_rereserve, all_op_processed @api.cr_uid_ids_context def do_recompute_remaining_quantities(self, cr, uid, picking_ids, context=None): for picking in self.browse(cr, uid, picking_ids, context=context): if picking.pack_operation_ids: self.recompute_remaining_qty(cr, uid, picking, context=context) def _prepare_values_extra_move(self, cr, uid, op, product, remaining_qty, context=None): """ Creates an extra move when there is no corresponding original move to be copied """ uom_obj = self.pool.get("product.uom") uom_id = product.uom_id.id qty = remaining_qty if op.product_id and op.product_uom_id and op.product_uom_id.id != product.uom_id.id: if op.product_uom_id.factor > product.uom_id.factor: #If the pack operation's is a smaller unit uom_id = op.product_uom_id.id #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM qty = uom_obj._compute_qty_obj(cr, uid, product.uom_id, remaining_qty, op.product_uom_id, rounding_method='HALF-UP') picking = op.picking_id ref = product.default_code name = '[' + ref + ']' + ' ' + product.name if ref else product.name res = { 'picking_id': picking.id, 'location_id': picking.location_id.id, 'location_dest_id': picking.location_dest_id.id, 'product_id': product.id, 'product_uom': uom_id, 'product_uom_qty': qty, 'name': _('Extra Move: ') + name, 'state': 'draft', 'restrict_partner_id': op.owner_id, 'group_id': 
picking.group_id.id, } return res def _create_extra_moves(self, cr, uid, picking, context=None): '''This function creates move lines on a picking, at the time of do_transfer, based on unexpected product transfers (or exceeding quantities) found in the pack operations. ''' move_obj = self.pool.get('stock.move') operation_obj = self.pool.get('stock.pack.operation') moves = [] for op in picking.pack_operation_ids: for product_id, remaining_qty in operation_obj._get_remaining_prod_quantities(cr, uid, op, context=context).items(): product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) if float_compare(remaining_qty, 0, precision_rounding=product.uom_id.rounding) > 0: vals = self._prepare_values_extra_move(cr, uid, op, product, remaining_qty, context=context) moves.append(move_obj.create(cr, uid, vals, context=context)) if moves: move_obj.action_confirm(cr, uid, moves, context=context) return moves def rereserve_pick(self, cr, uid, ids, context=None): """ This can be used to provide a button that rereserves taking into account the existing pack operations """ for pick in self.browse(cr, uid, ids, context=context): self.rereserve_quants(cr, uid, pick, move_ids = [x.id for x in pick.move_lines], context=context) def rereserve_quants(self, cr, uid, picking, move_ids=[], context=None): """ Unreserve quants then try to reassign quants.""" stock_move_obj = self.pool.get('stock.move') if not move_ids: self.do_unreserve(cr, uid, [picking.id], context=context) self.action_assign(cr, uid, [picking.id], context=context) else: stock_move_obj.do_unreserve(cr, uid, move_ids, context=context) stock_move_obj.action_assign(cr, uid, move_ids, context=context) @api.cr_uid_ids_context def do_enter_transfer_details(self, cr, uid, picking, context=None): if not context: context = {} context.update({ 'active_model': self._name, 'active_ids': picking, 'active_id': len(picking) and picking[0] or False }) created_id = self.pool['stock.transfer_details'].create(cr, uid, {'picking_id': len(picking) and picking[0] or False}, context) return self.pool['stock.transfer_details'].wizard_view(cr, uid, created_id, context) @api.cr_uid_ids_context def do_transfer(self, cr, uid, picking_ids, context=None): """ If no pack operation, we do simple action_done of the picking Otherwise, do the pack operations """ if not context: context = {} stock_move_obj = self.pool.get('stock.move') for picking in self.browse(cr, uid, picking_ids, context=context): if not picking.pack_operation_ids: self.action_done(cr, uid, [picking.id], context=context) continue else: need_rereserve, all_op_processed = self.picking_recompute_remaining_quantities(cr, uid, picking, context=context) #create extra moves in the picking (unexpected product moves coming from pack operations) todo_move_ids = [] if not all_op_processed: todo_move_ids += self._create_extra_moves(cr, uid, picking, context=context) #split move lines if needed toassign_move_ids = [] for move in picking.move_lines: remaining_qty = move.remaining_qty if move.state in ('done', 'cancel'): #ignore stock moves cancelled or already done continue elif move.state == 'draft': toassign_move_ids.append(move.id) if float_compare(remaining_qty, 0, precision_rounding = move.product_id.uom_id.rounding) == 0: if move.state in ('draft', 'assigned', 'confirmed'): todo_move_ids.append(move.id) elif float_compare(remaining_qty,0, precision_rounding = move.product_id.uom_id.rounding) > 0 and \ float_compare(remaining_qty, move.product_qty, precision_rounding = 
move.product_id.uom_id.rounding) < 0: new_move = stock_move_obj.split(cr, uid, move, remaining_qty, context=context) todo_move_ids.append(move.id) #Assign move as it was assigned before toassign_move_ids.append(new_move) if need_rereserve or not all_op_processed: if not picking.location_id.usage in ("supplier", "production", "inventory"): self.rereserve_quants(cr, uid, picking, move_ids=todo_move_ids, context=context) self.do_recompute_remaining_quantities(cr, uid, [picking.id], context=context) if todo_move_ids and not context.get('do_only_split'): self.pool.get('stock.move').action_done(cr, uid, todo_move_ids, context=context) elif context.get('do_only_split'): context = dict(context, split=todo_move_ids) self._create_backorder(cr, uid, picking, context=context) if toassign_move_ids: stock_move_obj.action_assign(cr, uid, toassign_move_ids, context=context) return True @api.cr_uid_ids_context def do_split(self, cr, uid, picking_ids, context=None): """ just split the picking (create a backorder) without making it 'done' """ if context is None: context = {} ctx = context.copy() ctx['do_only_split'] = True return self.do_transfer(cr, uid, picking_ids, context=ctx) def get_next_picking_for_ui(self, cr, uid, context=None): """ returns the next pickings to process. Used in the barcode scanner UI""" if context is None: context = {} domain = [('state', 'in', ('assigned', 'partially_available'))] if context.get('default_picking_type_id'): domain.append(('picking_type_id', '=', context['default_picking_type_id'])) return self.search(cr, uid, domain, context=context) def action_done_from_ui(self, cr, uid, picking_id, context=None): """ called when button 'done' is pushed in the barcode scanner UI """ #write qty_done into field product_qty for every package_operation before doing the transfer pack_op_obj = self.pool.get('stock.pack.operation') for operation in self.browse(cr, uid, picking_id, context=context).pack_operation_ids: pack_op_obj.write(cr, uid, operation.id, {'product_qty': operation.qty_done}, context=context) self.do_transfer(cr, uid, [picking_id], context=context) #return id of next picking to work on return self.get_next_picking_for_ui(cr, uid, context=context) @api.cr_uid_ids_context def action_pack(self, cr, uid, picking_ids, operation_filter_ids=None, context=None): """ Create a package with the current pack_operation_ids of the picking that aren't yet in a pack. Used in the barcode scanner UI and the normal interface as well. 
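As a usage sketch (pick_id being a hypothetical id of a picking whose operations have their qty_done set), package_id = self.action_pack(cr, uid, [pick_id]) packs all remaining operations of the picking and returns the id of the created stock.quant.package.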
operation_filter_ids is used by the barcode scanner interface to specify a subset of operations to pack""" if operation_filter_ids is None: operation_filter_ids = [] stock_operation_obj = self.pool.get('stock.pack.operation') package_obj = self.pool.get('stock.quant.package') stock_move_obj = self.pool.get('stock.move') package_id = False for picking_id in picking_ids: operation_search_domain = [('picking_id', '=', picking_id), ('result_package_id', '=', False)] if operation_filter_ids != []: operation_search_domain.append(('id', 'in', operation_filter_ids)) operation_ids = stock_operation_obj.search(cr, uid, operation_search_domain, context=context) pack_operation_ids = [] if operation_ids: for operation in stock_operation_obj.browse(cr, uid, operation_ids, context=context): #If not all the qty of the operation was done, we have to split it into 2 operations op = operation if (operation.qty_done < operation.product_qty): new_operation = stock_operation_obj.copy(cr, uid, operation.id, {'product_qty': operation.qty_done,'qty_done': operation.qty_done}, context=context) stock_operation_obj.write(cr, uid, operation.id, {'product_qty': operation.product_qty - operation.qty_done,'qty_done': 0, 'lot_id': False}, context=context) op = stock_operation_obj.browse(cr, uid, new_operation, context=context) pack_operation_ids.append(op.id) if op.product_id and op.location_id and op.location_dest_id: stock_move_obj.check_tracking_product(cr, uid, op.product_id, op.lot_id.id, op.location_id, op.location_dest_id, context=context) package_id = package_obj.create(cr, uid, {}, context=context) stock_operation_obj.write(cr, uid, pack_operation_ids, {'result_package_id': package_id}, context=context) return package_id def process_product_id_from_ui(self, cr, uid, picking_id, product_id, op_id, increment=True, context=None): return self.pool.get('stock.pack.operation')._search_and_increment(cr, uid, picking_id, [('product_id', '=', product_id),('id', '=', op_id)], increment=increment, context=context) def process_barcode_from_ui(self, cr, uid, picking_id, barcode_str, visible_op_ids, context=None): '''This function is called each time the barcode scanner reads an input''' lot_obj = self.pool.get('stock.production.lot') package_obj = self.pool.get('stock.quant.package') product_obj = self.pool.get('product.product') stock_operation_obj = self.pool.get('stock.pack.operation') stock_location_obj = self.pool.get('stock.location') answer = {'filter_loc': False, 'operation_id': False} #check if the barcode corresponds to a location matching_location_ids = stock_location_obj.search(cr, uid, [('loc_barcode', '=', barcode_str)], context=context) if matching_location_ids: #if we have a location, return immediately with the location name location = stock_location_obj.browse(cr, uid, matching_location_ids[0], context=None) answer['filter_loc'] = stock_location_obj._name_get(cr, uid, location, context=None) answer['filter_loc_id'] = matching_location_ids[0] return answer #check if the barcode corresponds to a product matching_product_ids = product_obj.search(cr, uid, ['|', ('ean13', '=', barcode_str), ('default_code', '=', barcode_str)], context=context) if matching_product_ids: op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', matching_product_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer #check if the barcode corresponds to a lot matching_lot_ids = lot_obj.search(cr, uid, [('name', '=', barcode_str)], 
context=context) if matching_lot_ids: lot = lot_obj.browse(cr, uid, matching_lot_ids[0], context=context) op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('product_id', '=', lot.product_id.id), ('lot_id', '=', lot.id)], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer #check if the barcode corresponds to a package matching_package_ids = package_obj.search(cr, uid, [('name', '=', barcode_str)], context=context) if matching_package_ids: op_id = stock_operation_obj._search_and_increment(cr, uid, picking_id, [('package_id', '=', matching_package_ids[0])], filter_visible=True, visible_op_ids=visible_op_ids, increment=True, context=context) answer['operation_id'] = op_id return answer return answer class stock_production_lot(osv.osv): _name = 'stock.production.lot' _inherit = ['mail.thread'] _description = 'Lot/Serial' _columns = { 'name': fields.char('Serial Number', required=True, help="Unique Serial Number"), 'ref': fields.char('Internal Reference', help="Internal reference number in case it differs from the manufacturer's serial number"), 'product_id': fields.many2one('product.product', 'Product', required=True, domain=[('type', '<>', 'service')]), 'quant_ids': fields.one2many('stock.quant', 'lot_id', 'Quants', readonly=True), 'create_date': fields.datetime('Creation Date'), } _defaults = { 'name': lambda x, y, z, c: x.pool.get('ir.sequence').get(y, z, 'stock.lot.serial'), 'product_id': lambda x, y, z, c: c.get('product_id', False), } _sql_constraints = [ ('name_ref_uniq', 'unique (name, ref, product_id)', 'The combination of serial number, internal reference and product must be unique!'), ] def action_traceability(self, cr, uid, ids, context=None): """ It traces the information of lots @param self: The object pointer. 
@param cr: A database cursor @param uid: ID of the user currently logged in @param ids: List of IDs selected @param context: A standard dictionary @return: A dictionary of values """ quant_obj = self.pool.get("stock.quant") quants = quant_obj.search(cr, uid, [('lot_id', 'in', ids)], context=context) moves = set() for quant in quant_obj.browse(cr, uid, quants, context=context): moves |= {move.id for move in quant.history_ids} if moves: return { 'domain': "[('id','in',[" + ','.join(map(str, list(moves))) + "])]", 'name': _('Traceability'), 'view_mode': 'tree,form', 'view_type': 'form', 'context': {'tree_view_ref': 'stock.view_move_tree'}, 'res_model': 'stock.move', 'type': 'ir.actions.act_window', } return False # ---------------------------------------------------- # Move # ---------------------------------------------------- class stock_move(osv.osv): _name = "stock.move" _description = "Stock Move" _order = 'date_expected desc, id' _log_create = False def get_price_unit(self, cr, uid, move, context=None): """ Returns the unit price to store on the quant """ return move.price_unit or move.product_id.standard_price def name_get(self, cr, uid, ids, context=None): res = [] for line in self.browse(cr, uid, ids, context=context): name = line.location_id.name + ' > ' + line.location_dest_id.name if line.product_id.code: name = line.product_id.code + ': ' + name if line.picking_id.origin: name = line.picking_id.origin + '/ ' + name res.append((line.id, name)) return res def _quantity_normalize(self, cr, uid, ids, name, args, context=None): uom_obj = self.pool.get('product.uom') res = {} for m in self.browse(cr, uid, ids, context=context): res[m.id] = uom_obj._compute_qty_obj(cr, uid, m.product_uom, m.product_uom_qty, m.product_id.uom_id, context=context) return res def _get_remaining_qty(self, cr, uid, ids, field_name, args, context=None): uom_obj = self.pool.get('product.uom') res = {} for move in self.browse(cr, uid, ids, context=context): qty = move.product_qty for record in move.linked_move_operation_ids: qty -= record.qty # Keeping in product default UoM res[move.id] = float_round(qty, precision_rounding=move.product_id.uom_id.rounding) return res def _get_lot_ids(self, cr, uid, ids, field_name, args, context=None): res = dict.fromkeys(ids, False) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': res[move.id] = [q.lot_id.id for q in move.quant_ids if q.lot_id] else: res[move.id] = [q.lot_id.id for q in move.reserved_quant_ids if q.lot_id] return res def _get_product_availability(self, cr, uid, ids, field_name, args, context=None): quant_obj = self.pool.get('stock.quant') res = dict.fromkeys(ids, False) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': res[move.id] = move.product_qty else: sublocation_ids = self.pool.get('stock.location').search(cr, uid, [('id', 'child_of', [move.location_id.id])], context=context) quant_ids = quant_obj.search(cr, uid, [('location_id', 'in', sublocation_ids), ('product_id', '=', move.product_id.id), ('reservation_id', '=', False)], context=context) availability = 0 for quant in quant_obj.browse(cr, uid, quant_ids, context=context): availability += quant.qty res[move.id] = min(move.product_qty, availability) return res def _get_string_qty_information(self, cr, uid, ids, field_name, args, context=None): settings_obj = self.pool.get('stock.config.settings') uom_obj = self.pool.get('product.uom') res = dict.fromkeys(ids, '') precision = self.pool['decimal.precision'].precision_get(cr, uid, 'Product 
Unit of Measure') for move in self.browse(cr, uid, ids, context=context): if move.state in ('draft', 'done', 'cancel') or move.location_id.usage != 'internal': res[move.id] = '' # 'not applicable' or 'n/a' could work too continue total_available = min(move.product_qty, move.reserved_availability + move.availability) total_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, total_available, move.product_uom, round=False, context=context) total_available = float_round(total_available, precision_digits=precision) info = str(total_available) #look in the settings if we need to display the UoM name or not config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context) if config_ids: stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context) if stock_settings.group_uom: info += ' ' + move.product_uom.name if move.reserved_availability: if move.reserved_availability != total_available: #some of the available quantity is assigned and some are available but not reserved reserved_available = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, move.reserved_availability, move.product_uom, round=False, context=context) reserved_available = float_round(reserved_available, precision_digits=precision) info += _(' (%s reserved)') % str(reserved_available) else: #all available quantity is assigned info += _(' (reserved)') res[move.id] = info return res def _get_reserved_availability(self, cr, uid, ids, field_name, args, context=None): res = dict.fromkeys(ids, 0) for move in self.browse(cr, uid, ids, context=context): res[move.id] = sum([quant.qty for quant in move.reserved_quant_ids]) return res def _get_move(self, cr, uid, ids, context=None): res = set() for quant in self.browse(cr, uid, ids, context=context): if quant.reservation_id: res.add(quant.reservation_id.id) return list(res) def _get_move_ids(self, cr, uid, ids, context=None): res = [] for picking in self.browse(cr, uid, ids, context=context): res += [x.id for x in picking.move_lines] return res def _get_moves_from_prod(self, cr, uid, ids, context=None): if ids: return self.pool.get('stock.move').search(cr, uid, [('product_id', 'in', ids)], context=context) return [] def _set_product_qty(self, cr, uid, id, field, value, arg, context=None): """ The meaning of product_qty field changed lately and is now a functional field computing the quantity in the default product UoM. This code has been added to raise an error if a write is made given a value for `product_qty`, where the same write should set the `product_uom_qty` field instead, in order to detect errors. 
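For instance, to change the quantity of a move one would write the UoM quantity instead (move_id being a hypothetical move id): self.write(cr, uid, [move_id], {'product_uom_qty': 5.0})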
""" raise osv.except_osv(_('Programming Error!'), _('The requested operation cannot be processed because of a programming error setting the `product_qty` field instead of the `product_uom_qty`.')) _columns = { 'name': fields.char('Description', required=True, select=True), 'priority': fields.selection(procurement.PROCUREMENT_PRIORITIES, 'Priority'), 'create_date': fields.datetime('Creation Date', readonly=True, select=True), 'date': fields.datetime('Date', required=True, select=True, help="Move date: scheduled date until move is done, then date of actual move processing", states={'done': [('readonly', True)]}), 'date_expected': fields.datetime('Expected Date', states={'done': [('readonly', True)]}, required=True, select=True, help="Scheduled date for the processing of this move"), 'product_id': fields.many2one('product.product', 'Product', required=True, select=True, domain=[('type', '<>', 'service')], states={'done': [('readonly', True)]}), 'product_qty': fields.function(_quantity_normalize, fnct_inv=_set_product_qty, type='float', digits=0, store={ _name: (lambda self, cr, uid, ids, c={}: ids, ['product_id', 'product_uom', 'product_uom_qty'], 10), }, string='Quantity', help='Quantity in the default UoM of the product'), 'product_uom_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'done': [('readonly', True)]}, help="This is the quantity of products from an inventory " "point of view. For moves in the state 'done', this is the " "quantity of products that were actually moved. For other " "moves, this is the quantity of product that is planned to " "be moved. Lowering this quantity does not generate a " "backorder. Changing this quantity on assigned moves affects " "the product reservation, and should be done with care." ), 'product_uom': fields.many2one('product.uom', 'Unit of Measure', required=True, states={'done': [('readonly', True)]}), 'product_uos_qty': fields.float('Quantity (UOS)', digits_compute=dp.get_precision('Product UoS'), states={'done': [('readonly', True)]}), 'product_uos': fields.many2one('product.uom', 'Product UOS', states={'done': [('readonly', True)]}), 'product_tmpl_id': fields.related('product_id', 'product_tmpl_id', type='many2one', relation='product.template', string='Product Template'), 'product_packaging': fields.many2one('product.packaging', 'Prefered Packaging', help="It specifies attributes of packaging like type, quantity of packaging,etc."), 'location_id': fields.many2one('stock.location', 'Source Location', required=True, select=True, auto_join=True, states={'done': [('readonly', True)]}, help="Sets a location if you produce at a fixed location. 
This can be a partner location if you subcontract the manufacturing operations."), 'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True, states={'done': [('readonly', True)]}, select=True, auto_join=True, help="Location where the system will stock the finished products."), 'partner_id': fields.many2one('res.partner', 'Destination Address ', states={'done': [('readonly', True)]}, help="Optional address where goods are to be delivered, specifically used for allotment"), 'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Optional: next stock move when chaining them", select=True, copy=False), 'move_orig_ids': fields.one2many('stock.move', 'move_dest_id', 'Original Move', help="Optional: previous stock move when chaining them", select=True), 'picking_id': fields.many2one('stock.picking', 'Reference', select=True, states={'done': [('readonly', True)]}), 'note': fields.text('Notes'), 'state': fields.selection([('draft', 'New'), ('cancel', 'Cancelled'), ('waiting', 'Waiting Another Move'), ('confirmed', 'Waiting Availability'), ('assigned', 'Available'), ('done', 'Done'), ], 'Status', readonly=True, select=True, copy=False, help= "* New: When the stock move is created and not yet confirmed.\n"\ "* Waiting Another Move: This state can be seen when a move is waiting for another one, for example in a chained flow.\n"\ "* Waiting Availability: This state is reached when the procurement resolution is not straightforward. It may need the scheduler to run, a component to be manufactured...\n"\ "* Available: When products are reserved, it is set to \'Available\'.\n"\ "* Done: When the shipment is processed, the state is \'Done\'."), 'partially_available': fields.boolean('Partially Available', readonly=True, help="Checks if the move has some stock reserved", copy=False), 'price_unit': fields.float('Unit Price', help="Technical field used to record the product cost set by the user during a picking confirmation (when costing method used is 'average price' or 'real'). Value given in company currency and in product uom."), # as it's a technical field, we intentionally don't provide the digits attribute 'company_id': fields.many2one('res.company', 'Company', required=True, select=True), 'split_from': fields.many2one('stock.move', string="Move Split From", help="Technical field used to track the origin of a split move, which can be useful for debugging", copy=False), 'backorder_id': fields.related('picking_id', 'backorder_id', type='many2one', relation="stock.picking", string="Back Order of", select=True), 'origin': fields.char("Source"), 'procure_method': fields.selection([('make_to_stock', 'Default: Take From Stock'), ('make_to_order', 'Advanced: Apply Procurement Rules')], 'Supply Method', required=True, help="""By default, the system will take from the stock in the source location and passively wait for availability. The other possibility allows you to directly create a procurement on the source location (and thus ignore its current stock) to gather products. 
If we want to chain moves and have this one to wait for the previous, this second option should be chosen."""), # used for colors in tree views: 'scrapped': fields.related('location_dest_id', 'scrap_location', type='boolean', relation='stock.location', string='Scrapped', readonly=True), 'quant_ids': fields.many2many('stock.quant', 'stock_quant_move_rel', 'move_id', 'quant_id', 'Moved Quants', copy=False), 'reserved_quant_ids': fields.one2many('stock.quant', 'reservation_id', 'Reserved quants'), 'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'move_id', string='Linked Operations', readonly=True, help='Operations that impact this move for the computation of the remaining quantities'), 'remaining_qty': fields.function(_get_remaining_qty, type='float', string='Remaining Quantity', digits=0, states={'done': [('readonly', True)]}, help="Remaining Quantity in default UoM according to operations matched with this move"), 'procurement_id': fields.many2one('procurement.order', 'Procurement'), 'group_id': fields.many2one('procurement.group', 'Procurement Group'), 'rule_id': fields.many2one('procurement.rule', 'Procurement Rule', help='The pull rule that created this stock move'), 'push_rule_id': fields.many2one('stock.location.path', 'Push Rule', help='The push rule that created this stock move'), 'propagate': fields.boolean('Propagate cancel and split', help='If checked, when this move is cancelled, cancel the linked move too'), 'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type'), 'inventory_id': fields.many2one('stock.inventory', 'Inventory'), 'lot_ids': fields.function(_get_lot_ids, type='many2many', relation='stock.production.lot', string='Lots'), 'origin_returned_move_id': fields.many2one('stock.move', 'Origin return move', help='move that created the return move', copy=False), 'returned_move_ids': fields.one2many('stock.move', 'origin_returned_move_id', 'All returned moves', help='Optional: all returned moves created from this move'), 'reserved_availability': fields.function(_get_reserved_availability, type='float', string='Quantity Reserved', readonly=True, help='Quantity that has already been reserved for this move'), 'availability': fields.function(_get_product_availability, type='float', string='Quantity Available', readonly=True, help='Quantity in stock that can still be reserved for this move'), 'string_availability_info': fields.function(_get_string_qty_information, type='text', string='Availability', readonly=True, help='Show various information on stock availability for this move'), 'restrict_lot_id': fields.many2one('stock.production.lot', 'Lot', help="Technical field used to depict a restriction on the lot of quants to consider when marking this move as 'done'"), 'restrict_partner_id': fields.many2one('res.partner', 'Owner ', help="Technical field used to depict a restriction on the ownership of quants to consider when marking this move as 'done'"), 'route_ids': fields.many2many('stock.location.route', 'stock_location_route_move', 'move_id', 'route_id', 'Destination route', help="Preferred route to be followed by the procurement order"), 'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Technical field depicting the warehouse to consider for the route selection on the next procurement (if any)."), } def _default_location_destination(self, cr, uid, context=None): context = context or {} if context.get('default_picking_type_id', False): pick_type = self.pool.get('stock.picking.type').browse(cr, uid, 
context['default_picking_type_id'], context=context) return pick_type.default_location_dest_id and pick_type.default_location_dest_id.id or False return False def _default_location_source(self, cr, uid, context=None): context = context or {} if context.get('default_picking_type_id', False): pick_type = self.pool.get('stock.picking.type').browse(cr, uid, context['default_picking_type_id'], context=context) return pick_type.default_location_src_id and pick_type.default_location_src_id.id or False return False def _default_destination_address(self, cr, uid, context=None): return False def _default_group_id(self, cr, uid, context=None): context = context or {} if context.get('default_picking_id', False): picking = self.pool.get('stock.picking').browse(cr, uid, context['default_picking_id'], context=context) return picking.group_id.id return False _defaults = { 'location_id': _default_location_source, 'location_dest_id': _default_location_destination, 'partner_id': _default_destination_address, 'state': 'draft', 'priority': '1', 'product_uom_qty': 1.0, 'scrapped': False, 'date': fields.datetime.now, 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.move', context=c), 'date_expected': fields.datetime.now, 'procure_method': 'make_to_stock', 'propagate': True, 'partially_available': False, 'group_id': _default_group_id, } def _check_uom(self, cr, uid, ids, context=None): for move in self.browse(cr, uid, ids, context=context): if move.product_id.uom_id.category_id.id != move.product_uom.category_id.id: return False return True _constraints = [ (_check_uom, 'You try to move a product using a UoM that is not compatible with the UoM of the product moved. Please use an UoM in the same UoM category.', ['product_uom']), ] def init(self, cr): cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('stock_move_product_location_index',)) if not cr.fetchone(): cr.execute('CREATE INDEX stock_move_product_location_index ON stock_move (product_id, location_id, location_dest_id, company_id, state)') @api.cr_uid_ids_context def do_unreserve(self, cr, uid, move_ids, context=None): quant_obj = self.pool.get("stock.quant") for move in self.browse(cr, uid, move_ids, context=context): if move.state in ('done', 'cancel'): raise osv.except_osv(_('Operation Forbidden!'), _('Cannot unreserve a done move')) quant_obj.quants_unreserve(cr, uid, move, context=context) if self.find_move_ancestors(cr, uid, move, context=context): self.write(cr, uid, [move.id], {'state': 'waiting'}, context=context) else: self.write(cr, uid, [move.id], {'state': 'confirmed'}, context=context) def _prepare_procurement_from_move(self, cr, uid, move, context=None): origin = (move.group_id and (move.group_id.name + ":") or "") + (move.rule_id and move.rule_id.name or move.origin or move.picking_id.name or "/") group_id = move.group_id and move.group_id.id or False if move.rule_id: if move.rule_id.group_propagation_option == 'fixed' and move.rule_id.group_id: group_id = move.rule_id.group_id.id elif move.rule_id.group_propagation_option == 'none': group_id = False return { 'name': move.rule_id and move.rule_id.name or "/", 'origin': origin, 'company_id': move.company_id and move.company_id.id or False, 'date_planned': move.date, 'product_id': move.product_id.id, 'product_qty': move.product_uom_qty, 'product_uom': move.product_uom.id, 'product_uos_qty': (move.product_uos and move.product_uos_qty) or move.product_uom_qty, 'product_uos': (move.product_uos and move.product_uos.id) or 
move.product_uom.id, 'location_id': move.location_id.id, 'move_dest_id': move.id, 'group_id': group_id, 'route_ids': [(4, x.id) for x in move.route_ids], 'warehouse_id': move.warehouse_id.id or (move.picking_type_id and move.picking_type_id.warehouse_id.id or False), 'priority': move.priority, } def _push_apply(self, cr, uid, moves, context=None): push_obj = self.pool.get("stock.location.path") for move in moves: #1) if the move is already chained, there is no need to check push rules #2) if the move is a returned move, we don't want to check push rules, as returning a returned move is the only decent way # to receive goods without triggering the push rules again (which would duplicate chained operations) if not move.move_dest_id and not move.origin_returned_move_id: domain = [('location_from_id', '=', move.location_dest_id.id)] #priority goes to the route defined on the product and product category route_ids = [x.id for x in move.product_id.route_ids + move.product_id.categ_id.total_route_ids] rules = push_obj.search(cr, uid, domain + [('route_id', 'in', route_ids)], order='route_sequence, sequence', context=context) if not rules: #then we search on the warehouse if a rule can apply wh_route_ids = [] if move.warehouse_id: wh_route_ids = [x.id for x in move.warehouse_id.route_ids] elif move.picking_type_id and move.picking_type_id.warehouse_id: wh_route_ids = [x.id for x in move.picking_type_id.warehouse_id.route_ids] if wh_route_ids: rules = push_obj.search(cr, uid, domain + [('route_id', 'in', wh_route_ids)], order='route_sequence, sequence', context=context) if not rules: #if no specialized push rule has been found yet, we try to find a general one (without route) rules = push_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context) if rules: rule = push_obj.browse(cr, uid, rules[0], context=context) push_obj._apply(cr, uid, rule, move, context=context) return True def _create_procurement(self, cr, uid, move, context=None): """ This will create a procurement order """ return self.pool.get("procurement.order").create(cr, uid, self._prepare_procurement_from_move(cr, uid, move, context=context), context=context) def _create_procurements(self, cr, uid, moves, context=None): res = [] for move in moves: res.append(self._create_procurement(cr, uid, move, context=context)) return res def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] # Check that we do not modify a stock.move which is done frozen_fields = set(['product_qty', 'product_uom', 'product_uos_qty', 'product_uos', 'location_id', 'location_dest_id', 'product_id']) for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': if frozen_fields.intersection(vals): raise osv.except_osv(_('Operation Forbidden!'), _('Quantities, Units of Measure, Products and Locations cannot be modified on stock moves that have already been processed (except by the Administrator).')) propagated_changes_dict = {} #propagation of quantity change if vals.get('product_uom_qty'): propagated_changes_dict['product_uom_qty'] = vals['product_uom_qty'] if vals.get('product_uom_id'): propagated_changes_dict['product_uom_id'] = vals['product_uom_id'] #propagation of expected date: propagated_date_field = False if vals.get('date_expected'): #propagate any manual change of the expected date propagated_date_field = 'date_expected' elif (vals.get('state', '') == 'done' and vals.get('date')): #propagate also any delta observed when setting the 
move as done propagated_date_field = 'date' if not context.get('do_not_propagate', False) and (propagated_date_field or propagated_changes_dict): #any propagation is (maybe) needed for move in self.browse(cr, uid, ids, context=context): if move.move_dest_id and move.propagate: if 'date_expected' in propagated_changes_dict: propagated_changes_dict.pop('date_expected') if propagated_date_field: current_date = datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_date = datetime.strptime(vals.get(propagated_date_field), DEFAULT_SERVER_DATETIME_FORMAT) delta = new_date - current_date if abs(delta.days) >= move.company_id.propagation_minimum_delta: old_move_date = datetime.strptime(move.move_dest_id.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) new_move_date = (old_move_date + relativedelta.relativedelta(days=delta.days or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT) propagated_changes_dict['date_expected'] = new_move_date #For pushed moves as well as for pulled moves, propagate by recursive call of write(). #Note that, for pulled moves we intentionally don't propagate on the procurement. if propagated_changes_dict: self.write(cr, uid, [move.move_dest_id.id], propagated_changes_dict, context=context) return super(stock_move, self).write(cr, uid, ids, vals, context=context) def onchange_quantity(self, cr, uid, ids, product_id, product_qty, product_uom, product_uos): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_qty: Changed Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uos_qty': 0.00 } warning = {} if (not product_id) or (product_qty <= 0.0): result['product_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # Warn if the quantity was decreased if ids: for move in self.read(cr, uid, ids, ['product_qty']): if product_qty < move['product_qty']: warning.update({ 'title': _('Information'), 'message': _("By changing this quantity here, you accept the " "new quantity as complete: Odoo will not " "automatically generate a back order.")}) break if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product UoS') result['product_uos_qty'] = float_round(product_qty * uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uos_qty'] = product_qty return {'value': result, 'warning': warning} def onchange_uos_quantity(self, cr, uid, ids, product_id, product_uos_qty, product_uos, product_uom): """ On change of product quantity finds UoM and UoS quantities @param product_id: Product id @param product_uos_qty: Changed UoS Quantity of product @param product_uom: Unit of measure of product @param product_uos: Unit of sale of product @return: Dictionary of values """ result = { 'product_uom_qty': 0.00 } if (not product_id) or (product_uos_qty <= 0.0): result['product_uos_qty'] = 0.0 return {'value': result} product_obj = self.pool.get('product.product') uos_coeff = product_obj.read(cr, uid, product_id, ['uos_coeff']) # No warning if the quantity was decreased to avoid double warnings: # The clients should call onchange_quantity too anyway if product_uos and product_uom and (product_uom != product_uos): precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure') result['product_uom_qty'] 
= float_round(product_uos_qty / uos_coeff['uos_coeff'], precision_digits=precision) else: result['product_uom_qty'] = product_uos_qty return {'value': result} def onchange_product_id(self, cr, uid, ids, prod_id=False, loc_id=False, loc_dest_id=False, partner_id=False): """ On change of product id, it finds UoM, UoS, quantity and UoS quantity. @param prod_id: Changed Product id @param loc_id: Source location id @param loc_dest_id: Destination location id @param partner_id: Address id of partner @return: Dictionary of values """ if not prod_id: return {} user = self.pool.get('res.users').browse(cr, uid, uid) lang = user and user.lang or False if partner_id: addr_rec = self.pool.get('res.partner').browse(cr, uid, partner_id) if addr_rec: lang = addr_rec and addr_rec.lang or False ctx = {'lang': lang} product = self.pool.get('product.product').browse(cr, uid, [prod_id], context=ctx)[0] uos_id = product.uos_id and product.uos_id.id or False result = { 'name': product.partner_ref, 'product_uom': product.uom_id.id, 'product_uos': uos_id, 'product_uom_qty': 1.00, 'product_uos_qty': self.pool.get('stock.move').onchange_quantity(cr, uid, ids, prod_id, 1.00, product.uom_id.id, uos_id)['value']['product_uos_qty'], } if loc_id: result['location_id'] = loc_id if loc_dest_id: result['location_dest_id'] = loc_dest_id return {'value': result} def _prepare_picking_assign(self, cr, uid, move, context=None): """ Prepares a new picking for this move as it could not be assigned to another picking. This method is designed to be inherited. """ values = { 'origin': move.origin, 'company_id': move.company_id and move.company_id.id or False, 'move_type': move.group_id and move.group_id.move_type or 'direct', 'partner_id': move.partner_id.id or False, 'picking_type_id': move.picking_type_id and move.picking_type_id.id or False, } return values @api.cr_uid_ids_context def _picking_assign(self, cr, uid, move_ids, procurement_group, location_from, location_to, context=None): """Assign a picking on the given move_ids, which is a list of moves supposed to share the same procurement_group, location_from and location_to (and company). Those attributes are also given as parameters. """ pick_obj = self.pool.get("stock.picking") # Use a SQL query as doing it with the ORM would split it into different queries with id IN (,,) # In the next version, the locations on the picking should be stored again. query = """ SELECT stock_picking.id FROM stock_picking, stock_move WHERE stock_picking.state in ('draft', 'confirmed', 'waiting') AND stock_move.picking_id = stock_picking.id AND stock_move.location_id = %s AND stock_move.location_dest_id = %s AND """ params = (location_from, location_to) if not procurement_group: query += "stock_picking.group_id IS NULL LIMIT 1" else: query += "stock_picking.group_id = %s LIMIT 1" params += (procurement_group,) cr.execute(query, params) [pick] = cr.fetchone() or [None] if not pick: move = self.browse(cr, uid, move_ids, context=context)[0] values = self._prepare_picking_assign(cr, uid, move, context=context) pick = pick_obj.create(cr, uid, values, context=context) return self.write(cr, uid, move_ids, {'picking_id': pick}, context=context) def onchange_date(self, cr, uid, ids, date, date_expected, context=None): """ On change of Scheduled Date gives a Move date. 
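If no scheduled date is given, the current server time is used as a fallback.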
@param date_expected: Scheduled Date @param date: Move Date @return: Move Date """ if not date_expected: date_expected = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT) return {'value': {'date': date_expected}} def attribute_price(self, cr, uid, move, context=None): """ Attribute price to move, important in inter-company moves or receipts with only one partner """ if not move.price_unit: price = move.product_id.standard_price self.write(cr, uid, [move.id], {'price_unit': price}) def action_confirm(self, cr, uid, ids, context=None): """ Confirms stock move or puts it in waiting if it's linked to another move. @return: List of ids. """ if not context: context = {} if isinstance(ids, (int, long)): ids = [ids] states = { 'confirmed': [], 'waiting': [] } to_assign = {} for move in self.browse(cr, uid, ids, context=context): self.attribute_price(cr, uid, move, context=context) state = 'confirmed' #if the move is preceded, then it's waiting (if the preceding move is done, then action_assign has been called already and its state is already available) if move.move_orig_ids: state = 'waiting' #if the move is split and one of its ancestors was preceded, then it's waiting as well elif move.split_from: move2 = move.split_from while move2 and state != 'waiting': if move2.move_orig_ids: state = 'waiting' move2 = move2.split_from states[state].append(move.id) if not move.picking_id and move.picking_type_id: key = (move.group_id.id, move.location_id.id, move.location_dest_id.id) if key not in to_assign: to_assign[key] = [] to_assign[key].append(move.id) moves = [move for move in self.browse(cr, uid, states['confirmed'], context=context) if move.procure_method == 'make_to_order'] self._create_procurements(cr, uid, moves, context=context) for move in moves: states['waiting'].append(move.id) states['confirmed'].remove(move.id) for state, write_ids in states.items(): if len(write_ids): self.write(cr, uid, write_ids, {'state': state}) #assign picking in batch for all confirmed moves that share the same details for key, move_ids in to_assign.items(): procurement_group, location_from, location_to = key self._picking_assign(cr, uid, move_ids, procurement_group, location_from, location_to, context=context) moves = self.browse(cr, uid, ids, context=context) self._push_apply(cr, uid, moves, context=context) return ids def force_assign(self, cr, uid, ids, context=None): """ Changes the state to assigned. @return: True """ return self.write(cr, uid, ids, {'state': 'assigned'}, context=context) def check_tracking_product(self, cr, uid, product, lot_id, location, location_dest, context=None): check = False if product.track_all and not location_dest.usage == 'inventory': check = True elif product.track_incoming and location.usage in ('supplier', 'transit', 'inventory') and location_dest.usage == 'internal': check = True elif product.track_outgoing and location_dest.usage in ('customer', 'transit') and location.usage == 'internal': check = True if check and not lot_id: raise osv.except_osv(_('Warning!'), _('You must assign a serial number for the product %s') % (product.name)) def check_tracking(self, cr, uid, move, lot_id, context=None): """ Checks whether a serial number is assigned to the stock move and raises an error if one is required but missing. """ self.check_tracking_product(cr, uid, move.product_id, lot_id, move.location_id, move.location_dest_id, context=context) def action_assign(self, cr, uid, ids, context=None): """ Checks the product type and accordingly writes the state. 
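Quants are reserved first against the specific domains of the linked pack operations (whole packages first, then operations with a lot, then the rest); any remaining quantity is then reserved without a specific domain. Consumable products and moves sourced from supplier/inventory/production locations are force-assigned at the end.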
""" context = context or {} quant_obj = self.pool.get("stock.quant") to_assign_moves = [] main_domain = {} todo_moves = [] operations = set() for move in self.browse(cr, uid, ids, context=context): if move.state not in ('confirmed', 'waiting', 'assigned'): continue if move.location_id.usage in ('supplier', 'inventory', 'production'): to_assign_moves.append(move.id) #in case the move is returned, we want to try to find quants before forcing the assignment if not move.origin_returned_move_id: continue if move.product_id.type == 'consu': to_assign_moves.append(move.id) continue else: todo_moves.append(move) #we always keep the quants already assigned and try to find the remaining quantity on quants not assigned only main_domain[move.id] = [('reservation_id', '=', False), ('qty', '>', 0)] #if the move is preceeded, restrict the choice of quants in the ones moved previously in original move ancestors = self.find_move_ancestors(cr, uid, move, context=context) if move.state == 'waiting' and not ancestors: #if the waiting move hasn't yet any ancestor (PO/MO not confirmed yet), don't find any quant available in stock main_domain[move.id] += [('id', '=', False)] elif ancestors: main_domain[move.id] += [('history_ids', 'in', ancestors)] #if the move is returned from another, restrict the choice of quants to the ones that follow the returned move if move.origin_returned_move_id: main_domain[move.id] += [('history_ids', 'in', move.origin_returned_move_id.id)] for link in move.linked_move_operation_ids: operations.add(link.operation_id) # Check all ops and sort them: we want to process first the packages, then operations with lot then the rest operations = list(operations) operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) for ops in operations: #first try to find quants based on specific domains given by linked operations for record in ops.linked_move_operation_ids: move = record.move_id if move.id in main_domain: domain = main_domain[move.id] + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context) qty = record.qty if qty: quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, qty, domain=domain, prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, record, context=context) for move in todo_moves: if move.linked_move_operation_ids: continue #then if the move isn't totally assigned, try to find quants without any specific domain if move.state != 'assigned': qty_already_assigned = move.reserved_availability qty = move.product_qty - qty_already_assigned quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain[move.id], prefered_domain_list=[], restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, context=context) #force assignation of consumable products and incoming from supplier/inventory/production if to_assign_moves: self.force_assign(cr, uid, to_assign_moves, context=context) def action_cancel(self, cr, uid, ids, context=None): """ Cancels the moves and if all moves are cancelled it cancels the picking. 
@return: True """ procurement_obj = self.pool.get('procurement.order') context = context or {} procs_to_check = [] for move in self.browse(cr, uid, ids, context=context): if move.state == 'done': raise osv.except_osv(_('Operation Forbidden!'), _('You cannot cancel a stock move that has been set to \'Done\'.')) if move.reserved_quant_ids: self.pool.get("stock.quant").quants_unreserve(cr, uid, move, context=context) if context.get('cancel_procurement'): if move.propagate: procurement_ids = procurement_obj.search(cr, uid, [('move_dest_id', '=', move.id)], context=context) procurement_obj.cancel(cr, uid, procurement_ids, context=context) else: if move.move_dest_id: if move.propagate: self.action_cancel(cr, uid, [move.move_dest_id.id], context=context) elif move.move_dest_id.state == 'waiting': #If waiting, the chain will be broken and we are not sure if we can still wait for it (=> could take from stock instead) self.write(cr, uid, [move.move_dest_id.id], {'state': 'confirmed'}, context=context) if move.procurement_id: # Does the same as procurement check, only eliminating a refresh procs_to_check.append(move.procurement_id.id) res = self.write(cr, uid, ids, {'state': 'cancel', 'move_dest_id': False}, context=context) if procs_to_check: procurement_obj.check(cr, uid, procs_to_check, context=context) return res def _check_package_from_moves(self, cr, uid, ids, context=None): pack_obj = self.pool.get("stock.quant.package") packs = set() for move in self.browse(cr, uid, ids, context=context): packs |= set([q.package_id for q in move.quant_ids if q.package_id and q.qty > 0]) return pack_obj._check_location_constraint(cr, uid, list(packs), context=context) def find_move_ancestors(self, cr, uid, move, context=None): '''Find the first level ancestors of given move ''' ancestors = [] move2 = move while move2: ancestors += [x.id for x in move2.move_orig_ids] #loop on the split_from to find the ancestor of split moves only if the move has not direct ancestor (priority goes to them) move2 = not move2.move_orig_ids and move2.split_from or False return ancestors @api.cr_uid_ids_context def recalculate_move_state(self, cr, uid, move_ids, context=None): '''Recompute the state of moves given because their reserved quants were used to fulfill another operation''' for move in self.browse(cr, uid, move_ids, context=context): vals = {} reserved_quant_ids = move.reserved_quant_ids if len(reserved_quant_ids) > 0 and not move.partially_available: vals['partially_available'] = True if len(reserved_quant_ids) == 0 and move.partially_available: vals['partially_available'] = False if move.state == 'assigned': if self.find_move_ancestors(cr, uid, move, context=context): vals['state'] = 'waiting' else: vals['state'] = 'confirmed' if vals: self.write(cr, uid, [move.id], vals, context=context) def action_done(self, cr, uid, ids, context=None): """ Process completely the moves given as ids and if all moves are done, it will finish the picking. 
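Draft moves are confirmed first. The pack operations are then processed (whole packages first, then operations with a lot, then the rest), the quantities still remaining on the moves are transferred directly, destination moves are reserved and the related procurements are checked.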
""" context = context or {} picking_obj = self.pool.get("stock.picking") quant_obj = self.pool.get("stock.quant") todo = [move.id for move in self.browse(cr, uid, ids, context=context) if move.state == "draft"] if todo: ids = self.action_confirm(cr, uid, todo, context=context) pickings = set() procurement_ids = set() #Search operations that are linked to the moves operations = set() move_qty = {} for move in self.browse(cr, uid, ids, context=context): move_qty[move.id] = move.product_qty for link in move.linked_move_operation_ids: operations.add(link.operation_id) #Sort operations according to entire packages first, then package + lot, package only, lot only operations = list(operations) operations.sort(key=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (x.lot_id and -1 or 0)) for ops in operations: if ops.picking_id: pickings.add(ops.picking_id.id) main_domain = [('qty', '>', 0)] for record in ops.linked_move_operation_ids: move = record.move_id self.check_tracking(cr, uid, move, not ops.product_id and ops.package_id.id or ops.lot_id.id, context=context) prefered_domain = [('reservation_id', '=', move.id)] fallback_domain = [('reservation_id', '=', False)] fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)] prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2] dom = main_domain + self.pool.get('stock.move.operation.link').get_specific_domain(cr, uid, record, context=context) quants = quant_obj.quants_get_prefered_domain(cr, uid, ops.location_id, move.product_id, record.qty, domain=dom, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) if ops.product_id: #If a product is given, the result is always put immediately in the result package (if it is False, they are without package) quant_dest_package_id = ops.result_package_id.id ctx = context else: # When a pack is moved entirely, the quants should not be written anything for the destination package quant_dest_package_id = False ctx = context.copy() ctx['entire_pack'] = True quant_obj.quants_move(cr, uid, quants, move, ops.location_dest_id, location_from=ops.location_id, lot_id=ops.lot_id.id, owner_id=ops.owner_id.id, src_package_id=ops.package_id.id, dest_package_id=quant_dest_package_id, context=ctx) # Handle pack in pack if not ops.product_id and ops.package_id and ops.result_package_id.id != ops.package_id.parent_id.id: self.pool.get('stock.quant.package').write(cr, SUPERUSER_ID, [ops.package_id.id], {'parent_id': ops.result_package_id.id}, context=context) if not move_qty.get(move.id): raise osv.except_osv(_("Error"), _("The roundings of your Unit of Measures %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. 
") % (move.product_uom.name, move.product_id.uom_id.name)) move_qty[move.id] -= record.qty #Check for remaining qtys and unreserve/check move_dest_id in move_dest_ids = set() for move in self.browse(cr, uid, ids, context=context): move_qty_cmp = float_compare(move_qty[move.id], 0, precision_rounding=move.product_id.uom_id.rounding) if move_qty_cmp > 0: # (=In case no pack operations in picking) main_domain = [('qty', '>', 0)] prefered_domain = [('reservation_id', '=', move.id)] fallback_domain = [('reservation_id', '=', False)] fallback_domain2 = ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)] prefered_domain_list = [prefered_domain] + [fallback_domain] + [fallback_domain2] self.check_tracking(cr, uid, move, move.restrict_lot_id.id, context=context) qty = move_qty[move.id] quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, qty, domain=main_domain, prefered_domain_list=prefered_domain_list, restrict_lot_id=move.restrict_lot_id.id, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_move(cr, uid, quants, move, move.location_dest_id, lot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id, context=context) # If the move has a destination, add it to the list to reserve if move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'): move_dest_ids.add(move.move_dest_id.id) if move.procurement_id: procurement_ids.add(move.procurement_id.id) #unreserve the quants and make them available for other operations/moves quant_obj.quants_unreserve(cr, uid, move, context=context) # Check the packages have been placed in the correct locations self._check_package_from_moves(cr, uid, ids, context=context) #set the move as done self.write(cr, uid, ids, {'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) self.pool.get('procurement.order').check(cr, uid, list(procurement_ids), context=context) #assign destination moves if move_dest_ids: self.action_assign(cr, uid, list(move_dest_ids), context=context) #check picking state to set the date_done is needed done_picking = [] for picking in picking_obj.browse(cr, uid, list(pickings), context=context): if picking.state == 'done' and not picking.date_done: done_picking.append(picking.id) if done_picking: picking_obj.write(cr, uid, done_picking, {'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context) return True def unlink(self, cr, uid, ids, context=None): context = context or {} for move in self.browse(cr, uid, ids, context=context): if move.state not in ('draft', 'cancel'): raise osv.except_osv(_('User Error!'), _('You can only delete draft moves.')) return super(stock_move, self).unlink(cr, uid, ids, context=context) def action_scrap(self, cr, uid, ids, quantity, location_id, restrict_lot_id=False, restrict_partner_id=False, context=None): """ Move the scrap/damaged product into scrap location @param cr: the database cursor @param uid: the user id @param ids: ids of stock move object to be scrapped @param quantity : specify scrap qty @param location_id : specify scrap location @param context: context arguments @return: Scraped lines """ quant_obj = self.pool.get("stock.quant") #quantity should be given in MOVE UOM if quantity <= 0: raise osv.except_osv(_('Warning!'), _('Please provide a positive quantity to scrap.')) res = [] for move in self.browse(cr, uid, ids, context=context): source_location = move.location_id if move.state == 'done': source_location = move.location_dest_id 
#Previously used to prevent scrapping from a virtual location but not necessary anymore #if source_location.usage != 'internal': #restrict to scrap from a virtual location because it's meaningless and it may introduce errors in stock ('creating' new products from nowhere) #raise osv.except_osv(_('Error!'), _('Forbidden operation: it is not allowed to scrap products from a virtual location.')) move_qty = move.product_qty uos_qty = quantity / move_qty * move.product_uos_qty default_val = { 'location_id': source_location.id, 'product_uom_qty': quantity, 'product_uos_qty': uos_qty, 'state': move.state, 'scrapped': True, 'location_dest_id': location_id, 'restrict_lot_id': restrict_lot_id, 'restrict_partner_id': restrict_partner_id, } new_move = self.copy(cr, uid, move.id, default_val) res += [new_move] product_obj = self.pool.get('product.product') for product in product_obj.browse(cr, uid, [move.product_id.id], context=context): if move.picking_id: uom = product.uom_id.name if product.uom_id else '' message = _("%s %s %s has been <b>moved to</b> scrap.") % (quantity, uom, product.name) move.picking_id.message_post(body=message) # We "flag" the quant from which we want to scrap the products. To do so: # - we select the quants related to the move we scrap from # - we reserve the quants with the scrapped move # See self.action_done, and particularly how the "prefered_domain" is defined, for clarification scrap_move = self.browse(cr, uid, new_move, context=context) if move.state == 'done' and scrap_move.location_id.usage not in ('supplier', 'inventory', 'production'): domain = [('qty', '>', 0), ('history_ids', 'in', [move.id])] # We use scrap_move data since a reservation makes sense for a move not already done quants = quant_obj.quants_get_prefered_domain(cr, uid, scrap_move.location_id, scrap_move.product_id, quantity, domain=domain, prefered_domain_list=[], restrict_lot_id=scrap_move.restrict_lot_id.id, restrict_partner_id=scrap_move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, scrap_move, context=context) self.action_done(cr, uid, res, context=context) return res def split(self, cr, uid, move, qty, restrict_lot_id=False, restrict_partner_id=False, context=None): """ Splits qty from the given move into a new move :param move: browse record :param qty: float. quantity to split (given in product UoM) :param restrict_lot_id: optional production lot that can be given in order to force the new move to restrict its choice of quants to this lot. :param restrict_partner_id: optional partner that can be given in order to force the new move to restrict its choice of quants to the ones belonging to this partner. :param context: dictionary. Can contain the special key 'source_location_id' in order to force the source location when copying the move returns the ID of the backorder move created """ if move.state in ('done', 'cancel'): raise osv.except_osv(_('Error'), _('You cannot split a move that is already done')) if move.state == 'draft': #we restrict the split of a draft move because if not confirmed yet, it may be replaced by several other moves in #case of phantom bom (with mrp module). And we don't want to deal with this complexity by copying the product that will explode. raise osv.except_osv(_('Error'), _('You cannot split a draft move. 
It needs to be confirmed first.')) if move.product_qty <= qty or qty == 0: return move.id uom_obj = self.pool.get('product.uom') context = context or {} #HALF-UP rounding as only rounding errors will be because of propagation of error from default UoM uom_qty = uom_obj._compute_qty_obj(cr, uid, move.product_id.uom_id, qty, move.product_uom, rounding_method='HALF-UP', context=context) uos_qty = uom_qty * move.product_uos_qty / move.product_uom_qty defaults = { 'product_uom_qty': uom_qty, 'product_uos_qty': uos_qty, 'procure_method': 'make_to_stock', 'restrict_lot_id': restrict_lot_id, 'restrict_partner_id': restrict_partner_id, 'split_from': move.id, 'procurement_id': move.procurement_id.id, 'move_dest_id': move.move_dest_id.id, 'origin_returned_move_id': move.origin_returned_move_id.id, } if context.get('source_location_id'): defaults['location_id'] = context['source_location_id'] new_move = self.copy(cr, uid, move.id, defaults, context=context) ctx = context.copy() ctx['do_not_propagate'] = True self.write(cr, uid, [move.id], { 'product_uom_qty': move.product_uom_qty - uom_qty, 'product_uos_qty': move.product_uos_qty - uos_qty, }, context=ctx) if move.move_dest_id and move.propagate and move.move_dest_id.state not in ('done', 'cancel'): new_move_prop = self.split(cr, uid, move.move_dest_id, qty, context=context) self.write(cr, uid, [new_move], {'move_dest_id': new_move_prop}, context=context) #returning the first element of list returned by action_confirm is ok because we checked it wouldn't be exploded (and #thus the result of action_confirm should always be a list of 1 element length) return self.action_confirm(cr, uid, [new_move], context=context)[0] def get_code_from_locs(self, cr, uid, move, location_id=False, location_dest_id=False, context=None): """ Returns the code the picking type should have. This can easily be used to check if a move is internal or not move, location_id and location_dest_id are browse records """ code = 'internal' src_loc = location_id or move.location_id dest_loc = location_dest_id or move.location_dest_id if src_loc.usage == 'internal' and dest_loc.usage != 'internal': code = 'outgoing' if src_loc.usage != 'internal' and dest_loc.usage == 'internal': code = 'incoming' return code def _get_taxes(self, cr, uid, move, context=None): return [] class stock_inventory(osv.osv): _name = "stock.inventory" _description = "Inventory" def _get_move_ids_exist(self, cr, uid, ids, field_name, arg, context=None): res = {} for inv in self.browse(cr, uid, ids, context=context): res[inv.id] = False if inv.move_ids: res[inv.id] = True return res def _get_available_filters(self, cr, uid, context=None): """ This function will return the list of filter allowed according to the options checked in 'Settings\Warehouse'. 
:rtype: list of tuple """ #default available choices res_filter = [('none', _('All products')), ('partial', _('Manual Selection of Products')), ('product', _('One product only'))] settings_obj = self.pool.get('stock.config.settings') config_ids = settings_obj.search(cr, uid, [], limit=1, order='id DESC', context=context) #If we don't have updated config until now, all fields are by default false and so should be not dipslayed if not config_ids: return res_filter stock_settings = settings_obj.browse(cr, uid, config_ids[0], context=context) if stock_settings.group_stock_tracking_owner: res_filter.append(('owner', _('One owner only'))) res_filter.append(('product_owner', _('One product for a specific owner'))) if stock_settings.group_stock_tracking_lot: res_filter.append(('lot', _('One Lot/Serial Number'))) if stock_settings.group_stock_packaging: res_filter.append(('pack', _('A Pack'))) return res_filter def _get_total_qty(self, cr, uid, ids, field_name, args, context=None): res = {} for inv in self.browse(cr, uid, ids, context=context): res[inv.id] = sum([x.product_qty for x in inv.line_ids]) return res INVENTORY_STATE_SELECTION = [ ('draft', 'Draft'), ('cancel', 'Cancelled'), ('confirm', 'In Progress'), ('done', 'Validated'), ] _columns = { 'name': fields.char('Inventory Reference', required=True, readonly=True, states={'draft': [('readonly', False)]}, help="Inventory Name."), 'date': fields.datetime('Inventory Date', required=True, readonly=True, help="The date that will be used for the stock level check of the products and the validation of the stock move related to this inventory."), 'line_ids': fields.one2many('stock.inventory.line', 'inventory_id', 'Inventories', readonly=False, states={'done': [('readonly', True)]}, help="Inventory Lines.", copy=True), 'move_ids': fields.one2many('stock.move', 'inventory_id', 'Created Moves', help="Inventory Moves.", states={'done': [('readonly', True)]}), 'state': fields.selection(INVENTORY_STATE_SELECTION, 'Status', readonly=True, select=True, copy=False), 'company_id': fields.many2one('res.company', 'Company', required=True, select=True, readonly=True, states={'draft': [('readonly', False)]}), 'location_id': fields.many2one('stock.location', 'Inventoried Location', required=True, readonly=True, states={'draft': [('readonly', False)]}), 'product_id': fields.many2one('product.product', 'Inventoried Product', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Product to focus your inventory on a particular Product."), 'package_id': fields.many2one('stock.quant.package', 'Inventoried Pack', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Pack to focus your inventory on a particular Pack."), 'partner_id': fields.many2one('res.partner', 'Inventoried Owner', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Owner to focus your inventory on a particular Owner."), 'lot_id': fields.many2one('stock.production.lot', 'Inventoried Lot/Serial Number', readonly=True, states={'draft': [('readonly', False)]}, help="Specify Lot/Serial Number to focus your inventory on a particular Lot/Serial Number.", copy=False), 'move_ids_exist': fields.function(_get_move_ids_exist, type='boolean', string=' Stock Move Exists?', help='technical field for attrs in view'), 'filter': fields.selection(_get_available_filters, 'Inventory of', required=True, help="If you do an entire inventory, you can choose 'All Products' and it will prefill the inventory with the current stock. If you only do some products "\ "(e.g. 
Cycle Counting) you can choose 'Manual Selection of Products' and the system won't propose anything. You can also let the "\ "system propose for a single product / lot /... "), 'total_qty': fields.function(_get_total_qty, type="float"), } def _default_stock_location(self, cr, uid, context=None): try: warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0') return warehouse.lot_stock_id.id except: return False _defaults = { 'date': fields.datetime.now, 'state': 'draft', 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c), 'location_id': _default_stock_location, 'filter': 'none', } def reset_real_qty(self, cr, uid, ids, context=None): inventory = self.browse(cr, uid, ids[0], context=context) line_ids = [line.id for line in inventory.line_ids] self.pool.get('stock.inventory.line').write(cr, uid, line_ids, {'product_qty': 0}) return True def action_done(self, cr, uid, ids, context=None): """ Finish the inventory @return: True """ for inv in self.browse(cr, uid, ids, context=context): for inventory_line in inv.line_ids: if inventory_line.product_qty < 0 and inventory_line.product_qty != inventory_line.theoretical_qty: raise osv.except_osv(_('Warning'), _('You cannot set a negative product quantity in an inventory line:\n\t%s - qty: %s') % (inventory_line.product_id.name, inventory_line.product_qty)) self.action_check(cr, uid, [inv.id], context=context) self.write(cr, uid, [inv.id], {'state': 'done'}, context=context) self.post_inventory(cr, uid, inv, context=context) return True def post_inventory(self, cr, uid, inv, context=None): #The inventory is posted as a single step which means quants cannot be moved from an internal location to another using an inventory #as they will be moved to inventory loss, and other quants will be created at the encoded quant location. This is a normal behavior #as quants cannot be reused from the inventory location (users can still manually move the products before/after the inventory if they want). move_obj = self.pool.get('stock.move') move_obj.action_done(cr, uid, [x.id for x in inv.move_ids if x.state != 'done'], context=context) def action_check(self, cr, uid, ids, context=None): """ Checks the inventory and computes the stock moves to do @return: True """ inventory_line_obj = self.pool.get('stock.inventory.line') stock_move_obj = self.pool.get('stock.move') for inventory in self.browse(cr, uid, ids, context=context): #first remove the existing stock moves linked to this inventory move_ids = [move.id for move in inventory.move_ids] stock_move_obj.unlink(cr, uid, move_ids, context=context) for line in inventory.line_ids: #compare the checked quantities on inventory lines to the theoretical one inventory_line_obj._resolve_inventory_line(cr, uid, line, context=context) def action_cancel_draft(self, cr, uid, ids, context=None): """ Cancels the stock moves and changes the inventory state to draft.
@return: True """ for inv in self.browse(cr, uid, ids, context=context): self.write(cr, uid, [inv.id], {'line_ids': [(5,)]}, context=context) self.pool.get('stock.move').action_cancel(cr, uid, [x.id for x in inv.move_ids], context=context) self.write(cr, uid, [inv.id], {'state': 'draft'}, context=context) return True def action_cancel_inventory(self, cr, uid, ids, context=None): self.action_cancel_draft(cr, uid, ids, context=context) def prepare_inventory(self, cr, uid, ids, context=None): inventory_line_obj = self.pool.get('stock.inventory.line') for inventory in self.browse(cr, uid, ids, context=context): # If there are inventory lines already (e.g. from import), respect those and set their theoretical qty line_ids = [line.id for line in inventory.line_ids] if not line_ids and inventory.filter != 'partial': #compute the inventory lines and create them vals = self._get_inventory_lines(cr, uid, inventory, context=context) for product_line in vals: inventory_line_obj.create(cr, uid, product_line, context=context) return self.write(cr, uid, ids, {'state': 'confirm', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}) def _get_inventory_lines(self, cr, uid, inventory, context=None): location_obj = self.pool.get('stock.location') product_obj = self.pool.get('product.product') location_ids = location_obj.search(cr, uid, [('id', 'child_of', [inventory.location_id.id])], context=context) domain = ' location_id in %s' args = (tuple(location_ids),) if inventory.partner_id: domain += ' and owner_id = %s' args += (inventory.partner_id.id,) if inventory.lot_id: domain += ' and lot_id = %s' args += (inventory.lot_id.id,) if inventory.product_id: domain += ' and product_id = %s' args += (inventory.product_id.id,) if inventory.package_id: domain += ' and package_id = %s' args += (inventory.package_id.id,) cr.execute(''' SELECT product_id, sum(qty) as product_qty, location_id, lot_id as prod_lot_id, package_id, owner_id as partner_id FROM stock_quant WHERE''' + domain + ''' GROUP BY product_id, location_id, lot_id, package_id, partner_id ''', args) vals = [] for product_line in cr.dictfetchall(): #replace the None the dictionary by False, because falsy values are tested later on for key, value in product_line.items(): if not value: product_line[key] = False product_line['inventory_id'] = inventory.id product_line['theoretical_qty'] = product_line['product_qty'] if product_line['product_id']: product = product_obj.browse(cr, uid, product_line['product_id'], context=context) product_line['product_uom_id'] = product.uom_id.id vals.append(product_line) return vals def _check_filter_product(self, cr, uid, ids, context=None): for inventory in self.browse(cr, uid, ids, context=context): if inventory.filter == 'none' and inventory.product_id and inventory.location_id and inventory.lot_id: return True if inventory.filter not in ('product', 'product_owner') and inventory.product_id: return False if inventory.filter != 'lot' and inventory.lot_id: return False if inventory.filter not in ('owner', 'product_owner') and inventory.partner_id: return False if inventory.filter != 'pack' and inventory.package_id: return False return True def onchange_filter(self, cr, uid, ids, filter, context=None): to_clean = { 'value': {} } if filter not in ('product', 'product_owner'): to_clean['value']['product_id'] = False if filter != 'lot': to_clean['value']['lot_id'] = False if filter not in ('owner', 'product_owner'): to_clean['value']['partner_id'] = False if filter != 'pack': to_clean['value']['package_id'] = False return 
to_clean _constraints = [ (_check_filter_product, 'The selected inventory options are not coherent.', ['filter', 'product_id', 'lot_id', 'partner_id', 'package_id']), ] class stock_inventory_line(osv.osv): _name = "stock.inventory.line" _description = "Inventory Line" _order = "inventory_id, location_name, product_code, product_name, prodlot_name" def _get_product_name_change(self, cr, uid, ids, context=None): return self.pool.get('stock.inventory.line').search(cr, uid, [('product_id', 'in', ids)], context=context) def _get_location_change(self, cr, uid, ids, context=None): return self.pool.get('stock.inventory.line').search(cr, uid, [('location_id', 'in', ids)], context=context) def _get_prodlot_change(self, cr, uid, ids, context=None): return self.pool.get('stock.inventory.line').search(cr, uid, [('prod_lot_id', 'in', ids)], context=context) def _get_theoretical_qty(self, cr, uid, ids, name, args, context=None): res = {} quant_obj = self.pool["stock.quant"] uom_obj = self.pool["product.uom"] for line in self.browse(cr, uid, ids, context=context): quant_ids = self._get_quants(cr, uid, line, context=context) quants = quant_obj.browse(cr, uid, quant_ids, context=context) tot_qty = sum([x.qty for x in quants]) if line.product_uom_id and line.product_id.uom_id.id != line.product_uom_id.id: tot_qty = uom_obj._compute_qty_obj(cr, uid, line.product_id.uom_id, tot_qty, line.product_uom_id, context=context) res[line.id] = tot_qty return res _columns = { 'inventory_id': fields.many2one('stock.inventory', 'Inventory', ondelete='cascade', select=True), 'location_id': fields.many2one('stock.location', 'Location', required=True, select=True), 'product_id': fields.many2one('product.product', 'Product', required=True, select=True), 'package_id': fields.many2one('stock.quant.package', 'Pack', select=True), 'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure', required=True), 'product_qty': fields.float('Checked Quantity', digits_compute=dp.get_precision('Product Unit of Measure')), 'company_id': fields.related('inventory_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, select=True, readonly=True), 'prod_lot_id': fields.many2one('stock.production.lot', 'Serial Number', domain="[('product_id','=',product_id)]"), 'state': fields.related('inventory_id', 'state', type='char', string='Status', readonly=True), 'theoretical_qty': fields.function(_get_theoretical_qty, type='float', digits_compute=dp.get_precision('Product Unit of Measure'), store={'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['location_id', 'product_id', 'package_id', 'product_uom_id', 'company_id', 'prod_lot_id', 'partner_id'], 20),}, readonly=True, string="Theoretical Quantity"), 'partner_id': fields.many2one('res.partner', 'Owner'), 'product_name': fields.related('product_id', 'name', type='char', string='Product Name', store={ 'product.product': (_get_product_name_change, ['name', 'default_code'], 20), 'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}), 'product_code': fields.related('product_id', 'default_code', type='char', string='Product Code', store={ 'product.product': (_get_product_name_change, ['name', 'default_code'], 20), 'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['product_id'], 20),}), 'location_name': fields.related('location_id', 'complete_name', type='char', string='Location Name', store={ 'stock.location': (_get_location_change, ['name', 'location_id', 'active'], 20), 'stock.inventory.line': 
(lambda self, cr, uid, ids, c={}: ids, ['location_id'], 20),}), 'prodlot_name': fields.related('prod_lot_id', 'name', type='char', string='Serial Number Name', store={ 'stock.production.lot': (_get_prodlot_change, ['name'], 20), 'stock.inventory.line': (lambda self, cr, uid, ids, c={}: ids, ['prod_lot_id'], 20),}), } _defaults = { 'product_qty': 0, 'product_uom_id': lambda self, cr, uid, ctx=None: self.pool['ir.model.data'].get_object_reference(cr, uid, 'product', 'product_uom_unit')[1] } def _get_quants(self, cr, uid, line, context=None): quant_obj = self.pool["stock.quant"] dom = [('company_id', '=', line.company_id.id), ('location_id', '=', line.location_id.id), ('lot_id', '=', line.prod_lot_id.id), ('product_id','=', line.product_id.id), ('owner_id', '=', line.partner_id.id), ('package_id', '=', line.package_id.id)] quants = quant_obj.search(cr, uid, dom, context=context) return quants def onchange_createline(self, cr, uid, ids, location_id=False, product_id=False, uom_id=False, package_id=False, prod_lot_id=False, partner_id=False, company_id=False, context=None): quant_obj = self.pool["stock.quant"] uom_obj = self.pool["product.uom"] res = {'value': {}} # If no UoM already put the default UoM of the product if product_id: product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) uom = self.pool['product.uom'].browse(cr, uid, uom_id, context=context) if product.uom_id.category_id.id != uom.category_id.id: res['value']['product_uom_id'] = product.uom_id.id res['domain'] = {'product_uom_id': [('category_id','=',product.uom_id.category_id.id)]} uom_id = product.uom_id.id # Calculate theoretical quantity by searching the quants as in quants_get if product_id and location_id: product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) if not company_id: company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id dom = [('company_id', '=', company_id), ('location_id', '=', location_id), ('lot_id', '=', prod_lot_id), ('product_id','=', product_id), ('owner_id', '=', partner_id), ('package_id', '=', package_id)] quants = quant_obj.search(cr, uid, dom, context=context) th_qty = sum([x.qty for x in quant_obj.browse(cr, uid, quants, context=context)]) if product_id and uom_id and product.uom_id.id != uom_id: th_qty = uom_obj._compute_qty(cr, uid, product.uom_id.id, th_qty, uom_id) res['value']['theoretical_qty'] = th_qty res['value']['product_qty'] = th_qty return res def _resolve_inventory_line(self, cr, uid, inventory_line, context=None): stock_move_obj = self.pool.get('stock.move') quant_obj = self.pool.get('stock.quant') diff = inventory_line.theoretical_qty - inventory_line.product_qty if not diff: return #each theorical_lines where difference between theoretical and checked quantities is not 0 is a line for which we need to create a stock move vals = { 'name': _('INV:') + (inventory_line.inventory_id.name or ''), 'product_id': inventory_line.product_id.id, 'product_uom': inventory_line.product_uom_id.id, 'date': inventory_line.inventory_id.date, 'company_id': inventory_line.inventory_id.company_id.id, 'inventory_id': inventory_line.inventory_id.id, 'state': 'confirmed', 'restrict_lot_id': inventory_line.prod_lot_id.id, 'restrict_partner_id': inventory_line.partner_id.id, } inventory_location_id = inventory_line.product_id.property_stock_inventory.id if diff < 0: #found more than expected vals['location_id'] = inventory_location_id vals['location_dest_id'] = inventory_line.location_id.id 
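# --- Editor's note (illustrative sketch, not from the original source) ---
# Sign convention used here: diff = theoretical_qty - product_qty (checked).
# diff < 0 means a surplus was counted, so this branch moves -diff units FROM
# the inventory-loss location INTO the inventoried location; diff > 0 (a
# shortage) moves diff units the other way (see the else branch below).
# E.g. theoretical_qty = 5, product_qty = 8  ->  diff = -3: 3 units enter stock.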
vals['product_uom_qty'] = -diff else: #found less than expected vals['location_id'] = inventory_line.location_id.id vals['location_dest_id'] = inventory_location_id vals['product_uom_qty'] = diff move_id = stock_move_obj.create(cr, uid, vals, context=context) move = stock_move_obj.browse(cr, uid, move_id, context=context) if diff > 0: domain = [('qty', '>', 0.0), ('package_id', '=', inventory_line.package_id.id), ('lot_id', '=', inventory_line.prod_lot_id.id), ('location_id', '=', inventory_line.location_id.id)] preferred_domain_list = [[('reservation_id', '=', False)], [('reservation_id.inventory_id', '!=', inventory_line.inventory_id.id)]] quants = quant_obj.quants_get_prefered_domain(cr, uid, move.location_id, move.product_id, move.product_qty, domain=domain, prefered_domain_list=preferred_domain_list, restrict_partner_id=move.restrict_partner_id.id, context=context) quant_obj.quants_reserve(cr, uid, quants, move, context=context) elif inventory_line.package_id: stock_move_obj.action_done(cr, uid, move_id, context=context) quants = [x.id for x in move.quant_ids] quant_obj.write(cr, uid, quants, {'package_id': inventory_line.package_id.id}, context=context) res = quant_obj.search(cr, uid, [('qty', '<', 0.0), ('product_id', '=', move.product_id.id), ('location_id', '=', move.location_dest_id.id), ('package_id', '!=', False)], limit=1, context=context) if res: for quant in move.quant_ids: if quant.location_id.id == move.location_dest_id.id: #To avoid taking a quant that was already reconciled quant_obj._quant_reconcile_negative(cr, uid, quant, move, context=context) return move_id # Should be left out in next version def restrict_change(self, cr, uid, ids, theoretical_qty, context=None): return {} # Should be left out in next version def on_change_product_id(self, cr, uid, ids, product, uom, theoretical_qty, context=None): """ Changes UoM @param product: Changed product_id @param uom: UoM product @return: Dictionary of changed values """ if not product: return {'value': {'product_uom_id': False}} obj_product = self.pool.get('product.product').browse(cr, uid, product, context=context) return {'value': {'product_uom_id': uom or obj_product.uom_id.id}} #---------------------------------------------------------- # Stock Warehouse #---------------------------------------------------------- class stock_warehouse(osv.osv): _name = "stock.warehouse" _description = "Warehouse" _columns = { 'name': fields.char('Warehouse Name', required=True, select=True), 'company_id': fields.many2one('res.company', 'Company', required=True, readonly=True, select=True), 'partner_id': fields.many2one('res.partner', 'Address'), 'view_location_id': fields.many2one('stock.location', 'View Location', required=True, domain=[('usage', '=', 'view')]), 'lot_stock_id': fields.many2one('stock.location', 'Location Stock', domain=[('usage', '=', 'internal')], required=True), 'code': fields.char('Short Name', size=5, required=True, help="Short name used to identify your warehouse"), 'route_ids': fields.many2many('stock.location.route', 'stock_route_warehouse', 'warehouse_id', 'route_id', 'Routes', domain="[('warehouse_selectable', '=', True)]", help='Default routes through the warehouse'), 'reception_steps': fields.selection([ ('one_step', 'Receive goods directly in stock (1 step)'), ('two_steps', 'Unload in input location then go to stock (2 steps)'), ('three_steps', 'Unload in input location, go through a quality control before being admitted in stock (3 steps)')], 'Incoming Shipments', help="Default incoming route to follow", required=True), 'delivery_steps': fields.selection([ ('ship_only', 'Ship directly from stock (Ship only)'), ('pick_ship', 'Bring goods to output location before shipping (Pick + Ship)'), ('pick_pack_ship', 'Make packages into a dedicated location, then bring them to the output location for shipping (Pick + Pack + Ship)')], 'Outgoing Shipments', help="Default outgoing route to follow", required=True), 'wh_input_stock_loc_id': fields.many2one('stock.location', 'Input Location'), 'wh_qc_stock_loc_id': fields.many2one('stock.location', 'Quality Control Location'), 'wh_output_stock_loc_id': fields.many2one('stock.location', 'Output Location'), 'wh_pack_stock_loc_id': fields.many2one('stock.location', 'Packing Location'), 'mto_pull_id': fields.many2one('procurement.rule', 'MTO rule'), 'pick_type_id': fields.many2one('stock.picking.type', 'Pick Type'), 'pack_type_id': fields.many2one('stock.picking.type', 'Pack Type'), 'out_type_id': fields.many2one('stock.picking.type', 'Out Type'), 'in_type_id': fields.many2one('stock.picking.type', 'In Type'), 'int_type_id': fields.many2one('stock.picking.type', 'Internal Type'), 'crossdock_route_id': fields.many2one('stock.location.route', 'Crossdock Route'), 'reception_route_id': fields.many2one('stock.location.route', 'Receipt Route'), 'delivery_route_id': fields.many2one('stock.location.route', 'Delivery Route'), 'resupply_from_wh': fields.boolean('Resupply From Other Warehouses', help='Unused field'), 'resupply_wh_ids': fields.many2many('stock.warehouse', 'stock_wh_resupply_table', 'supplied_wh_id', 'supplier_wh_id', 'Resupply Warehouses'), 'resupply_route_ids': fields.one2many('stock.location.route', 'supplied_wh_id', 'Resupply Routes', help="Routes will be created for these resupply warehouses and you can select them on products and product categories"), 'default_resupply_wh_id': fields.many2one('stock.warehouse', 'Default Resupply Warehouse', help="Goods will always be resupplied from this warehouse"), } def onchange_filter_default_resupply_wh_id(self, cr, uid, ids, default_resupply_wh_id, resupply_wh_ids, context=None): resupply_wh_ids = set([x['id'] for x in (self.resolve_2many_commands(cr, uid, 'resupply_wh_ids', resupply_wh_ids, ['id']))]) if default_resupply_wh_id: #make sure the default resupply warehouse is kept in the list of resupply warehouses resupply_wh_ids.add(default_resupply_wh_id) resupply_wh_ids = list(resupply_wh_ids) return {'value': {'resupply_wh_ids': resupply_wh_ids}} def _get_external_transit_location(self, cr, uid, warehouse, context=None): ''' returns browse record of inter company transit location, if found''' data_obj = self.pool.get('ir.model.data') location_obj = self.pool.get('stock.location') try: inter_wh_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_inter_wh')[1] except: return False return location_obj.browse(cr, uid, inter_wh_loc, context=context) def _get_inter_wh_route(self, cr, uid, warehouse, wh, context=None): return { 'name': _('%s: Supply Product from %s') % (warehouse.name, wh.name), 'warehouse_selectable': False, 'product_selectable': True, 'product_categ_selectable': True, 'supplied_wh_id': warehouse.id, 'supplier_wh_id': wh.id, } def _create_resupply_routes(self, cr, uid, warehouse, supplier_warehouses, default_resupply_wh, context=None): route_obj = self.pool.get('stock.location.route') pull_obj = self.pool.get('procurement.rule') #create route selectable on the product to resupply the warehouse from another one external_transit_location =
self._get_external_transit_location(cr, uid, warehouse, context=context) internal_transit_location = warehouse.company_id.internal_transit_location_id input_loc = warehouse.wh_input_stock_loc_id if warehouse.reception_steps == 'one_step': input_loc = warehouse.lot_stock_id for wh in supplier_warehouses: transit_location = wh.company_id.id == warehouse.company_id.id and internal_transit_location or external_transit_location if transit_location: output_loc = wh.wh_output_stock_loc_id if wh.delivery_steps == 'ship_only': output_loc = wh.lot_stock_id # Create extra MTO rule (only for 'ship only' because in the other cases MTO rules already exists) mto_pull_vals = self._get_mto_pull_rule(cr, uid, wh, [(output_loc, transit_location, wh.out_type_id.id)], context=context)[0] pull_obj.create(cr, uid, mto_pull_vals, context=context) inter_wh_route_vals = self._get_inter_wh_route(cr, uid, warehouse, wh, context=context) inter_wh_route_id = route_obj.create(cr, uid, vals=inter_wh_route_vals, context=context) values = [(output_loc, transit_location, wh.out_type_id.id, wh), (transit_location, input_loc, warehouse.in_type_id.id, warehouse)] pull_rules_list = self._get_supply_pull_rules(cr, uid, wh.id, values, inter_wh_route_id, context=context) for pull_rule in pull_rules_list: pull_obj.create(cr, uid, vals=pull_rule, context=context) #if the warehouse is also set as default resupply method, assign this route automatically to the warehouse if default_resupply_wh and default_resupply_wh.id == wh.id: self.write(cr, uid, [warehouse.id, wh.id], {'route_ids': [(4, inter_wh_route_id)]}, context=context) _defaults = { 'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.inventory', context=c), 'reception_steps': 'one_step', 'delivery_steps': 'ship_only', } _sql_constraints = [ ('warehouse_name_uniq', 'unique(name, company_id)', 'The name of the warehouse must be unique per company!'), ('warehouse_code_uniq', 'unique(code, company_id)', 'The code of the warehouse must be unique per company!'), ] def _get_partner_locations(self, cr, uid, ids, context=None): ''' returns a tuple made of the browse record of customer location and the browse record of supplier location''' data_obj = self.pool.get('ir.model.data') location_obj = self.pool.get('stock.location') try: customer_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_customers')[1] supplier_loc = data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_suppliers')[1] except: customer_loc = location_obj.search(cr, uid, [('usage', '=', 'customer')], context=context) customer_loc = customer_loc and customer_loc[0] or False supplier_loc = location_obj.search(cr, uid, [('usage', '=', 'supplier')], context=context) supplier_loc = supplier_loc and supplier_loc[0] or False if not (customer_loc and supplier_loc): raise osv.except_osv(_('Error!'), _('Can\'t find any customer or supplier location.')) return location_obj.browse(cr, uid, [customer_loc, supplier_loc], context=context) def _location_used(self, cr, uid, location_id, warehouse, context=None): pull_obj = self.pool['procurement.rule'] push_obj = self.pool['stock.location.path'] pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_src_id', '=', location_id), ('location_id', '=', location_id)], context=context) pushs = push_obj.search(cr, uid, ['&', ('route_id', 'not in', [x.id for x in warehouse.route_ids]), '|', ('location_from_id', '=', location_id), ('location_dest_id', 
'=', location_id)], context=context) if pulls or pushs: return True return False def switch_location(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None): location_obj = self.pool.get('stock.location') new_reception_step = new_reception_step or warehouse.reception_steps new_delivery_step = new_delivery_step or warehouse.delivery_steps if warehouse.reception_steps != new_reception_step: if not self._location_used(cr, uid, warehouse.wh_input_stock_loc_id.id, warehouse, context=context): location_obj.write(cr, uid, [warehouse.wh_input_stock_loc_id.id, warehouse.wh_qc_stock_loc_id.id], {'active': False}, context=context) if new_reception_step != 'one_step': location_obj.write(cr, uid, warehouse.wh_input_stock_loc_id.id, {'active': True}, context=context) if new_reception_step == 'three_steps': location_obj.write(cr, uid, warehouse.wh_qc_stock_loc_id.id, {'active': True}, context=context) if warehouse.delivery_steps != new_delivery_step: if not self._location_used(cr, uid, warehouse.wh_output_stock_loc_id.id, warehouse, context=context): location_obj.write(cr, uid, [warehouse.wh_output_stock_loc_id.id], {'active': False}, context=context) if not self._location_used(cr, uid, warehouse.wh_pack_stock_loc_id.id, warehouse, context=context): location_obj.write(cr, uid, [warehouse.wh_pack_stock_loc_id.id], {'active': False}, context=context) if new_delivery_step != 'ship_only': location_obj.write(cr, uid, warehouse.wh_output_stock_loc_id.id, {'active': True}, context=context) if new_delivery_step == 'pick_pack_ship': location_obj.write(cr, uid, warehouse.wh_pack_stock_loc_id.id, {'active': True}, context=context) return True def _get_reception_delivery_route(self, cr, uid, warehouse, route_name, context=None): return { 'name': self._format_routename(cr, uid, warehouse, route_name, context=context), 'product_categ_selectable': True, 'product_selectable': False, 'sequence': 10, } def _get_supply_pull_rules(self, cr, uid, supply_warehouse, values, new_route_id, context=None): pull_rules_list = [] for from_loc, dest_loc, pick_type_id, warehouse in values: pull_rules_list.append({ 'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context), 'location_src_id': from_loc.id, 'location_id': dest_loc.id, 'route_id': new_route_id, 'action': 'move', 'picking_type_id': pick_type_id, 'procure_method': warehouse.lot_stock_id.id != from_loc.id and 'make_to_order' or 'make_to_stock', # first part of the resuply route is MTS 'warehouse_id': warehouse.id, 'propagate_warehouse_id': supply_warehouse, }) return pull_rules_list def _get_push_pull_rules(self, cr, uid, warehouse, active, values, new_route_id, context=None): first_rule = True push_rules_list = [] pull_rules_list = [] for from_loc, dest_loc, pick_type_id in values: push_rules_list.append({ 'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context), 'location_from_id': from_loc.id, 'location_dest_id': dest_loc.id, 'route_id': new_route_id, 'auto': 'manual', 'picking_type_id': pick_type_id, 'active': active, 'warehouse_id': warehouse.id, }) pull_rules_list.append({ 'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context), 'location_src_id': from_loc.id, 'location_id': dest_loc.id, 'route_id': new_route_id, 'action': 'move', 'picking_type_id': pick_type_id, 'procure_method': first_rule is True and 'make_to_stock' or 'make_to_order', 'active': active, 'warehouse_id': warehouse.id, }) first_rule = False return push_rules_list, 
pull_rules_list def _get_mto_route(self, cr, uid, context=None): route_obj = self.pool.get('stock.location.route') data_obj = self.pool.get('ir.model.data') try: mto_route_id = data_obj.get_object_reference(cr, uid, 'stock', 'route_warehouse0_mto')[1] except: mto_route_id = route_obj.search(cr, uid, [('name', 'like', _('Make To Order'))], context=context) mto_route_id = mto_route_id and mto_route_id[0] or False if not mto_route_id: raise osv.except_osv(_('Error!'), _('Can\'t find any generic Make To Order route.')) return mto_route_id def _check_remove_mto_resupply_rules(self, cr, uid, warehouse, context=None): """ Removes the pull rules going from this warehouse's stock location to a transit location, so that they are not used anymore. """ pull_obj = self.pool.get('procurement.rule') mto_route_id = self._get_mto_route(cr, uid, context=context) rules = pull_obj.search(cr, uid, ['&', ('location_src_id', '=', warehouse.lot_stock_id.id), ('location_id.usage', '=', 'transit')], context=context) pull_obj.unlink(cr, uid, rules, context=context) def _get_mto_pull_rule(self, cr, uid, warehouse, values, context=None): mto_route_id = self._get_mto_route(cr, uid, context=context) res = [] for value in values: from_loc, dest_loc, pick_type_id = value res += [{ 'name': self._format_rulename(cr, uid, warehouse, from_loc, dest_loc, context=context) + _(' MTO'), 'location_src_id': from_loc.id, 'location_id': dest_loc.id, 'route_id': mto_route_id, 'action': 'move', 'picking_type_id': pick_type_id, 'procure_method': 'make_to_order', 'active': True, 'warehouse_id': warehouse.id, }] return res def _get_crossdock_route(self, cr, uid, warehouse, route_name, context=None): return { 'name': self._format_routename(cr, uid, warehouse, route_name, context=context), 'warehouse_selectable': False, 'product_selectable': True, 'product_categ_selectable': True, 'active': warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', 'sequence': 20, } def create_routes(self, cr, uid, ids, warehouse, context=None): wh_route_ids = [] route_obj = self.pool.get('stock.location.route') pull_obj = self.pool.get('procurement.rule') push_obj = self.pool.get('stock.location.path') routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context) #create reception route and rules route_name, values = routes_dict[warehouse.reception_steps] route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context) reception_route_id = route_obj.create(cr, uid, route_vals, context=context) wh_route_ids.append((4, reception_route_id)) push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, reception_route_id, context=context) #create the push/pull rules for push_rule in push_rules_list: push_obj.create(cr, uid, vals=push_rule, context=context) for pull_rule in pull_rules_list: #all pull rules in reception route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location pull_rule['procure_method'] = 'make_to_order' pull_obj.create(cr, uid, vals=pull_rule, context=context) #create MTS route and pull rules for delivery and a specific MTO route to be set on the product route_name, values = routes_dict[warehouse.delivery_steps] route_vals = self._get_reception_delivery_route(cr, uid, warehouse, route_name, context=context) #create the route and its pull rules delivery_route_id = route_obj.create(cr, uid, route_vals, context=context) wh_route_ids.append((4, delivery_route_id)) dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, delivery_route_id,
context=context) for pull_rule in pull_rules_list: pull_obj.create(cr, uid, vals=pull_rule, context=context) #create MTO pull rule and link it to the generic MTO route mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0] mto_pull_id = pull_obj.create(cr, uid, mto_pull_vals, context=context) #create a route for cross dock operations, that can be set on products and product categories route_name, values = routes_dict['crossdock'] crossdock_route_vals = self._get_crossdock_route(cr, uid, warehouse, route_name, context=context) crossdock_route_id = route_obj.create(cr, uid, vals=crossdock_route_vals, context=context) wh_route_ids.append((4, crossdock_route_id)) dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, warehouse.delivery_steps != 'ship_only' and warehouse.reception_steps != 'one_step', values, crossdock_route_id, context=context) for pull_rule in pull_rules_list: # Fixed cross-dock is logically mto pull_rule['procure_method'] = 'make_to_order' pull_obj.create(cr, uid, vals=pull_rule, context=context) #create route selectable on the product to resupply the warehouse from another one self._create_resupply_routes(cr, uid, warehouse, warehouse.resupply_wh_ids, warehouse.default_resupply_wh_id, context=context) #return routes and mto pull rule to store on the warehouse return { 'route_ids': wh_route_ids, 'mto_pull_id': mto_pull_id, 'reception_route_id': reception_route_id, 'delivery_route_id': delivery_route_id, 'crossdock_route_id': crossdock_route_id, } def change_route(self, cr, uid, ids, warehouse, new_reception_step=False, new_delivery_step=False, context=None): picking_type_obj = self.pool.get('stock.picking.type') pull_obj = self.pool.get('procurement.rule') push_obj = self.pool.get('stock.location.path') route_obj = self.pool.get('stock.location.route') new_reception_step = new_reception_step or warehouse.reception_steps new_delivery_step = new_delivery_step or warehouse.delivery_steps #change the default source and destination location and (de)activate picking types input_loc = warehouse.wh_input_stock_loc_id if new_reception_step == 'one_step': input_loc = warehouse.lot_stock_id output_loc = warehouse.wh_output_stock_loc_id if new_delivery_step == 'ship_only': output_loc = warehouse.lot_stock_id picking_type_obj.write(cr, uid, warehouse.in_type_id.id, {'default_location_dest_id': input_loc.id}, context=context) picking_type_obj.write(cr, uid, warehouse.out_type_id.id, {'default_location_src_id': output_loc.id}, context=context) picking_type_obj.write(cr, uid, warehouse.pick_type_id.id, { 'active': new_delivery_step != 'ship_only', 'default_location_dest_id': output_loc.id if new_delivery_step == 'pick_ship' else warehouse.wh_pack_stock_loc_id.id, }, context=context) picking_type_obj.write(cr, uid, warehouse.pack_type_id.id, {'active': new_delivery_step == 'pick_pack_ship'}, context=context) routes_dict = self.get_routes_dict(cr, uid, ids, warehouse, context=context) #update delivery route and rules: unlink the existing rules of the warehouse delivery route and recreate it pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.delivery_route_id.pull_ids], context=context) route_name, values = routes_dict[new_delivery_step] route_obj.write(cr, uid, warehouse.delivery_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context) dummy, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.delivery_route_id.id, context=context) #create the pull 
rules for pull_rule in pull_rules_list: pull_obj.create(cr, uid, vals=pull_rule, context=context) #update receipt route and rules: unlink the existing rules of the warehouse receipt route and recreate it pull_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.pull_ids], context=context) push_obj.unlink(cr, uid, [pu.id for pu in warehouse.reception_route_id.push_ids], context=context) route_name, values = routes_dict[new_reception_step] route_obj.write(cr, uid, warehouse.reception_route_id.id, {'name': self._format_routename(cr, uid, warehouse, route_name, context=context)}, context=context) push_rules_list, pull_rules_list = self._get_push_pull_rules(cr, uid, warehouse, True, values, warehouse.reception_route_id.id, context=context) #create the push/pull rules for push_rule in push_rules_list: push_obj.create(cr, uid, vals=push_rule, context=context) for pull_rule in pull_rules_list: #all pull rules in receipt route are mto, because we don't want to wait for the scheduler to trigger an orderpoint on input location pull_rule['procure_method'] = 'make_to_order' pull_obj.create(cr, uid, vals=pull_rule, context=context) route_obj.write(cr, uid, warehouse.crossdock_route_id.id, {'active': new_reception_step != 'one_step' and new_delivery_step != 'ship_only'}, context=context) #change MTO rule dummy, values = routes_dict[new_delivery_step] mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, values, context=context)[0] pull_obj.write(cr, uid, warehouse.mto_pull_id.id, mto_pull_vals, context=context) return True def create_sequences_and_picking_types(self, cr, uid, warehouse, context=None): seq_obj = self.pool.get('ir.sequence') picking_type_obj = self.pool.get('stock.picking.type') #create new sequences in_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence in'), 'prefix': warehouse.code + '/IN/', 'padding': 5}, context=context) out_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence out'), 'prefix': warehouse.code + '/OUT/', 'padding': 5}, context=context) pack_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence packing'), 'prefix': warehouse.code + '/PACK/', 'padding': 5}, context=context) pick_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence picking'), 'prefix': warehouse.code + '/PICK/', 'padding': 5}, context=context) int_seq_id = seq_obj.create(cr, SUPERUSER_ID, {'name': warehouse.name + _(' Sequence internal'), 'prefix': warehouse.code + '/INT/', 'padding': 5}, context=context) wh_stock_loc = warehouse.lot_stock_id wh_input_stock_loc = warehouse.wh_input_stock_loc_id wh_output_stock_loc = warehouse.wh_output_stock_loc_id wh_pack_stock_loc = warehouse.wh_pack_stock_loc_id #fetch customer and supplier locations, for references customer_loc, supplier_loc = self._get_partner_locations(cr, uid, warehouse.id, context=context) #create in, out, internal picking types for warehouse input_loc = wh_input_stock_loc if warehouse.reception_steps == 'one_step': input_loc = wh_stock_loc output_loc = wh_output_stock_loc if warehouse.delivery_steps == 'ship_only': output_loc = wh_stock_loc #choose the next available color for the picking types of this warehouse color = 0 available_colors = [c%9 for c in range(3, 12)] # put flashy colors first all_used_colors = self.pool.get('stock.picking.type').search_read(cr, uid, [('warehouse_id', '!=', False), ('color', '!=', False)], ['color'], order='color') #don't use sets to preserve the list order for x in all_used_colors: if 
x['color'] in available_colors: available_colors.remove(x['color']) if available_colors: color = available_colors[0] #order the picking types with a sequence so that each warehouse follows the same order: reception, internal, pick, pack, ship. max_sequence = self.pool.get('stock.picking.type').search_read(cr, uid, [], ['sequence'], order='sequence desc') max_sequence = max_sequence and max_sequence[0]['sequence'] or 0 in_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Receipts'), 'warehouse_id': warehouse.id, 'code': 'incoming', 'sequence_id': in_seq_id, 'default_location_src_id': supplier_loc.id, 'default_location_dest_id': input_loc.id, 'sequence': max_sequence + 1, 'color': color}, context=context) out_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Delivery Orders'), 'warehouse_id': warehouse.id, 'code': 'outgoing', 'sequence_id': out_seq_id, 'return_picking_type_id': in_type_id, 'default_location_src_id': output_loc.id, 'default_location_dest_id': customer_loc.id, 'sequence': max_sequence + 4, 'color': color}, context=context) picking_type_obj.write(cr, uid, [in_type_id], {'return_picking_type_id': out_type_id}, context=context) int_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Internal Transfers'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': int_seq_id, 'default_location_src_id': wh_stock_loc.id, 'default_location_dest_id': wh_stock_loc.id, 'active': True, 'sequence': max_sequence + 2, 'color': color}, context=context) pack_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Pack'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': pack_seq_id, 'default_location_src_id': wh_pack_stock_loc.id, 'default_location_dest_id': output_loc.id, 'active': warehouse.delivery_steps == 'pick_pack_ship', 'sequence': max_sequence + 3, 'color': color}, context=context) pick_type_id = picking_type_obj.create(cr, uid, vals={ 'name': _('Pick'), 'warehouse_id': warehouse.id, 'code': 'internal', 'sequence_id': pick_seq_id, 'default_location_src_id': wh_stock_loc.id, 'default_location_dest_id': output_loc.id if warehouse.delivery_steps == 'pick_ship' else wh_pack_stock_loc.id, 'active': warehouse.delivery_steps != 'ship_only', 'sequence': max_sequence + 2, 'color': color}, context=context) #write picking types on WH vals = { 'in_type_id': in_type_id, 'out_type_id': out_type_id, 'pack_type_id': pack_type_id, 'pick_type_id': pick_type_id, 'int_type_id': int_type_id, } super(stock_warehouse, self).write(cr, uid, warehouse.id, vals=vals, context=context) def create(self, cr, uid, vals, context=None): if context is None: context = {} if vals is None: vals = {} data_obj = self.pool.get('ir.model.data') seq_obj = self.pool.get('ir.sequence') picking_type_obj = self.pool.get('stock.picking.type') location_obj = self.pool.get('stock.location') #create view location for warehouse loc_vals = { 'name': _(vals.get('code')), 'usage': 'view', 'location_id': data_obj.get_object_reference(cr, uid, 'stock', 'stock_location_locations')[1], } if vals.get('company_id'): loc_vals['company_id'] = vals.get('company_id') wh_loc_id = location_obj.create(cr, uid, loc_vals, context=context) vals['view_location_id'] = wh_loc_id #create all the locations def_values = self.default_get(cr, uid, {'reception_steps', 'delivery_steps'}) reception_steps = vals.get('reception_steps', def_values['reception_steps']) delivery_steps = vals.get('delivery_steps', def_values['delivery_steps']) context_with_inactive = context.copy()
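# --- Editor's note (sketch, describing assumed ORM behaviour) ---
# Setting 'active_test' to False in the context below disables the implicit
# ('active', '=', True) filter the ORM adds to searches, so the sub-locations
# created next (e.g. Input or Packing Zone) remain reachable even when they
# are created inactive because the warehouse uses fewer reception/delivery steps.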
context_with_inactive['active_test'] = False sub_locations = [ {'name': _('Stock'), 'active': True, 'field': 'lot_stock_id'}, {'name': _('Input'), 'active': reception_steps != 'one_step', 'field': 'wh_input_stock_loc_id'}, {'name': _('Quality Control'), 'active': reception_steps == 'three_steps', 'field': 'wh_qc_stock_loc_id'}, {'name': _('Output'), 'active': delivery_steps != 'ship_only', 'field': 'wh_output_stock_loc_id'}, {'name': _('Packing Zone'), 'active': delivery_steps == 'pick_pack_ship', 'field': 'wh_pack_stock_loc_id'}, ] for values in sub_locations: loc_vals = { 'name': values['name'], 'usage': 'internal', 'location_id': wh_loc_id, 'active': values['active'], } if vals.get('company_id'): loc_vals['company_id'] = vals.get('company_id') location_id = location_obj.create(cr, uid, loc_vals, context=context_with_inactive) vals[values['field']] = location_id #create WH new_id = super(stock_warehouse, self).create(cr, uid, vals=vals, context=context) warehouse = self.browse(cr, uid, new_id, context=context) self.create_sequences_and_picking_types(cr, uid, warehouse, context=context) #create routes and push/pull rules new_objects_dict = self.create_routes(cr, uid, new_id, warehouse, context=context) self.write(cr, uid, warehouse.id, new_objects_dict, context=context) return new_id def _format_rulename(self, cr, uid, obj, from_loc, dest_loc, context=None): return obj.code + ': ' + from_loc.name + ' -> ' + dest_loc.name def _format_routename(self, cr, uid, obj, name, context=None): return obj.name + ': ' + name def get_routes_dict(self, cr, uid, ids, warehouse, context=None): #fetch customer and supplier locations, for references customer_loc, supplier_loc = self._get_partner_locations(cr, uid, ids, context=context) return { 'one_step': (_('Receipt in 1 step'), []), 'two_steps': (_('Receipt in 2 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]), 'three_steps': (_('Receipt in 3 steps'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_qc_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_qc_stock_loc_id, warehouse.lot_stock_id, warehouse.int_type_id.id)]), 'crossdock': (_('Cross-Dock'), [(warehouse.wh_input_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.int_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), 'ship_only': (_('Ship Only'), [(warehouse.lot_stock_id, customer_loc, warehouse.out_type_id.id)]), 'pick_ship': (_('Pick + Ship'), [(warehouse.lot_stock_id, warehouse.wh_output_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), 'pick_pack_ship': (_('Pick + Pack + Ship'), [(warehouse.lot_stock_id, warehouse.wh_pack_stock_loc_id, warehouse.pick_type_id.id), (warehouse.wh_pack_stock_loc_id, warehouse.wh_output_stock_loc_id, warehouse.pack_type_id.id), (warehouse.wh_output_stock_loc_id, customer_loc, warehouse.out_type_id.id)]), } def _handle_renaming(self, cr, uid, warehouse, name, code, context=None): location_obj = self.pool.get('stock.location') route_obj = self.pool.get('stock.location.route') pull_obj = self.pool.get('procurement.rule') push_obj = self.pool.get('stock.location.path') #rename location location_id = warehouse.lot_stock_id.location_id.id location_obj.write(cr, uid, location_id, {'name': code}, context=context) #rename route and push-pull rules for route in warehouse.route_ids: route_obj.write(cr, uid, route.id, {'name': route.name.replace(warehouse.name, name, 1)}, context=context) for pull in 
route.pull_ids: pull_obj.write(cr, uid, pull.id, {'name': pull.name.replace(warehouse.name, name, 1)}, context=context) for push in route.push_ids: push_obj.write(cr, uid, push.id, {'name': push.name.replace(warehouse.name, name, 1)}, context=context) #change the mto pull rule name if warehouse.mto_pull_id.id: pull_obj.write(cr, uid, warehouse.mto_pull_id.id, {'name': warehouse.mto_pull_id.name.replace(warehouse.name, name, 1)}, context=context) def _check_delivery_resupply(self, cr, uid, warehouse, new_location, change_to_multiple, context=None): """ Will check if the resupply routes from this warehouse follow the changes of number of delivery steps """ #Check routes that are being delivered by this warehouse and change the rule going to the transit location route_obj = self.pool.get("stock.location.route") pull_obj = self.pool.get("procurement.rule") routes = route_obj.search(cr, uid, [('supplier_wh_id','=', warehouse.id)], context=context) pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_id.usage', '=', 'transit')], context=context) if pulls: pull_obj.write(cr, uid, pulls, {'location_src_id': new_location, 'procure_method': change_to_multiple and "make_to_order" or "make_to_stock"}, context=context) # Create or clean MTO rules mto_route_id = self._get_mto_route(cr, uid, context=context) if not change_to_multiple: # If single delivery we should create the necessary MTO rules for the resupply # pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context) pull_recs = pull_obj.browse(cr, uid, pulls, context=context) transfer_locs = list(set([x.location_id for x in pull_recs])) vals = [(warehouse.lot_stock_id, x, warehouse.out_type_id.id) for x in transfer_locs] mto_pull_vals = self._get_mto_pull_rule(cr, uid, warehouse, vals, context=context) for mto_pull_val in mto_pull_vals: pull_obj.create(cr, uid, mto_pull_val, context=context) else: # We need to delete all the MTO pull rules, otherwise they risk being used in the system pulls = pull_obj.search(cr, uid, ['&', ('route_id', '=', mto_route_id), ('location_id.usage', '=', 'transit'), ('location_src_id', '=', warehouse.lot_stock_id.id)], context=context) if pulls: pull_obj.unlink(cr, uid, pulls, context=context) def _check_reception_resupply(self, cr, uid, warehouse, new_location, context=None): """ Will check if the resupply routes to this warehouse follow the changes of number of receipt steps """ #Check routes that are resupplying this warehouse and change the rule coming from the transit location route_obj = self.pool.get("stock.location.route") pull_obj = self.pool.get("procurement.rule") routes = route_obj.search(cr, uid, [('supplied_wh_id','=', warehouse.id)], context=context) pulls = pull_obj.search(cr, uid, ['&', ('route_id', 'in', routes), ('location_src_id.usage', '=', 'transit')], context=context) if pulls: pull_obj.write(cr, uid, pulls, {'location_id': new_location}, context=context) def _check_resupply(self, cr, uid, warehouse, reception_new, delivery_new, context=None): if reception_new: old_val = warehouse.reception_steps new_val = reception_new change_to_one = (old_val != 'one_step' and new_val == 'one_step') change_to_multiple = (old_val == 'one_step' and new_val != 'one_step') if change_to_one or change_to_multiple: new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_input_stock_loc_id.id self._check_reception_resupply(cr, uid, warehouse, new_location, context=context)
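# --- Editor's note (hypothetical worked example) ---
# In the delivery block below, switching delivery_steps from 'pick_ship' to
# 'ship_only' gives change_to_one = True, so the resupply rules leaving this
# warehouse are repointed at lot_stock_id; the opposite switch repoints them
# at wh_output_stock_loc_id and _check_delivery_resupply flips their
# procure_method to 'make_to_order'.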
if delivery_new: old_val = warehouse.delivery_steps new_val = delivery_new change_to_one = (old_val != 'ship_only' and new_val == 'ship_only') change_to_multiple = (old_val == 'ship_only' and new_val != 'ship_only') if change_to_one or change_to_multiple: new_location = change_to_one and warehouse.lot_stock_id.id or warehouse.wh_output_stock_loc_id.id self._check_delivery_resupply(cr, uid, warehouse, new_location, change_to_multiple, context=context) def write(self, cr, uid, ids, vals, context=None): if context is None: context = {} if isinstance(ids, (int, long)): ids = [ids] seq_obj = self.pool.get('ir.sequence') route_obj = self.pool.get('stock.location.route') context_with_inactive = context.copy() context_with_inactive['active_test'] = False for warehouse in self.browse(cr, uid, ids, context=context_with_inactive): #first of all, check if we need to delete and recreate route if vals.get('reception_steps') or vals.get('delivery_steps'): #activate and deactivate location according to reception and delivery option self.switch_location(cr, uid, warehouse.id, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context) # switch between route self.change_route(cr, uid, ids, warehouse, vals.get('reception_steps', False), vals.get('delivery_steps', False), context=context_with_inactive) # Check if we need to change something to resupply warehouses and associated MTO rules self._check_resupply(cr, uid, warehouse, vals.get('reception_steps'), vals.get('delivery_steps'), context=context) if vals.get('code') or vals.get('name'): name = warehouse.name #rename sequence if vals.get('name'): name = vals.get('name', warehouse.name) self._handle_renaming(cr, uid, warehouse, name, vals.get('code', warehouse.code), context=context_with_inactive) if warehouse.in_type_id: seq_obj.write(cr, uid, warehouse.in_type_id.sequence_id.id, {'name': name + _(' Sequence in'), 'prefix': vals.get('code', warehouse.code) + '\IN\\'}, context=context) if warehouse.out_type_id: seq_obj.write(cr, uid, warehouse.out_type_id.sequence_id.id, {'name': name + _(' Sequence out'), 'prefix': vals.get('code', warehouse.code) + '\OUT\\'}, context=context) if warehouse.pack_type_id: seq_obj.write(cr, uid, warehouse.pack_type_id.sequence_id.id, {'name': name + _(' Sequence packing'), 'prefix': vals.get('code', warehouse.code) + '\PACK\\'}, context=context) if warehouse.pick_type_id: seq_obj.write(cr, uid, warehouse.pick_type_id.sequence_id.id, {'name': name + _(' Sequence picking'), 'prefix': vals.get('code', warehouse.code) + '\PICK\\'}, context=context) if warehouse.int_type_id: seq_obj.write(cr, uid, warehouse.int_type_id.sequence_id.id, {'name': name + _(' Sequence internal'), 'prefix': vals.get('code', warehouse.code) + '\INT\\'}, context=context) if vals.get('resupply_wh_ids') and not vals.get('resupply_route_ids'): for cmd in vals.get('resupply_wh_ids'): if cmd[0] == 6: new_ids = set(cmd[2]) old_ids = set([wh.id for wh in warehouse.resupply_wh_ids]) to_add_wh_ids = new_ids - old_ids if to_add_wh_ids: supplier_warehouses = self.browse(cr, uid, list(to_add_wh_ids), context=context) self._create_resupply_routes(cr, uid, warehouse, supplier_warehouses, warehouse.default_resupply_wh_id, context=context) to_remove_wh_ids = old_ids - new_ids if to_remove_wh_ids: to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', 'in', list(to_remove_wh_ids))], context=context) if to_remove_route_ids: route_obj.unlink(cr, uid, to_remove_route_ids, 
                    else:
                        #not implemented
                        pass
            if 'default_resupply_wh_id' in vals:
                if vals.get('default_resupply_wh_id') == warehouse.id:
                    raise osv.except_osv(_('Warning'), _('The default resupply warehouse should be different than the warehouse itself!'))
                if warehouse.default_resupply_wh_id:
                    #remove the existing resupplying route on the warehouse
                    to_remove_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', warehouse.default_resupply_wh_id.id)], context=context)
                    for inter_wh_route_id in to_remove_route_ids:
                        self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})
                if vals.get('default_resupply_wh_id'):
                    #assign the new resupplying route on all products
                    to_assign_route_ids = route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id), ('supplier_wh_id', '=', vals.get('default_resupply_wh_id'))], context=context)
                    for inter_wh_route_id in to_assign_route_ids:
                        self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
        return super(stock_warehouse, self).write(cr, uid, ids, vals=vals, context=context)

    def get_all_routes_for_wh(self, cr, uid, warehouse, context=None):
        route_obj = self.pool.get("stock.location.route")
        all_routes = [route.id for route in warehouse.route_ids]
        all_routes += route_obj.search(cr, uid, [('supplied_wh_id', '=', warehouse.id)], context=context)
        all_routes += [warehouse.mto_pull_id.route_id.id]
        return all_routes

    def view_all_routes_for_wh(self, cr, uid, ids, context=None):
        all_routes = []
        for wh in self.browse(cr, uid, ids, context=context):
            all_routes += self.get_all_routes_for_wh(cr, uid, wh, context=context)
        domain = [('id', 'in', all_routes)]
        return {
            'name': _('Warehouse\'s Routes'),
            'domain': domain,
            'res_model': 'stock.location.route',
            'type': 'ir.actions.act_window',
            'view_id': False,
            'view_mode': 'tree,form',
            'view_type': 'form',
            'limit': 20,
        }


class stock_location_path(osv.osv):
    _name = "stock.location.path"
    _description = "Pushed Flows"
    _order = "name"

    def _get_rules(self, cr, uid, ids, context=None):
        res = []
        for route in self.browse(cr, uid, ids, context=context):
            res += [x.id for x in route.push_ids]
        return res

    _columns = {
        'name': fields.char('Operation Name', required=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'route_id': fields.many2one('stock.location.route', 'Route'),
        'location_from_id': fields.many2one('stock.location', 'Source Location', ondelete='cascade', select=1, required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', ondelete='cascade', select=1, required=True),
        'delay': fields.integer('Delay (days)', help="Number of days needed to complete this transition"),
        'picking_type_id': fields.many2one('stock.picking.type', 'Type of the new Operation', required=True, help="This is the picking type associated with the different pickings"),
        'auto': fields.selection(
            [('auto', 'Automatic Move'), ('manual', 'Manual Operation'), ('transparent', 'Automatic No Step Added')],
            'Automatic Move', required=True, select=1,
            help="This is used to define paths the product has to follow within the location tree.\n"
                 "The 'Automatic Move' value will create a stock move after the current one that will be "
                 "validated automatically. With 'Manual Operation', the stock move has to be validated "
                 "by a worker. With 'Automatic No Step Added', the location is replaced in the original move."
            ),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move is cancelled or split, the move generated by this one will be too'),
        'active': fields.boolean('Active', help="If unchecked, it will allow you to hide the rule without removing it."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse'),
        'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
            store={
                'stock.location.route': (_get_rules, ['sequence'], 10),
                'stock.location.path': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
            }),
        'sequence': fields.integer('Sequence'),
    }
    _defaults = {
        'auto': 'auto',
        'delay': 0,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c),
        'propagate': True,
        'active': True,
    }

    def _prepare_push_apply(self, cr, uid, rule, move, context=None):
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        return {
            'origin': move.origin or move.picking_id.name or "/",
            'location_id': move.location_dest_id.id,
            'location_dest_id': rule.location_dest_id.id,
            'date': newdate,
            'company_id': rule.company_id and rule.company_id.id or False,
            'date_expected': newdate,
            'picking_id': False,
            'picking_type_id': rule.picking_type_id and rule.picking_type_id.id or False,
            'propagate': rule.propagate,
            'push_rule_id': rule.id,
            'warehouse_id': rule.warehouse_id and rule.warehouse_id.id or False,
        }

    def _apply(self, cr, uid, rule, move, context=None):
        move_obj = self.pool.get('stock.move')
        newdate = (datetime.strptime(move.date_expected, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta.relativedelta(days=rule.delay or 0)).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        if rule.auto == 'transparent':
            old_dest_location = move.location_dest_id.id
            move_obj.write(cr, uid, [move.id], {
                'date': newdate,
                'date_expected': newdate,
                'location_dest_id': rule.location_dest_id.id,
            })
            #avoid looping if a push rule is not well configured
            if rule.location_dest_id.id != old_dest_location:
                #call push_apply again to see if a next step is defined
                move_obj._push_apply(cr, uid, [move], context=context)
        else:
            vals = self._prepare_push_apply(cr, uid, rule, move, context=context)
            move_id = move_obj.copy(cr, uid, move.id, vals, context=context)
            move_obj.write(cr, uid, [move.id], {
                'move_dest_id': move_id,
            })
            move_obj.action_confirm(cr, uid, [move_id], context=None)


# -------------------------
# Packaging related stuff
# -------------------------

from openerp.report import report_sxw


class stock_package(osv.osv):
    """ These are the packages, containing quants and/or other packages """
    _name = "stock.quant.package"
    _description = "Physical Packages"
    _parent_name = "parent_id"
    _parent_store = True
    _parent_order = 'name'
    _order = 'parent_left'

    def name_get(self, cr, uid, ids, context=None):
        res = self._complete_name(cr, uid, ids, 'complete_name', None, context=context)
        return res.items()

    def _complete_name(self, cr, uid, ids, name, args, context=None):
        """ Forms complete name of location from parent location to child location.
        @return: Dictionary of values
        """
        res = {}
        for m in self.browse(cr, uid, ids, context=context):
            res[m.id] = m.name
            parent = m.parent_id
            while parent:
                res[m.id] = parent.name + ' / ' + res[m.id]
                parent = parent.parent_id
        return res

    def _get_packages(self, cr, uid, ids, context=None):
        """Returns packages from quants for store"""
        res = set()
        for quant in self.browse(cr, uid, ids, context=context):
            pack = quant.package_id
            while pack:
                res.add(pack.id)
                pack = pack.parent_id
        return list(res)

    def _get_package_info(self, cr, uid, ids, name, args, context=None):
        quant_obj = self.pool.get("stock.quant")
        default_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        res = dict((res_id, {'location_id': False, 'company_id': default_company_id, 'owner_id': False}) for res_id in ids)
        for pack in self.browse(cr, uid, ids, context=context):
            quants = quant_obj.search(cr, uid, [('package_id', 'child_of', pack.id)], context=context)
            if quants:
                quant = quant_obj.browse(cr, uid, quants[0], context=context)
                res[pack.id]['location_id'] = quant.location_id.id
                res[pack.id]['owner_id'] = quant.owner_id.id
                res[pack.id]['company_id'] = quant.company_id.id
            else:
                res[pack.id]['location_id'] = False
                res[pack.id]['owner_id'] = False
                res[pack.id]['company_id'] = False
        return res

    def _get_packages_to_relocate(self, cr, uid, ids, context=None):
        res = set()
        for pack in self.browse(cr, uid, ids, context=context):
            res.add(pack.id)
            if pack.parent_id:
                res.add(pack.parent_id.id)
        return list(res)

    _columns = {
        'name': fields.char('Package Reference', select=True, copy=False),
        'complete_name': fields.function(_complete_name, type='char', string="Package Name"),
        'parent_left': fields.integer('Left Parent', select=1),
        'parent_right': fields.integer('Right Parent', select=1),
        'packaging_id': fields.many2one('product.packaging', 'Packaging', help="This field should be completed only if everything inside the package shares the same product, otherwise it doesn't really make sense.", select=True),
        'ul_id': fields.many2one('product.ul', 'Logistic Unit'),
        'location_id': fields.function(_get_package_info, type='many2one', relation='stock.location', string='Location', multi="package",
                                       store={
                                           'stock.quant': (_get_packages, ['location_id'], 10),
                                           'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                       }, readonly=True, select=True),
        'quant_ids': fields.one2many('stock.quant', 'package_id', 'Bulk Content', readonly=True),
        'parent_id': fields.many2one('stock.quant.package', 'Parent Package', help="The package containing this item", ondelete='restrict', readonly=True),
        'children_ids': fields.one2many('stock.quant.package', 'parent_id', 'Contained Packages', readonly=True),
        'company_id': fields.function(_get_package_info, type="many2one", relation='res.company', string='Company', multi="package",
                                      store={
                                          'stock.quant': (_get_packages, ['company_id'], 10),
                                          'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                      }, readonly=True, select=True),
        'owner_id': fields.function(_get_package_info, type='many2one', relation='res.partner', string='Owner', multi="package",
                                    store={
                                        'stock.quant': (_get_packages, ['owner_id'], 10),
                                        'stock.quant.package': (_get_packages_to_relocate, ['quant_ids', 'children_ids', 'parent_id'], 10),
                                    }, readonly=True, select=True),
    }
    _defaults = {
        'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.quant.package') or _('Unknown Pack')
    }

    def _check_location_constraint(self, cr, uid, packs, context=None):
        '''Checks that all quants in a package are stored in the same location.
        This function cannot be used as a constraint because it needs to be checked on pack
        operations (they may not call write on the package).
        '''
        quant_obj = self.pool.get('stock.quant')
        for pack in packs:
            parent = pack
            while parent.parent_id:
                parent = parent.parent_id
            quant_ids = self.get_content(cr, uid, [parent.id], context=context)
            quants = [x for x in quant_obj.browse(cr, uid, quant_ids, context=context) if x.qty > 0]
            location_id = quants and quants[0].location_id.id or False
            # use all() so the check actually fires; a bare list comprehension is always truthy
            # for a non-empty package and never raised
            if not all(quant.location_id.id == location_id for quant in quants):
                raise osv.except_osv(_('Error'), _('Everything inside a package should be in the same location'))
        return True

    def action_print(self, cr, uid, ids, context=None):
        context = dict(context or {}, active_ids=ids)
        return self.pool.get("report").get_action(cr, uid, ids, 'stock.report_package_barcode_small', context=context)

    def unpack(self, cr, uid, ids, context=None):
        quant_obj = self.pool.get('stock.quant')
        for package in self.browse(cr, uid, ids, context=context):
            quant_ids = [quant.id for quant in package.quant_ids]
            quant_obj.write(cr, uid, quant_ids, {'package_id': package.parent_id.id or False}, context=context)
            children_package_ids = [child_package.id for child_package in package.children_ids]
            self.write(cr, uid, children_package_ids, {'parent_id': package.parent_id.id or False}, context=context)
        #delete current package since it contains nothing anymore
        self.unlink(cr, uid, ids, context=context)
        return self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'action_package_view', context=context)

    def get_content(self, cr, uid, ids, context=None):
        child_package_ids = self.search(cr, uid, [('id', 'child_of', ids)], context=context)
        return self.pool.get('stock.quant').search(cr, uid, [('package_id', 'in', child_package_ids)], context=context)

    def get_content_package(self, cr, uid, ids, context=None):
        quants_ids = self.get_content(cr, uid, ids, context=context)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'stock', 'quantsact', context=context)
        res['domain'] = [('id', 'in', quants_ids)]
        return res

    def _get_product_total_qty(self, cr, uid, package_record, product_id, context=None):
        '''Find the total quantity of the given product 'product_id' inside the given package 'package_record'.'''
        quant_obj = self.pool.get('stock.quant')
        all_quant_ids = self.get_content(cr, uid, [package_record.id], context=context)
        total = 0
        for quant in quant_obj.browse(cr, uid, all_quant_ids, context=context):
            if quant.product_id.id == product_id:
                total += quant.qty
        return total

    def _get_all_products_quantities(self, cr, uid, package_id, context=None):
        '''This function computes the different product quantities for the given package.'''
        quant_obj = self.pool.get('stock.quant')
        res = {}
        for quant in quant_obj.browse(cr, uid, self.get_content(cr, uid, package_id, context=context)):
            if quant.product_id.id not in res:
                res[quant.product_id.id] = 0
            res[quant.product_id.id] += quant.qty
        return res

    def copy_pack(self, cr, uid, id, default_pack_values=None, default=None, context=None):
        stock_pack_operation_obj = self.pool.get('stock.pack.operation')
        if default is None:
            default = {}
        new_package_id = self.copy(cr, uid, id, default_pack_values, context=context)
        default['result_package_id'] = new_package_id
        op_ids = stock_pack_operation_obj.search(cr, uid, [('result_package_id', '=', id)], context=context)
        for op_id in op_ids:
            stock_pack_operation_obj.copy(cr, uid, op_id, default, context=context)

class stock_pack_operation(osv.osv):
    _name = "stock.pack.operation"
    _description = "Packing Operation"

    def _get_remaining_prod_quantities(self, cr, uid, operation, context=None):
        '''Get the remaining quantities per product on an operation with a package. This function returns a dictionary.'''
        #if the operation doesn't concern a package, it's not relevant to call this function
        if not operation.package_id or operation.product_id:
            return {operation.product_id.id: operation.remaining_qty}
        #get the total of products the package contains
        res = self.pool.get('stock.quant.package')._get_all_products_quantities(cr, uid, operation.package_id.id, context=context)
        #reduce by the quantities linked to a move
        for record in operation.linked_move_operation_ids:
            if record.move_id.product_id.id not in res:
                res[record.move_id.product_id.id] = 0
            res[record.move_id.product_id.id] -= record.qty
        return res

    def _get_remaining_qty(self, cr, uid, ids, name, args, context=None):
        uom_obj = self.pool.get('product.uom')
        res = {}
        for ops in self.browse(cr, uid, ids, context=context):
            res[ops.id] = 0
            if ops.package_id and not ops.product_id:
                #don't try to compute the remaining quantity for packages because it's not relevant (a package could include different products).
                #should use _get_remaining_prod_quantities instead
                continue
            else:
                qty = ops.product_qty
                if ops.product_uom_id:
                    qty = uom_obj._compute_qty_obj(cr, uid, ops.product_uom_id, ops.product_qty, ops.product_id.uom_id, context=context)
                for record in ops.linked_move_operation_ids:
                    qty -= record.qty
                res[ops.id] = float_round(qty, precision_rounding=ops.product_id.uom_id.rounding)
        return res

    def product_id_change(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        res = self.on_change_tests(cr, uid, ids, product_id, product_uom_id, product_qty, context=context)
        if product_id and not product_uom_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            res['value']['product_uom_id'] = product.uom_id.id
        return res

    def on_change_tests(self, cr, uid, ids, product_id, product_uom_id, product_qty, context=None):
        res = {'value': {}}
        uom_obj = self.pool.get('product.uom')
        if product_id:
            product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            product_uom_id = product_uom_id or product.uom_id.id
            selected_uom = uom_obj.browse(cr, uid, product_uom_id, context=context)
            if selected_uom.category_id.id != product.uom_id.category_id.id:
                res['warning'] = {
                    'title': _('Warning: wrong UoM!'),
                    'message': _('The selected UoM for product %s is not compatible with the UoM set on the product form. \nPlease choose an UoM within the same UoM category.') % (product.name)
                }
            if product_qty and 'warning' not in res:
                rounded_qty = uom_obj._compute_qty(cr, uid, product_uom_id, product_qty, product_uom_id, round=True)
                if rounded_qty != product_qty:
                    res['warning'] = {
                        'title': _('Warning: wrong quantity!'),
                        'message': _('The chosen quantity for product %s is not compatible with the UoM rounding. It will be automatically converted at confirmation') % (product.name)
                    }
        return res

    _columns = {
        'picking_id': fields.many2one('stock.picking', 'Stock Picking', help='The stock operation where the packing has been made', required=True),
        'product_id': fields.many2one('product.product', 'Product', ondelete="CASCADE"),  # 1
        'product_uom_id': fields.many2one('product.uom', 'Product Unit of Measure'),
        'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
        'qty_done': fields.float('Quantity Processed', digits_compute=dp.get_precision('Product Unit of Measure')),
        'package_id': fields.many2one('stock.quant.package', 'Source Package'),  # 2
        'lot_id': fields.many2one('stock.production.lot', 'Lot/Serial Number'),
        'result_package_id': fields.many2one('stock.quant.package', 'Destination Package', help="If set, the operations are packed into this package", required=False, ondelete='cascade'),
        'date': fields.datetime('Date', required=True),
        'owner_id': fields.many2one('res.partner', 'Owner', help="Owner of the quants"),
        #'update_cost': fields.boolean('Need cost update'),
        'cost': fields.float("Cost", help="Unit Cost for this product line"),
        'currency': fields.many2one('res.currency', string="Currency", help="Currency in which Unit cost is expressed", ondelete='CASCADE'),
        'linked_move_operation_ids': fields.one2many('stock.move.operation.link', 'operation_id', string='Linked Moves', readonly=True, help='Moves impacted by this operation for the computation of the remaining quantities'),
        'remaining_qty': fields.function(_get_remaining_qty, type='float', digits=0, string="Remaining Qty", help="Remaining quantity in default UoM according to moves matched with this operation."),
        'location_id': fields.many2one('stock.location', 'Source Location', required=True),
        'location_dest_id': fields.many2one('stock.location', 'Destination Location', required=True),
        'processed': fields.selection([('true', 'Yes'), ('false', 'No')], 'Has been processed?', required=True),
    }
    _defaults = {
        'date': fields.date.context_today,
        'qty_done': 0,
        'processed': lambda *a: 'false',
    }

    def write(self, cr, uid, ids, vals, context=None):
        context = context or {}
        res = super(stock_pack_operation, self).write(cr, uid, ids, vals, context=context)
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get("no_recompute"):
            pickings = vals.get('picking_id') and [vals['picking_id']] or list(set([x.picking_id.id for x in self.browse(cr, uid, ids, context=context)]))
            self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, pickings, context=context)
        return res

    def create(self, cr, uid, vals, context=None):
        context = context or {}
        res_id = super(stock_pack_operation, self).create(cr, uid, vals, context=context)
        if vals.get("picking_id") and not context.get("no_recompute"):
            self.pool.get("stock.picking").do_recompute_remaining_quantities(cr, uid, [vals['picking_id']], context=context)
        return res_id

    def action_drop_down(self, cr, uid, ids, context=None):
        '''Used by the barcode interface to state that a pack_operation has been moved from the
        source location to the destination location. If qty_done is less than product_qty, the
        operation is split in two so that the quantity actually moved can be processed.
        '''
        processed_ids = []
        move_obj = self.pool.get("stock.move")
        for pack_op in self.browse(cr, uid, ids, context=None):
            if pack_op.product_id and pack_op.location_id and pack_op.location_dest_id:
                move_obj.check_tracking_product(cr, uid, pack_op.product_id, pack_op.lot_id.id, pack_op.location_id, pack_op.location_dest_id, context=context)
            op = pack_op.id
            if pack_op.qty_done < pack_op.product_qty:
                # we split the operation in two
                op = self.copy(cr, uid, pack_op.id, {'product_qty': pack_op.qty_done, 'qty_done': pack_op.qty_done}, context=context)
                self.write(cr, uid, [pack_op.id], {'product_qty': pack_op.product_qty - pack_op.qty_done, 'qty_done': 0, 'lot_id': False}, context=context)
            processed_ids.append(op)
        self.write(cr, uid, processed_ids, {'processed': 'true'}, context=context)

    def create_and_assign_lot(self, cr, uid, id, name, context=None):
        ''' Used by barcode interface to create a new lot and assign it to the operation '''
        obj = self.browse(cr, uid, id, context)
        product_id = obj.product_id.id
        val = {'product_id': product_id}
        new_lot_id = False
        if name:
            lots = self.pool.get('stock.production.lot').search(cr, uid, ['&', ('name', '=', name), ('product_id', '=', product_id)], context=context)
            if lots:
                new_lot_id = lots[0]
            val.update({'name': name})
        if not new_lot_id:
            new_lot_id = self.pool.get('stock.production.lot').create(cr, uid, val, context=context)
        self.write(cr, uid, id, {'lot_id': new_lot_id}, context=context)

    def _search_and_increment(self, cr, uid, picking_id, domain, filter_visible=False, visible_op_ids=False, increment=True, context=None):
        '''Search for an operation with the given 'domain' in a picking; if it exists, increment
        its quantity (+1), otherwise create it.

        :param domain: list of tuples directly reusable as a domain

        context can receive a key 'current_package_id' with the package to consider for this operation.
        returns True
        '''
        if context is None:
            context = {}

        #if current_package_id is given in the context, we increase the number of items in this package
        package_clause = [('result_package_id', '=', context.get('current_package_id', False))]
        existing_operation_ids = self.search(cr, uid, [('picking_id', '=', picking_id)] + domain + package_clause, context=context)
        todo_operation_ids = []
        if existing_operation_ids:
            if filter_visible:
                todo_operation_ids = [val for val in existing_operation_ids if val in visible_op_ids]
            else:
                todo_operation_ids = existing_operation_ids
        if todo_operation_ids:
            #existing operation found for the given domain and picking => increment its quantity
            operation_id = todo_operation_ids[0]
            op_obj = self.browse(cr, uid, operation_id, context=context)
            qty = op_obj.qty_done
            if increment:
                qty += 1
            else:
                qty -= 1 if qty >= 1 else 0
                if qty == 0 and op_obj.product_qty == 0:
                    #we have a line with 0 qty set, so delete it
                    self.unlink(cr, uid, [operation_id], context=context)
                    return False
            self.write(cr, uid, [operation_id], {'qty_done': qty}, context=context)
        else:
            #no existing operation found for the given domain and picking => create a new one
            picking_obj = self.pool.get("stock.picking")
            picking = picking_obj.browse(cr, uid, picking_id, context=context)
            values = {
                'picking_id': picking_id,
                'product_qty': 0,
                'location_id': picking.location_id.id,
                'location_dest_id': picking.location_dest_id.id,
                'qty_done': 1,
            }
            for key in domain:
                var_name, dummy, value = key
                uom_id = False
                if var_name == 'product_id':
                    uom_id = self.pool.get('product.product').browse(cr, uid, value, context=context).uom_id.id
                update_dict = {var_name: value}
                if uom_id:
                    update_dict['product_uom_id'] = uom_id
                values.update(update_dict)
            operation_id = self.create(cr, uid, values, context=context)
        return operation_id


class stock_move_operation_link(osv.osv):
    """
    Table making the link between stock.moves and stock.pack.operations to compute the
    remaining quantities on each of these objects
    """
    _name = "stock.move.operation.link"
"stock.move.operation.link" _description = "Link between stock moves and pack operations" _columns = { 'qty': fields.float('Quantity', help="Quantity of products to consider when talking about the contribution of this pack operation towards the remaining quantity of the move (and inverse). Given in the product main uom."), 'operation_id': fields.many2one('stock.pack.operation', 'Operation', required=True, ondelete="cascade"), 'move_id': fields.many2one('stock.move', 'Move', required=True, ondelete="cascade"), 'reserved_quant_id': fields.many2one('stock.quant', 'Reserved Quant', help="Technical field containing the quant that created this link between an operation and a stock move. Used at the stock_move_obj.action_done() time to avoid seeking a matching quant again"), } def get_specific_domain(self, cr, uid, record, context=None): '''Returns the specific domain to consider for quant selection in action_assign() or action_done() of stock.move, having the record given as parameter making the link between the stock move and a pack operation''' op = record.operation_id domain = [] if op.package_id and op.product_id: #if removing a product from a box, we restrict the choice of quants to this box domain.append(('package_id', '=', op.package_id.id)) elif op.package_id: #if moving a box, we allow to take everything from inside boxes as well domain.append(('package_id', 'child_of', [op.package_id.id])) else: #if not given any information about package, we don't open boxes domain.append(('package_id', '=', False)) #if lot info is given, we restrict choice to this lot otherwise we can take any if op.lot_id: domain.append(('lot_id', '=', op.lot_id.id)) #if owner info is given, we restrict to this owner otherwise we restrict to no owner if op.owner_id: domain.append(('owner_id', '=', op.owner_id.id)) else: domain.append(('owner_id', '=', False)) return domain class stock_warehouse_orderpoint(osv.osv): """ Defines Minimum stock rules. """ _name = "stock.warehouse.orderpoint" _description = "Minimum Inventory Rule" def subtract_procurements(self, cr, uid, orderpoint, context=None): '''This function returns quantity of product that needs to be deducted from the orderpoint computed quantity because there's already a procurement created with aim to fulfill it. 
        qty = 0
        uom_obj = self.pool.get("product.uom")
        for procurement in orderpoint.procurement_ids:
            if procurement.state in ('cancel', 'done'):
                continue
            procurement_qty = uom_obj._compute_qty_obj(cr, uid, procurement.product_uom, procurement.product_qty, procurement.product_id.uom_id, context=context)
            for move in procurement.move_ids:
                #need to add the moves in draft as they aren't in the virtual quantity + moves that have not been created yet
                if move.state not in ('draft',):
                    #if move is already confirmed, assigned or done, the virtual stock is already taking this into account so it shouldn't be deducted
                    procurement_qty -= move.product_qty
            qty += procurement_qty
        return qty

    def _check_product_uom(self, cr, uid, ids, context=None):
        '''
        Check if the UoM has the same category as the product standard UoM
        '''
        if not context:
            context = {}
        for rule in self.browse(cr, uid, ids, context=context):
            if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
                return False
        return True

    def action_view_proc_to_process(self, cr, uid, ids, context=None):
        act_obj = self.pool.get('ir.actions.act_window')
        mod_obj = self.pool.get('ir.model.data')
        proc_ids = self.pool.get('procurement.order').search(cr, uid, [('orderpoint_id', 'in', ids), ('state', 'not in', ('done', 'cancel'))], context=context)
        result = mod_obj.get_object_reference(cr, uid, 'procurement', 'do_view_procurements')
        if not result:
            return False
        result = act_obj.read(cr, uid, [result[1]], context=context)[0]
        result['domain'] = "[('id', 'in', [" + ','.join(map(str, proc_ids)) + "])]"
        return result

    _columns = {
        'name': fields.char('Name', required=True, copy=False),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
        'logic': fields.selection([('max', 'Order to Max'), ('price', 'Best price (not yet active!)')], 'Reordering Mode', required=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
        'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
        'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type', '=', 'product')]),
        'product_uom': fields.related('product_id', 'uom_id', type='many2one', relation='product.uom', string='Product Unit of Measure', readonly=True, required=True),
        'product_min_qty': fields.float('Minimum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="When the virtual stock goes below the Min Quantity specified for this field, Odoo generates "
                 "a procurement to bring the forecasted quantity to the Max Quantity."),
        'product_max_qty': fields.float('Maximum Quantity', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="When the virtual stock goes below the Min Quantity, Odoo generates "
                 "a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
        'qty_multiple': fields.float('Qty Multiple', required=True, digits_compute=dp.get_precision('Product Unit of Measure'),
            help="The procurement quantity will be rounded up to this multiple. If it is 0, the exact quantity will be used."),
        'procurement_ids': fields.one2many('procurement.order', 'orderpoint_id', 'Created Procurements'),
        'group_id': fields.many2one('procurement.group', 'Procurement Group', help="Moves created through this orderpoint will be put in this procurement group. If none is given, the moves generated by procurement rules will be grouped into one big picking.", copy=False),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    _defaults = {
        'active': lambda *a: 1,
        'logic': lambda *a: 'max',
        'qty_multiple': lambda *a: 1,
        'name': lambda self, cr, uid, context: self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
        'product_uom': lambda self, cr, uid, context: context.get('product_uom', False),
        'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=context)
    }
    _sql_constraints = [
        ('qty_multiple_check', 'CHECK( qty_multiple >= 0 )', 'Qty Multiple must be greater than or equal to zero.'),
    ]
    _constraints = [
        (_check_product_uom, 'You have to select a product unit of measure in the same category as the default unit of measure of the product', ['product_id', 'product_uom']),
    ]

    def default_get(self, cr, uid, fields, context=None):
        warehouse_obj = self.pool.get('stock.warehouse')
        res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
        # default 'warehouse_id' and 'location_id'
        if 'warehouse_id' not in res:
            warehouse_ids = res.get('company_id') and warehouse_obj.search(cr, uid, [('company_id', '=', res['company_id'])], limit=1, context=context) or []
            res['warehouse_id'] = warehouse_ids and warehouse_ids[0] or False
        if 'location_id' not in res:
            res['location_id'] = res.get('warehouse_id') and warehouse_obj.browse(cr, uid, res['warehouse_id'], context).lot_stock_id.id or False
        return res

    def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
        """ Finds location id for changed warehouse.
        @param warehouse_id: Changed id of warehouse.
        @return: Dictionary of values.
        """
        if warehouse_id:
            w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
            v = {'location_id': w.lot_stock_id.id}
            return {'value': v}
        return {}

    def onchange_product_id(self, cr, uid, ids, product_id, context=None):
        """ Finds UoM for changed product.
        @param product_id: Changed id of product.
        @return: Dictionary of values.
""" if product_id: prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context) d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]} v = {'product_uom': prod.uom_id.id} return {'value': v, 'domain': d} return {'domain': {'product_uom': []}} class stock_picking_type(osv.osv): _name = "stock.picking.type" _description = "The picking type determines the picking view" _order = 'sequence' def open_barcode_interface(self, cr, uid, ids, context=None): final_url = "/barcode/web/#action=stock.ui&picking_type_id=" + str(ids[0]) if len(ids) else '0' return {'type': 'ir.actions.act_url', 'url': final_url, 'target': 'self'} def _get_tristate_values(self, cr, uid, ids, field_name, arg, context=None): picking_obj = self.pool.get('stock.picking') res = {} for picking_type_id in ids: #get last 10 pickings of this type picking_ids = picking_obj.search(cr, uid, [('picking_type_id', '=', picking_type_id), ('state', '=', 'done')], order='date_done desc', limit=10, context=context) tristates = [] for picking in picking_obj.browse(cr, uid, picking_ids, context=context): if picking.date_done > picking.date: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Late'), 'value': -1}) elif picking.backorder_id: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('Backorder exists'), 'value': 0}) else: tristates.insert(0, {'tooltip': picking.name or '' + ": " + _('OK'), 'value': 1}) res[picking_type_id] = json.dumps(tristates) return res def _get_picking_count(self, cr, uid, ids, field_names, arg, context=None): obj = self.pool.get('stock.picking') domains = { 'count_picking_draft': [('state', '=', 'draft')], 'count_picking_waiting': [('state', '=', 'confirmed')], 'count_picking_ready': [('state', 'in', ('assigned', 'partially_available'))], 'count_picking': [('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_late': [('min_date', '<', time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)), ('state', 'in', ('assigned', 'waiting', 'confirmed', 'partially_available'))], 'count_picking_backorders': [('backorder_id', '!=', False), ('state', 'in', ('confirmed', 'assigned', 'waiting', 'partially_available'))], } result = {} for field in domains: data = obj.read_group(cr, uid, domains[field] + [('state', 'not in', ('done', 'cancel')), ('picking_type_id', 'in', ids)], ['picking_type_id'], ['picking_type_id'], context=context) count = dict(map(lambda x: (x['picking_type_id'] and x['picking_type_id'][0], x['picking_type_id_count']), data)) for tid in ids: result.setdefault(tid, {})[field] = count.get(tid, 0) for tid in ids: if result[tid]['count_picking']: result[tid]['rate_picking_late'] = result[tid]['count_picking_late'] * 100 / result[tid]['count_picking'] result[tid]['rate_picking_backorders'] = result[tid]['count_picking_backorders'] * 100 / result[tid]['count_picking'] else: result[tid]['rate_picking_late'] = 0 result[tid]['rate_picking_backorders'] = 0 return result def onchange_picking_code(self, cr, uid, ids, picking_code=False): if not picking_code: return False obj_data = self.pool.get('ir.model.data') stock_loc = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_stock') result = { 'default_location_src_id': stock_loc, 'default_location_dest_id': stock_loc, } if picking_code == 'incoming': result['default_location_src_id'] = obj_data.xmlid_to_res_id(cr, uid, 'stock.stock_location_suppliers') elif picking_code == 'outgoing': result['default_location_dest_id'] = obj_data.xmlid_to_res_id(cr, uid, 
    def _get_name(self, cr, uid, ids, field_names, arg, context=None):
        return dict(self.name_get(cr, uid, ids, context=context))

    def name_get(self, cr, uid, ids, context=None):
        """Overrides orm name_get method to display 'Warehouse_name: PickingType_name' """
        if context is None:
            context = {}
        if not isinstance(ids, list):
            ids = [ids]
        res = []
        if not ids:
            return res
        for record in self.browse(cr, uid, ids, context=context):
            name = record.name
            if record.warehouse_id:
                name = record.warehouse_id.name + ': ' + name
            if context.get('special_shortened_wh_name'):
                if record.warehouse_id:
                    name = record.warehouse_id.name
                else:
                    name = _('Customer') + ' (' + record.name + ')'
            res.append((record.id, name))
        return res

    def _default_warehouse(self, cr, uid, context=None):
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        res = self.pool.get('stock.warehouse').search(cr, uid, [('company_id', '=', user.company_id.id)], limit=1, context=context)
        return res and res[0] or False

    _columns = {
        'name': fields.char('Picking Type Name', translate=True, required=True),
        'complete_name': fields.function(_get_name, type='char', string='Name'),
        'color': fields.integer('Color'),
        'sequence': fields.integer('Sequence', help="Used to order the 'All Operations' kanban view"),
        'sequence_id': fields.many2one('ir.sequence', 'Reference Sequence', required=True),
        'default_location_src_id': fields.many2one('stock.location', 'Default Source Location'),
        'default_location_dest_id': fields.many2one('stock.location', 'Default Destination Location'),
        'code': fields.selection([('incoming', 'Suppliers'), ('outgoing', 'Customers'), ('internal', 'Internal')], 'Type of Operation', required=True),
        'return_picking_type_id': fields.many2one('stock.picking.type', 'Picking Type for Returns'),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', ondelete='cascade'),
        'active': fields.boolean('Active'),
        # Statistics for the kanban view
        'last_done_picking': fields.function(_get_tristate_values, type='char', string='Last 10 Done Pickings'),
        'count_picking_draft': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'count_picking_ready': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'count_picking': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'count_picking_waiting': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'count_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'count_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'rate_picking_late': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
        'rate_picking_backorders': fields.function(_get_picking_count, type='integer', multi='_get_picking_count'),
    }
    _defaults = {
        'warehouse_id': _default_warehouse,
        'active': True,
    }

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
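# --- Editor's note: the lines below are an illustrative addition, not part of the
# original module. The write() override above manipulates relation fields through
# the standard OpenERP one2many/many2many command tuples; as a reminder of the
# convention it relies on:
#
#   (3, id)        -- cut the link to record `id` (the record itself is kept)
#   (4, id)        -- link an existing record `id`
#   (6, 0, [ids])  -- replace the whole set of linked records with `ids`
#
# e.g. the default-resupply handling attaches a route with
#   self.write(cr, uid, [warehouse.id], {'route_ids': [(4, inter_wh_route_id)]})
# and detaches one with
#   self.write(cr, uid, [warehouse.id], {'route_ids': [(3, inter_wh_route_id)]})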
agpl-3.0
ibotty/ansible
test/units/parsing/vault/test_vault.py
81
5965
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import getpass
import os
import shutil
import time
import tempfile
import six

from binascii import unhexlify
from binascii import hexlify
from nose.plugins.skip import SkipTest

from ansible.compat.tests import unittest
from ansible.utils.unicode import to_bytes, to_unicode

from ansible import errors
from ansible.parsing.vault import VaultLib

# Counter import fails for 2.0.1, requires >= 2.6.1 from pip
try:
    from Crypto.Util import Counter
    HAS_COUNTER = True
except ImportError:
    HAS_COUNTER = False

# KDF import fails for 2.0.1, requires >= 2.6.1 from pip
try:
    from Crypto.Protocol.KDF import PBKDF2
    HAS_PBKDF2 = True
except ImportError:
    HAS_PBKDF2 = False

# AES IMPORTS
try:
    from Crypto.Cipher import AES as AES
    HAS_AES = True
except ImportError:
    HAS_AES = False


class TestVaultLib(unittest.TestCase):

    def test_methods_exist(self):
        v = VaultLib('ansible')
        slots = ['is_encrypted',
                 'encrypt',
                 'decrypt',
                 '_format_output',
                 '_split_header']
        for slot in slots:
            assert hasattr(v, slot), "VaultLib is missing the %s method" % slot

    def test_is_encrypted(self):
        v = VaultLib(None)
        assert not v.is_encrypted(u"foobar"), "encryption check on plaintext failed"
        data = u"$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(b"ansible")
        assert v.is_encrypted(data), "encryption check on headered text failed"

    def test_format_output(self):
        v = VaultLib('ansible')
        v.cipher_name = "TEST"
        sensitive_data = "ansible"
        data = v._format_output(sensitive_data)
        lines = data.split(b'\n')
        assert len(lines) > 1, "failed to properly add header"
        header = to_unicode(lines[0])
        assert header.endswith(';TEST'), "header does not end with cipher name"
        header_parts = header.split(';')
        assert len(header_parts) == 3, "header has the wrong number of parts"
        assert header_parts[0] == '$ANSIBLE_VAULT', "header does not start with $ANSIBLE_VAULT"
        assert header_parts[1] == v.b_version, "header version is incorrect"
        assert header_parts[2] == 'TEST', "header does not end with cipher name"

    def test_split_header(self):
        v = VaultLib('ansible')
        data = b"$ANSIBLE_VAULT;9.9;TEST\nansible"
        rdata = v._split_header(data)
        lines = rdata.split(b'\n')
        assert lines[0] == b"ansible"
        assert v.cipher_name == 'TEST', "cipher name was not set"
        assert v.b_version == "9.9"

    def test_encrypt_decrypt_aes(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = u'AES'
        # AES encryption code has been removed, so this is old output for
        # AES-encrypted 'foobar' with password 'ansible'.
        enc_data = '$ANSIBLE_VAULT;1.1;AES\n53616c7465645f5fc107ce1ef4d7b455e038a13b053225776458052f8f8f332d554809d3f150bfa3\nfe3db930508b65e0ff5947e4386b79af8ab094017629590ef6ba486814cf70f8e4ab0ed0c7d2587e\n786a5a15efeb787e1958cbdd480d076c\n'
        dec_data = v.decrypt(enc_data)
        assert dec_data == "foobar", "decryption failed"

    def test_encrypt_decrypt_aes256(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES256'
        enc_data = v.encrypt("foobar")
        dec_data = v.decrypt(enc_data)
        assert enc_data != "foobar", "encryption failed"
        assert dec_data == "foobar", "decryption failed"

    def test_encrypt_encrypted(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        v.cipher_name = 'AES'
        data = "$ANSIBLE_VAULT;9.9;TEST\n%s" % hexlify(six.b("ansible"))
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to encrypt data with a header"

    def test_decrypt_decrypted(self):
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            dec_data = v.decrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert error_hit, "No error was thrown when trying to decrypt data without a header"

    def test_cipher_not_set(self):
        # not setting the cipher should default to AES256
        if not HAS_AES or not HAS_COUNTER or not HAS_PBKDF2:
            raise SkipTest
        v = VaultLib('ansible')
        data = "ansible"
        error_hit = False
        try:
            enc_data = v.encrypt(data)
        except errors.AnsibleError as e:
            error_hit = True
        assert not error_hit, "An error was thrown when trying to encrypt data without the cipher set"
        assert v.cipher_name == "AES256", "cipher name is not set to AES256: %s" % v.cipher_name
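
# --- Editor's note: a minimal, hypothetical sketch (not part of the original test
# file) of the header layout the tests above exercise. A vaulted blob starts with
# "$ANSIBLE_VAULT;<version>;<cipher>" on its first line, followed by hex-encoded
# payload lines. `split_vault_header` is an illustration only, not the VaultLib
# implementation.
def split_vault_header(b_data):
    # the first line carries the metadata, the remaining lines are the payload
    lines = b_data.split(b'\n')
    tag, version, cipher_name = to_unicode(lines[0]).split(';')
    assert tag == '$ANSIBLE_VAULT', 'not a vaulted blob'
    return version, cipher_name, b'\n'.join(lines[1:])

# split_vault_header(b"$ANSIBLE_VAULT;9.9;TEST\nansible")
# -> ('9.9', 'TEST', b'ansible')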
gpl-3.0
Swedemon/plugin.video.nba.video
default.py
1
7312
import xbmc, xbmcaddon, xbmcvfs, xbmcgui, xbmcplugin, urllib2, urllib, re, string, sys, os, traceback, time, buggalo, datetime, _strptime
import simplejson as json

plugin = 'NBA Video'
__author__ = 'stacked <stacked.xbmc@gmail.com>'
__url__ = 'http://code.google.com/p/plugin/'
__date__ = '01-06-2013'
__version__ = '2.0.3'
settings = xbmcaddon.Addon( id = 'plugin.video.nba.video' )
buggalo.SUBMIT_URL = 'http://www.xbmc.byethost17.com/submit.php'
dbg = False
dbglevel = 3

highlights_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'highlights.png' )
topplays_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'topplays.png' )
teams_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'teams.png' )
search_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'search.png' )
next_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'next.png' )
fanart = os.path.join( settings.getAddonInfo( 'path' ), 'fanart.jpg' )

import CommonFunctions
common = CommonFunctions
common.plugin = plugin + ' ' + __version__

from addonfunc import addListItem, playListItem, getUrl, getPage, setViewMode, getParameters, retry

@retry(IndexError)
def build_main_directory():
    main = [
        ( settings.getLocalizedString( 30000 ), highlights_thumb, 'games%2F*%7Cchannels%2Fplayoffs', '0', 'highlights' ),
        ( settings.getLocalizedString( 30001 ), topplays_thumb, 'channels%2Ftop_plays', '0', 'top_plays' ),
        ( settings.getLocalizedString( 30002 ), teams_thumb, 'null', '1', 'null' ),
        ( settings.getLocalizedString( 30003 ), search_thumb, 'search', '0', 'null' )
    ]
    for name, thumbnailImage, url, mode, section in main:
        u = { 'mode': mode, 'url': url, 'section': section }
        infoLabels = { "Title": name, "Plot": name }
        addListItem(label = '[ ' + name + ' ]', image = thumbnailImage, url = u, isFolder = True, infoLabels = infoLabels, fanart = fanart)
    build_video_directory('channels%2F*%7Cgames%2F*%7Cflip_video_diaries%7Cfiba', 1, 'all_videos')
    setViewMode("503")
    xbmcplugin.endOfDirectory( int( sys.argv[1] ) )

@retry(IndexError)
def build_video_directory( url, page, section ):
    if url == 'search':
        search = True
        text = common.getUserInput(settings.getLocalizedString( 30003 ), "")
        if text == None:
            return
        url = 'channels%2F*%7Cgames%2F*%7Cflip_video_diaries%7Cfiba&text=' + urllib.quote( text )
    else:
        search = False
    if url == 'team':
        team = True
        text = section
        if text == None:
            return
        url = 'channels%2F*%7Cgames%2F*%7Cflip_video_diaries%7Cfiba&text=' + urllib.quote( text )
    else:
        team = False
    save_url = url
    if page == 1 and search != True and team != True:
        base = 'http://www.nba.com/.element/ssi/auto/2.0/aps/video/playlists/' + section + '.html?section='
    else:
        base = 'http://searchapp2.nba.com/nba-search/query.jsp?section='
    url = base + url + '&sort=recent&hide=true&type=advvideo&npp=15&start=' + str(1 + (15 * (page - 1)))
    #print 'NBA-URL=' + str(url)
    html = getUrl(url)
    textarea = common.parseDOM(html, "textarea", attrs = { "id": "jsCode" })[0]
    content = textarea.replace("\\'","\\\\'").replace('\\\\"','\\\\\\"').replace('\\n','').replace('\\t','').replace('\\x','')
    query = json.loads(content)
    data = query['results'][0]
    count = query['metaResults']['advvideo']
    if len(data) == 0:
        dialog = xbmcgui.Dialog()
        ok = dialog.ok( plugin, settings.getLocalizedString( 30004 ) + '\n' + settings.getLocalizedString( 30005 ) )
        return
    for results in data:
        mediaDateUts = time.ctime(float(results['mediaDateUts']))
        date = datetime.datetime.fromtimestamp(time.mktime(time.strptime(mediaDateUts, '%a %b %d %H:%M:%S %Y'))).strftime('%d.%m.%Y')
        title = results['title'].replace('\\','')
        thumb = results['metadata']['media']['thumbnail']['url'].replace('136x96','576x324')
        length = results['metadata']['video']['length'].split(':')
        duration = int(length[0]) * 60 + int(length[1])
        plot = results['metadata']['media']['excerpt'].replace('\\','')
        url = results['id'].replace('/video/', '').replace('/index.html','')
        infoLabels = { "Title": title, "Plot": plot, "Aired": date }
        u = { 'mode': '3', 'name': title, 'url': url, 'thumb': thumb, 'plot': plot }
        addListItem(label = title, image = thumb, url = u, isFolder = False, infoLabels = infoLabels, fanart = fanart, duration = duration)
    if int(page) * 15 < int(count):
        u = { 'mode': '0', 'page': str( int( page ) + 1 ), 'url': save_url, 'section': section }
        addListItem(label = '[Next Page (' + str( int( page ) + 1 ) + ')]', image = next_thumb, url = u, isFolder = True, infoLabels = False, fanart = fanart)
    xbmcplugin.addSortMethod( handle = int(sys.argv[1]), sortMethod = xbmcplugin.SORT_METHOD_NONE )
    setViewMode("503")
    xbmcplugin.endOfDirectory( int( sys.argv[1] ) )

@retry(IndexError)
def build_teams_directory():
    html = getUrl( 'http://www.nba.com/video/' )
    team_names = common.parseDOM(html, "a", attrs = { "id": "nbaVidSelectLk" })
    team_nicks = common.parseDOM(html, "a", attrs = { "id": "nbaVidSelectLk" }, ret="onclick")
    item_count = 0
    for title in team_names:
        nick = re.compile("\(\'teams/(.+?)\'").findall(team_nicks[item_count])[0]
        url = 'teams%2F' + nick + '%7Cgames%2F*%7Cchannels%2F*&team=' + title.replace(' ','%20') + '&site=nba%2C' + nick
        u = { 'mode': '0', 'url': 'team', 'section': nick }
        addListItem(label = title, image = teams_thumb, url = u, isFolder = True, infoLabels = False, fanart = fanart)
        item_count += 1
    xbmcplugin.addSortMethod( handle = int(sys.argv[1]), sortMethod = xbmcplugin.SORT_METHOD_LABEL )
    setViewMode("515")
    xbmcplugin.endOfDirectory( int( sys.argv[1] ) )

@retry(IndexError)
def play_video( name, url, thumb, plot ):
    url = 'http://www.nba.com/video/' + url + '.xml'
    html = getPage( url )
    if html['error'] == 'HTTP Error 404: Not Found':
        dialog = xbmcgui.Dialog()
        ok = dialog.ok( plugin, settings.getLocalizedString( 30006 ) )
        return
    if common.parseDOM(html['content'], "file", attrs = { "bitrate": settings.getSetting("quality") }):
        url = common.parseDOM(html['content'], "file", attrs = { "bitrate": settings.getSetting("quality") })[0]
    else:
        url = 'http://nba.cdn.turner.com/nba/big' + common.parseDOM(html['content'], "file", attrs = { "type": "large" })[0]
    infoLabels = { "Title": name, "Studio": plugin, "Plot": plot }
    playListItem(label = name, image = thumb, path = url, infoLabels = infoLabels, PlayPath = False)

params = getParameters(sys.argv[2])
mode = None
name = None
url = None
plot = None
thumb = None
page = 1

try:
    url = urllib.unquote_plus(params["url"])
except:
    pass
try:
    name = urllib.unquote_plus(params["name"])
except:
    pass
try:
    mode = int(params["mode"])
except:
    pass
try:
    plot = urllib.unquote_plus(params["plot"])
except:
    pass
try:
    thumb = urllib.unquote_plus(params["thumb"])
except:
    pass
try:
    section = urllib.unquote_plus(params["section"])
except:
    pass
try:
    page = int(params["page"])
except:
    pass

try:
    if mode == None:
        build_main_directory()
    elif mode == 0:
        build_video_directory( url, page, section )
    elif mode == 1:
        build_teams_directory()
    elif mode == 3:
        play_video( name, url, thumb, plot )
except Exception:
    buggalo.onExceptionRaised()
gpl-2.0
dischinator/pyload
module/cli/AddPackage.py
42
1994
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#Copyright (C) 2011 RaNaN
#
#This program is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 3 of the License,
#or (at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#See the GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, see <http://www.gnu.org/licenses/>.
#
###

from Handler import Handler
from printer import *


class AddPackage(Handler):
    """ let the user add packages """

    def init(self):
        self.name = ""
        self.urls = []

    def onEnter(self, inp):
        if inp == "0":
            self.cli.reset()
        if not self.name:
            self.name = inp
            self.setInput()
        elif inp == "END":
            #add package
            self.client.addPackage(self.name, self.urls, 1)
            self.cli.reset()
        else:
            if inp.strip():
                self.urls.append(inp)
            self.setInput()

    def renderBody(self, line):
        println(line, white(_("Add Package:")))
        println(line + 1, "")
        line += 2
        if not self.name:
            println(line, _("Enter a name for the new package"))
            println(line + 1, "")
            line += 2
        else:
            println(line, _("Package: %s") % self.name)
            println(line + 1, _("Parse the links you want to add."))
            println(line + 2, _("Type %s when done.") % mag("END"))
            println(line + 3, _("Links added: ") + mag(len(self.urls)))
            line += 4
        println(line, "")
        println(line + 1, mag("0.") + _(" back to main menu"))
        return line + 2
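
# --- Editor's note (illustrative, not part of the original file): the handler is a
# small state machine driven by onEnter(). A typical session looks like:
#
#   <first input>      -> becomes the package name
#   <following inputs> -> collected as URLs (blank lines are ignored)
#   "END"              -> submits everything via client.addPackage(name, urls, 1)
#   "0"                -> aborts back to the main menu at any point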
gpl-3.0
potash/scikit-learn
examples/decomposition/plot_image_denoising.py
70
6249
""" ========================================= Image denoising using dictionary learning ========================================= An example comparing the effect of reconstructing noisy fragments of a raccoon face image using firstly online :ref:`DictionaryLearning` and various transform methods. The dictionary is fitted on the distorted left half of the image, and subsequently used to reconstruct the right half. Note that even better performance could be achieved by fitting to an undistorted (i.e. noiseless) image, but here we start from the assumption that it is not available. A common practice for evaluating the results of image denoising is by looking at the difference between the reconstruction and the original image. If the reconstruction is perfect this will look like Gaussian noise. It can be seen from the plots that the results of :ref:`omp` with two non-zero coefficients is a bit less biased than when keeping only one (the edges look less prominent). It is in addition closer from the ground truth in Frobenius norm. The result of :ref:`least_angle_regression` is much more strongly biased: the difference is reminiscent of the local intensity value of the original image. Thresholding is clearly not useful for denoising, but it is here to show that it can produce a suggestive output with very high speed, and thus be useful for other tasks such as object classification, where performance is not necessarily related to visualisation. """ print(__doc__) from time import time import matplotlib.pyplot as plt import numpy as np import scipy as sp from sklearn.decomposition import MiniBatchDictionaryLearning from sklearn.feature_extraction.image import extract_patches_2d from sklearn.feature_extraction.image import reconstruct_from_patches_2d from sklearn.utils.testing import SkipTest from sklearn.utils.fixes import sp_version if sp_version < (0, 12): raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and " "thus does not include the scipy.misc.face() image.") ############################################################################### try: from scipy import misc face = misc.face(gray=True) except AttributeError: # Old versions of scipy have face in the top level package face = sp.face(gray=True) # Convert from uint8 representation with values between 0 and 255 to # a floating point representation with values between 0 and 1. face = face / 255 # downsample for higher speed face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2] face /= 4.0 height, width = face.shape # Distort the right half of the image print('Distorting image...') distorted = face.copy() distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2) # Extract all reference patches from the left half of the image print('Extracting reference patches...') t0 = time() patch_size = (7, 7) data = extract_patches_2d(distorted[:, :width // 2], patch_size) data = data.reshape(data.shape[0], -1) data -= np.mean(data, axis=0) data /= np.std(data, axis=0) print('done in %.2fs.' % (time() - t0)) ############################################################################### # Learn the dictionary from reference patches print('Learning the dictionary...') t0 = time() dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500) V = dico.fit(data).components_ dt = time() - t0 print('done in %.2fs.' 
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
             'Train time %.1fs on %d patches' % (dt, len(data)),
             fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)


###############################################################################
# Display the distorted image

def show_with_diff(image, reference, title):
    """Helper function to display denoising"""
    plt.figure(figsize=(5, 3.3))
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.subplot(1, 2, 2)
    difference = image - reference
    plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
    plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)

show_with_diff(distorted, face, 'Distorted image')

###############################################################################
# Extract noisy patches and reconstruct them using the dictionary

print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))

transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = face.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    code = dico.transform(data)
    patches = np.dot(code, V)

    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
        patches, (height, width // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], face,
                   title + ' (time: %.1fs)' % dt)

plt.show()
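
# --- Editor's note: an optional addition, not in the original example. The
# docstring compares methods by the Frobenius norm of the difference image;
# the loop below makes that comparison explicit on the `reconstructions`
# dict built above.
for title, reconstruction in sorted(reconstructions.items()):
    error = np.sqrt(np.sum((reconstruction - face) ** 2))
    print('%s: Frobenius error %.2f' % (title.replace('\n', ' '), error))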
bsd-3-clause
sunqb/oa_qian
flask/Lib/site-packages/html5lib/treebuilders/etree_lxml.py
1724
14031
"""Module for supporting the lxml.etree library. The idea here is to use as much of the native library as possible, without using fragile hacks like custom element names that break between releases. The downside of this is that we cannot represent all possible trees; specifically the following are known to cause problems: Text or comments as siblings of the root element Docypes with no name When any of these things occur, we emit a DataLossWarning """ from __future__ import absolute_import, division, unicode_literals import warnings import re import sys from . import _base from ..constants import DataLossWarning from .. import constants from . import etree as etree_builders from .. import ihatexml import lxml.etree as etree fullTree = True tag_regexp = re.compile("{([^}]*)}(.*)") comment_type = etree.Comment("asd").tag class DocumentType(object): def __init__(self, name, publicId, systemId): self.name = name self.publicId = publicId self.systemId = systemId class Document(object): def __init__(self): self._elementTree = None self._childNodes = [] def appendChild(self, element): self._elementTree.getroot().addnext(element._element) def _getChildNodes(self): return self._childNodes childNodes = property(_getChildNodes) def testSerializer(element): rv = [] finalText = None infosetFilter = ihatexml.InfosetFilter() def serializeElement(element, indent=0): if not hasattr(element, "tag"): if hasattr(element, "getroot"): # Full tree case rv.append("#document") if element.docinfo.internalDTD: if not (element.docinfo.public_id or element.docinfo.system_url): dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name else: dtd_str = """<!DOCTYPE %s "%s" "%s">""" % ( element.docinfo.root_name, element.docinfo.public_id, element.docinfo.system_url) rv.append("|%s%s" % (' ' * (indent + 2), dtd_str)) next_element = element.getroot() while next_element.getprevious() is not None: next_element = next_element.getprevious() while next_element is not None: serializeElement(next_element, indent + 2) next_element = next_element.getnext() elif isinstance(element, str) or isinstance(element, bytes): # Text in a fragment assert isinstance(element, str) or sys.version_info.major == 2 rv.append("|%s\"%s\"" % (' ' * indent, element)) else: # Fragment case rv.append("#document-fragment") for next_element in element: serializeElement(next_element, indent + 2) elif element.tag == comment_type: rv.append("|%s<!-- %s -->" % (' ' * indent, element.text)) if hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * indent, element.tail)) else: assert isinstance(element, etree._Element) nsmatch = etree_builders.tag_regexp.match(element.tag) if nsmatch is not None: ns = nsmatch.group(1) tag = nsmatch.group(2) prefix = constants.prefixes[ns] rv.append("|%s<%s %s>" % (' ' * indent, prefix, infosetFilter.fromXmlName(tag))) else: rv.append("|%s<%s>" % (' ' * indent, infosetFilter.fromXmlName(element.tag))) if hasattr(element, "attrib"): attributes = [] for name, value in element.attrib.items(): nsmatch = tag_regexp.match(name) if nsmatch is not None: ns, name = nsmatch.groups() name = infosetFilter.fromXmlName(name) prefix = constants.prefixes[ns] attr_string = "%s %s" % (prefix, name) else: attr_string = infosetFilter.fromXmlName(name) attributes.append((attr_string, value)) for name, value in sorted(attributes): rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value)) if element.text: rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text)) indent += 2 for child in element: serializeElement(child, indent) if 
hasattr(element, "tail") and element.tail: rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail)) serializeElement(element, 0) if finalText is not None: rv.append("|%s\"%s\"" % (' ' * 2, finalText)) return "\n".join(rv) def tostring(element): """Serialize an element and its child nodes to a string""" rv = [] finalText = None def serializeElement(element): if not hasattr(element, "tag"): if element.docinfo.internalDTD: if element.docinfo.doctype: dtd_str = element.docinfo.doctype else: dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name rv.append(dtd_str) serializeElement(element.getroot()) elif element.tag == comment_type: rv.append("<!--%s-->" % (element.text,)) else: # This is assumed to be an ordinary element if not element.attrib: rv.append("<%s>" % (element.tag,)) else: attr = " ".join(["%s=\"%s\"" % (name, value) for name, value in element.attrib.items()]) rv.append("<%s %s>" % (element.tag, attr)) if element.text: rv.append(element.text) for child in element: serializeElement(child) rv.append("</%s>" % (element.tag,)) if hasattr(element, "tail") and element.tail: rv.append(element.tail) serializeElement(element) if finalText is not None: rv.append("%s\"" % (' ' * 2, finalText)) return "".join(rv) class TreeBuilder(_base.TreeBuilder): documentClass = Document doctypeClass = DocumentType elementClass = None commentClass = None fragmentClass = Document implementation = etree def __init__(self, namespaceHTMLElements, fullTree=False): builder = etree_builders.getETreeModule(etree, fullTree=fullTree) infosetFilter = self.infosetFilter = ihatexml.InfosetFilter() self.namespaceHTMLElements = namespaceHTMLElements class Attributes(dict): def __init__(self, element, value={}): self._element = element dict.__init__(self, value) for key, value in self.items(): if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value def __setitem__(self, key, value): dict.__setitem__(self, key, value) if isinstance(key, tuple): name = "{%s}%s" % (key[2], infosetFilter.coerceAttribute(key[1])) else: name = infosetFilter.coerceAttribute(key) self._element._element.attrib[name] = value class Element(builder.Element): def __init__(self, name, namespace): name = infosetFilter.coerceElement(name) builder.Element.__init__(self, name, namespace=namespace) self._attributes = Attributes(self) def _setName(self, name): self._name = infosetFilter.coerceElement(name) self._element.tag = self._getETreeTag( self._name, self._namespace) def _getName(self): return infosetFilter.fromXmlName(self._name) name = property(_getName, _setName) def _getAttributes(self): return self._attributes def _setAttributes(self, attributes): self._attributes = Attributes(self, attributes) attributes = property(_getAttributes, _setAttributes) def insertText(self, data, insertBefore=None): data = infosetFilter.coerceCharacters(data) builder.Element.insertText(self, data, insertBefore) def appendChild(self, child): builder.Element.appendChild(self, child) class Comment(builder.Comment): def __init__(self, data): data = infosetFilter.coerceComment(data) builder.Comment.__init__(self, data) def _setData(self, data): data = infosetFilter.coerceComment(data) self._element.text = data def _getData(self): return self._element.text data = property(_getData, _setData) self.elementClass = Element self.commentClass = builder.Comment # self.fragmentClass = builder.DocumentFragment _base.TreeBuilder.__init__(self, 
namespaceHTMLElements) def reset(self): _base.TreeBuilder.reset(self) self.insertComment = self.insertCommentInitial self.initial_comments = [] self.doctype = None def testSerializer(self, element): return testSerializer(element) def getDocument(self): if fullTree: return self.document._elementTree else: return self.document._elementTree.getroot() def getFragment(self): fragment = [] element = self.openElements[0]._element if element.text: fragment.append(element.text) fragment.extend(list(element)) if element.tail: fragment.append(element.tail) return fragment def insertDoctype(self, token): name = token["name"] publicId = token["publicId"] systemId = token["systemId"] if not name: warnings.warn("lxml cannot represent empty doctype", DataLossWarning) self.doctype = None else: coercedName = self.infosetFilter.coerceElement(name) if coercedName != name: warnings.warn("lxml cannot represent non-xml doctype", DataLossWarning) doctype = self.doctypeClass(coercedName, publicId, systemId) self.doctype = doctype def insertCommentInitial(self, data, parent=None): self.initial_comments.append(data) def insertCommentMain(self, data, parent=None): if (parent == self.document and self.document._elementTree.getroot()[-1].tag == comment_type): warnings.warn("lxml cannot represent adjacent comments beyond the root elements", DataLossWarning) super(TreeBuilder, self).insertComment(data, parent) def insertRoot(self, token): """Create the document root""" # Because of the way libxml2 works, it doesn't seem to be possible to # alter information like the doctype after the tree has been parsed. # Therefore we need to use the built-in parser to create our iniial # tree, after which we can add elements like normal docStr = "" if self.doctype: assert self.doctype.name docStr += "<!DOCTYPE %s" % self.doctype.name if (self.doctype.publicId is not None or self.doctype.systemId is not None): docStr += (' PUBLIC "%s" ' % (self.infosetFilter.coercePubid(self.doctype.publicId or ""))) if self.doctype.systemId: sysid = self.doctype.systemId if sysid.find("'") >= 0 and sysid.find('"') >= 0: warnings.warn("DOCTYPE system cannot contain single and double quotes", DataLossWarning) sysid = sysid.replace("'", 'U00027') if sysid.find("'") >= 0: docStr += '"%s"' % sysid else: docStr += "'%s'" % sysid else: docStr += "''" docStr += ">" if self.doctype.name != token["name"]: warnings.warn("lxml cannot represent doctype with a different name to the root element", DataLossWarning) docStr += "<THIS_SHOULD_NEVER_APPEAR_PUBLICLY/>" root = etree.fromstring(docStr) # Append the initial comments: for comment_token in self.initial_comments: root.addprevious(etree.Comment(comment_token["data"])) # Create the root document and add the ElementTree to it self.document = self.documentClass() self.document._elementTree = root.getroottree() # Give the root element the right name name = token["name"] namespace = token.get("namespace", self.defaultNamespace) if namespace is None: etree_tag = name else: etree_tag = "{%s}%s" % (namespace, name) root.tag = etree_tag # Add the root element to the internal child/open data structures root_element = self.elementClass(name, namespace) root_element._element = root self.document._childNodes.append(root_element) self.openElements.append(root_element) # Reset to the default insert comment function self.insertComment = self.insertCommentMain
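This treebuilder is normally reached through html5lib's parse entry point. A minimal usage sketch, assuming html5lib is importable and that the treebuilder name "lxml" routes to this module (as it does in html5lib releases of this era):

import html5lib

# Parsing with the lxml treebuilder returns an lxml ElementTree;
# elements are placed in the XHTML namespace.
doc = html5lib.parse('<p class="x">Hello<!--hi--></p>', treebuilder='lxml')
root = doc.getroot()
print(root.tag)  # '{http://www.w3.org/1999/xhtml}html'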
apache-2.0
max291/RLScore
rlscore/measure/accuracy_measure.py
1
1590
import numpy as np

from rlscore.utilities import array_tools


def accuracy_singletask(Y, P):
    assert Y.shape[0] == P.shape[0]
    vlen = float(Y.shape[0])
    perf = np.sum(np.sign(np.multiply(Y, P)) + 1.) / (2 * vlen)
    return perf


def accuracy_multitask(Y, P):
    Y = np.mat(Y)
    P = np.mat(P)
    vlen = float(Y.shape[0])
    performances = np.sum(np.sign(np.multiply(Y, P)) + 1., axis=0) / (2 * vlen)
    performances = np.array(performances)[0]
    return performances


def accuracy(Y, P):
    """Binary classification accuracy.

    A performance measure for binary classification problems.
    Returns the fraction of correct class predictions. P[i]>0 is
    considered a positive class prediction and P[i]<0 negative.
    P[i]==0 is considered as classifier abstaining to make a decision,
    which incurs 0.5 errors (in contrast to 0 error for correct and 1
    error for incorrect prediction).

    If 2-dimensional arrays are supplied as arguments, then accuracy
    is separately computed for each column, after which the accuracies
    are averaged.

    Parameters
    ----------
    Y: {array-like}, shape = [n_samples] or [n_samples, n_labels]
        Correct labels, must belong to set {-1,1}
    P: {array-like}, shape = [n_samples] or [n_samples, n_labels]
        Predicted labels, can be any real numbers.

    Returns
    -------
    accuracy: float
        number between 0 and 1
    """
    Y = array_tools.as_labelmatrix(Y)
    P = array_tools.as_labelmatrix(P)
    return np.mean(accuracy_multitask(Y, P))

accuracy.iserror = False
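A quick usage sketch, assuming rlscore is installed and that `as_labelmatrix` accepts 1-D arrays as the docstring suggests (the toy label vectors are made up):

import numpy as np
from rlscore.measure.accuracy_measure import accuracy

Y = np.array([1, 1, -1, -1])            # true labels in {-1, 1}
P = np.array([0.9, -0.2, -0.5, -1.3])   # real-valued predictions
# The signs of Y*P agree for 3 of 4 samples, so accuracy is 0.75.
print(accuracy(Y, P))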
mit
Hoekz/hackness-monster
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euckrprober.py
2931
1675
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel


class EUCKRProber(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "EUC-KR"
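A minimal usage sketch, assuming the chardet 2.x prober interface (`feed` and `get_confidence` are defined on the prober base classes); the sample byte string is assumed to be EUC-KR encoded text:

from pip._vendor.requests.packages.chardet.euckrprober import EUCKRProber

prober = EUCKRProber()
# Feed raw bytes in one or more chunks; the state machine and
# distribution analyzer update incrementally.
prober.feed(b'\xbe\xc8\xb3\xe7')
print(prober.get_charset_name())  # 'EUC-KR'
print(prober.get_confidence())    # confidence estimate in [0, 1]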
mit
sk1tt1sh/python-docx
docx/oxml/shared.py
1
10545
# encoding: utf-8

"""
Objects shared by modules in the docx.oxml subpackage.
"""

from lxml import etree
import re

from .exceptions import ValidationError


nsmap = {
    'a': ('http://schemas.openxmlformats.org/drawingml/2006/main'),
    'c': ('http://schemas.openxmlformats.org/drawingml/2006/chart'),
    'dgm': ('http://schemas.openxmlformats.org/drawingml/2006/diagram'),
    'pic': ('http://schemas.openxmlformats.org/drawingml/2006/picture'),
    'r': ('http://schemas.openxmlformats.org/officeDocument/2006/relations'
          'hips'),
    'w': ('http://schemas.openxmlformats.org/wordprocessingml/2006/main'),
    'wp': ('http://schemas.openxmlformats.org/drawingml/2006/wordprocessing'
           'Drawing'),
    'xml': ('http://www.w3.org/XML/1998/namespace')
}

# configure XML parser
element_class_lookup = etree.ElementNamespaceClassLookup()
oxml_parser = etree.XMLParser(remove_blank_text=True)
oxml_parser.set_element_class_lookup(element_class_lookup)


# ===========================================================================
# utility functions
# ===========================================================================

class NamespacePrefixedTag(str):
    """
    Value object that knows the semantics of an XML tag having a namespace
    prefix.
    """
    def __new__(cls, nstag, *args):
        return super(NamespacePrefixedTag, cls).__new__(cls, nstag)

    def __init__(self, nstag):
        self._pfx, self._local_part = nstag.split(':')
        self._ns_uri = nsmap[self._pfx]

    @property
    def clark_name(self):
        return '{%s}%s' % (self._ns_uri, self._local_part)

    @property
    def local_part(self):
        """
        Return the local part of the tag as a string. E.g. 'foobar' is
        returned for tag 'f:foobar'.
        """
        return self._local_part

    @property
    def nsmap(self):
        """
        Return a dict having a single member, mapping the namespace prefix of
        this tag to its namespace name (e.g. {'f': 'http://foo/bar'}). This
        is handy for passing to xpath calls and other uses.
        """
        return {self._pfx: self._ns_uri}

    @property
    def nspfx(self):
        """
        Return the string namespace prefix for the tag, e.g. 'f' is returned
        for tag 'f:foobar'.
        """
        return self._pfx

    @property
    def nsuri(self):
        """
        Return the namespace URI for the tag, e.g. 'http://foo/bar' would be
        returned for tag 'f:foobar' if the 'f' prefix maps to
        'http://foo/bar' in nsmap.
        """
        return self._ns_uri


def nsdecls(*prefixes):
    return ' '.join(['xmlns:%s="%s"' % (pfx, nsmap[pfx]) for pfx in prefixes])


def nspfxmap(*nspfxs):
    """
    Return a dict containing the subset namespace prefix mappings specified
    by *nspfxs*. Any number of namespace prefixes can be supplied, e.g.
    namespaces('a', 'r', 'p').
    """
    return dict((pfx, nsmap[pfx]) for pfx in nspfxs)


def OxmlElement(nsptag_str, attrs=None, nsmap=None):
    """
    Return a 'loose' lxml element having the tag specified by *nsptag_str*.
    *nsptag_str* must contain the standard namespace prefix, e.g. 'a:tbl'.
    The resulting element is an instance of the custom element class for this
    tag name if one is defined. A dictionary of attribute values may be
    provided as *attrs*; they are set if present.
    """
    nsptag = NamespacePrefixedTag(nsptag_str)
    nsmap = nsmap if nsmap is not None else nsptag.nsmap
    return oxml_parser.makeelement(
        nsptag.clark_name, attrib=attrs, nsmap=nsmap
    )


def oxml_fromstring(text):
    """
    ``etree.fromstring()`` replacement that uses oxml parser
    """
    return etree.fromstring(text, oxml_parser)


def qn(tag):
    """
    Stands for "qualified name", a utility function to turn a namespace
    prefixed tag name into a Clark-notation qualified tag name for lxml. For
    example, ``qn('p:cSld')`` returns ``'{http://schemas.../main}cSld'``.
    """
    prefix, tagroot = tag.split(':')
    uri = nsmap[prefix]
    return '{%s}%s' % (uri, tagroot)


def register_custom_element_class(tag, cls):
    """
    Register *cls* to be constructed when the oxml parser encounters an
    element with matching *tag*. *tag* is a string of the form
    ``nspfx:tagroot``, e.g. ``'w:document'``.
    """
    nspfx, tagroot = tag.split(':')
    namespace = element_class_lookup.get_namespace(nsmap[nspfx])
    namespace[tagroot] = cls


def serialize_for_reading(element):
    """
    Serialize *element* to human-readable XML suitable for tests. No XML
    declaration.
    """
    xml = etree.tostring(element, encoding='unicode', pretty_print=True)
    return XmlString(xml)


def _SubElement(parent, tag):
    return etree.SubElement(parent, qn(tag), nsmap=nsmap)


class XmlString(str):
    """
    Provides string comparison override suitable for serialized XML that is
    useful for tests.
    """
    # '    <w:xyz xmlns:a="http://ns/decl/a" attr_name="val">text</w:xyz>'
    # |          |                                          ||           |
    # +----------+------------------------------------------++-----------+
    #  front                     attrs                       | text
    #                                                      close
    _xml_elm_line_patt = re.compile(
        '( *</?[\w:]+)(.*?)(/?>)([^<]*</\w+>)?'
    )

    def __eq__(self, other):
        lines = self.splitlines()
        lines_other = other.splitlines()
        if len(lines) != len(lines_other):
            return False
        for line, line_other in zip(lines, lines_other):
            if not self._eq_elm_strs(line, line_other):
                return False
        return True

    def __ne__(self, other):
        return not self.__eq__(other)

    def _attr_seq(self, attrs):
        """
        Return a sequence of attribute strings parsed from *attrs*. Each
        attribute string is stripped of whitespace on both ends.
        """
        attrs = attrs.strip()
        attr_lst = attrs.split()
        return sorted(attr_lst)

    def _eq_elm_strs(self, line, line_2):
        """
        Return True if the element in *line_2* is XML equivalent to the
        element in *line*.
        """
        front, attrs, close, text = self._parse_line(line)
        front_2, attrs_2, close_2, text_2 = self._parse_line(line_2)
        if front != front_2:
            return False
        if self._attr_seq(attrs) != self._attr_seq(attrs_2):
            return False
        if close != close_2:
            return False
        if text != text_2:
            return False
        return True

    def _parse_line(self, line):
        """
        Return front, attrs, close, text 4-tuple result of parsing XML
        element string *line*.
        """
        match = self._xml_elm_line_patt.match(line)
        front, attrs, close, text = [match.group(n) for n in range(1, 5)]
        return front, attrs, close, text


# ===========================================================================
# shared custom element classes
# ===========================================================================

class OxmlBaseElement(etree.ElementBase):
    """
    Base class for all custom element classes, to add standardized behavior
    to all classes in one place.
    """
    def first_child_found_in(self, *tagnames):
        """
        Return the first child found with tag in *tagnames*, or None if
        not found.
        """
        for tagname in tagnames:
            child = self.find(qn(tagname))
            if child is not None:
                return child
        return None

    def insert_element_before(self, elm, *tagnames):
        successor = self.first_child_found_in(*tagnames)
        if successor is not None:
            successor.addprevious(elm)
        else:
            self.append(elm)
        return elm

    @property
    def xml(self):
        """
        Return XML string for this element, suitable for testing purposes.
        Pretty printed for readability and without an XML declaration at the
        top.
        """
        return serialize_for_reading(self)


class CT_DecimalNumber(OxmlBaseElement):
    """
    Used for ``<w:numId>``, ``<w:ilvl>``, ``<w:abstractNumId>`` and several
    others, containing a text representation of a decimal number (e.g. 42)
    in its ``val`` attribute.
    """
    @classmethod
    def new(cls, nsptagname, val):
        """
        Return a new ``CT_DecimalNumber`` element having tagname *nsptagname*
        and ``val`` attribute set to *val*.
        """
        return OxmlElement(nsptagname, attrs={qn('w:val'): str(val)})

    @property
    def val(self):
        """
        Required attribute containing a decimal integer
        """
        number_str = self.get(qn('w:val'))
        return int(number_str)

    @val.setter
    def val(self, val):
        decimal_number_str = '%d' % val
        self.set(qn('w:val'), decimal_number_str)


class CT_OnOff(OxmlBaseElement):
    """
    Used for ``<w:b>``, ``<w:i>`` elements and others, containing a bool-ish
    string in its ``val`` attribute, xsd:boolean plus 'on' and 'off'.
    """
    @property
    def val(self):
        val = self.get(qn('w:val'))
        if val is None:
            return True
        elif val in ('0', 'false', 'off'):
            return False
        elif val in ('1', 'true', 'on'):
            return True
        raise ValidationError("expected xsd:boolean, got '%s'" % val)

    @val.setter
    def val(self, value):
        val = qn('w:val')
        if bool(value) is True:
            if val in self.attrib:
                del self.attrib[val]
        else:
            self.set(val, '0')


class CT_String(OxmlBaseElement):
    """
    Used for ``<w:pStyle>`` and ``<w:tblStyle>`` elements and others,
    containing a style name in its ``val`` attribute.
    """
    @classmethod
    def new(cls, nsptagname, val):
        """
        Return a new ``CT_String`` element with tagname *nsptagname* and
        ``val`` attribute set to *val*.
        """
        return OxmlElement(nsptagname, attrs={qn('w:val'): val})

    @classmethod
    def new_pStyle(cls, val):
        """
        Return a new ``<w:pStyle>`` element with ``val`` attribute set to
        *val*.
        """
        return OxmlElement('w:pStyle', attrs={qn('w:val'): val})

    @property
    def val(self):
        return self.get(qn('w:val'))

    @val.setter
    def val(self, val):
        return self.set(qn('w:val'), val)
mit
mustafat/odoo-1
openerp/api.py
30
35543
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## """ This module provides the elements for managing two different API styles, namely the "traditional" and "record" styles. In the "traditional" style, parameters like the database cursor, user id, context dictionary and record ids (usually denoted as ``cr``, ``uid``, ``context``, ``ids``) are passed explicitly to all methods. In the "record" style, those parameters are hidden into model instances, which gives it a more object-oriented feel. For instance, the statements:: model = self.pool.get(MODEL) ids = model.search(cr, uid, DOMAIN, context=context) for rec in model.browse(cr, uid, ids, context=context): print rec.name model.write(cr, uid, ids, VALUES, context=context) may also be written as:: env = Environment(cr, uid, context) # cr, uid, context wrapped in env model = env[MODEL] # retrieve an instance of MODEL recs = model.search(DOMAIN) # search returns a recordset for rec in recs: # iterate over the records print rec.name recs.write(VALUES) # update all records in recs Methods written in the "traditional" style are automatically decorated, following some heuristics based on parameter names. """ __all__ = [ 'Environment', 'Meta', 'guess', 'noguess', 'model', 'multi', 'one', 'cr', 'cr_context', 'cr_uid', 'cr_uid_context', 'cr_uid_id', 'cr_uid_id_context', 'cr_uid_ids', 'cr_uid_ids_context', 'constrains', 'depends', 'onchange', 'returns', ] import logging import operator from inspect import currentframe, getargspec from collections import defaultdict, MutableMapping from contextlib import contextmanager from pprint import pformat from weakref import WeakSet from werkzeug.local import Local, release_local from openerp.tools import frozendict, classproperty _logger = logging.getLogger(__name__) # The following attributes are used, and reflected on wrapping methods: # - method._constrains: set by @constrains, specifies constraint dependencies # - method._depends: set by @depends, specifies compute dependencies # - method._returns: set by @returns, specifies return model # - method._onchange: set by @onchange, specifies onchange fields # - method.clear_cache: set by @ormcache, used to clear the cache # # On wrapping method only: # - method._api: decorator function, used for re-applying decorator # - method._orig: original method # WRAPPED_ATTRS = ('__module__', '__name__', '__doc__', '_constrains', '_depends', '_onchange', '_returns', 'clear_cache') INHERITED_ATTRS = ('_returns',) class Meta(type): """ Metaclass that automatically decorates traditional-style methods by guessing their API. It also implements the inheritance of the :func:`returns` decorators. 
""" def __new__(meta, name, bases, attrs): # dummy parent class to catch overridden methods decorated with 'returns' parent = type.__new__(meta, name, bases, {}) for key, value in attrs.items(): if not key.startswith('__') and callable(value): # make the method inherit from decorators value = propagate(getattr(parent, key, None), value) # guess calling convention if none is given if not hasattr(value, '_api'): try: value = guess(value) except TypeError: pass attrs[key] = value return type.__new__(meta, name, bases, attrs) identity = lambda x: x def decorate(method, attr, value): """ Decorate ``method`` or its original method. """ if getattr(method, '_api', False): # decorate the original method, and re-apply the api decorator setattr(method._orig, attr, value) return method._api(method._orig) else: # simply decorate the method itself setattr(method, attr, value) return method def propagate(from_method, to_method): """ Propagate decorators from ``from_method`` to ``to_method``, and return the resulting method. """ if from_method: for attr in INHERITED_ATTRS: if hasattr(from_method, attr) and not hasattr(to_method, attr): to_method = decorate(to_method, attr, getattr(from_method, attr)) return to_method def constrains(*args): """ Decorates a constraint checker. Each argument must be a field name used in the check:: @api.one @api.constrains('name', 'description') def _check_description(self): if self.name == self.description: raise ValidationError("Fields name and description must be different") Invoked on the records on which one of the named fields has been modified. Should raise :class:`~openerp.exceptions.ValidationError` if the validation failed. .. warning:: ``@constrains`` only supports simple field names, dotted names (fields of relational fields e.g. ``partner_id.customer``) are not supported and will be ignored """ return lambda method: decorate(method, '_constrains', args) def onchange(*args): """ Return a decorator to decorate an onchange method for given fields. Each argument must be a field name:: @api.onchange('partner_id') def _onchange_partner(self): self.message = "Dear %s" % (self.partner_id.name or "") In the form views where the field appears, the method will be called when one of the given fields is modified. The method is invoked on a pseudo-record that contains the values present in the form. Field assignments on that record are automatically sent back to the client. The method may return a dictionary for changing field domains and pop up a warning message, like in the old API:: return { 'domain': {'other_id': [('partner_id', '=', partner_id)]}, 'warning': {'title': "Warning", 'message': "What is this?"}, } .. warning:: ``@onchange`` only supports simple field names, dotted names (fields of relational fields e.g. ``partner_id.tz``) are not supported and will be ignored """ return lambda method: decorate(method, '_onchange', args) def depends(*args): """ Return a decorator that specifies the field dependencies of a "compute" method (for new-style function fields). Each argument must be a string that consists in a dot-separated sequence of field names:: pname = fields.Char(compute='_compute_pname') @api.one @api.depends('partner_id.name', 'partner_id.is_company') def _compute_pname(self): if self.partner_id.is_company: self.pname = (self.partner_id.name or "").upper() else: self.pname = self.partner_id.name One may also pass a single function as argument. In that case, the dependencies are given by calling the function with the field's model. 
""" if args and callable(args[0]): args = args[0] elif any('id' in arg.split('.') for arg in args): raise NotImplementedError("Compute method cannot depend on field 'id'.") return lambda method: decorate(method, '_depends', args) def returns(model, downgrade=None, upgrade=None): """ Return a decorator for methods that return instances of ``model``. :param model: a model name, or ``'self'`` for the current model :param downgrade: a function ``downgrade(self, value, *args, **kwargs)`` to convert the record-style ``value`` to a traditional-style output :param upgrade: a function ``upgrade(self, value, *args, **kwargs)`` to convert the traditional-style ``value`` to a record-style output The arguments ``self``, ``*args`` and ``**kwargs`` are the ones passed to the method in the record-style. The decorator adapts the method output to the api style: ``id``, ``ids`` or ``False`` for the traditional style, and recordset for the record style:: @model @returns('res.partner') def find_partner(self, arg): ... # return some record # output depends on call style: traditional vs record style partner_id = model.find_partner(cr, uid, arg, context=context) # recs = model.browse(cr, uid, ids, context) partner_record = recs.find_partner(arg) Note that the decorated method must satisfy that convention. Those decorators are automatically *inherited*: a method that overrides a decorated existing method will be decorated with the same ``@returns(model)``. """ return lambda method: decorate(method, '_returns', (model, downgrade, upgrade)) def make_wrapper(decorator, method, old_api, new_api): """ Return a wrapper method for ``method``. """ def wrapper(self, *args, **kwargs): # avoid hasattr(self, '_ids') because __getattr__() is overridden if '_ids' in self.__dict__: return new_api(self, *args, **kwargs) else: return old_api(self, *args, **kwargs) # propagate specific openerp attributes from method to wrapper for attr in WRAPPED_ATTRS: if hasattr(method, attr): setattr(wrapper, attr, getattr(method, attr)) wrapper._api = decorator wrapper._orig = method return wrapper def get_downgrade(method): """ Return a function `downgrade(self, value, *args, **kwargs)` that adapts ``value`` from record-style to traditional-style, following the convention of ``method``. """ spec = getattr(method, '_returns', None) if spec: _, downgrade, _ = spec if downgrade and len(getargspec(downgrade).args) > 1: return downgrade elif downgrade: return lambda self, *args, **kwargs: downgrade(args[0]) else: return lambda self, *args, **kwargs: args[0].ids else: return lambda self, *args, **kwargs: args[0] def get_upgrade(method): """ Return a function `upgrade(self, value, *args, **kwargs)` that adapts ``value`` from traditional-style to record-style, following the convention of ``method``. """ spec = getattr(method, '_returns', None) if spec: model, _, upgrade = spec if upgrade: return upgrade elif model == 'self': return lambda self, *args, **kwargs: self.browse(args[0]) else: return lambda self, *args, **kwargs: self.env[model].browse(args[0]) else: return lambda self, *args, **kwargs: args[0] def get_aggregate(method): """ Return a function `aggregate(self, value)` that aggregates record-style ``value`` for a method decorated with ``@one``. 
""" spec = getattr(method, '_returns', None) if spec: # value is a list of instances, concatenate them model, _, _ = spec if model == 'self': return lambda self, value: sum(value, self.browse()) else: return lambda self, value: sum(value, self.env[model].browse()) else: return lambda self, value: value def get_context_split(method): """ Return a function ``split`` that extracts the context from a pair of positional and keyword arguments:: context, args, kwargs = split(args, kwargs) """ pos = len(getargspec(method).args) - 1 def split(args, kwargs): if pos < len(args): return args[pos], args[:pos], kwargs else: return kwargs.pop('context', None), args, kwargs return split def model(method): """ Decorate a record-style method where ``self`` is a recordset, but its contents is not relevant, only the model is. Such a method:: @api.model def method(self, args): ... may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, args, context=context) Notice that no ``ids`` are passed to the method in the traditional style. """ split = get_context_split(method) downgrade = get_downgrade(method) def old_api(self, cr, uid, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, [], context) result = method(recs, *args, **kwargs) return downgrade(recs, result, *args, **kwargs) return make_wrapper(model, method, old_api, method) def multi(method): """ Decorate a record-style method where ``self`` is a recordset. The method typically defines an operation on records. Such a method:: @api.multi def method(self, args): ... may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, ids, args, context=context) """ split = get_context_split(method) downgrade = get_downgrade(method) def old_api(self, cr, uid, ids, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, ids, context) result = method(recs, *args, **kwargs) return downgrade(recs, result, *args, **kwargs) return make_wrapper(multi, method, old_api, method) def one(method): """ Decorate a record-style method where ``self`` is expected to be a singleton instance. The decorated method automatically loops on records, and makes a list with the results. In case the method is decorated with :func:`returns`, it concatenates the resulting instances. Such a method:: @api.one def method(self, args): return self.name may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) names = recs.method(args) names = model.method(cr, uid, ids, args, context=context) .. deprecated:: 9.0 :func:`~.one` often makes the code less clear and behaves in ways developers and readers may not expect. It is strongly recommended to use :func:`~.multi` and either iterate on the ``self`` recordset or ensure that the recordset is a single record with :meth:`~openerp.models.Model.ensure_one`. 
""" split = get_context_split(method) downgrade = get_downgrade(method) aggregate = get_aggregate(method) def old_api(self, cr, uid, ids, *args, **kwargs): context, args, kwargs = split(args, kwargs) recs = self.browse(cr, uid, ids, context) result = new_api(recs, *args, **kwargs) return downgrade(recs, result, *args, **kwargs) def new_api(self, *args, **kwargs): result = [method(rec, *args, **kwargs) for rec in self] return aggregate(self, result) return make_wrapper(one, method, old_api, new_api) def cr(method): """ Decorate a traditional-style method that takes ``cr`` as a parameter. Such a method may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, args) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, *args, **kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr, method, method, new_api) def cr_context(method): """ Decorate a traditional-style method that takes ``cr``, ``context`` as parameters. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args old_kwargs = dict(kwargs, context=context) result = method(self._model, cr, *args, **old_kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_context, method, method, new_api) def cr_uid(method): """ Decorate a traditional-style method that takes ``cr``, ``uid`` as parameters. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, uid, *args, **kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid, method, method, new_api) def cr_uid_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``context`` as parameters. Such a method may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, args, context=context) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args old_kwargs = dict(kwargs, context=context) result = method(self._model, cr, uid, *args, **old_kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid_context, method, method, new_api) def cr_uid_id(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``id`` as parameters. Such a method may be called in both record and traditional styles. In the record style, the method automatically loops on records. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = [method(self._model, cr, uid, id, *args, **kwargs) for id in self.ids] return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid_id, method, method, new_api) def cr_uid_id_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``id``, ``context`` as parameters. Such a method:: @api.cr_uid_id def method(self, cr, uid, id, args, context=None): ... 
may be called in both record and traditional styles, like:: # rec = model.browse(cr, uid, id, context) rec.method(args) model.method(cr, uid, id, args, context=context) """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args old_kwargs = dict(kwargs, context=context) result = [method(self._model, cr, uid, id, *args, **old_kwargs) for id in self.ids] return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid_id_context, method, method, new_api) def cr_uid_ids(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids`` as parameters. Such a method may be called in both record and traditional styles. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args result = method(self._model, cr, uid, self.ids, *args, **kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid_ids, method, method, new_api) def cr_uid_ids_context(method): """ Decorate a traditional-style method that takes ``cr``, ``uid``, ``ids``, ``context`` as parameters. Such a method:: @api.cr_uid_ids_context def method(self, cr, uid, ids, args, context=None): ... may be called in both record and traditional styles, like:: # recs = model.browse(cr, uid, ids, context) recs.method(args) model.method(cr, uid, ids, args, context=context) It is generally not necessary, see :func:`guess`. """ upgrade = get_upgrade(method) def new_api(self, *args, **kwargs): cr, uid, context = self.env.args old_kwargs = dict(kwargs, context=context) result = method(self._model, cr, uid, self.ids, *args, **old_kwargs) return upgrade(self, result, *args, **kwargs) return make_wrapper(cr_uid_ids_context, method, method, new_api) def v7(method_v7): """ Decorate a method that supports the old-style api only. A new-style api may be provided by redefining a method with the same name and decorated with :func:`~.v8`:: @api.v7 def foo(self, cr, uid, ids, context=None): ... @api.v8 def foo(self): ... Special care must be taken if one method calls the other one, because the method may be overridden! In that case, one should call the method from the current class (say ``MyClass``), for instance:: @api.v7 def foo(self, cr, uid, ids, context=None): # Beware: records.foo() may call an overriding of foo() records = self.browse(cr, uid, ids, context) return MyClass.foo(records) Note that the wrapper method uses the docstring of the first method. """ # retrieve method_v8 from the caller's frame frame = currentframe().f_back method = frame.f_locals.get(method_v7.__name__) method_v8 = getattr(method, '_v8', method) wrapper = make_wrapper(v7, method_v7, method_v7, method_v8) wrapper._v7 = method_v7 wrapper._v8 = method_v8 return wrapper def v8(method_v8): """ Decorate a method that supports the new-style api only. An old-style api may be provided by redefining a method with the same name and decorated with :func:`~.v7`:: @api.v8 def foo(self): ... @api.v7 def foo(self, cr, uid, ids, context=None): ... Note that the wrapper method uses the docstring of the first method. """ # retrieve method_v7 from the caller's frame frame = currentframe().f_back method = frame.f_locals.get(method_v8.__name__) method_v7 = getattr(method, '_v7', method) wrapper = make_wrapper(v8, method_v8, method_v7, method_v8) wrapper._v7 = method_v7 wrapper._v8 = method_v8 return wrapper def noguess(method): """ Decorate a method to prevent any effect from :func:`guess`. 
""" method._api = False return method def guess(method): """ Decorate ``method`` to make it callable in both traditional and record styles. This decorator is applied automatically by the model's metaclass, and has no effect on already-decorated methods. The API style is determined by heuristics on the parameter names: ``cr`` or ``cursor`` for the cursor, ``uid`` or ``user`` for the user id, ``id`` or ``ids`` for a list of record ids, and ``context`` for the context dictionary. If a traditional API is recognized, one of the decorators :func:`cr`, :func:`cr_context`, :func:`cr_uid`, :func:`cr_uid_context`, :func:`cr_uid_id`, :func:`cr_uid_id_context`, :func:`cr_uid_ids`, :func:`cr_uid_ids_context` is applied on the method. Method calls are considered traditional style when their first parameter is a database cursor. """ if hasattr(method, '_api'): return method # introspection on argument names to determine api style args, vname, kwname, defaults = getargspec(method) names = tuple(args) + (None,) * 4 if names[0] == 'self': if names[1] in ('cr', 'cursor'): if names[2] in ('uid', 'user'): if names[3] == 'ids': if 'context' in names or kwname: return cr_uid_ids_context(method) else: return cr_uid_ids(method) elif names[3] == 'id' or names[3] == 'res_id': if 'context' in names or kwname: return cr_uid_id_context(method) else: return cr_uid_id(method) elif 'context' in names or kwname: return cr_uid_context(method) else: return cr_uid(method) elif 'context' in names: return cr_context(method) else: return cr(method) # no wrapping by default return noguess(method) def expected(decorator, func): """ Decorate ``func`` with ``decorator`` if ``func`` is not wrapped yet. """ return decorator(func) if not hasattr(func, '_api') else func class Environment(object): """ An environment wraps data for ORM records: - :attr:`cr`, the current database cursor; - :attr:`uid`, the current user id; - :attr:`context`, the current context dictionary. It also provides access to the registry, a cache for records, and a data structure to manage recomputations. """ _local = Local() @classproperty def envs(cls): return cls._local.environments @classmethod @contextmanager def manage(cls): """ Context manager for a set of environments. """ if hasattr(cls._local, 'environments'): yield else: try: cls._local.environments = Environments() yield finally: release_local(cls._local) @classmethod def reset(cls): """ Clear the set of environments. This may be useful when recreating a registry inside a transaction. """ cls._local.environments = Environments() def __new__(cls, cr, uid, context): assert context is not None args = (cr, uid, context) # if env already exists, return it env, envs = None, cls.envs for env in envs: if env.args == args: return env # otherwise create environment, and add it in the set self = object.__new__(cls) self.cr, self.uid, self.context = self.args = (cr, uid, frozendict(context)) self.registry = RegistryManager.get(cr.dbname) self.cache = defaultdict(dict) # {field: {id: value, ...}, ...} self.prefetch = defaultdict(set) # {model_name: set(id), ...} self.computed = defaultdict(set) # {field: set(id), ...} self.dirty = defaultdict(set) # {record: set(field_name), ...} self.all = envs envs.add(self) return self def __getitem__(self, model_name): """ return a given model """ return self.registry[model_name]._browse(self, ()) def __call__(self, cr=None, user=None, context=None): """ Return an environment based on ``self`` with modified parameters. 
:param cr: optional database cursor to change the current cursor :param user: optional user/user id to change the current user :param context: optional context dictionary to change the current context """ cr = self.cr if cr is None else cr uid = self.uid if user is None else int(user) context = self.context if context is None else context return Environment(cr, uid, context) def ref(self, xml_id, raise_if_not_found=True): """ return the record corresponding to the given ``xml_id`` """ return self['ir.model.data'].xmlid_to_object(xml_id, raise_if_not_found=raise_if_not_found) @property def user(self): """ return the current user (as an instance) """ return self(user=SUPERUSER_ID)['res.users'].browse(self.uid) @property def lang(self): """ return the current language code """ return self.context.get('lang') @contextmanager def _do_in_mode(self, mode): if self.all.mode: yield else: try: self.all.mode = mode yield finally: self.all.mode = False self.dirty.clear() def do_in_draft(self): """ Context-switch to draft mode, where all field updates are done in cache only. """ return self._do_in_mode(True) @property def in_draft(self): """ Return whether we are in draft mode. """ return bool(self.all.mode) def do_in_onchange(self): """ Context-switch to 'onchange' draft mode, which is a specialized draft mode used during execution of onchange methods. """ return self._do_in_mode('onchange') @property def in_onchange(self): """ Return whether we are in 'onchange' draft mode. """ return self.all.mode == 'onchange' def invalidate(self, spec): """ Invalidate some fields for some records in the cache of all environments. :param spec: what to invalidate, a list of `(field, ids)` pair, where ``field`` is a field object, and ``ids`` is a list of record ids or ``None`` (to invalidate all records). """ if not spec: return for env in list(self.all): c = env.cache for field, ids in spec: if ids is None: if field in c: del c[field] else: field_cache = c[field] for id in ids: field_cache.pop(id, None) def invalidate_all(self): """ Clear the cache of all environments. """ for env in list(self.all): env.cache.clear() env.prefetch.clear() env.computed.clear() env.dirty.clear() def clear(self): """ Clear all record caches, and discard all fields to recompute. This may be useful when recovering from a failed ORM operation. """ self.invalidate_all() self.all.todo.clear() @contextmanager def clear_upon_failure(self): """ Context manager that clears the environments (caches and fields to recompute) upon exception. """ try: yield except Exception: self.clear() raise def field_todo(self, field): """ Check whether ``field`` must be recomputed, and returns a recordset with all records to recompute for ``field``. """ if field in self.all.todo: ids = set(rid for recs in self.all.todo[field] for rid in recs.ids) return self[field.model_name].browse(ids) def check_todo(self, field, record): """ Check whether ``field`` must be recomputed on ``record``, and if so, returns the corresponding recordset to recompute. """ for recs in self.all.todo.get(field, []): if recs & record: return recs def add_todo(self, field, records): """ Mark ``field`` to be recomputed on ``records``. """ recs_list = self.all.todo.setdefault(field, []) for i, recs in enumerate(recs_list): if recs.env == records.env: recs_list[i] |= records break else: recs_list.append(records) def remove_todo(self, field, records): """ Mark ``field`` as recomputed on ``records``. 
""" recs_list = [recs - records for recs in self.all.todo.pop(field, [])] recs_list = filter(None, recs_list) if recs_list: self.all.todo[field] = recs_list def has_todo(self): """ Return whether some fields must be recomputed. """ return bool(self.all.todo) def get_todo(self): """ Return a pair `(field, records)` to recompute. """ for field, recs_list in self.all.todo.iteritems(): return field, recs_list[0] def check_cache(self): """ Check the cache consistency. """ # make a full copy of the cache, and invalidate it cache_dump = dict( (field, dict(field_cache)) for field, field_cache in self.cache.iteritems() ) self.invalidate_all() # re-fetch the records, and compare with their former cache invalids = [] for field, field_dump in cache_dump.iteritems(): ids = filter(None, field_dump) records = self[field.model_name].browse(ids) for record in records: try: cached = field_dump[record.id] fetched = record[field.name] if fetched != cached: info = {'cached': cached, 'fetched': fetched} invalids.append((field, record, info)) except (AccessError, MissingError): pass if invalids: raise Warning('Invalid cache for fields\n' + pformat(invalids)) @property def recompute(self): return self.all.recompute @contextmanager def norecompute(self): tmp = self.all.recompute self.all.recompute = False try: yield finally: self.all.recompute = tmp @property def recompute_old(self): return self.all.recompute_old def clear_recompute_old(self): del self.all.recompute_old[:] class Environments(object): """ A common object for all environments in a request. """ def __init__(self): self.envs = WeakSet() # weak set of environments self.todo = {} # recomputations {field: [records]} self.mode = False # flag for draft/onchange self.recompute = True self.recompute_old = [] # list of old api compute fields to recompute def add(self, env): """ Add the environment ``env``. """ self.envs.add(env) def __iter__(self): """ Iterate over environments. """ return iter(self.envs) # keep those imports here in order to handle cyclic dependencies correctly from openerp import SUPERUSER_ID from openerp.exceptions import Warning, AccessError, MissingError from openerp.modules.registry import RegistryManager
agpl-3.0
synopat/pyload
module/lib/jinja2/_markupsafe/_bundle.py
186
1312
# -*- coding: utf-8 -*-
"""
    jinja2._markupsafe._bundle
    ~~~~~~~~~~~~~~~~~~~~~~~~~~

    This script pulls in markupsafe from a source folder and
    bundles it with Jinja2.  It does not pull in the speedups
    module though.

    :copyright: Copyright 2010 by the Jinja team, see AUTHORS.
    :license: BSD, see LICENSE for details.
"""
import sys
import os
import re


def rewrite_imports(lines):
    for idx, line in enumerate(lines):
        new_line = re.sub(r'(import|from)\s+markupsafe\b',
                          r'\1 jinja2._markupsafe', line)
        if new_line != line:
            lines[idx] = new_line


def main():
    if len(sys.argv) != 2:
        print 'error: only argument is path to markupsafe'
        sys.exit(1)
    basedir = os.path.dirname(__file__)
    markupdir = sys.argv[1]
    for filename in os.listdir(markupdir):
        if filename.endswith('.py'):
            f = open(os.path.join(markupdir, filename))
            try:
                lines = list(f)
            finally:
                f.close()
            rewrite_imports(lines)
            f = open(os.path.join(basedir, filename), 'w')
            try:
                for line in lines:
                    f.write(line)
            finally:
                f.close()


if __name__ == '__main__':
    main()
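A small demonstration of what `rewrite_imports` does to a few hypothetical source lines (Python 2, matching the script itself); note that ``\b`` keeps names like ``markupsafe_extras`` untouched:

from jinja2._markupsafe._bundle import rewrite_imports

lines = ['import markupsafe\n',
         'from markupsafe import escape\n',
         'import markupsafe_extras\n']  # no word boundary, left alone
rewrite_imports(lines)
print ''.join(lines)
# import jinja2._markupsafe
# from jinja2._markupsafe import escape
# import markupsafe_extras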
gpl-3.0
mrlb05/Nifty4Gemini
nifty/pipeline/steps/nifsReduce.py
2
43248
#!/usr/bin/env python # MIT License # Copyright (c) 2015, 2017 Marie Lemoine-Busserolle # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. ################################################################################ # Import some useful Python utilities/modules # ################################################################################ # STDLIB import sys, glob, shutil, os, time, logging, glob, urllib, re, pkg_resources from pyraf import iraf, iraffunctions import astropy.io.fits import numpy as np # LOCAL # Import config parsing. from ..configobj.configobj import ConfigObj # Import custom Nifty functions. from ..nifsUtils import datefmt, listit, writeList, checkLists, makeSkyList, MEFarith, convertRAdec, copyResultsToScience # Define constants # Paths to Nifty data. RECIPES_PATH = pkg_resources.resource_filename('nifty', 'recipes/') RUNTIME_DATA_PATH = pkg_resources.resource_filename('nifty', 'runtimeData/') def start(kind, telluricDirectoryList="", scienceDirectoryList=""): """ start(kind): Do a full reduction of either Science or Telluric data. nifsReduce- for the telluric and science data reduction. Reduces NIFS telluric and science frames and attempts a flux calibration. Parameters are loaded from runtimeData/config.cfg. This script will automatically detect if it is being run on telluric data or science data. There are 6 steps. INPUT: + Raw files - Science frames - Sky frames + Calibration files - MDF shift file - Bad Pixel Mask (BPM) - Flat field frame - Reduced arc frame - Reduced ronchi mask frame - arc and ronchi database/ files OUTPUT: - If telluric reduction an efficiency spectrum used to telluric correct and absolute flux calibrate science frames - If science reduction a reduced science data cube. Args: kind (string): either 'Telluric' or 'Science'. telluricDirectoryList (string): Used by low memory pipeline. scienceDirectoryList (string): Used by low memory pipeline. """ # TODO(nat): Right now the pipeline will crash if you decide to skip, say, doing a bad # pixel correction. This is because each step adds a prefix to the frame name, and most following # steps depend on that prefix being there. # One way to fix this is if a step is to be skipped, iraf.copy() is called instead to copy the frame and # add the needed prefix. Messy but it might work for now. 
########################################################################### ## ## ## BEGIN - GENERAL REDUCTION SETUP ## ## ## ########################################################################### # Store current working directory for later use. path = os.getcwd() # Set up the logging file. log = os.getcwd()+'/Nifty.log' logging.info('\n#################################################') logging.info('# #') logging.info('# Start the NIFS Science and Telluric Reduction #') logging.info('# #') logging.info('#################################################\n') # Set up/prepare IRAF. iraf.gemini() iraf.gemtools() iraf.gnirs() iraf.nifs() # Reset to default parameters the used IRAF tasks. iraf.unlearn(iraf.gemini,iraf.gemtools,iraf.gnirs,iraf.nifs,iraf.imcopy) # From http://bishop.astro.pomona.edu/Penprase/webdocuments/iraf/beg/beg-image.html: # Before doing anything involving image display the environment variable # stdimage must be set to the correct frame buffer size for the display # servers (as described in the dev$graphcap file under the section "STDIMAGE # devices") or to the correct image display device. The task GDEVICES is # helpful for determining this information for the display servers. iraf.set(stdimage='imt2048') # Prepare the IRAF package for NIFS. # NSHEADERS lists the header parameters used by the various tasks in the # NIFS package (excluding headers values which have values fixed by IRAF or # FITS conventions). iraf.nsheaders("nifs",logfile=log) # Set clobber to 'yes' for the script. This still does not make the gemini # tasks overwrite files, so: # YOU WILL LIKELY HAVE TO REMOVE FILES IF YOU RE_RUN THE SCRIPT. user_clobber=iraf.envget("clobber") iraf.reset(clobber='yes') # This helps make sure all variables are initialized to prevent bugs. scienceSkySubtraction = None scienceOneDExtraction = None extractionXC = None extractionYC = None extractionRadius = None telluricSkySubtraction = None # Load reduction parameters from runtimeData/config.cfg. with open('./config.cfg') as config_file: config = ConfigObj(config_file, unrepr=True) # Read general pipeline config. over = config['over'] manualMode = config['manualMode'] calDirList = config['calibrationDirectoryList'] scienceOneDExtraction = config['scienceOneDExtraction'] extractionXC = config['extractionXC'] extractionYC = config['extractionYC'] extractionRadius = config['extractionRadius'] if kind == 'Telluric': # Telluric reduction specific config. telluricReductionConfig = config['telluricReductionConfig'] if telluricDirectoryList: observationDirectoryList = telluricDirectoryList elif not telluricDirectoryList: observationDirectoryList = config['telluricDirectoryList'] start = telluricReductionConfig['telStart'] stop = telluricReductionConfig['telStop'] telluricSkySubtraction = telluricReductionConfig['telluricSkySubtraction'] if kind == 'Science': # Science reduction specific config. scienceReductionConfig = config['scienceReductionConfig'] if scienceDirectoryList: observationDirectoryList = scienceDirectoryList elif not scienceDirectoryList: observationDirectoryList = config['scienceDirectoryList'] start = scienceReductionConfig['sciStart'] stop = scienceReductionConfig['sciStop'] scienceSkySubtraction = scienceReductionConfig['scienceSkySubtraction'] ########################################################################### ## ## ## COMPLETE - GENERAL REDUCTION SETUP ## ## ## ########################################################################### # nifsReduce has two nested loops that reduced data. 
    # It loops through each science (or telluric) directory, and
    # runs through a series of calibration steps on the data in that directory.

    # Loop through all the observation (telluric or science) directories to perform a reduction on each one.
    for observationDirectory in observationDirectoryList:

        ###########################################################################
        ##                                                                       ##
        ##                  BEGIN - OBSERVATION SPECIFIC SETUP                   ##
        ##                                                                       ##
        ###########################################################################

        # Print the current directory of data being reduced.
        logging.info("\n#################################################################################")
        logging.info("                                   ")
        logging.info("  Currently working on reductions")
        logging.info("  in "+ str(observationDirectory))
        logging.info("                                   ")
        logging.info("#################################################################################\n")

        os.chdir(observationDirectory)
        tempObs = observationDirectory.split(os.sep)
        obsid = tempObs[-1]

        # Change the iraf directory to the current directory.
        pwd = os.getcwd()
        iraffunctions.chdir(pwd)

        # Copy relevant calibrations over to the science directory.
        # Open and store the name of the MDF shift reference file from shiftfile into shift.
        shift = 'calibrations/shiftFile'
        # Open and store the name of the flat frame.
        flat = 'calibrations/finalFlat'
        # Open and store the bad pixel mask.
        finalBadPixelMask = 'calibrations/finalBadPixelMask'
        # Ronchi, arc and database must all be in the local calibrations directory.
        # Open and store the name of the reduced spatial correction ronchi flat frame name from ronchifile in ronchi.
        ronchi = 'finalRonchi'
        # Open and store the name of the reduced wavelength calibration arc frame from arclist in arc.
        arc = 'finalArc'

        if os.path.exists(os.getcwd()+'/'+ronchi+".fits"):
            if over:
                # Remove the stale local copy, then copy the spatial calibration
                # ronchi flat frame from the calibrations directory to the observation directory.
                iraf.delete(os.getcwd()+'/'+ronchi+'.fits')
                shutil.copy(os.getcwd()+'/calibrations/finalRonchi.fits', ronchi+'.fits')
            else:
                print "\nOutput exists and -over not set - skipping copy of reduced ronchi"
        else:
            shutil.copy(os.getcwd()+'/calibrations/finalRonchi.fits', ronchi+'.fits')

        if os.path.exists(os.getcwd()+'/'+arc+".fits"):
            if over:
                # Remove the stale local copy, then copy the wavelength calibration
                # arc frame from the calibrations directory to the observation directory.
                iraf.delete(os.getcwd()+'/'+arc+'.fits')
                shutil.copy(os.getcwd()+'/calibrations/finalArc.fits', arc+'.fits')
            else:
                print "\nOutput exists and -over not set - skipping copy of reduced arc"
        else:
            shutil.copy(os.getcwd()+'/calibrations/finalArc.fits', arc+'.fits')

        # Make sure the database files are in place. Current understanding is that
        # these should be local to the reduction directory, so need to be copied from
        # the calDir.
        if os.path.isdir("./database"):
            if over:
                shutil.rmtree("./database")
                os.mkdir("./database")
                for item in glob.glob("calibrations/database/*"):
                    shutil.copy(item, "./database/")
            else:
                print "\nOutput exists and -over not set - skipping copy of database directory"
        else:
            os.mkdir('./database/')
            for item in glob.glob("calibrations/database/*"):
                shutil.copy(item, "./database/")

        if telluricSkySubtraction or scienceSkySubtraction:
            # Read the list of sky frames in the observation directory.
try: skyFrameList = open("skyFrameList", "r").readlines() skyFrameList = [frame.strip() for frame in skyFrameList] except: logging.info("\n#####################################################################") logging.info("#####################################################################") logging.info("") logging.info(" WARNING in reduce: No sky frames were found in a directory.") logging.info(" Please make a skyFrameList in: " + str(os.getcwd())) logging.info("") logging.info("#####################################################################") logging.info("#####################################################################\n") raise SystemExit sky = skyFrameList[0] # If we are doing a telluric reduction, open the list of telluric frames in the observation directory. # If we are doing a science reduction, open the list of science frames in the observation directory. if kind == 'Telluric': tellist = open('tellist', 'r').readlines() tellist = [frame.strip() for frame in tellist] elif kind == 'Science': scienceFrameList = open("scienceFrameList", "r").readlines() scienceFrameList = [frame.strip() for frame in scienceFrameList] # For science frames, check to see if the number of sky frames matches the number of science frames. # IF NOT duplicate the sky frames and rewrite the sky file and skyFrameList. if scienceSkySubtraction: if not len(skyFrameList)==len(scienceFrameList): skyFrameList = makeSkyList(skyFrameList, scienceFrameList, observationDirectory) ########################################################################### ## ## ## COMPLETE - OBSERVATION SPECIFIC SETUP ## ## BEGIN DATA REDUCTION FOR AN OBSERVATION ## ## ## ########################################################################### # Check start and stop values for reduction steps. Ask user for a correction if # input is not valid. 
        valindex = start
        while valindex > stop or valindex < 1 or stop > 6:
            logging.info("\n#####################################################################")
            logging.info("#####################################################################")
            logging.info("")
            logging.info("     WARNING in reduce: invalid start/stop values of observation")
            logging.info("                        reduction steps.")
            logging.info("")
            logging.info("#####################################################################")
            logging.info("#####################################################################\n")

            valindex = int(raw_input("\nPlease enter a valid start value (1 to 6, default 1): "))
            stop = int(raw_input("\nPlease enter a valid stop value (1 to 6, default 6): "))

        while valindex <= stop:

            ###########################################################################
            ##  STEP 1: Prepare raw data; science, telluric and sky frames ->n       ##
            ###########################################################################

            if valindex == 1:
                if manualMode:
                    a = raw_input("About to enter step 1: locate the spectrum.")

                if kind=='Telluric':
                    tellist = prepare(tellist, shift, finalBadPixelMask, log, over)
                elif kind=='Science':
                    scienceFrameList = prepare(scienceFrameList, shift, finalBadPixelMask, log, over)
                if telluricSkySubtraction or scienceSkySubtraction:
                    skyFrameList = prepare(skyFrameList, shift, finalBadPixelMask, log, over)

                logging.info("\n##############################################################################")
                logging.info("")
                logging.info("  STEP 1: Locate the Spectrum (and prepare raw data) ->n - COMPLETED ")
                logging.info("")
                logging.info("##############################################################################\n")

            ###########################################################################
            ##  STEP 2: Sky Subtraction ->sn                                         ##
            ###########################################################################

            elif valindex == 2:
                if manualMode:
                    a = raw_input("About to enter step 2: sky subtraction.")

                # Combine telluric sky frames.
if kind=='Telluric': if telluricSkySubtraction: if len(skyFrameList)>1: combineImages(skyFrameList, "gn"+sky, log, over) else: copyImage(skyFrameList, 'gn'+sky+'.fits', over) skySubtractTel(tellist, "gn"+sky, log, over) else: for image in tellist: iraf.copy('n'+image+'.fits', 'sn'+image+'.fits') if kind=='Science': if scienceSkySubtraction: skySubtractObj(scienceFrameList, skyFrameList, log, over) else: for image in scienceFrameList: iraf.copy('n'+image+'.fits', 'sn'+image+'.fits') logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 2: Sky Subtraction ->sn - COMPLETED ") logging.info("") logging.info("##############################################################################\n") ############################################################################## ## STEP 3: Flat field, slice, subtract dark and correct bad pixels ->brsn ## ############################################################################## elif valindex == 3: if manualMode: a = raw_input("About to enter step 3: flat fielding and bad pixels correction.") if kind=='Telluric': applyFlat(tellist, flat, log, over, kind) fixBad(tellist, log, over) elif kind=='Science': applyFlat(scienceFrameList, flat, log, over, kind) fixBad(scienceFrameList, log, over) logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 3: Flat fielding and Bad Pixels Correction ->brsn - COMPLETED ") logging.info("") logging.info("##############################################################################\n") ########################################################################### ## STEP 4: Derive and apply 2D to 3D transformation ->tfbrsn ## ########################################################################### elif valindex == 4: if manualMode: a = raw_input("About to enter step 4: 2D to 3D transformation and Wavelength Calibration.") if kind=='Telluric': fitCoords(tellist, arc, ronchi, log, over, kind) transform(tellist, log, over) elif kind=='Science': fitCoords(scienceFrameList, arc, ronchi, log, over, kind) transform(scienceFrameList, log, over) logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 4: 2D to 3D transformation and Wavelength Calibration ->tfbrsn - COMPLETED ") logging.info("") logging.info("##############################################################################\n") ############################################################################ ## STEP 5 (tellurics): For telluric data derive a telluric ## ## correction ->gxtfbrsn ## ## STEP 5 (science): For science apply an efficiency correction and make ## ## a data cube (not necessarily in that order). ## ## (i) Python method applies correction to nftransformed cube. ## ## Good for faint objects. ->cptfbrsn ## ## (ii) iraf.telluric method applies correction to ## ## nftransformed result (not quite a data cube) then ## ## nftransforms cube. ->catfbrsn ## ## (iii) If no telluric correction/flux calibration to be ## ## applied make a plain data cube. ->ctfbrsn ## ############################################################################ elif valindex == 5: if manualMode: a = raw_input("About to enter step 5.") # For telluric data: # Make a combined extracted 1D standard star spectrum. 
if kind=='Telluric': extractOneD(tellist, kind, log, over, extractionXC, extractionYC, extractionRadius) # TODO(nat): add this as a parameter; encapsulate this. copyToScience = True if copyToScience: # Copy final extracted results to science directory. try: with open("scienceMatchedTellsList", "r") as f: lines = f.readlines() lines = [x.strip() for x in lines] for i in range(len(lines)): if "obs" in lines[i]: k = 1 while i+k != len(lines) and "obs" not in lines[i+k]: copyResultsToScience("gxtfbrsn"+tellist[0]+".fits", "0_tel"+lines[i+k]+".fits", over) k+=1 except IOError: logging.info("\nNo scienceMatchedTellsList found in "+ os.getcwd() +" . Skipping copy of extracted spectra to science directory.") logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 5a: Extract 1D Spectra and Make Combined 1D Standard Star Spectrum") logging.info(" ->gxtfbrsn - COMPLETED") logging.info("") logging.info("##############################################################################\n") #TODO(nat): add this as a parameter. makeTelluricCube = True if makeTelluricCube: makeCube('tfbrsn', tellist, log, over) logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 5b: Make uncorrected standard star data cubes, ->ctfbrsn - COMPLETED") logging.info("") logging.info("##############################################################################\n") # For Science data: # Possibly extract 1D spectra, and make uncorrected cubes. elif kind=='Science': if scienceOneDExtraction: extractOneD(scienceFrameList, kind, log, over, extractionXC, extractionYC, extractionRadius) copyExtracted(scienceFrameList, over) logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 5a: Make extracted 1D Science spectra, ->ctgbrsn - COMPLETED") logging.info("") logging.info("##############################################################################\n") makeCube('tfbrsn', scienceFrameList, log, over) # TODO(nat): encapsulate this inside a function. 
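                    # One possible shape for the encapsulation the TODO above asks for -
                    # a hypothetical helper shown for illustration only; the inline
                    # blocks below are what actually run.
                    def _copyCubesToProducts(productDir, frameList, over):
                        """Create a products directory and copy each final cube into it."""
                        if os.path.exists(productDir):
                            if over:
                                shutil.rmtree(productDir)
                                os.mkdir(productDir)
                            else:
                                logging.info("\nOutput exists and -over not set - skipping creation of "+productDir)
                        else:
                            os.mkdir(productDir)
                        for item in frameList:
                            if os.path.exists(productDir+'/ctfbrsn'+item+'.fits'):
                                if over:
                                    os.remove(productDir+'/ctfbrsn'+item+'.fits')
                                    shutil.copy('ctfbrsn'+item+'.fits', productDir+'/ctfbrsn'+item+'.fits')
                                else:
                                    logging.info("\nOutput exists and -over not set - skipping copy of cube")
                            else:
                                shutil.copy('ctfbrsn'+item+'.fits', productDir+'/ctfbrsn'+item+'.fits')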
if os.path.exists('products_uncorrected'): if over: shutil.rmtree('products_uncorrected') os.mkdir('products_uncorrected') else: logging.info("\nOutput exists and -over not set - skipping creating of products_uncorrected directory") else: os.mkdir('products_uncorrected') for item in scienceFrameList: if os.path.exists('products_uncorrected/ctfbrsn'+item+'.fits'): if over: os.remove('products_uncorrected/ctfbrsn'+item+'.fits') shutil.copy('ctfbrsn'+item+'.fits', 'products_uncorrected/ctfbrsn'+item+'.fits') else: logging.info("\nOutput exists and -over not set - skipping copy of uncorrected cube") else: shutil.copy('ctfbrsn'+item+'.fits', 'products_uncorrected/ctfbrsn'+item+'.fits') if os.path.exists('products_telluric_corrected'): if over: shutil.rmtree('products_telluric_corrected') os.mkdir('products_telluric_corrected') else: logging.info("\nOutput exists and -over not set - skipping creating of products_telluric_corrected directory") else: os.mkdir('products_telluric_corrected') for item in scienceFrameList: if os.path.exists('products_telluric_corrected/ctfbrsn'+item+'.fits'): if over: os.remove('products_telluric_corrected/ctfbrsn'+item+'.fits') shutil.copy('ctfbrsn'+item+'.fits', 'products_telluric_corrected/ctfbrsn'+item+'.fits') else: logging.info("\nOutput exists and -over not set - skipping copy of uncorrected cube") else: shutil.copy('ctfbrsn'+item+'.fits', 'products_telluric_corrected/ctfbrsn'+item+'.fits') logging.info("\n##############################################################################") logging.info("") logging.info(" STEP 5b: Make uncorrected science data cubes, ->ctfbrsn - COMPLETED") logging.info("") logging.info("##############################################################################\n") valindex += 1 logging.info("\n##############################################################################") logging.info("") logging.info(" COMPLETE - Reductions completed for " + str(observationDirectory)) logging.info("") logging.info("##############################################################################\n") # Return to directory script was begun from. os.chdir(path) ################################################################################################################## # ROUTINES # ################################################################################################################## def prepare(inlist, shiftima, finalBadPixelMask, log, over): """Prepare list of frames using iraf.nfprepare. Output: -->n. Processing with NFPREPARE (this task is used only for NIFS data but other instruments have their own preparation tasks with similar actions) will rename the data extension and add variance and data quality extensions. By default (see NSHEADERS) the extension names are SCI for science data, VAR for variance, and DQ for data quality (0 = good). Generation of the data quality plane (DQ) is important in order to fix hot and dark pixels on the NIFS detector in subsequent steps in the data reduction process. Various header keywords (used later) are also added in NFPREPARE. NFPREPARE will also add an MDF file (extension MDF) describing the NIFS image slicer pattern and how the IFU maps to the sky field. """ # Update frames with mdf offset value and generate variance and data quality extensions. 
    for frame in inlist:
        # Skip frames that already carry the 'n' prefix unless -over is set.
        if os.path.exists("n"+frame+".fits"):
            if over:
                os.remove("n"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping prepare_list")
                continue
        iraf.nfprepare(frame, rawpath="", shiftimage=shiftima, fl_vardq="yes", bpm=finalBadPixelMask, fl_int='yes', fl_corr='no', fl_nonl='no', logfile=log)

    inlist = checkLists(inlist, '.', 'n', '.fits')
    return inlist

#--------------------------------------------------------------------------------------------------------------------------------#

def combineImages(inlist, out, log, over):
    """Gemcombine multiple frames. Output: -->gn."""
    if os.path.exists(out+".fits"):
        if over:
            iraf.delete(out+".fits")
        else:
            logging.info("Output file exists and -over not set - skipping combine_ima")
            return
    iraf.gemcombine(listit(inlist,"n"), output=out, fl_dqpr='yes', fl_vardq='yes', masktype="none", combine="median", logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def copyImage(input, output, over):
    """Copy a frame (used to add the correct prefix when skipping steps)."""
    if os.path.exists(output):
        if over:
            iraf.delete(output)
        else:
            logging.info("Output file exists and -over not set - skipping copy_ima")
            return
    iraf.copy('n'+input[0]+'.fits', output)

#--------------------------------------------------------------------------------------------------------------------------------#

def skySubtractObj(objlist, skyFrameList, log, over):
    """Sky subtraction for science using iraf.gemarith. Output: ->sn."""
    for i in range(len(objlist)):
        frame = str(objlist[i])
        sky = str(skyFrameList[i])
        if os.path.exists("sn"+frame+".fits"):
            if over:
                os.remove("sn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping skysub_list")
                continue
        iraf.gemarith("n"+frame, "-", "n"+sky, "sn"+frame, fl_vardq="yes", logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def skySubtractTel(tellist, sky, log, over):
    """Sky subtraction for telluric using iraf.gemarith. Output: ->sn."""
    for frame in tellist:
        if os.path.exists("sn"+frame+".fits"):
            if over:
                os.remove("sn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping skySubtractTel.")
                continue
        iraf.gemarith("n"+frame, "-", sky, "sn"+frame, fl_vardq="yes", logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def applyFlat(objlist, flat, log, over, kind, dark=""):
    """Flat field and cut the data with iraf.nsreduce. Output: ->rsn.

    NSREDUCE is used for basic reduction of raw data - it provides a
    single, unified interface to several tasks and also allows for the
    subtraction of dark frames and dividing by the flat. For NIFS reduction,
    NSREDUCE is used to call the NSCUT and NSAPPWAVE routines.
    """
    # By default don't subtract darks from tellurics; fl_dark is switched on
    # only when a dark frame is actually supplied.
    fl_dark = "no"
    if dark != "":
        fl_dark = "yes"
    for frame in objlist:
        frame = str(frame).strip()
        if os.path.exists("rsn"+frame+".fits"):
            if over:
                os.remove("rsn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping apply_flat_list")
                continue
        iraf.nsreduce("sn"+frame, fl_cut="yes", fl_nsappw="yes", fl_dark=fl_dark, fl_sky="no", fl_flat="yes", flatimage=flat, fl_vardq="yes", logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def fixBad(objlist, log, over):
    """Interpolate over bad pixels flagged in the DQ plane with iraf.nffixbad. Output: -->brsn.

    NFFIXBAD - Fix Hot/Cold pixels on the NIFS detector.

    This routine uses the information in the Data Quality extensions to fix
    hot and cold pixels in the NIFS science fields. NFFIXBAD is a wrapper
    script which calls the task FIXPIX, using the DQ plane to define the
    pixels to be corrected.
    """
    for frame in objlist:
        frame = str(frame).strip()
        if os.path.exists("brsn"+frame+".fits"):
            if over:
                os.remove("brsn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping fixbad_list")
                continue
        iraf.nffixbad("rsn"+frame, logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def fitCoords(objlist, arc, ronchi, log, over, kind):
    """Derive the 2D to 3D spatial/spectral transformation with iraf.nsfitcoords. Output: -->fbrsn.

    NFFITCOORDS - Compute 2D dispersion and distortion maps.

    This routine uses as inputs the output from the NSWAVELENGTH and NFSDIST
    routines. NFFITCOORDS takes the spatial and spectral rectification
    information from NSWAVELENGTH and NFSDIST and converts this into a
    calculation of where the data information should map to in a final IFU
    dataset.
    """
    for frame in objlist:
        frame = str(frame).strip()
        if os.path.exists("fbrsn"+frame+".fits"):
            if over:
                os.remove("fbrsn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping fitcoord_list")
                continue
        iraf.nsfitcoords("brsn"+frame, lamptransf=arc, sdisttransf=ronchi, database="database", lxorder=3, lyorder=2, sxorder=3, syorder=3, logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def transform(objlist, log, over):
    """Apply the transformation determined in iraf.nsfitcoords with iraf.nstransform. Output: -->tfbrsn.

    NSTRANSFORM - Spatially rectify and wavelength calibrate data.

    NFTRANSFORM applies the wavelength solution found by NSWAVELENGTH and the
    spatial correction found by NFSDIST, aligning all the IFU extensions
    consistently onto a common coordinate system. The output of this routine
    is still in 2D format, with each of the IFU slices represented by its own
    data extension.
    """
    for frame in objlist:
        frame = str(frame).strip()
        if os.path.exists("tfbrsn"+frame+".fits"):
            if over:
                iraf.delete("tfbrsn"+frame+".fits")
            else:
                logging.info("Output file exists and -over not set - skipping transform_list")
                continue
        iraf.nstransform("fbrsn"+frame, logfile=log)

#--------------------------------------------------------------------------------------------------------------------------------#

def makeCube(pre, scienceFrameList, log, over):
    """Reformat the data into a 3-D datacube using iraf.nifcube. Output: -->ctfbrsn.

    NIFCUBE - Construct 3D NIFS datacubes.
NIFCUBE takes input from data output by either NFFITCOORDS or NFTRANSFORM and converts the 2D data images into data cubes that have coordinates of x, y, lambda. """ for frame in scienceFrameList: if os.path.exists("c"+pre+frame+".fits"): if over: iraf.delete("c"+pre+frame+".fits") else: logging.info("Output file exists and -over not set - skipping make_cube_list") continue iraf.nifcube (pre+frame, outcubes = 'c'+pre+frame, logfile=log) #--------------------------------------------------------------------------------------------------------------------------------# def extractOneD(inputList, kind, log, over, extractionXC=15.0, extractionYC=33.0, extractionRadius=2.5): """Extracts 1-D spectra with iraf.nfextract and combines them with iraf.gemcombine. iraf.nfextract is currently only done interactively. Output: -->xtfbrsn and gxtfbrsn NFEXTRACT - Extract NIFS spectra. This could be used to extract a 1D spectra from IFU data and is particularly useful for extracting the bright spectra of telluric calibrator stars. Note that this routine only works on data that has been run through NFTRANSFORM. """ for frame in inputList: frame = str(frame).strip() if os.path.exists("xtfbrsn"+frame+".fits"): if over: iraf.delete("xtfbrsn"+frame+".fits") else: logging.info("Output file exists and -over not set - skipping nfextract in extract1D") continue iraf.nfextract("tfbrsn"+frame, outpref="x", xc=extractionXC, yc=extractionYC, diameter=extractionRadius, fl_int='no', logfile=log) inputList = checkLists(inputList, '.', 'xtfbrsn', '.fits') # Combine all the 1D spectra to one final output file with the name of the first input file. combined = str(inputList[0]).strip() if len(inputList) > 1: if os.path.exists("gxtfbrsn"+combined+".fits"): if over: iraf.delete("gxtfbrsn"+combined+".fits") else: logging.info("Output file exists and -over not set - skipping gemcombine in extract1D") return iraf.gemcombine(listit(inputList,"xtfbrsn"),output="gxtfbrsn"+combined, statsec="[*]", combine="median",masktype="none",fl_vardq="yes", logfile=log) else: if over: iraf.delete("gxtfbrsn"+combined+".fits") iraf.copy(input="xtfbrsn"+combined+".fits", output="gxtfbrsn"+combined+".fits") if kind == 'Telluric': # Put the name of the final combined file into a text file called # telluricfile to be used by the pipeline later. open("telluricfile", "w").write("gxtfbrsn"+combined) elif kind == 'Science': open("combinedOneD", "w").write("gxtfbrsn"+combined) #--------------------------------------------------------------------------------------------------------------------------------# def copyExtracted(scienceFrameList, over): """ Copy all extracted 1D spectra to objectname/ExtractedOneD/date_obsname/, and combined 1D spectra to objectname/ExtractedOneD """ # TODO(nat): make this clearer. obsDir = os.getcwd() temp1 = os.path.split(obsDir) temp2 = os.path.split(temp1[0]) temp3 = os.path.split(temp2[0]) temp4 = os.path.split(temp3[0]) objname = temp4[1] date = temp3[1] obsid = temp1[1] obsPath = temp3[0] os.chdir(obsDir) # Create a directory called ExtractedOneD and copy all the data cubes to this directory. if not os.path.exists(obsPath+'/ExtractedOneD/'): os.mkdir(obsPath+'/ExtractedOneD/') logging.info('I am creating a directory called ExtractedOneD') ExtractedOneD = obsPath+'/ExtractedOneD' # Make the appropriate directory structure. 
    if not os.path.exists(ExtractedOneD+'/'+date+'_'+obsid):
        os.mkdir(ExtractedOneD+'/'+date+'_'+obsid)
        logging.info('I am creating a directory with date and obs ID inside ExtractedOneD ')
    # Get the filenames of the uncombined spectra.
    uncombinedSpectra = glob.glob('xtfbrsnN*.fits')
    # Copy the uncombined spectra to the appropriate directory.
    for spectra in uncombinedSpectra:
        if os.path.exists(ExtractedOneD+'/'+date+'_'+obsid+'/'+spectra):
            if over:
                os.remove(ExtractedOneD+'/'+date+'_'+obsid+'/'+spectra)
                shutil.copy(spectra, ExtractedOneD+'/'+date+'_'+obsid)
            else:
                logging.info("Output file exists and -over not set - skipping copy one D spectra")
        else:
            shutil.copy(spectra, ExtractedOneD+'/'+date+'_'+obsid)
    # Get the file name of the combined spectrum.
    combinedSpectrum = glob.glob('gxtfbrsnN*.fits')
    combinedSpectrum = combinedSpectrum[0]
    # Copy the combined spectrum to the appropriate directory.
    if os.path.exists(ExtractedOneD+'/'+combinedSpectrum):
        if over:
            os.remove(ExtractedOneD+'/'+combinedSpectrum)
            shutil.copy(combinedSpectrum, ExtractedOneD)
        else:
            logging.info("Output file exists and -over not set - skipping copy combined one D spectra")
    else:
        shutil.copy(combinedSpectrum, ExtractedOneD+'/combined'+date+'_'+obsid+'.fits')

#--------------------------------------------------------------------------------------------------------------------------------#

if __name__ == '__main__':
    a = raw_input('Enter <Science> for science reduction or <Telluric> for telluric reduction: ')
    start(a)
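# For reference, a minimal config.cfg sketch covering only the keys read by
# start() above. Every value here is illustrative; real configurations carry
# more sections. The file is parsed with ConfigObj(unrepr=True), so values
# are written as Python literals.
#
#   over = False
#   manualMode = False
#   calibrationDirectoryList = ['/path/to/calibrations']
#   telluricDirectoryList = ['/path/to/tellurics/obs1']
#   scienceDirectoryList = ['/path/to/science/obs1']
#   scienceOneDExtraction = True
#   extractionXC = 15.0
#   extractionYC = 33.0
#   extractionRadius = 2.5
#   [telluricReductionConfig]
#       telStart = 1
#       telStop = 6
#       telluricSkySubtraction = True
#   [scienceReductionConfig]
#       sciStart = 1
#       sciStop = 6
#       scienceSkySubtraction = True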
mit
supersven/intellij-community
python/lib/Lib/site-packages/django/utils/_threading_local.py
343
6655
"""Thread-local objects (Note that this module provides a Python version of thread threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the local class from threading.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = mydata.__dict__.items() ... items.sort() ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ # Threading import is at end class _localbase(object): __slots__ = '_local__key', '_local__args', '_local__lock' def __new__(cls, *args, **kw): self = object.__new__(cls) key = '_local__key', 'thread.local.' 
+ str(id(self)) object.__setattr__(self, '_local__key', key) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. dict = object.__getattribute__(self, '__dict__') currentThread().__dict__[key] = dict return self def _patch(self): key = object.__getattribute__(self, '_local__key') d = currentThread().__dict__.get(key) if d is None: d = {} currentThread().__dict__[key] = d object.__setattr__(self, '__dict__', d) # we have a new instance dict, so call out __init__ if we have # one cls = type(self) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(self, '_local__args') cls.__init__(self, *args, **kw) else: object.__setattr__(self, '__dict__', d) class local(_localbase): def __getattribute__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__getattribute__(self, name) finally: lock.release() def __setattr__(self, name, value): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__setattr__(self, name, value) finally: lock.release() def __delattr__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__delattr__(self, name) finally: lock.release() def __del__(): threading_enumerate = enumerate __getattribute__ = object.__getattribute__ def __del__(self): key = __getattribute__(self, '_local__key') try: threads = list(threading_enumerate()) except: # if enumerate fails, as it seems to do during # shutdown, we'll skip cleanup under the assumption # that there is nothing to clean up return for thread in threads: try: __dict__ = thread.__dict__ except AttributeError: # Thread is dying, rest in peace continue if key in __dict__: try: del __dict__[key] except KeyError: pass # didn't have anything in this thread return __del__ __del__ = __del__() try: from threading import currentThread, enumerate, RLock except ImportError: from dummy_threading import currentThread, enumerate, RLock
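# A small self-check (illustrative; it mirrors the doctests in the module
# docstring above): the same `local` instance exposes an independent
# attribute namespace in each thread.
if __name__ == '__main__':
    import threading

    data = local()
    data.value = 'main'
    seen = []

    def worker():
        # A new thread starts with an empty per-thread __dict__ for `data`.
        seen.append(hasattr(data, 'value'))
        data.value = 'worker'
        seen.append(data.value)

    t = threading.Thread(target=worker)
    t.start()
    t.join()
    print seen, data.value  # -> [False, 'worker'] main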
apache-2.0
adviti/melange
thirdparty/google_appengine/google/appengine/runtime/wsgi.py
2
8418
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """WSGI server interface to Python runtime. WSGI-compliant interface between the Python runtime and user-provided Python code. """ import logging from google.appengine.api import lib_config class Error(Exception): pass class InvalidResponseError(Error): """An error indicating that the response is invalid.""" pass class WsgiRequest(object): """A single WSGI request.""" def __init__(self, environ, handler_name, url, post_data, error): """Creates a single WSGI request. Creates a request for handler_name in the form 'path.to.handler' for url with the environment contained in environ. Args: environ: A dict containing the environ for this request (e.g. like from os.environ). handler_name: A str containing the user-specified handler to use for this request as specified in the script field of a handler in app.yaml using the Python dot notation; e.g. 'package.module.application'. url: An urlparse.SplitResult instance containing the request url. post_data: A stream containing the post data for this request. error: A stream into which errors are to be written. """ self._handler = handler_name self._status = 500 self._response_headers = [] self._started_handling = False self._body = [] self._written_body = [] environ['wsgi.multiprocess'] = True environ['wsgi.run_once'] = False environ['wsgi.version'] = (1, 0) environ.setdefault('wsgi.multithread', False) self._error = error environ['wsgi.url_scheme'] = url.scheme environ['wsgi.input'] = post_data environ['wsgi.errors'] = self._error self._environ = environ def _Write(self, body_data): """Writes some body_data to the response. Args: body_data: data to be written. Raises: InvalidResponseError: body_data is not a str. """ if not isinstance(body_data, str): raise InvalidResponseError('body_data must be a str.') self._written_body.append(body_data) def _StartResponse(self, status, response_headers, exc_info=None): """A PEP 333 start_response callable. Implements the start_response behaviour of PEP 333. Sets the status code and response headers as provided. If exc_info is not None, then the previously provided status and response headers are replaced; this implementation buffers the complete response so valid use of exc_info never raises an exception. Otherwise, _StartResponse may only be called once. Args: status: A string containing the status code and status string. response_headers: a list of pairs representing header keys and values. exc_info: exception info as obtained from sys.exc_info(). Returns: A Write method as per PEP 333. Raises: InvalidResponseError: The arguments passed are invalid. 
""" if not isinstance(status, str): raise InvalidResponseError('status must be a string') if not status: raise InvalidResponseError('status must not be empty') if not isinstance(response_headers, list): raise InvalidResponseError('response_headers must be a list') for header in response_headers: if not isinstance(header, tuple): raise InvalidResponseError('headers must be tuples') if len(header) != 2: raise InvalidResponseError('header tuples must have length 2') if not isinstance(header[0], str) or not isinstance(header[1], str): raise InvalidResponseError('headers must be str') try: status_number = int(status.split(' ')[0]) except ValueError: raise InvalidResponseError('status code is not a number') if status_number < 200 or status_number >= 600: raise InvalidResponseError('status code must be in the range [200,600)') if exc_info is not None: self._status = status_number self._response_headers = response_headers exc_info = None elif self._started_handling: raise InvalidResponseError('_StartResponse may only be called once' ' without exc_info') else: self._status = status_number self._response_headers = response_headers self._started_handling = True self._body = [] self._written_body = [] return self._Write def Handle(self): """Handles the request represented by the WsgiRequest object. Loads the handler from the handler name provided. Calls the handler with the environ. Any exceptions in loading the user handler and executing it are caught and logged. Returns: A dict containing: error: App Engine error code. 0 for OK, 1 for error. response_code: HTTP response code. headers: A list of tuples (key, value) of HTTP headers. body: A str of the body of the response """ try: handler = _config_handle.add_wsgi_middleware(self._LoadHandler()) except: logging.exception('') return {'error': 1} result = None try: result = handler(self._environ, self._StartResponse) for chunk in result: if not isinstance(chunk, str): raise InvalidResponseError('handler must return an iterable of str') self._body.append(chunk) body = ''.join(self._written_body + self._body) return {'response_code': self._status, 'headers': self._response_headers, 'body': body} except: logging.exception('') return {'error': 1} finally: if hasattr(result, 'close'): result.close() def _LoadHandler(self): """Find and return a Python object with name handler_name. Find and return a Python object specified by self._handler. Packages and modules are imported as necessary. If successful, the filename of the module is inserted into environ with key 'PATH_TRANSLATED' if it has one. Returns: A Python object. Raises: ImportError: An element of the path cannot be resolved. """ path = self._handler.split('.') handler = __import__(path[0]) is_parent_package = True cumulative_path = path[0] for name in path[1:]: if hasattr(handler, '__file__'): self._environ['PATH_TRANSLATED'] = handler.__file__ is_parent_package = is_parent_package and hasattr(handler, '__path__') cumulative_path += '.' + name if hasattr(handler, name): handler = getattr(handler, name) elif is_parent_package: __import__(cumulative_path) handler = getattr(handler, name) else: raise ImportError('%s has no attribute %s' % (handler, name)) return handler def HandleRequest(environ, handler_name, url, post_data, error): """Handle a single WSGI request. Creates a request for handler_name in the form 'path.to.handler' for url with the environment contained in environ. Args: environ: A dict containing the environ for this request (e.g. like from os.environ). 
handler_name: A str containing the user-specified handler to use for this request as specified in the script field of a handler in app.yaml using the Python dot notation; e.g. 'package.module.application'. url: An urlparse.SplitResult instance containing the request url. post_data: A stream containing the post data for this request. error: A stream into which errors are to be written. Returns: A dict containing: error: App Engine error code. 0 for OK, 1 for error. response_code: HTTP response code. headers: A list of tuples (key, value) of HTTP headers. body: A str of the body of the response """ return WsgiRequest(environ, handler_name, url, post_data, error).Handle() _config_handle = lib_config.register( 'webapp', {'add_wsgi_middleware': lambda app: app})
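# Illustrative only: a minimal WSGI application of the shape this module can
# serve. It satisfies the checks enforced by _StartResponse and Handle above
# (a str status, a list of (str, str) header tuples, and an iterable of str
# chunks). A handler_name such as 'myapp.application' pointing at it is a
# hypothetical app.yaml script value, not something defined here.
def _example_application(environ, start_response):
  body = 'Hello from %s' % environ.get('PATH_INFO', '/')
  start_response('200 OK', [('Content-Type', 'text/plain'),
                            ('Content-Length', str(len(body)))])
  return [body]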
apache-2.0
chrisglass/django-shop-simplecategories
shop_simplecategories/tests/categories.py
1
1094
#-*- coding: utf-8 -*- from decimal import Decimal from django.test.testcases import TestCase from shop.models.productmodel import Product from shop_simplecategories.models import Category class CategoriesTestCase(TestCase): def setUp(self): self.category = Category() self.category.name = "test_category" self.category.save() self.product = Product() self.product.name = 'test' self.product.short_description = 'test' self.product.long_description = 'test' self.product.unit_price = Decimal('1.0') self.product.save() self.product.categories.add(self.category) def test_01_category_unicode_returns_name(self): #self.create_fixtures() ret = self.category.__unicode__() self.assertEqual(ret, self.category.name) def test_02_category_get_products_works(self): #self.create_fixtures() ret = self.category.get_products() self.assertEqual(len(ret),1) cat_product = ret[0] self.assertEqual(cat_product,self.product)
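    # A further check one might add (hypothetical - it assumes the default
    # reverse accessor 'product_set' that a Product.categories M2M field
    # would create on Category; adjust if the app sets a related_name):
    #
    # def test_03_product_reachable_from_category(self):
    #     self.assertTrue(self.product in self.category.product_set.all())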
bsd-3-clause
azunite/gyp_20150930
pylib/gyp/MSVSSettings.py
1361
45045
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. r"""Code to validate and convert settings of the Microsoft build tools. This file contains code to validate and convert settings of the Microsoft build tools. The function ConvertToMSBuildSettings(), ValidateMSVSSettings(), and ValidateMSBuildSettings() are the entry points. This file was created by comparing the projects created by Visual Studio 2008 and Visual Studio 2010 for all available settings through the user interface. The MSBuild schemas were also considered. They are typically found in the MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild """ import sys import re # Dictionaries of settings validators. The key is the tool name, the value is # a dictionary mapping setting names to validation functions. _msvs_validators = {} _msbuild_validators = {} # A dictionary of settings converters. The key is the tool name, the value is # a dictionary mapping setting names to conversion functions. _msvs_to_msbuild_converters = {} # Tool name mapping from MSVS to MSBuild. _msbuild_name_of_tool = {} class _Tool(object): """Represents a tool used by MSVS or MSBuild. Attributes: msvs_name: The name of the tool in MSVS. msbuild_name: The name of the tool in MSBuild. """ def __init__(self, msvs_name, msbuild_name): self.msvs_name = msvs_name self.msbuild_name = msbuild_name def _AddTool(tool): """Adds a tool to the four dictionaries used to process settings. This only defines the tool. Each setting also needs to be added. Args: tool: The _Tool object to be added. """ _msvs_validators[tool.msvs_name] = {} _msbuild_validators[tool.msbuild_name] = {} _msvs_to_msbuild_converters[tool.msvs_name] = {} _msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name def _GetMSBuildToolSettings(msbuild_settings, tool): """Returns an MSBuild tool dictionary. Creates it if needed.""" return msbuild_settings.setdefault(tool.msbuild_name, {}) class _Type(object): """Type of settings (Base class).""" def ValidateMSVS(self, value): """Verifies that the value is legal for MSVS. Args: value: the value to check for this type. Raises: ValueError if value is not valid for MSVS. """ def ValidateMSBuild(self, value): """Verifies that the value is legal for MSBuild. Args: value: the value to check for this type. Raises: ValueError if value is not valid for MSBuild. """ def ConvertToMSBuild(self, value): """Returns the MSBuild equivalent of the MSVS value given. Args: value: the MSVS value to convert. Returns: the MSBuild equivalent. Raises: ValueError if value is not valid. 
""" return value class _String(_Type): """A setting that's just a string.""" def ValidateMSVS(self, value): if not isinstance(value, basestring): raise ValueError('expected string; got %r' % value) def ValidateMSBuild(self, value): if not isinstance(value, basestring): raise ValueError('expected string; got %r' % value) def ConvertToMSBuild(self, value): # Convert the macros return ConvertVCMacrosToMSBuild(value) class _StringList(_Type): """A settings that's a list of strings.""" def ValidateMSVS(self, value): if not isinstance(value, basestring) and not isinstance(value, list): raise ValueError('expected string list; got %r' % value) def ValidateMSBuild(self, value): if not isinstance(value, basestring) and not isinstance(value, list): raise ValueError('expected string list; got %r' % value) def ConvertToMSBuild(self, value): # Convert the macros if isinstance(value, list): return [ConvertVCMacrosToMSBuild(i) for i in value] else: return ConvertVCMacrosToMSBuild(value) class _Boolean(_Type): """Boolean settings, can have the values 'false' or 'true'.""" def _Validate(self, value): if value != 'true' and value != 'false': raise ValueError('expected bool; got %r' % value) def ValidateMSVS(self, value): self._Validate(value) def ValidateMSBuild(self, value): self._Validate(value) def ConvertToMSBuild(self, value): self._Validate(value) return value class _Integer(_Type): """Integer settings.""" def __init__(self, msbuild_base=10): _Type.__init__(self) self._msbuild_base = msbuild_base def ValidateMSVS(self, value): # Try to convert, this will raise ValueError if invalid. self.ConvertToMSBuild(value) def ValidateMSBuild(self, value): # Try to convert, this will raise ValueError if invalid. int(value, self._msbuild_base) def ConvertToMSBuild(self, value): msbuild_format = (self._msbuild_base == 10) and '%d' or '0x%04x' return msbuild_format % int(value) class _Enumeration(_Type): """Type of settings that is an enumeration. In MSVS, the values are indexes like '0', '1', and '2'. MSBuild uses text labels that are more representative, like 'Win32'. Constructor args: label_list: an array of MSBuild labels that correspond to the MSVS index. In the rare cases where MSVS has skipped an index value, None is used in the array to indicate the unused spot. new: an array of labels that are new to MSBuild. """ def __init__(self, label_list, new=None): _Type.__init__(self) self._label_list = label_list self._msbuild_values = set(value for value in label_list if value is not None) if new is not None: self._msbuild_values.update(new) def ValidateMSVS(self, value): # Try to convert. It will raise an exception if not valid. self.ConvertToMSBuild(value) def ValidateMSBuild(self, value): if value not in self._msbuild_values: raise ValueError('unrecognized enumerated value %s' % value) def ConvertToMSBuild(self, value): index = int(value) if index < 0 or index >= len(self._label_list): raise ValueError('index value (%d) not in expected range [0, %d)' % (index, len(self._label_list))) label = self._label_list[index] if label is None: raise ValueError('converted value for %s not specified.' % value) return label # Instantiate the various generic types. _boolean = _Boolean() _integer = _Integer() # For now, we don't do any special validation on these types: _string = _String() _file_name = _String() _folder_name = _String() _file_list = _StringList() _folder_list = _StringList() _string_list = _StringList() # Some boolean settings went from numerical values to boolean. The # mapping is 0: default, 1: false, 2: true. 
_newly_boolean = _Enumeration(['', 'false', 'true']) def _Same(tool, name, setting_type): """Defines a setting that has the same name in MSVS and MSBuild. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. name: the name of the setting. setting_type: the type of this setting. """ _Renamed(tool, name, name, setting_type) def _Renamed(tool, msvs_name, msbuild_name, setting_type): """Defines a setting for which the name has changed. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. msvs_name: the name of the MSVS setting. msbuild_name: the name of the MSBuild setting. setting_type: the type of this setting. """ def _Translate(value, msbuild_settings): msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value) _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS _msbuild_validators[tool.msbuild_name][msbuild_name] = ( setting_type.ValidateMSBuild) _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate def _Moved(tool, settings_name, msbuild_tool_name, setting_type): _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name, setting_type) def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name, msbuild_settings_name, setting_type): """Defines a setting that may have moved to a new section. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. msvs_settings_name: the MSVS name of the setting. msbuild_tool_name: the name of the MSBuild tool to place the setting under. msbuild_settings_name: the MSBuild name of the setting. setting_type: the type of this setting. """ def _Translate(value, msbuild_settings): tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {}) tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value) _msvs_validators[tool.msvs_name][msvs_settings_name] = ( setting_type.ValidateMSVS) validator = setting_type.ValidateMSBuild _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate def _MSVSOnly(tool, name, setting_type): """Defines a setting that is only found in MSVS. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. name: the name of the setting. setting_type: the type of this setting. """ def _Translate(unused_value, unused_msbuild_settings): # Since this is for MSVS only settings, no translation will happen. pass _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate def _MSBuildOnly(tool, name, setting_type): """Defines a setting that is only found in MSBuild. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. name: the name of the setting. setting_type: the type of this setting. """ def _Translate(value, msbuild_settings): # Let msbuild-only properties get translated as-is from msvs_settings. tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {}) tool_settings[name] = value _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate def _ConvertedToAdditionalOption(tool, msvs_name, flag): """Defines a setting that's handled via a command line option in MSBuild. Args: tool: a dictionary that gives the names of the tool for MSVS and MSBuild. 
msvs_name: the name of the MSVS setting that if 'true' becomes a flag flag: the flag to insert at the end of the AdditionalOptions """ def _Translate(value, msbuild_settings): if value == 'true': tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) if 'AdditionalOptions' in tool_settings: new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag) else: new_flags = flag tool_settings['AdditionalOptions'] = new_flags _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate def _CustomGeneratePreprocessedFile(tool, msvs_name): def _Translate(value, msbuild_settings): tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool) if value == '0': tool_settings['PreprocessToFile'] = 'false' tool_settings['PreprocessSuppressLineNumbers'] = 'false' elif value == '1': # /P tool_settings['PreprocessToFile'] = 'true' tool_settings['PreprocessSuppressLineNumbers'] = 'false' elif value == '2': # /EP /P tool_settings['PreprocessToFile'] = 'true' tool_settings['PreprocessSuppressLineNumbers'] = 'true' else: raise ValueError('value must be one of [0, 1, 2]; got %s' % value) # Create a bogus validator that looks for '0', '1', or '2' msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS _msvs_validators[tool.msvs_name][msvs_name] = msvs_validator msbuild_validator = _boolean.ValidateMSBuild msbuild_tool_validators = _msbuild_validators[tool.msbuild_name] msbuild_tool_validators['PreprocessToFile'] = msbuild_validator msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir') fix_vc_macro_slashes_regex = re.compile( r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list) ) # Regular expression to detect keys that were generated by exclusion lists _EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$') def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr): """Verify that 'setting' is valid if it is generated from an exclusion list. If the setting appears to be generated from an exclusion list, the root name is checked. Args: setting: A string that is the setting name to validate settings: A dictionary where the keys are valid settings error_msg: The message to emit in the event of error stderr: The stream receiving the error messages. """ # This may be unrecognized because it's an exclusion list. If the # setting name has the _excluded suffix, then check the root name. unrecognized = True m = re.match(_EXCLUDED_SUFFIX_RE, setting) if m: root_setting = m.group(1) unrecognized = root_setting not in settings if unrecognized: # We don't know this setting. Give a warning. print >> stderr, error_msg def FixVCMacroSlashes(s): """Replace macros which have excessive following slashes. These macros are known to have a built-in trailing slash. Furthermore, many scripts hiccup on processing paths with extra slashes in the middle. This list is probably not exhaustive. Add as needed. """ if '$' in s: s = fix_vc_macro_slashes_regex.sub(r'\1', s) return s def ConvertVCMacrosToMSBuild(s): """Convert the the MSVS macros found in the string to the MSBuild equivalent. This list is probably not exhaustive. Add as needed. 
""" if '$' in s: replace_map = { '$(ConfigurationName)': '$(Configuration)', '$(InputDir)': '%(RelativeDir)', '$(InputExt)': '%(Extension)', '$(InputFileName)': '%(Filename)%(Extension)', '$(InputName)': '%(Filename)', '$(InputPath)': '%(Identity)', '$(ParentName)': '$(ProjectFileName)', '$(PlatformName)': '$(Platform)', '$(SafeInputName)': '%(Filename)', } for old, new in replace_map.iteritems(): s = s.replace(old, new) s = FixVCMacroSlashes(s) return s def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr): """Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+). Args: msvs_settings: A dictionary. The key is the tool name. The values are themselves dictionaries of settings and their values. stderr: The stream receiving the error messages. Returns: A dictionary of MSBuild settings. The key is either the MSBuild tool name or the empty string (for the global settings). The values are themselves dictionaries of settings and their values. """ msbuild_settings = {} for msvs_tool_name, msvs_tool_settings in msvs_settings.iteritems(): if msvs_tool_name in _msvs_to_msbuild_converters: msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name] for msvs_setting, msvs_value in msvs_tool_settings.iteritems(): if msvs_setting in msvs_tool: # Invoke the translation function. try: msvs_tool[msvs_setting](msvs_value, msbuild_settings) except ValueError, e: print >> stderr, ('Warning: while converting %s/%s to MSBuild, ' '%s' % (msvs_tool_name, msvs_setting, e)) else: _ValidateExclusionSetting(msvs_setting, msvs_tool, ('Warning: unrecognized setting %s/%s ' 'while converting to MSBuild.' % (msvs_tool_name, msvs_setting)), stderr) else: print >> stderr, ('Warning: unrecognized tool %s while converting to ' 'MSBuild.' % msvs_tool_name) return msbuild_settings def ValidateMSVSSettings(settings, stderr=sys.stderr): """Validates that the names of the settings are valid for MSVS. Args: settings: A dictionary. The key is the tool name. The values are themselves dictionaries of settings and their values. stderr: The stream receiving the error messages. """ _ValidateSettings(_msvs_validators, settings, stderr) def ValidateMSBuildSettings(settings, stderr=sys.stderr): """Validates that the names of the settings are valid for MSBuild. Args: settings: A dictionary. The key is the tool name. The values are themselves dictionaries of settings and their values. stderr: The stream receiving the error messages. """ _ValidateSettings(_msbuild_validators, settings, stderr) def _ValidateSettings(validators, settings, stderr): """Validates that the settings are valid for MSBuild or MSVS. We currently only validate the names of the settings, not their values. Args: validators: A dictionary of tools and their validators. settings: A dictionary. The key is the tool name. The values are themselves dictionaries of settings and their values. stderr: The stream receiving the error messages. """ for tool_name in settings: if tool_name in validators: tool_validators = validators[tool_name] for setting, value in settings[tool_name].iteritems(): if setting in tool_validators: try: tool_validators[setting](value) except ValueError, e: print >> stderr, ('Warning: for %s/%s, %s' % (tool_name, setting, e)) else: _ValidateExclusionSetting(setting, tool_validators, ('Warning: unrecognized setting %s/%s' % (tool_name, setting)), stderr) else: print >> stderr, ('Warning: unrecognized tool %s' % tool_name) # MSVS and MBuild names of the tools. 
_compile = _Tool('VCCLCompilerTool', 'ClCompile') _link = _Tool('VCLinkerTool', 'Link') _midl = _Tool('VCMIDLTool', 'Midl') _rc = _Tool('VCResourceCompilerTool', 'ResourceCompile') _lib = _Tool('VCLibrarianTool', 'Lib') _manifest = _Tool('VCManifestTool', 'Manifest') _masm = _Tool('MASM', 'MASM') _AddTool(_compile) _AddTool(_link) _AddTool(_midl) _AddTool(_rc) _AddTool(_lib) _AddTool(_manifest) _AddTool(_masm) # Add sections only found in the MSBuild settings. _msbuild_validators[''] = {} _msbuild_validators['ProjectReference'] = {} _msbuild_validators['ManifestResourceCompile'] = {} # Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and # ClCompile in MSBuild. # See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for # the schema of the MSBuild ClCompile settings. # Options that have the same name in MSVS and MSBuild _Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I _Same(_compile, 'AdditionalOptions', _string_list) _Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI _Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa _Same(_compile, 'BrowseInformationFile', _file_name) _Same(_compile, 'BufferSecurityCheck', _boolean) # /GS _Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za _Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd _Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT _Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false' _Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx _Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except _Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope _Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI _Same(_compile, 'ForcedUsingFiles', _file_list) # /FU _Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc _Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X _Same(_compile, 'MinimalRebuild', _boolean) # /Gm _Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl _Same(_compile, 'OmitFramePointers', _boolean) # /Oy _Same(_compile, 'PreprocessorDefinitions', _string_list) # /D _Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd _Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR _Same(_compile, 'ShowIncludes', _boolean) # /showIncludes _Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc _Same(_compile, 'StringPooling', _boolean) # /GF _Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo _Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t _Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u _Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U _Same(_compile, 'UseFullPaths', _boolean) # /FC _Same(_compile, 'WholeProgramOptimization', _boolean) # /GL _Same(_compile, 'XMLDocumentationFileName', _file_name) _Same(_compile, 'AssemblerOutput', _Enumeration(['NoListing', 'AssemblyCode', # /FA 'All', # /FAcs 'AssemblyAndMachineCode', # /FAc 'AssemblyAndSourceCode'])) # /FAs _Same(_compile, 'BasicRuntimeChecks', _Enumeration(['Default', 'StackFrameRuntimeCheck', # /RTCs 'UninitializedLocalUsageCheck', # /RTCu 'EnableFastChecks'])) # /RTC1 _Same(_compile, 'BrowseInformation', _Enumeration(['false', 'true', # /FR 'true'])) # /Fr _Same(_compile, 'CallingConvention', _Enumeration(['Cdecl', # /Gd 'FastCall', # /Gr 'StdCall', # /Gz 'VectorCall'])) # /Gv _Same(_compile, 'CompileAs', _Enumeration(['Default', 'CompileAsC', # /TC 'CompileAsCpp'])) # /TP _Same(_compile, 'DebugInformationFormat', _Enumeration(['', # Disabled 
'OldStyle', # /Z7 None, 'ProgramDatabase', # /Zi 'EditAndContinue'])) # /ZI _Same(_compile, 'EnableEnhancedInstructionSet', _Enumeration(['NotSet', 'StreamingSIMDExtensions', # /arch:SSE 'StreamingSIMDExtensions2', # /arch:SSE2 'AdvancedVectorExtensions', # /arch:AVX (vs2012+) 'NoExtensions', # /arch:IA32 (vs2012+) # This one only exists in the new msbuild format. 'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+) ])) _Same(_compile, 'ErrorReporting', _Enumeration(['None', # /errorReport:none 'Prompt', # /errorReport:prompt 'Queue'], # /errorReport:queue new=['Send'])) # /errorReport:send" _Same(_compile, 'ExceptionHandling', _Enumeration(['false', 'Sync', # /EHsc 'Async'], # /EHa new=['SyncCThrow'])) # /EHs _Same(_compile, 'FavorSizeOrSpeed', _Enumeration(['Neither', 'Speed', # /Ot 'Size'])) # /Os _Same(_compile, 'FloatingPointModel', _Enumeration(['Precise', # /fp:precise 'Strict', # /fp:strict 'Fast'])) # /fp:fast _Same(_compile, 'InlineFunctionExpansion', _Enumeration(['Default', 'OnlyExplicitInline', # /Ob1 'AnySuitable'], # /Ob2 new=['Disabled'])) # /Ob0 _Same(_compile, 'Optimization', _Enumeration(['Disabled', # /Od 'MinSpace', # /O1 'MaxSpeed', # /O2 'Full'])) # /Ox _Same(_compile, 'RuntimeLibrary', _Enumeration(['MultiThreaded', # /MT 'MultiThreadedDebug', # /MTd 'MultiThreadedDLL', # /MD 'MultiThreadedDebugDLL'])) # /MDd _Same(_compile, 'StructMemberAlignment', _Enumeration(['Default', '1Byte', # /Zp1 '2Bytes', # /Zp2 '4Bytes', # /Zp4 '8Bytes', # /Zp8 '16Bytes'])) # /Zp16 _Same(_compile, 'WarningLevel', _Enumeration(['TurnOffAllWarnings', # /W0 'Level1', # /W1 'Level2', # /W2 'Level3', # /W3 'Level4'], # /W4 new=['EnableAllWarnings'])) # /Wall # Options found in MSVS that have been renamed in MSBuild. _Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking', _boolean) # /Gy _Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions', _boolean) # /Oi _Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C _Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo _Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp _Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile', _file_name) # Used with /Yc and /Yu _Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile', _file_name) # /Fp _Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader', _Enumeration(['NotUsing', # VS recognized '' for this value too. 'Create', # /Yc 'Use'])) # /Yu _Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX _ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J') # MSVS options not found in MSBuild. _MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean) _MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean) # MSBuild options not found in MSVS. 
_MSBuildOnly(_compile, 'BuildingInIDE', _boolean) _MSBuildOnly(_compile, 'CompileAsManaged', _Enumeration([], new=['false', 'true'])) # /clr _MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch _MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP _MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi _MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors _MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name) _MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we _MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu # Defines a setting that needs very customized processing _CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile') # Directives for converting MSVS VCLinkerTool to MSBuild Link. # See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for # the schema of the MSBuild Link settings. # Options that have the same name in MSVS and MSBuild _Same(_link, 'AdditionalDependencies', _file_list) _Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH # /MANIFESTDEPENDENCY: _Same(_link, 'AdditionalManifestDependencies', _file_list) _Same(_link, 'AdditionalOptions', _string_list) _Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE _Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION _Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE _Same(_link, 'BaseAddress', _string) # /BASE _Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK _Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD _Same(_link, 'DelaySign', _boolean) # /DELAYSIGN _Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE _Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC _Same(_link, 'EntryPointSymbol', _string) # /ENTRY _Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE _Same(_link, 'FunctionOrder', _file_name) # /ORDER _Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG _Same(_link, 'GenerateMapFile', _boolean) # /MAP _Same(_link, 'HeapCommitSize', _string) _Same(_link, 'HeapReserveSize', _string) # /HEAP _Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB _Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL _Same(_link, 'ImportLibrary', _file_name) # /IMPLIB _Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER _Same(_link, 'KeyFile', _file_name) # /KEYFILE _Same(_link, 'ManifestFile', _file_name) # /ManifestFile _Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS _Same(_link, 'MapFileName', _file_name) _Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT _Same(_link, 'MergeSections', _string) # /MERGE _Same(_link, 'MidlCommandFile', _file_name) # /MIDL _Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF _Same(_link, 'OutputFile', _file_name) # /OUT _Same(_link, 'PerUserRedirection', _boolean) _Same(_link, 'Profile', _boolean) # /PROFILE _Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD _Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB _Same(_link, 'RegisterOutput', _boolean) _Same(_link, 'SetChecksum', _boolean) # /RELEASE _Same(_link, 'StackCommitSize', _string) _Same(_link, 'StackReserveSize', _string) # /STACK _Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED _Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD _Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO _Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD _Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY 
_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT _Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID _Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true' _Same(_link, 'Version', _string) # /VERSION _Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF _Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED _Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE _Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF _Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE _Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE _subsystem_enumeration = _Enumeration( ['NotSet', 'Console', # /SUBSYSTEM:CONSOLE 'Windows', # /SUBSYSTEM:WINDOWS 'Native', # /SUBSYSTEM:NATIVE 'EFI Application', # /SUBSYSTEM:EFI_APPLICATION 'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER 'EFI ROM', # /SUBSYSTEM:EFI_ROM 'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER 'WindowsCE'], # /SUBSYSTEM:WINDOWSCE new=['POSIX']) # /SUBSYSTEM:POSIX _target_machine_enumeration = _Enumeration( ['NotSet', 'MachineX86', # /MACHINE:X86 None, 'MachineARM', # /MACHINE:ARM 'MachineEBC', # /MACHINE:EBC 'MachineIA64', # /MACHINE:IA64 None, 'MachineMIPS', # /MACHINE:MIPS 'MachineMIPS16', # /MACHINE:MIPS16 'MachineMIPSFPU', # /MACHINE:MIPSFPU 'MachineMIPSFPU16', # /MACHINE:MIPSFPU16 None, None, None, 'MachineSH4', # /MACHINE:SH4 None, 'MachineTHUMB', # /MACHINE:THUMB 'MachineX64']) # /MACHINE:X64 _Same(_link, 'AssemblyDebug', _Enumeration(['', 'true', # /ASSEMBLYDEBUG 'false'])) # /ASSEMBLYDEBUG:DISABLE _Same(_link, 'CLRImageType', _Enumeration(['Default', 'ForceIJWImage', # /CLRIMAGETYPE:IJW 'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE 'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE _Same(_link, 'CLRThreadAttribute', _Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE 'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA 'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA _Same(_link, 'DataExecutionPrevention', _Enumeration(['', 'false', # /NXCOMPAT:NO 'true'])) # /NXCOMPAT _Same(_link, 'Driver', _Enumeration(['NotSet', 'Driver', # /Driver 'UpOnly', # /DRIVER:UPONLY 'WDM'])) # /DRIVER:WDM _Same(_link, 'LinkTimeCodeGeneration', _Enumeration(['Default', 'UseLinkTimeCodeGeneration', # /LTCG 'PGInstrument', # /LTCG:PGInstrument 'PGOptimization', # /LTCG:PGOptimize 'PGUpdate'])) # /LTCG:PGUpdate _Same(_link, 'ShowProgress', _Enumeration(['NotSet', 'LinkVerbose', # /VERBOSE 'LinkVerboseLib'], # /VERBOSE:Lib new=['LinkVerboseICF', # /VERBOSE:ICF 'LinkVerboseREF', # /VERBOSE:REF 'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH 'LinkVerboseCLR'])) # /VERBOSE:CLR _Same(_link, 'SubSystem', _subsystem_enumeration) _Same(_link, 'TargetMachine', _target_machine_enumeration) _Same(_link, 'UACExecutionLevel', _Enumeration(['AsInvoker', # /level='asInvoker' 'HighestAvailable', # /level='highestAvailable' 'RequireAdministrator'])) # /level='requireAdministrator' _Same(_link, 'MinimumRequiredVersion', _string) _Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX # Options found in MSVS that have been renamed in MSBuild. 
_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting', _Enumeration(['NoErrorReport', # /ERRORREPORT:NONE 'PromptImmediately', # /ERRORREPORT:PROMPT 'QueueForNextLogin'], # /ERRORREPORT:QUEUE new=['SendErrorReport'])) # /ERRORREPORT:SEND _Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB _Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY _Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET _Moved(_link, 'GenerateManifest', '', _boolean) _Moved(_link, 'IgnoreImportLibrary', '', _boolean) _Moved(_link, 'LinkIncremental', '', _newly_boolean) _Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean) _Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean) # MSVS options not found in MSBuild. _MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean) _MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean) # MSBuild options not found in MSVS. _MSBuildOnly(_link, 'BuildingInIDE', _boolean) _MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH _MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false' _MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS _MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND _MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND _MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name) _MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false' _MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN _MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION _MSBuildOnly(_link, 'ForceFileOutput', _Enumeration([], new=['Enabled', # /FORCE # /FORCE:MULTIPLE 'MultiplyDefinedSymbolOnly', 'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED _MSBuildOnly(_link, 'CreateHotPatchableImage', _Enumeration([], new=['Enabled', # /FUNCTIONPADMIN 'X86Image', # /FUNCTIONPADMIN:5 'X64Image', # /FUNCTIONPADMIN:6 'ItaniumImage'])) # /FUNCTIONPADMIN:16 _MSBuildOnly(_link, 'CLRSupportLastError', _Enumeration([], new=['Enabled', # /CLRSupportLastError 'Disabled', # /CLRSupportLastError:NO # /CLRSupportLastError:SYSTEMDLL 'SystemDlls'])) # Directives for converting VCResourceCompilerTool to ResourceCompile. # See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for # the schema of the MSBuild ResourceCompile settings. _Same(_rc, 'AdditionalOptions', _string_list) _Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I _Same(_rc, 'Culture', _Integer(msbuild_base=16)) _Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X _Same(_rc, 'PreprocessorDefinitions', _string_list) # /D _Same(_rc, 'ResourceOutputFileName', _string) # /fo _Same(_rc, 'ShowProgress', _boolean) # /v # There is no UI in VisualStudio 2008 to set the following properties. # However they are found in CL and other tools. Include them here for # completeness, as they are very likely to have the same usage pattern. _Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo _Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u # MSBuild options not found in MSVS. _MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n _MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name) # Directives for converting VCMIDLTool to Midl. # See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for # the schema of the MSBuild Midl settings. 
_Same(_midl, 'AdditionalIncludeDirectories', _folder_list)  # /I
_Same(_midl, 'AdditionalOptions', _string_list)
_Same(_midl, 'CPreprocessOptions', _string)  # /cpp_opt
_Same(_midl, 'ErrorCheckAllocations', _boolean)  # /error allocation
_Same(_midl, 'ErrorCheckBounds', _boolean)  # /error bounds_check
_Same(_midl, 'ErrorCheckEnumRange', _boolean)  # /error enum
_Same(_midl, 'ErrorCheckRefPointers', _boolean)  # /error ref
_Same(_midl, 'ErrorCheckStubData', _boolean)  # /error stub_data
_Same(_midl, 'GenerateStublessProxies', _boolean)  # /Oicf
_Same(_midl, 'GenerateTypeLibrary', _boolean)
_Same(_midl, 'HeaderFileName', _file_name)  # /h
_Same(_midl, 'IgnoreStandardIncludePath', _boolean)  # /no_def_idir
_Same(_midl, 'InterfaceIdentifierFileName', _file_name)  # /iid
_Same(_midl, 'MkTypLibCompatible', _boolean)  # /mktyplib203
_Same(_midl, 'OutputDirectory', _string)  # /out
_Same(_midl, 'PreprocessorDefinitions', _string_list)  # /D
_Same(_midl, 'ProxyFileName', _file_name)  # /proxy
_Same(_midl, 'RedirectOutputAndErrors', _file_name)  # /o
_Same(_midl, 'SuppressStartupBanner', _boolean)  # /nologo
_Same(_midl, 'TypeLibraryName', _file_name)  # /tlb
_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list)  # /U
_Same(_midl, 'WarnAsError', _boolean)  # /WX

_Same(_midl, 'DefaultCharType',
      _Enumeration(['Unsigned',  # /char unsigned
                    'Signed',  # /char signed
                    'Ascii']))  # /char ascii7
_Same(_midl, 'TargetEnvironment',
      _Enumeration(['NotSet',
                    'Win32',  # /env win32
                    'Itanium',  # /env ia64
                    'X64']))  # /env x64
_Same(_midl, 'EnableErrorChecks',
      _Enumeration(['EnableCustom',
                    'None',  # /error none
                    'All']))  # /error all
_Same(_midl, 'StructMemberAlignment',
      _Enumeration(['NotSet',
                    '1',  # Zp1
                    '2',  # Zp2
                    '4',  # Zp4
                    '8']))  # Zp8
_Same(_midl, 'WarningLevel',
      _Enumeration(['0',  # /W0
                    '1',  # /W1
                    '2',  # /W2
                    '3',  # /W3
                    '4']))  # /W4

_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name)  # /dlldata
_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
         _boolean)  # /robust

# MSBuild options not found in MSVS.
_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean)  # /app_config
_MSBuildOnly(_midl, 'ClientStubFile', _file_name)  # /cstub
_MSBuildOnly(_midl, 'GenerateClientFiles',
             _Enumeration([], new=['Stub',  # /client stub
                                   'None']))  # /client none
_MSBuildOnly(_midl, 'GenerateServerFiles',
             _Enumeration([], new=['Stub',  # /server stub
                                   'None']))  # /server none
_MSBuildOnly(_midl, 'LocaleID', _integer)  # /lcid DECIMAL
_MSBuildOnly(_midl, 'ServerStubFile', _file_name)  # /sstub
_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean)  # /no_warn
_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_midl, 'TypeLibFormat',
             _Enumeration([], new=['NewFormat',  # /newtlb
                                   'OldFormat']))  # /oldtlb


# Directives for converting VCLibrarianTool to Lib.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
# the schema of the MSBuild Lib settings.
_Same(_lib, 'AdditionalDependencies', _file_list)
_Same(_lib, 'AdditionalLibraryDirectories', _folder_list)  # /LIBPATH
_Same(_lib, 'AdditionalOptions', _string_list)
_Same(_lib, 'ExportNamedFunctions', _string_list)  # /EXPORT
_Same(_lib, 'ForceSymbolReferences', _string)  # /INCLUDE
_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean)  # /NODEFAULTLIB
_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list)  # /NODEFAULTLIB
_Same(_lib, 'ModuleDefinitionFile', _file_name)  # /DEF
_Same(_lib, 'OutputFile', _file_name)  # /OUT
_Same(_lib, 'SuppressStartupBanner', _boolean)  # /NOLOGO
_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
_Same(_lib, 'LinkTimeCodeGeneration', _boolean)  # /LTCG
_Same(_lib, 'TargetMachine', _target_machine_enumeration)

# TODO(jeanluc) _link defines the same value that gets moved to
# ProjectReference.  We may want to validate that they are consistent.
_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)

_MSBuildOnly(_lib, 'DisplayLibrary', _string)  # /LIST Visible='false'
_MSBuildOnly(_lib, 'ErrorReporting',
             _Enumeration([], new=['PromptImmediately',  # /ERRORREPORT:PROMPT
                                   'QueueForNextLogin',  # /ERRORREPORT:QUEUE
                                   'SendErrorReport',  # /ERRORREPORT:SEND
                                   'NoErrorReport']))  # /ERRORREPORT:NONE
_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
_MSBuildOnly(_lib, 'Name', _file_name)  # /NAME
_MSBuildOnly(_lib, 'RemoveObjects', _file_list)  # /REMOVE
_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean)  # /WX
_MSBuildOnly(_lib, 'Verbose', _boolean)


# Directives for converting VCManifestTool to Mt.
# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
# the schema of the MSBuild Manifest settings.

# Options that have the same name in MSVS and MSBuild
_Same(_manifest, 'AdditionalManifestFiles', _file_list)  # /manifest
_Same(_manifest, 'AdditionalOptions', _string_list)
_Same(_manifest, 'AssemblyIdentity', _string)  # /identity:
_Same(_manifest, 'ComponentFileName', _file_name)  # /dll
_Same(_manifest, 'GenerateCatalogFiles', _boolean)  # /makecdfs
_Same(_manifest, 'InputResourceManifests', _string)  # /inputresource
_Same(_manifest, 'OutputManifestFile', _file_name)  # /out
_Same(_manifest, 'RegistrarScriptFile', _file_name)  # /rgs
_Same(_manifest, 'ReplacementsFile', _file_name)  # /replacements
_Same(_manifest, 'SuppressStartupBanner', _boolean)  # /nologo
_Same(_manifest, 'TypeLibraryFile', _file_name)  # /tlb:
_Same(_manifest, 'UpdateFileHashes', _boolean)  # /hashupdate
_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
_Same(_manifest, 'VerboseOutput', _boolean)  # /verbose

# Options that have moved location.
_MovedAndRenamed(_manifest, 'ManifestResourceFile',
                 'ManifestResourceCompile',
                 'ResourceOutputFileName',
                 _file_name)
_Moved(_manifest, 'EmbedManifest', '', _boolean)

# MSVS options not found in MSBuild.
_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)

# MSBuild options not found in MSVS.
_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean) _MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category _MSBuildOnly(_manifest, 'ManifestFromManagedAssembly', _file_name) # /managedassemblyname _MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource _MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency _MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name) # Directives for MASM. # See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the # MSBuild MASM settings. # Options that have the same name in MSVS and MSBuild. _Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
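

# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch added for illustration; it is
# not part of the original gyp source. It assumes the directive tables above
# are registered at module import time (which is how this file initializes)
# and uses only ConvertToMSBuildSettings as defined earlier in this file.
if __name__ == '__main__':
  example_msvs_settings = {
      'VCCLCompilerTool': {
          'WarningLevel': '3',    # an index into the _Enumeration above
          'WarnAsError': 'true',  # renamed by the _Renamed directive above
      },
  }
  # Expected shape, per the _Same/_Renamed directives:
  #   {'ClCompile': {'WarningLevel': 'Level3', 'TreatWarningAsError': 'true'}}
  print ConvertToMSBuildSettings(example_msvs_settings)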
bsd-3-clause
StackStorm/st2
st2actions/st2actions/notifier/notifier.py
3
13732
# Copyright 2020 The StackStorm Authors. # Copyright 2019 Extreme Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from datetime import datetime import json from oslo_config import cfg from st2common import log as logging from st2common.constants.action import LIVEACTION_STATUS_SUCCEEDED from st2common.constants.action import LIVEACTION_FAILED_STATES from st2common.constants.action import LIVEACTION_COMPLETED_STATES from st2common.constants.triggers import INTERNAL_TRIGGER_TYPES from st2common.models.api.trace import TraceContext from st2common.models.db.execution import ActionExecutionDB from st2common.persistence.action import Action from st2common.persistence.liveaction import LiveAction from st2common.models.system.common import ResourceReference from st2common.persistence.execution import ActionExecution from st2common.services import policies as policy_service from st2common.services import trace as trace_service from st2common.services import workflows as workflow_service from st2common.transport import consumers from st2common.transport import utils as transport_utils from st2common.transport.reactor import TriggerDispatcher from st2common.util import isotime from st2common.util import jinja as jinja_utils from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX from st2common.constants.action import ACTION_PARAMETERS_KV_PREFIX from st2common.constants.action import ACTION_RESULTS_KV_PREFIX from st2common.constants.keyvalue import ( FULL_SYSTEM_SCOPE, SYSTEM_SCOPE, DATASTORE_PARENT_SCOPE, ) from st2common.services.keyvalues import KeyValueLookup from st2common.transport.queues import NOTIFIER_ACTIONUPDATE_WORK_QUEUE from st2common.metrics.base import CounterWithTimer from st2common.metrics.base import Timer __all__ = ["Notifier", "get_notifier"] LOG = logging.getLogger(__name__) # XXX: Fix this nasty positional dependency. ACTION_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES["action"][0] NOTIFY_TRIGGER_TYPE = INTERNAL_TRIGGER_TYPES["action"][1] class Notifier(consumers.MessageHandler): message_type = ActionExecutionDB def __init__(self, connection, queues, trigger_dispatcher=None): super(Notifier, self).__init__(connection, queues) if not trigger_dispatcher: trigger_dispatcher = TriggerDispatcher(LOG) self._trigger_dispatcher = trigger_dispatcher self._notify_trigger = ResourceReference.to_string_reference( pack=NOTIFY_TRIGGER_TYPE["pack"], name=NOTIFY_TRIGGER_TYPE["name"] ) self._action_trigger = ResourceReference.to_string_reference( pack=ACTION_TRIGGER_TYPE["pack"], name=ACTION_TRIGGER_TYPE["name"] ) @CounterWithTimer(key="notifier.action.executions") def process(self, execution_db): execution_id = str(execution_db.id) extra = {"execution": execution_db} LOG.debug('Processing action execution "%s".', execution_id, extra=extra) # Get the corresponding liveaction record. 
        liveaction_db = LiveAction.get_by_id(execution_db.liveaction["id"])

        if execution_db.status in LIVEACTION_COMPLETED_STATES:
            # If the action execution is executed under an orquesta workflow, policies for the
            # action execution will be applied by the workflow engine. A policy may affect the
            # final state of the action execution thereby impacting the state of the workflow.
            if not workflow_service.is_action_execution_under_workflow_context(
                execution_db
            ):
                with CounterWithTimer(key="notifier.apply_post_run_policies"):
                    policy_service.apply_post_run_policies(liveaction_db)

            if liveaction_db.notify:
                with CounterWithTimer(key="notifier.notify_trigger.post"):
                    self._post_notify_triggers(
                        liveaction_db=liveaction_db, execution_db=execution_db
                    )

        self._post_generic_trigger(
            liveaction_db=liveaction_db, execution_db=execution_db
        )

    def _get_execution_for_liveaction(self, liveaction):
        execution = ActionExecution.get(liveaction__id=str(liveaction.id))

        if not execution:
            return None

        return execution

    def _post_notify_triggers(self, liveaction_db=None, execution_db=None):
        notify = getattr(liveaction_db, "notify", None)

        if not notify:
            return

        if notify.on_complete:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_complete,
                default_message_suffix="completed.",
            )
        if liveaction_db.status == LIVEACTION_STATUS_SUCCEEDED and notify.on_success:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_success,
                default_message_suffix="succeeded.",
            )
        if liveaction_db.status in LIVEACTION_FAILED_STATES and notify.on_failure:
            self._post_notify_subsection_triggers(
                liveaction_db=liveaction_db,
                execution_db=execution_db,
                notify_subsection=notify.on_failure,
                default_message_suffix="failed.",
            )

    def _post_notify_subsection_triggers(
        self,
        liveaction_db=None,
        execution_db=None,
        notify_subsection=None,
        default_message_suffix=None,
    ):
        routes = (
            getattr(notify_subsection, "routes")
            or getattr(notify_subsection, "channels", [])
        ) or []

        execution_id = str(execution_db.id)

        if routes and len(routes) >= 1:
            payload = {}
            message = notify_subsection.message or (
                "Action " + liveaction_db.action + " " + default_message_suffix
            )
            data = notify_subsection.data or {}

            jinja_context = self._build_jinja_context(
                liveaction_db=liveaction_db, execution_db=execution_db
            )

            try:
                with Timer(key="notifier.transform_message"):
                    message = self._transform_message(
                        message=message, context=jinja_context
                    )
            except:
                LOG.exception("Failed (Jinja) transforming `message`.")

            try:
                with Timer(key="notifier.transform_data"):
                    data = self._transform_data(data=data, context=jinja_context)
            except:
                LOG.exception("Failed (Jinja) transforming `data`.")

            # At this point, convert the result to a string. This restricts the rules
            # engine's ability to introspect the result. On the other hand, at least a
            # JSON-usable result is sent as part of the notification. If Jinja is
            # required to convert to a string representation, it uses str(...), which
            # makes it impossible to parse the result as JSON any longer.
            # TODO: Use to_serializable_dict
            data["result"] = json.dumps(liveaction_db.result)

            payload["message"] = message
            payload["data"] = data
            payload["execution_id"] = execution_id
            payload["status"] = liveaction_db.status
            payload["start_timestamp"] = isotime.format(liveaction_db.start_timestamp)

            try:
                payload["end_timestamp"] = isotime.format(liveaction_db.end_timestamp)
            except AttributeError:
                # This can be raised if liveaction.end_timestamp is None, which happens
                # when a policy cancels a request due to concurrency.
                # In this case, use datetime.utcnow() instead.
                payload["end_timestamp"] = isotime.format(datetime.utcnow())

            payload["action_ref"] = liveaction_db.action
            payload["runner_ref"] = self._get_runner_ref(liveaction_db.action)

            trace_context = self._get_trace_context(execution_id=execution_id)

            failed_routes = []
            for route in routes:
                try:
                    payload["route"] = route
                    # Deprecated. Only for backward compatibility reasons.
                    payload["channel"] = route

                    LOG.debug(
                        "POSTing %s for %s. Payload - %s.",
                        NOTIFY_TRIGGER_TYPE["name"],
                        liveaction_db.id,
                        payload,
                    )

                    with CounterWithTimer(key="notifier.notify_trigger.dispatch"):
                        self._trigger_dispatcher.dispatch(
                            self._notify_trigger,
                            payload=payload,
                            trace_context=trace_context,
                        )
                except:
                    failed_routes.append(route)

            if len(failed_routes) > 0:
                raise Exception(
                    "Failed notifications to routes: %s" % ", ".join(failed_routes)
                )

    def _build_jinja_context(self, liveaction_db, execution_db):
        context = {}
        context.update(
            {
                DATASTORE_PARENT_SCOPE: {
                    SYSTEM_SCOPE: KeyValueLookup(scope=FULL_SYSTEM_SCOPE)
                }
            }
        )
        context.update({ACTION_PARAMETERS_KV_PREFIX: liveaction_db.parameters})
        context.update({ACTION_CONTEXT_KV_PREFIX: liveaction_db.context})
        context.update({ACTION_RESULTS_KV_PREFIX: execution_db.result})
        return context

    def _transform_message(self, message, context=None):
        mapping = {"message": message}
        context = context or {}
        return (jinja_utils.render_values(mapping=mapping, context=context)).get(
            "message", message
        )

    def _transform_data(self, data, context=None):
        return jinja_utils.render_values(mapping=data, context=context)

    def _get_trace_context(self, execution_id):
        trace_db = trace_service.get_trace_db_by_action_execution(
            action_execution_id=execution_id
        )
        if trace_db:
            return TraceContext(id_=str(trace_db.id), trace_tag=trace_db.trace_tag)
        # If no trace_context is found then do not create a new one here. If necessary
        # it shall be created downstream. Sure this is impl leakage of some sort.
        return None

    def _post_generic_trigger(self, liveaction_db=None, execution_db=None):
        if not cfg.CONF.action_sensor.enable:
            LOG.debug("Action trigger is disabled, skipping trigger dispatch...")
            return

        execution_id = str(execution_db.id)
        extra = {"execution": execution_db}
        target_statuses = cfg.CONF.action_sensor.emit_when

        if execution_db.status not in target_statuses:
            msg = 'Skip action execution "%s" because state "%s" is not in %s'
            LOG.debug(
                msg % (execution_id, execution_db.status, target_statuses), extra=extra
            )
            return

        with CounterWithTimer(key="notifier.generic_trigger.post"):
            payload = {
                "execution_id": execution_id,
                "status": liveaction_db.status,
                "start_timestamp": str(liveaction_db.start_timestamp),
                # deprecate 'action_name' at some point and switch to 'action_ref'
                "action_name": liveaction_db.action,
                "action_ref": liveaction_db.action,
                "runner_ref": self._get_runner_ref(liveaction_db.action),
                "parameters": liveaction_db.get_masked_parameters(),
                "result": liveaction_db.result,
            }

            # Use execution_id to extract the trace rather than the liveaction:
            # execution_id looks up an exact TraceDB, while a liveaction lookup,
            # depending on context, may not end up going to the DB at all.
            trace_context = self._get_trace_context(execution_id=execution_id)

            LOG.debug(
                "POSTing %s for %s. Payload - %s. TraceContext - %s",
                ACTION_TRIGGER_TYPE["name"],
                liveaction_db.id,
                payload,
                trace_context,
            )

            with CounterWithTimer(key="notifier.generic_trigger.dispatch"):
                self._trigger_dispatcher.dispatch(
                    self._action_trigger, payload=payload, trace_context=trace_context
                )

    def _get_runner_ref(self, action_ref):
        """
        Retrieve a runner reference for the provided action.

        :rtype: ``str``
        """
        action = Action.get_by_ref(action_ref)
        return action["runner_type"]["name"]


def get_notifier():
    with transport_utils.get_connection() as conn:
        return Notifier(
            conn,
            [NOTIFIER_ACTIONUPDATE_WORK_QUEUE],
            trigger_dispatcher=TriggerDispatcher(LOG),
        )
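

# ---------------------------------------------------------------------------
# Editor's note: the block below is a hedged illustration added during
# editing; it is not part of the original StackStorm source. It shows the
# kind of rendering _transform_message performs via
# jinja_utils.render_values. The literal context key "action_parameters" is
# an illustrative assumption; the real key names come from the
# ACTION_*_KV_PREFIX constants imported above and from _build_jinja_context.
def _example_message_transform():
    mapping = {"message": "Action ran cmd={{action_parameters.cmd}}"}
    context = {"action_parameters": {"cmd": "ls -la"}}
    # Renders every value in `mapping` against `context`, exactly like
    # _transform_message does for the notification message.
    return jinja_utils.render_values(mapping=mapping, context=context)["message"]
    # -> "Action ran cmd=ls -la"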
apache-2.0
danielharbor/openerp
addons/account_payment/__openerp__.py
116
2997
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Suppliers Payment Management',
    'version': '1.1',
    'author': 'OpenERP SA',
    'category': 'Accounting & Finance',
    'description': """
Module to manage the payment of your supplier invoices.
========================================================

This module allows you to create and manage your payment orders, whose purposes are to
---------------------------------------------------------------------------------------
    * serve as a base for an easy plug-in of various automated payment mechanisms, and
    * provide a more efficient way to manage invoice payments.

Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries; it
only records the fact that you gave the payment order to your bank. The
booking of your order must still be encoded, as usual, through a bank
statement. Indeed, it is only once your bank confirms that your order has been
accepted that you can book it in your accounting. To help you with that
operation, you have a new option to import payment orders as bank statement
lines.
    """,
    'images': ['images/payment_mode.jpeg','images/payment_order.jpeg'],
    'depends': ['account','account_voucher'],
    'data': [
        'security/account_payment_security.xml',
        'security/ir.model.access.csv',
        'wizard/account_payment_pay_view.xml',
        'wizard/account_payment_populate_statement_view.xml',
        'wizard/account_payment_create_order_view.xml',
        'account_payment_view.xml',
        'account_payment_workflow.xml',
        'account_payment_sequence.xml',
        'account_payment_report.xml',
        'views/report_paymentorder.xml',
    ],
    'demo': ['account_payment_demo.xml'],
    'test': [
        'test/account_payment_demo.yml',
        'test/cancel_payment_order.yml',
        'test/payment_order_process.yml',
        'test/account_payment_report.yml',
    ],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
gorkinovich/DefendersOfMankind
dependencies/boost-1.46.0/tools/build/v2/test/suffix.py
59
1660
#!/usr/bin/python

# Copyright 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)

import BoostBuild

t = BoostBuild.Tester()

# Regression test: when staging, V2 used to change suffixes on targets
# corresponding to real files.
t.write("jamfile.jam", """
import type : register ;
register A : a1 a2 a3 ;
stage a : a.a3 ;
""")

t.write("jamroot.jam", "")
t.write("a.a3", "")

t.run_build_system()
t.expect_addition("a/a.a3");

# Regression test: we should be able to specify an empty suffix for a derived
# target type, even if the base type has a non-empty suffix.
t.write("a.cpp", "")

t.write("suffixes.jam", """
import type ;
import generators ;
import common ;

type.register First : first : ;
type.register Second : "" : First ;

generators.register-standard $(__name__).second : CPP : Second ;

rule second
{
    TOUCH on $(<) = [ common.file-creation-command ] ;
}

actions second
{
    $(TOUCH) $(<)
}
""")

t.write("suffixes.py", """
import b2.build.type as type
import b2.build.generators as generators
import b2.tools.common as common

from b2.manager import get_manager

type.register("First", ["first"])
type.register("Second", [""], "First")

generators.register_standard("suffixes.second", ["CPP"], ["Second"])

get_manager().engine().register_action("suffixes.second",
                                       "%s $(<)" % common.file_creation_command())
""")

t.write("jamroot.jam", """
import suffixes ;
""")

t.write("jamfile.jam", """
second a : a.cpp ;
""")

t.run_build_system()
t.expect_addition("bin/$toolset/debug/a")

t.cleanup()
gpl-3.0
pwong-mapr/private-hue
desktop/core/ext-py/requests-2.0.0/requests/packages/charade/jpcntx.py
151
19323
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .compat import wrap_ord NUM_OF_CATEGORY = 6 DONT_KNOW = -1 ENOUGH_REL_THRESHOLD = 100 MAX_REL_THRESHOLD = 1000 MINIMUM_DATA_THRESHOLD = 4 # This is hiragana 2-char sequence table, the number in each cell represents its frequency category jp2CharContext = ( (0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1), (2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4), (0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2), (0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4), (1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4), (0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3), 
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3), (0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3), (0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4), (0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3), (2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4), (0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3), (0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5), (0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3), (2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5), (0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4), (1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4), (0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3), (0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3), (0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3), (0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5), (0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4), (0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5), (0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3), (0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4), (0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4), (0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4), 
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1), (0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0), (1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3), (0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0), (0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3), (0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3), (0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5), (0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4), (2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5), (0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3), (0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3), (0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3), (0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3), (0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4), (0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4), (0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2), (0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3), (0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3), (0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3), (0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3), (0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4), 
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3), (0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4), (0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3), (0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3), (0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4), (0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4), (0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3), (2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4), (0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4), (0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3), (0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4), (0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4), (1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4), (0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3), (0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2), (0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2), (0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3), (0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3), (0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5), (0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3), (0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4), 
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4), (0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), (0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3), (0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1), (0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2), (0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3), (0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1), ) class JapaneseContextAnalysis: def __init__(self): self.reset() def reset(self): self._mTotalRel = 0 # total sequence received # category counters, each interger counts sequence in its category self._mRelSample = [0] * NUM_OF_CATEGORY # if last byte in current buffer is not the last byte of a character, # we need to know how many bytes to skip in next buffer self._mNeedToSkipCharNum = 0 self._mLastCharOrder = -1 # The order of previous char # If this flag is set to True, detection is done and conclusion has # been made self._mDone = False def feed(self, aBuf, aLen): if self._mDone: return # The buffer we got is byte oriented, and a character may span in more than one # buffers. In case the last one or two byte in last buffer is not # complete, we record how many byte needed to complete that character # and skip these bytes here. We can choose to record those bytes as # well and analyse the character once it is complete, but since a # character will not make much difference, by simply skipping # this character will simply our logic and improve performance. i = self._mNeedToSkipCharNum while i < aLen: order, charLen = self.get_order(aBuf[i:i + 2]) i += charLen if i > aLen: self._mNeedToSkipCharNum = i - aLen self._mLastCharOrder = -1 else: if (order != -1) and (self._mLastCharOrder != -1): self._mTotalRel += 1 if self._mTotalRel > MAX_REL_THRESHOLD: self._mDone = True break self._mRelSample[jp2CharContext[self._mLastCharOrder][order]] += 1 self._mLastCharOrder = order def got_enough_data(self): return self._mTotalRel > ENOUGH_REL_THRESHOLD def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
if self._mTotalRel > MINIMUM_DATA_THRESHOLD: return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW def get_order(self, aBuf): return -1, 1 class SJISContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if ((0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC)): charLen = 2 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 202) and (0x9F <= second_char <= 0xF1): return second_char - 0x9F, charLen return -1, charLen class EUCJPContextAnalysis(JapaneseContextAnalysis): def get_order(self, aBuf): if not aBuf: return -1, 1 # find out current char's byte length first_char = wrap_ord(aBuf[0]) if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE): charLen = 2 elif first_char == 0x8F: charLen = 3 else: charLen = 1 # return its order if it is hiragana if len(aBuf) > 1: second_char = wrap_ord(aBuf[1]) if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3): return second_char - 0xA1, charLen return -1, charLen # flake8: noqa
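

# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch, not part of the original charade
# source. It feeds repeated EUC-JP hiragana pairs (lead byte 0xA4) so that
# get_order() above returns valid orders and the relevance counters fill up
# past ENOUGH_REL_THRESHOLD.
if __name__ == '__main__':
    analyser = EUCJPContextAnalysis()
    sample = b'\xa4\xb3\xa4\xf3\xa4\xcb\xa4\xc1\xa4\xcf' * 50  # "konnichiwa"
    analyser.feed(sample, len(sample))
    if analyser.got_enough_data():
        # A float derived from the non-category-0 share of observed pairs.
        print(analyser.get_confidence())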
apache-2.0
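The analysers above are normally driven by chardet's probers rather than called directly. A minimal sketch of exercising one by hand, assuming the full chardet package (which supplies wrap_ord and the *_THRESHOLD constants) ships this module as chardet.jpcntx; the sample string is arbitrary hiragana text:

from chardet.jpcntx import SJISContextAnalysis

# feed Shift_JIS bytes and read the confidence once enough
# two-character (hiragana) sequences have been accumulated
analyser = SJISContextAnalysis()
data = u'これはひらがなのテキストです'.encode('shift_jis')
analyser.feed(data, len(data))
print(analyser.got_enough_data(), analyser.get_confidence())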
schanjr/Misc
threading.py
1
3461
import logging
import threading
import time

# credit to http://pymotw.com/2/threading/

logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-10s) %(message)s',
                    )

def wait_for_event(e):
    """Wait for the event to be set before doing anything"""
    logging.debug('wait_for_event starting')
    event_is_set = e.wait()
    logging.debug('event set: %s', event_is_set)

def wait_for_event_timeout(e, t):
    """Wait t seconds and then timeout"""
    while not e.isSet():
        logging.debug('wait_for_event_timeout starting')
        event_is_set = e.wait(t)
        logging.debug('event set: %s', event_is_set)
        if event_is_set:
            logging.debug('processing event')
        else:
            logging.debug('doing other work')

e = threading.Event()
t1 = threading.Thread(name='block',
                      target=wait_for_event,
                      args=(e,))
t1.start()

t2 = threading.Thread(name='non-block',
                      target=wait_for_event_timeout,
                      args=(e, 2))
t2.start()

logging.debug('Waiting before calling Event.set()')
time.sleep(3)
e.set()
logging.debug('Event is set')

############################################################################################################################

import threading
import time
import logging
import random

logging.basicConfig(level=logging.DEBUG,
                    format='[%(levelname)s] (%(threadName)-10s) %(message)s',
                    )  # Try to comment this out and see what happens

def worker():
    """thread worker function"""
    t = threading.currentThread()
    pause = random.randint(1, 5)
    logging.debug('sleeping %s', pause)
    time.sleep(pause)
    logging.debug('ending')
    return

for i in range(3):
    t = threading.Thread(target=worker)
    t.setDaemon(True)
    t.start()

main_thread = threading.currentThread()
for t in threading.enumerate():
    if t is main_thread:
        continue
    logging.debug('joining %s', t.getName())
    t.join()

##############################################################################################################################

import logging
import threading
import time

logging.basicConfig(level=logging.DEBUG,
                    format='(%(threadName)-10s) %(message)s',
                    )

def wait_for_event(e):
    """Wait for the event to be set before doing anything"""
    logging.debug('wait_for_event starting')
    event_is_set = e.wait()
    logging.debug('event set: %s', event_is_set)

def wait_for_event_timeout(e, t):
    """Wait t seconds and then timeout"""
    while not e.isSet():
        logging.debug('wait_for_event_timeout starting')
        event_is_set = e.wait(t)  # notice variable "t" is used as the loop's callback wait time
        logging.debug('event set: %s', event_is_set)
        if event_is_set:
            logging.debug('processing event')
        else:
            logging.debug('doing other work')

e = threading.Event()
t1 = threading.Thread(name='block',
                      target=wait_for_event,
                      args=(e,))
t1.start()

t2 = threading.Thread(name='non-block',
                      target=wait_for_event_timeout,
                      args=(e, 3))
t2.start()

logging.debug('Waiting before calling Event.set()')
time.sleep(3)
# e.set()  # this code would just run forever because the event is never set
logging.debug('Event is set')
gpl-2.0
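The same Event pattern from the file above, condensed into one sketch; this assumes Python 3, where isSet() is spelled is_set() and wait(timeout) returns the flag state:

import threading

done = threading.Event()

def poller():
    # wait(timeout) returns False on timeout, True once the flag is set
    while not done.wait(timeout=0.5):
        print('timed out, doing other work')
    print('event set, exiting')

t = threading.Thread(name='non-block', target=poller)
t.start()
done.set()   # without this the poller loops forever, as the third example notes
t.join()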
akashsinghal/Speech-Memorization-App
Python_Backend/env/lib/python3.6/site-packages/pip/_vendor/progress/__init__.py
916
3023
# Copyright (c) 2012 Giorgos Verigakis <verigak@gmail.com>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

from __future__ import division

from collections import deque
from datetime import timedelta
from math import ceil
from sys import stderr
from time import time


__version__ = '1.2'


class Infinite(object):
    file = stderr
    sma_window = 10

    def __init__(self, *args, **kwargs):
        self.index = 0
        self.start_ts = time()
        self._ts = self.start_ts
        self._dt = deque(maxlen=self.sma_window)
        for key, val in kwargs.items():
            setattr(self, key, val)

    def __getitem__(self, key):
        if key.startswith('_'):
            return None
        return getattr(self, key, None)

    @property
    def avg(self):
        return sum(self._dt) / len(self._dt) if self._dt else 0

    @property
    def elapsed(self):
        return int(time() - self.start_ts)

    @property
    def elapsed_td(self):
        return timedelta(seconds=self.elapsed)

    def update(self):
        pass

    def start(self):
        pass

    def finish(self):
        pass

    def next(self, n=1):
        if n > 0:
            now = time()
            dt = (now - self._ts) / n
            self._dt.append(dt)
            self._ts = now

        self.index = self.index + n
        self.update()

    def iter(self, it):
        for x in it:
            yield x
            self.next()
        self.finish()


class Progress(Infinite):
    def __init__(self, *args, **kwargs):
        super(Progress, self).__init__(*args, **kwargs)
        self.max = kwargs.get('max', 100)

    @property
    def eta(self):
        return int(ceil(self.avg * self.remaining))

    @property
    def eta_td(self):
        return timedelta(seconds=self.eta)

    @property
    def percent(self):
        return self.progress * 100

    @property
    def progress(self):
        return min(1, self.index / self.max)

    @property
    def remaining(self):
        return max(self.max - self.index, 0)

    def start(self):
        self.update()

    def goto(self, index):
        incr = index - self.index
        self.next(incr)

    def iter(self, it):
        try:
            self.max = len(it)
        except TypeError:
            pass

        for x in it:
            yield x
            self.next()
        self.finish()
apache-2.0
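A minimal usage sketch for the Infinite/Progress base classes above. The rendered bars (Bar, Spinner, and friends) live in sibling modules, so this subclasses Progress directly and overrides its no-op update(); it assumes the module above is importable, e.g. as pip._vendor.progress:

from time import sleep

from pip._vendor.progress import Progress

class PercentEta(Progress):
    def update(self):
        # percent and eta are the properties defined on Progress above
        print('\r%3d%% (eta: %ds)' % (self.percent, self.eta), end='')

# iter() sets max from len(), calls next() per item and finish() at the end
for _ in PercentEta(max=20).iter(range(20)):
    sleep(0.05)  # simulate work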
Suncatcher/aws_sg_migrate
aws_sg_migrate.py
1
9756
#!/usr/bin/python

import sys, os, stat, re, subprocess, getopt, json

cmd = ['aws', 'ec2', 'describe-regions']
ap = subprocess.check_output(cmd)
data = json.loads(ap.decode('utf-8'))
if 'Regions' not in data:
    print("Internal error: no Regions in return structure")
    sys.exit(3)
regions_dict = data['Regions']
regions = list(map(lambda x: x['RegionName'], regions_dict))

vpcs = []
for region in regions_dict:
    cmd = ['aws', 'ec2', 'describe-vpcs', '--region=%s' % region["RegionName"]]
    ap = subprocess.check_output(cmd)
    data = json.loads(ap.decode('utf-8'))
    vpcs_dict = data['Vpcs']
    vpcs.append(vpcs_dict[0]['VpcId'])

regions_vpcs = dict(zip(regions, vpcs))


def makesg(profile, sgid, vpcid, source, destin, shell):
    script = open('%s.sh' % sgid, 'w')
    cmd = [
        'aws', 'ec2', 'describe-security-groups',
        '--region=%s' % source,
        '--output=json',
    ]
    if source:
        cmd.append("--group-id=%s" % (sgid))
    else:
        cmd = [
            'aws', 'ec2', 'describe-security-groups',
            '--group-id=%s' % sgid,
            '--output=json',
        ]
    if profile:
        cmd.append('--profile')
        cmd.append(profile)
    ap = subprocess.check_output(cmd)
    data = json.loads(ap.decode('utf-8'))
    if 'SecurityGroups' not in data:
        print("Internal error: no SecurityGroups key in data")
        sys.exit(3)
    sg1 = data['SecurityGroups'][0]
    groupName = sg1['GroupName']  # + '_migrated'
    groupDesc = sg1['Description']
    groupTags = sg1['Tags']

    # Sanity check
    perms = sg1['IpPermissions'] + sg1['IpPermissionsEgress']
    for ipp in perms:
        if 'FromPort' not in ipp: continue
        if 'IpProtocol' not in ipp: continue
        if 'IpRanges' not in ipp: continue
        if 'ToPort' not in ipp: continue
        if len(ipp['UserIdGroupPairs']) > 0:
            sys.stderr.write("Warning: ignoring User Id info\n")
        for ipr in ipp['IpRanges']:
            for k in ipr.keys():
                if k != 'CidrIp' and k != 'Description':
                    sys.stderr.write("Error: Don't know how to handle")
                    sys.stderr.write("key %s in IpRanges\n" % (k))
                    sys.exit(4)

    destinations = []
    if shell:
        print("# Commands auto-generated by the copysg.py script", file=script)
        print(" ", file=script)
    destinations = regions_vpcs
    cmd.append("--filters=Name=group-name,Values='%s'" % (groupName))
    if destin is not None:
        filtered = {dest: vpc for dest, vpc in destinations.items()
                    if dest not in [source] and dest == destin}
    else:
        filtered = {dest: vpc for dest, vpc in destinations.items()
                    if dest not in [source]}
    for dest, vpc in filtered.items():
        ap = subprocess.check_output(cmd)
        if ap is not None:
            delete_cmd = "aws ec2 delete-security-group"
            create_cmd = "aws ec2 create-security-group --vpc-id=%s" % (vpc)
            cmd_existing = [
                'aws', 'ec2', 'describe-security-groups',
                '--region=%s' % dest,
                "--query=SecurityGroups[].GroupName"
            ]
            ap = subprocess.check_output(cmd_existing)
            sgNames = json.loads(ap)
            if shell:
                if ap is not None:
                    if groupName != 'default' and groupName in sgNames:
                        print("sgout=(`%s --group-name='%s' --region %s --output table`)" % (delete_cmd, groupName, dest), file=script)
                        print('if [ $? != 0 ]; then', file=script)
                        print(' echo "Error: %s failed"' % (delete_cmd), file=script)
                        print(' exit 1', file=script)
                        print('fi', file=script)
                    print("sgout=(`%s --group-name='%s' --region %s --description='%s' --output table`)" % (create_cmd, groupName, dest, groupDesc), file=script)
                    print('if [ $? != 0 ]; then', file=script)
                    print(' echo "Error: %s failed"' % (create_cmd), file=script)
                    print(' exit 1', file=script)
                    print('fi', file=script)
                    print('if [ "${sgout[6]}" != \'GroupId\' ]; then', file=script)
                    print(' echo "Error: expected \'GroupId\', got ${sgout[6]}"', file=script)
                    print(' exit 1', file=script)
                    print('fi', file=script)
                    print('SGID=${sgout[8]}', file=script)
            else:
                if groupName != 'default' and groupName in sgNames:
                    print("%s --group-name='%s' --region %s" % (delete_cmd, groupName, dest), file=script)
                print("sgcreate=$(%s --group-name='%s' --region %s --description='%s')" % (create_cmd, groupName, dest, groupDesc), file=script)
                print("sgid=$(echo $sgcreate|sed 's/{//g;s/}//g;s/\"//g'|cut -d ':' -f2)", file=script)
            for ipp in sg1['IpPermissions']:
                if 'FromPort' not in ipp: continue
                if 'IpProtocol' not in ipp: continue
                if 'IpRanges' not in ipp: continue
                if 'ToPort' not in ipp: continue
                for ipr in ipp['IpRanges']:
                    cidr = ipr['CidrIp']
                    auth_cmd = "aws ec2 authorize-security-group-ingress"
                    if shell:
                        print("%s --region %s --group-id=$SGID --protocol='%s'" % (auth_cmd, dest, ipp['IpProtocol']), end=" ", file=script)
                    else:
                        print("%s --region %s --group-name=%s --protocol='%s'" % (auth_cmd, dest, groupName, ipp['IpProtocol']), end=" ", file=script)
                    if ipp['ToPort'] < 0:
                        ipp['ToPort'] = ipp['FromPort']
                    if ipp['FromPort'] != ipp['ToPort']:
                        print("--port=%s-%s" % (ipp['FromPort'], ipp['ToPort']), end=" ", file=script)
                    else:
                        print("--port=%s" % (ipp['FromPort']), end=" ", file=script)
                    print("--cidr=%s" % (ipr['CidrIp']), file=script)
                    if shell:
                        print('if [ $? != 0 ]; then', file=script)
                        print(' echo "Error: %s failed"' % (auth_cmd), file=script)
                        print(' exit 1', file=script)
                        print('fi', file=script)
            for ipp in sg1['IpPermissionsEgress']:
                if 'FromPort' not in ipp: continue
                if 'IpProtocol' not in ipp: continue
                if 'IpRanges' not in ipp: continue
                if 'ToPort' not in ipp: continue
                for ipr in ipp['IpRanges']:
                    cidr = ipr['CidrIp']
                    auth_cmd = "aws ec2 authorize-security-group-egress"
                    if shell:
                        print("%s --region %s --group-id=$SGID --protocol='%s'" % (auth_cmd, dest, ipp['IpProtocol']), end=" ", file=script)
                    else:
                        print("%s --region %s --group-id=$SGID --protocol='%s'" % (auth_cmd, dest, ipp['IpProtocol']), end=" ", file=script)
                    if ipp['ToPort'] < 0:  # ICMP ToPort was -1 ???
                        ipp['ToPort'] = ipp['FromPort']
                    if ipp['FromPort'] != ipp['ToPort']:
                        print("--port=%s-%s" % (ipp['FromPort'], ipp['ToPort']), end=" ", file=script)
                    else:
                        print("--port=%s" % (ipp['FromPort']), end=" ", file=script)
                    print("--cidr=%s" % (ipr['CidrIp']), file=script)
                    if shell:
                        print('if [ $? != 0 ]; then', file=script)
                        print(' echo "Error: %s failed"' % (auth_cmd), file=script)
                        print(' exit 1', file=script)
                        print('fi', file=script)
            for tag in groupTags:
                if shell:
                    print("aws ec2 create-tags --region %s --resources $SGID" % (dest), end=" ", file=script)
                    print('--tags "Key=%s,Value=%s"' % (tag['Key'], tag['Value']), file=script)
                else:
                    print("aws ec2 create-tags --region %s --resources $sgid" % (dest), end=" ", file=script)
                    print('--tags "Key=%s,Value=%s"' % (tag['Key'], tag['Value']), file=script)

    # setting script permissions
    os.chmod(script.name, 0o755)


############################### MAIN #######################################

def main():
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hp:sv:", [
            "help",
            "profile=",
            "shell",
            "vpc=",
            "src=",
            "dest=",
        ])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    profile = None
    vpcid = None
    shell = False
    source = None
    destination = None
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            return
        if o in ("-p", "--profile"):
            profile = a
        if o in ("-s", "--shell"):
            shell = True
        if o in ("-sc", "--src"):
            source = a
        if o in ("-ds", "--dest"):
            destination = a
        if o in ("-v", "--vpc"):
            vpcid = a
    if len(args) != 1:
        print("ERROR: You must give a security group id")
        usage()
        sys.exit(1)
    sgid = args[0]
    makesg(profile, sgid, vpcid, source, destination, shell)


############################## USAGE #########################################

def usage():
    print("copysg.py [-h] [--profile=alt_profile] [--shell] [--vpc=vpcid] [--src=source_region] [--dest=dest_region]", end=" ")
    print("sg_id")
    print(" -h - help")
    print(" --profile (or -p) - use alternate aws cli profile")
    print(" --shell (or -s) - wrap commands in shell syntax to capture id")
    print(" --vpc (or -v) - specify destination VPC ID for new SG")
    print(" --src (or -sc) - specify source region for new SG")
    print(" --dest (or -ds) - specify destination region for new SG")


if __name__ == "__main__":
    main()
gpl-3.0
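Judging from usage() above, a typical run copies one security group toward another region and emits a sg-<id>.sh script of aws cli commands. A hypothetical invocation from Python; the group id and regions here are made up:

import subprocess

subprocess.check_call([
    './aws_sg_migrate.py',   # path as checked in; usage() still calls it copysg.py
    '--shell',
    '--src=us-east-1',
    '--dest=eu-west-1',
    'sg-0123456789abcdef0',  # hypothetical security-group id
])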
zeickan/Infected-Engine
paypal/pro/migrations/0001_initial.py
2
9390
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):

    def forwards(self, orm):

        # Adding model 'PayPalNVP'
        db.create_table('paypal_nvp', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('method', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('ack', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('profilestatus', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('timestamp', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('profileid', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('profilereference', self.gf('django.db.models.fields.CharField')(max_length=128, blank=True)),
            ('correlationid', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('token', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('payerid', self.gf('django.db.models.fields.CharField')(max_length=64, blank=True)),
            ('firstname', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('lastname', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('street', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('state', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('countrycode', self.gf('django.db.models.fields.CharField')(max_length=2, blank=True)),
            ('zip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('invnum', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('custom', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('flag_code', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
            ('flag_info', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('ipaddress', self.gf('django.db.models.fields.IPAddressField')(max_length=15, blank=True)),
            ('query', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('response', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('pro', ['PayPalNVP'])

    def backwards(self, orm):

        # Deleting model 'PayPalNVP'
        db.delete_table('paypal_nvp')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'pro.paypalnvp': {
            'Meta': {'object_name': 'PayPalNVP', 'db_table': "'paypal_nvp'"},
            'ack': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'correlationid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'countrycode': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'custom': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'firstname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'flag_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'flag_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invnum': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'ipaddress': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'blank': 'True'}),
            'lastname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'method': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'payerid': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'profileid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'profilereference': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'profilestatus': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'query': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'response': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
        }
    }

    complete_apps = ['pro']
apache-2.0
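For context, a South schema migration like the one above is generated and applied with South's management commands; a sketch assuming a Django project with south in INSTALLED_APPS:

# Shown as comments since these are shell commands, not importable Python:
#   python manage.py schemamigration pro --initial   # emits a 0001_initial.py like the file above
#   python manage.py migrate pro                     # runs forwards()
#   python manage.py migrate pro zero                # runs backwards()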
xresloader/xresloader
tools/gen_header_v2.py
1
1709
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import os
import re
import string
import glob
import sys
from subprocess import Popen

work_dir = os.getcwd()
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
sys.path.append(script_dir)

os.chdir(os.path.join('..'))
project_dir = os.getcwd()
proto_dir = os.path.join(project_dir, 'header')
proto_file = os.path.join(proto_dir, 'pb_header.proto')
extension_proto_file = glob.glob(os.path.join(proto_dir, 'extensions', 'v2', '*.proto'))

os.chdir(work_dir)

java_out_dir = proto_dir
pb_out_file = os.path.join(proto_dir, 'pb_header.pb')

from find_protoc import find_protoc

common_args = [
    "-I", os.path.join(proto_dir, 'extensions', 'v2'),
    "-I", os.path.join(proto_dir, 'extensions'),
    "-I", os.path.join(proto_dir)
]

# the java source is built from the non-LITE version
proto_file_data = open(proto_file).read()
proto_file_no_lite_data = re.sub(
    '\s*option\s*optimize_for\s*=\s*LITE_RUNTIME\s*;',
    '// option optimize_for = LITE_RUNTIME;',
    proto_file_data)
open(proto_file, 'wb').write(proto_file_no_lite_data.encode('utf-8'))

print('[PROCESS] generate java source ... ')
exec_args = [find_protoc(), java_out_dir]
exec_args.extend(common_args)
exec_args.extend([proto_file])
exec_args.extend(extension_proto_file)
Popen(exec_args, shell=False).wait()
print('[PROCESS] generate java source done.')

# the pb file is built from the LITE version
open(proto_file, 'wb').write(proto_file_data.encode('utf-8'))

print('[PROCESS] generate proto pb file ... ')
exec_args = [find_protoc(), '-o', pb_out_file]
exec_args.extend(common_args)
exec_args.extend([proto_file])
Popen(exec_args, shell=False).wait()
print('[PROCESS] generate proto pb file done.')
mit
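The core trick in the script above is toggling protobuf's optimize_for option between the two protoc passes; a self-contained demo of that substitution:

import re

# the same substitution gen_header_v2.py applies before the java pass
proto = 'syntax = "proto2";\noption optimize_for = LITE_RUNTIME;\n'
no_lite = re.sub(r'\s*option\s*optimize_for\s*=\s*LITE_RUNTIME\s*;',
                 '// option optimize_for = LITE_RUNTIME;', proto)
print(repr(no_lite))  # -> 'syntax = "proto2";// option optimize_for = LITE_RUNTIME;\n'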
AntidoteLabs/Antidote-DM
Antidotes DM/youtube_dl/extractor/democracynow.py
23
3247
# coding: utf-8
from __future__ import unicode_literals

import re
import os.path

from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
    url_basename,
    remove_start,
)


class DemocracynowIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?democracynow.org/(?P<id>[^\?]*)'
    IE_NAME = 'democracynow'
    _TESTS = [{
        'url': 'http://www.democracynow.org/shows/2015/7/3',
        'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
        'info_dict': {
            'id': '2015-0703-001',
            'ext': 'mp4',
            'title': 'July 03, 2015 - Democracy Now!',
            'description': 'A daily independent global news hour with Amy Goodman & Juan González "What to the Slave is 4th of July?": James Earl Jones Reads Frederick Douglass\u2019 Historic Speech : "This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag : "We Shall Overcome": Remembering Folk Icon, Activist Pete Seeger in His Own Words & Songs',
        },
    }, {
        'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
        'md5': 'fbb8fe3d7a56a5e12431ce2f9b2fab0d',
        'info_dict': {
            'id': '2015-0703-001',
            'ext': 'mp4',
            'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
            'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        description = self._og_search_description(webpage)

        json_data = self._parse_json(self._search_regex(
            r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
            display_id)
        video_id = None
        formats = []

        default_lang = 'en'

        subtitles = {}

        def add_subtitle_item(lang, info_dict):
            if lang not in subtitles:
                subtitles[lang] = []
            subtitles[lang].append(info_dict)

        # chapter_file are not subtitles
        if 'caption_file' in json_data:
            add_subtitle_item(default_lang, {
                'url': compat_urlparse.urljoin(url, json_data['caption_file']),
            })

        for subtitle_item in json_data.get('captions', []):
            lang = subtitle_item.get('language', '').lower() or default_lang
            add_subtitle_item(lang, {
                'url': compat_urlparse.urljoin(url, subtitle_item['url']),
            })

        for key in ('file', 'audio', 'video'):
            media_url = json_data.get(key, '')
            if not media_url:
                continue
            media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
            video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
            formats.append({
                'url': media_url,
            })

        self._sort_formats(formats)

        return {
            'id': video_id or display_id,
            'title': json_data['title'],
            'description': description,
            'subtitles': subtitles,
            'formats': formats,
        }
gpl-2.0
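A sketch of exercising the extractor above through youtube_dl's public API rather than calling _real_extract directly; this requires network access and an installed youtube_dl package:

import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    # URL taken from the first _TESTS entry above
    info = ydl.extract_info(
        'http://www.democracynow.org/shows/2015/7/3', download=False)
    print(info['id'], info['title'])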
souravbadami/zeroclickinfo-fathead
lib/fathead/beautifulsoup/parse.py
6
5700
#!/usr/bin/env python3
#
# -*- coding: utf-8 -*-

from bs4 import BeautifulSoup, NavigableString

SOUP_HOME = 'https://www.crummy.com/software/BeautifulSoup/bs4/doc/'


class SoupData(object):
    """
    Object responsible for loading raw HTML data from BeautifulSoup doc:
    """
    def __init__(self):
        """
        Initialize SoupData object. Load data from HTML.
        """
        self.SOUP_HTML = ""
        self.load_data()

    def load_data(self):
        """
        Open the HTML file and load it into the object.
        """
        with open('download/index.html', 'r') as data_file:
            self.SOUP_HTML = data_file.read()

    def get_raw_data(self):
        """
        Returns:
            The raw HTML that was loaded.
        """
        return self.SOUP_HTML


class SoupDataParser(object):
    """
    Object responsible for parsing the raw HTML that contains
    the beautifulsoup data
    """
    def __init__(self, raw_data):
        """
        Given raw data, get the relevant section.

        Args:
            raw_data: HTML data
        """
        self.parsed_data = None
        self.sections = list()

        soup_data = BeautifulSoup(raw_data, 'html.parser')
        for content in soup_data.find('div', {'class': 'body'}):
            if not content or isinstance(content, NavigableString):
                continue
            self.sections.append(content)

    def parse_name_and_anchor_from_data(self, section):
        """
        Find the name and anchor for a given section.

        Args:
            section: A section of parsed HTML

        Returns:
            name: Name of the section
            anchor: Anchor tag to use when linking back to the docs (ie #tags)
        """
        name, anchor = None, None
        a_tag = section.find('a', {'class': 'headerlink'})
        if a_tag:
            anchor = a_tag['href']
            name = a_tag.parent.text[:-1].replace('()', '').replace('.', '')
        return name, anchor

    def parse_first_paragraph_from_data(self, section):
        """
        Get the first paragraph for display.

        Args:
            section: A section of parsed HTML

        Returns:
            First paragraph in the HTML
        """
        data = section.find('p')
        if data:
            return data.text.replace('\n', ' ').replace('  ', ' ')
        return None

    def parse_code_from_data(self, section):
        """
        Look for example code block to output.

        Args:
            section: A section of parsed HTML

        Returns:
            Formatted code string
        """
        code = section.find('div', {'class': 'highlight-python'})
        if code:
            return '<pre><code>{}</code></pre>'.format(
                code.text.replace('¶', '').replace('\n', '\\n'))
        return None

    def parse_for_data(self):
        """
        Main gateway into parsing the data. Will retrieve all
        necessary data elements.
        """
        data = list()

        for section in self.sections:
            for sub_section in section.find_all('div', {'class': 'section'}):
                name, anchor = self.parse_name_and_anchor_from_data(sub_section)
                first_paragraph = self.parse_first_paragraph_from_data(sub_section)
                code = self.parse_code_from_data(sub_section)

                if name and first_paragraph and code:
                    data_elements = {
                        'name': name,
                        'anchor': anchor,
                        'first_paragraph': first_paragraph,
                        'code': code
                    }
                    data.append(data_elements)

        self.parsed_data = data

    def get_data(self):
        """
        Get the parsed data.

        Returns:
            self.parsed_data: Dict containing necessary data elements
        """
        return self.parsed_data


class SoupDataOutput(object):
    """
    Object responsible for outputting data into the output.txt file
    """
    def __init__(self, data):
        """
        Initialize SoupDataOutput object.

        Args:
            data: Dict containing the data elements
        """
        self.data = data

    def create_file(self):
        """
        Iterate through the data and create the necessary output.txt file.
        """
        with open('output.txt', 'w+') as output_file:
            for data_element in self.data:
                name = data_element.get('name')
                code = data_element.get('code')
                first_paragraph = data_element.get('first_paragraph')
                abstract = '{}{}{}'.format(code,
                                           '<br>' if code else '',
                                           first_paragraph)
                url = '{}{}'.format(SOUP_HOME, data_element.get('anchor'))
                list_of_data = [
                    name,        # unique name
                    'A',         # type is article
                    '',          # no redirect data
                    '',          # ignore
                    '',          # no categories
                    '',          # ignore
                    '',          # no related topics
                    '',          # ignore
                    SOUP_HOME,   # add an external link back to BeautifulSoup Home
                    '',          # no disambiguation
                    '',          # images
                    abstract,    # abstract
                    url          # url to the relevant tag doc
                ]
                output_file.write('{}\n'.format('\t'.join(list_of_data)))


if __name__ == "__main__":
    data = SoupData()
    parser = SoupDataParser(data.get_raw_data())
    parser.parse_for_data()
    output = SoupDataOutput(parser.get_data())
    output.create_file()
apache-2.0
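A small sketch of the pipeline above run on inline markup instead of download/index.html; the sample HTML is made up, but mirrors the div.body / div.section / a.headerlink structure the parser expects, and assumes SoupDataParser from the file above is in scope:

SAMPLE = '''
<div class="body">
  <div class="section">
    <div class="section">
      <h2>find_all<a class="headerlink" href="#find-all">¶</a></h2>
      <p>Searches a tag's descendants for matching elements.</p>
      <div class="highlight-python"><pre>soup.find_all("a")</pre></div>
    </div>
  </div>
</div>'''

parser = SoupDataParser(SAMPLE)
parser.parse_for_data()
print(parser.get_data())
# -> [{'name': 'find_all', 'anchor': '#find-all',
#      'first_paragraph': "Searches a tag's descendants for matching elements.",
#      'code': '<pre><code>soup.find_all("a")</code></pre>'}]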
noba3/KoTos
addons/plugin.video.iptvxtra-trtv/resources/lib/requests/packages/chardet/big5freq.py
3133
82594
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # Big5 frequency table # by Taiwan's Mandarin Promotion Council # <http://www.edu.tw:81/mandr/> # # 128 --> 0.42261 # 256 --> 0.57851 # 512 --> 0.74851 # 1024 --> 0.89384 # 2048 --> 0.97583 # # Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98 # Random Distribution Ration = 512/(5401-512)=0.105 # # Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75 #Char to FreqOrder table BIG5_TABLE_SIZE = 5376 Big5CharToFreqOrder = ( 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16 3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32 1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64 3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80 4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96 5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160 2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176 1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192 3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224 1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240 3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256 2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288 3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304 1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320 5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352 5030,5031, 
39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368 1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416 3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432 3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464 2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480 2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528 3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544 1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560 1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576 1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592 2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624 4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640 1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656 5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672 2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752 5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784 1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832 5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848 1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880 3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896 4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912 3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960 1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976 4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992 3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008 3827,3213,1464, 522,1119, 661,1150, 
216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024 2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040 5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056 3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072 5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088 1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104 2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120 1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152 1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168 4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184 3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248 2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264 5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280 1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296 2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312 1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328 1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344 5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360 5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376 5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392 3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408 4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424 4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440 2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456 5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472 3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504 5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520 5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536 1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552 2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568 3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584 4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600 5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616 3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632 4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648 1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664 
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680 4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696 1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728 1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744 1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760 3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792 5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808 2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824 1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840 1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856 5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888 4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920 2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952 1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968 1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000 4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016 4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032 1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048 3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064 5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080 5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096 1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112 2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128 1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144 3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160 2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176 3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192 2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208 4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224 4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240 3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272 3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304 3374,3303, 220,2565,1009,5268,3855, 670,3010, 
332,1208, 717,5269,5270,3603,2452, # 2320 4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336 3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352 1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368 5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400 5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416 1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448 4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464 4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496 2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512 2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528 3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544 1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560 4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576 2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592 1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608 1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624 2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640 3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656 1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672 5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688 1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704 4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720 1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752 1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768 4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784 4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800 2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816 1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832 4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864 5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880 2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896 3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912 4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944 5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960 5359, 
535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976 1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992 4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008 4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024 2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040 3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056 3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072 2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088 1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104 4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120 3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136 3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152 2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168 4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184 5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200 3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216 2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232 3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248 1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264 2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280 3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296 4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312 2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328 2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344 5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360 1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376 2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392 1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408 3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424 4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440 2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456 3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472 3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488 2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504 4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520 2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536 3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552 4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568 5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584 3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600 194,2091,3174,3755,2226,3327,4339, 
628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616 1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632 4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648 1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664 4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680 5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712 5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728 5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744 2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760 3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776 2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792 2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824 1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840 4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856 3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872 3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904 2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936 2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952 4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968 1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984 4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000 1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016 3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048 3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064 5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080 5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096 3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112 3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128 1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144 2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160 5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176 1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192 1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208 3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240 1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, 
# 4256 4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272 5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288 2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304 3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336 1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352 2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368 2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384 5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400 5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416 5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432 2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448 2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464 1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480 4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496 3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512 3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528 4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544 4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560 2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576 2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592 5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608 4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624 5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640 4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688 1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704 3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720 4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736 1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752 5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768 2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784 2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800 3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816 5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832 1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848 3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864 5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880 1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896 5685,5686,5687,3536, 
957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912 2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928 3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944 2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960 3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976 3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992 3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008 4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040 2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056 4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072 3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088 5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104 1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120 5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152 1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184 4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200 1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216 4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232 1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264 3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280 4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296 5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328 3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360 2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512 #Everything below is of no interest for detection purpose 2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392 2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408 5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424 5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440 5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456 5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472 5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488 5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504 5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520 5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, 
# 5536 5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552 5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568 5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584 5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600 6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616 6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632 6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648 6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664 6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680 6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696 6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712 6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728 6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744 6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760 6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776 6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792 6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808 6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824 6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840 6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856 6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872 6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888 6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904 6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920 6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936 6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952 6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968 6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984 6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000 6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016 6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032 6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048 6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064 6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080 6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096 6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112 6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128 6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144 6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160 6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176 
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192 6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208 6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224 6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240 6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256 3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272 6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288 6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304 3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320 6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336 6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352 6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368 6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384 6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400 6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416 6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432 4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448 6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464 6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480 3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496 6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512 6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528 6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544 6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560 6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576 6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592 6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608 6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624 6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640 6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656 6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672 7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688 7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704 7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720 7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736 7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752 7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768 7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784 7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800 7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816 
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832 7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848 7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864 7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880 7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896 7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912 7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928 7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944 7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960 7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976 7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992 7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008 7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024 7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040 7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056 7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072 7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088 7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104 7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120 7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136 7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152 7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168 7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184 7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200 7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216 7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232 7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248 7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264 7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280 7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296 7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312 7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328 7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344 7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360 7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376 7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392 7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408 7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424 7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440 3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456 
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472 7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488 7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504 7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520 4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536 7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552 7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568 7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584 7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600 7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616 7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632 7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648 7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664 7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680 7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696 7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712 8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728 8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744 8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760 8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776 8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792 8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808 8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824 8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840 8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856 8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872 8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888 8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904 8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920 8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936 8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952 8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968 8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984 8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000 8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016 8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032 8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048 8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064 8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080 8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096 
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112 8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128 8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144 8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160 8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176 8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192 8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208 8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224 8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240 8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256 8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272 8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288 8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304 8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320 8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336 8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352 8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368 8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384 8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400 8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416 8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432 8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448 8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464 8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480 8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496 8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512 8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528 8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544 8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560 8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576 8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592 8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608 8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624 8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640 8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656 8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672 8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688 4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704 8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720 8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736 
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752 8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768 9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784 9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800 9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816 9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832 9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848 9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864 9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880 9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896 9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912 9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928 9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944 9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960 9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976 9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992 9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008 9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024 9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040 9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056 9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072 9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088 9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104 9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120 9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136 9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152 9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168 9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184 9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200 9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216 9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232 9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248 9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264 9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280 9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296 9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312 9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328 9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344 9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360 9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376 
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392 9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408 9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424 9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440 4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456 9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472 9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488 9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504 9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520 9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536 9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552 9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568 9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584 9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600 9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616 9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632 9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648 9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664 9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680 9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696 9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712 9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728 9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744 9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760 9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776 9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792 9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808 9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824 10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840 10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856 10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872 10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888 10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904 10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920 10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936 10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952 10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968 4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984 
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000 10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016 10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032 10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048 10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064 10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080 10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096 10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112 4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128 10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144 10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160 10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176 10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192 10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208 10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224 10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240 10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256 10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272 10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288 10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304 10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320 10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336 10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352 10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368 10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384 10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400 4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416 10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432 10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448 10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464 10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480 10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496 10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512 10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528 
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544 10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560 10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576 10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592 10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608 10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624 10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640 10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656 10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672 10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688 10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704 10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720 10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736 10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752 10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768 10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784 10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800 10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816 10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832 10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848 10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864 10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880 10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896 11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912 11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928 11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944 4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960 11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976 11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992 11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008 11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024 11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040 11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056 11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072 
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088 11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104 11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120 11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136 11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152 11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168 11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184 11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200 11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216 11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232 11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248 11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264 11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280 11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296 11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312 11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328 11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344 11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360 11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376 11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392 11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408 11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424 11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440 11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456 11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472 4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488 11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504 11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520 11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536 11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552 11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568 11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584 11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600 11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616 
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632 11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648 11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664 11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680 11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696 11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712 11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728 11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744 11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760 11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776 11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792 11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808 11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824 11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840 11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856 11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872 11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888 11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904 11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920 11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936 12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952 12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968 12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984 12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000 12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016 12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032 12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048 12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064 12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080 12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096 12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112 12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128 12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144 12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160 
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176 4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192 4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208 4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224 12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240 12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256 12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272 12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288 12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304 12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320 12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336 12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352 12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368 12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384 12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400 12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416 12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432 12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448 12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464 12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480 12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496 12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512 12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528 12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544 12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560 12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576 12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592 12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608 12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624 12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640 12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656 12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672 12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688 12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704 
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720 12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736 12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752 12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768 12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784 12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800 12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816 12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832 12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848 12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864 12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880 12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896 12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912 12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928 12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944 12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960 12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976 4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992 13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008 13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024 13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040 13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056 13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072 13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088 13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104 4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120 13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136 13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152 13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168 13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184 13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200 13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216 13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232 13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248 
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264 13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280 13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296 13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312 13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328 13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344 13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360 5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376 13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392 13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408 13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424 13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440 13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456 13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472 13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488 13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504 13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520 13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536 13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552 13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568 13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584 13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600 13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616 13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632 13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648 13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664 13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680 13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696 13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712 13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728 13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744 13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760 13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776 13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792 
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808 13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824 13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840 13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856 13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872 13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888 13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904 13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920 13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936 13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952 13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968 13968,13969,13970,13971,13972) #13973 # flake8: noqa
gpl-2.0
weimingtom/python-for-android
python-modules/twisted/twisted/conch/insults/helper.py
61
13635
# -*- test-case-name: twisted.conch.test.test_helper -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Partial in-memory terminal emulator

@author: Jp Calderone
"""

import re, string

from zope.interface import implements

from twisted.internet import defer, protocol, reactor
from twisted.python import log

from twisted.conch.insults import insults

FOREGROUND = 30
BACKGROUND = 40
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, N_COLORS = range(9)


class CharacterAttribute:
    """Represents the attributes of a single character.

    Character set, intensity, underlinedness, blinkitude, video
    reversal, as well as foreground and background colors made up a
    character's attributes.
    """
    def __init__(self, charset=insults.G0,
                 bold=False, underline=False,
                 blink=False, reverseVideo=False,
                 foreground=WHITE, background=BLACK,
                 _subtracting=False):
        self.charset = charset
        self.bold = bold
        self.underline = underline
        self.blink = blink
        self.reverseVideo = reverseVideo
        self.foreground = foreground
        self.background = background
        self._subtracting = _subtracting

    def __eq__(self, other):
        return vars(self) == vars(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def copy(self):
        c = self.__class__()
        c.__dict__.update(vars(self))
        return c

    def wantOne(self, **kw):
        k, v = kw.popitem()
        if getattr(self, k) != v:
            attr = self.copy()
            attr._subtracting = not v
            setattr(attr, k, v)
            return attr
        else:
            return self.copy()

    def toVT102(self):
        # Spit out a vt102 control sequence that will set up
        # all the attributes set here.  Except charset.
        attrs = []
        if self._subtracting:
            attrs.append(0)
        if self.bold:
            attrs.append(insults.BOLD)
        if self.underline:
            attrs.append(insults.UNDERLINE)
        if self.blink:
            attrs.append(insults.BLINK)
        if self.reverseVideo:
            attrs.append(insults.REVERSE_VIDEO)
        if self.foreground != WHITE:
            attrs.append(FOREGROUND + self.foreground)
        if self.background != BLACK:
            attrs.append(BACKGROUND + self.background)
        if attrs:
            return '\x1b[' + ';'.join(map(str, attrs)) + 'm'
        return ''


# XXX - need to support scroll regions and scroll history
class TerminalBuffer(protocol.Protocol):
    """
    An in-memory terminal emulator.
    """
    implements(insults.ITerminalTransport)

    for keyID in ('UP_ARROW', 'DOWN_ARROW', 'RIGHT_ARROW', 'LEFT_ARROW',
                  'HOME', 'INSERT', 'DELETE', 'END', 'PGUP', 'PGDN',
                  'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9',
                  'F10', 'F11', 'F12'):
        exec '%s = object()' % (keyID,)

    TAB = '\t'
    BACKSPACE = '\x7f'

    width = 80
    height = 24

    fill = ' '
    void = object()

    def getCharacter(self, x, y):
        return self.lines[y][x]

    def connectionMade(self):
        self.reset()

    def write(self, bytes):
        """
        Add the given printable bytes to the terminal.

        Line feeds in C{bytes} will be replaced with carriage return /
        line feed pairs.
        """
        for b in bytes.replace('\n', '\r\n'):
            self.insertAtCursor(b)

    def _currentCharacterAttributes(self):
        return CharacterAttribute(self.activeCharset, **self.graphicRendition)

    def insertAtCursor(self, b):
        """
        Add one byte to the terminal at the cursor and make consequent state
        updates.

        If b is a carriage return, move the cursor to the beginning of the
        current row.

        If b is a line feed, move the cursor to the next row or scroll down
        if the cursor is already in the last row.

        Otherwise, if b is printable, put it at the cursor position
        (inserting or overwriting as dictated by the current mode) and move
        the cursor.
        """
        if b == '\r':
            self.x = 0
        elif b == '\n':
            self._scrollDown()
        elif b in string.printable:
            if self.x >= self.width:
                self.nextLine()
            ch = (b, self._currentCharacterAttributes())
            if self.modes.get(insults.modes.IRM):
                self.lines[self.y][self.x:self.x] = [ch]
                self.lines[self.y].pop()
            else:
                self.lines[self.y][self.x] = ch
            self.x += 1

    def _emptyLine(self, width):
        return [(self.void, self._currentCharacterAttributes())
                for i in xrange(width)]

    def _scrollDown(self):
        self.y += 1
        if self.y >= self.height:
            self.y -= 1
            del self.lines[0]
            self.lines.append(self._emptyLine(self.width))

    def _scrollUp(self):
        self.y -= 1
        if self.y < 0:
            self.y = 0
            del self.lines[-1]
            self.lines.insert(0, self._emptyLine(self.width))

    def cursorUp(self, n=1):
        self.y = max(0, self.y - n)

    def cursorDown(self, n=1):
        self.y = min(self.height - 1, self.y + n)

    def cursorBackward(self, n=1):
        self.x = max(0, self.x - n)

    def cursorForward(self, n=1):
        self.x = min(self.width, self.x + n)

    def cursorPosition(self, column, line):
        self.x = column
        self.y = line

    def cursorHome(self):
        self.x = self.home.x
        self.y = self.home.y

    def index(self):
        self._scrollDown()

    def reverseIndex(self):
        self._scrollUp()

    def nextLine(self):
        """
        Update the cursor position attributes and scroll down if appropriate.
        """
        self.x = 0
        self._scrollDown()

    def saveCursor(self):
        self._savedCursor = (self.x, self.y)

    def restoreCursor(self):
        self.x, self.y = self._savedCursor
        del self._savedCursor

    def setModes(self, modes):
        for m in modes:
            self.modes[m] = True

    def resetModes(self, modes):
        for m in modes:
            try:
                del self.modes[m]
            except KeyError:
                pass

    def setPrivateModes(self, modes):
        """
        Enable the given modes.

        Track which modes have been enabled so that the implementations of
        other L{insults.ITerminalTransport} methods can be properly
        implemented to respect these settings.

        @see: L{resetPrivateModes}
        @see: L{insults.ITerminalTransport.setPrivateModes}
        """
        for m in modes:
            self.privateModes[m] = True

    def resetPrivateModes(self, modes):
        """
        Disable the given modes.

        @see: L{setPrivateModes}
        @see: L{insults.ITerminalTransport.resetPrivateModes}
        """
        for m in modes:
            try:
                del self.privateModes[m]
            except KeyError:
                pass

    def applicationKeypadMode(self):
        self.keypadMode = 'app'

    def numericKeypadMode(self):
        self.keypadMode = 'num'

    def selectCharacterSet(self, charSet, which):
        self.charsets[which] = charSet

    def shiftIn(self):
        self.activeCharset = insults.G0

    def shiftOut(self):
        self.activeCharset = insults.G1

    def singleShift2(self):
        oldActiveCharset = self.activeCharset
        self.activeCharset = insults.G2
        f = self.insertAtCursor
        def insertAtCursor(b):
            f(b)
            del self.insertAtCursor
            self.activeCharset = oldActiveCharset
        self.insertAtCursor = insertAtCursor

    def singleShift3(self):
        oldActiveCharset = self.activeCharset
        self.activeCharset = insults.G3
        f = self.insertAtCursor
        def insertAtCursor(b):
            f(b)
            del self.insertAtCursor
            self.activeCharset = oldActiveCharset
        self.insertAtCursor = insertAtCursor

    def selectGraphicRendition(self, *attributes):
        for a in attributes:
            if a == insults.NORMAL:
                self.graphicRendition = {
                    'bold': False,
                    'underline': False,
                    'blink': False,
                    'reverseVideo': False,
                    'foreground': WHITE,
                    'background': BLACK}
            elif a == insults.BOLD:
                self.graphicRendition['bold'] = True
            elif a == insults.UNDERLINE:
                self.graphicRendition['underline'] = True
            elif a == insults.BLINK:
                self.graphicRendition['blink'] = True
            elif a == insults.REVERSE_VIDEO:
                self.graphicRendition['reverseVideo'] = True
            else:
                try:
                    v = int(a)
                except ValueError:
                    log.msg("Unknown graphic rendition attribute: " + repr(a))
                else:
                    if FOREGROUND <= v <= FOREGROUND + N_COLORS:
                        self.graphicRendition['foreground'] = v - FOREGROUND
                    elif BACKGROUND <= v <= BACKGROUND + N_COLORS:
                        self.graphicRendition['background'] = v - BACKGROUND
                    else:
                        log.msg("Unknown graphic rendition attribute: " + repr(a))

    def eraseLine(self):
        self.lines[self.y] = self._emptyLine(self.width)

    def eraseToLineEnd(self):
        width = self.width - self.x
        self.lines[self.y][self.x:] = self._emptyLine(width)

    def eraseToLineBeginning(self):
        self.lines[self.y][:self.x + 1] = self._emptyLine(self.x + 1)

    def eraseDisplay(self):
        self.lines = [self._emptyLine(self.width)
                      for i in xrange(self.height)]

    def eraseToDisplayEnd(self):
        self.eraseToLineEnd()
        height = self.height - self.y - 1
        self.lines[self.y + 1:] = [self._emptyLine(self.width) for i in range(height)]

    def eraseToDisplayBeginning(self):
        self.eraseToLineBeginning()
        self.lines[:self.y] = [self._emptyLine(self.width) for i in range(self.y)]

    def deleteCharacter(self, n=1):
        del self.lines[self.y][self.x:self.x+n]
        self.lines[self.y].extend(self._emptyLine(min(self.width - self.x, n)))

    def insertLine(self, n=1):
        self.lines[self.y:self.y] = [self._emptyLine(self.width) for i in range(n)]
        del self.lines[self.height:]

    def deleteLine(self, n=1):
        del self.lines[self.y:self.y+n]
        self.lines.extend([self._emptyLine(self.width) for i in range(n)])

    def reportCursorPosition(self):
        return (self.x, self.y)

    def reset(self):
        self.home = insults.Vector(0, 0)
        self.x = self.y = 0
        self.modes = {}
        self.privateModes = {}
        self.setPrivateModes([insults.privateModes.AUTO_WRAP,
                              insults.privateModes.CURSOR_MODE])
        self.numericKeypad = 'app'
        self.activeCharset = insults.G0
        self.graphicRendition = {
            'bold': False,
            'underline': False,
            'blink': False,
            'reverseVideo': False,
            'foreground': WHITE,
            'background': BLACK}
        self.charsets = {
            insults.G0: insults.CS_US,
            insults.G1: insults.CS_US,
            insults.G2: insults.CS_ALTERNATE,
            insults.G3: insults.CS_ALTERNATE_SPECIAL}
        self.eraseDisplay()

    def unhandledControlSequence(self, buf):
        print 'Could
not handle', repr(buf) def __str__(self): lines = [] for L in self.lines: buf = [] length = 0 for (ch, attr) in L: if ch is not self.void: buf.append(ch) length = len(buf) else: buf.append(self.fill) lines.append(''.join(buf[:length])) return '\n'.join(lines) class ExpectationTimeout(Exception): pass class ExpectableBuffer(TerminalBuffer): _mark = 0 def connectionMade(self): TerminalBuffer.connectionMade(self) self._expecting = [] def write(self, bytes): TerminalBuffer.write(self, bytes) self._checkExpected() def cursorHome(self): TerminalBuffer.cursorHome(self) self._mark = 0 def _timeoutExpected(self, d): d.errback(ExpectationTimeout()) self._checkExpected() def _checkExpected(self): s = str(self)[self._mark:] while self._expecting: expr, timer, deferred = self._expecting[0] if timer and not timer.active(): del self._expecting[0] continue for match in expr.finditer(s): if timer: timer.cancel() del self._expecting[0] self._mark += match.end() s = s[match.end():] deferred.callback(match) break else: return def expect(self, expression, timeout=None, scheduler=reactor): d = defer.Deferred() timer = None if timeout: timer = scheduler.callLater(timeout, self._timeoutExpected, d) self._expecting.append((re.compile(expression), timer, d)) self._checkExpected() return d __all__ = ['CharacterAttribute', 'TerminalBuffer', 'ExpectableBuffer']
apache-2.0
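A minimal usage sketch for the TerminalBuffer defined in the helper.py record above, assuming Twisted's conch package is importable; every name comes from that file, and the sketch is illustrative only, not part of the record.

from twisted.conch.insults.helper import TerminalBuffer

term = TerminalBuffer()
term.connectionMade()          # reset() builds the 80x24 in-memory screen
term.write('hello\nworld')     # '\n' is expanded to CR/LF, advancing the cursor
print(str(term).rstrip())      # __str__ renders the buffer: 'hello' then 'world'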
sanghinitin/golismero
thirdparty_libs/django/middleware/doc.py
327
1197
from django.conf import settings from django import http class XViewMiddleware(object): """ Adds an X-View header to internal HEAD requests -- used by the documentation system. """ def process_view(self, request, view_func, view_args, view_kwargs): """ If the request method is HEAD and either the IP is internal or the user is a logged-in staff member, quickly return with an x-header indicating the view function. This is used by the documentation module to look up the view function for an arbitrary page. """ assert hasattr(request, 'user'), ( "The XView middleware requires authentication middleware to be " "installed. Edit your MIDDLEWARE_CLASSES setting to insert " "'django.contrib.auth.middleware.AuthenticationMiddleware'.") if request.method == 'HEAD' and (request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS or (request.user.is_active and request.user.is_staff)): response = http.HttpResponse() response['X-View'] = "%s.%s" % (view_func.__module__, view_func.__name__) return response
gpl-2.0
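A short sketch of exercising the XViewMiddleware record above with Django's test client, assuming the middleware and 'django.contrib.auth.middleware.AuthenticationMiddleware' are both enabled and '127.0.0.1' is listed in settings.INTERNAL_IPS; the URL '/' and the printed view path are hypothetical.

from django.test import Client

client = Client(REMOTE_ADDR='127.0.0.1')  # defaults are merged into each request's environ
response = client.head('/')               # only HEAD requests receive the header
print(response['X-View'])                 # e.g. 'myapp.views.index' (hypothetical)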
idea4bsd/idea4bsd
python/lib/Lib/site-packages/django/contrib/auth/tests/__init__.py
231
1092
from django.contrib.auth.tests.auth_backends import (BackendTest, RowlevelBackendTest, AnonymousUserBackendTest, NoAnonymousUserBackendTest, NoBackendsTest, InActiveUserBackendTest, NoInActiveUserBackendTest) from django.contrib.auth.tests.basic import BasicTestCase from django.contrib.auth.tests.decorators import LoginRequiredTestCase from django.contrib.auth.tests.forms import (UserCreationFormTest, AuthenticationFormTest, SetPasswordFormTest, PasswordChangeFormTest, UserChangeFormTest, PasswordResetFormTest) from django.contrib.auth.tests.remote_user import (RemoteUserTest, RemoteUserNoCreateTest, RemoteUserCustomTest) from django.contrib.auth.tests.models import ProfileTestCase from django.contrib.auth.tests.signals import SignalTestCase from django.contrib.auth.tests.tokens import TokenGeneratorTest from django.contrib.auth.tests.views import (PasswordResetTest, ChangePasswordTest, LoginTest, LogoutTest, LoginURLSettings) from django.contrib.auth.tests.permissions import TestAuthPermissions # The password for the fixture data users is 'password'
apache-2.0
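The record above only re-exports test cases so Django's old-style test runner can discover them under one package; below is a sketch of collecting the same classes into a plain unittest suite, assuming a configured Django settings module (the collection logic is an assumption, not part of the record).

import unittest
from django.contrib.auth import tests as auth_tests

# Gather every TestCase the package __init__ re-exports into one suite.
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for name in dir(auth_tests):
    obj = getattr(auth_tests, name)
    if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
        suite.addTests(loader.loadTestsFromTestCase(obj))
print('%d tests collected' % suite.countTestCases())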
voodka/ghostbakup
node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/math.py
291
93298
# -*- coding: utf-8 -*- """ pygments.lexers.math ~~~~~~~~~~~~~~~~~~~~ Lexers for math languages. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.util import shebang_matches from pygments.lexer import Lexer, RegexLexer, bygroups, include, \ combined, do_insertions from pygments.token import Comment, String, Punctuation, Keyword, Name, \ Operator, Number, Text, Generic from pygments.lexers.agile import PythonLexer from pygments.lexers import _scilab_builtins from pygments.lexers import _stan_builtins __all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer', 'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer', 'IDLLexer', 'RdLexer', 'IgorLexer'] class JuliaLexer(RegexLexer): """ For `Julia <http://julialang.org/>`_ source code. *New in Pygments 1.6.* """ name = 'Julia' aliases = ['julia','jl'] filenames = ['*.jl'] mimetypes = ['text/x-julia','application/x-julia'] builtins = [ 'exit','whos','edit','load','is','isa','isequal','typeof','tuple', 'ntuple','uid','hash','finalizer','convert','promote','subtype', 'typemin','typemax','realmin','realmax','sizeof','eps','promote_type', 'method_exists','applicable','invoke','dlopen','dlsym','system', 'error','throw','assert','new','Inf','Nan','pi','im', ] tokens = { 'root': [ (r'\n', Text), (r'[^\S\n]+', Text), (r'#.*$', Comment), (r'[]{}:(),;[@]', Punctuation), (r'\\\n', Text), (r'\\', Text), # keywords (r'(begin|while|for|in|return|break|continue|' r'macro|quote|let|if|elseif|else|try|catch|end|' r'bitstype|ccall|do|using|module|import|export|' r'importall|baremodule|immutable)\b', Keyword), (r'(local|global|const)\b', Keyword.Declaration), (r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64' r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b', Keyword.Type), # functions (r'(function)((?:\s|\\\s)+)', bygroups(Keyword,Name.Function), 'funcname'), # types (r'(type|typealias|abstract)((?:\s|\\\s)+)', bygroups(Keyword,Name.Class), 'typename'), # operators (r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator), (r'\.\*|\.\^|\.\\|\.\/|\\', Operator), # builtins ('(' + '|'.join(builtins) + r')\b', Name.Builtin), # backticks (r'`(?s).*?`', String.Backtick), # chars (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|" r"\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char), # try to match trailing transpose (r'(?<=[.\w\)\]])\'+', Operator), # strings (r'(?:[IL])"', String, 'string'), (r'[E]?"', String, combined('stringescape', 'string')), # names (r'@[a-zA-Z0-9_.]+', Name.Decorator), (r'[a-zA-Z_][a-zA-Z0-9_]*', Name), # numbers (r'(\d+(_\d+)+\.\d*|\d*\.\d+(_\d+)+)([eEf][+-]?[0-9]+)?', Number.Float), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+(_\d+)+[eEf][+-]?[0-9]+', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'0b[01]+(_[01]+)+', Number.Binary), (r'0b[01]+', Number.Binary), (r'0o[0-7]+(_[0-7]+)+', Number.Oct), (r'0o[0-7]+', Number.Oct), (r'0x[a-fA-F0-9]+(_[a-fA-F0-9]+)+', Number.Hex), (r'0x[a-fA-F0-9]+', Number.Hex), (r'\d+(_\d+)+', Number.Integer), (r'\d+', Number.Integer) ], 'funcname': [ ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'), ('\([^\s\w{]{1,2}\)', Operator, '#pop'), ('[^\s\w{]{1,2}', Operator, '#pop'), ], 'typename': [ ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop') ], 'stringescape': [ (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|' r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape) 
], 'string': [ (r'"', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings (r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?', String.Interpol), (r'[^\\"$]+', String), # quotes, dollar signs, and backslashes must be parsed one at a time (r'["\\]', String), # unhandled string formatting sign (r'\$', String) ], } def analyse_text(text): return shebang_matches(text, r'julia') line_re = re.compile('.*?\n') class JuliaConsoleLexer(Lexer): """ For Julia console sessions. Modeled after MatlabSessionLexer. *New in Pygments 1.6.* """ name = 'Julia console' aliases = ['jlcon'] def get_tokens_unprocessed(self, text): jllexer = JuliaLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() if line.startswith('julia>'): insertions.append((len(curcode), [(0, Generic.Prompt, line[:3])])) curcode += line[3:] elif line.startswith(' '): idx = len(curcode) # without is showing error on same line as before...? line = "\n" + line token = (0, Generic.Traceback, line) insertions.append((idx, [token])) else: if curcode: for item in do_insertions( insertions, jllexer.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: # or item: for item in do_insertions( insertions, jllexer.get_tokens_unprocessed(curcode)): yield item class MuPADLexer(RegexLexer): """ A `MuPAD <http://www.mupad.com>`_ lexer. Contributed by Christopher Creutzig <christopher@creutzig.de>. *New in Pygments 0.8.* """ name = 'MuPAD' aliases = ['mupad'] filenames = ['*.mu'] tokens = { 'root' : [ (r'//.*?$', Comment.Single), (r'/\*', Comment.Multiline, 'comment'), (r'"(?:[^"\\]|\\.)*"', String), (r'\(|\)|\[|\]|\{|\}', Punctuation), (r'''(?x)\b(?: next|break|end| axiom|end_axiom|category|end_category|domain|end_domain|inherits| if|%if|then|elif|else|end_if| case|of|do|otherwise|end_case| while|end_while| repeat|until|end_repeat| for|from|to|downto|step|end_for| proc|local|option|save|begin|end_proc| delete|frame )\b''', Keyword), (r'''(?x)\b(?: DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR| DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT| DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC| DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR )\b''', Name.Class), (r'''(?x)\b(?: PI|EULER|E|CATALAN| NIL|FAIL|undefined|infinity| TRUE|FALSE|UNKNOWN )\b''', Name.Constant), (r'\b(?:dom|procname)\b', Name.Builtin.Pseudo), (r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator), (r'''(?x)\b(?: and|or|not|xor| assuming| div|mod| union|minus|intersect|in|subset )\b''', Operator.Word), (r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number), #(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin), (r'''(?x) ((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''', bygroups(Name.Function, Text, Punctuation)), (r'''(?x) (?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`) (?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable), (r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number), (r'\.[0-9]+(?:e[0-9]+)?', Number), (r'.', Text) ], 'comment' : [ (r'[^*/]', Comment.Multiline), (r'/\*', Comment.Multiline, '#push'), (r'\*/', Comment.Multiline, '#pop'), (r'[*/]', Comment.Multiline) ] } class MatlabLexer(RegexLexer): """ For Matlab source code. *New in Pygments 0.10.* """ name = 'Matlab' aliases = ['matlab'] filenames = ['*.m'] mimetypes = ['text/matlab'] # # These lists are generated automatically. 
# Run the following in bash shell: # # for f in elfun specfun elmat; do # echo -n "$f = " # matlab -nojvm -r "help $f;exit;" | perl -ne \ # 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}' # done # # elfun: Elementary math functions # specfun: Special Math functions # elmat: Elementary matrices and matrix manipulation # # taken from Matlab version 7.4.0.336 (R2007a) # elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh", "acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2", "atanh","sec","secd","sech","asec","asecd","asech","csc","cscd", "csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd", "acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2", "realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs", "angle","complex","conj","imag","real","unwrap","isreal","cplxpair", "fix","floor","ceil","round","mod","rem","sign"] specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta", "betainc","betaln","ellipj","ellipke","erf","erfc","erfcx", "erfinv","expint","gamma","gammainc","gammaln","psi","legendre", "cross","dot","factor","isprime","primes","gcd","lcm","rat", "rats","perms","nchoosek","factorial","cart2sph","cart2pol", "pol2cart","sph2cart","hsv2rgb","rgb2hsv"] elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace", "freqspace","meshgrid","accumarray","size","length","ndims","numel", "disp","isempty","isequal","isequalwithequalnans","cat","reshape", "diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90", "find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute", "ipermute","shiftdim","circshift","squeeze","isscalar","isvector", "ans","eps","realmax","realmin","pi","i","inf","nan","isnan", "isinf","isfinite","j","why","compan","gallery","hadamard","hankel", "hilb","invhilb","magic","pascal","rosser","toeplitz","vander", "wilkinson"] tokens = { 'root': [ # line starting with '!' is sent as a system command. not sure what # label to use... (r'^!.*', String.Other), (r'%\{\s*\n', Comment.Multiline, 'blockcomment'), (r'%.*$', Comment), (r'^\s*function', Keyword, 'deffunc'), # from 'iskeyword' on version 7.11 (R2010): (r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|' r'events|for|function|global|if|methods|otherwise|parfor|' r'persistent|properties|return|spmd|switch|try|while)\b', Keyword), ("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin), # line continuation with following comment: (r'\.\.\..*$', Comment), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # punctuation: (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), (r'=|:|;', Punctuation), # quote can be transpose, instead of string: # (not great, but handles common cases...) 
(r'(?<=[\w\)\]])\'', Operator), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), (r'(?<![\w\)\]])\'', String, 'string'), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'.', Text), ], 'string': [ (r'[^\']*\'', String, '#pop') ], 'blockcomment': [ (r'^\s*%\}', Comment.Multiline, '#pop'), (r'^.*\n', Comment.Multiline), (r'.', Comment.Multiline), ], 'deffunc': [ (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation, Text.Whitespace, Name.Function, Punctuation, Text, Punctuation, Text.Whitespace), '#pop'), ], } def analyse_text(text): if re.match('^\s*%', text, re.M): # comment return 0.9 elif re.match('^!\w+', text, re.M): # system cmd return 0.9 return 0.1 line_re = re.compile('.*?\n') class MatlabSessionLexer(Lexer): """ For Matlab sessions. Modeled after PythonConsoleLexer. Contributed by Ken Schutte <kschutte@csail.mit.edu>. *New in Pygments 0.10.* """ name = 'Matlab session' aliases = ['matlabsession'] def get_tokens_unprocessed(self, text): mlexer = MatlabLexer(**self.options) curcode = '' insertions = [] for match in line_re.finditer(text): line = match.group() if line.startswith('>>'): insertions.append((len(curcode), [(0, Generic.Prompt, line[:3])])) curcode += line[3:] elif line.startswith('???'): idx = len(curcode) # without is showing error on same line as before...? line = "\n" + line token = (0, Generic.Traceback, line) insertions.append((idx, [token])) else: if curcode: for item in do_insertions( insertions, mlexer.get_tokens_unprocessed(curcode)): yield item curcode = '' insertions = [] yield match.start(), Generic.Output, line if curcode: # or item: for item in do_insertions( insertions, mlexer.get_tokens_unprocessed(curcode)): yield item class OctaveLexer(RegexLexer): """ For GNU Octave source code. *New in Pygments 1.5.* """ name = 'Octave' aliases = ['octave'] filenames = ['*.m'] mimetypes = ['text/octave'] # These lists are generated automatically. 
# Run the following in bash shell: # # First dump all of the Octave manual into a plain text file: # # $ info octave --subnodes -o octave-manual # # Now grep through it: # for i in \ # "Built-in Function" "Command" "Function File" \ # "Loadable Function" "Mapping Function"; # do # perl -e '@name = qw('"$i"'); # print lc($name[0]),"_kw = [\n"'; # # perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \ # octave-manual | sort | uniq ; # echo "]" ; # echo; # done # taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011) builtin_kw = [ "addlistener", "addpath", "addproperty", "all", "and", "any", "argnames", "argv", "assignin", "atexit", "autoload", "available_graphics_toolkits", "beep_on_error", "bitand", "bitmax", "bitor", "bitshift", "bitxor", "cat", "cell", "cellstr", "char", "class", "clc", "columns", "command_line_path", "completion_append_char", "completion_matches", "complex", "confirm_recursive_rmdir", "cputime", "crash_dumps_octave_core", "ctranspose", "cumprod", "cumsum", "debug_on_error", "debug_on_interrupt", "debug_on_warning", "default_save_options", "dellistener", "diag", "diff", "disp", "doc_cache_file", "do_string_escapes", "double", "drawnow", "e", "echo_executing_commands", "eps", "eq", "errno", "errno_list", "error", "eval", "evalin", "exec", "exist", "exit", "eye", "false", "fclear", "fclose", "fcntl", "fdisp", "feof", "ferror", "feval", "fflush", "fgetl", "fgets", "fieldnames", "file_in_loadpath", "file_in_path", "filemarker", "filesep", "find_dir_in_path", "fixed_point_format", "fnmatch", "fopen", "fork", "formula", "fprintf", "fputs", "fread", "freport", "frewind", "fscanf", "fseek", "fskipl", "ftell", "functions", "fwrite", "ge", "genpath", "get", "getegid", "getenv", "geteuid", "getgid", "getpgrp", "getpid", "getppid", "getuid", "glob", "gt", "gui_mode", "history_control", "history_file", "history_size", "history_timestamp_format_string", "home", "horzcat", "hypot", "ifelse", "ignore_function_time_stamp", "inferiorto", "info_file", "info_program", "inline", "input", "intmax", "intmin", "ipermute", "is_absolute_filename", "isargout", "isbool", "iscell", "iscellstr", "ischar", "iscomplex", "isempty", "isfield", "isfloat", "isglobal", "ishandle", "isieee", "isindex", "isinteger", "islogical", "ismatrix", "ismethod", "isnull", "isnumeric", "isobject", "isreal", "is_rooted_relative_filename", "issorted", "isstruct", "isvarname", "kbhit", "keyboard", "kill", "lasterr", "lasterror", "lastwarn", "ldivide", "le", "length", "link", "linspace", "logical", "lstat", "lt", "make_absolute_filename", "makeinfo_program", "max_recursion_depth", "merge", "methods", "mfilename", "minus", "mislocked", "mkdir", "mkfifo", "mkstemp", "mldivide", "mlock", "mouse_wheel_zoom", "mpower", "mrdivide", "mtimes", "munlock", "nargin", "nargout", "native_float_format", "ndims", "ne", "nfields", "nnz", "norm", "not", "numel", "nzmax", "octave_config_info", "octave_core_file_limit", "octave_core_file_name", "octave_core_file_options", "ones", "or", "output_max_field_width", "output_precision", "page_output_immediately", "page_screen_output", "path", "pathsep", "pause", "pclose", "permute", "pi", "pipe", "plus", "popen", "power", "print_empty_dimensions", "printf", "print_struct_array_contents", "prod", "program_invocation_name", "program_name", "putenv", "puts", "pwd", "quit", "rats", "rdivide", "readdir", "readlink", "read_readline_init_file", "realmax", "realmin", "rehash", "rename", "repelems", "re_read_readline_init_file", "reset", "reshape", "resize", "restoredefaultpath", 
"rethrow", "rmdir", "rmfield", "rmpath", "rows", "save_header_format_string", "save_precision", "saving_history", "scanf", "set", "setenv", "shell_cmd", "sighup_dumps_octave_core", "sigterm_dumps_octave_core", "silent_functions", "single", "size", "size_equal", "sizemax", "sizeof", "sleep", "source", "sparse_auto_mutate", "split_long_rows", "sprintf", "squeeze", "sscanf", "stat", "stderr", "stdin", "stdout", "strcmp", "strcmpi", "string_fill_char", "strncmp", "strncmpi", "struct", "struct_levels_to_print", "strvcat", "subsasgn", "subsref", "sum", "sumsq", "superiorto", "suppress_verbose_help_message", "symlink", "system", "tic", "tilde_expand", "times", "tmpfile", "tmpnam", "toc", "toupper", "transpose", "true", "typeinfo", "umask", "uminus", "uname", "undo_string_escapes", "unlink", "uplus", "upper", "usage", "usleep", "vec", "vectorize", "vertcat", "waitpid", "warning", "warranty", "whos_line_format", "yes_or_no", "zeros", "inf", "Inf", "nan", "NaN"] command_kw = [ "close", "load", "who", "whos", ] function_kw = [ "accumarray", "accumdim", "acosd", "acotd", "acscd", "addtodate", "allchild", "ancestor", "anova", "arch_fit", "arch_rnd", "arch_test", "area", "arma_rnd", "arrayfun", "ascii", "asctime", "asecd", "asind", "assert", "atand", "autoreg_matrix", "autumn", "axes", "axis", "bar", "barh", "bartlett", "bartlett_test", "beep", "betacdf", "betainv", "betapdf", "betarnd", "bicgstab", "bicubic", "binary", "binocdf", "binoinv", "binopdf", "binornd", "bitcmp", "bitget", "bitset", "blackman", "blanks", "blkdiag", "bone", "box", "brighten", "calendar", "cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf", "cauchy_rnd", "caxis", "celldisp", "center", "cgs", "chisquare_test_homogeneity", "chisquare_test_independence", "circshift", "cla", "clabel", "clf", "clock", "cloglog", "closereq", "colon", "colorbar", "colormap", "colperm", "comet", "common_size", "commutation_matrix", "compan", "compare_versions", "compass", "computer", "cond", "condest", "contour", "contourc", "contourf", "contrast", "conv", "convhull", "cool", "copper", "copyfile", "cor", "corrcoef", "cor_test", "cosd", "cotd", "cov", "cplxpair", "cross", "cscd", "cstrcat", "csvread", "csvwrite", "ctime", "cumtrapz", "curl", "cut", "cylinder", "date", "datenum", "datestr", "datetick", "datevec", "dblquad", "deal", "deblank", "deconv", "delaunay", "delaunayn", "delete", "demo", "detrend", "diffpara", "diffuse", "dir", "discrete_cdf", "discrete_inv", "discrete_pdf", "discrete_rnd", "display", "divergence", "dlmwrite", "dos", "dsearch", "dsearchn", "duplication_matrix", "durbinlevinson", "ellipsoid", "empirical_cdf", "empirical_inv", "empirical_pdf", "empirical_rnd", "eomday", "errorbar", "etime", "etreeplot", "example", "expcdf", "expinv", "expm", "exppdf", "exprnd", "ezcontour", "ezcontourf", "ezmesh", "ezmeshc", "ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor", "factorial", "fail", "fcdf", "feather", "fftconv", "fftfilt", "fftshift", "figure", "fileattrib", "fileparts", "fill", "findall", "findobj", "findstr", "finv", "flag", "flipdim", "fliplr", "flipud", "fpdf", "fplot", "fractdiff", "freqz", "freqz_plot", "frnd", "fsolve", "f_test_regression", "ftp", "fullfile", "fzero", "gamcdf", "gaminv", "gampdf", "gamrnd", "gca", "gcbf", "gcbo", "gcf", "genvarname", "geocdf", "geoinv", "geopdf", "geornd", "getfield", "ginput", "glpk", "gls", "gplot", "gradient", "graphics_toolkit", "gray", "grid", "griddata", "griddatan", "gtext", "gunzip", "gzip", "hadamard", "hamming", "hankel", "hanning", "hggroup", "hidden", "hilb", "hist", "histc", "hold", 
"hot", "hotelling_test", "housh", "hsv", "hurst", "hygecdf", "hygeinv", "hygepdf", "hygernd", "idivide", "ifftshift", "image", "imagesc", "imfinfo", "imread", "imshow", "imwrite", "index", "info", "inpolygon", "inputname", "interpft", "interpn", "intersect", "invhilb", "iqr", "isa", "isdefinite", "isdir", "is_duplicate_entry", "isequal", "isequalwithequalnans", "isfigure", "ishermitian", "ishghandle", "is_leap_year", "isletter", "ismac", "ismember", "ispc", "isprime", "isprop", "isscalar", "issquare", "isstrprop", "issymmetric", "isunix", "is_valid_file_id", "isvector", "jet", "kendall", "kolmogorov_smirnov_cdf", "kolmogorov_smirnov_test", "kruskal_wallis_test", "krylov", "kurtosis", "laplace_cdf", "laplace_inv", "laplace_pdf", "laplace_rnd", "legend", "legendre", "license", "line", "linkprop", "list_primes", "loadaudio", "loadobj", "logistic_cdf", "logistic_inv", "logistic_pdf", "logistic_rnd", "logit", "loglog", "loglogerr", "logm", "logncdf", "logninv", "lognpdf", "lognrnd", "logspace", "lookfor", "ls_command", "lsqnonneg", "magic", "mahalanobis", "manova", "matlabroot", "mcnemar_test", "mean", "meansq", "median", "menu", "mesh", "meshc", "meshgrid", "meshz", "mexext", "mget", "mkpp", "mode", "moment", "movefile", "mpoles", "mput", "namelengthmax", "nargchk", "nargoutchk", "nbincdf", "nbininv", "nbinpdf", "nbinrnd", "nchoosek", "ndgrid", "newplot", "news", "nonzeros", "normcdf", "normest", "norminv", "normpdf", "normrnd", "now", "nthroot", "null", "ocean", "ols", "onenormest", "optimget", "optimset", "orderfields", "orient", "orth", "pack", "pareto", "parseparams", "pascal", "patch", "pathdef", "pcg", "pchip", "pcolor", "pcr", "peaks", "periodogram", "perl", "perms", "pie", "pink", "planerot", "playaudio", "plot", "plotmatrix", "plotyy", "poisscdf", "poissinv", "poisspdf", "poissrnd", "polar", "poly", "polyaffine", "polyarea", "polyderiv", "polyfit", "polygcd", "polyint", "polyout", "polyreduce", "polyval", "polyvalm", "postpad", "powerset", "ppder", "ppint", "ppjumps", "ppplot", "ppval", "pqpnonneg", "prepad", "primes", "print", "print_usage", "prism", "probit", "qp", "qqplot", "quadcc", "quadgk", "quadl", "quadv", "quiver", "qzhess", "rainbow", "randi", "range", "rank", "ranks", "rat", "reallog", "realpow", "realsqrt", "record", "rectangle_lw", "rectangle_sw", "rectint", "refresh", "refreshdata", "regexptranslate", "repmat", "residue", "ribbon", "rindex", "roots", "rose", "rosser", "rotdim", "rref", "run", "run_count", "rundemos", "run_test", "runtests", "saveas", "saveaudio", "saveobj", "savepath", "scatter", "secd", "semilogx", "semilogxerr", "semilogy", "semilogyerr", "setaudio", "setdiff", "setfield", "setxor", "shading", "shift", "shiftdim", "sign_test", "sinc", "sind", "sinetone", "sinewave", "skewness", "slice", "sombrero", "sortrows", "spaugment", "spconvert", "spdiags", "spearman", "spectral_adf", "spectral_xdf", "specular", "speed", "spencer", "speye", "spfun", "sphere", "spinmap", "spline", "spones", "sprand", "sprandn", "sprandsym", "spring", "spstats", "spy", "sqp", "stairs", "statistics", "std", "stdnormal_cdf", "stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd", "stem", "stft", "strcat", "strchr", "strjust", "strmatch", "strread", "strsplit", "strtok", "strtrim", "strtrunc", "structfun", "studentize", "subplot", "subsindex", "subspace", "substr", "substruct", "summer", "surf", "surface", "surfc", "surfl", "surfnorm", "svds", "swapbytes", "sylvester_matrix", "symvar", "synthesis", "table", "tand", "tar", "tcdf", "tempdir", "tempname", "test", "text", "textread", 
"textscan", "tinv", "title", "toeplitz", "tpdf", "trace", "trapz", "treelayout", "treeplot", "triangle_lw", "triangle_sw", "tril", "trimesh", "triplequad", "triplot", "trisurf", "triu", "trnd", "tsearchn", "t_test", "t_test_regression", "type", "unidcdf", "unidinv", "unidpdf", "unidrnd", "unifcdf", "unifinv", "unifpdf", "unifrnd", "union", "unique", "unix", "unmkpp", "unpack", "untabify", "untar", "unwrap", "unzip", "u_test", "validatestring", "vander", "var", "var_test", "vech", "ver", "version", "view", "voronoi", "voronoin", "waitforbuttonpress", "wavread", "wavwrite", "wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday", "welch_test", "what", "white", "whitebg", "wienrnd", "wilcoxon_test", "wilkinson", "winter", "xlabel", "xlim", "ylabel", "yulewalker", "zip", "zlabel", "z_test", ] loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli", "besselj", "besselk", "bessely", "bitpack", "bsxfun", "builtin", "ccolamd", "cellfun", "cellslices", "chol", "choldelete", "cholinsert", "cholinv", "cholshift", "cholupdate", "colamd", "colloc", "convhulln", "convn", "csymamd", "cummax", "cummin", "daspk", "daspk_options", "dasrt", "dasrt_options", "dassl", "dassl_options", "dbclear", "dbdown", "dbstack", "dbstatus", "dbstop", "dbtype", "dbup", "dbwhere", "det", "dlmread", "dmperm", "dot", "eig", "eigs", "endgrent", "endpwent", "etree", "fft", "fftn", "fftw", "filter", "find", "full", "gcd", "getgrent", "getgrgid", "getgrnam", "getpwent", "getpwnam", "getpwuid", "getrusage", "givens", "gmtime", "gnuplot_binary", "hess", "ifft", "ifftn", "inv", "isdebugmode", "issparse", "kron", "localtime", "lookup", "lsode", "lsode_options", "lu", "luinc", "luupdate", "matrix_type", "max", "min", "mktime", "pinv", "qr", "qrdelete", "qrinsert", "qrshift", "qrupdate", "quad", "quad_options", "qz", "rand", "rande", "randg", "randn", "randp", "randperm", "rcond", "regexp", "regexpi", "regexprep", "schur", "setgrent", "setpwent", "sort", "spalloc", "sparse", "spparms", "sprank", "sqrtm", "strfind", "strftime", "strptime", "strrep", "svd", "svd_driver", "syl", "symamd", "symbfact", "symrcm", "time", "tsearch", "typecast", "urlread", "urlwrite", ] mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc", "acsch", "angle", "arg", "asec", "asech", "asin", "asinh", "atan", "atanh", "beta", "betainc", "betaln", "bincoeff", "cbrt", "ceil", "conj", "cos", "cosh", "cot", "coth", "csc", "csch", "erf", "erfc", "erfcx", "erfinv", "exp", "finite", "fix", "floor", "fmod", "gamma", "gammainc", "gammaln", "imag", "isalnum", "isalpha", "isascii", "iscntrl", "isdigit", "isfinite", "isgraph", "isinf", "islower", "isna", "isnan", "isprint", "ispunct", "isspace", "isupper", "isxdigit", "lcm", "lgamma", "log", "lower", "mod", "real", "rem", "round", "roundb", "sec", "sech", "sign", "sin", "sinh", "sqrt", "tan", "tanh", "toascii", "tolower", "xor", ] builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA", "OCTAVE_HOME", "OCTAVE_VERSION", "PAGER", "PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET", "SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO", "S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE", "WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED", "WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG", "WSTOPSIG", "WTERMSIG", "WUNTRACED", ] tokens = { 'root': [ #We should look into multiline comments (r'[%#].*$', Comment), (r'^\s*function', Keyword, 'deffunc'), # from 'iskeyword' on hg changeset 8cc154f45e37 (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|' r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|' 
r'endevents|endfor|endfunction|endif|endmethods|endproperties|' r'endswitch|endwhile|events|for|function|get|global|if|methods|' r'otherwise|persistent|properties|return|set|static|switch|try|' r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword), ("(" + "|".join( builtin_kw + command_kw + function_kw + loadable_kw + mapping_kw) + r')\b', Name.Builtin), ("(" + "|".join(builtin_consts) + r')\b', Name.Constant), # operators in Octave but not Matlab: (r'-=|!=|!|/=|--', Operator), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators in Octave but not Matlab requiring escape for re: (r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*',Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # punctuation: (r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation), (r'=|:|;', Punctuation), (r'"[^"]*"', String), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), # quote can be transpose, instead of string: # (not great, but handles common cases...) (r'(?<=[\w\)\]])\'', Operator), (r'(?<![\w\)\]])\'', String, 'string'), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'.', Text), ], 'string': [ (r"[^']*'", String, '#pop'), ], 'deffunc': [ (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation, Text.Whitespace, Name.Function, Punctuation, Text, Punctuation, Text.Whitespace), '#pop'), ], } def analyse_text(text): if re.match('^\s*[%#]', text, re.M): #Comment return 0.1 class ScilabLexer(RegexLexer): """ For Scilab source code. *New in Pygments 1.5.* """ name = 'Scilab' aliases = ['scilab'] filenames = ['*.sci', '*.sce', '*.tst'] mimetypes = ['text/scilab'] tokens = { 'root': [ (r'//.*?$', Comment.Single), (r'^\s*function', Keyword, 'deffunc'), (r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|' r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|' r'endevents|endfor|endfunction|endif|endmethods|endproperties|' r'endswitch|endwhile|events|for|function|get|global|if|methods|' r'otherwise|persistent|properties|return|set|static|switch|try|' r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword), ("(" + "|".join(_scilab_builtins.functions_kw + _scilab_builtins.commands_kw + _scilab_builtins.macros_kw ) + r')\b', Name.Builtin), (r'(%s)\b' % "|".join(map(re.escape, _scilab_builtins.builtin_consts)), Name.Constant), # operators: (r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator), # operators requiring escape for re: (r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator), # punctuation: (r'[\[\](){}@.,=:;]', Punctuation), (r'"[^"]*"', String), # quote can be transpose, instead of string: # (not great, but handles common cases...) (r'(?<=[\w\)\]])\'', Operator), (r'(?<![\w\)\]])\'', String, 'string'), (r'(\d+\.\d*|\d*\.\d+)([eEf][+-]?[0-9]+)?', Number.Float), (r'\d+[eEf][+-]?[0-9]+', Number.Float), (r'\d+', Number.Integer), ('[a-zA-Z_][a-zA-Z0-9_]*', Name), (r'.', Text), ], 'string': [ (r"[^']*'", String, '#pop'), (r'.', String, '#pop'), ], 'deffunc': [ (r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)', bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation, Text.Whitespace, Name.Function, Punctuation, Text, Punctuation, Text.Whitespace), '#pop'), ], } class NumPyLexer(PythonLexer): """ A Python lexer recognizing Numerical Python builtins. 
*New in Pygments 0.10.* """ name = 'NumPy' aliases = ['numpy'] # override the mimetypes to not inherit them from python mimetypes = [] filenames = [] EXTRA_KEYWORDS = set([ 'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose', 'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append', 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin', 'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal', 'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange', 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray', 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype', 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett', 'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial', 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman', 'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_', 'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type', 'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate', 'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov', 'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate', 'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide', 'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty', 'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye', 'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill', 'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud', 'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer', 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring', 'generic', 'get_array_wrap', 'get_include', 'get_numarray_include', 'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize', 'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater', 'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram', 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0', 'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info', 'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d', 'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj', 'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf', 'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_', 'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_', 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort', 'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2', 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace', 'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype', 'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min', 'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan', 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum', 'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer', 'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones', 'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload', 'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv', 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod', 'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers', 'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close', 'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require', 'reshape', 'resize', 'restoredot', 
'right_shift', 'rint', 'roll', 'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_', 'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select', 'set_numeric_ops', 'set_printoptions', 'set_string_function', 'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj', 'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape', 'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh', 'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source', 'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std', 'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot', 'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose', 'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict', 'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index', 'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises', 'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like' ]) def get_tokens_unprocessed(self, text): for index, token, value in \ PythonLexer.get_tokens_unprocessed(self, text): if token is Name and value in self.EXTRA_KEYWORDS: yield index, Keyword.Pseudo, value else: yield index, token, value def analyse_text(text): return (shebang_matches(text, r'pythonw?(2(\.\d)?)?') or 'import ' in text[:1000]) \ and ('import numpy' in text or 'from numpy import' in text) class RConsoleLexer(Lexer): """ For R console transcripts or R CMD BATCH output files. """ name = 'RConsole' aliases = ['rconsole', 'rout'] filenames = ['*.Rout'] def get_tokens_unprocessed(self, text): slexer = SLexer(**self.options) current_code_block = '' insertions = [] for match in line_re.finditer(text): line = match.group() if line.startswith('>') or line.startswith('+'): # Colorize the prompt as such, # then put rest of line into current_code_block insertions.append((len(current_code_block), [(0, Generic.Prompt, line[:2])])) current_code_block += line[2:] else: # We have reached a non-prompt line! # If we have stored prompt lines, need to process them first. if current_code_block: # Weave together the prompts and highlight code. for item in do_insertions(insertions, slexer.get_tokens_unprocessed(current_code_block)): yield item # Reset vars for next code block. current_code_block = '' insertions = [] # Now process the actual line itself, this is output from R. yield match.start(), Generic.Output, line # If we happen to end on a code block with nothing after it, need to # process the last code block. This is neither elegant nor DRY so # should be changed. if current_code_block: for item in do_insertions(insertions, slexer.get_tokens_unprocessed(current_code_block)): yield item class SLexer(RegexLexer): """ For S, S-plus, and R source code. 
*New in Pygments 0.10.* """ name = 'S' aliases = ['splus', 's', 'r'] filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile'] mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile'] tokens = { 'comments': [ (r'#.*$', Comment.Single), ], 'valid_name': [ (r'[a-zA-Z][0-9a-zA-Z\._]*', Text), # can begin with ., but not if that is followed by a digit (r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text), ], 'punctuation': [ (r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation), ], 'keywords': [ (r'(if|else|for|while|repeat|in|next|break|return|switch|function)' r'(?![0-9a-zA-Z\._])', Keyword.Reserved) ], 'operators': [ (r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator), (r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator) ], 'builtin_symbols': [ (r'(NULL|NA(_(integer|real|complex|character)_)?|' r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))' r'(?![0-9a-zA-Z\._])', Keyword.Constant), (r'(T|F)\b', Keyword.Variable), ], 'numbers': [ # hex number (r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex), # decimal number (r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?', Number), ], 'statements': [ include('comments'), # whitespaces (r'\s+', Text), (r'`.*?`', String.Backtick), (r'\'', String, 'string_squote'), (r'\"', String, 'string_dquote'), include('builtin_symbols'), include('numbers'), include('keywords'), include('punctuation'), include('operators'), include('valid_name'), ], 'root': [ include('statements'), # blocks: (r'\{|\}', Punctuation), #(r'\{', Punctuation, 'block'), (r'.', Text), ], #'block': [ # include('statements'), # ('\{', Punctuation, '#push'), # ('\}', Punctuation, '#pop') #], 'string_squote': [ (r'([^\'\\]|\\.)*\'', String, '#pop'), ], 'string_dquote': [ (r'([^"\\]|\\.)*"', String, '#pop'), ], } def analyse_text(text): if re.search(r'[a-z0-9_\])\s]<-(?!-)', text): return 0.11 class BugsLexer(RegexLexer): """ Pygments Lexer for `OpenBugs <http://www.openbugs.info/w/>`_ and WinBugs models. *New in Pygments 1.6.* """ name = 'BUGS' aliases = ['bugs', 'winbugs', 'openbugs'] filenames = ['*.bug'] _FUNCTIONS = [ # Scalar functions 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance', 'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log', 'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value', 'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior', 'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh', 'trunc', # Vector functions 'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals', 'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM', 'sd', 'sort', 'sum', ## Special 'D', 'I', 'F', 'T', 'C'] """ OpenBUGS built-in functions From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII This also includes - T, C, I : Truncation and censoring. ``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS. 
- D : ODE - F : Functional http://www.openbugs.info/Examples/Functionals.html """ _DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois', 'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp', 'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar', 'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar', 'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm', 'dmt', 'dwish'] """ OpenBUGS built-in distributions Functions from http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI """ tokens = { 'whitespace' : [ (r"\s+", Text), ], 'comments' : [ # Comments (r'#.*$', Comment.Single), ], 'root': [ # Comments include('comments'), include('whitespace'), # Block start (r'(model)(\s+)({)', bygroups(Keyword.Namespace, Text, Punctuation)), # Reserved Words (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), # Built-in Functions (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS), Name.Builtin), # Regular variable names (r'[A-Za-z][A-Za-z0-9_.]*', Name), # Number Literals (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), # Punctuation (r'\[|\]|\(|\)|:|,|;', Punctuation), # Assignment operators # SLexer makes these tokens Operators. (r'<-|~', Operator), # Infix and prefix operators (r'\+|-|\*|/', Operator), # Block (r'[{}]', Punctuation), ] } def analyse_text(text): if re.search(r"^\s*model\s*{", text, re.M): return 0.7 else: return 0.0 class JagsLexer(RegexLexer): """ Pygments Lexer for JAGS. *New in Pygments 1.6.* """ name = 'JAGS' aliases = ['jags'] filenames = ['*.jag', '*.bug'] ## JAGS _FUNCTIONS = [ 'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh', 'cos', 'cosh', 'cloglog', 'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact', 'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh', 'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin', 'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse', 'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan', # Truncation/Censoring (should I include) 'T', 'I'] # Distributions with density, probability and quartile functions _DISTRIBUTIONS = ['[dpq]%s' % x for x in ['bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp', 'df', 'gamma', 'gen.gamma', 'logis', 'lnorm', 'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']] # Other distributions without density and probability _OTHER_DISTRIBUTIONS = [ 'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper', 'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq', 'dnbinom', 'dweibull', 'ddirich'] tokens = { 'whitespace' : [ (r"\s+", Text), ], 'names' : [ # Regular variable names (r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name), ], 'comments' : [ # do not use stateful comments (r'(?s)/\*.*?\*/', Comment.Multiline), # Comments (r'#.*$', Comment.Single), ], 'root': [ # Comments include('comments'), include('whitespace'), # Block start (r'(model|data)(\s+)({)', bygroups(Keyword.Namespace, Text, Punctuation)), (r'var(?![0-9a-zA-Z\._])', Keyword.Declaration), # Reserved Words (r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved), # Builtins # Need to use lookahead because . 
is a valid char (r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS + _DISTRIBUTIONS + _OTHER_DISTRIBUTIONS), Name.Builtin), # Names include('names'), # Number Literals (r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number), (r'\[|\]|\(|\)|:|,|;', Punctuation), # Assignment operators (r'<-|~', Operator), # # JAGS includes many more than OpenBUGS (r'\+|-|\*|\/|\|\|[&]{2}|[<>=]=?|\^|%.*?%', Operator), (r'[{}]', Punctuation), ] } def analyse_text(text): if re.search(r'^\s*model\s*\{', text, re.M): if re.search(r'^\s*data\s*\{', text, re.M): return 0.9 elif re.search(r'^\s*var', text, re.M): return 0.9 else: return 0.3 else: return 0 class StanLexer(RegexLexer): """Pygments Lexer for Stan models. The Stan modeling language is specified in the *Stan 1.3.0 Modeling Language Manual* `pdf <http://code.google.com/p/stan/downloads/detail?name=stan-reference-1.3.0.pdf>`_. *New in Pygments 1.6.* """ name = 'Stan' aliases = ['stan'] filenames = ['*.stan'] tokens = { 'whitespace' : [ (r"\s+", Text), ], 'comments' : [ (r'(?s)/\*.*?\*/', Comment.Multiline), # Comments (r'(//|#).*$', Comment.Single), ], 'root': [ # Stan is more restrictive on strings than this regex (r'"[^"]*"', String), # Comments include('comments'), # block start include('whitespace'), # Block start (r'(%s)(\s*)({)' % r'|'.join(('data', r'transformed\s+?data', 'parameters', r'transformed\s+parameters', 'model', r'generated\s+quantities')), bygroups(Keyword.Namespace, Text, Punctuation)), # Reserved Words (r'(%s)\b' % r'|'.join(_stan_builtins.KEYWORDS), Keyword), # Truncation (r'T(?=\s*\[)', Keyword), # Data types (r'(%s)\b' % r'|'.join(_stan_builtins.TYPES), Keyword.Type), # Punctuation (r"[;:,\[\]()]", Punctuation), # Builtin (r'(%s)(?=\s*\()' % r'|'.join(_stan_builtins.FUNCTIONS + _stan_builtins.DISTRIBUTIONS), Name.Builtin), # Special names ending in __, like lp__ (r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo), (r'(%s)\b' % r'|'.join(_stan_builtins.RESERVED), Keyword.Reserved), # Regular variable names (r'[A-Za-z][A-Za-z0-9_]*\b', Name), # Real Literals (r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float), (r'-?[0-9]*\.[0-9]*', Number.Float), # Integer Literals (r'-?[0-9]+', Number.Integer), # Assignment operators # SLexer makes these tokens Operators. (r'<-|~', Operator), # Infix and prefix operators (and = ) (r"\+|-|\.?\*|\.?/|\\|'|==?|!=?|<=?|>=?|\|\||&&", Operator), # Block delimiters (r'[{}]', Punctuation), ] } def analyse_text(text): if re.search(r'^\s*parameters\s*\{', text, re.M): return 1.0 else: return 0.0 class IDLLexer(RegexLexer): """ Pygments Lexer for IDL (Interactive Data Language). 
*New in Pygments 1.6.* """ name = 'IDL' aliases = ['idl'] filenames = ['*.pro'] mimetypes = ['text/idl'] _RESERVED = ['and', 'begin', 'break', 'case', 'common', 'compile_opt', 'continue', 'do', 'else', 'end', 'endcase', 'elseelse', 'endfor', 'endforeach', 'endif', 'endrep', 'endswitch', 'endwhile', 'eq', 'for', 'foreach', 'forward_function', 'function', 'ge', 'goto', 'gt', 'if', 'inherits', 'le', 'lt', 'mod', 'ne', 'not', 'of', 'on_ioerror', 'or', 'pro', 'repeat', 'switch', 'then', 'until', 'while', 'xor'] """Reserved words from: http://www.exelisvis.com/docs/reswords.html""" _BUILTIN_LIB = ['abs', 'acos', 'adapt_hist_equal', 'alog', 'alog10', 'amoeba', 'annotate', 'app_user_dir', 'app_user_dir_query', 'arg_present', 'array_equal', 'array_indices', 'arrow', 'ascii_template', 'asin', 'assoc', 'atan', 'axis', 'a_correlate', 'bandpass_filter', 'bandreject_filter', 'barplot', 'bar_plot', 'beseli', 'beselj', 'beselk', 'besely', 'beta', 'bilinear', 'binary_template', 'bindgen', 'binomial', 'bin_date', 'bit_ffs', 'bit_population', 'blas_axpy', 'blk_con', 'box_cursor', 'breakpoint', 'broyden', 'butterworth', 'bytarr', 'byte', 'byteorder', 'bytscl', 'caldat', 'calendar', 'call_external', 'call_function', 'call_method', 'call_procedure', 'canny', 'catch', 'cd', 'cdf_[0-9a-za-z_]*', 'ceil', 'chebyshev', 'check_math', 'chisqr_cvf', 'chisqr_pdf', 'choldc', 'cholsol', 'cindgen', 'cir_3pnt', 'close', 'cluster', 'cluster_tree', 'clust_wts', 'cmyk_convert', 'colorbar', 'colorize_sample', 'colormap_applicable', 'colormap_gradient', 'colormap_rotation', 'colortable', 'color_convert', 'color_exchange', 'color_quan', 'color_range_map', 'comfit', 'command_line_args', 'complex', 'complexarr', 'complexround', 'compute_mesh_normals', 'cond', 'congrid', 'conj', 'constrained_min', 'contour', 'convert_coord', 'convol', 'convol_fft', 'coord2to3', 'copy_lun', 'correlate', 'cos', 'cosh', 'cpu', 'cramer', 'create_cursor', 'create_struct', 'create_view', 'crossp', 'crvlength', 'cti_test', 'ct_luminance', 'cursor', 'curvefit', 'cvttobm', 'cv_coord', 'cw_animate', 'cw_animate_getp', 'cw_animate_load', 'cw_animate_run', 'cw_arcball', 'cw_bgroup', 'cw_clr_index', 'cw_colorsel', 'cw_defroi', 'cw_field', 'cw_filesel', 'cw_form', 'cw_fslider', 'cw_light_editor', 'cw_light_editor_get', 'cw_light_editor_set', 'cw_orient', 'cw_palette_editor', 'cw_palette_editor_get', 'cw_palette_editor_set', 'cw_pdmenu', 'cw_rgbslider', 'cw_tmpl', 'cw_zoom', 'c_correlate', 'dblarr', 'db_exists', 'dcindgen', 'dcomplex', 'dcomplexarr', 'define_key', 'define_msgblk', 'define_msgblk_from_file', 'defroi', 'defsysv', 'delvar', 'dendrogram', 'dendro_plot', 'deriv', 'derivsig', 'determ', 'device', 'dfpmin', 'diag_matrix', 'dialog_dbconnect', 'dialog_message', 'dialog_pickfile', 'dialog_printersetup', 'dialog_printjob', 'dialog_read_image', 'dialog_write_image', 'digital_filter', 'dilate', 'dindgen', 'dissolve', 'dist', 'distance_measure', 'dlm_load', 'dlm_register', 'doc_library', 'double', 'draw_roi', 'edge_dog', 'efont', 'eigenql', 'eigenvec', 'ellipse', 'elmhes', 'emboss', 'empty', 'enable_sysrtn', 'eof', 'eos_[0-9a-za-z_]*', 'erase', 'erf', 'erfc', 'erfcx', 'erode', 'errorplot', 'errplot', 'estimator_filter', 'execute', 'exit', 'exp', 'expand', 'expand_path', 'expint', 'extrac', 'extract_slice', 'factorial', 'fft', 'filepath', 'file_basename', 'file_chmod', 'file_copy', 'file_delete', 'file_dirname', 'file_expand_path', 'file_info', 'file_lines', 'file_link', 'file_mkdir', 'file_move', 'file_poll_input', 'file_readlink', 'file_same', 'file_search', 
'file_test', 'file_which', 'findgen', 'finite', 'fix', 'flick', 'float', 'floor', 'flow3', 'fltarr', 'flush', 'format_axis_values', 'free_lun', 'fstat', 'fulstr', 'funct', 'fv_test', 'fx_root', 'fz_roots', 'f_cvf', 'f_pdf', 'gamma', 'gamma_ct', 'gauss2dfit', 'gaussfit', 'gaussian_function', 'gaussint', 'gauss_cvf', 'gauss_pdf', 'gauss_smooth', 'getenv', 'getwindows', 'get_drive_list', 'get_dxf_objects', 'get_kbrd', 'get_login_info', 'get_lun', 'get_screen_size', 'greg2jul', 'grib_[0-9a-za-z_]*', 'grid3', 'griddata', 'grid_input', 'grid_tps', 'gs_iter', 'h5[adfgirst]_[0-9a-za-z_]*', 'h5_browser', 'h5_close', 'h5_create', 'h5_get_libversion', 'h5_open', 'h5_parse', 'hanning', 'hash', 'hdf_[0-9a-za-z_]*', 'heap_free', 'heap_gc', 'heap_nosave', 'heap_refcount', 'heap_save', 'help', 'hilbert', 'histogram', 'hist_2d', 'hist_equal', 'hls', 'hough', 'hqr', 'hsv', 'h_eq_ct', 'h_eq_int', 'i18n_multibytetoutf8', 'i18n_multibytetowidechar', 'i18n_utf8tomultibyte', 'i18n_widechartomultibyte', 'ibeta', 'icontour', 'iconvertcoord', 'idelete', 'identity', 'idlexbr_assistant', 'idlitsys_createtool', 'idl_base64', 'idl_validname', 'iellipse', 'igamma', 'igetcurrent', 'igetdata', 'igetid', 'igetproperty', 'iimage', 'image', 'image_cont', 'image_statistics', 'imaginary', 'imap', 'indgen', 'intarr', 'interpol', 'interpolate', 'interval_volume', 'int_2d', 'int_3d', 'int_tabulated', 'invert', 'ioctl', 'iopen', 'iplot', 'ipolygon', 'ipolyline', 'iputdata', 'iregister', 'ireset', 'iresolve', 'irotate', 'ir_filter', 'isa', 'isave', 'iscale', 'isetcurrent', 'isetproperty', 'ishft', 'isocontour', 'isosurface', 'isurface', 'itext', 'itranslate', 'ivector', 'ivolume', 'izoom', 'i_beta', 'journal', 'json_parse', 'json_serialize', 'jul2greg', 'julday', 'keyword_set', 'krig2d', 'kurtosis', 'kw_test', 'l64indgen', 'label_date', 'label_region', 'ladfit', 'laguerre', 'laplacian', 'la_choldc', 'la_cholmprove', 'la_cholsol', 'la_determ', 'la_eigenproblem', 'la_eigenql', 'la_eigenvec', 'la_elmhes', 'la_gm_linear_model', 'la_hqr', 'la_invert', 'la_least_squares', 'la_least_square_equality', 'la_linear_equation', 'la_ludc', 'la_lumprove', 'la_lusol', 'la_svd', 'la_tridc', 'la_trimprove', 'la_triql', 'la_trired', 'la_trisol', 'least_squares_filter', 'leefilt', 'legend', 'legendre', 'linbcg', 'lindgen', 'linfit', 'linkimage', 'list', 'll_arc_distance', 'lmfit', 'lmgr', 'lngamma', 'lnp_test', 'loadct', 'locale_get', 'logical_and', 'logical_or', 'logical_true', 'lon64arr', 'lonarr', 'long', 'long64', 'lsode', 'ludc', 'lumprove', 'lusol', 'lu_complex', 'machar', 'make_array', 'make_dll', 'make_rt', 'map', 'mapcontinents', 'mapgrid', 'map_2points', 'map_continents', 'map_grid', 'map_image', 'map_patch', 'map_proj_forward', 'map_proj_image', 'map_proj_info', 'map_proj_init', 'map_proj_inverse', 'map_set', 'matrix_multiply', 'matrix_power', 'max', 'md_test', 'mean', 'meanabsdev', 'mean_filter', 'median', 'memory', 'mesh_clip', 'mesh_decimate', 'mesh_issolid', 'mesh_merge', 'mesh_numtriangles', 'mesh_obj', 'mesh_smooth', 'mesh_surfacearea', 'mesh_validate', 'mesh_volume', 'message', 'min', 'min_curve_surf', 'mk_html_help', 'modifyct', 'moment', 'morph_close', 'morph_distance', 'morph_gradient', 'morph_hitormiss', 'morph_open', 'morph_thin', 'morph_tophat', 'multi', 'm_correlate', 'ncdf_[0-9a-za-z_]*', 'newton', 'noise_hurl', 'noise_pick', 'noise_scatter', 'noise_slur', 'norm', 'n_elements', 'n_params', 'n_tags', 'objarr', 'obj_class', 'obj_destroy', 'obj_hasmethod', 'obj_isa', 'obj_new', 'obj_valid', 'online_help', 'on_error', 'open', 
'oplot', 'oploterr', 'parse_url', 'particle_trace', 'path_cache', 'path_sep', 'pcomp', 'plot', 'plot3d', 'ploterr', 'plots', 'plot_3dbox', 'plot_field', 'pnt_line', 'point_lun', 'polarplot', 'polar_contour', 'polar_surface', 'poly', 'polyfill', 'polyfillv', 'polygon', 'polyline', 'polyshade', 'polywarp', 'poly_2d', 'poly_area', 'poly_fit', 'popd', 'powell', 'pref_commit', 'pref_get', 'pref_set', 'prewitt', 'primes', 'print', 'printd', 'product', 'profile', 'profiler', 'profiles', 'project_vol', 'psafm', 'pseudo', 'ps_show_fonts', 'ptrarr', 'ptr_free', 'ptr_new', 'ptr_valid', 'pushd', 'p_correlate', 'qgrid3', 'qhull', 'qromb', 'qromo', 'qsimp', 'query_ascii', 'query_bmp', 'query_csv', 'query_dicom', 'query_gif', 'query_image', 'query_jpeg', 'query_jpeg2000', 'query_mrsid', 'query_pict', 'query_png', 'query_ppm', 'query_srf', 'query_tiff', 'query_wav', 'radon', 'randomn', 'randomu', 'ranks', 'rdpix', 'read', 'reads', 'readu', 'read_ascii', 'read_binary', 'read_bmp', 'read_csv', 'read_dicom', 'read_gif', 'read_image', 'read_interfile', 'read_jpeg', 'read_jpeg2000', 'read_mrsid', 'read_pict', 'read_png', 'read_ppm', 'read_spr', 'read_srf', 'read_sylk', 'read_tiff', 'read_wav', 'read_wave', 'read_x11_bitmap', 'read_xwd', 'real_part', 'rebin', 'recall_commands', 'recon3', 'reduce_colors', 'reform', 'region_grow', 'register_cursor', 'regress', 'replicate', 'replicate_inplace', 'resolve_all', 'resolve_routine', 'restore', 'retall', 'return', 'reverse', 'rk4', 'roberts', 'rot', 'rotate', 'round', 'routine_filepath', 'routine_info', 'rs_test', 'r_correlate', 'r_test', 'save', 'savgol', 'scale3', 'scale3d', 'scope_level', 'scope_traceback', 'scope_varfetch', 'scope_varname', 'search2d', 'search3d', 'sem_create', 'sem_delete', 'sem_lock', 'sem_release', 'setenv', 'set_plot', 'set_shading', 'sfit', 'shade_surf', 'shade_surf_irr', 'shade_volume', 'shift', 'shift_diff', 'shmdebug', 'shmmap', 'shmunmap', 'shmvar', 'show3', 'showfont', 'simplex', 'sin', 'sindgen', 'sinh', 'size', 'skewness', 'skip_lun', 'slicer3', 'slide_image', 'smooth', 'sobel', 'socket', 'sort', 'spawn', 'spher_harm', 'sph_4pnt', 'sph_scat', 'spline', 'spline_p', 'spl_init', 'spl_interp', 'sprsab', 'sprsax', 'sprsin', 'sprstp', 'sqrt', 'standardize', 'stddev', 'stop', 'strarr', 'strcmp', 'strcompress', 'streamline', 'stregex', 'stretch', 'string', 'strjoin', 'strlen', 'strlowcase', 'strmatch', 'strmessage', 'strmid', 'strpos', 'strput', 'strsplit', 'strtrim', 'struct_assign', 'struct_hide', 'strupcase', 'surface', 'surfr', 'svdc', 'svdfit', 'svsol', 'swap_endian', 'swap_endian_inplace', 'symbol', 'systime', 's_test', 't3d', 'tag_names', 'tan', 'tanh', 'tek_color', 'temporary', 'tetra_clip', 'tetra_surface', 'tetra_volume', 'text', 'thin', 'threed', 'timegen', 'time_test2', 'tm_test', 'total', 'trace', 'transpose', 'triangulate', 'trigrid', 'triql', 'trired', 'trisol', 'tri_surf', 'truncate_lun', 'ts_coef', 'ts_diff', 'ts_fcast', 'ts_smooth', 'tv', 'tvcrs', 'tvlct', 'tvrd', 'tvscl', 'typename', 't_cvt', 't_pdf', 'uindgen', 'uint', 'uintarr', 'ul64indgen', 'ulindgen', 'ulon64arr', 'ulonarr', 'ulong', 'ulong64', 'uniq', 'unsharp_mask', 'usersym', 'value_locate', 'variance', 'vector', 'vector_field', 'vel', 'velovect', 'vert_t3d', 'voigt', 'voronoi', 'voxel_proj', 'wait', 'warp_tri', 'watershed', 'wdelete', 'wf_draw', 'where', 'widget_base', 'widget_button', 'widget_combobox', 'widget_control', 'widget_displaycontextmen', 'widget_draw', 'widget_droplist', 'widget_event', 'widget_info', 'widget_label', 'widget_list', 'widget_propertysheet', 
'widget_slider', 'widget_tab', 'widget_table', 'widget_text', 'widget_tree', 'widget_tree_move', 'widget_window', 'wiener_filter', 'window', 'writeu', 'write_bmp', 'write_csv', 'write_gif', 'write_image', 'write_jpeg', 'write_jpeg2000', 'write_nrif', 'write_pict', 'write_png', 'write_ppm', 'write_spr', 'write_srf', 'write_sylk', 'write_tiff', 'write_wav', 'write_wave', 'wset', 'wshow', 'wtn', 'wv_applet', 'wv_cwt', 'wv_cw_wavelet', 'wv_denoise', 'wv_dwt', 'wv_fn_coiflet', 'wv_fn_daubechies', 'wv_fn_gaussian', 'wv_fn_haar', 'wv_fn_morlet', 'wv_fn_paul', 'wv_fn_symlet', 'wv_import_data', 'wv_import_wavelet', 'wv_plot3d_wps', 'wv_plot_multires', 'wv_pwt', 'wv_tool_denoise', 'xbm_edit', 'xdisplayfile', 'xdxf', 'xfont', 'xinteranimate', 'xloadct', 'xmanager', 'xmng_tmpl', 'xmtool', 'xobjview', 'xobjview_rotate', 'xobjview_write_image', 'xpalette', 'xpcolor', 'xplot3d', 'xregistered', 'xroi', 'xsq_test', 'xsurface', 'xvaredit', 'xvolume', 'xvolume_rotate', 'xvolume_write_image', 'xyouts', 'zoom', 'zoom_24'] """Functions from: http://www.exelisvis.com/docs/routines-1.html""" tokens = { 'root': [ (r'^\s*;.*?\n', Comment.Singleline), (r'\b(' + '|'.join(_RESERVED) + r')\b', Keyword), (r'\b(' + '|'.join(_BUILTIN_LIB) + r')\b', Name.Builtin), (r'\+=|-=|\^=|\*=|/=|#=|##=|<=|>=|=', Operator), (r'\+\+|--|->|\+|-|##|#|\*|/|<|>|&&|\^|~|\|\|\?|:', Operator), (r'\b(mod=|lt=|le=|eq=|ne=|ge=|gt=|not=|and=|or=|xor=)', Operator), (r'\b(mod|lt|le|eq|ne|ge|gt|not|and|or|xor)\b', Operator), (r'\b[0-9](L|B|S|UL|ULL|LL)?\b', Number), (r'.', Text), ] } class RdLexer(RegexLexer): """ Pygments Lexer for R documentation (Rd) files This is a very minimal implementation, highlighting little more than the macros. A description of Rd syntax is found in `Writing R Extensions <http://cran.r-project.org/doc/manuals/R-exts.html>`_ and `Parsing Rd files <developer.r-project.org/parseRd.pdf>`_. *New in Pygments 1.6.* """ name = 'Rd' aliases = ['rd'] filenames = ['*.Rd'] mimetypes = ['text/x-r-doc'] # To account for verbatim / LaTeX-like / and R-like areas # would require parsing. tokens = { 'root' : [ # catch escaped brackets and percent sign (r'\\[\\{}%]', String.Escape), # comments (r'%.*$', Comment), # special macros with no arguments (r'\\(?:cr|l?dots|R|tab)\b', Keyword.Constant), # macros (r'\\[a-zA-Z]+\b', Keyword), # special preprocessor macros (r'^\s*#(?:ifn?def|endif).*\b', Comment.Preproc), # non-escaped brackets (r'[{}]', Name.Builtin), # everything else (r'[^\\%\n{}]+', Text), (r'.', Text), ] } class IgorLexer(RegexLexer): """ Pygments Lexer for Igor Pro procedure files (.ipf). See http://www.wavemetrics.com/ and http://www.igorexchange.com/. 
*New in Pygments 1.7.* """ name = 'Igor' aliases = ['igor', 'igorpro'] filenames = ['*.ipf'] mimetypes = ['text/ipf'] flags = re.IGNORECASE flowControl = [ 'if', 'else', 'elseif', 'endif', 'for', 'endfor', 'strswitch', 'switch', 'case', 'endswitch', 'do', 'while', 'try', 'catch', 'endtry', 'break', 'continue', 'return', ] types = [ 'variable', 'string', 'constant', 'strconstant', 'NVAR', 'SVAR', 'WAVE', 'STRUCT', 'ThreadSafe', 'function', 'end', 'static', 'macro', 'window', 'graph', 'Structure', 'EndStructure', 'EndMacro', 'FuncFit', 'Proc', 'Picture', 'Menu', 'SubMenu', 'Prompt', 'DoPrompt', ] operations = [ 'Abort', 'AddFIFOData', 'AddFIFOVectData', 'AddMovieAudio', 'AddMovieFrame', 'APMath', 'Append', 'AppendImage', 'AppendLayoutObject', 'AppendMatrixContour', 'AppendText', 'AppendToGraph', 'AppendToLayout', 'AppendToTable', 'AppendXYZContour', 'AutoPositionWindow', 'BackgroundInfo', 'Beep', 'BoundingBall', 'BrowseURL', 'BuildMenu', 'Button', 'cd', 'Chart', 'CheckBox', 'CheckDisplayed', 'ChooseColor', 'Close', 'CloseMovie', 'CloseProc', 'ColorScale', 'ColorTab2Wave', 'Concatenate', 'ControlBar', 'ControlInfo', 'ControlUpdate', 'ConvexHull', 'Convolve', 'CopyFile', 'CopyFolder', 'CopyScales', 'Correlate', 'CreateAliasShortcut', 'Cross', 'CtrlBackground', 'CtrlFIFO', 'CtrlNamedBackground', 'Cursor', 'CurveFit', 'CustomControl', 'CWT', 'Debugger', 'DebuggerOptions', 'DefaultFont', 'DefaultGuiControls', 'DefaultGuiFont', 'DefineGuide', 'DelayUpdate', 'DeleteFile', 'DeleteFolder', 'DeletePoints', 'Differentiate', 'dir', 'Display', 'DisplayHelpTopic', 'DisplayProcedure', 'DoAlert', 'DoIgorMenu', 'DoUpdate', 'DoWindow', 'DoXOPIdle', 'DrawAction', 'DrawArc', 'DrawBezier', 'DrawLine', 'DrawOval', 'DrawPICT', 'DrawPoly', 'DrawRect', 'DrawRRect', 'DrawText', 'DSPDetrend', 'DSPPeriodogram', 'Duplicate', 'DuplicateDataFolder', 'DWT', 'EdgeStats', 'Edit', 'ErrorBars', 'Execute', 'ExecuteScriptText', 'ExperimentModified', 'Extract', 'FastGaussTransform', 'FastOp', 'FBinRead', 'FBinWrite', 'FFT', 'FIFO2Wave', 'FIFOStatus', 'FilterFIR', 'FilterIIR', 'FindLevel', 'FindLevels', 'FindPeak', 'FindPointsInPoly', 'FindRoots', 'FindSequence', 'FindValue', 'FPClustering', 'fprintf', 'FReadLine', 'FSetPos', 'FStatus', 'FTPDelete', 'FTPDownload', 'FTPUpload', 'FuncFit', 'FuncFitMD', 'GetAxis', 'GetFileFolderInfo', 'GetLastUserMenuInfo', 'GetMarquee', 'GetSelection', 'GetWindow', 'GraphNormal', 'GraphWaveDraw', 'GraphWaveEdit', 'Grep', 'GroupBox', 'Hanning', 'HideIgorMenus', 'HideInfo', 'HideProcedures', 'HideTools', 'HilbertTransform', 'Histogram', 'IFFT', 'ImageAnalyzeParticles', 'ImageBlend', 'ImageBoundaryToMask', 'ImageEdgeDetection', 'ImageFileInfo', 'ImageFilter', 'ImageFocus', 'ImageGenerateROIMask', 'ImageHistModification', 'ImageHistogram', 'ImageInterpolate', 'ImageLineProfile', 'ImageLoad', 'ImageMorphology', 'ImageRegistration', 'ImageRemoveBackground', 'ImageRestore', 'ImageRotate', 'ImageSave', 'ImageSeedFill', 'ImageSnake', 'ImageStats', 'ImageThreshold', 'ImageTransform', 'ImageUnwrapPhase', 'ImageWindow', 'IndexSort', 'InsertPoints', 'Integrate', 'IntegrateODE', 'Interp3DPath', 'Interpolate3D', 'KillBackground', 'KillControl', 'KillDataFolder', 'KillFIFO', 'KillFreeAxis', 'KillPath', 'KillPICTs', 'KillStrings', 'KillVariables', 'KillWaves', 'KillWindow', 'KMeans', 'Label', 'Layout', 'Legend', 'LinearFeedbackShiftRegister', 'ListBox', 'LoadData', 'LoadPackagePreferences', 'LoadPICT', 'LoadWave', 'Loess', 'LombPeriodogram', 'Make', 'MakeIndex', 'MarkPerfTestTime', 'MatrixConvolve', 'MatrixCorr', 
'MatrixEigenV', 'MatrixFilter', 'MatrixGaussJ', 'MatrixInverse', 'MatrixLinearSolve', 'MatrixLinearSolveTD', 'MatrixLLS', 'MatrixLUBkSub', 'MatrixLUD', 'MatrixMultiply', 'MatrixOP', 'MatrixSchur', 'MatrixSolve', 'MatrixSVBkSub', 'MatrixSVD', 'MatrixTranspose', 'MeasureStyledText', 'Modify', 'ModifyContour', 'ModifyControl', 'ModifyControlList', 'ModifyFreeAxis', 'ModifyGraph', 'ModifyImage', 'ModifyLayout', 'ModifyPanel', 'ModifyTable', 'ModifyWaterfall', 'MoveDataFolder', 'MoveFile', 'MoveFolder', 'MoveString', 'MoveSubwindow', 'MoveVariable', 'MoveWave', 'MoveWindow', 'NeuralNetworkRun', 'NeuralNetworkTrain', 'NewDataFolder', 'NewFIFO', 'NewFIFOChan', 'NewFreeAxis', 'NewImage', 'NewLayout', 'NewMovie', 'NewNotebook', 'NewPanel', 'NewPath', 'NewWaterfall', 'Note', 'Notebook', 'NotebookAction', 'Open', 'OpenNotebook', 'Optimize', 'ParseOperationTemplate', 'PathInfo', 'PauseForUser', 'PauseUpdate', 'PCA', 'PlayMovie', 'PlayMovieAction', 'PlaySnd', 'PlaySound', 'PopupContextualMenu', 'PopupMenu', 'Preferences', 'PrimeFactors', 'Print', 'printf', 'PrintGraphs', 'PrintLayout', 'PrintNotebook', 'PrintSettings', 'PrintTable', 'Project', 'PulseStats', 'PutScrapText', 'pwd', 'Quit', 'RatioFromNumber', 'Redimension', 'Remove', 'RemoveContour', 'RemoveFromGraph', 'RemoveFromLayout', 'RemoveFromTable', 'RemoveImage', 'RemoveLayoutObjects', 'RemovePath', 'Rename', 'RenameDataFolder', 'RenamePath', 'RenamePICT', 'RenameWindow', 'ReorderImages', 'ReorderTraces', 'ReplaceText', 'ReplaceWave', 'Resample', 'ResumeUpdate', 'Reverse', 'Rotate', 'Save', 'SaveData', 'SaveExperiment', 'SaveGraphCopy', 'SaveNotebook', 'SavePackagePreferences', 'SavePICT', 'SaveTableCopy', 'SetActiveSubwindow', 'SetAxis', 'SetBackground', 'SetDashPattern', 'SetDataFolder', 'SetDimLabel', 'SetDrawEnv', 'SetDrawLayer', 'SetFileFolderInfo', 'SetFormula', 'SetIgorHook', 'SetIgorMenuMode', 'SetIgorOption', 'SetMarquee', 'SetProcessSleep', 'SetRandomSeed', 'SetScale', 'SetVariable', 'SetWaveLock', 'SetWindow', 'ShowIgorMenus', 'ShowInfo', 'ShowTools', 'Silent', 'Sleep', 'Slider', 'Smooth', 'SmoothCustom', 'Sort', 'SoundInRecord', 'SoundInSet', 'SoundInStartChart', 'SoundInStatus', 'SoundInStopChart', 'SphericalInterpolate', 'SphericalTriangulate', 'SplitString', 'sprintf', 'sscanf', 'Stack', 'StackWindows', 'StatsAngularDistanceTest', 'StatsANOVA1Test', 'StatsANOVA2NRTest', 'StatsANOVA2RMTest', 'StatsANOVA2Test', 'StatsChiTest', 'StatsCircularCorrelationTest', 'StatsCircularMeans', 'StatsCircularMoments', 'StatsCircularTwoSampleTest', 'StatsCochranTest', 'StatsContingencyTable', 'StatsDIPTest', 'StatsDunnettTest', 'StatsFriedmanTest', 'StatsFTest', 'StatsHodgesAjneTest', 'StatsJBTest', 'StatsKendallTauTest', 'StatsKSTest', 'StatsKWTest', 'StatsLinearCorrelationTest', 'StatsLinearRegression', 'StatsMultiCorrelationTest', 'StatsNPMCTest', 'StatsNPNominalSRTest', 'StatsQuantiles', 'StatsRankCorrelationTest', 'StatsResample', 'StatsSample', 'StatsScheffeTest', 'StatsSignTest', 'StatsSRTest', 'StatsTTest', 'StatsTukeyTest', 'StatsVariancesTest', 'StatsWatsonUSquaredTest', 'StatsWatsonWilliamsTest', 'StatsWheelerWatsonTest', 'StatsWilcoxonRankTest', 'StatsWRCorrelationTest', 'String', 'StructGet', 'StructPut', 'TabControl', 'Tag', 'TextBox', 'Tile', 'TileWindows', 'TitleBox', 'ToCommandLine', 'ToolsGrid', 'Triangulate3d', 'Unwrap', 'ValDisplay', 'Variable', 'WaveMeanStdv', 'WaveStats', 'WaveTransform', 'wfprintf', 'WignerTransform', 'WindowFunction', ] functions = [ 'abs', 'acos', 'acosh', 'AiryA', 'AiryAD', 'AiryB', 'AiryBD', 'alog', 
'area', 'areaXY', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'AxisValFromPixel', 'Besseli', 'Besselj', 'Besselk', 'Bessely', 'bessi', 'bessj', 'bessk', 'bessy', 'beta', 'betai', 'BinarySearch', 'BinarySearchInterp', 'binomial', 'binomialln', 'binomialNoise', 'cabs', 'CaptureHistoryStart', 'ceil', 'cequal', 'char2num', 'chebyshev', 'chebyshevU', 'CheckName', 'cmplx', 'cmpstr', 'conj', 'ContourZ', 'cos', 'cosh', 'cot', 'CountObjects', 'CountObjectsDFR', 'cpowi', 'CreationDate', 'csc', 'DataFolderExists', 'DataFolderRefsEqual', 'DataFolderRefStatus', 'date2secs', 'datetime', 'DateToJulian', 'Dawson', 'DDEExecute', 'DDEInitiate', 'DDEPokeString', 'DDEPokeWave', 'DDERequestWave', 'DDEStatus', 'DDETerminate', 'deltax', 'digamma', 'DimDelta', 'DimOffset', 'DimSize', 'ei', 'enoise', 'equalWaves', 'erf', 'erfc', 'exists', 'exp', 'expInt', 'expNoise', 'factorial', 'fakedata', 'faverage', 'faverageXY', 'FindDimLabel', 'FindListItem', 'floor', 'FontSizeHeight', 'FontSizeStringWidth', 'FresnelCos', 'FresnelSin', 'gamma', 'gammaInc', 'gammaNoise', 'gammln', 'gammp', 'gammq', 'Gauss', 'Gauss1D', 'Gauss2D', 'gcd', 'GetDefaultFontSize', 'GetDefaultFontStyle', 'GetKeyState', 'GetRTError', 'gnoise', 'GrepString', 'hcsr', 'hermite', 'hermiteGauss', 'HyperG0F1', 'HyperG1F1', 'HyperG2F1', 'HyperGNoise', 'HyperGPFQ', 'IgorVersion', 'ilim', 'imag', 'Inf', 'Integrate1D', 'interp', 'Interp2D', 'Interp3D', 'inverseERF', 'inverseERFC', 'ItemsInList', 'jlim', 'Laguerre', 'LaguerreA', 'LaguerreGauss', 'leftx', 'LegendreA', 'limit', 'ln', 'log', 'logNormalNoise', 'lorentzianNoise', 'magsqr', 'MandelbrotPoint', 'MarcumQ', 'MatrixDet', 'MatrixDot', 'MatrixRank', 'MatrixTrace', 'max', 'mean', 'min', 'mod', 'ModDate', 'NaN', 'norm', 'NumberByKey', 'numpnts', 'numtype', 'NumVarOrDefault', 'NVAR_Exists', 'p2rect', 'ParamIsDefault', 'pcsr', 'Pi', 'PixelFromAxisVal', 'pnt2x', 'poissonNoise', 'poly', 'poly2D', 'PolygonArea', 'qcsr', 'r2polar', 'real', 'rightx', 'round', 'sawtooth', 'ScreenResolution', 'sec', 'SelectNumber', 'sign', 'sin', 'sinc', 'sinh', 'SphericalBessJ', 'SphericalBessJD', 'SphericalBessY', 'SphericalBessYD', 'SphericalHarmonics', 'sqrt', 'StartMSTimer', 'StatsBetaCDF', 'StatsBetaPDF', 'StatsBinomialCDF', 'StatsBinomialPDF', 'StatsCauchyCDF', 'StatsCauchyPDF', 'StatsChiCDF', 'StatsChiPDF', 'StatsCMSSDCDF', 'StatsCorrelation', 'StatsDExpCDF', 'StatsDExpPDF', 'StatsErlangCDF', 'StatsErlangPDF', 'StatsErrorPDF', 'StatsEValueCDF', 'StatsEValuePDF', 'StatsExpCDF', 'StatsExpPDF', 'StatsFCDF', 'StatsFPDF', 'StatsFriedmanCDF', 'StatsGammaCDF', 'StatsGammaPDF', 'StatsGeometricCDF', 'StatsGeometricPDF', 'StatsHyperGCDF', 'StatsHyperGPDF', 'StatsInvBetaCDF', 'StatsInvBinomialCDF', 'StatsInvCauchyCDF', 'StatsInvChiCDF', 'StatsInvCMSSDCDF', 'StatsInvDExpCDF', 'StatsInvEValueCDF', 'StatsInvExpCDF', 'StatsInvFCDF', 'StatsInvFriedmanCDF', 'StatsInvGammaCDF', 'StatsInvGeometricCDF', 'StatsInvKuiperCDF', 'StatsInvLogisticCDF', 'StatsInvLogNormalCDF', 'StatsInvMaxwellCDF', 'StatsInvMooreCDF', 'StatsInvNBinomialCDF', 'StatsInvNCChiCDF', 'StatsInvNCFCDF', 'StatsInvNormalCDF', 'StatsInvParetoCDF', 'StatsInvPoissonCDF', 'StatsInvPowerCDF', 'StatsInvQCDF', 'StatsInvQpCDF', 'StatsInvRayleighCDF', 'StatsInvRectangularCDF', 'StatsInvSpearmanCDF', 'StatsInvStudentCDF', 'StatsInvTopDownCDF', 'StatsInvTriangularCDF', 'StatsInvUsquaredCDF', 'StatsInvVonMisesCDF', 'StatsInvWeibullCDF', 'StatsKuiperCDF', 'StatsLogisticCDF', 'StatsLogisticPDF', 'StatsLogNormalCDF', 'StatsLogNormalPDF', 'StatsMaxwellCDF', 'StatsMaxwellPDF', 'StatsMedian', 
'StatsMooreCDF', 'StatsNBinomialCDF', 'StatsNBinomialPDF', 'StatsNCChiCDF', 'StatsNCChiPDF', 'StatsNCFCDF', 'StatsNCFPDF', 'StatsNCTCDF', 'StatsNCTPDF', 'StatsNormalCDF', 'StatsNormalPDF', 'StatsParetoCDF', 'StatsParetoPDF', 'StatsPermute', 'StatsPoissonCDF', 'StatsPoissonPDF', 'StatsPowerCDF', 'StatsPowerNoise', 'StatsPowerPDF', 'StatsQCDF', 'StatsQpCDF', 'StatsRayleighCDF', 'StatsRayleighPDF', 'StatsRectangularCDF', 'StatsRectangularPDF', 'StatsRunsCDF', 'StatsSpearmanRhoCDF', 'StatsStudentCDF', 'StatsStudentPDF', 'StatsTopDownCDF', 'StatsTriangularCDF', 'StatsTriangularPDF', 'StatsTrimmedMean', 'StatsUSquaredCDF', 'StatsVonMisesCDF', 'StatsVonMisesNoise', 'StatsVonMisesPDF', 'StatsWaldCDF', 'StatsWaldPDF', 'StatsWeibullCDF', 'StatsWeibullPDF', 'StopMSTimer', 'str2num', 'stringCRC', 'stringmatch', 'strlen', 'strsearch', 'StudentA', 'StudentT', 'sum', 'SVAR_Exists', 'TagVal', 'tan', 'tanh', 'ThreadGroupCreate', 'ThreadGroupRelease', 'ThreadGroupWait', 'ThreadProcessorCount', 'ThreadReturnValue', 'ticks', 'trunc', 'Variance', 'vcsr', 'WaveCRC', 'WaveDims', 'WaveExists', 'WaveMax', 'WaveMin', 'WaveRefsEqual', 'WaveType', 'WhichListItem', 'WinType', 'WNoise', 'x', 'x2pnt', 'xcsr', 'y', 'z', 'zcsr', 'ZernikeR', ] functions += [ 'AddListItem', 'AnnotationInfo', 'AnnotationList', 'AxisInfo', 'AxisList', 'CaptureHistory', 'ChildWindowList', 'CleanupName', 'ContourInfo', 'ContourNameList', 'ControlNameList', 'CsrInfo', 'CsrWave', 'CsrXWave', 'CTabList', 'DataFolderDir', 'date', 'DDERequestString', 'FontList', 'FuncRefInfo', 'FunctionInfo', 'FunctionList', 'FunctionPath', 'GetDataFolder', 'GetDefaultFont', 'GetDimLabel', 'GetErrMessage', 'GetFormula', 'GetIndependentModuleName', 'GetIndexedObjName', 'GetIndexedObjNameDFR', 'GetRTErrMessage', 'GetRTStackInfo', 'GetScrapText', 'GetUserData', 'GetWavesDataFolder', 'GrepList', 'GuideInfo', 'GuideNameList', 'Hash', 'IgorInfo', 'ImageInfo', 'ImageNameList', 'IndexedDir', 'IndexedFile', 'JulianToDate', 'LayoutInfo', 'ListMatch', 'LowerStr', 'MacroList', 'NameOfWave', 'note', 'num2char', 'num2istr', 'num2str', 'OperationList', 'PadString', 'ParseFilePath', 'PathList', 'PICTInfo', 'PICTList', 'PossiblyQuoteName', 'ProcedureText', 'RemoveByKey', 'RemoveEnding', 'RemoveFromList', 'RemoveListItem', 'ReplaceNumberByKey', 'ReplaceString', 'ReplaceStringByKey', 'Secs2Date', 'Secs2Time', 'SelectString', 'SortList', 'SpecialCharacterInfo', 'SpecialCharacterList', 'SpecialDirPath', 'StringByKey', 'StringFromList', 'StringList', 'StrVarOrDefault', 'TableInfo', 'TextFile', 'ThreadGroupGetDF', 'time', 'TraceFromPixel', 'TraceInfo', 'TraceNameList', 'UniqueName', 'UnPadString', 'UpperStr', 'VariableList', 'WaveInfo', 'WaveList', 'WaveName', 'WaveUnits', 'WinList', 'WinName', 'WinRecreation', 'XWaveName', 'ContourNameToWaveRef', 'CsrWaveRef', 'CsrXWaveRef', 'ImageNameToWaveRef', 'NewFreeWave', 'TagWaveRef', 'TraceNameToWaveRef', 'WaveRefIndexed', 'XWaveRefFromTrace', 'GetDataFolderDFR', 'GetWavesDataFolderDFR', 'NewFreeDataFolder', 'ThreadGroupGetDFR', ] tokens = { 'root': [ (r'//.*$', Comment.Single), (r'"([^"\\]|\\.)*"', String), # Flow Control. (r'\b(%s)\b' % '|'.join(flowControl), Keyword), # Types. (r'\b(%s)\b' % '|'.join(types), Keyword.Type), # Built-in operations. (r'\b(%s)\b' % '|'.join(operations), Name.Class), # Built-in functions. (r'\b(%s)\b' % '|'.join(functions), Name.Function), # Compiler directives. (r'^#(include|pragma|define|ifdef|ifndef|endif)', Name.Decorator), (r'[^a-zA-Z"/]+', Text), (r'.', Text), ], }
mit
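A quick way to exercise the lexers above is Pygments' lexer-guessing API, which calls each registered lexer's analyse_text(). A minimal sketch, assuming Pygments 1.6+ is installed with these lexers registered:

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import guess_lexer

stan_model = """
parameters {
  real mu;
}
model {
  mu ~ normal(0, 1);
}
"""

# guess_lexer() scores the text with every lexer's analyse_text();
# StanLexer returns 1.0 for text with a top-level "parameters {" block,
# so it should win here.
lexer = guess_lexer(stan_model)
print(lexer.name)  # -> 'Stan'
print(highlight(stan_model, lexer, TerminalFormatter()))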
johnson1228/pymatgen
pymatgen/io/xcrysden.py
10
3382
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.

from __future__ import division, unicode_literals, print_function

__author__ = "Matteo Giantomassi"
__copyright__ = "Copyright 2013, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Matteo Giantomassi"


class XSF(object):
    """
    Class for parsing XCrysden files.
    """

    def __init__(self, structure):
        self.structure = structure

    def to_string(self):
        """
        Returns a string with the structure in XSF format
        See http://www.xcrysden.org/doc/XSF.html
        """
        lines = []
        app = lines.append

        app("CRYSTAL")
        app("# Primitive lattice vectors in Angstrom")
        app("PRIMVEC")
        cell = self.structure.lattice.matrix
        for i in range(3):
            app(' %.14f %.14f %.14f' % tuple(cell[i]))

        cart_coords = self.structure.cart_coords
        app("# Cartesian coordinates in Angstrom.")
        app("PRIMCOORD")
        app(" %d 1" % len(cart_coords))

        for a in range(len(cart_coords)):
            sp = "%d" % self.structure.atomic_numbers[a]
            app(sp + ' %20.14f %20.14f %20.14f' % tuple(cart_coords[a]))

        return "\n".join(lines)

    @classmethod
    def from_string(cls, input_string, cls_=None):
        """
        Initialize a `Structure` object from a string with data in XSF format.

        Args:
            input_string: String with the structure in XSF format.
                See http://www.xcrysden.org/doc/XSF.html
            cls_: Structure class to be created. default: pymatgen Structure
        """
        # CRYSTAL                                            see (1)
        # these are primitive lattice vectors (in Angstroms)
        # PRIMVEC
        #    0.0000000    2.7100000    2.7100000             see (2)
        #    2.7100000    0.0000000    2.7100000
        #    2.7100000    2.7100000    0.0000000
        # these are conventional lattice vectors (in Angstroms)
        # CONVVEC
        #    5.4200000    0.0000000    0.0000000             see (3)
        #    0.0000000    5.4200000    0.0000000
        #    0.0000000    0.0000000    5.4200000
        # these are atomic coordinates in a primitive unit cell (in Angstroms)
        # PRIMCOORD
        # 2 1                                                see (4)
        # 16      0.0000000  0.0000000  0.0000000            see (5)
        # 30      1.3550000 -1.3550000 -1.3550000
        lattice, coords, species = [], [], []
        lines = input_string.splitlines()

        for i in range(len(lines)):
            if "PRIMVEC" in lines[i]:
                for j in range(i + 1, i + 4):
                    lattice.append([float(c) for c in lines[j].split()])

            if "PRIMCOORD" in lines[i]:
                num_sites = int(lines[i + 1].split()[0])

                for j in range(i + 2, i + 2 + num_sites):
                    tokens = lines[j].split()
                    species.append(int(tokens[0]))
                    coords.append([float(c) for c in tokens[1:]])
                break
        else:
            raise ValueError("Invalid XSF data")

        if cls_ is None:
            from pymatgen.core.structure import Structure
            cls_ = Structure

        s = cls_(lattice, species, coords, coords_are_cartesian=True)
        return XSF(s)
mit
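A minimal round-trip sketch for the XSF class above, assuming pymatgen is importable (Lattice.cubic() and Structure are standard pymatgen APIs):

from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import Structure
from pymatgen.io.xcrysden import XSF

structure = Structure(Lattice.cubic(2.87), ["Fe"], [[0.0, 0.0, 0.0]])
xsf_string = XSF(structure).to_string()

# from_string() parses the text back and returns an XSF wrapping a Structure.
roundtrip = XSF.from_string(xsf_string)
assert roundtrip.structure.composition == structure.composition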
aperigault/ansible
lib/ansible/modules/network/aci/aci_tenant_span_src_group.py
22
7039
#!/usr/bin/python # -*- coding: utf-8 -*- # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'certified'} DOCUMENTATION = r''' --- module: aci_tenant_span_src_group short_description: Manage SPAN source groups (span:SrcGrp) description: - Manage SPAN source groups on Cisco ACI fabrics. version_added: '2.4' options: admin_state: description: - Enable or disable the span sources. - The APIC defaults to C(yes) when unset during creation. type: bool description: description: - The description for Span source group. type: str aliases: [ descr ] dst_group: description: - The Span destination group to associate with the source group. type: str src_group: description: - The name of the Span source group. type: str aliases: [ name ] state: description: - Use C(present) or C(absent) for adding or removing. - Use C(query) for listing an object or multiple objects. type: str choices: [ absent, present, query ] default: present tenant: description: - The name of the Tenant. type: str aliases: [ tenant_name ] extends_documentation_fragment: aci notes: - The C(tenant) used must exist before using this module in your playbook. The M(aci_tenant) module can be used for this. seealso: - module: aci_tenant - name: APIC Management Information Model reference description: More information about the internal APIC class B(span:SrcGrp). link: https://developer.cisco.com/docs/apic-mim-ref/ author: - Jacob McGill (@jmcgill298) ''' EXAMPLES = r''' - aci_tenant_span_src_group: host: apic username: admin password: SomeSecretPassword tenant: production src_group: "{{ src_group }}" dst_group: "{{ dst_group }}" admin_state: "{{ admin_state }}" description: "{{ description }}" delegate_to: localhost ''' RETURN = r''' current: description: The existing configuration from the APIC after the module has finished returned: success type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production environment", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] error: description: The error information as returned from the APIC returned: failure type: dict sample: { "code": "122", "text": "unknown managed object class foo" } raw: description: The raw output returned by the APIC REST API (xml or json) returned: parse error type: str sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>' sent: description: The actual/minimal configuration pushed to the APIC returned: info type: list sample: { "fvTenant": { "attributes": { "descr": "Production environment" } } } previous: description: The original configuration from the APIC before the module has started returned: info type: list sample: [ { "fvTenant": { "attributes": { "descr": "Production", "dn": "uni/tn-production", "name": "production", "nameAlias": "", "ownerKey": "", "ownerTag": "" } } } ] proposed: description: The assembled configuration from the user-provided parameters returned: info type: dict sample: { "fvTenant": { "attributes": { "descr": "Production environment", "name": "production" } } } filter_string: description: The filter string used for the request returned: failure or debug type: str sample: ?rsp-prop-include=config-only method: description: The HTTP method used for the request to the APIC returned: 
failure or debug type: str sample: POST response: description: The HTTP response from the APIC returned: failure or debug type: str sample: OK (30 bytes) status: description: The HTTP status from the APIC returned: failure or debug type: int sample: 200 url: description: The HTTP url used for the request to the APIC returned: failure or debug type: str sample: https://10.11.12.13/api/mo/uni/tn-production.json ''' from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec def main(): argument_spec = aci_argument_spec() argument_spec.update( tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects src_group=dict(type='str', aliases=['name']), # Not required for querying all objects admin_state=dict(type='bool'), description=dict(type='str', aliases=['descr']), dst_group=dict(type='str'), state=dict(type='str', default='present', choices=['absent', 'present', 'query']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, required_if=[ ['state', 'absent', ['src_group', 'tenant']], ['state', 'present', ['src_group', 'tenant']], ], ) aci = ACIModule(module) admin_state = aci.boolean(module.params['admin_state'], 'enabled', 'disabled') description = module.params['description'] dst_group = module.params['dst_group'] src_group = module.params['src_group'] state = module.params['state'] tenant = module.params['tenant'] aci.construct_url( root_class=dict( aci_class='fvTenant', aci_rn='tn-{0}'.format(tenant), module_object=tenant, target_filter={'name': tenant}, ), subclass_1=dict( aci_class='spanSrcGrp', aci_rn='srcgrp-{0}'.format(src_group), module_object=src_group, target_filter={'name': src_group}, ), child_classes=['spanSpanLbl'], ) aci.get_existing() if state == 'present': aci.payload( aci_class='spanSrcGrp', class_config=dict( adminSt=admin_state, descr=description, name=src_group, ), child_configs=[{'spanSpanLbl': {'attributes': {'name': dst_group}}}], ) aci.get_diff(aci_class='spanSrcGrp') aci.post_config() elif state == 'absent': aci.delete_config() aci.exit_json() if __name__ == "__main__": main()
gpl-3.0
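For orientation, the construct_url() call above anchors the object at the relative names 'tn-{tenant}' and 'srcgrp-{src_group}', so the managed object lives under uni/. A sketch of the resulting APIC REST endpoint (the host name is a placeholder, not part of the module):

# Illustrative only: the MO path implied by the RN templates above.
tenant, src_group = "production", "my_span_sources"
dn = "uni/tn-{0}/srcgrp-{1}".format(tenant, src_group)
url = "https://apic.example.com/api/mo/{0}.json".format(dn)
print(url)
# https://apic.example.com/api/mo/uni/tn-production/srcgrp-my_span_sources.json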
prakritish/ansible
lib/ansible/modules/packaging/language/cpanm.py
70
7024
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2012, Franck Cuny <franck@lumberjaph.net> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: cpanm short_description: Manages Perl library dependencies. description: - Manage Perl library dependencies. version_added: "1.6" options: name: description: - The name of the Perl library to install. You may use the "full distribution path", e.g. MIYAGAWA/Plack-0.99_05.tar.gz required: false default: null aliases: ["pkg"] from_path: description: - The local directory from where to install required: false default: null notest: description: - Do not run unit tests required: false default: false locallib: description: - Specify the install base to install modules required: false default: false mirror: description: - Specifies the base URL for the CPAN mirror to use required: false default: false mirror_only: description: - Use the mirror's index file instead of the CPAN Meta DB required: false default: false installdeps: description: - Only install dependencies required: false default: false version_added: "2.0" version: description: - minimum version of perl module to consider acceptable required: false default: false version_added: "2.1" system_lib: description: - Use this if you want to install modules to the system perl include path. You must be root or have "passwordless" sudo for this to work. - This uses the cpanm commandline option '--sudo', which has nothing to do with ansible privilege escalation. required: false default: false version_added: "2.0" aliases: ['use_sudo'] executable: description: - Override the path to the cpanm executable required: false default: null version_added: "2.1" notes: - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. 
author: "Franck Cuny (@franckcuny)" ''' EXAMPLES = ''' # install Dancer perl package - cpanm: name: Dancer # install version 0.99_05 of the Plack perl package - cpanm: name: MIYAGAWA/Plack-0.99_05.tar.gz # install Dancer into the specified locallib - cpanm: name: Dancer locallib: /srv/webapps/my_app/extlib # install perl dependencies from local directory - cpanm: from_path: /srv/webapps/my_app/src/ # install Dancer perl package without running the unit tests in indicated locallib - cpanm: name: Dancer notest: True locallib: /srv/webapps/my_app/extlib # install Dancer perl package from a specific mirror - cpanm: name: Dancer mirror: 'http://cpan.cpantesters.org/' # install Dancer perl package into the system root path - cpanm: name: Dancer system_lib: yes # install Dancer if it's not already installed # OR the installed version is older than version 1.0 - cpanm: name: Dancer version: '1.0' ''' def _is_package_installed(module, name, locallib, cpanm, version): cmd = "" if locallib: os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib cmd = "%s perl -e ' use %s" % (cmd, name) if version: cmd = "%s %s;'" % (cmd, version) else: cmd = "%s;'" % cmd res, stdout, stderr = module.run_command(cmd, check_rc=False) if res == 0: return True else: return False def _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo): # this code should use "%s" like everything else and just return early but not fixing all of it now. # don't copy stuff like this if from_path: cmd = cpanm + " " + from_path else: cmd = cpanm + " " + name if notest is True: cmd = cmd + " -n" if locallib is not None: cmd = cmd + " -l " + locallib if mirror is not None: cmd = cmd + " --mirror " + mirror if mirror_only is True: cmd = cmd + " --mirror-only" if installdeps is True: cmd = cmd + " --installdeps" if use_sudo is True: cmd = cmd + " --sudo" return cmd def _get_cpanm_path(module): if module.params['executable']: return module.params['executable'] else: return module.get_bin_path('cpanm', True) def main(): arg_spec = dict( name=dict(default=None, required=False, aliases=['pkg']), from_path=dict(default=None, required=False, type='path'), notest=dict(default=False, type='bool'), locallib=dict(default=None, required=False, type='path'), mirror=dict(default=None, required=False), mirror_only=dict(default=False, type='bool'), installdeps=dict(default=False, type='bool'), system_lib=dict(default=False, type='bool', aliases=['use_sudo']), version=dict(default=None, required=False), executable=dict(required=False, type='path'), ) module = AnsibleModule( argument_spec=arg_spec, required_one_of=[['name', 'from_path']], ) cpanm = _get_cpanm_path(module) name = module.params['name'] from_path = module.params['from_path'] notest = module.boolean(module.params.get('notest', False)) locallib = module.params['locallib'] mirror = module.params['mirror'] mirror_only = module.params['mirror_only'] installdeps = module.params['installdeps'] use_sudo = module.params['system_lib'] version = module.params['version'] changed = False installed = _is_package_installed(module, name, locallib, cpanm, version) if not installed: cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, mirror_only, installdeps, cpanm, use_sudo) rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) if rc_cpanm != 0: module.fail_json(msg=err_cpanm, cmd=cmd) if (err_cpanm.find('is up to date') == -1 and out_cpanm.find('is up to date') == -1): changed = True module.exit_json(changed=changed, binary=cpanm, 
                     name=name)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
gpl-3.0
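Because _build_cmd_line() above is pure string assembly, it can be sanity-checked without a Perl toolchain. A sketch (the cpanm path and locallib are illustrative):

cmd = _build_cmd_line(name="Dancer", from_path=None, notest=True,
                      locallib="/srv/webapps/my_app/extlib", mirror=None,
                      mirror_only=False, installdeps=False,
                      cpanm="/usr/local/bin/cpanm", use_sudo=False)
print(cmd)
# -> /usr/local/bin/cpanm Dancer -n -l /srv/webapps/my_app/extlib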
jean/sentry
src/sentry/south_migrations/0042_auto__add_projectcountbyminute__add_unique_projectcountbyminute_projec.py
5
24885
# encoding: utf-8 import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'ProjectCountByMinute' db.create_table( 'sentry_projectcountbyminute', ( ( 'id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')( primary_key=True ) ), ( 'project', self.gf('sentry.db.models.fields.FlexibleForeignKey')( to=orm['sentry.Project'], null=True ) ), ('date', self.gf('django.db.models.fields.DateTimeField')()), ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)), ('time_spent_total', self.gf('django.db.models.fields.FloatField')(default=0)), ('time_spent_count', self.gf('django.db.models.fields.IntegerField')(default=0)), ) ) db.send_create_signal('sentry', ['ProjectCountByMinute']) # Adding unique constraint on 'ProjectCountByMinute', fields ['project', 'date'] db.create_unique('sentry_projectcountbyminute', ['project_id', 'date']) def backwards(self, orm): # Removing unique constraint on 'ProjectCountByMinute', fields ['project', 'date'] db.delete_unique('sentry_projectcountbyminute', ['project_id', 'date']) # Deleting model 'ProjectCountByMinute' db.delete_table('sentry_projectcountbyminute') models = { 'sentry.user': { 'Meta': { 'object_name': 'User', 'db_table': "'auth_user'" }, 'date_joined': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'email': ('django.db.models.fields.EmailField', [], { 'max_length': '75', 'blank': 'True' }), 'first_name': ('django.db.models.fields.CharField', [], { 'max_length': '30', 'blank': 'True' }), 'id': ('django.db.models.fields.AutoField', [], { 'primary_key': 'True' }), 'is_active': ('django.db.models.fields.BooleanField', [], { 'default': 'True' }), 'is_staff': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'is_superuser': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'last_login': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'last_name': ('django.db.models.fields.CharField', [], { 'max_length': '30', 'blank': 'True' }), 'password': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'username': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '30' }) }, 'contenttypes.contenttype': { 'Meta': { 'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'" }, 'app_label': ('django.db.models.fields.CharField', [], { 'max_length': '100' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'model': ('django.db.models.fields.CharField', [], { 'max_length': '100' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '100' }) }, 'sentry.event': { 'Meta': { 'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'" }, 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'db_index': 'True' }), 'culprit': ( 'django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True' } ), 'data': ('django.db.models.fields.TextField', [], { 'null': 'True', 'blank': 'True' }), 'datetime': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'event_id': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'null': 'True', 'db_column': 
"'message_id'" } ), 'group': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'level': ( 'django.db.models.fields.PositiveIntegerField', [], { 'default': '40', 'db_index': 'True', 'blank': 'True' } ), 'logger': ( 'django.db.models.fields.CharField', [], { 'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'server_name': ( 'django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True', 'db_index': 'True' } ), 'site': ( 'django.db.models.fields.CharField', [], { 'max_length': '128', 'null': 'True', 'db_index': 'True' } ), 'time_spent': ('django.db.models.fields.FloatField', [], { 'null': 'True' }) }, 'sentry.filtervalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.group': { 'Meta': { 'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'" }, 'checksum': ('django.db.models.fields.CharField', [], { 'max_length': '32', 'db_index': 'True' }), 'culprit': ( 'django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True' } ), 'data': ('django.db.models.fields.TextField', [], { 'null': 'True', 'blank': 'True' }), 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'db_index': 'True' } ), 'level': ( 'django.db.models.fields.PositiveIntegerField', [], { 'default': '40', 'db_index': 'True', 'blank': 'True' } ), 'logger': ( 'django.db.models.fields.CharField', [], { 'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True' } ), 'message': ('django.db.models.fields.TextField', [], {}), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'score': ('django.db.models.fields.IntegerField', [], { 'default': '0' }), 'status': ( 'django.db.models.fields.PositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ), 'time_spent_count': ('django.db.models.fields.IntegerField', [], { 'default': '0' }), 'time_spent_total': ('django.db.models.fields.FloatField', [], { 'default': '0' }), 'times_seen': ( 'django.db.models.fields.PositiveIntegerField', [], { 'default': '1', 'db_index': 'True' } ), 'views': ( 'django.db.models.fields.related.ManyToManyField', [], { 'to': "orm['sentry.View']", 'symmetrical': 'False', 'blank': 'True' } ) }, 'sentry.groupbookmark': { 'Meta': { 'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark' }, 'group': ( 'sentry.db.models.fields.FlexibleForeignKey', [], 
{ 'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']" } ), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']" } ), 'user': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.groupmeta': { 'Meta': { 'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta' }, 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.messagecountbyminute': { 'Meta': { 'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute' }, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'time_spent_count': ('django.db.models.fields.IntegerField', [], { 'default': '0' }), 'time_spent_total': ('django.db.models.fields.FloatField', [], { 'default': '0' }), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], { 'default': '0' }) }, 'sentry.messagefiltervalue': { 'Meta': { 'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue' }, 'first_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'last_seen': ( 'django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True' } ), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], { 'default': '0' }), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '200' }) }, 'sentry.messageindex': { 'Meta': { 'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex' }, 'column': ('django.db.models.fields.CharField', [], { 'max_length': '32' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'value': ('django.db.models.fields.CharField', [], { 'max_length': '128' }) }, 'sentry.option': { 'Meta': { 'object_name': 'Option' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '64' }), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.project': { 'Meta': { 'object_name': 'Project' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], 
{ 'primary_key': 'True' }), 'name': ('django.db.models.fields.CharField', [], { 'max_length': '200' }), 'owner': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']" } ), 'public': ('django.db.models.fields.BooleanField', [], { 'default': 'False' }), 'status': ( 'django.db.models.fields.PositiveIntegerField', [], { 'default': '0', 'db_index': 'True' } ) }, 'sentry.projectcountbyminute': { 'Meta': { 'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute' }, 'date': ('django.db.models.fields.DateTimeField', [], {}), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']", 'null': 'True' } ), 'time_spent_count': ('django.db.models.fields.IntegerField', [], { 'default': '0' }), 'time_spent_total': ('django.db.models.fields.FloatField', [], { 'default': '0' }), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], { 'default': '0' }) }, 'sentry.projectdomain': { 'Meta': { 'unique_together': "(('project', 'domain'),)", 'object_name': 'ProjectDomain' }, 'domain': ('django.db.models.fields.CharField', [], { 'max_length': '128' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'domain_set'", 'to': "orm['sentry.Project']" } ) }, 'sentry.projectmember': { 'Meta': { 'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'member_set'", 'to': "orm['sentry.Project']" } ), 'public_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'secret_key': ( 'django.db.models.fields.CharField', [], { 'max_length': '32', 'unique': 'True', 'null': 'True' } ), 'type': ('django.db.models.fields.IntegerField', [], { 'default': '0' }), 'user': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'sentry_project_set'", 'to': "orm['sentry.User']" } ) }, 'sentry.projectoption': { 'Meta': { 'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'" }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'key': ('django.db.models.fields.CharField', [], { 'max_length': '64' }), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" }), 'value': ('django.db.models.fields.TextField', [], {}) }, 'sentry.searchdocument': { 'Meta': { 'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument' }, 'date_added': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'date_changed': ('django.db.models.fields.DateTimeField', [], { 'default': 'datetime.datetime.now' }), 'group': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Group']" }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'project': ('sentry.db.models.fields.FlexibleForeignKey', [], { 'to': "orm['sentry.Project']" }), 'status': 
('django.db.models.fields.PositiveIntegerField', [], { 'default': '0' }), 'total_events': ('django.db.models.fields.PositiveIntegerField', [], { 'default': '1' }) }, 'sentry.searchtoken': { 'Meta': { 'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken' }, 'document': ( 'sentry.db.models.fields.FlexibleForeignKey', [], { 'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']" } ), 'field': ('django.db.models.fields.CharField', [], { 'default': "'text'", 'max_length': '64' }), 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'times_seen': ('django.db.models.fields.PositiveIntegerField', [], { 'default': '1' }), 'token': ('django.db.models.fields.CharField', [], { 'max_length': '128' }) }, 'sentry.view': { 'Meta': { 'object_name': 'View' }, 'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], { 'primary_key': 'True' }), 'path': ('django.db.models.fields.CharField', [], { 'unique': 'True', 'max_length': '100' }), 'verbose_name': ('django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True' }), 'verbose_name_plural': ('django.db.models.fields.CharField', [], { 'max_length': '200', 'null': 'True' }) } } complete_apps = ['sentry']
bsd-3-clause
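For reference, the table this migration creates corresponds roughly to the following Django model (a sketch: FlexibleForeignKey and BoundedBigAutoField are Sentry-internal field types, approximated here with stock Django fields):

from django.db import models

class ProjectCountByMinute(models.Model):
    # Sentry uses FlexibleForeignKey / BoundedBigAutoField here.
    project = models.ForeignKey("sentry.Project", null=True)
    date = models.DateTimeField()
    times_seen = models.PositiveIntegerField(default=0)
    time_spent_total = models.FloatField(default=0)
    time_spent_count = models.IntegerField(default=0)

    class Meta:
        unique_together = (("project", "date"),)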
richcaudle/ingestion-connectors
gmail-connector/sync.py
1
4478
#!/usr/bin/python

import httplib2
import json
import time
import base64
import sys

from threading import Thread

from apiclient import errors  # needed for errors.HttpError in ListHistory()
from apiclient.discovery import build
from apiclient.http import BatchHttpRequest
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run

messageLog = list()


def GetPayloadHeaderValue(message, headerName):
    for header in message['payload']['headers']:
        if header['name'] == headerName:
            return header['value']
    return None


def GetMailAction(message):
    status = "READ"
    if "UNREAD" in message['labelIds']:
        status = "NEW"
    elif "TRASH" in message['labelIds']:
        status = "DELETED"
    elif "SENT" in message['labelIds']:
        status = "SENT"
    elif "DRAFT" in message['labelIds']:
        status = "DRAFT"
    return status


def IsNew(messageId, labels):
    key = messageId + str(hash(''.join(labels)))
    if key in messageLog:
        return False
    else:
        messageLog.append(key)
        return True


def PrintMessage(request_id, response, exception):
    if exception is not None:
        sys.stderr.write(str(exception))
    else:
        DecodeMessage(response)
        subject = GetPayloadHeaderValue(response, "Subject")
        fromWho = GetPayloadHeaderValue(response, "From")
        status = GetMailAction(response)
        if IsNew(response['id'], response['labelIds']):
            sys.stdout.write(json.dumps(response) + '\n')
            sys.stdout.flush()
            sys.stderr.write(status + ': ' + fromWho + ": " + subject +
                             "(labels = " + json.dumps(response['labelIds']) + ")\n")


def DecodePart(part):
    if part['mimeType'] == 'text/plain' or part['mimeType'] == 'text/html':
        part['decoded'] = base64.urlsafe_b64decode(
            part['body']['data'].encode("utf-8"))


def DecodeMessage(message):
    for part in message['payload']['parts']:
        if 'parts' in part:
            for p in part['parts']:
                DecodePart(p)
        else:
            DecodePart(part)


def GetMessages(historyItems):
    messageIds = list()
    for historyItem in historyItems:
        for message in historyItem['messages']:
            messageIds.append(message['id'])

    if len(messageIds) > 0:
        batch = BatchHttpRequest()
        for msgId in messageIds:
            batch.add(gmail_service.users().messages().get(
                id=msgId, userId='me', format='full'), callback=PrintMessage)
        batch.execute(http=http)


def ListHistory(service, user_id, start_history_id='1'):
    try:
        history = (service.users().history()
                   .list(userId=user_id, startHistoryId=start_history_id)
                   .execute())
        changes = history['history'] if 'history' in history else []
        while 'nextPageToken' in history:
            page_token = history['nextPageToken']
            history = (service.users().history()
                       .list(userId=user_id, startHistoryId=start_history_id,
                             pageToken=page_token).execute())
            changes.extend(history['history'])
        return changes, history['historyId']
    except errors.HttpError as error:
        sys.stderr.write('An error occurred: %s' % error)
        # Keep the caller's tuple unpacking working and retry from the
        # same history id on the next poll.
        return [], start_history_id


# Path to the client_secret.json file downloaded from the Developer Console
CLIENT_SECRET_FILE = 'creds.json'

# Check https://developers.google.com/gmail/api/auth/scopes for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'

# Location of the credentials storage file
STORAGE = Storage('gmail.storage')

# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()

# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
    credentials = run(flow, STORAGE, http=http)

# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)

# Build the Gmail service from discovery
gmail_service = build('gmail', 'v1', http=http)

# Page through messages since last get
historyId = None
messages = gmail_service.users().messages().list(
    userId='me', maxResults=1).execute()
last_message = gmail_service.users().messages().get(
    id=messages['messages'][0]['id'], userId='me', format='full').execute()
historyId = last_message['historyId']

while True:
    time.sleep(30)
    changes, historyId = ListHistory(gmail_service, 'me', historyId)
    GetMessages(changes)
mit
hurrian/kernel_samsung_trelte
Documentation/networking/cxacru-cf.py
14668
1626
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.

import sys
import struct

i = 0
while True:
    buf = sys.stdin.read(4)

    if len(buf) == 0:
        break
    elif len(buf) != 4:
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
gpl-2.0
chugunovyar/factoryForBuild
env/lib/python2.7/site-packages/celery/bin/worker.py
2
11519
# -*- coding: utf-8 -*-
"""Program used to start a Celery worker instance.

The :program:`celery worker` command (previously known as ``celeryd``)

.. program:: celery worker

.. seealso::

    See :ref:`preload-options`.

.. cmdoption:: -c, --concurrency

    Number of child processes processing the queue.  The default
    is the number of CPUs available on your system.

.. cmdoption:: -P, --pool

    Pool implementation:

    prefork (default), eventlet, gevent or solo.

.. cmdoption:: -n, --hostname

    Set custom hostname (e.g., 'w1@%%h').  Expands: %%h (hostname),
    %%n (name) and %%d, (domain).

.. cmdoption:: -B, --beat

    Also run the `celery beat` periodic task scheduler.  Please note that
    there must only be one instance of this service.

    .. note::

        ``-B`` is meant to be used for development purposes. For production
        environment, you need to start :program:`celery beat` separately.

.. cmdoption:: -Q, --queues

    List of queues to enable for this worker, separated by comma.
    By default all configured queues are enabled.
    Example: `-Q video,image`

.. cmdoption:: -X, --exclude-queues

    List of queues to disable for this worker, separated by comma.
    By default all configured queues are enabled.
    Example: `-X video,image`.

.. cmdoption:: -I, --include

    Comma separated list of additional modules to import.
    Example: -I foo.tasks,bar.tasks

.. cmdoption:: -s, --schedule

    Path to the schedule database if running with the `-B` option.
    Defaults to `celerybeat-schedule`.  The extension ".db" may be
    appended to the filename.

.. cmdoption:: -O

    Apply optimization profile.  Supported: default, fair

.. cmdoption:: --prefetch-multiplier

    Set custom prefetch multiplier value for this worker instance.

.. cmdoption:: --scheduler

    Scheduler class to use.  Default is
    :class:`celery.beat.PersistentScheduler`

.. cmdoption:: -S, --statedb

    Path to the state database.  The extension '.db' may be
    appended to the filename.  Default: {default}

.. cmdoption:: -E, --task-events

    Send task-related events that can be captured by monitors like
    :program:`celery events`, `celerymon`, and others.

.. cmdoption:: --without-gossip

    Don't subscribe to other workers events.

.. cmdoption:: --without-mingle

    Don't synchronize with other workers at start-up.

.. cmdoption:: --without-heartbeat

    Don't send event heartbeats.

.. cmdoption:: --heartbeat-interval

    Interval in seconds at which to send worker heartbeat

.. cmdoption:: --purge

    Purges all waiting tasks before the daemon is started.
    **WARNING**: This is unrecoverable, and the tasks will be
    deleted from the messaging server.

.. cmdoption:: --time-limit

    Enables a hard time limit (in seconds int/float) for tasks.

.. cmdoption:: --soft-time-limit

    Enables a soft time limit (in seconds int/float) for tasks.

.. cmdoption:: --max-tasks-per-child

    Maximum number of tasks a pool worker can execute before it's
    terminated and replaced by a new worker.

.. cmdoption:: --max-memory-per-child

    Maximum amount of resident memory, in KiB, that may be consumed by a
    child process before it will be replaced by a new one.  If a single
    task causes a child process to exceed this limit, the task will be
    completed and the child process will be replaced afterwards.
    Default: no limit.

.. cmdoption:: --autoscale

    Enable autoscaling by providing max_concurrency, min_concurrency.
    Example::

        --autoscale=10,3 (always keep 3 processes, but grow to 10
        if necessary)

.. cmdoption:: --detach

    Start worker as a background process.

.. cmdoption:: -f, --logfile

    Path to log file.  If no logfile is specified, `stderr` is used.

.. cmdoption:: -l, --loglevel

    Logging level, choose between `DEBUG`, `INFO`, `WARNING`,
    `ERROR`, `CRITICAL`, or `FATAL`.

.. cmdoption:: --pidfile

    Optional file used to store the process pid.

    The program won't start if this file already exists
    and the pid is still alive.

.. cmdoption:: --uid

    User id, or user name of the user to run as after detaching.

.. cmdoption:: --gid

    Group id, or group name of the main group to change to after
    detaching.

.. cmdoption:: --umask

    Effective :manpage:`umask(1)` (in octal) of the process after detaching.
    Inherits the :manpage:`umask(1)` of the parent process by default.

.. cmdoption:: --workdir

    Optional directory to change to after detaching.

.. cmdoption:: --executable

    Executable to use for the detached process.
"""
from __future__ import absolute_import, unicode_literals

import sys

from celery import concurrency
from celery.bin.base import Command, daemon_options
from celery.bin.celeryd_detach import detached_celeryd
from celery.five import string_t
from celery.platforms import maybe_drop_privileges
from celery.utils.log import LOG_LEVELS, mlevel
from celery.utils.nodenames import default_nodename

__all__ = ['worker', 'main']

HELP = __doc__


class worker(Command):
    """Start worker instance.

    Examples:
        .. code-block:: console

            $ celery worker --app=proj -l info
            $ celery worker -A proj -l info -Q hipri,lopri

            $ celery worker -A proj --concurrency=4
            $ celery worker -A proj --concurrency=1000 -P eventlet
            $ celery worker --autoscale=10,0
    """

    doc = HELP  # parse help from this too
    namespace = 'worker'
    enable_config_from_cmdline = True
    supports_args = False
    removed_flags = {'--no-execv', '--force-execv'}

    def run_from_argv(self, prog_name, argv=None, command=None):
        argv = [x for x in argv if x not in self.removed_flags]
        command = sys.argv[0] if command is None else command
        argv = sys.argv[1:] if argv is None else argv
        # parse options before detaching so errors can be handled.
        options, args = self.prepare_args(
            *self.parse_options(prog_name, argv, command))
        self.maybe_detach([command] + argv)
        return self(*args, **options)

    def maybe_detach(self, argv, dopts=['-D', '--detach']):
        if any(arg in argv for arg in dopts):
            argv = [v for v in argv if v not in dopts]
            # will never return
            detached_celeryd(self.app).execute_from_commandline(argv)
            raise SystemExit(0)

    def run(self, hostname=None, pool_cls=None, app=None, uid=None, gid=None,
            loglevel=None, logfile=None, pidfile=None, statedb=None,
            **kwargs):
        maybe_drop_privileges(uid=uid, gid=gid)
        # Pools like eventlet/gevent needs to patch libs as early
        # as possible.
        pool_cls = (concurrency.get_implementation(pool_cls) or
                    self.app.conf.worker_pool)
        if self.app.IS_WINDOWS and kwargs.get('beat'):
            self.die('-B option does not work on Windows.  '
                     'Please run celery beat as a separate service.')
        hostname = self.host_format(default_nodename(hostname))
        if loglevel:
            try:
                loglevel = mlevel(loglevel)
            except KeyError:  # pragma: no cover
                self.die('Unknown level {0!r}.  Please use one of {1}.'.format(
                    loglevel, '|'.join(
                        l for l in LOG_LEVELS if isinstance(l, string_t))))

        worker = self.app.Worker(
            hostname=hostname, pool_cls=pool_cls, loglevel=loglevel,
            logfile=logfile,  # node format handled by celery.app.log.setup
            pidfile=self.node_format(pidfile, hostname),
            statedb=self.node_format(statedb, hostname),
            **kwargs)
        worker.start()
        return worker.exitcode

    def with_pool_option(self, argv):
        # this command support custom pools
        # that may have to be loaded as early as possible.
        return (['-P'], ['--pool'])

    def add_arguments(self, parser):
        conf = self.app.conf

        wopts = parser.add_argument_group('Worker Options')
        wopts.add_argument('-n', '--hostname')
        wopts.add_argument(
            '-D', '--detach',
            action='store_true', default=False,
        )
        wopts.add_argument(
            '-S', '--statedb',
            default=conf.worker_state_db,
        )
        wopts.add_argument('-l', '--loglevel', default='WARN')
        wopts.add_argument('-O', dest='optimization')
        wopts.add_argument(
            '--prefetch-multiplier',
            type=int, default=conf.worker_prefetch_multiplier,
        )

        topts = parser.add_argument_group('Pool Options')
        topts.add_argument(
            '-c', '--concurrency',
            default=conf.worker_concurrency, type=int,
        )
        topts.add_argument(
            '-P', '--pool',
            default=conf.worker_pool,
        )
        topts.add_argument(
            '-E', '--task-events', '--events',
            action='store_true', default=conf.worker_send_task_events,
        )
        topts.add_argument(
            '--time-limit',
            type=float, default=conf.task_time_limit,
        )
        topts.add_argument(
            '--soft-time-limit',
            type=float, default=conf.task_soft_time_limit,
        )
        topts.add_argument(
            '--max-tasks-per-child', '--maxtasksperchild',
            type=int, default=conf.worker_max_tasks_per_child,
        )
        topts.add_argument(
            '--max-memory-per-child', '--maxmemperchild',
            type=int, default=conf.worker_max_memory_per_child,
        )

        qopts = parser.add_argument_group('Queue Options')
        qopts.add_argument(
            '--purge', '--discard',
            action='store_true', default=False,
        )
        qopts.add_argument('--queues', '-Q', default=[])
        qopts.add_argument('--exclude-queues', '-X', default=[])
        qopts.add_argument('--include', '-I', default=[])

        fopts = parser.add_argument_group('Features')
        fopts.add_argument(
            '--without-gossip', action='store_true', default=False,
        )
        fopts.add_argument(
            '--without-mingle', action='store_true', default=False,
        )
        fopts.add_argument(
            '--without-heartbeat', action='store_true', default=False,
        )
        fopts.add_argument('--heartbeat-interval', type=int)
        fopts.add_argument('--autoscale')

        daemon_options(parser)

        bopts = parser.add_argument_group('Embedded Beat Options')
        bopts.add_argument('-B', '--beat', action='store_true', default=False)
        bopts.add_argument(
            '-s', '--schedule-filename', '--schedule',
            default=conf.beat_schedule_filename,
        )
        bopts.add_argument('--scheduler')

        user_options = self.app.user_options['worker']
        if user_options:
            uopts = parser.add_argument_group('User Options')
            self.add_compat_options(uopts, user_options)


def main(app=None):
    """Start worker."""
    # Fix for setuptools generated scripts, so that it will
    # work with multiprocessing fork emulation.
    # (see multiprocessing.forking.get_preparation_data())
    if __name__ != '__main__':  # pragma: no cover
        sys.modules['__main__'] = sys.modules[__name__]
    from billiard import freeze_support
    freeze_support()
    worker(app=app).execute_from_commandline()


if __name__ == '__main__':  # pragma: no cover
    main()
gpl-3.0
kinap/scapy
scapy/layers/ir.py
24
1433
## This file is part of Scapy
## See http://www.secdev.org/projects/scapy for more informations
## Copyright (C) Philippe Biondi <phil@secdev.org>
## This program is published under a GPLv2 license

"""
IrDA infrared data communication.
"""

from scapy.packet import *
from scapy.fields import *
from scapy.layers.l2 import CookedLinux


# IR

class IrLAPHead(Packet):
    name = "IrDA Link Access Protocol Header"
    fields_desc = [ XBitField("Address", 0x7f, 7),
                    BitEnumField("Type", 1, 1, {"Response":0,
                                                "Command":1})]

class IrLAPCommand(Packet):
    name = "IrDA Link Access Protocol Command"
    fields_desc = [ XByteField("Control", 0),
                    XByteField("Format identifier", 0),
                    XIntField("Source address", 0),
                    XIntField("Destination address", 0xffffffffL),
                    XByteField("Discovery flags", 0x1),
                    ByteEnumField("Slot number", 255, {"final":255}),
                    XByteField("Version", 0)]

class IrLMP(Packet):
    name = "IrDA Link Management Protocol"
    fields_desc = [ XShortField("Service hints", 0),
                    XByteField("Character set", 0),
                    StrField("Device name", "") ]


bind_layers( CookedLinux,  IrLAPHead,    proto=23)
bind_layers( IrLAPHead,    IrLAPCommand, Type=1)
bind_layers( IrLAPCommand, IrLMP,        )
gpl-2.0
belmiromoreira/nova
nova/openstack/common/loopingcall.py
11
4768
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import sys
import time

from eventlet import event
from eventlet import greenthread

from nova.openstack.common._i18n import _LE, _LW

LOG = logging.getLogger(__name__)

# NOTE(zyluo): This lambda function was declared to avoid mocking collisions
#              with time.time() called in the standard logging module
#              during unittests.
_ts = lambda: time.time()


class LoopingCallDone(Exception):
    """Exception to break out and stop a LoopingCallBase.

    The poll-function passed to LoopingCallBase can raise this exception to
    break out of the loop normally. This is somewhat analogous to
    StopIteration.

    An optional return-value can be included as the argument to the exception;
    this return-value will be returned by LoopingCallBase.wait()
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue


class LoopingCallBase(object):
    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        self._running = False
        self.done = None

    def stop(self):
        self._running = False

    def wait(self):
        return self.done.wait()


class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    def start(self, interval, initial_delay=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    start = _ts()
                    self.f(*self.args, **self.kw)
                    end = _ts()
                    if not self._running:
                        break
                    delay = end - start - interval
                    if delay > 0:
                        LOG.warning(_LW('task %(func_name)r run outlasted '
                                        'interval by %(delay).2f sec'),
                                    {'func_name': self.f, 'delay': delay})
                    greenthread.sleep(-delay if delay < 0 else 0)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in fixed duration looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn_n(_inner)
        return self.done


class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    def start(self, initial_delay=None, periodic_interval_max=None):
        self._running = True
        done = event.Event()

        def _inner():
            if initial_delay:
                greenthread.sleep(initial_delay)

            try:
                while self._running:
                    idle = self.f(*self.args, **self.kw)
                    if not self._running:
                        break

                    if periodic_interval_max is not None:
                        idle = min(idle, periodic_interval_max)
                    LOG.debug('Dynamic looping call %(func_name)r sleeping '
                              'for %(idle).02f seconds',
                              {'func_name': self.f, 'idle': idle})
                    greenthread.sleep(idle)
            except LoopingCallDone as e:
                self.stop()
                done.send(e.retvalue)
            except Exception:
                LOG.exception(_LE('in dynamic looping call'))
                done.send_exception(*sys.exc_info())
                return
            else:
                done.send(True)

        self.done = done

        greenthread.spawn(_inner)
        return self.done
apache-2.0
bbriggs/sithmail-tipbot
sithmail-tipbot/Local/Tip.py
1
2117
from Legobot.Lego import Lego
import logging

logger = logging.getLogger(__name__)


class Tip(Lego):
    def __init__(self, baseplate, lock, redis, *args, **kwargs):
        super().__init__(baseplate, lock)
        self.r = redis  # initialized redis connection
        self.prefix = "/kudos"

    def listening_for(self, message):
        if message['text'] is not None:
            # Only care if first "word" is a ++ or --
            return self._is_incr(message['text'].split()[0])

    @staticmethod
    def _is_incr(cmd):
        return cmd.endswith('++') or cmd.endswith('--')

    def handle(self, message):
        opts = self._handle_opts(message)
        cmd = message['text'].split()[0]
        return_val = "this is a default return val"
        if self._is_incr(cmd):
            resp = self._incr(cmd)
            if resp is not False:
                return_val = "{} has {} meaningless internet points.".format(
                    cmd[:-2], resp)
            else:
                logger.debug(str(resp))
                return_val = "Redis is sleeping right now."
        self.reply(message, return_val, opts)

    def _incr(self, cmd):
        target = cmd[:-2]
        if cmd.endswith("++"):
            # lol you can create nested keys
            resp = self.r.incr('{}/{}'.format(self.prefix, target))
            logger.debug("Redis increment: {}".format(str(resp)))
            return resp
        elif cmd.endswith("--"):
            resp = self.r.decr('{}/{}'.format(self.prefix, target))
            logger.debug("Redis decrement: {}".format(str(resp)))
            return resp
        else:
            return False

    def _handle_opts(self, message):
        try:
            target = message['metadata']['source_channel']
            opts = {'target': target}
        except (IndexError, KeyError):
            # a missing metadata key raises KeyError, not IndexError
            opts = None
            logger.error('''Could not identify message source in message:
                         {}'''.format(str(message)))
        return opts

    def get_name(self):
        return 'tip'

    def get_help(self):
        return 'Tip some imaginary internet points'
gpl-2.0
avoinsystems/odoo
addons/website/controllers/main.py
43
20211
# -*- coding: utf-8 -*-
import cStringIO
import datetime
from itertools import islice
import json
import xml.etree.ElementTree as ET
import logging
import re

import werkzeug.utils
import urllib2
import werkzeug.wrappers
from PIL import Image

import openerp
from openerp.addons.web.controllers.main import WebClient
from openerp.addons.web import http
from openerp.http import request, STATIC_CACHE
from openerp.tools import image_save_for_web

logger = logging.getLogger(__name__)

# Completely arbitrary limits
MAX_IMAGE_WIDTH, MAX_IMAGE_HEIGHT = IMAGE_LIMITS = (1024, 768)
LOC_PER_SITEMAP = 45000
SITEMAP_CACHE_TIME = datetime.timedelta(hours=12)


class Website(openerp.addons.web.controllers.main.Home):
    #------------------------------------------------------
    # View
    #------------------------------------------------------
    @http.route('/', type='http', auth="public", website=True)
    def index(self, **kw):
        page = 'homepage'
        try:
            main_menu = request.registry['ir.model.data'].get_object(request.cr, request.uid, 'website', 'main_menu')
        except Exception:
            pass
        else:
            first_menu = main_menu.child_id and main_menu.child_id[0]
            if first_menu:
                if not (first_menu.url.startswith(('/page/', '/?', '/#')) or (first_menu.url == '/')):
                    return request.redirect(first_menu.url)
                if first_menu.url.startswith('/page/'):
                    return request.registry['ir.http'].reroute(first_menu.url)
        return self.page(page)

    @http.route(website=True, auth="public")
    def web_login(self, *args, **kw):
        # TODO: can't we just put auth=public, ... in web client ?
        return super(Website, self).web_login(*args, **kw)

    @http.route('/website/lang/<lang>', type='http', auth="public", website=True, multilang=False)
    def change_lang(self, lang, r='/', **kwargs):
        if lang == 'default':
            lang = request.website.default_lang_code
            r = '/%s%s' % (lang, r or '/')
        redirect = werkzeug.utils.redirect(r or ('/%s' % lang), 303)
        redirect.set_cookie('website_lang', lang)
        return redirect

    @http.route('/page/<page:page>', type='http', auth="public", website=True)
    def page(self, page, **opt):
        values = {
            'path': page,
        }
        # /page/website.XXX --> /page/XXX
        if page.startswith('website.'):
            return request.redirect('/page/' + page[8:], code=301)
        elif '.' not in page:
            page = 'website.%s' % page

        try:
            request.website.get_template(page)
        except ValueError, e:
            # page not found
            if request.website.is_publisher():
                page = 'website.page_404'
            else:
                return request.registry['ir.http']._handle_exception(e, 404)

        return request.render(page, values)

    @http.route(['/robots.txt'], type='http', auth="public")
    def robots(self):
        return request.render('website.robots', {'url_root': request.httprequest.url_root}, mimetype='text/plain')

    @http.route('/sitemap.xml', type='http', auth="public", website=True)
    def sitemap_xml_index(self):
        cr, uid, context = request.cr, openerp.SUPERUSER_ID, request.context
        ira = request.registry['ir.attachment']
        iuv = request.registry['ir.ui.view']
        mimetype = 'application/xml;charset=utf-8'
        content = None

        def create_sitemap(url, content):
            ira.create(cr, uid, dict(
                datas=content.encode('base64'),
                mimetype=mimetype,
                type='binary',
                name=url,
                url=url,
            ), context=context)

        sitemap = ira.search_read(cr, uid, [('url', '=', '/sitemap.xml'), ('type', '=', 'binary')], ('datas', 'create_date'), context=context)
        if sitemap:
            # Check if stored version is still valid
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            create_date = datetime.datetime.strptime(sitemap[0]['create_date'], server_format)
            delta = datetime.datetime.now() - create_date
            if delta < SITEMAP_CACHE_TIME:
                content = sitemap[0]['datas'].decode('base64')

        if not content:
            # Remove all sitemaps in ir.attachments as we're going to regenerated them
            sitemap_ids = ira.search(cr, uid, [('url', '=like', '/sitemap%.xml'), ('type', '=', 'binary')], context=context)
            if sitemap_ids:
                ira.unlink(cr, uid, sitemap_ids, context=context)

            pages = 0
            first_page = None
            locs = request.website.sudo(user=request.website.user_id.id).enumerate_pages()
            while True:
                start = pages * LOC_PER_SITEMAP
                values = {
                    'locs': islice(locs, start, start + LOC_PER_SITEMAP),
                    'url_root': request.httprequest.url_root[:-1],
                }
                urls = iuv.render(cr, uid, 'website.sitemap_locs', values, context=context)
                if urls.strip():
                    page = iuv.render(cr, uid, 'website.sitemap_xml', dict(content=urls), context=context)
                    if not first_page:
                        first_page = page
                    pages += 1
                    create_sitemap('/sitemap-%d.xml' % pages, page)
                else:
                    break
            if not pages:
                return request.not_found()
            elif pages == 1:
                content = first_page
            else:
                # Sitemaps must be split in several smaller files with a sitemap index
                content = iuv.render(cr, uid, 'website.sitemap_index_xml', dict(
                    pages=range(1, pages + 1),
                    url_root=request.httprequest.url_root,
                ), context=context)
                create_sitemap('/sitemap.xml', content)

        return request.make_response(content, [('Content-Type', mimetype)])

    @http.route('/website/info', type='http', auth="public", website=True)
    def website_info(self):
        try:
            request.website.get_template('website.info').name
        except Exception, e:
            return request.registry['ir.http']._handle_exception(e, 404)
        irm = request.env()['ir.module.module'].sudo()
        apps = irm.search([('state', '=', 'installed'), ('application', '=', True)])
        modules = irm.search([('state', '=', 'installed'), ('application', '=', False)])
        values = {
            'apps': apps,
            'modules': modules,
            'version': openerp.service.common.exp_version()
        }
        return request.render('website.info', values)

    #------------------------------------------------------
    # Edit
    #------------------------------------------------------
    @http.route('/website/add/<path:path>', type='http', auth="user", website=True)
    def pagenew(self, path, noredirect=False, add_menu=None):
        xml_id = request.registry['website'].new_page(request.cr, request.uid, path, context=request.context)
        if add_menu:
            model, id = request.registry["ir.model.data"].get_object_reference(request.cr, request.uid, 'website', 'main_menu')
            request.registry['website.menu'].create(request.cr, request.uid, {
                'name': path,
                'url': "/page/" + xml_id[8:],
                'parent_id': id,
            }, context=request.context)

        # Reverse action in order to allow shortcut for /page/<website_xml_id>
        url = "/page/" + re.sub(r"^website\.", '', xml_id)
        if noredirect:
            return werkzeug.wrappers.Response(url, mimetype='text/plain')
        return werkzeug.utils.redirect(url)

    @http.route('/website/theme_change', type='http', auth="user", website=True)
    def theme_change(self, theme_id=False, **kwargs):
        imd = request.registry['ir.model.data']
        Views = request.registry['ir.ui.view']

        _, theme_template_id = imd.get_object_reference(
            request.cr, request.uid, 'website', 'theme')
        views = Views.search(request.cr, request.uid, [
            ('inherit_id', '=', theme_template_id),
        ], context=request.context)
        Views.write(request.cr, request.uid, views, {
            'active': False,
        }, context=dict(request.context or {}, active_test=True))

        if theme_id:
            module, xml_id = theme_id.split('.')
            _, view_id = imd.get_object_reference(
                request.cr, request.uid, module, xml_id)
            Views.write(request.cr, request.uid, [view_id], {
                'active': True
            }, context=dict(request.context or {}, active_test=True))

        return request.render('website.themes', {'theme_changed': True})

    @http.route(['/website/snippets'], type='json', auth="public", website=True)
    def snippets(self):
        return request.website._render('website.snippets')

    @http.route('/website/reset_templates', type='http', auth='user', methods=['POST'], website=True)
    def reset_template(self, templates, redirect='/'):
        templates = request.httprequest.form.getlist('templates')
        modules_to_update = []
        for temp_id in templates:
            view = request.registry['ir.ui.view'].browse(request.cr, request.uid, int(temp_id), context=request.context)
            if view.page:
                continue
            view.model_data_id.write({
                'noupdate': False
            })
            if view.model_data_id.module not in modules_to_update:
                modules_to_update.append(view.model_data_id.module)

        if modules_to_update:
            module_obj = request.registry['ir.module.module']
            module_ids = module_obj.search(request.cr, request.uid, [('name', 'in', modules_to_update)], context=request.context)
            if module_ids:
                module_obj.button_immediate_upgrade(request.cr, request.uid, module_ids, context=request.context)
        return request.redirect(redirect)

    @http.route('/website/customize_template_get', type='json', auth='user', website=True)
    def customize_template_get(self, xml_id, full=False):
        return request.registry["ir.ui.view"].customize_template_get(
            request.cr, request.uid, xml_id, full=full, context=request.context)

    @http.route('/website/get_view_translations', type='json', auth='public', website=True)
    def get_view_translations(self, xml_id, lang=None):
        lang = lang or request.context.get('lang')
        return request.registry["ir.ui.view"].get_view_translations(
            request.cr, request.uid, xml_id, lang=lang, context=request.context)

    @http.route('/website/set_translations', type='json', auth='public', website=True)
    def set_translations(self, data, lang):
        irt = request.registry.get('ir.translation')
        for view_id, trans in data.items():
            view_id = int(view_id)
            for t in trans:
                initial_content = t['initial_content'].strip()
                new_content = t['new_content'].strip()
                tid = t['translation_id']
                if not tid:
                    old_trans = irt.search_read(
                        request.cr, request.uid,
                        [
                            ('type', '=', 'view'),
                            ('res_id', '=', view_id),
                            ('lang', '=', lang),
                            ('src', '=', initial_content),
                        ])
                    if old_trans:
                        tid = old_trans[0]['id']
                if tid:
                    vals = {'value': new_content}
                    irt.write(request.cr, request.uid, [tid], vals)
                else:
                    new_trans = {
                        'name': 'website',
                        'res_id': view_id,
                        'lang': lang,
                        'type': 'view',
                        'source': initial_content,
                        'value': new_content,
                    }
                    if t.get('gengo_translation'):
                        new_trans['gengo_translation'] = t.get('gengo_translation')
                        new_trans['gengo_comment'] = t.get('gengo_comment')
                    irt.create(request.cr, request.uid, new_trans)
        return True

    @http.route('/website/translations', type='json', auth="public", website=True)
    def get_website_translations(self, lang):
        module_obj = request.registry['ir.module.module']
        module_ids = module_obj.search(request.cr, request.uid, [('name', 'ilike', 'website'), ('state', '=', 'installed')], context=request.context)
        modules = [x['name'] for x in module_obj.read(request.cr, request.uid, module_ids, ['name'], context=request.context)]
        return WebClient().translations(mods=modules, lang=lang)

    @http.route('/website/attach', type='http', auth='user', methods=['POST'], website=True)
    def attach(self, func, upload=None, url=None, disable_optimization=None):
        Attachments = request.registry['ir.attachment']

        website_url = message = None
        if not upload:
            website_url = url
            name = url.split("/").pop()
            attachment_id = Attachments.create(request.cr, request.uid, {
                'name': name,
                'type': 'url',
                'url': url,
                'res_model': 'ir.ui.view',
            }, request.context)
        else:
            try:
                image_data = upload.read()
                image = Image.open(cStringIO.StringIO(image_data))
                w, h = image.size
                if w * h > 42e6:  # Nokia Lumia 1020 photo resolution
                    raise ValueError(
                        u"Image size excessive, uploaded images must be smaller "
                        u"than 42 million pixel")

                if not disable_optimization and image.format in ('PNG', 'JPEG'):
                    image_data = image_save_for_web(image)

                attachment_id = Attachments.create(request.cr, request.uid, {
                    'name': upload.filename,
                    'datas': image_data.encode('base64'),
                    'datas_fname': upload.filename,
                    'res_model': 'ir.ui.view',
                }, request.context)

                [attachment] = Attachments.read(
                    request.cr, request.uid, [attachment_id], ['website_url'],
                    context=request.context)
                website_url = attachment['website_url']
            except Exception, e:
                logger.exception("Failed to upload image to attachment")
                message = unicode(e)

        return """<script type='text/javascript'>
            window.parent['%s'](%s, %s);
        </script>""" % (func, json.dumps(website_url), json.dumps(message))

    @http.route(['/website/publish'], type='json', auth="public", website=True)
    def publish(self, id, object):
        _id = int(id)
        _object = request.registry[object]
        obj = _object.browse(request.cr, request.uid, _id)

        values = {}
        if 'website_published' in _object._fields:
            values['website_published'] = not obj.website_published
        _object.write(request.cr, request.uid, [_id],
                      values, context=request.context)

        obj = _object.browse(request.cr, request.uid, _id)
        return bool(obj.website_published)

    @http.route(['/website/seo_suggest/<keywords>'], type='http', auth="public", website=True)
    def seo_suggest(self, keywords):
        url = "http://google.com/complete/search"
        try:
            req = urllib2.Request("%s?%s" % (url, werkzeug.url_encode({
                'ie': 'utf8', 'oe': 'utf8', 'output': 'toolbar', 'q': keywords})))
            request = urllib2.urlopen(req)
        except (urllib2.HTTPError, urllib2.URLError):
            return []
        xmlroot = ET.fromstring(request.read())
        return json.dumps([sugg[0].attrib['data'] for sugg in xmlroot if len(sugg) and sugg[0].attrib['data']])

    #------------------------------------------------------
    # Helpers
    #------------------------------------------------------
    @http.route(['/website/kanban'], type='http', auth="public", methods=['POST'], website=True)
    def kanban(self, **post):
        return request.website.kanban_col(**post)

    def placeholder(self, response):
        return request.registry['website']._image_placeholder(response)

    @http.route([
        '/website/image',
        '/website/image/<model>/<id>/<field>',
        '/website/image/<model>/<id>/<field>/<int:max_width>x<int:max_height>'
    ], auth="public", website=True, multilang=False)
    def website_image(self, model, id, field, max_width=None, max_height=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.

        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~.placeholder`.

        Sets and checks conditional response parameters:
        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified is set iif the record has a concurrency
          field (``__last_update``)

        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        try:
            idsha = id.split('_')
            id = idsha[0]
            response = werkzeug.wrappers.Response()
            return request.registry['website']._image(
                request.cr, request.uid, model, id, field, response, max_width, max_height,
                cache=STATIC_CACHE if len(idsha) > 1 else None)
        except Exception:
            logger.exception("Cannot render image field %r of record %s[%s] at size(%s,%s)",
                             field, model, id, max_width, max_height)
            response = werkzeug.wrappers.Response()
            return self.placeholder(response)

    #------------------------------------------------------
    # Server actions
    #------------------------------------------------------
    @http.route([
        '/website/action/<path_or_xml_id_or_id>',
        '/website/action/<path_or_xml_id_or_id>/<path:path>',
    ], type='http', auth="public", website=True)
    def actions_server(self, path_or_xml_id_or_id, **post):
        cr, uid, context = request.cr, request.uid, request.context
        res, action_id, action = None, None, None
        ServerActions = request.registry['ir.actions.server']

        # find the action_id: either an xml_id, the path, or an ID
        if isinstance(path_or_xml_id_or_id, basestring) and '.' in path_or_xml_id_or_id:
            action_id = request.registry['ir.model.data'].xmlid_to_res_id(request.cr, request.uid, path_or_xml_id_or_id, raise_if_not_found=False)
        if not action_id:
            action_ids = ServerActions.search(cr, uid, [('website_path', '=', path_or_xml_id_or_id), ('website_published', '=', True)], context=context)
            action_id = action_ids and action_ids[0] or None
        if not action_id:
            try:
                action_id = int(path_or_xml_id_or_id)
            except ValueError:
                pass

        # check it effectively exists
        if action_id:
            action_ids = ServerActions.exists(cr, uid, [action_id], context=context)
            action_id = action_ids and action_ids[0] or None
        # run it, return only if we got a Response object
        if action_id:
            action = ServerActions.browse(cr, uid, action_id, context=context)
            if action.state == 'code' and action.website_published:
                action_res = ServerActions.run(cr, uid, [action_id], context=context)
                if isinstance(action_res, werkzeug.wrappers.Response):
                    res = action_res
        if res:
            return res
        return request.redirect('/')
agpl-3.0
JacerOmri/PokemonGo-Bot-Desktop
pywin/Lib/distutils/command/build_py.py
74
16338
"""distutils.command.build_py Implements the Distutils 'build_py' command.""" __revision__ = "$Id$" import os import sys from glob import glob from distutils.core import Command from distutils.errors import DistutilsOptionError, DistutilsFileError from distutils.util import convert_path from distutils import log class build_py(Command): description = "\"build\" pure Python modules (copy to build directory)" user_options = [ ('build-lib=', 'd', "directory to \"build\" (copy) to"), ('compile', 'c', "compile .py to .pyc"), ('no-compile', None, "don't compile .py files [default]"), ('optimize=', 'O', "also compile with optimization: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ('force', 'f', "forcibly build everything (ignore file timestamps)"), ] boolean_options = ['compile', 'force'] negative_opt = {'no-compile' : 'compile'} def initialize_options(self): self.build_lib = None self.py_modules = None self.package = None self.package_data = None self.package_dir = None self.compile = 0 self.optimize = 0 self.force = None def finalize_options(self): self.set_undefined_options('build', ('build_lib', 'build_lib'), ('force', 'force')) # Get the distribution options that are aliases for build_py # options -- list of packages and list of modules. self.packages = self.distribution.packages self.py_modules = self.distribution.py_modules self.package_data = self.distribution.package_data self.package_dir = {} if self.distribution.package_dir: for name, path in self.distribution.package_dir.items(): self.package_dir[name] = convert_path(path) self.data_files = self.get_data_files() # Ick, copied straight from install_lib.py (fancy_getopt needs a # type system! Hell, *everything* needs a type system!!!) if not isinstance(self.optimize, int): try: self.optimize = int(self.optimize) assert 0 <= self.optimize <= 2 except (ValueError, AssertionError): raise DistutilsOptionError("optimize must be 0, 1, or 2") def run(self): # XXX copy_file by default preserves atime and mtime. IMHO this is # the right thing to do, but perhaps it should be an option -- in # particular, a site administrator might want installed files to # reflect the time of installation rather than the last # modification time before the installed release. # XXX copy_file by default preserves mode, which appears to be the # wrong thing to do: if a file is read-only in the working # directory, we want it to be installed read/write so that the next # installation of the same module distribution can overwrite it # without problems. (This might be a Unix-specific issue.) Thus # we turn off 'preserve_mode' when copying to the build directory, # since the build directory is supposed to be exactly what the # installation will look like (ie. we preserve mode when # installing). # Two options control which modules will be installed: 'packages' # and 'py_modules'. The former lets us work with whole packages, not # specifying individual modules at all; the latter is for # specifying modules one-at-a-time. 
if self.py_modules: self.build_modules() if self.packages: self.build_packages() self.build_package_data() self.byte_compile(self.get_outputs(include_bytecode=0)) def get_data_files(self): """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" data = [] if not self.packages: return data for package in self.packages: # Locate package source directory src_dir = self.get_package_dir(package) # Compute package build directory build_dir = os.path.join(*([self.build_lib] + package.split('.'))) # Length of path to strip from found files plen = 0 if src_dir: plen = len(src_dir)+1 # Strip directory from globbed filenames filenames = [ file[plen:] for file in self.find_data_files(package, src_dir) ] data.append((package, src_dir, build_dir, filenames)) return data def find_data_files(self, package, src_dir): """Return filenames for package's data files in 'src_dir'""" globs = (self.package_data.get('', []) + self.package_data.get(package, [])) files = [] for pattern in globs: # Each pattern has to be converted to a platform-specific path filelist = glob(os.path.join(src_dir, convert_path(pattern))) # Files that match more than one pattern are only added once files.extend([fn for fn in filelist if fn not in files and os.path.isfile(fn)]) return files def build_package_data(self): """Copy data files into build directory""" for package, src_dir, build_dir, filenames in self.data_files: for filename in filenames: target = os.path.join(build_dir, filename) self.mkpath(os.path.dirname(target)) self.copy_file(os.path.join(src_dir, filename), target, preserve_mode=False) def get_package_dir(self, package): """Return the directory, relative to the top of the source distribution, where package 'package' should be found (at least according to the 'package_dir' option, if any).""" path = package.split('.') if not self.package_dir: if path: return os.path.join(*path) else: return '' else: tail = [] while path: try: pdir = self.package_dir['.'.join(path)] except KeyError: tail.insert(0, path[-1]) del path[-1] else: tail.insert(0, pdir) return os.path.join(*tail) else: # Oops, got all the way through 'path' without finding a # match in package_dir. If package_dir defines a directory # for the root (nameless) package, then fallback on it; # otherwise, we might as well have not consulted # package_dir at all, as we just use the directory implied # by 'tail' (which should be the same as the original value # of 'path' at this point). pdir = self.package_dir.get('') if pdir is not None: tail.insert(0, pdir) if tail: return os.path.join(*tail) else: return '' def check_package(self, package, package_dir): # Empty dir name means current directory, which we can probably # assume exists. Also, os.path.exists and isdir don't know about # my "empty string means current dir" convention, so we have to # circumvent them. if package_dir != "": if not os.path.exists(package_dir): raise DistutilsFileError( "package directory '%s' does not exist" % package_dir) if not os.path.isdir(package_dir): raise DistutilsFileError( "supposed package directory '%s' exists, " "but is not a directory" % package_dir) # Require __init__.py for all but the "root package" if package: init_py = os.path.join(package_dir, "__init__.py") if os.path.isfile(init_py): return init_py else: log.warn(("package init file '%s' not found " + "(or not a regular file)"), init_py) # Either not in a package at all (__init__.py not expected), or # __init__.py doesn't exist -- so don't return the filename. 
return None def check_module(self, module, module_file): if not os.path.isfile(module_file): log.warn("file %s (for module %s) not found", module_file, module) return False else: return True def find_package_modules(self, package, package_dir): self.check_package(package, package_dir) module_files = glob(os.path.join(package_dir, "*.py")) modules = [] setup_script = os.path.abspath(self.distribution.script_name) for f in module_files: abs_f = os.path.abspath(f) if abs_f != setup_script: module = os.path.splitext(os.path.basename(f))[0] modules.append((package, module, f)) else: self.debug_print("excluding %s" % setup_script) return modules def find_modules(self): """Finds individually-specified Python modules, ie. those listed by module name in 'self.py_modules'. Returns a list of tuples (package, module_base, filename): 'package' is a tuple of the path through package-space to the module; 'module_base' is the bare (no packages, no dots) module name, and 'filename' is the path to the ".py" file (relative to the distribution root) that implements the module. """ # Map package names to tuples of useful info about the package: # (package_dir, checked) # package_dir - the directory where we'll find source files for # this package # checked - true if we have checked that the package directory # is valid (exists, contains __init__.py, ... ?) packages = {} # List of (package, module, filename) tuples to return modules = [] # We treat modules-in-packages almost the same as toplevel modules, # just the "package" for a toplevel is empty (either an empty # string or empty list, depending on context). Differences: # - don't check for __init__.py in directory for empty package for module in self.py_modules: path = module.split('.') package = '.'.join(path[0:-1]) module_base = path[-1] try: (package_dir, checked) = packages[package] except KeyError: package_dir = self.get_package_dir(package) checked = 0 if not checked: init_py = self.check_package(package, package_dir) packages[package] = (package_dir, 1) if init_py: modules.append((package, "__init__", init_py)) # XXX perhaps we should also check for just .pyc files # (so greedy closed-source bastards can distribute Python # modules too) module_file = os.path.join(package_dir, module_base + ".py") if not self.check_module(module, module_file): continue modules.append((package, module_base, module_file)) return modules def find_all_modules(self): """Compute the list of all modules that will be built, whether they are specified one-module-at-a-time ('self.py_modules') or by whole packages ('self.packages'). 
Return a list of tuples (package, module, module_file), just like 'find_modules()' and 'find_package_modules()' do.""" modules = [] if self.py_modules: modules.extend(self.find_modules()) if self.packages: for package in self.packages: package_dir = self.get_package_dir(package) m = self.find_package_modules(package, package_dir) modules.extend(m) return modules def get_source_files(self): return [module[-1] for module in self.find_all_modules()] def get_module_outfile(self, build_dir, package, module): outfile_path = [build_dir] + list(package) + [module + ".py"] return os.path.join(*outfile_path) def get_outputs(self, include_bytecode=1): modules = self.find_all_modules() outputs = [] for (package, module, module_file) in modules: package = package.split('.') filename = self.get_module_outfile(self.build_lib, package, module) outputs.append(filename) if include_bytecode: if self.compile: outputs.append(filename + "c") if self.optimize > 0: outputs.append(filename + "o") outputs += [ os.path.join(build_dir, filename) for package, src_dir, build_dir, filenames in self.data_files for filename in filenames ] return outputs def build_module(self, module, module_file, package): if isinstance(package, str): package = package.split('.') elif not isinstance(package, (list, tuple)): raise TypeError( "'package' must be a string (dot-separated), list, or tuple") # Now put the module source file into the "build" area -- this is # easy, we just copy it somewhere under self.build_lib (the build # directory for Python source). outfile = self.get_module_outfile(self.build_lib, package, module) dir = os.path.dirname(outfile) self.mkpath(dir) return self.copy_file(module_file, outfile, preserve_mode=0) def build_modules(self): modules = self.find_modules() for (package, module, module_file) in modules: # Now "build" the module -- ie. copy the source file to # self.build_lib (the build directory for Python source). # (Actually, it gets copied to the directory for this package # under self.build_lib.) self.build_module(module, module_file, package) def build_packages(self): for package in self.packages: # Get list of (package, module, module_file) tuples based on # scanning the package directory. 'package' is only included # in the tuple so that 'find_modules()' and # 'find_package_tuples()' have a consistent interface; it's # ignored here (apart from a sanity check). Also, 'module' is # the *unqualified* module name (ie. no dots, no package -- we # already know its package!), and 'module_file' is the path to # the .py file, relative to the current directory # (ie. including 'package_dir'). package_dir = self.get_package_dir(package) modules = self.find_package_modules(package, package_dir) # Now loop over the modules we found, "building" each one (just # copy it to self.build_lib). for (package_, module, module_file) in modules: assert package == package_ self.build_module(module, module_file, package) def byte_compile(self, files): if sys.dont_write_bytecode: self.warn('byte-compiling is disabled, skipping.') return from distutils.util import byte_compile prefix = self.build_lib if prefix[-1] != os.sep: prefix = prefix + os.sep # XXX this code is essentially the same as the 'byte_compile() # method of the "install_lib" command, except for the determination # of the 'prefix' string. Hmmm. 
if self.compile: byte_compile(files, optimize=0, force=self.force, prefix=prefix, dry_run=self.dry_run) if self.optimize > 0: byte_compile(files, optimize=self.optimize, force=self.force, prefix=prefix, dry_run=self.dry_run)
mit
mahendra-r/edx-platform
lms/djangoapps/certificates/migrations/0018_add_example_cert_models.py
102
12495
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'CertificateGenerationCourseSetting'
        db.create_table('certificates_certificategenerationcoursesetting', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
            ('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('certificates', ['CertificateGenerationCourseSetting'])

        # Adding model 'ExampleCertificate'
        db.create_table('certificates_examplecertificate', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('example_cert_set', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['certificates.ExampleCertificateSet'])),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('uuid', self.gf('django.db.models.fields.CharField')(default='2017ea093523484cb355fac9c3e7a22b', unique=True, max_length=255, db_index=True)),
            ('access_key', self.gf('django.db.models.fields.CharField')(default='8dc842be46484291a65b9ea34c3a8af8', max_length=255, db_index=True)),
            ('full_name', self.gf('django.db.models.fields.CharField')(default=u'John Do\xeb', max_length=255)),
            ('template', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('status', self.gf('django.db.models.fields.CharField')(default='started', max_length=255)),
            ('error_reason', self.gf('django.db.models.fields.TextField')(default=None, null=True)),
            ('download_url', self.gf('django.db.models.fields.CharField')(default=None, max_length=255, null=True)),
        ))
        db.create_index('certificates_examplecertificate', ['uuid', 'access_key'])
        db.send_create_signal('certificates', ['ExampleCertificate'])

        # Adding model 'ExampleCertificateSet'
        db.create_table('certificates_examplecertificateset', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('model_utils.fields.AutoCreatedField')(default=datetime.datetime.now)),
            ('modified', self.gf('model_utils.fields.AutoLastModifiedField')(default=datetime.datetime.now)),
            ('course_key', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
        ))
        db.send_create_signal('certificates', ['ExampleCertificateSet'])

    def backwards(self, orm):
        # Deleting model 'CertificateGenerationCourseSetting'
        db.delete_table('certificates_certificategenerationcoursesetting')

        # Deleting model 'ExampleCertificate'
        db.delete_table('certificates_examplecertificate')

        # Deleting model 'ExampleCertificateSet'
        db.delete_table('certificates_examplecertificateset')

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'certificates.certificategenerationconfiguration': {
            'Meta': {'object_name': 'CertificateGenerationConfiguration'},
            'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'certificates.certificategenerationcoursesetting': {
            'Meta': {'object_name': 'CertificateGenerationCourseSetting'},
            'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
        },
        'certificates.certificatewhitelist': {
            'Meta': {'object_name': 'CertificateWhitelist'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'whitelist': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'certificates.examplecertificate': {
            'Meta': {'object_name': 'ExampleCertificate'},
            'access_key': ('django.db.models.fields.CharField', [], {'default': "'616e8368b9b2458b8ef8217713275322'", 'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'download_url': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True'}),
            'error_reason': ('django.db.models.fields.TextField', [], {'default': 'None', 'null': 'True'}),
            'example_cert_set': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['certificates.ExampleCertificateSet']"}),
            'full_name': ('django.db.models.fields.CharField', [], {'default': "u'John Do\\xeb'", 'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'started'", 'max_length': '255'}),
            'template': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'uuid': ('django.db.models.fields.CharField', [], {'default': "'f38e7389280d4776907ebf96ac728bac'", 'unique': 'True', 'max_length': '255', 'db_index': 'True'})
        },
        'certificates.examplecertificateset': {
            'Meta': {'object_name': 'ExampleCertificateSet'},
            'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime.now'})
        },
        'certificates.generatedcertificate': {
            'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'GeneratedCertificate'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'default': 'None', 'max_length': '255', 'blank': 'True'}),
            'created_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
            'distinction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'download_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '128', 'blank': 'True'}),
            'download_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
            'error_reason': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'grade': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '5', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '32'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'unavailable'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'verify_uuid': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['certificates']
agpl-3.0
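The `models` dict in the migration above is South's frozen-ORM snapshot: each key is an `app.model` label, and each field maps to a `(field class path, positional args, kwargs)` triple. A minimal standalone sketch of walking such a snapshot (plain Python, no Django or South needed; `frozen_models` below is a hypothetical one-model excerpt, not the full dict):

# Summarize a South frozen-ORM snapshot: model label, then field name,
# field class, and the frozen keyword arguments.
frozen_models = {
    'certificates.certificatewhitelist': {
        'Meta': {'object_name': 'CertificateWhitelist'},
        'course_id': ('xmodule_django.models.CourseKeyField', [],
                      {'default': 'None', 'max_length': '255', 'blank': 'True'}),
        'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'user': ('django.db.models.fields.related.ForeignKey', [],
                 {'to': "orm['auth.User']"}),
        'whitelist': ('django.db.models.fields.BooleanField', [],
                      {'default': 'False'}),
    },
}

for label, fields in sorted(frozen_models.items()):
    print('%s -> %s' % (label, fields['Meta'].get('object_name')))
    for name, spec in sorted(fields.items()):
        if name == 'Meta':
            continue
        field_path, _args, kwargs = spec
        print('    %-10s %-15s %s' % (name, field_path.rsplit('.', 1)[-1], kwargs))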
gudcjfdldu/volatility
volatility/plugins/gui/windowstations.py
44
5265
# Volatility # Copyright (C) 2007-2013 Volatility Foundation # Copyright (C) 2010,2011,2012 Michael Hale Ligh <michael.ligh@mnin.org> # # This file is part of Volatility. # # Volatility is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # Volatility is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Volatility. If not, see <http://www.gnu.org/licenses/>. # import volatility.obj as obj import volatility.scan as scan import volatility.utils as utils import volatility.plugins.filescan as filescan import volatility.plugins.common as common import volatility.plugins.gui.sessions as sessions class PoolScanWind(scan.PoolScanner): """PoolScanner for window station objects""" def object_offset(self, found, address_space): """ This returns the offset of the object contained within this pool allocation. """ pool_base = found - \ self.buffer.profile.get_obj_offset('_POOL_HEADER', 'PoolTag') pool_obj = obj.Object("_POOL_HEADER", vm = address_space, offset = pool_base) pool_alignment = obj.VolMagic(address_space).PoolAlignment.v() object_base = (pool_base + pool_obj.BlockSize * pool_alignment - common.pool_align(address_space, 'tagWINDOWSTATION', pool_alignment)) return object_base checks = [ ('PoolTagCheck', dict(tag = "Win\xe4")), # seen as 0x98 on xpsp2 and xpsp3, 0x90 on w2k3*, 0xa0 on w7sp0 ('CheckPoolSize', dict(condition = lambda x: x >= 0x90)), # only look in non-paged or free pools ('CheckPoolType', dict(paged = False, non_paged = True, free = True)), ('CheckPoolIndex', dict(value = 0)), ] class WndScan(filescan.FileScan, sessions.SessionsMixin): """Pool scanner for tagWINDOWSTATION (window stations)""" def calculate(self): flat_space = utils.load_as(self._config, astype = 'physical') kernel_space = utils.load_as(self._config) # Scan for window station objects for offset in PoolScanWind().scan(flat_space): window_station = obj.Object("tagWINDOWSTATION", offset = offset, vm = flat_space) # Basic sanity checks are included here if not window_station.is_valid(): continue # Find an address space for this window station's session session = self.find_session_space( kernel_space, window_station.dwSessionId) if not session: continue # Reset the object's native VM so pointers are # dereferenced in session space window_station.set_native_vm(session.obj_vm) for winsta in window_station.traverse(): if winsta.is_valid(): yield winsta def render_text(self, outfd, data): seen = [] for window_station in data: offset = window_station.PhysicalAddress if offset in seen: continue seen.append(offset) outfd.write("*" * 50 + "\n") outfd.write("WindowStation: {0:#x}, Name: {1}, Next: {2:#x}\n".format( offset, window_station.Name, window_station.rpwinstaNext.v(), )) outfd.write("SessionId: {0}, AtomTable: {1:#x}, Interactive: {2}\n".format( window_station.dwSessionId, window_station.pGlobalAtomTable, window_station.Interactive, )) outfd.write("Desktops: {0}\n".format( ', '.join([desk.Name for desk in window_station.desktops()]) )) outfd.write("ptiDrawingClipboard: pid {0} tid {1}\n".format( window_station.ptiDrawingClipboard.pEThread.Cid.UniqueProcess, 
window_station.ptiDrawingClipboard.pEThread.Cid.UniqueThread )) outfd.write("spwndClipOpen: {0:#x}, spwndClipViewer: {1:#x} {2} {3}\n".format( window_station.spwndClipOpen.v(), window_station.spwndClipViewer.v(), str(window_station.LastRegisteredViewer.UniqueProcessId or ""), str(window_station.LastRegisteredViewer.ImageFileName or ""), )) outfd.write("cNumClipFormats: {0}, iClipSerialNumber: {1}\n".format( window_station.cNumClipFormats, window_station.iClipSerialNumber, )) outfd.write("pClipBase: {0:#x}, Formats: {1}\n".format( window_station.pClipBase, ",".join([str(clip.fmt) for clip in window_station.pClipBase.dereference()]), ))
gpl-2.0
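The heart of `PoolScanWind.object_offset` above is pointer arithmetic: back up from the found pool tag to the `_POOL_HEADER` base, then locate the object at the end of the allocation. A standalone sketch of that arithmetic with made-up example values (the real plugin reads the tag offset, block size, and aligned object size from the profile):

def object_base(found, pooltag_offset, block_size, pool_alignment, aligned_obj_size):
    # 'found' points at the PoolTag field; back up to the _POOL_HEADER start.
    pool_base = found - pooltag_offset
    # The object sits at the end of the allocation: header base plus the
    # allocation size (BlockSize is in alignment units), minus the aligned
    # size of the object itself.
    return pool_base + block_size * pool_alignment - aligned_obj_size

# Hypothetical numbers: tag found at 0x1004, tag 4 bytes into the header,
# BlockSize of 0x13 units, 8-byte pool alignment, 0x90-byte aligned object.
print(hex(object_base(0x1004, 0x4, 0x13, 0x8, 0x90)))  # 0x1008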
courtarro/gnuradio
gr-digital/python/digital/qa_ofdm_chanest_vcvc.py
47
13798
#!/usr/bin/env python # Copyright 2012-2014 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # import sys import numpy import random import numpy from gnuradio import gr, gr_unittest, blocks, analog, digital import pmt def shift_tuple(vec, N): """ Shifts a vector by N elements. Fills up with zeros. """ if N > 0: return (0,) * N + tuple(vec[0:-N]) else: N = -N return tuple(vec[N:]) + (0,) * N def rand_range(min_val, max_val): """ Returns a random value (uniform) from the interval min_val, max_val """ return random.random() * (max_val - min_val) + min_val class qa_ofdm_chanest_vcvc (gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () def tearDown (self): self.tb = None def test_001_offset_2sym (self): """ Add a frequency offset, check if it's correctly detected. Also add some random tags and see if they come out at the correct position. """ fft_len = 16 carr_offset = -2 sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) sync_symbol2 = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) tx_data = shift_tuple(sync_symbol1, carr_offset) + \ shift_tuple(sync_symbol2, carr_offset) + \ shift_tuple(data_symbol, carr_offset) tag1 = gr.tag_t() tag1.offset = 0 tag1.key = pmt.string_to_symbol("test_tag_1") tag1.value = pmt.from_long(23) tag2 = gr.tag_t() tag2.offset = 2 tag2.key = pmt.string_to_symbol("test_tag_2") tag2.value = pmt.from_long(42) src = blocks.vector_source_c(tx_data, False, fft_len, (tag1, tag2)) chanest = digital.ofdm_chanest_vcvc(sync_symbol1, sync_symbol2, 1) sink = blocks.vector_sink_c(fft_len) self.tb.connect(src, chanest, sink) self.tb.run() self.assertEqual(shift_tuple(sink.data(), -carr_offset), data_symbol) tags = sink.tags() ptags = {} for tag in tags: ptag = gr.tag_to_python(tag) ptags[ptag.key] = (ptag.value, ptag.offset) if ptag.key == 'ofdm_sync_chan_taps': ptags[ptag.key] = (None, ptag.offset) expected_tags = { 'ofdm_sync_carr_offset': (-2, 0), 'ofdm_sync_chan_taps': (None, 0), 'test_tag_1': (23, 0), 'test_tag_2': (42, 0), } self.assertEqual(ptags, expected_tags) def test_002_offset_1sym (self): """ Add a frequency offset, check if it's correctly detected. Difference to previous test is, it only uses one synchronisation symbol. """ fft_len = 16 carr_offset = -2 # This will not correct for +2 because it thinks carrier 14 is used # (because of interpolation) sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) tx_data = shift_tuple(sync_symbol, carr_offset) + \ shift_tuple(data_symbol, carr_offset) src = blocks.vector_source_c(tx_data, False, fft_len) # 17 is out of bounds! 
chanest = digital.ofdm_chanest_vcvc(sync_symbol, (), 1, 0, 17) sink = blocks.vector_sink_c(fft_len) self.tb.connect(src, chanest, sink) self.tb.run() self.assertEqual(shift_tuple(sink.data(), -carr_offset), data_symbol) tags = sink.tags() for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': carr_offset_hat = pmt.to_long(tag.value) self.assertEqual(pmt.to_long(tag.value), carr_offset) def test_003_channel_no_carroffset (self): """ Add a channel, check if it's correctly estimated """ fft_len = 16 carr_offset = 0 sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) sync_symbol2 = (0, 0, 0, 1j, -1, 1, -1j, 1j, 0, 1, -1j, -1, -1j, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) tx_data = sync_symbol1 + sync_symbol2 + data_symbol channel = (0, 0, 0, 2, -2, 2, 3j, 2, 0, 2, 2, 2, 2, 3, 0, 0) src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) chanest = digital.ofdm_chanest_vcvc(sync_symbol1, sync_symbol2, 1) sink = blocks.vector_sink_c(fft_len) sink_chanest = blocks.vector_sink_c(fft_len) self.tb.connect(src, chan, chanest, sink) self.tb.connect((chanest, 1), sink_chanest) self.tb.run() tags = sink.tags() self.assertEqual(shift_tuple(sink.data(), -carr_offset), tuple(numpy.multiply(data_symbol, channel))) for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': self.assertEqual(pmt.to_long(tag.value), carr_offset) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': self.assertEqual(pmt.c32vector_elements(tag.value), channel) self.assertEqual(sink_chanest.data(), channel) def test_004_channel_no_carroffset_1sym (self): """ Add a channel, check if it's correctly estimated. Only uses 1 synchronisation symbol. """ fft_len = 16 carr_offset = 0 sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) tx_data = sync_symbol + data_symbol channel = (0, 0, 0, 2, 2, 2, 2, 3, 3, 2.5, 2.5, -3, -3, 1j, 1j, 0) #channel = (0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) chanest = digital.ofdm_chanest_vcvc(sync_symbol, (), 1) sink = blocks.vector_sink_c(fft_len) sink_chanest = blocks.vector_sink_c(fft_len) self.tb.connect(src, chan, chanest, sink) self.tb.connect((chanest, 1), sink_chanest) self.tb.run() self.assertEqual(sink_chanest.data(), channel) tags = sink.tags() for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': self.assertEqual(pmt.to_long(tag.value), carr_offset) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': self.assertEqual(pmt.c32vector_elements(tag.value), channel) def test_005_both_1sym_force (self): """ Add a channel, check if it's correctly estimated. Only uses 1 synchronisation symbol. 
""" fft_len = 16 carr_offset = 0 sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) ref_symbol = (0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) tx_data = sync_symbol + data_symbol channel = (0, 0, 0, 2, 2, 2, 2.5, 3, 2.5, 2, 2.5, 3, 2, 1, 1, 0) src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) chanest = digital.ofdm_chanest_vcvc(sync_symbol, ref_symbol, 1) sink = blocks.vector_sink_c(fft_len) self.tb.connect(src, chan, chanest, sink) self.tb.run() tags = sink.tags() for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': self.assertEqual(pmt.to_long(tag.value), carr_offset) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': self.assertEqual(pmt.c32vector_elements(tag.value), channel) def test_006_channel_and_carroffset (self): """ Add a channel, check if it's correctly estimated """ fft_len = 16 carr_offset = 2 # Index 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 sync_symbol1 = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0) sync_symbol2 = (0, 0, 0, 1j, -1, 1, -1j, 1j, 0, 1, -1j, -1, -1j, 1, 0, 0) data_symbol = (0, 0, 0, 1, -1, 1, -1, 1, 0, 1, -1, -1, -1, 1, 0, 0) # Channel 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 # Shifted (0, 0, 0, 0, 0, 1j, -1, 1, -1j, 1j, 0, 1, -1j, -1, -1j, 1) chanest_exp = (0, 0, 0, 5, 6, 7, 8, 9, 0, 11, 12, 13, 14, 15, 0, 0) tx_data = shift_tuple(sync_symbol1, carr_offset) + \ shift_tuple(sync_symbol2, carr_offset) + \ shift_tuple(data_symbol, carr_offset) channel = range(fft_len) src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) chanest = digital.ofdm_chanest_vcvc(sync_symbol1, sync_symbol2, 1) sink = blocks.vector_sink_c(fft_len) self.tb.connect(src, chan, chanest, sink) self.tb.run() tags = sink.tags() chan_est = None for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': self.assertEqual(pmt.to_long(tag.value), carr_offset) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': chan_est = pmt.c32vector_elements(tag.value) self.assertEqual(chan_est, chanest_exp) self.assertEqual(sink.data(), tuple(numpy.multiply(shift_tuple(data_symbol, carr_offset), channel))) def test_999_all_at_once(self): """docstring for test_999_all_at_once""" fft_len = 32 # 6 carriers empty, 10 carriers full, 1 DC carrier, 10 carriers full, 5 carriers empty syncsym_mask = (0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0) carrier_mask = (0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0) max_offset = 4 wgn_amplitude = 0.05 min_chan_ampl = 0.1 max_chan_ampl = 5 n_iter = 20 # The more the accurater def run_flow_graph(sync_sym1, sync_sym2, data_sym): top_block = gr.top_block() carr_offset = random.randint(-max_offset/2, max_offset/2) * 2 tx_data = shift_tuple(sync_sym1, carr_offset) + \ shift_tuple(sync_sym2, carr_offset) + \ shift_tuple(data_sym, carr_offset) channel = [rand_range(min_chan_ampl, max_chan_ampl) * numpy.exp(1j * rand_range(0, 2 * numpy.pi)) for x in range(fft_len)] src = blocks.vector_source_c(tx_data, False, fft_len) chan = blocks.multiply_const_vcc(channel) noise = analog.noise_source_c(analog.GR_GAUSSIAN, wgn_amplitude) add = blocks.add_cc(fft_len) chanest = digital.ofdm_chanest_vcvc(sync_sym1, sync_sym2, 1) sink = blocks.vector_sink_c(fft_len) top_block.connect(src, chan, (add, 0), chanest, sink) top_block.connect(noise, 
blocks.stream_to_vector(gr.sizeof_gr_complex, fft_len), (add, 1)) top_block.run() channel_est = None carr_offset_hat = 0 rx_sym_est = [0,] * fft_len tags = sink.tags() for tag in tags: if pmt.symbol_to_string(tag.key) == 'ofdm_sync_carr_offset': carr_offset_hat = pmt.to_long(tag.value) self.assertEqual(carr_offset, carr_offset_hat) if pmt.symbol_to_string(tag.key) == 'ofdm_sync_chan_taps': channel_est = shift_tuple(pmt.c32vector_elements(tag.value), carr_offset) shifted_carrier_mask = shift_tuple(carrier_mask, carr_offset) for i in range(fft_len): if shifted_carrier_mask[i] and channel_est[i]: self.assertAlmostEqual(channel[i], channel_est[i], places=0) rx_sym_est[i] = (sink.data()[i] / channel_est[i]).real return (carr_offset, list(shift_tuple(rx_sym_est, -carr_offset_hat))) bit_errors = 0 for k in xrange(n_iter): sync_sym = [(random.randint(0, 1) * 2 - 1) * syncsym_mask[i] for i in range(fft_len)] ref_sym = [(random.randint(0, 1) * 2 - 1) * carrier_mask[i] for i in range(fft_len)] data_sym = [(random.randint(0, 1) * 2 - 1) * carrier_mask[i] for i in range(fft_len)] data_sym[26] = 1 (carr_offset, rx_sym) = run_flow_graph(sync_sym, ref_sym, data_sym) rx_sym_est = [0,] * fft_len for i in xrange(fft_len): if carrier_mask[i] == 0: continue rx_sym_est[i] = {True: 1, False: -1}[rx_sym[i] > 0] if rx_sym_est[i] != data_sym[i]: bit_errors += 1 # This is much more than we could allow self.assertTrue(bit_errors < n_iter) if __name__ == '__main__': gr_unittest.run(qa_ofdm_chanest_vcvc, "qa_ofdm_chanest_vcvc.xml")
gpl-3.0
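The tests above lean on `shift_tuple` to emulate a carrier-frequency offset in the frequency domain. A standalone round-trip check (same helper as in the file; the round trip works here because the outermost carriers of the sync symbol are zero):

def shift_tuple(vec, N):
    """Shift a vector by N elements (right for N > 0), zero-filling."""
    if N > 0:
        return (0,) * N + tuple(vec[0:-N])
    else:
        N = -N
        return tuple(vec[N:]) + (0,) * N

sync_symbol = (0, 0, 0, 1, 0, 1, 0, -1, 0, 1, 0, -1, 0, 1, 0, 0)
shifted = shift_tuple(sync_symbol, -2)          # what carr_offset = -2 does
assert shift_tuple(shifted, 2) == sync_symbol   # undone by the opposite shift
print(shifted)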
tailhook/elasticsearch-py
test_elasticsearch/test_client/__init__.py
11
2996
from __future__ import unicode_literals

from elasticsearch.client import _normalize_hosts, Elasticsearch

from ..test_cases import TestCase, ElasticsearchTestCase


class TestNormalizeHosts(TestCase):
    def test_none_uses_defaults(self):
        self.assertEquals([{}], _normalize_hosts(None))

    def test_strings_are_used_as_hostnames(self):
        self.assertEquals([{"host": "elastic.co"}], _normalize_hosts(["elastic.co"]))

    def test_strings_are_parsed_for_port_and_user(self):
        self.assertEquals(
            [{"host": "elastic.co", "port": 42}, {"host": "elastic.co", "http_auth": "user:secret"}],
            _normalize_hosts(["elastic.co:42", "user:secret@elastic.co"])
        )

    def test_strings_are_parsed_for_scheme(self):
        self.assertEquals(
            [
                {
                    "host": "elastic.co",
                    "port": 42,
                    "use_ssl": True,
                    'scheme': 'http'
                },
                {
                    "host": "elastic.co",
                    "http_auth": "user:secret",
                    "use_ssl": True,
                    "port": 443,
                    'scheme': 'http',
                    'url_prefix': '/prefix'
                }
            ],
            _normalize_hosts(["https://elastic.co:42", "https://user:secret@elastic.co/prefix"])
        )

    def test_dicts_are_left_unchanged(self):
        self.assertEquals([{"host": "local", "extra": 123}], _normalize_hosts([{"host": "local", "extra": 123}]))

    def test_single_string_is_wrapped_in_list(self):
        self.assertEquals(
            [{"host": "elastic.co"}],
            _normalize_hosts("elastic.co")
        )


class TestClient(ElasticsearchTestCase):
    def test_request_timeout_is_passed_through_unescaped(self):
        self.client.ping(request_timeout=.1)
        calls = self.assert_url_called('HEAD', '/')
        self.assertEquals([({'request_timeout': .1}, None)], calls)

    def test_from_in_search(self):
        self.client.search(index='i', doc_type='t', from_=10)
        calls = self.assert_url_called('GET', '/i/t/_search')
        self.assertEquals([({'from': '10'}, None)], calls)

    def test_repr_contains_hosts(self):
        self.assertEquals('<Elasticsearch([{}])>', repr(self.client))

    def test_repr_contains_hosts_passed_in(self):
        self.assertIn("es.org", repr(Elasticsearch(['es.org:123'])))

    def test_repr_truncates_host_to_10(self):
        hosts = [{"host": "es" + str(i)} for i in range(20)]
        self.assertNotIn("es5", repr(Elasticsearch(hosts)))

    def test_index_uses_post_if_id_is_empty(self):
        self.client.index(index='my-index', doc_type='test-doc', id='', body={})
        self.assert_url_called('POST', '/my-index/test-doc')

    def test_index_uses_put_if_id_is_not_empty(self):
        self.client.index(index='my-index', doc_type='test-doc', id=0, body={})
        self.assert_url_called('PUT', '/my-index/test-doc/0')
apache-2.0
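A rough standalone sketch of the behavior the `TestNormalizeHosts` cases pin down above. This is illustrative only, not the real `_normalize_hosts` (it skips the `scheme` and `url_prefix` bookkeeping the tests also assert):

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2

def normalize_host(host):
    if '//' not in host:
        host = '//' + host             # let urlparse treat it as a netloc
    parts = urlparse(host)
    out = {'host': parts.hostname}
    if parts.port:
        out['port'] = parts.port
    if parts.username:
        out['http_auth'] = '%s:%s' % (parts.username, parts.password)
    if parts.scheme == 'https':
        out['use_ssl'] = True
    return out

print(normalize_host('elastic.co:42'))           # {'host': 'elastic.co', 'port': 42}
print(normalize_host('user:secret@elastic.co'))  # adds 'http_auth': 'user:secret'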
sharmaeklavya2/zulip
zerver/lib/type_debug.py
23
3071
from __future__ import print_function

import sys
import functools
from typing import Any, Callable, IO, Mapping, Sequence, TypeVar, Text

def get_mapping_type_str(x):
    # type: (Mapping) -> str
    container_type = type(x).__name__
    if not x:
        if container_type == 'dict':
            return '{}'
        else:
            return container_type + '([])'
    key = next(iter(x))
    key_type = get_type_str(key)
    value_type = get_type_str(x[key])
    if container_type == 'dict':
        if len(x) == 1:
            return '{%s: %s}' % (key_type, value_type)
        else:
            return '{%s: %s, ...}' % (key_type, value_type)
    else:
        if len(x) == 1:
            return '%s([(%s, %s)])' % (container_type, key_type, value_type)
        else:
            return '%s([(%s, %s), ...])' % (container_type, key_type, value_type)

def get_sequence_type_str(x):
    # type: (Sequence) -> str
    container_type = type(x).__name__
    if not x:
        if container_type == 'list':
            return '[]'
        else:
            return container_type + '([])'
    elem_type = get_type_str(x[0])
    if container_type == 'list':
        if len(x) == 1:
            return '[' + elem_type + ']'
        else:
            return '[' + elem_type + ', ...]'
    else:
        if len(x) == 1:
            return '%s([%s])' % (container_type, elem_type)
        else:
            return '%s([%s, ...])' % (container_type, elem_type)

expansion_blacklist = [Text, bytes]

def get_type_str(x):
    # type: (Any) -> str
    if x is None:
        return 'None'
    elif isinstance(x, tuple):
        types = []
        for v in x:
            types.append(get_type_str(v))
        if len(x) == 1:
            return '(' + types[0] + ',)'
        else:
            return '(' + ', '.join(types) + ')'
    elif isinstance(x, Mapping):
        return get_mapping_type_str(x)
    elif isinstance(x, Sequence) and not any(isinstance(x, t) for t in expansion_blacklist):
        return get_sequence_type_str(x)
    else:
        return type(x).__name__

FuncT = TypeVar('FuncT', bound=Callable)

def print_types_to(file_obj):
    # type: (IO[str]) -> Callable[[FuncT], FuncT]
    def decorator(func):
        # type: (FuncT) -> FuncT
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # type: (*Any, **Any) -> Any
            arg_types = [get_type_str(arg) for arg in args]
            kwarg_types = [key + "=" + get_type_str(value) for key, value in kwargs.items()]
            ret_val = func(*args, **kwargs)
            output = "%s(%s) -> %s" % (func.__name__,
                                       ", ".join(arg_types + kwarg_types),
                                       get_type_str(ret_val))
            print(output, file=file_obj)
            return ret_val
        return wrapper  # type: ignore # https://github.com/python/mypy/issues/1927
    return decorator

def print_types(func):
    # type: (FuncT) -> FuncT
    return print_types_to(sys.stdout)(func)  # type: ignore # https://github.com/python/mypy/issues/1551
apache-2.0
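A usage sketch for the decorator above, assuming the module is importable under the path shown in the record. The commented line is what the wrapper's format string should produce for these inputs:

from zerver.lib.type_debug import print_types

@print_types
def fmt(name, ages):
    return {name: sum(ages)}

fmt('alice', [1, 2, 3])
# expected output: fmt(str, [int, ...]) -> {str: int}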
pratikmallya/python-swiftclient
tests/unit/test_command_helpers.py
6
8523
# Copyright (c) 2010-2013 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. try: from unittest import mock except ImportError: import mock from six import StringIO import testtools from swiftclient import command_helpers as h from swiftclient.multithreading import OutputManager class TestStatHelpers(testtools.TestCase): def setUp(self): super(TestStatHelpers, self).setUp() conn_attrs = { 'url': 'http://storage/v1/a', 'token': 'tk12345', } self.conn = mock.MagicMock(**conn_attrs) self.options = {'human': False, 'verbose': 1} self.stdout = StringIO() self.stderr = StringIO() self.output_manager = OutputManager(self.stdout, self.stderr) def assertOut(self, expected): real = self.stdout.getvalue() # commonly if we strip of blank lines we have a match try: self.assertEqual(expected.strip('\n'), real.strip('\n')) except AssertionError: # could be anything, try to find typos line by line expected_lines = [line.lstrip() for line in expected.splitlines() if line.strip()] real_lines = [line.lstrip() for line in real.splitlines() if line.strip()] for expected, real in zip(expected_lines, real_lines): self.assertEqual(expected, real) # not a typo, might be an indent thing, hopefully you can spot it raise def test_stat_account_human(self): self.options['human'] = True # stub head_account stub_headers = { 'x-account-container-count': 42, 'x-account-object-count': 1000000, 'x-account-bytes-used': 2 ** 30, } self.conn.head_account.return_value = stub_headers with self.output_manager as output_manager: items, headers = h.stat_account(self.conn, self.options) h.print_account_stats(items, headers, output_manager) expected = """ Account: a Containers: 42 Objects: 976K Bytes: 1.0G """ self.assertOut(expected) def test_stat_account_verbose(self): self.options['verbose'] += 1 # stub head_account stub_headers = { 'x-account-container-count': 42, 'x-account-object-count': 1000000, 'x-account-bytes-used': 2 ** 30, } self.conn.head_account.return_value = stub_headers with self.output_manager as output_manager: items, headers = h.stat_account(self.conn, self.options) h.print_account_stats(items, headers, output_manager) expected = """ StorageURL: http://storage/v1/a Auth Token: tk12345 Account: a Containers: 42 Objects: 1000000 Bytes: 1073741824 """ self.assertOut(expected) def test_stat_account_policy_stat(self): # stub head_account stub_headers = { 'x-account-container-count': 42, 'x-account-object-count': 1000000, 'x-account-bytes-used': 2 ** 30, 'x-account-storage-policy-nada-object-count': 1000000, 'x-account-storage-policy-nada-bytes-used': 2 ** 30, } self.conn.head_account.return_value = stub_headers with self.output_manager as output_manager: items, headers = h.stat_account(self.conn, self.options) h.print_account_stats(items, headers, output_manager) expected = """ Account: a Containers: 42 Objects: 1000000 Bytes: 1073741824 Objects in policy "nada": 1000000 Bytes in policy "nada": 1073741824 """ self.assertOut(expected) def test_stat_account_policy_stat_with_container_counts(self): # stub head_account 
stub_headers = { 'x-account-container-count': 42, 'x-account-object-count': 1000000, 'x-account-bytes-used': 2 ** 30, 'x-account-storage-policy-nada-container-count': 10, 'x-account-storage-policy-nada-object-count': 1000000, 'x-account-storage-policy-nada-bytes-used': 2 ** 30, } self.conn.head_account.return_value = stub_headers with self.output_manager as output_manager: items, headers = h.stat_account(self.conn, self.options) h.print_account_stats(items, headers, output_manager) expected = """ Account: a Containers: 42 Objects: 1000000 Bytes: 1073741824 Containers in policy "nada": 10 Objects in policy "nada": 1000000 Bytes in policy "nada": 1073741824 """ self.assertOut(expected) def test_stat_container_human(self): self.options['human'] = True # stub head container request stub_headers = { 'x-container-object-count': 10 ** 6, 'x-container-bytes-used': 2 ** 30, } self.conn.head_container.return_value = stub_headers args = ('c',) with self.output_manager as output_manager: items, headers = h.stat_container(self.conn, self.options, *args) h.print_container_stats(items, headers, output_manager) expected = """ Account: a Container: c Objects: 976K Bytes: 1.0G Read ACL: Write ACL: Sync To: Sync Key: """ self.assertOut(expected) def test_stat_container_verbose(self): self.options['verbose'] += 1 # stub head container request stub_headers = { 'x-container-object-count': 10 ** 6, 'x-container-bytes-used': 2 ** 30, } self.conn.head_container.return_value = stub_headers args = ('c',) with self.output_manager as output_manager: items, headers = h.stat_container(self.conn, self.options, *args) h.print_container_stats(items, headers, output_manager) expected = """ URL: http://storage/v1/a/c Auth Token: tk12345 Account: a Container: c Objects: 1000000 Bytes: 1073741824 Read ACL: Write ACL: Sync To: Sync Key: """ self.assertOut(expected) def test_stat_object_human(self): self.options['human'] = True # stub head object request stub_headers = { 'content-length': 2 ** 20, 'x-object-meta-color': 'blue', 'etag': '68b329da9893e34099c7d8ad5cb9c940', 'content-encoding': 'gzip', } self.conn.head_object.return_value = stub_headers args = ('c', 'o') with self.output_manager as output_manager: items, headers = h.stat_object(self.conn, self.options, *args) h.print_object_stats(items, headers, output_manager) expected = """ Account: a Container: c Object: o Content Length: 1.0M ETag: 68b329da9893e34099c7d8ad5cb9c940 Meta Color: blue Content-Encoding: gzip """ self.assertOut(expected) def test_stat_object_verbose(self): self.options['verbose'] += 1 # stub head object request stub_headers = { 'content-length': 2 ** 20, 'x-object-meta-color': 'blue', 'etag': '68b329da9893e34099c7d8ad5cb9c940', 'content-encoding': 'gzip', } self.conn.head_object.return_value = stub_headers args = ('c', 'o') with self.output_manager as output_manager: items, headers = h.stat_object(self.conn, self.options, *args) h.print_object_stats(items, headers, output_manager) expected = """ URL: http://storage/v1/a/c/o Auth Token: tk12345 Account: a Container: c Object: o Content Length: 1048576 ETag: 68b329da9893e34099c7d8ad5cb9c940 Meta Color: blue Content-Encoding: gzip """ self.assertOut(expected)
apache-2.0
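Why the "human" tests above expect 1000000 objects to print as 976K and 2**30 bytes as 1.0G: the formatting is binary (1024-based), with one decimal place below 10 and truncation above. A minimal re-implementation sketch, not swiftclient's actual helper:

def human(value):
    suffixes = 'KMGTPEZY'
    value = float(value)
    index = -1
    while value >= 1024 and index + 1 < len(suffixes):
        value /= 1024.0
        index += 1
    if index < 0:
        return '%d' % value                        # small values stay unsuffixed
    if value < 10:
        return '%.1f%s' % (value, suffixes[index])
    return '%d%s' % (value, suffixes[index])       # %d truncates the float

print(human(1000000))  # 976K  (1000000 / 1024 = 976.5625)
print(human(2 ** 30))  # 1.0G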
robinro/ansible
lib/ansible/modules/network/nxos/nxos_aaa_server_host.py
59
11475
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.0', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: nxos_aaa_server_host extends_documentation_fragment: nxos version_added: "2.2" short_description: Manages AAA server host-specific configuration. description: - Manages AAA server host-specific configuration. author: Jason Edelman (@jedelman8) notes: - Changes to the AAA server host key (shared secret) are not idempotent. - If C(state=absent) removes the whole host configuration. options: server_type: description: - The server type is either radius or tacacs. required: true choices: ['radius', 'tacacs'] address: description: - Address or name of the radius or tacacs host. required: true key: description: - Shared secret for the specified host. required: false default: null encrypt_type: description: - The state of encryption applied to the entered key. O for clear text, 7 for encrypted. Type-6 encryption is not supported. required: false default: null choices: ['0', '7'] host_timeout: description: - Timeout period for specified host, in seconds. Range is 1-60. required: false default: null auth_port: description: - Alternate UDP port for RADIUS authentication. required: false default: null acct_port: description: - Alternate UDP port for RADIUS accounting. required: false default: null tacacs_port: description: - Alternate TCP port TACACS Server. required: false default: null state: description: - Manage the state of the resource. 
required: false default: present choices: ['present','absent'] ''' EXAMPLES = ''' # Radius Server Host Basic settings - name: "Radius Server Host Basic settings" nxos_aaa_server_host: state: present server_type: radius address: 1.2.3.4 acct_port: 2084 host_timeout: 10 host: "{{ inventory_hostname }}" username: "{{ un }}" password: "{{ pwd }}" # Radius Server Host Key Configuration - name: "Radius Server Host Key Configuration" nxos_aaa_server_host: state: present server_type: radius address: 1.2.3.4 key: hello encrypt_type: 7 host: inventory_hostname }} username: "{{ un }}" password: "{{ pwd }}" # TACACS Server Host Configuration - name: "Tacacs Server Host Configuration" nxos_aaa_server_host: state: present server_type: tacacs tacacs_port: 89 host_timeout: 10 address: 5.6.7.8 host: inventory_hostname }} username: un }} password: pwd }} ''' RETURN = ''' proposed: description: k/v pairs of parameters passed into module returned: always type: dict sample: {"address": "1.2.3.4", "auth_port": "2084", "host_timeout": "10", "server_type": "radius"} existing: description: - k/v pairs of existing configuration returned: always type: dict sample: {} end_state: description: k/v pairs of configuration after module execution returned: always type: dict sample: {"address": "1.2.3.4", "auth_port": "2084", "host_timeout": "10", "server_type": "radius"} updates: description: command sent to the device returned: always type: list sample: ["radius-server host 1.2.3.4 auth-port 2084 timeout 10"] changed: description: check to see if a change was made on the device returned: always type: boolean sample: true ''' import re from ansible.module_utils.nxos import load_config, run_commands from ansible.module_utils.nxos import nxos_argument_spec, check_args from ansible.module_utils.basic import AnsibleModule def execute_show_command(command, module, command_type='cli_show'): if module.params['transport'] == 'cli': if 'show run' not in command: command += ' | json' cmds = [command] body = run_commands(module, cmds) elif module.params['transport'] == 'nxapi': cmds = [command] body = run_commands(module, cmds) return body def flatten_list(command_lists): flat_command_list = [] for command in command_lists: if isinstance(command, list): flat_command_list.extend(command) else: flat_command_list.append(command) return flat_command_list def _match_dict(match_list, key_map): no_blanks = [] match_dict = {} for match_set in match_list: match_set = tuple(v for v in match_set if v) no_blanks.append(match_set) for info in no_blanks: words = info[0].strip().split() length = len(words) alt_key = key_map.get(words[0]) first = alt_key or words[0] last = words[length - 1] match_dict[first] = last.replace('\"', '') return match_dict def get_aaa_host_info(module, server_type, address): aaa_host_info = {} command = 'show run | inc {0}-server.host.{1}'.format(server_type, address) body = execute_show_command(command, module, command_type='cli_show_ascii') if body: try: pattern = ('(acct-port \d+)|(timeout \d+)|(auth-port \d+)|' '(key 7 "\w+")|( port \d+)') raw_match = re.findall(pattern, body[0]) aaa_host_info = _match_dict(raw_match, {'acct-port': 'acct_port', 'auth-port': 'auth_port', 'port': 'tacacs_port', 'timeout': 'host_timeout'}) if aaa_host_info: aaa_host_info['server_type'] = server_type aaa_host_info['address'] = address except TypeError: return {} else: return {} return aaa_host_info def config_aaa_host(server_type, address, params, clear=False): cmds = [] if clear: cmds.append('no {0}-server host {1}'.format(server_type, 
address)) cmd_str = '{0}-server host {1}'.format(server_type, address) key = params.get('key') enc_type = params.get('encrypt_type', '') host_timeout = params.get('host_timeout') auth_port = params.get('auth_port') acct_port = params.get('acct_port') port = params.get('tacacs_port') if auth_port: cmd_str += ' auth-port {0}'.format(auth_port) if acct_port: cmd_str += ' acct-port {0}'.format(acct_port) if port: cmd_str += ' port {0}'.format(port) if host_timeout: cmd_str += ' timeout {0}'.format(host_timeout) if key: cmds.append('{0}-server host {1} key {2} {3}'.format(server_type, address, enc_type, key)) cmds.append(cmd_str) return cmds def main(): argument_spec = dict( server_type=dict(choices=['radius', 'tacacs'], required=True), address=dict(type='str', required=True), key=dict(type='str'), encrypt_type=dict(type='str', choices=['0', '7']), host_timeout=dict(type='str'), auth_port=dict(type='str'), acct_port=dict(type='str'), tacacs_port=dict(type='str'), state=dict(choices=['absent', 'present'], default='present'), ) argument_spec.update(nxos_argument_spec) module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True) warnings = list() check_args(module, warnings) server_type = module.params['server_type'] address = module.params['address'] key = module.params['key'] encrypt_type = module.params['encrypt_type'] host_timeout = module.params['host_timeout'] auth_port = module.params['auth_port'] acct_port = module.params['acct_port'] tacacs_port = module.params['tacacs_port'] state = module.params['state'] args = dict(server_type=server_type, address=address, key=key, encrypt_type=encrypt_type, host_timeout=host_timeout, auth_port=auth_port, acct_port=acct_port, tacacs_port=tacacs_port) proposed = dict((k, v) for k, v in args.items() if v is not None) changed = False if encrypt_type and not key: module.fail_json(msg='encrypt_type must be used with key') if tacacs_port and server_type != 'tacacs': module.fail_json( msg='tacacs_port can only be used with server_type=tacacs') if (auth_port or acct_port) and server_type != 'radius': module.fail_json(msg='auth_port and acct_port can only be used' 'when server_type=radius') existing = get_aaa_host_info(module, server_type, address) end_state = existing commands = [] if state == 'present': host_timeout = proposed.get('host_timeout') if host_timeout: try: if int(host_timeout) < 1 or int(host_timeout) > 60: raise ValueError except ValueError: module.fail_json( msg='host_timeout must be an integer between 1 and 60') delta = dict( set(proposed.items()).difference(existing.items())) if delta: union = existing.copy() union.update(delta) command = config_aaa_host(server_type, address, union) if command: commands.append(command) elif state == 'absent': intersect = dict( set(proposed.items()).intersection(existing.items())) if intersect.get('address') and intersect.get('server_type'): command = 'no {0}-server host {1}'.format( intersect.get('server_type'), intersect.get('address')) commands.append(command) cmds = flatten_list(commands) if cmds: if module.check_mode: module.exit_json(changed=True, commands=cmds) else: changed = True load_config(module, cmds) end_state = get_aaa_host_info(module, server_type, address) results = {} results['proposed'] = proposed results['existing'] = existing results['updates'] = cmds results['changed'] = changed results['warnings'] = warnings results['end_state'] = end_state module.exit_json(**results) if __name__ == '__main__': main()
gpl-3.0
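The idempotency core of `main()` above, distilled: proposed and existing parameters are diffed as sets of key/value pairs, and CLI commands are generated only for the delta. A standalone demonstration with example values:

proposed = {'address': '1.2.3.4', 'auth_port': '2084', 'host_timeout': '10'}
existing = {'address': '1.2.3.4', 'auth_port': '2084', 'host_timeout': '5'}

# Only pairs present in proposed but absent from existing survive.
delta = dict(set(proposed.items()).difference(existing.items()))
print(delta)   # {'host_timeout': '10'} -> one CLI change needed

# The command line is rebuilt from existing merged with the delta,
# mirroring the union/update step in the module.
union = dict(existing)
union.update(delta)
print(union)   # {'address': '1.2.3.4', 'auth_port': '2084', 'host_timeout': '10'}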
erichegt/askbot-devel
askbot/tasks.py
2
9532
"""Definitions of Celery tasks in Askbot in this module there are two types of functions: * those wrapped with a @task decorator and a ``_celery_task`` suffix - celery tasks * those with the same base name, but without the decorator and the name suffix the actual work units run by the task Celery tasks are special functions in a way that they require all the parameters be serializable - so instead of ORM objects we pass object id's and instead of query sets - lists of ORM object id's. That is the reason for having two types of methods here: * the base methods (those without the decorator and the ``_celery_task`` in the end of the name are work units that are called from the celery tasks. * celery tasks - shells that reconstitute the necessary ORM objects and call the base methods """ import sys import traceback import logging import uuid from django.contrib.contenttypes.models import ContentType from django.template import Context from django.template.loader import get_template from django.utils.translation import ugettext as _ from celery.decorators import task from askbot.conf import settings as askbot_settings from askbot import const from askbot import mail from askbot.models import Post, Thread, User, ReplyAddress from askbot.models.badges import award_badges_signal from askbot.models import get_reply_to_addresses, format_instant_notification_email from askbot import exceptions as askbot_exceptions # TODO: Make exceptions raised inside record_post_update_celery_task() ... # ... propagate upwards to test runner, if only CELERY_ALWAYS_EAGER = True # (i.e. if Celery tasks are not deferred but executed straight away) @task(ignore_result = True) def notify_author_of_published_revision_celery_task(revision): #todo: move this to ``askbot.mail`` module #for answerable email only for now, because #we don't yet have the template for the read-only notification if askbot_settings.REPLY_BY_EMAIL: #generate two reply codes (one for edit and one for addition) #to format an answerable email or not answerable email reply_options = { 'user': revision.author, 'post': revision.post, 'reply_action': 'append_content' } append_content_address = ReplyAddress.objects.create_new( **reply_options ).as_email_address() reply_options['reply_action'] = 'replace_content' replace_content_address = ReplyAddress.objects.create_new( **reply_options ).as_email_address() #populate template context variables reply_code = append_content_address + ',' + replace_content_address if revision.post.post_type == 'question': mailto_link_subject = revision.post.thread.title else: mailto_link_subject = _('An edit for my answer') #todo: possibly add more mailto thread headers to organize messages prompt = _('To add to your post EDIT ABOVE THIS LINE') reply_separator_line = const.SIMPLE_REPLY_SEPARATOR_TEMPLATE % prompt data = { 'site_name': askbot_settings.APP_SHORT_NAME, 'post': revision.post, 'author_email_signature': revision.author.email_signature, 'replace_content_address': replace_content_address, 'reply_separator_line': reply_separator_line, 'mailto_link_subject': mailto_link_subject, 'reply_code': reply_code } #load the template template = get_template('email/notify_author_about_approved_post.html') #todo: possibly add headers to organize messages in threads headers = {'Reply-To': append_content_address} #send the message mail.send_mail( subject_line = _('Your post at %(site_name)s is now published') % data, body_text = template.render(Context(data)), recipient_list = [revision.author.email,], related_object = revision, 
activity_type = const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT, headers = headers ) @task(ignore_result = True) def record_post_update_celery_task( post_id, post_content_type_id, newly_mentioned_user_id_list = None, updated_by_id = None, timestamp = None, created = False, diff = None, ): #reconstitute objects from the database updated_by = User.objects.get(id=updated_by_id) post_content_type = ContentType.objects.get(id=post_content_type_id) post = post_content_type.get_object_for_this_type(id=post_id) newly_mentioned_users = User.objects.filter( id__in=newly_mentioned_user_id_list ) try: notify_sets = post.get_notify_sets( mentioned_users=newly_mentioned_users, exclude_list=[updated_by,] ) #todo: take into account created == True case #update_object is not used (activity_type, update_object) = post.get_updated_activity_data(created) post.issue_update_notifications( updated_by=updated_by, notify_sets=notify_sets, activity_type=activity_type, timestamp=timestamp, diff=diff ) except Exception: # HACK: exceptions from Celery job don't propagate upwards # to the Django test runner # so at least let's print tracebacks print >>sys.stderr, traceback.format_exc() raise @task(ignore_result = True) def record_question_visit( question_post = None, user_id = None, update_view_count = False): """celery task which records question visit by a person updates view counter, if necessary, and awards the badges associated with the question visit """ #1) maybe update the view count #question_post = Post.objects.filter( # id = question_post_id #).select_related('thread')[0] if update_view_count: question_post.thread.increase_view_count() user = User.objects.get(id=user_id) if user.is_anonymous(): return #2) question view count per user and clear response displays #user = User.objects.get(id = user_id) if user.is_authenticated(): #get response notifications user.visit_question(question_post) #3) send award badges signal for any badges #that are awarded for question views award_badges_signal.send(None, event = 'view_question', actor = user, context_object = question_post, ) @task() def send_instant_notifications_about_activity_in_post( update_activity = None, post = None, recipients = None, ): #reload object from the database post = Post.objects.get(id=post.id) if post.is_approved() is False: return if recipients is None: return acceptable_types = const.RESPONSE_ACTIVITY_TYPES_FOR_INSTANT_NOTIFICATIONS if update_activity.activity_type not in acceptable_types: return #calculate some variables used in the loop below update_type_map = const.RESPONSE_ACTIVITY_TYPE_MAP_FOR_TEMPLATES update_type = update_type_map[update_activity.activity_type] origin_post = post.get_origin_post() headers = mail.thread_headers( post, origin_post, update_activity.activity_type ) logger = logging.getLogger() if logger.getEffectiveLevel() <= logging.DEBUG: log_id = uuid.uuid1() message = 'email-alert %s, logId=%s' % (post.get_absolute_url(), log_id) logger.debug(message) else: log_id = None for user in recipients: if user.is_blocked(): continue reply_address, alt_reply_address = get_reply_to_addresses(user, post) subject_line, body_text = format_instant_notification_email( to_user = user, from_user = update_activity.user, post = post, reply_address = reply_address, alt_reply_address = alt_reply_address, update_type = update_type, template = get_template('email/instant_notification.html') ) headers['Reply-To'] = reply_address try: mail.send_mail( subject_line=subject_line, body_text=body_text, recipient_list=[user.email], related_object=origin_post, 
activity_type=const.TYPE_ACTIVITY_EMAIL_UPDATE_SENT, headers=headers, raise_on_failure=True ) except askbot_exceptions.EmailNotSent, error: logger.debug( '%s, error=%s, logId=%s' % (user.email, error, log_id) ) else: logger.debug('success %s, logId=%s' % (user.email, log_id))
gpl-3.0
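A standalone demonstration of the constraint the module docstring describes: Celery task arguments must be serializable, so ORM objects are passed as ids and reconstituted inside the task (`FakePost` below is a stand-in, not an askbot model):

import json

class FakePost(object):      # stand-in for an ORM instance
    def __init__(self, pk):
        self.pk = pk

post = FakePost(42)
try:
    json.dumps({'post': post})           # the object itself: not serializable
except TypeError as error:
    print('rejected: %s' % error)

print(json.dumps({'post_id': post.pk}))  # a primitive id: safe to enqueue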
blighj/django
django/contrib/gis/db/backends/oracle/models.py
69
2084
""" The GeometryColumns and SpatialRefSys models for the Oracle spatial backend. It should be noted that Oracle Spatial does not have database tables named according to the OGC standard, so the closest analogs are used. For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model. """ from django.contrib.gis.db import models from django.contrib.gis.db.backends.base.models import SpatialRefSysMixin class OracleGeometryColumns(models.Model): "Maps to the Oracle USER_SDO_GEOM_METADATA table." table_name = models.CharField(max_length=32) column_name = models.CharField(max_length=1024) srid = models.IntegerField(primary_key=True) # TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY). class Meta: app_label = 'gis' db_table = 'USER_SDO_GEOM_METADATA' managed = False @classmethod def table_name_col(cls): """ Return the name of the metadata column used to store the feature table name. """ return 'table_name' @classmethod def geom_col_name(cls): """ Return the name of the metadata column used to store the feature geometry column. """ return 'column_name' def __str__(self): return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid) class OracleSpatialRefSys(models.Model, SpatialRefSysMixin): "Maps to the Oracle MDSYS.CS_SRS table." cs_name = models.CharField(max_length=68) srid = models.IntegerField(primary_key=True) auth_srid = models.IntegerField() auth_name = models.CharField(max_length=256) wktext = models.CharField(max_length=2046) # Optional geometry representing the bounds of this coordinate # system. By default, all are NULL in the table. cs_bounds = models.PolygonField(null=True) class Meta: app_label = 'gis' db_table = 'CS_SRS' managed = False @property def wkt(self): return self.wktext
bsd-3-clause
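A usage sketch for the models above. It assumes a Django project configured against Oracle Spatial, and the SRID is just an example; because both models set `managed = False`, they only read the existing system views:

from django.contrib.gis.db.backends.oracle.models import OracleSpatialRefSys

srs = OracleSpatialRefSys.objects.get(srid=4326)  # WGS 84, if present
print(srs.cs_name, srs.auth_name, srs.auth_srid)
print(srs.wkt[:60])  # the property above derives this from the wktext column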
huiren/ece511
util/batch/batch.py
90
7895
# Copyright (c) 2006 The Regents of The University of Michigan # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # Authors: Kevin Lim import os, popen2, re, sys class MyPOpen(object): def __init__(self, cmd, input = None, output = None, bufsize = -1): self.status = -1 if input is None: p2c_read, p2c_write = os.pipe() self.tochild = os.fdopen(p2c_write, 'w', bufsize) else: p2c_write = None if isinstance(input, file): p2c_read = input.fileno() elif isinstance(input, str): input = file(input, 'r') p2c_read = input.fileno() elif isinstance(input, int): p2c_read = input else: raise AttributeError if output is None: c2p_read, c2p_write = os.pipe() self.fromchild = os.fdopen(c2p_read, 'r', bufsize) else: c2p_read = None if isinstance(output, file): c2p_write = output.fileno() elif isinstance(output, str): output = file(output, 'w') c2p_write = output.fileno() elif isinstance(output, int): c2p_write = output else: raise AttributeError self.pid = os.fork() if self.pid == 0: os.dup2(p2c_read, sys.stdin.fileno()) os.dup2(c2p_write, sys.stdout.fileno()) os.dup2(c2p_write, sys.stderr.fileno()) try: os.execvp(cmd[0], cmd) finally: os._exit(1) os.close(p2c_read) os.close(c2p_write) def poll(self): if self.status < 0: pid, status = os.waitpid(self.pid, os.WNOHANG) if pid == self.pid: self.status = status return self.status def wait(self): if self.status < 0: pid, status = os.waitpid(self.pid, 0) if pid == self.pid: self.status = status return self.status class oarsub: def __init__(self): self.walltime = None self.queue = None self.properties = None # OAR 2.0 parameters only! 
self.name = None self.afterok = None self.notify = None self.stderr = None self.stdout = None self.oarhost = None self.oarsub = 'oarsub' self.jobid = re.compile('IdJob = (\S+)') #self.outfile = open("jobnames.dat", "a+") def build(self, script, args = []): self.cmd = [ self.oarsub ] print "args:", args print "script:", script if self.properties: self.cmd.append('-p"%s"' % self.properties ) if self.queue: self.cmd.append('-q "%s"' % self.queue) if self.walltime: self.cmd.append('-l walltime=%s' % self.walltime) if script[0] != "/": self.script = os.getcwd() else: self.script = script self.cmd.extend(args) self.cmd.append(self.script) #cmd = [ 'ssh', '-x', self.oarhost, '"cd %s; %s"' % (os.getcwd(), self.command) ] self.command = ' '.join(self.cmd) print "command: [%s]" % self.command def do(self): oar = MyPOpen(self.cmd) self.result = oar.fromchild.read() ec = oar.wait() if ec != 0 and self.oarhost: pstdin, pstdout = os.popen4(self.command) self.result = pstdout.read() jobid = self.jobid.match(self.result) if jobid == None: print "Couldn't get jobid from [%s]" % self.result sys.exit(1) else: #self.outfile.write("%d %s\n" %(int(jobid.group(1)), self.name)); #self.outfile.flush() self.result = jobid.group(1) return 0 class qsub: def __init__(self): self.afterok = None self.hold = False self.join = False self.keep_stdout = False self.keep_stderr = False self.node_type = None self.mail_abort = False self.mail_begin = False self.mail_end = False self.name = None self.stdout = None self.priority = None self.queue = None self.pbshost = None self.qsub = 'qsub' self.env = {} def build(self, script, args = []): self.cmd = [ self.qsub ] if self.env: arg = '-v' arg += ','.join([ '%s=%s' % i for i in self.env.iteritems() ]) self.cmd.append(arg) if self.hold: self.cmd.append('-h') if self.stdout: self.cmd.append('-olocalhost:' + self.stdout) if self.keep_stdout and self.keep_stderr: self.cmd.append('-koe') elif self.keep_stdout: self.cmd.append('-ko') elif self.keep_stderr: self.cmd.append('-ke') else: self.cmd.append('-kn') if self.join: self.cmd.append('-joe') if self.node_type: self.cmd.append('-lnodes=' + self.node_type) if self.mail_abort or self.mail_begin or self.mail_end: flags = '' if self.mail_abort: flags.append('a') if self.mail_begin: flags.append('b') if self.mail_end: flags.append('e') if len(flags): self.cmd.append('-m ' + flags) else: self.cmd.append('-mn') if self.name: self.cmd.append("-N%s" % self.name) if self.priority: self.cmd.append('-p' + self.priority) if self.queue: self.cmd.append('-q' + self.queue) if self.afterok: self.cmd.append('-Wdepend=afterok:%s' % self.afterok) self.cmd.extend(args) self.script = script self.command = ' '.join(self.cmd + [ self.script ]) def do(self): pbs = MyPOpen(self.cmd + [ self.script ]) self.result = pbs.fromchild.read() ec = pbs.wait() if ec != 0 and self.pbshost: cmd = ' '.join(self.cmd + [ '-' ]) cmd = [ 'ssh', '-x', self.pbshost, cmd ] self.command = ' '.join(cmd) ssh = MyPOpen(cmd, input = self.script) self.result = ssh.fromchild.read() ec = ssh.wait() return ec
bsd-3-clause
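What `MyPOpen` above hand-rolls with `fork`/`dup2`/`execvp` (a pipe to the child's stdin, another from its stdout, with stderr merged onto stdout) is roughly what `subprocess.Popen` provides on modern Python. A sketch of the equivalent:

import subprocess

proc = subprocess.Popen(
    ['echo', 'hello'],
    stdin=subprocess.PIPE,        # MyPOpen's tochild pipe
    stdout=subprocess.PIPE,       # MyPOpen's fromchild pipe
    stderr=subprocess.STDOUT,     # MyPOpen dup2's stderr onto stdout too
)
out, _ = proc.communicate()
print(out.decode().strip(), proc.returncode)   # hello 0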
Ll0ir/android_external_chromium
testing/gmock/scripts/generator/cpp/keywords.py
1157
2004
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""C++ keywords and helper utilities for determining keywords."""

__author__ = 'nnorwitz@google.com (Neal Norwitz)'


try:
    # Python 3.x
    import builtins
except ImportError:
    # Python 2.x
    import __builtin__ as builtins


if not hasattr(builtins, 'set'):
    # Nominal support for Python 2.3.
    from sets import Set as set


TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())

CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())

OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())

CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())

ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP


def IsKeyword(token):
    return token in ALL

def IsBuiltinType(token):
    if token in ('virtual', 'inline'):
        # These only apply to methods, they can't be types by themselves.
        return False
    return token in TYPES or token in TYPE_MODIFIERS
bsd-3-clause
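A usage sketch for the helpers above (the import path is guessed from the record's `scripts/generator/cpp/` location):

from cpp import keywords

print(keywords.IsKeyword('virtual'))        # True
print(keywords.IsBuiltinType('virtual'))    # False: method-only modifier
print(keywords.IsBuiltinType('unsigned'))   # True
print(keywords.IsKeyword('nullptr'))        # False: C++11, not in these sets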
BrotherPhil/django
django/views/decorators/clickjacking.py
145
1747
from functools import wraps

from django.utils.decorators import available_attrs


def xframe_options_deny(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'DENY' as long as the response doesn't already have that
    header set.

    e.g.

    @xframe_options_deny
    def some_view(request):
        ...
    """
    def wrapped_view(*args, **kwargs):
        resp = view_func(*args, **kwargs)
        if resp.get('X-Frame-Options') is None:
            resp['X-Frame-Options'] = 'DENY'
        return resp
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)


def xframe_options_sameorigin(view_func):
    """
    Modifies a view function so its response has the X-Frame-Options HTTP
    header set to 'SAMEORIGIN' as long as the response doesn't already have
    that header set.

    e.g.

    @xframe_options_sameorigin
    def some_view(request):
        ...
    """
    def wrapped_view(*args, **kwargs):
        resp = view_func(*args, **kwargs)
        if resp.get('X-Frame-Options') is None:
            resp['X-Frame-Options'] = 'SAMEORIGIN'
        return resp
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)


def xframe_options_exempt(view_func):
    """
    Modifies a view function by setting a response variable that instructs
    XFrameOptionsMiddleware to NOT set the X-Frame-Options HTTP header.

    e.g.

    @xframe_options_exempt
    def some_view(request):
        ...
    """
    def wrapped_view(*args, **kwargs):
        resp = view_func(*args, **kwargs)
        resp.xframe_options_exempt = True
        return resp
    return wraps(view_func, assigned=available_attrs(view_func))(wrapped_view)
bsd-3-clause
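A usage sketch for the decorators above, in a standard Django view (the view name is hypothetical):

from django.http import HttpResponse
from django.views.decorators.clickjacking import xframe_options_deny

@xframe_options_deny
def fragile_view(request):
    return HttpResponse('not frameable')

# Every response from fragile_view now carries X-Frame-Options: DENY,
# unless the view body set that header itself first.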
Hybrid-Cloud/cloud_manager
code/cloudmanager/util/subnet_manager.py
2
3019
# -*- coding:utf-8 -*-
__author__ = 'l00293089@huawei'

import threading
import os
import json

from heat.openstack.common import log as logging
from conf_util import *

LOG = logging.getLogger(__name__)

subnet_manager_lock = threading.Lock()
SUBNET_ID_MAX = 129


class SubnetManager(object):
    def __init__(self):
        pass

    def distribute_subnet_pair(self, api_cidr, tunnel_cidr, data_file):
        subnet_manager_lock.acquire()
        try:
            subnet_info = self.__get_subnet_info__(data_file)
            subnet_id = 0
            for free_id in range(SUBNET_ID_MAX):
                if free_id in subnet_info["allocate"]:
                    continue
                else:
                    subnet_id = free_id
                    break
            # Id 128 is never handed out: it is the exhaustion sentinel, and
            # reserving it also keeps the tunnel octet (subnet_id + 128) <= 255.
            if subnet_id == SUBNET_ID_MAX-1:
                LOG.error("no more subnets can be allocated")
                return None
            subnet_info["allocate"].append(subnet_id)
            api_subnet = ".".join([api_cidr.split('.')[0],
                                   api_cidr.split('.')[1],
                                   '%s',
                                   api_cidr.split('.')[3]])
            tunnel_subnet = ".".join([tunnel_cidr.split('.')[0],
                                      tunnel_cidr.split('.')[1],
                                      '%s',
                                      tunnel_cidr.split('.')[3]])
            api_subnet = api_subnet % subnet_id
            tunnel_subnet = tunnel_subnet % (subnet_id+128)
            self.__write_subnet_info__(subnet_info, data_file)
            return {'external_api_cidr': api_subnet,
                    'tunnel_bearing_cidr': tunnel_subnet}
        except Exception as e:
            LOG.error("distribute subnet pair error, error: %s" % e.message)
        finally:
            subnet_manager_lock.release()

    def release_subnet_pair(self, subnet_pair, data_file):
        subnet_manager_lock.acquire()
        try:
            api_subnet = subnet_pair['external_api_cidr']
            api_subnet_id = api_subnet.split(".")[2]
            subnet_id = int(api_subnet_id)
            subnet_info = self.__get_subnet_info__(data_file)
            if subnet_id in subnet_info["allocate"]:
                subnet_info["allocate"].remove(subnet_id)
                self.__write_subnet_info__(subnet_info, data_file)
            return True
        except Exception as e:
            LOG.error("release subnet pair error, subnet_pair: %s, error: %s"
                      % (subnet_pair, e.message))
        finally:
            subnet_manager_lock.release()

    @staticmethod
    def __get_subnet_info__(subnet_data_file):
        cloud_info = read_conf(subnet_data_file)
        if "subnet_info" not in cloud_info.keys():
            subnet_info = {"allocate": []}
        else:
            subnet_info = cloud_info["subnet_info"]
        return subnet_info

    @staticmethod
    def __write_subnet_info__(subnet_info, subnet_data_file):
        cloud_info = read_conf(subnet_data_file)
        cloud_info["subnet_info"] = subnet_info
        write_conf(subnet_data_file, cloud_info)
apache-2.0
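A minimal usage sketch for SubnetManager above. The import path, data-file path, and CIDRs are hypothetical; read_conf/write_conf come from the module's conf_util import:

from subnet_manager import SubnetManager  # hypothetical import path

manager = SubnetManager()
data_file = '/etc/cloudmanager/subnet_data.json'  # hypothetical path

# reserve a free id and derive both /24 subnets from the template CIDRs
pair = manager.distribute_subnet_pair('172.28.0.0/24', '172.29.0.0/24',
                                      data_file)
if pair:
    print(pair['external_api_cidr'])    # e.g. 172.28.0.0/24 for id 0
    print(pair['tunnel_bearing_cidr'])  # e.g. 172.29.128.0/24 (id + 128)
    # return the pair to the pool when it is no longer needed
    manager.release_subnet_pair(pair, data_file)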
palmer-dabbelt/linux
scripts/analyze_suspend.py
1537
120394
#!/usr/bin/python
#
# Tool for analyzing suspend/resume timing
# Copyright (c) 2013, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors:
#	Todd Brandt <todd.e.brandt@linux.intel.com>
#
# Description:
#	This tool is designed to assist kernel and OS developers in optimizing
#	their linux stack's suspend/resume time. Using a kernel image built
#	with a few extra options enabled, the tool will execute a suspend and
#	will capture dmesg and ftrace data until resume is complete. This data
#	is transformed into a device timeline and a callgraph to give a quick
#	and detailed view of which devices and callbacks are taking the most
#	time in suspend/resume. The output is a single html file which can be
#	viewed in firefox or chrome.
#
#	The following kernel build options are required:
#		CONFIG_PM_DEBUG=y
#		CONFIG_PM_SLEEP_DEBUG=y
#		CONFIG_FTRACE=y
#		CONFIG_FUNCTION_TRACER=y
#		CONFIG_FUNCTION_GRAPH_TRACER=y
#
#	For kernel versions older than 3.15:
#	The following additional kernel parameters are required:
#		(e.g. in file /etc/default/grub)
#		GRUB_CMDLINE_LINUX_DEFAULT="... initcall_debug log_buf_len=16M ..."
#

# ----------------- LIBRARIES --------------------

import sys
import time
import os
import string
import re
import platform
from datetime import datetime
import struct

# ----------------- CLASSES --------------------

# Class: SystemValues
# Description:
#	A global, single-instance container used to
#	store system values and test parameters
class SystemValues:
	version = 3.0
	verbose = False
	testdir = '.'
	tpath = '/sys/kernel/debug/tracing/'
	fpdtpath = '/sys/firmware/acpi/tables/FPDT'
	epath = '/sys/kernel/debug/tracing/events/power/'
	traceevents = [
		'suspend_resume',
		'device_pm_callback_end',
		'device_pm_callback_start'
	]
	modename = {
		'freeze': 'Suspend-To-Idle (S0)',
		'standby': 'Power-On Suspend (S1)',
		'mem': 'Suspend-to-RAM (S3)',
		'disk': 'Suspend-to-disk (S4)'
	}
	mempath = '/dev/mem'
	powerfile = '/sys/power/state'
	suspendmode = 'mem'
	hostname = 'localhost'
	prefix = 'test'
	teststamp = ''
	dmesgfile = ''
	ftracefile = ''
	htmlfile = ''
	rtcwake = False
	rtcwaketime = 10
	rtcpath = ''
	android = False
	adb = 'adb'
	devicefilter = []
	stamp = 0
	execcount = 1
	x2delay = 0
	usecallgraph = False
	usetraceevents = False
	usetraceeventsonly = False
	notestrun = False
	altdevname = dict()
	postresumetime = 0
	tracertypefmt = '# tracer: (?P<t>.*)'
	firmwarefmt = '# fwsuspend (?P<s>[0-9]*) fwresume (?P<r>[0-9]*)$'
	postresumefmt = '# post resume time (?P<t>[0-9]*)$'
	stampfmt = '# suspend-(?P<m>[0-9]{2})(?P<d>[0-9]{2})(?P<y>[0-9]{2})-'+\
				'(?P<H>[0-9]{2})(?P<M>[0-9]{2})(?P<S>[0-9]{2})'+\
				' (?P<host>.*) (?P<mode>.*) (?P<kernel>.*)$'
	def __init__(self):
		self.hostname = platform.node()
		if(self.hostname == ''):
			self.hostname = 'localhost'
		rtc = "rtc0"
		if os.path.exists('/dev/rtc'):
			rtc = os.readlink('/dev/rtc')
		rtc = '/sys/class/rtc/'+rtc
		if os.path.exists(rtc) and os.path.exists(rtc+'/date') and \
			os.path.exists(rtc+'/time') and os.path.exists(rtc+'/wakealarm'):
			self.rtcpath = rtc
	def setOutputFile(self):
		if((self.htmlfile == '') and (self.dmesgfile != '')):
			m = re.match('(?P<name>.*)_dmesg\.txt$', self.dmesgfile)
			if(m):
				self.htmlfile = m.group('name')+'.html'
		if((self.htmlfile == '') and (self.ftracefile != '')):
			m = re.match('(?P<name>.*)_ftrace\.txt$', self.ftracefile)
			if(m):
				self.htmlfile = m.group('name')+'.html'
		if(self.htmlfile == ''):
			self.htmlfile = 'output.html'
	def initTestOutput(self, subdir):
		if(not self.android):
			self.prefix = self.hostname
			v = open('/proc/version', 'r').read().strip()
			kver = string.split(v)[2]
		else:
			self.prefix = 'android'
			v = os.popen(self.adb+' shell cat /proc/version').read().strip()
			kver = string.split(v)[2]
		testtime = datetime.now().strftime('suspend-%m%d%y-%H%M%S')
		if(subdir != "."):
			self.testdir = subdir+"/"+testtime
		else:
			self.testdir = testtime
		self.teststamp = \
			'# '+testtime+' '+self.prefix+' '+self.suspendmode+' '+kver
		self.dmesgfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_dmesg.txt'
		self.ftracefile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'_ftrace.txt'
		self.htmlfile = \
			self.testdir+'/'+self.prefix+'_'+self.suspendmode+'.html'
		os.mkdir(self.testdir)
	def setDeviceFilter(self, devnames):
		self.devicefilter = string.split(devnames)
	def rtcWakeAlarm(self):
		os.system('echo 0 > '+self.rtcpath+'/wakealarm')
		outD = open(self.rtcpath+'/date', 'r').read().strip()
		outT = open(self.rtcpath+'/time', 'r').read().strip()
		mD = re.match('^(?P<y>[0-9]*)-(?P<m>[0-9]*)-(?P<d>[0-9]*)', outD)
		mT = re.match('^(?P<h>[0-9]*):(?P<m>[0-9]*):(?P<s>[0-9]*)', outT)
		if(mD and mT):
			# get the current time from hardware
			utcoffset = int((datetime.now() - datetime.utcnow()).total_seconds())
			dt = datetime(\
				int(mD.group('y')), int(mD.group('m')), int(mD.group('d')),
				int(mT.group('h')), int(mT.group('m')), int(mT.group('s')))
			nowtime = int(dt.strftime('%s')) + utcoffset
		else:
			# if hardware time fails, use the software time
			nowtime = int(datetime.now().strftime('%s'))
		alarm = nowtime + self.rtcwaketime
		os.system('echo %d > %s/wakealarm' % (alarm, self.rtcpath))

sysvals = SystemValues()
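
# Illustrative note (an addition, not part of the original script): the
# stamp header that stampfmt above parses is the teststamp line written by
# initTestOutput(), e.g.
#
#	# suspend-052514-203056 myhost mem 3.15.0
#
# which re.match(sysvals.stampfmt, line) would decompose into month '05',
# day '25', year '14', time 20:30:56, host 'myhost', mode 'mem', and
# kernel '3.15.0' (the host and kernel values here are hypothetical).

#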
Class: DeviceNode # Description: # A container used to create a device hierachy, with a single root node # and a tree of child nodes. Used by Data.deviceTopology() class DeviceNode: name = '' children = 0 depth = 0 def __init__(self, nodename, nodedepth): self.name = nodename self.children = [] self.depth = nodedepth # Class: Data # Description: # The primary container for suspend/resume test data. There is one for # each test run. The data is organized into a cronological hierarchy: # Data.dmesg { # root structure, started as dmesg & ftrace, but now only ftrace # contents: times for suspend start/end, resume start/end, fwdata # phases { # 10 sequential, non-overlapping phases of S/R # contents: times for phase start/end, order/color data for html # devlist { # device callback or action list for this phase # device { # a single device callback or generic action # contents: start/stop times, pid/cpu/driver info # parents/children, html id for timeline/callgraph # optionally includes an ftrace callgraph # optionally includes intradev trace events # } # } # } # } # class Data: dmesg = {} # root data structure phases = [] # ordered list of phases start = 0.0 # test start end = 0.0 # test end tSuspended = 0.0 # low-level suspend start tResumed = 0.0 # low-level resume start tLow = 0.0 # time spent in low-level suspend (standby/freeze) fwValid = False # is firmware data available fwSuspend = 0 # time spent in firmware suspend fwResume = 0 # time spent in firmware resume dmesgtext = [] # dmesg text file in memory testnumber = 0 idstr = '' html_device_id = 0 stamp = 0 outfile = '' def __init__(self, num): idchar = 'abcdefghijklmnopqrstuvwxyz' self.testnumber = num self.idstr = idchar[num] self.dmesgtext = [] self.phases = [] self.dmesg = { # fixed list of 10 phases 'suspend_prepare': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#CCFFCC', 'order': 0}, 'suspend': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#88FF88', 'order': 1}, 'suspend_late': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#00AA00', 'order': 2}, 'suspend_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#008888', 'order': 3}, 'suspend_machine': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#0000FF', 'order': 4}, 'resume_machine': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FF0000', 'order': 5}, 'resume_noirq': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FF9900', 'order': 6}, 'resume_early': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFCC00', 'order': 7}, 'resume': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFFF88', 'order': 8}, 'resume_complete': {'list': dict(), 'start': -1.0, 'end': -1.0, 'row': 0, 'color': '#FFFFCC', 'order': 9} } self.phases = self.sortedPhases() def getStart(self): return self.dmesg[self.phases[0]]['start'] def setStart(self, time): self.start = time self.dmesg[self.phases[0]]['start'] = time def getEnd(self): return self.dmesg[self.phases[-1]]['end'] def setEnd(self, time): self.end = time self.dmesg[self.phases[-1]]['end'] = time def isTraceEventOutsideDeviceCalls(self, pid, time): for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): return False return True def addIntraDevTraceEvent(self, action, name, pid, time): if(action == 'mutex_lock_try'): color = 'red' elif(action == 'mutex_lock_pass'): color = 'green' 
elif(action == 'mutex_unlock'): color = 'blue' else: # create separate colors based on the name v1 = len(name)*10 % 256 v2 = string.count(name, 'e')*100 % 256 v3 = ord(name[0])*20 % 256 color = '#%06X' % ((v1*0x10000) + (v2*0x100) + v3) for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): e = TraceEvent(action, name, color, time) if('traceevents' not in d): d['traceevents'] = [] d['traceevents'].append(e) return d break return 0 def capIntraDevTraceEvent(self, action, name, pid, time): for phase in self.phases: list = self.dmesg[phase]['list'] for dev in list: d = list[dev] if(d['pid'] == pid and time >= d['start'] and time <= d['end']): if('traceevents' not in d): return for e in d['traceevents']: if(e.action == action and e.name == name and not e.ready): e.length = time - e.time e.ready = True break return def trimTimeVal(self, t, t0, dT, left): if left: if(t > t0): if(t - dT < t0): return t0 return t - dT else: return t else: if(t < t0 + dT): if(t > t0): return t0 + dT return t + dT else: return t def trimTime(self, t0, dT, left): self.tSuspended = self.trimTimeVal(self.tSuspended, t0, dT, left) self.tResumed = self.trimTimeVal(self.tResumed, t0, dT, left) self.start = self.trimTimeVal(self.start, t0, dT, left) self.end = self.trimTimeVal(self.end, t0, dT, left) for phase in self.phases: p = self.dmesg[phase] p['start'] = self.trimTimeVal(p['start'], t0, dT, left) p['end'] = self.trimTimeVal(p['end'], t0, dT, left) list = p['list'] for name in list: d = list[name] d['start'] = self.trimTimeVal(d['start'], t0, dT, left) d['end'] = self.trimTimeVal(d['end'], t0, dT, left) if('ftrace' in d): cg = d['ftrace'] cg.start = self.trimTimeVal(cg.start, t0, dT, left) cg.end = self.trimTimeVal(cg.end, t0, dT, left) for line in cg.list: line.time = self.trimTimeVal(line.time, t0, dT, left) if('traceevents' in d): for e in d['traceevents']: e.time = self.trimTimeVal(e.time, t0, dT, left) def normalizeTime(self, tZero): # first trim out any standby or freeze clock time if(self.tSuspended != self.tResumed): if(self.tResumed > tZero): self.trimTime(self.tSuspended, \ self.tResumed-self.tSuspended, True) else: self.trimTime(self.tSuspended, \ self.tResumed-self.tSuspended, False) # shift the timeline so that tZero is the new 0 self.tSuspended -= tZero self.tResumed -= tZero self.start -= tZero self.end -= tZero for phase in self.phases: p = self.dmesg[phase] p['start'] -= tZero p['end'] -= tZero list = p['list'] for name in list: d = list[name] d['start'] -= tZero d['end'] -= tZero if('ftrace' in d): cg = d['ftrace'] cg.start -= tZero cg.end -= tZero for line in cg.list: line.time -= tZero if('traceevents' in d): for e in d['traceevents']: e.time -= tZero def newPhaseWithSingleAction(self, phasename, devname, start, end, color): for phase in self.phases: self.dmesg[phase]['order'] += 1 self.html_device_id += 1 devid = '%s%d' % (self.idstr, self.html_device_id) list = dict() list[devname] = \ {'start': start, 'end': end, 'pid': 0, 'par': '', 'length': (end-start), 'row': 0, 'id': devid, 'drv': '' }; self.dmesg[phasename] = \ {'list': list, 'start': start, 'end': end, 'row': 0, 'color': color, 'order': 0} self.phases = self.sortedPhases() def newPhase(self, phasename, start, end, color, order): if(order < 0): order = len(self.phases) for phase in self.phases[order:]: self.dmesg[phase]['order'] += 1 if(order > 0): p = self.phases[order-1] self.dmesg[p]['end'] = start if(order < len(self.phases)): p = 
self.phases[order] self.dmesg[p]['start'] = end list = dict() self.dmesg[phasename] = \ {'list': list, 'start': start, 'end': end, 'row': 0, 'color': color, 'order': order} self.phases = self.sortedPhases() def setPhase(self, phase, ktime, isbegin): if(isbegin): self.dmesg[phase]['start'] = ktime else: self.dmesg[phase]['end'] = ktime def dmesgSortVal(self, phase): return self.dmesg[phase]['order'] def sortedPhases(self): return sorted(self.dmesg, key=self.dmesgSortVal) def sortedDevices(self, phase): list = self.dmesg[phase]['list'] slist = [] tmp = dict() for devname in list: dev = list[devname] tmp[dev['start']] = devname for t in sorted(tmp): slist.append(tmp[t]) return slist def fixupInitcalls(self, phase, end): # if any calls never returned, clip them at system resume end phaselist = self.dmesg[phase]['list'] for devname in phaselist: dev = phaselist[devname] if(dev['end'] < 0): dev['end'] = end vprint('%s (%s): callback didnt return' % (devname, phase)) def deviceFilter(self, devicefilter): # remove all by the relatives of the filter devnames filter = [] for phase in self.phases: list = self.dmesg[phase]['list'] for name in devicefilter: dev = name while(dev in list): if(dev not in filter): filter.append(dev) dev = list[dev]['par'] children = self.deviceDescendants(name, phase) for dev in children: if(dev not in filter): filter.append(dev) for phase in self.phases: list = self.dmesg[phase]['list'] rmlist = [] for name in list: pid = list[name]['pid'] if(name not in filter and pid >= 0): rmlist.append(name) for name in rmlist: del list[name] def fixupInitcallsThatDidntReturn(self): # if any calls never returned, clip them at system resume end for phase in self.phases: self.fixupInitcalls(phase, self.getEnd()) def newActionGlobal(self, name, start, end): # which phase is this device callback or action "in" targetphase = "none" overlap = 0.0 for phase in self.phases: pstart = self.dmesg[phase]['start'] pend = self.dmesg[phase]['end'] o = max(0, min(end, pend) - max(start, pstart)) if(o > overlap): targetphase = phase overlap = o if targetphase in self.phases: self.newAction(targetphase, name, -1, '', start, end, '') return True return False def newAction(self, phase, name, pid, parent, start, end, drv): # new device callback for a specific phase self.html_device_id += 1 devid = '%s%d' % (self.idstr, self.html_device_id) list = self.dmesg[phase]['list'] length = -1.0 if(start >= 0 and end >= 0): length = end - start list[name] = {'start': start, 'end': end, 'pid': pid, 'par': parent, 'length': length, 'row': 0, 'id': devid, 'drv': drv } def deviceIDs(self, devlist, phase): idlist = [] list = self.dmesg[phase]['list'] for devname in list: if devname in devlist: idlist.append(list[devname]['id']) return idlist def deviceParentID(self, devname, phase): pdev = '' pdevid = '' list = self.dmesg[phase]['list'] if devname in list: pdev = list[devname]['par'] if pdev in list: return list[pdev]['id'] return pdev def deviceChildren(self, devname, phase): devlist = [] list = self.dmesg[phase]['list'] for child in list: if(list[child]['par'] == devname): devlist.append(child) return devlist def deviceDescendants(self, devname, phase): children = self.deviceChildren(devname, phase) family = children for child in children: family += self.deviceDescendants(child, phase) return family def deviceChildrenIDs(self, devname, phase): devlist = self.deviceChildren(devname, phase) return self.deviceIDs(devlist, phase) def printDetails(self): vprint(' test start: %f' % self.start) for phase in self.phases: dc = 
len(self.dmesg[phase]['list']) vprint(' %16s: %f - %f (%d devices)' % (phase, \ self.dmesg[phase]['start'], self.dmesg[phase]['end'], dc)) vprint(' test end: %f' % self.end) def masterTopology(self, name, list, depth): node = DeviceNode(name, depth) for cname in list: clist = self.deviceChildren(cname, 'resume') cnode = self.masterTopology(cname, clist, depth+1) node.children.append(cnode) return node def printTopology(self, node): html = '' if node.name: info = '' drv = '' for phase in self.phases: list = self.dmesg[phase]['list'] if node.name in list: s = list[node.name]['start'] e = list[node.name]['end'] if list[node.name]['drv']: drv = ' {'+list[node.name]['drv']+'}' info += ('<li>%s: %.3fms</li>' % (phase, (e-s)*1000)) html += '<li><b>'+node.name+drv+'</b>' if info: html += '<ul>'+info+'</ul>' html += '</li>' if len(node.children) > 0: html += '<ul>' for cnode in node.children: html += self.printTopology(cnode) html += '</ul>' return html def rootDeviceList(self): # list of devices graphed real = [] for phase in self.dmesg: list = self.dmesg[phase]['list'] for dev in list: if list[dev]['pid'] >= 0 and dev not in real: real.append(dev) # list of top-most root devices rootlist = [] for phase in self.dmesg: list = self.dmesg[phase]['list'] for dev in list: pdev = list[dev]['par'] if(re.match('[0-9]*-[0-9]*\.[0-9]*[\.0-9]*\:[\.0-9]*$', pdev)): continue if pdev and pdev not in real and pdev not in rootlist: rootlist.append(pdev) return rootlist def deviceTopology(self): rootlist = self.rootDeviceList() master = self.masterTopology('', rootlist, 0) return self.printTopology(master) # Class: TraceEvent # Description: # A container for trace event data found in the ftrace file class TraceEvent: ready = False name = '' time = 0.0 color = '#FFFFFF' length = 0.0 action = '' def __init__(self, a, n, c, t): self.action = a self.name = n self.color = c self.time = t # Class: FTraceLine # Description: # A container for a single line of ftrace data. 
There are six basic types: # callgraph line: # call: " dpm_run_callback() {" # return: " }" # leaf: " dpm_run_callback();" # trace event: # tracing_mark_write: SUSPEND START or RESUME COMPLETE # suspend_resume: phase or custom exec block data # device_pm_callback: device callback info class FTraceLine: time = 0.0 length = 0.0 fcall = False freturn = False fevent = False depth = 0 name = '' type = '' def __init__(self, t, m, d): self.time = float(t) # is this a trace event if(d == 'traceevent' or re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m)): if(d == 'traceevent'): # nop format trace event msg = m else: # function_graph format trace event em = re.match('^ *\/\* *(?P<msg>.*) \*\/ *$', m) msg = em.group('msg') emm = re.match('^(?P<call>.*?): (?P<msg>.*)', msg) if(emm): self.name = emm.group('msg') self.type = emm.group('call') else: self.name = msg self.fevent = True return # convert the duration to seconds if(d): self.length = float(d)/1000000 # the indentation determines the depth match = re.match('^(?P<d> *)(?P<o>.*)$', m) if(not match): return self.depth = self.getDepth(match.group('d')) m = match.group('o') # function return if(m[0] == '}'): self.freturn = True if(len(m) > 1): # includes comment with function name match = re.match('^} *\/\* *(?P<n>.*) *\*\/$', m) if(match): self.name = match.group('n') # function call else: self.fcall = True # function call with children if(m[-1] == '{'): match = re.match('^(?P<n>.*) *\(.*', m) if(match): self.name = match.group('n') # function call with no children (leaf) elif(m[-1] == ';'): self.freturn = True match = re.match('^(?P<n>.*) *\(.*', m) if(match): self.name = match.group('n') # something else (possibly a trace marker) else: self.name = m def getDepth(self, str): return len(str)/2 def debugPrint(self, dev): if(self.freturn and self.fcall): print('%s -- %f (%02d): %s(); (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) elif(self.freturn): print('%s -- %f (%02d): %s} (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) else: print('%s -- %f (%02d): %s() { (%.3f us)' % (dev, self.time, \ self.depth, self.name, self.length*1000000)) # Class: FTraceCallGraph # Description: # A container for the ftrace callgraph of a single recursive function. 
# This can be a dpm_run_callback, dpm_prepare, or dpm_complete callgraph # Each instance is tied to a single device in a single phase, and is # comprised of an ordered list of FTraceLine objects class FTraceCallGraph: start = -1.0 end = -1.0 list = [] invalid = False depth = 0 def __init__(self): self.start = -1.0 self.end = -1.0 self.list = [] self.depth = 0 def setDepth(self, line): if(line.fcall and not line.freturn): line.depth = self.depth self.depth += 1 elif(line.freturn and not line.fcall): self.depth -= 1 line.depth = self.depth else: line.depth = self.depth def addLine(self, line, match): if(not self.invalid): self.setDepth(line) if(line.depth == 0 and line.freturn): if(self.start < 0): self.start = line.time self.end = line.time self.list.append(line) return True if(self.invalid): return False if(len(self.list) >= 1000000 or self.depth < 0): if(len(self.list) > 0): first = self.list[0] self.list = [] self.list.append(first) self.invalid = True if(not match): return False id = 'task %s cpu %s' % (match.group('pid'), match.group('cpu')) window = '(%f - %f)' % (self.start, line.time) if(self.depth < 0): print('Too much data for '+id+\ ' (buffer overflow), ignoring this callback') else: print('Too much data for '+id+\ ' '+window+', ignoring this callback') return False self.list.append(line) if(self.start < 0): self.start = line.time return False def slice(self, t0, tN): minicg = FTraceCallGraph() count = -1 firstdepth = 0 for l in self.list: if(l.time < t0 or l.time > tN): continue if(count < 0): if(not l.fcall or l.name == 'dev_driver_string'): continue firstdepth = l.depth count = 0 l.depth -= firstdepth minicg.addLine(l, 0) if((count == 0 and l.freturn and l.fcall) or (count > 0 and l.depth <= 0)): break count += 1 return minicg def sanityCheck(self): stack = dict() cnt = 0 for l in self.list: if(l.fcall and not l.freturn): stack[l.depth] = l cnt += 1 elif(l.freturn and not l.fcall): if(l.depth not in stack): return False stack[l.depth].length = l.length stack[l.depth] = 0 l.length = 0 cnt -= 1 if(cnt == 0): return True return False def debugPrint(self, filename): if(filename == 'stdout'): print('[%f - %f]') % (self.start, self.end) for l in self.list: if(l.freturn and l.fcall): print('%f (%02d): %s(); (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) elif(l.freturn): print('%f (%02d): %s} (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) else: print('%f (%02d): %s() { (%.3f us)' % (l.time, \ l.depth, l.name, l.length*1000000)) print(' ') else: fp = open(filename, 'w') print(filename) for l in self.list: if(l.freturn and l.fcall): fp.write('%f (%02d): %s(); (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) elif(l.freturn): fp.write('%f (%02d): %s} (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) else: fp.write('%f (%02d): %s() { (%.3f us)\n' % (l.time, \ l.depth, l.name, l.length*1000000)) fp.close() # Class: Timeline # Description: # A container for a suspend/resume html timeline. In older versions # of the script there were multiple timelines, but in the latest # there is only one. 
class Timeline: html = {} scaleH = 0.0 # height of the row as a percent of the timeline height rowH = 0.0 # height of each row in percent of the timeline height row_height_pixels = 30 maxrows = 0 height = 0 def __init__(self): self.html = { 'timeline': '', 'legend': '', 'scale': '' } def setRows(self, rows): self.maxrows = int(rows) self.scaleH = 100.0/float(self.maxrows) self.height = self.maxrows*self.row_height_pixels r = float(self.maxrows - 1) if(r < 1.0): r = 1.0 self.rowH = (100.0 - self.scaleH)/r # Class: TestRun # Description: # A container for a suspend/resume test run. This is necessary as # there could be more than one, and they need to be separate. class TestRun: ftrace_line_fmt_fg = \ '^ *(?P<time>[0-9\.]*) *\| *(?P<cpu>[0-9]*)\)'+\ ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\|'+\ '[ +!]*(?P<dur>[0-9\.]*) .*\| (?P<msg>.*)' ftrace_line_fmt_nop = \ ' *(?P<proc>.*)-(?P<pid>[0-9]*) *\[(?P<cpu>[0-9]*)\] *'+\ '(?P<flags>.{4}) *(?P<time>[0-9\.]*): *'+\ '(?P<msg>.*)' ftrace_line_fmt = ftrace_line_fmt_nop cgformat = False ftemp = dict() ttemp = dict() inthepipe = False tracertype = '' data = 0 def __init__(self, dataobj): self.data = dataobj self.ftemp = dict() self.ttemp = dict() def isReady(self): if(tracertype == '' or not data): return False return True def setTracerType(self, tracer): self.tracertype = tracer if(tracer == 'function_graph'): self.cgformat = True self.ftrace_line_fmt = self.ftrace_line_fmt_fg elif(tracer == 'nop'): self.ftrace_line_fmt = self.ftrace_line_fmt_nop else: doError('Invalid tracer format: [%s]' % tracer, False) # ----------------- FUNCTIONS -------------------- # Function: vprint # Description: # verbose print (prints only with -verbose option) # Arguments: # msg: the debug/log message to print def vprint(msg): global sysvals if(sysvals.verbose): print(msg) # Function: initFtrace # Description: # Configure ftrace to use trace events and/or a callgraph def initFtrace(): global sysvals tp = sysvals.tpath cf = 'dpm_run_callback' if(sysvals.usetraceeventsonly): cf = '-e dpm_prepare -e dpm_complete -e dpm_run_callback' if(sysvals.usecallgraph or sysvals.usetraceevents): print('INITIALIZING FTRACE...') # turn trace off os.system('echo 0 > '+tp+'tracing_on') # set the trace clock to global os.system('echo global > '+tp+'trace_clock') # set trace buffer to a huge value os.system('echo nop > '+tp+'current_tracer') os.system('echo 100000 > '+tp+'buffer_size_kb') # initialize the callgraph trace, unless this is an x2 run if(sysvals.usecallgraph and sysvals.execcount == 1): # set trace type os.system('echo function_graph > '+tp+'current_tracer') os.system('echo "" > '+tp+'set_ftrace_filter') # set trace format options os.system('echo funcgraph-abstime > '+tp+'trace_options') os.system('echo funcgraph-proc > '+tp+'trace_options') # focus only on device suspend and resume os.system('cat '+tp+'available_filter_functions | grep '+\ cf+' > '+tp+'set_graph_function') if(sysvals.usetraceevents): # turn trace events on events = iter(sysvals.traceevents) for e in events: os.system('echo 1 > '+sysvals.epath+e+'/enable') # clear the trace buffer os.system('echo "" > '+tp+'trace') # Function: initFtraceAndroid # Description: # Configure ftrace to capture trace events def initFtraceAndroid(): global sysvals tp = sysvals.tpath if(sysvals.usetraceevents): print('INITIALIZING FTRACE...') # turn trace off os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'") # set the trace clock to global os.system(sysvals.adb+" shell 'echo global > "+tp+"trace_clock'") # set trace buffer to a huge 
value os.system(sysvals.adb+" shell 'echo nop > "+tp+"current_tracer'") os.system(sysvals.adb+" shell 'echo 10000 > "+tp+"buffer_size_kb'") # turn trace events on events = iter(sysvals.traceevents) for e in events: os.system(sysvals.adb+" shell 'echo 1 > "+\ sysvals.epath+e+"/enable'") # clear the trace buffer os.system(sysvals.adb+" shell 'echo \"\" > "+tp+"trace'") # Function: verifyFtrace # Description: # Check that ftrace is working on the system # Output: # True or False def verifyFtrace(): global sysvals # files needed for any trace data files = ['buffer_size_kb', 'current_tracer', 'trace', 'trace_clock', 'trace_marker', 'trace_options', 'tracing_on'] # files needed for callgraph trace data tp = sysvals.tpath if(sysvals.usecallgraph): files += [ 'available_filter_functions', 'set_ftrace_filter', 'set_graph_function' ] for f in files: if(sysvals.android): out = os.popen(sysvals.adb+' shell ls '+tp+f).read().strip() if(out != tp+f): return False else: if(os.path.exists(tp+f) == False): return False return True # Function: parseStamp # Description: # Pull in the stamp comment line from the data file(s), # create the stamp, and add it to the global sysvals object # Arguments: # m: the valid re.match output for the stamp line def parseStamp(m, data): global sysvals data.stamp = {'time': '', 'host': '', 'mode': ''} dt = datetime(int(m.group('y'))+2000, int(m.group('m')), int(m.group('d')), int(m.group('H')), int(m.group('M')), int(m.group('S'))) data.stamp['time'] = dt.strftime('%B %d %Y, %I:%M:%S %p') data.stamp['host'] = m.group('host') data.stamp['mode'] = m.group('mode') data.stamp['kernel'] = m.group('kernel') sysvals.suspendmode = data.stamp['mode'] if not sysvals.stamp: sysvals.stamp = data.stamp # Function: diffStamp # Description: # compare the host, kernel, and mode fields in 3 stamps # Arguments: # stamp1: string array with mode, kernel, and host # stamp2: string array with mode, kernel, and host # Return: # True if stamps differ, False if they're the same def diffStamp(stamp1, stamp2): if 'host' in stamp1 and 'host' in stamp2: if stamp1['host'] != stamp2['host']: return True if 'kernel' in stamp1 and 'kernel' in stamp2: if stamp1['kernel'] != stamp2['kernel']: return True if 'mode' in stamp1 and 'mode' in stamp2: if stamp1['mode'] != stamp2['mode']: return True return False # Function: doesTraceLogHaveTraceEvents # Description: # Quickly determine if the ftrace log has some or all of the trace events # required for primary parsing. Set the usetraceevents and/or # usetraceeventsonly flags in the global sysvals object def doesTraceLogHaveTraceEvents(): global sysvals sysvals.usetraceeventsonly = True sysvals.usetraceevents = False for e in sysvals.traceevents: out = os.popen('cat '+sysvals.ftracefile+' | grep "'+e+': "').read() if(not out): sysvals.usetraceeventsonly = False if(e == 'suspend_resume' and out): sysvals.usetraceevents = True # Function: appendIncompleteTraceLog # Description: # [deprecated for kernel 3.15 or newer] # Legacy support of ftrace outputs that lack the device_pm_callback # and/or suspend_resume trace events. The primary data should be # taken from dmesg, and this ftrace is used only for callgraph data # or custom actions in the timeline. The data is appended to the Data # objects provided. 
# Arguments: # testruns: the array of Data objects obtained from parseKernelLog def appendIncompleteTraceLog(testruns): global sysvals # create TestRun vessels for ftrace parsing testcnt = len(testruns) testidx = -1 testrun = [] for data in testruns: testrun.append(TestRun(data)) # extract the callgraph and traceevent data vprint('Analyzing the ftrace data...') tf = open(sysvals.ftracefile, 'r') for line in tf: # remove any latent carriage returns line = line.replace('\r\n', '') # grab the time stamp first (signifies the start of the test run) m = re.match(sysvals.stampfmt, line) if(m): testidx += 1 parseStamp(m, testrun[testidx].data) continue # pull out any firmware data if(re.match(sysvals.firmwarefmt, line)): continue # if we havent found a test time stamp yet keep spinning til we do if(testidx < 0): continue # determine the trace data type (required for further parsing) m = re.match(sysvals.tracertypefmt, line) if(m): tracer = m.group('t') testrun[testidx].setTracerType(tracer) continue # parse only valid lines, if this isnt one move on m = re.match(testrun[testidx].ftrace_line_fmt, line) if(not m): continue # gather the basic message data from the line m_time = m.group('time') m_pid = m.group('pid') m_msg = m.group('msg') if(testrun[testidx].cgformat): m_param3 = m.group('dur') else: m_param3 = 'traceevent' if(m_time and m_pid and m_msg): t = FTraceLine(m_time, m_msg, m_param3) pid = int(m_pid) else: continue # the line should be a call, return, or event if(not t.fcall and not t.freturn and not t.fevent): continue # only parse the ftrace data during suspend/resume data = testrun[testidx].data if(not testrun[testidx].inthepipe): # look for the suspend start marker if(t.fevent): if(t.name == 'SUSPEND START'): testrun[testidx].inthepipe = True data.setStart(t.time) continue else: # trace event processing if(t.fevent): if(t.name == 'RESUME COMPLETE'): testrun[testidx].inthepipe = False data.setEnd(t.time) if(testidx == testcnt - 1): break continue # general trace events have two types, begin and end if(re.match('(?P<name>.*) begin$', t.name)): isbegin = True elif(re.match('(?P<name>.*) end$', t.name)): isbegin = False else: continue m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name) if(m): val = m.group('val') if val == '0': name = m.group('name') else: name = m.group('name')+'['+val+']' else: m = re.match('(?P<name>.*) .*', t.name) name = m.group('name') # special processing for trace events if re.match('dpm_prepare\[.*', name): continue elif re.match('machine_suspend.*', name): continue elif re.match('suspend_enter\[.*', name): if(not isbegin): data.dmesg['suspend_prepare']['end'] = t.time continue elif re.match('dpm_suspend\[.*', name): if(not isbegin): data.dmesg['suspend']['end'] = t.time continue elif re.match('dpm_suspend_late\[.*', name): if(isbegin): data.dmesg['suspend_late']['start'] = t.time else: data.dmesg['suspend_late']['end'] = t.time continue elif re.match('dpm_suspend_noirq\[.*', name): if(isbegin): data.dmesg['suspend_noirq']['start'] = t.time else: data.dmesg['suspend_noirq']['end'] = t.time continue elif re.match('dpm_resume_noirq\[.*', name): if(isbegin): data.dmesg['resume_machine']['end'] = t.time data.dmesg['resume_noirq']['start'] = t.time else: data.dmesg['resume_noirq']['end'] = t.time continue elif re.match('dpm_resume_early\[.*', name): if(isbegin): data.dmesg['resume_early']['start'] = t.time else: data.dmesg['resume_early']['end'] = t.time continue elif re.match('dpm_resume\[.*', name): if(isbegin): data.dmesg['resume']['start'] = t.time else: 
data.dmesg['resume']['end'] = t.time continue elif re.match('dpm_complete\[.*', name): if(isbegin): data.dmesg['resume_complete']['start'] = t.time else: data.dmesg['resume_complete']['end'] = t.time continue # is this trace event outside of the devices calls if(data.isTraceEventOutsideDeviceCalls(pid, t.time)): # global events (outside device calls) are simply graphed if(isbegin): # store each trace event in ttemp if(name not in testrun[testidx].ttemp): testrun[testidx].ttemp[name] = [] testrun[testidx].ttemp[name].append(\ {'begin': t.time, 'end': t.time}) else: # finish off matching trace event in ttemp if(name in testrun[testidx].ttemp): testrun[testidx].ttemp[name][-1]['end'] = t.time else: if(isbegin): data.addIntraDevTraceEvent('', name, pid, t.time) else: data.capIntraDevTraceEvent('', name, pid, t.time) # call/return processing elif sysvals.usecallgraph: # create a callgraph object for the data if(pid not in testrun[testidx].ftemp): testrun[testidx].ftemp[pid] = [] testrun[testidx].ftemp[pid].append(FTraceCallGraph()) # when the call is finished, see which device matches it cg = testrun[testidx].ftemp[pid][-1] if(cg.addLine(t, m)): testrun[testidx].ftemp[pid].append(FTraceCallGraph()) tf.close() for test in testrun: # add the traceevent data to the device hierarchy if(sysvals.usetraceevents): for name in test.ttemp: for event in test.ttemp[name]: begin = event['begin'] end = event['end'] # if event starts before timeline start, expand timeline if(begin < test.data.start): test.data.setStart(begin) # if event ends after timeline end, expand the timeline if(end > test.data.end): test.data.setEnd(end) test.data.newActionGlobal(name, begin, end) # add the callgraph data to the device hierarchy for pid in test.ftemp: for cg in test.ftemp[pid]: if(not cg.sanityCheck()): id = 'task %s cpu %s' % (pid, m.group('cpu')) vprint('Sanity check failed for '+\ id+', ignoring this callback') continue callstart = cg.start callend = cg.end for p in test.data.phases: if(test.data.dmesg[p]['start'] <= callstart and callstart <= test.data.dmesg[p]['end']): list = test.data.dmesg[p]['list'] for devname in list: dev = list[devname] if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']): dev['ftrace'] = cg break if(sysvals.verbose): test.data.printDetails() # add the time in between the tests as a new phase so we can see it if(len(testruns) > 1): t1e = testruns[0].getEnd() t2s = testruns[-1].getStart() testruns[-1].newPhaseWithSingleAction('user mode', \ 'user mode', t1e, t2s, '#FF9966') # Function: parseTraceLog # Description: # Analyze an ftrace log output file generated from this app during # the execution phase. 
Used when the ftrace log is the primary data source # and includes the suspend_resume and device_pm_callback trace events # The ftrace filename is taken from sysvals # Output: # An array of Data objects def parseTraceLog(): global sysvals vprint('Analyzing the ftrace data...') if(os.path.exists(sysvals.ftracefile) == False): doError('%s doesnt exist' % sysvals.ftracefile, False) # extract the callgraph and traceevent data testruns = [] testdata = [] testrun = 0 data = 0 tf = open(sysvals.ftracefile, 'r') phase = 'suspend_prepare' for line in tf: # remove any latent carriage returns line = line.replace('\r\n', '') # stamp line: each stamp means a new test run m = re.match(sysvals.stampfmt, line) if(m): data = Data(len(testdata)) testdata.append(data) testrun = TestRun(data) testruns.append(testrun) parseStamp(m, data) continue if(not data): continue # firmware line: pull out any firmware data m = re.match(sysvals.firmwarefmt, line) if(m): data.fwSuspend = int(m.group('s')) data.fwResume = int(m.group('r')) if(data.fwSuspend > 0 or data.fwResume > 0): data.fwValid = True continue # tracer type line: determine the trace data type m = re.match(sysvals.tracertypefmt, line) if(m): tracer = m.group('t') testrun.setTracerType(tracer) continue # post resume time line: did this test run include post-resume data m = re.match(sysvals.postresumefmt, line) if(m): t = int(m.group('t')) if(t > 0): sysvals.postresumetime = t continue # ftrace line: parse only valid lines m = re.match(testrun.ftrace_line_fmt, line) if(not m): continue # gather the basic message data from the line m_time = m.group('time') m_pid = m.group('pid') m_msg = m.group('msg') if(testrun.cgformat): m_param3 = m.group('dur') else: m_param3 = 'traceevent' if(m_time and m_pid and m_msg): t = FTraceLine(m_time, m_msg, m_param3) pid = int(m_pid) else: continue # the line should be a call, return, or event if(not t.fcall and not t.freturn and not t.fevent): continue # only parse the ftrace data during suspend/resume if(not testrun.inthepipe): # look for the suspend start marker if(t.fevent): if(t.name == 'SUSPEND START'): testrun.inthepipe = True data.setStart(t.time) continue # trace event processing if(t.fevent): if(t.name == 'RESUME COMPLETE'): if(sysvals.postresumetime > 0): phase = 'post_resume' data.newPhase(phase, t.time, t.time, '#FF9966', -1) else: testrun.inthepipe = False data.setEnd(t.time) continue if(phase == 'post_resume'): data.setEnd(t.time) if(t.type == 'suspend_resume'): # suspend_resume trace events have two types, begin and end if(re.match('(?P<name>.*) begin$', t.name)): isbegin = True elif(re.match('(?P<name>.*) end$', t.name)): isbegin = False else: continue m = re.match('(?P<name>.*)\[(?P<val>[0-9]*)\] .*', t.name) if(m): val = m.group('val') if val == '0': name = m.group('name') else: name = m.group('name')+'['+val+']' else: m = re.match('(?P<name>.*) .*', t.name) name = m.group('name') # ignore these events if(re.match('acpi_suspend\[.*', t.name) or re.match('suspend_enter\[.*', name)): continue # -- phase changes -- # suspend_prepare start if(re.match('dpm_prepare\[.*', t.name)): phase = 'suspend_prepare' if(not isbegin): data.dmesg[phase]['end'] = t.time continue # suspend start elif(re.match('dpm_suspend\[.*', t.name)): phase = 'suspend' data.setPhase(phase, t.time, isbegin) continue # suspend_late start elif(re.match('dpm_suspend_late\[.*', t.name)): phase = 'suspend_late' data.setPhase(phase, t.time, isbegin) continue # suspend_noirq start elif(re.match('dpm_suspend_noirq\[.*', t.name)): phase = 
'suspend_noirq' data.setPhase(phase, t.time, isbegin) if(not isbegin): phase = 'suspend_machine' data.dmesg[phase]['start'] = t.time continue # suspend_machine/resume_machine elif(re.match('machine_suspend\[.*', t.name)): if(isbegin): phase = 'suspend_machine' data.dmesg[phase]['end'] = t.time data.tSuspended = t.time else: if(sysvals.suspendmode in ['mem', 'disk']): data.dmesg['suspend_machine']['end'] = t.time data.tSuspended = t.time phase = 'resume_machine' data.dmesg[phase]['start'] = t.time data.tResumed = t.time data.tLow = data.tResumed - data.tSuspended continue # resume_noirq start elif(re.match('dpm_resume_noirq\[.*', t.name)): phase = 'resume_noirq' data.setPhase(phase, t.time, isbegin) if(isbegin): data.dmesg['resume_machine']['end'] = t.time continue # resume_early start elif(re.match('dpm_resume_early\[.*', t.name)): phase = 'resume_early' data.setPhase(phase, t.time, isbegin) continue # resume start elif(re.match('dpm_resume\[.*', t.name)): phase = 'resume' data.setPhase(phase, t.time, isbegin) continue # resume complete start elif(re.match('dpm_complete\[.*', t.name)): phase = 'resume_complete' if(isbegin): data.dmesg[phase]['start'] = t.time continue # is this trace event outside of the devices calls if(data.isTraceEventOutsideDeviceCalls(pid, t.time)): # global events (outside device calls) are simply graphed if(name not in testrun.ttemp): testrun.ttemp[name] = [] if(isbegin): # create a new list entry testrun.ttemp[name].append(\ {'begin': t.time, 'end': t.time}) else: if(len(testrun.ttemp[name]) > 0): # if an antry exists, assume this is its end testrun.ttemp[name][-1]['end'] = t.time elif(phase == 'post_resume'): # post resume events can just have ends testrun.ttemp[name].append({ 'begin': data.dmesg[phase]['start'], 'end': t.time}) else: if(isbegin): data.addIntraDevTraceEvent('', name, pid, t.time) else: data.capIntraDevTraceEvent('', name, pid, t.time) # device callback start elif(t.type == 'device_pm_callback_start'): m = re.match('(?P<drv>.*) (?P<d>.*), parent: *(?P<p>.*), .*',\ t.name); if(not m): continue drv = m.group('drv') n = m.group('d') p = m.group('p') if(n and p): data.newAction(phase, n, pid, p, t.time, -1, drv) # device callback finish elif(t.type == 'device_pm_callback_end'): m = re.match('(?P<drv>.*) (?P<d>.*), err.*', t.name); if(not m): continue n = m.group('d') list = data.dmesg[phase]['list'] if(n in list): dev = list[n] dev['length'] = t.time - dev['start'] dev['end'] = t.time # callgraph processing elif sysvals.usecallgraph: # this shouldn't happen, but JIC, ignore callgraph data post-res if(phase == 'post_resume'): continue # create a callgraph object for the data if(pid not in testrun.ftemp): testrun.ftemp[pid] = [] testrun.ftemp[pid].append(FTraceCallGraph()) # when the call is finished, see which device matches it cg = testrun.ftemp[pid][-1] if(cg.addLine(t, m)): testrun.ftemp[pid].append(FTraceCallGraph()) tf.close() for test in testruns: # add the traceevent data to the device hierarchy if(sysvals.usetraceevents): for name in test.ttemp: for event in test.ttemp[name]: begin = event['begin'] end = event['end'] # if event starts before timeline start, expand timeline if(begin < test.data.start): test.data.setStart(begin) # if event ends after timeline end, expand the timeline if(end > test.data.end): test.data.setEnd(end) test.data.newActionGlobal(name, begin, end) # add the callgraph data to the device hierarchy borderphase = { 'dpm_prepare': 'suspend_prepare', 'dpm_complete': 'resume_complete' } for pid in test.ftemp: for cg in 
test.ftemp[pid]: if len(cg.list) < 2: continue if(not cg.sanityCheck()): id = 'task %s cpu %s' % (pid, m.group('cpu')) vprint('Sanity check failed for '+\ id+', ignoring this callback') continue callstart = cg.start callend = cg.end if(cg.list[0].name in borderphase): p = borderphase[cg.list[0].name] list = test.data.dmesg[p]['list'] for devname in list: dev = list[devname] if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']): dev['ftrace'] = cg.slice(dev['start'], dev['end']) continue if(cg.list[0].name != 'dpm_run_callback'): continue for p in test.data.phases: if(test.data.dmesg[p]['start'] <= callstart and callstart <= test.data.dmesg[p]['end']): list = test.data.dmesg[p]['list'] for devname in list: dev = list[devname] if(pid == dev['pid'] and callstart <= dev['start'] and callend >= dev['end']): dev['ftrace'] = cg break # fill in any missing phases for data in testdata: lp = data.phases[0] for p in data.phases: if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0): print('WARNING: phase "%s" is missing!' % p) if(data.dmesg[p]['start'] < 0): data.dmesg[p]['start'] = data.dmesg[lp]['end'] if(p == 'resume_machine'): data.tSuspended = data.dmesg[lp]['end'] data.tResumed = data.dmesg[lp]['end'] data.tLow = 0 if(data.dmesg[p]['end'] < 0): data.dmesg[p]['end'] = data.dmesg[p]['start'] lp = p if(len(sysvals.devicefilter) > 0): data.deviceFilter(sysvals.devicefilter) data.fixupInitcallsThatDidntReturn() if(sysvals.verbose): data.printDetails() # add the time in between the tests as a new phase so we can see it if(len(testdata) > 1): t1e = testdata[0].getEnd() t2s = testdata[-1].getStart() testdata[-1].newPhaseWithSingleAction('user mode', \ 'user mode', t1e, t2s, '#FF9966') return testdata # Function: loadKernelLog # Description: # [deprecated for kernel 3.15.0 or newer] # load the dmesg file into memory and fix up any ordering issues # The dmesg filename is taken from sysvals # Output: # An array of empty Data objects with only their dmesgtext attributes set def loadKernelLog(): global sysvals vprint('Analyzing the dmesg data...') if(os.path.exists(sysvals.dmesgfile) == False): doError('%s doesnt exist' % sysvals.dmesgfile, False) # there can be multiple test runs in a single file delineated by stamps testruns = [] data = 0 lf = open(sysvals.dmesgfile, 'r') for line in lf: line = line.replace('\r\n', '') idx = line.find('[') if idx > 1: line = line[idx:] m = re.match(sysvals.stampfmt, line) if(m): if(data): testruns.append(data) data = Data(len(testruns)) parseStamp(m, data) continue if(not data): continue m = re.match(sysvals.firmwarefmt, line) if(m): data.fwSuspend = int(m.group('s')) data.fwResume = int(m.group('r')) if(data.fwSuspend > 0 or data.fwResume > 0): data.fwValid = True continue m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) if(m): data.dmesgtext.append(line) if(re.match('ACPI: resume from mwait', m.group('msg'))): print('NOTE: This suspend appears to be freeze rather than'+\ ' %s, it will be treated as such' % sysvals.suspendmode) sysvals.suspendmode = 'freeze' else: vprint('ignoring dmesg line: %s' % line.replace('\n', '')) testruns.append(data) lf.close() if(not data): print('ERROR: analyze_suspend header missing from dmesg log') sys.exit() # fix lines with same timestamp/function with the call and return swapped for data in testruns: last = '' for line in data.dmesgtext: mc = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) calling '+\ '(?P<f>.*)\+ @ .*, parent: .*', line) mr = re.match('.*(\[ *)(?P<t>[0-9\.]*)(\]) call '+\ 
'(?P<f>.*)\+ returned .* after (?P<dt>.*) usecs', last) if(mc and mr and (mc.group('t') == mr.group('t')) and (mc.group('f') == mr.group('f'))): i = data.dmesgtext.index(last) j = data.dmesgtext.index(line) data.dmesgtext[i] = line data.dmesgtext[j] = last last = line return testruns # Function: parseKernelLog # Description: # [deprecated for kernel 3.15.0 or newer] # Analyse a dmesg log output file generated from this app during # the execution phase. Create a set of device structures in memory # for subsequent formatting in the html output file # This call is only for legacy support on kernels where the ftrace # data lacks the suspend_resume or device_pm_callbacks trace events. # Arguments: # data: an empty Data object (with dmesgtext) obtained from loadKernelLog # Output: # The filled Data object def parseKernelLog(data): global sysvals phase = 'suspend_runtime' if(data.fwValid): vprint('Firmware Suspend = %u ns, Firmware Resume = %u ns' % \ (data.fwSuspend, data.fwResume)) # dmesg phase match table dm = { 'suspend_prepare': 'PM: Syncing filesystems.*', 'suspend': 'PM: Entering [a-z]* sleep.*', 'suspend_late': 'PM: suspend of devices complete after.*', 'suspend_noirq': 'PM: late suspend of devices complete after.*', 'suspend_machine': 'PM: noirq suspend of devices complete after.*', 'resume_machine': 'ACPI: Low-level resume complete.*', 'resume_noirq': 'ACPI: Waking up from system sleep state.*', 'resume_early': 'PM: noirq resume of devices complete after.*', 'resume': 'PM: early resume of devices complete after.*', 'resume_complete': 'PM: resume of devices complete after.*', 'post_resume': '.*Restarting tasks \.\.\..*', } if(sysvals.suspendmode == 'standby'): dm['resume_machine'] = 'PM: Restoring platform NVS memory' elif(sysvals.suspendmode == 'disk'): dm['suspend_late'] = 'PM: freeze of devices complete after.*' dm['suspend_noirq'] = 'PM: late freeze of devices complete after.*' dm['suspend_machine'] = 'PM: noirq freeze of devices complete after.*' dm['resume_machine'] = 'PM: Restoring platform NVS memory' dm['resume_early'] = 'PM: noirq restore of devices complete after.*' dm['resume'] = 'PM: early restore of devices complete after.*' dm['resume_complete'] = 'PM: restore of devices complete after.*' elif(sysvals.suspendmode == 'freeze'): dm['resume_machine'] = 'ACPI: resume from mwait' # action table (expected events that occur and show up in dmesg) at = { 'sync_filesystems': { 'smsg': 'PM: Syncing filesystems.*', 'emsg': 'PM: Preparing system for mem sleep.*' }, 'freeze_user_processes': { 'smsg': 'Freezing user space processes .*', 'emsg': 'Freezing remaining freezable tasks.*' }, 'freeze_tasks': { 'smsg': 'Freezing remaining freezable tasks.*', 'emsg': 'PM: Entering (?P<mode>[a-z,A-Z]*) sleep.*' }, 'ACPI prepare': { 'smsg': 'ACPI: Preparing to enter system sleep state.*', 'emsg': 'PM: Saving platform NVS memory.*' }, 'PM vns': { 'smsg': 'PM: Saving platform NVS memory.*', 'emsg': 'Disabling non-boot CPUs .*' }, } t0 = -1.0 cpu_start = -1.0 prevktime = -1.0 actions = dict() for line in data.dmesgtext: # -- preprocessing -- # parse each dmesg line into the time and message m = re.match('[ \t]*(\[ *)(?P<ktime>[0-9\.]*)(\]) (?P<msg>.*)', line) if(m): val = m.group('ktime') try: ktime = float(val) except: doWarning('INVALID DMESG LINE: '+\ line.replace('\n', ''), 'dmesg') continue msg = m.group('msg') # initialize data start to first line time if t0 < 0: data.setStart(ktime) t0 = ktime else: continue # hack for determining resume_machine end for freeze if(not sysvals.usetraceevents and 
sysvals.suspendmode == 'freeze' \ and phase == 'resume_machine' and \ re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)): data.dmesg['resume_machine']['end'] = ktime phase = 'resume_noirq' data.dmesg[phase]['start'] = ktime # -- phase changes -- # suspend start if(re.match(dm['suspend_prepare'], msg)): phase = 'suspend_prepare' data.dmesg[phase]['start'] = ktime data.setStart(ktime) # suspend start elif(re.match(dm['suspend'], msg)): data.dmesg['suspend_prepare']['end'] = ktime phase = 'suspend' data.dmesg[phase]['start'] = ktime # suspend_late start elif(re.match(dm['suspend_late'], msg)): data.dmesg['suspend']['end'] = ktime phase = 'suspend_late' data.dmesg[phase]['start'] = ktime # suspend_noirq start elif(re.match(dm['suspend_noirq'], msg)): data.dmesg['suspend_late']['end'] = ktime phase = 'suspend_noirq' data.dmesg[phase]['start'] = ktime # suspend_machine start elif(re.match(dm['suspend_machine'], msg)): data.dmesg['suspend_noirq']['end'] = ktime phase = 'suspend_machine' data.dmesg[phase]['start'] = ktime # resume_machine start elif(re.match(dm['resume_machine'], msg)): if(sysvals.suspendmode in ['freeze', 'standby']): data.tSuspended = prevktime data.dmesg['suspend_machine']['end'] = prevktime else: data.tSuspended = ktime data.dmesg['suspend_machine']['end'] = ktime phase = 'resume_machine' data.tResumed = ktime data.tLow = data.tResumed - data.tSuspended data.dmesg[phase]['start'] = ktime # resume_noirq start elif(re.match(dm['resume_noirq'], msg)): data.dmesg['resume_machine']['end'] = ktime phase = 'resume_noirq' data.dmesg[phase]['start'] = ktime # resume_early start elif(re.match(dm['resume_early'], msg)): data.dmesg['resume_noirq']['end'] = ktime phase = 'resume_early' data.dmesg[phase]['start'] = ktime # resume start elif(re.match(dm['resume'], msg)): data.dmesg['resume_early']['end'] = ktime phase = 'resume' data.dmesg[phase]['start'] = ktime # resume complete start elif(re.match(dm['resume_complete'], msg)): data.dmesg['resume']['end'] = ktime phase = 'resume_complete' data.dmesg[phase]['start'] = ktime # post resume start elif(re.match(dm['post_resume'], msg)): data.dmesg['resume_complete']['end'] = ktime data.setEnd(ktime) phase = 'post_resume' break # -- device callbacks -- if(phase in data.phases): # device init call if(re.match('calling (?P<f>.*)\+ @ .*, parent: .*', msg)): sm = re.match('calling (?P<f>.*)\+ @ '+\ '(?P<n>.*), parent: (?P<p>.*)', msg); f = sm.group('f') n = sm.group('n') p = sm.group('p') if(f and n and p): data.newAction(phase, f, int(n), p, ktime, -1, '') # device init return elif(re.match('call (?P<f>.*)\+ returned .* after '+\ '(?P<t>.*) usecs', msg)): sm = re.match('call (?P<f>.*)\+ returned .* after '+\ '(?P<t>.*) usecs(?P<a>.*)', msg); f = sm.group('f') t = sm.group('t') list = data.dmesg[phase]['list'] if(f in list): dev = list[f] dev['length'] = int(t) dev['end'] = ktime # -- non-devicecallback actions -- # if trace events are not available, these are better than nothing if(not sysvals.usetraceevents): # look for known actions for a in at: if(re.match(at[a]['smsg'], msg)): if(a not in actions): actions[a] = [] actions[a].append({'begin': ktime, 'end': ktime}) if(re.match(at[a]['emsg'], msg)): actions[a][-1]['end'] = ktime # now look for CPU on/off events if(re.match('Disabling non-boot CPUs .*', msg)): # start of first cpu suspend cpu_start = ktime elif(re.match('Enabling non-boot CPUs .*', msg)): # start of first cpu resume cpu_start = ktime elif(re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg)): # end of a cpu suspend, 
start of the next m = re.match('smpboot: CPU (?P<cpu>[0-9]*) is now offline', msg) cpu = 'CPU'+m.group('cpu') if(cpu not in actions): actions[cpu] = [] actions[cpu].append({'begin': cpu_start, 'end': ktime}) cpu_start = ktime elif(re.match('CPU(?P<cpu>[0-9]*) is up', msg)): # end of a cpu resume, start of the next m = re.match('CPU(?P<cpu>[0-9]*) is up', msg) cpu = 'CPU'+m.group('cpu') if(cpu not in actions): actions[cpu] = [] actions[cpu].append({'begin': cpu_start, 'end': ktime}) cpu_start = ktime prevktime = ktime # fill in any missing phases lp = data.phases[0] for p in data.phases: if(data.dmesg[p]['start'] < 0 and data.dmesg[p]['end'] < 0): print('WARNING: phase "%s" is missing, something went wrong!' % p) print(' In %s, this dmesg line denotes the start of %s:' % \ (sysvals.suspendmode, p)) print(' "%s"' % dm[p]) if(data.dmesg[p]['start'] < 0): data.dmesg[p]['start'] = data.dmesg[lp]['end'] if(p == 'resume_machine'): data.tSuspended = data.dmesg[lp]['end'] data.tResumed = data.dmesg[lp]['end'] data.tLow = 0 if(data.dmesg[p]['end'] < 0): data.dmesg[p]['end'] = data.dmesg[p]['start'] lp = p # fill in any actions we've found for name in actions: for event in actions[name]: begin = event['begin'] end = event['end'] # if event starts before timeline start, expand timeline if(begin < data.start): data.setStart(begin) # if event ends after timeline end, expand the timeline if(end > data.end): data.setEnd(end) data.newActionGlobal(name, begin, end) if(sysvals.verbose): data.printDetails() if(len(sysvals.devicefilter) > 0): data.deviceFilter(sysvals.devicefilter) data.fixupInitcallsThatDidntReturn() return True # Function: setTimelineRows # Description: # Organize the timeline entries into the smallest # number of rows possible, with no entry overlapping # Arguments: # list: the list of devices/actions for a single phase # sortedkeys: cronologically sorted key list to use # Output: # The total number of rows needed to display this phase of the timeline def setTimelineRows(list, sortedkeys): # clear all rows and set them to undefined remaining = len(list) rowdata = dict() row = 0 for item in list: list[item]['row'] = -1 # try to pack each row with as many ranges as possible while(remaining > 0): if(row not in rowdata): rowdata[row] = [] for item in sortedkeys: if(list[item]['row'] < 0): s = list[item]['start'] e = list[item]['end'] valid = True for ritem in rowdata[row]: rs = ritem['start'] re = ritem['end'] if(not (((s <= rs) and (e <= rs)) or ((s >= re) and (e >= re)))): valid = False break if(valid): rowdata[row].append(list[item]) list[item]['row'] = row remaining -= 1 row += 1 return row # Function: createTimeScale # Description: # Create the timescale header for the html timeline # Arguments: # t0: start time (suspend begin) # tMax: end time (resume end) # tSuspend: time when suspend occurs, i.e. 
the zero time # Output: # The html code needed to display the time scale def createTimeScale(t0, tMax, tSuspended): timescale = '<div class="t" style="right:{0}%">{1}</div>\n' output = '<div id="timescale">\n' # set scale for timeline tTotal = tMax - t0 tS = 0.1 if(tTotal <= 0): return output if(tTotal > 4): tS = 1 if(tSuspended < 0): for i in range(int(tTotal/tS)+1): pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal)) if(i > 0): val = '%0.fms' % (float(i)*tS*1000) else: val = '' output += timescale.format(pos, val) else: tSuspend = tSuspended - t0 divTotal = int(tTotal/tS) + 1 divSuspend = int(tSuspend/tS) s0 = (tSuspend - tS*divSuspend)*100/tTotal for i in range(divTotal): pos = '%0.3f' % (100 - ((float(i)*tS*100)/tTotal) - s0) if((i == 0) and (s0 < 3)): val = '' elif(i == divSuspend): val = 'S/R' else: val = '%0.fms' % (float(i-divSuspend)*tS*1000) output += timescale.format(pos, val) output += '</div>\n' return output # Function: createHTMLSummarySimple # Description: # Create summary html file for a series of tests # Arguments: # testruns: array of Data objects from parseTraceLog def createHTMLSummarySimple(testruns, htmlfile): global sysvals # print out the basic summary of all the tests hf = open(htmlfile, 'w') # write the html header first (html head, css code, up to body start) html = '<!DOCTYPE html>\n<html>\n<head>\n\ <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\ <title>AnalyzeSuspend Summary</title>\n\ <style type=\'text/css\'>\n\ body {overflow-y: scroll;}\n\ .stamp {width: 100%;text-align:center;background-color:#495E09;line-height:30px;color:white;font: 25px Arial;}\n\ table {width:100%;border-collapse: collapse;}\n\ .summary {font: 22px Arial;border:1px solid;}\n\ th {border: 1px solid black;background-color:#A7C942;color:white;}\n\ td {text-align: center;}\n\ tr.alt td {background-color:#EAF2D3;}\n\ tr.avg td {background-color:#BDE34C;}\n\ a:link {color: #90B521;}\n\ a:visited {color: #495E09;}\n\ a:hover {color: #B1DF28;}\n\ a:active {color: #FFFFFF;}\n\ </style>\n</head>\n<body>\n' # group test header count = len(testruns) headline_stamp = '<div class="stamp">{0} {1} {2} {3} ({4} tests)</div>\n' html += headline_stamp.format(sysvals.stamp['host'], sysvals.stamp['kernel'], sysvals.stamp['mode'], sysvals.stamp['time'], count) # check to see if all the tests have the same value stampcolumns = False for data in testruns: if diffStamp(sysvals.stamp, data.stamp): stampcolumns = True break th = '\t<th>{0}</th>\n' td = '\t<td>{0}</td>\n' tdlink = '\t<td><a href="{0}">Click Here</a></td>\n' # table header html += '<table class="summary">\n<tr>\n' html += th.format("Test #") if stampcolumns: html += th.format("Hostname") html += th.format("Kernel Version") html += th.format("Suspend Mode") html += th.format("Test Time") html += th.format("Suspend Time") html += th.format("Resume Time") html += th.format("Detail") html += '</tr>\n' # test data, 1 row per test sTimeAvg = 0.0 rTimeAvg = 0.0 num = 1 for data in testruns: # data.end is the end of post_resume resumeEnd = data.dmesg['resume_complete']['end'] if num % 2 == 1: html += '<tr class="alt">\n' else: html += '<tr>\n' # test num html += td.format("test %d" % num) num += 1 if stampcolumns: # host name val = "unknown" if('host' in data.stamp): val = data.stamp['host'] html += td.format(val) # host kernel val = "unknown" if('kernel' in data.stamp): val = data.stamp['kernel'] html += td.format(val) # suspend mode val = "unknown" if('mode' in data.stamp): val = data.stamp['mode'] html += td.format(val) # test 
time val = "unknown" if('time' in data.stamp): val = data.stamp['time'] html += td.format(val) # suspend time sTime = (data.tSuspended - data.start)*1000 sTimeAvg += sTime html += td.format("%3.3f ms" % sTime) # resume time rTime = (resumeEnd - data.tResumed)*1000 rTimeAvg += rTime html += td.format("%3.3f ms" % rTime) # link to the output html html += tdlink.format(data.outfile) html += '</tr>\n' # last line: test average if(count > 0): sTimeAvg /= count rTimeAvg /= count html += '<tr class="avg">\n' html += td.format('Average') # name if stampcolumns: html += td.format('') # host html += td.format('') # kernel html += td.format('') # mode html += td.format('') # time html += td.format("%3.3f ms" % sTimeAvg) # suspend time html += td.format("%3.3f ms" % rTimeAvg) # resume time html += td.format('') # output link html += '</tr>\n' # flush the data to file hf.write(html+'</table>\n') hf.write('</body>\n</html>\n') hf.close() # Function: createHTML # Description: # Create the output html file from the resident test data # Arguments: # testruns: array of Data objects from parseKernelLog or parseTraceLog # Output: # True if the html file was created, false if it failed def createHTML(testruns): global sysvals for data in testruns: data.normalizeTime(testruns[-1].tSuspended) x2changes = ['', 'absolute'] if len(testruns) > 1: x2changes = ['1', 'relative'] # html function templates headline_stamp = '<div class="stamp">{0} {1} {2} {3}</div>\n' html_devlist1 = '<button id="devlist1" class="devlist" style="float:left;">Device Detail%s</button>' % x2changes[0] html_zoombox = '<center><button id="zoomin">ZOOM IN</button><button id="zoomout">ZOOM OUT</button><button id="zoomdef">ZOOM 1:1</button></center>\n' html_devlist2 = '<button id="devlist2" class="devlist" style="float:right;">Device Detail2</button>\n' html_timeline = '<div id="dmesgzoombox" class="zoombox">\n<div id="{0}" class="timeline" style="height:{1}px">\n' html_device = '<div id="{0}" title="{1}" class="thread" style="left:{2}%;top:{3}%;height:{4}%;width:{5}%;">{6}</div>\n' html_traceevent = '<div title="{0}" class="traceevent" style="left:{1}%;top:{2}%;height:{3}%;width:{4}%;border:1px solid {5};background-color:{5}">{6}</div>\n' html_phase = '<div class="phase" style="left:{0}%;width:{1}%;top:{2}%;height:{3}%;background-color:{4}">{5}</div>\n' html_phaselet = '<div id="{0}" class="phaselet" style="left:{1}%;width:{2}%;background-color:{3}"></div>\n' html_legend = '<div class="square" style="left:{0}%;background-color:{1}">&nbsp;{2}</div>\n' html_timetotal = '<table class="time1">\n<tr>'\ '<td class="green">{2} Suspend Time: <b>{0} ms</b></td>'\ '<td class="yellow">{2} Resume Time: <b>{1} ms</b></td>'\ '</tr>\n</table>\n' html_timetotal2 = '<table class="time1">\n<tr>'\ '<td class="green">{3} Suspend Time: <b>{0} ms</b></td>'\ '<td class="gray">'+sysvals.suspendmode+' time: <b>{1} ms</b></td>'\ '<td class="yellow">{3} Resume Time: <b>{2} ms</b></td>'\ '</tr>\n</table>\n' html_timegroups = '<table class="time2">\n<tr>'\ '<td class="green">{4}Kernel Suspend: {0} ms</td>'\ '<td class="purple">{4}Firmware Suspend: {1} ms</td>'\ '<td class="purple">{4}Firmware Resume: {2} ms</td>'\ '<td class="yellow">{4}Kernel Resume: {3} ms</td>'\ '</tr>\n</table>\n' # device timeline vprint('Creating Device Timeline...') devtl = Timeline() # Generate the header for this timeline textnum = ['First', 'Second'] for data in testruns: tTotal = data.end - data.start tEnd = data.dmesg['resume_complete']['end'] if(tTotal == 0): print('ERROR: No timeline data') 
sys.exit() if(data.tLow > 0): low_time = '%.0f'%(data.tLow*1000) if data.fwValid: suspend_time = '%.0f'%((data.tSuspended-data.start)*1000 + \ (data.fwSuspend/1000000.0)) resume_time = '%.0f'%((tEnd-data.tSuspended)*1000 + \ (data.fwResume/1000000.0)) testdesc1 = 'Total' testdesc2 = '' if(len(testruns) > 1): testdesc1 = testdesc2 = textnum[data.testnumber] testdesc2 += ' ' if(data.tLow == 0): thtml = html_timetotal.format(suspend_time, \ resume_time, testdesc1) else: thtml = html_timetotal2.format(suspend_time, low_time, \ resume_time, testdesc1) devtl.html['timeline'] += thtml sktime = '%.3f'%((data.dmesg['suspend_machine']['end'] - \ data.getStart())*1000) sftime = '%.3f'%(data.fwSuspend / 1000000.0) rftime = '%.3f'%(data.fwResume / 1000000.0) rktime = '%.3f'%((data.getEnd() - \ data.dmesg['resume_machine']['start'])*1000) devtl.html['timeline'] += html_timegroups.format(sktime, \ sftime, rftime, rktime, testdesc2) else: suspend_time = '%.0f'%((data.tSuspended-data.start)*1000) resume_time = '%.0f'%((tEnd-data.tSuspended)*1000) testdesc = 'Kernel' if(len(testruns) > 1): testdesc = textnum[data.testnumber]+' '+testdesc if(data.tLow == 0): thtml = html_timetotal.format(suspend_time, \ resume_time, testdesc) else: thtml = html_timetotal2.format(suspend_time, low_time, \ resume_time, testdesc) devtl.html['timeline'] += thtml # time scale for potentially multiple datasets t0 = testruns[0].start tMax = testruns[-1].end tSuspended = testruns[-1].tSuspended tTotal = tMax - t0 # determine the maximum number of rows we need to draw timelinerows = 0 for data in testruns: for phase in data.dmesg: list = data.dmesg[phase]['list'] rows = setTimelineRows(list, list) data.dmesg[phase]['row'] = rows if(rows > timelinerows): timelinerows = rows # calculate the timeline height and create bounding box, add buttons devtl.setRows(timelinerows + 1) devtl.html['timeline'] += html_devlist1 if len(testruns) > 1: devtl.html['timeline'] += html_devlist2 devtl.html['timeline'] += html_zoombox devtl.html['timeline'] += html_timeline.format('dmesg', devtl.height) # draw the colored boxes for each of the phases for data in testruns: for b in data.dmesg: phase = data.dmesg[b] length = phase['end']-phase['start'] left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal) width = '%.3f' % ((length*100.0)/tTotal) devtl.html['timeline'] += html_phase.format(left, width, \ '%.3f'%devtl.scaleH, '%.3f'%(100-devtl.scaleH), \ data.dmesg[b]['color'], '') # draw the time scale, try to make the number of labels readable devtl.html['scale'] = createTimeScale(t0, tMax, tSuspended) devtl.html['timeline'] += devtl.html['scale'] for data in testruns: for b in data.dmesg: phaselist = data.dmesg[b]['list'] for d in phaselist: name = d drv = '' dev = phaselist[d] if(d in sysvals.altdevname): name = sysvals.altdevname[d] if('drv' in dev and dev['drv']): drv = ' {%s}' % dev['drv'] height = (100.0 - devtl.scaleH)/data.dmesg[b]['row'] top = '%.3f' % ((dev['row']*height) + devtl.scaleH) left = '%.3f' % (((dev['start']-t0)*100)/tTotal) width = '%.3f' % (((dev['end']-dev['start'])*100)/tTotal) length = ' (%0.3f ms) ' % ((dev['end']-dev['start'])*1000) color = 'rgba(204,204,204,0.5)' devtl.html['timeline'] += html_device.format(dev['id'], \ d+drv+length+b, left, top, '%.3f'%height, width, name+drv) # draw any trace events found for data in testruns: for b in data.dmesg: phaselist = data.dmesg[b]['list'] for name in phaselist: dev = phaselist[name] if('traceevents' in dev): vprint('Debug trace events found for device %s' % name) vprint('%20s %20s %10s 
%8s' % ('action', \ 'name', 'time(ms)', 'length(ms)')) for e in dev['traceevents']: vprint('%20s %20s %10.3f %8.3f' % (e.action, \ e.name, e.time*1000, e.length*1000)) height = (100.0 - devtl.scaleH)/data.dmesg[b]['row'] top = '%.3f' % ((dev['row']*height) + devtl.scaleH) left = '%.3f' % (((e.time-t0)*100)/tTotal) width = '%.3f' % (e.length*100/tTotal) color = 'rgba(204,204,204,0.5)' devtl.html['timeline'] += \ html_traceevent.format(e.action+' '+e.name, \ left, top, '%.3f'%height, \ width, e.color, '') # timeline is finished devtl.html['timeline'] += '</div>\n</div>\n' # draw a legend which describes the phases by color data = testruns[-1] devtl.html['legend'] = '<div class="legend">\n' pdelta = 100.0/len(data.phases) pmargin = pdelta / 4.0 for phase in data.phases: order = '%.2f' % ((data.dmesg[phase]['order'] * pdelta) + pmargin) name = string.replace(phase, '_', ' &nbsp;') devtl.html['legend'] += html_legend.format(order, \ data.dmesg[phase]['color'], name) devtl.html['legend'] += '</div>\n' hf = open(sysvals.htmlfile, 'w') thread_height = 0 # write the html header first (html head, css code, up to body start) html_header = '<!DOCTYPE html>\n<html>\n<head>\n\ <meta http-equiv="content-type" content="text/html; charset=UTF-8">\n\ <title>AnalyzeSuspend</title>\n\ <style type=\'text/css\'>\n\ body {overflow-y: scroll;}\n\ .stamp {width: 100%;text-align:center;background-color:gray;line-height:30px;color:white;font: 25px Arial;}\n\ .callgraph {margin-top: 30px;box-shadow: 5px 5px 20px black;}\n\ .callgraph article * {padding-left: 28px;}\n\ h1 {color:black;font: bold 30px Times;}\n\ t0 {color:black;font: bold 30px Times;}\n\ t1 {color:black;font: 30px Times;}\n\ t2 {color:black;font: 25px Times;}\n\ t3 {color:black;font: 20px Times;white-space:nowrap;}\n\ t4 {color:black;font: bold 30px Times;line-height:60px;white-space:nowrap;}\n\ table {width:100%;}\n\ .gray {background-color:rgba(80,80,80,0.1);}\n\ .green {background-color:rgba(204,255,204,0.4);}\n\ .purple {background-color:rgba(128,0,128,0.2);}\n\ .yellow {background-color:rgba(255,255,204,0.4);}\n\ .time1 {font: 22px Arial;border:1px solid;}\n\ .time2 {font: 15px Arial;border-bottom:1px solid;border-left:1px solid;border-right:1px solid;}\n\ td {text-align: center;}\n\ r {color:#500000;font:15px Tahoma;}\n\ n {color:#505050;font:15px Tahoma;}\n\ .tdhl {color: red;}\n\ .hide {display: none;}\n\ .pf {display: none;}\n\ .pf:checked + label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/><rect x="8" y="4" width="2" height="10" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\ .pf:not(:checked) ~ label {background: url(\'data:image/svg+xml;utf,<?xml version="1.0" standalone="no"?><svg xmlns="http://www.w3.org/2000/svg" height="18" width="18" version="1.1"><circle cx="9" cy="9" r="8" stroke="black" stroke-width="1" fill="white"/><rect x="4" y="8" width="10" height="2" style="fill:black;stroke-width:0"/></svg>\') no-repeat left center;}\n\ .pf:checked ~ *:not(:nth-child(2)) {display: none;}\n\ .zoombox {position: relative; width: 100%; overflow-x: scroll;}\n\ .timeline {position: relative; font-size: 14px;cursor: pointer;width: 100%; overflow: hidden; background-color:#dddddd;}\n\ .thread {position: absolute; height: '+'%.3f'%thread_height+'%; overflow: hidden; 
line-height: 30px; border:1px solid;text-align:center;white-space:nowrap;background-color:rgba(204,204,204,0.5);}\n\ .thread:hover {background-color:white;border:1px solid red;z-index:10;}\n\ .hover {background-color:white;border:1px solid red;z-index:10;}\n\ .traceevent {position: absolute;opacity: 0.3;height: '+'%.3f'%thread_height+'%;width:0;overflow:hidden;line-height:30px;text-align:center;white-space:nowrap;}\n\ .phase {position: absolute;overflow: hidden;border:0px;text-align:center;}\n\ .phaselet {position:absolute;overflow:hidden;border:0px;text-align:center;height:100px;font-size:24px;}\n\ .t {position:absolute;top:0%;height:100%;border-right:1px solid black;}\n\ .legend {position: relative; width: 100%; height: 40px; text-align: center;margin-bottom:20px}\n\ .legend .square {position:absolute;top:10px; width: 0px;height: 20px;border:1px solid;padding-left:20px;}\n\ button {height:40px;width:200px;margin-bottom:20px;margin-top:20px;font-size:24px;}\n\ .devlist {position:'+x2changes[1]+';width:190px;}\n\ #devicedetail {height:100px;box-shadow: 5px 5px 20px black;}\n\ </style>\n</head>\n<body>\n' hf.write(html_header) # write the test title and general info header if(sysvals.stamp['time'] != ""): hf.write(headline_stamp.format(sysvals.stamp['host'], sysvals.stamp['kernel'], sysvals.stamp['mode'], \ sysvals.stamp['time'])) # write the device timeline hf.write(devtl.html['timeline']) hf.write(devtl.html['legend']) hf.write('<div id="devicedetailtitle"></div>\n') hf.write('<div id="devicedetail" style="display:none;">\n') # draw the colored boxes for the device detail section for data in testruns: hf.write('<div id="devicedetail%d">\n' % data.testnumber) for b in data.phases: phase = data.dmesg[b] length = phase['end']-phase['start'] left = '%.3f' % (((phase['start']-t0)*100.0)/tTotal) width = '%.3f' % ((length*100.0)/tTotal) hf.write(html_phaselet.format(b, left, width, \ data.dmesg[b]['color'])) hf.write('</div>\n') hf.write('</div>\n') # write the ftrace data (callgraph) data = testruns[-1] if(sysvals.usecallgraph): hf.write('<section id="callgraphs" class="callgraph">\n') # write out the ftrace data converted to html html_func_top = '<article id="{0}" class="atop" style="background-color:{1}">\n<input type="checkbox" class="pf" id="f{2}" checked/><label for="f{2}">{3} {4}</label>\n' html_func_start = '<article>\n<input type="checkbox" class="pf" id="f{0}" checked/><label for="f{0}">{1} {2}</label>\n' html_func_end = '</article>\n' html_func_leaf = '<article>{0} {1}</article>\n' num = 0 for p in data.phases: list = data.dmesg[p]['list'] for devname in data.sortedDevices(p): if('ftrace' not in list[devname]): continue name = devname if(devname in sysvals.altdevname): name = sysvals.altdevname[devname] devid = list[devname]['id'] cg = list[devname]['ftrace'] flen = '<r>(%.3f ms @ %.3f to %.3f)</r>' % \ ((cg.end - cg.start)*1000, cg.start*1000, cg.end*1000) hf.write(html_func_top.format(devid, data.dmesg[p]['color'], \ num, name+' '+p, flen)) num += 1 for line in cg.list: if(line.length < 0.000000001): flen = '' else: flen = '<n>(%.3f ms @ %.3f)</n>' % (line.length*1000, \ line.time*1000) if(line.freturn and line.fcall): hf.write(html_func_leaf.format(line.name, flen)) elif(line.freturn): hf.write(html_func_end) else: hf.write(html_func_start.format(num, line.name, flen)) num += 1 hf.write(html_func_end) hf.write('\n\n </section>\n') # write the footer and close addScriptCode(hf, testruns) hf.write('</body>\n</html>\n') hf.close() return True # Function: addScriptCode # Description: # 
Adds the javascript code to the output html # Arguments: # hf: the open html file pointer # testruns: array of Data objects from parseKernelLog or parseTraceLog def addScriptCode(hf, testruns): t0 = (testruns[0].start - testruns[-1].tSuspended) * 1000 tMax = (testruns[-1].end - testruns[-1].tSuspended) * 1000 # create an array in javascript memory with the device details detail = ' var devtable = [];\n' for data in testruns: topo = data.deviceTopology() detail += ' devtable[%d] = "%s";\n' % (data.testnumber, topo) detail += ' var bounds = [%f,%f];\n' % (t0, tMax) # add the code which will manipulate the data in the browser script_code = \ '<script type="text/javascript">\n'+detail+\ ' function zoomTimeline() {\n'\ ' var timescale = document.getElementById("timescale");\n'\ ' var dmesg = document.getElementById("dmesg");\n'\ ' var zoombox = document.getElementById("dmesgzoombox");\n'\ ' var val = parseFloat(dmesg.style.width);\n'\ ' var newval = 100;\n'\ ' var sh = window.outerWidth / 2;\n'\ ' if(this.id == "zoomin") {\n'\ ' newval = val * 1.2;\n'\ ' if(newval > 40000) newval = 40000;\n'\ ' dmesg.style.width = newval+"%";\n'\ ' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\ ' } else if (this.id == "zoomout") {\n'\ ' newval = val / 1.2;\n'\ ' if(newval < 100) newval = 100;\n'\ ' dmesg.style.width = newval+"%";\n'\ ' zoombox.scrollLeft = ((zoombox.scrollLeft + sh) * newval / val) - sh;\n'\ ' } else {\n'\ ' zoombox.scrollLeft = 0;\n'\ ' dmesg.style.width = "100%";\n'\ ' }\n'\ ' var html = "";\n'\ ' var t0 = bounds[0];\n'\ ' var tMax = bounds[1];\n'\ ' var tTotal = tMax - t0;\n'\ ' var wTotal = tTotal * 100.0 / newval;\n'\ ' for(var tS = 1000; (wTotal / tS) < 3; tS /= 10);\n'\ ' if(tS < 1) tS = 1;\n'\ ' for(var s = ((t0 / tS)|0) * tS; s < tMax; s += tS) {\n'\ ' var pos = (tMax - s) * 100.0 / tTotal;\n'\ ' var name = (s == 0)?"S/R":(s+"ms");\n'\ ' html += "<div class=\\"t\\" style=\\"right:"+pos+"%\\">"+name+"</div>";\n'\ ' }\n'\ ' timescale.innerHTML = html;\n'\ ' }\n'\ ' function deviceHover() {\n'\ ' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\ ' var dmesg = document.getElementById("dmesg");\n'\ ' var dev = dmesg.getElementsByClassName("thread");\n'\ ' var cpu = -1;\n'\ ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\ ' cpu = parseInt(name.slice(7));\n'\ ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\ ' cpu = parseInt(name.slice(8));\n'\ ' for (var i = 0; i < dev.length; i++) {\n'\ ' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\ ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\ ' (name == dname))\n'\ ' {\n'\ ' dev[i].className = "thread hover";\n'\ ' } else {\n'\ ' dev[i].className = "thread";\n'\ ' }\n'\ ' }\n'\ ' }\n'\ ' function deviceUnhover() {\n'\ ' var dmesg = document.getElementById("dmesg");\n'\ ' var dev = dmesg.getElementsByClassName("thread");\n'\ ' for (var i = 0; i < dev.length; i++) {\n'\ ' dev[i].className = "thread";\n'\ ' }\n'\ ' }\n'\ ' function deviceTitle(title, total, cpu) {\n'\ ' var prefix = "Total";\n'\ ' if(total.length > 3) {\n'\ ' prefix = "Average";\n'\ ' total[1] = (total[1]+total[3])/2;\n'\ ' total[2] = (total[2]+total[4])/2;\n'\ ' }\n'\ ' var devtitle = document.getElementById("devicedetailtitle");\n'\ ' var name = title.slice(0, title.indexOf(" "));\n'\ ' if(cpu >= 0) name = "CPU"+cpu;\n'\ ' var driver = "";\n'\ ' var tS = "<t2>(</t2>";\n'\ ' var tR = "<t2>)</t2>";\n'\ ' if(total[1] > 0)\n'\ ' tS = "<t2>("+prefix+" Suspend:</t2><t0> "+total[1].toFixed(3)+" ms</t0> ";\n'\ ' if(total[2] > 
0)\n'\ ' tR = " <t2>"+prefix+" Resume:</t2><t0> "+total[2].toFixed(3)+" ms<t2>)</t2></t0>";\n'\ ' var s = title.indexOf("{");\n'\ ' var e = title.indexOf("}");\n'\ ' if((s >= 0) && (e >= 0))\n'\ ' driver = title.slice(s+1, e) + " <t1>@</t1> ";\n'\ ' if(total[1] > 0 && total[2] > 0)\n'\ ' devtitle.innerHTML = "<t0>"+driver+name+"</t0> "+tS+tR;\n'\ ' else\n'\ ' devtitle.innerHTML = "<t0>"+title+"</t0>";\n'\ ' return name;\n'\ ' }\n'\ ' function deviceDetail() {\n'\ ' var devinfo = document.getElementById("devicedetail");\n'\ ' devinfo.style.display = "block";\n'\ ' var name = this.title.slice(0, this.title.indexOf(" ("));\n'\ ' var cpu = -1;\n'\ ' if(name.match("CPU_ON\[[0-9]*\]"))\n'\ ' cpu = parseInt(name.slice(7));\n'\ ' else if(name.match("CPU_OFF\[[0-9]*\]"))\n'\ ' cpu = parseInt(name.slice(8));\n'\ ' var dmesg = document.getElementById("dmesg");\n'\ ' var dev = dmesg.getElementsByClassName("thread");\n'\ ' var idlist = [];\n'\ ' var pdata = [[]];\n'\ ' var pd = pdata[0];\n'\ ' var total = [0.0, 0.0, 0.0];\n'\ ' for (var i = 0; i < dev.length; i++) {\n'\ ' dname = dev[i].title.slice(0, dev[i].title.indexOf(" ("));\n'\ ' if((cpu >= 0 && dname.match("CPU_O[NF]*\\\[*"+cpu+"\\\]")) ||\n'\ ' (name == dname))\n'\ ' {\n'\ ' idlist[idlist.length] = dev[i].id;\n'\ ' var tidx = 1;\n'\ ' if(dev[i].id[0] == "a") {\n'\ ' pd = pdata[0];\n'\ ' } else {\n'\ ' if(pdata.length == 1) pdata[1] = [];\n'\ ' if(total.length == 3) total[3]=total[4]=0.0;\n'\ ' pd = pdata[1];\n'\ ' tidx = 3;\n'\ ' }\n'\ ' var info = dev[i].title.split(" ");\n'\ ' var pname = info[info.length-1];\n'\ ' pd[pname] = parseFloat(info[info.length-3].slice(1));\n'\ ' total[0] += pd[pname];\n'\ ' if(pname.indexOf("suspend") >= 0)\n'\ ' total[tidx] += pd[pname];\n'\ ' else\n'\ ' total[tidx+1] += pd[pname];\n'\ ' }\n'\ ' }\n'\ ' var devname = deviceTitle(this.title, total, cpu);\n'\ ' var left = 0.0;\n'\ ' for (var t = 0; t < pdata.length; t++) {\n'\ ' pd = pdata[t];\n'\ ' devinfo = document.getElementById("devicedetail"+t);\n'\ ' var phases = devinfo.getElementsByClassName("phaselet");\n'\ ' for (var i = 0; i < phases.length; i++) {\n'\ ' if(phases[i].id in pd) {\n'\ ' var w = 100.0*pd[phases[i].id]/total[0];\n'\ ' var fs = 32;\n'\ ' if(w < 8) fs = 4*w | 0;\n'\ ' var fs2 = fs*3/4;\n'\ ' phases[i].style.width = w+"%";\n'\ ' phases[i].style.left = left+"%";\n'\ ' phases[i].title = phases[i].id+" "+pd[phases[i].id]+" ms";\n'\ ' left += w;\n'\ ' var time = "<t4 style=\\"font-size:"+fs+"px\\">"+pd[phases[i].id]+" ms<br></t4>";\n'\ ' var pname = "<t3 style=\\"font-size:"+fs2+"px\\">"+phases[i].id.replace("_", " ")+"</t3>";\n'\ ' phases[i].innerHTML = time+pname;\n'\ ' } else {\n'\ ' phases[i].style.width = "0%";\n'\ ' phases[i].style.left = left+"%";\n'\ ' }\n'\ ' }\n'\ ' }\n'\ ' var cglist = document.getElementById("callgraphs");\n'\ ' if(!cglist) return;\n'\ ' var cg = cglist.getElementsByClassName("atop");\n'\ ' for (var i = 0; i < cg.length; i++) {\n'\ ' if(idlist.indexOf(cg[i].id) >= 0) {\n'\ ' cg[i].style.display = "block";\n'\ ' } else {\n'\ ' cg[i].style.display = "none";\n'\ ' }\n'\ ' }\n'\ ' }\n'\ ' function devListWindow(e) {\n'\ ' var sx = e.clientX;\n'\ ' if(sx > window.innerWidth - 440)\n'\ ' sx = window.innerWidth - 440;\n'\ ' var cfg="top="+e.screenY+", left="+sx+", width=440, height=720, scrollbars=yes";\n'\ ' var win = window.open("", "_blank", cfg);\n'\ ' if(window.chrome) win.moveBy(sx, 0);\n'\ ' var html = "<title>"+e.target.innerHTML+"</title>"+\n'\ ' "<style type=\\"text/css\\">"+\n'\ ' " ul 
{list-style-type:circle;padding-left:10px;margin-left:10px;}"+\n'\ ' "</style>"\n'\ ' var dt = devtable[0];\n'\ ' if(e.target.id != "devlist1")\n'\ ' dt = devtable[1];\n'\ ' win.document.write(html+dt);\n'\ ' }\n'\ ' window.addEventListener("load", function () {\n'\ ' var dmesg = document.getElementById("dmesg");\n'\ ' dmesg.style.width = "100%"\n'\ ' document.getElementById("zoomin").onclick = zoomTimeline;\n'\ ' document.getElementById("zoomout").onclick = zoomTimeline;\n'\ ' document.getElementById("zoomdef").onclick = zoomTimeline;\n'\ ' var devlist = document.getElementsByClassName("devlist");\n'\ ' for (var i = 0; i < devlist.length; i++)\n'\ ' devlist[i].onclick = devListWindow;\n'\ ' var dev = dmesg.getElementsByClassName("thread");\n'\ ' for (var i = 0; i < dev.length; i++) {\n'\ ' dev[i].onclick = deviceDetail;\n'\ ' dev[i].onmouseover = deviceHover;\n'\ ' dev[i].onmouseout = deviceUnhover;\n'\ ' }\n'\ ' zoomTimeline();\n'\ ' });\n'\ '</script>\n' hf.write(script_code); # Function: executeSuspend # Description: # Execute system suspend through the sysfs interface, then copy the output # dmesg and ftrace files to the test output directory. def executeSuspend(): global sysvals detectUSB(False) t0 = time.time()*1000 tp = sysvals.tpath # execute however many s/r runs requested for count in range(1,sysvals.execcount+1): # clear the kernel ring buffer just as we start os.system('dmesg -C') # enable callgraph ftrace only for the second run if(sysvals.usecallgraph and count == 2): # set trace type os.system('echo function_graph > '+tp+'current_tracer') os.system('echo "" > '+tp+'set_ftrace_filter') # set trace format options os.system('echo funcgraph-abstime > '+tp+'trace_options') os.system('echo funcgraph-proc > '+tp+'trace_options') # focus only on device suspend and resume os.system('cat '+tp+'available_filter_functions | '+\ 'grep dpm_run_callback > '+tp+'set_graph_function') # if this is test2 and there's a delay, start here if(count > 1 and sysvals.x2delay > 0): tN = time.time()*1000 while (tN - t0) < sysvals.x2delay: tN = time.time()*1000 time.sleep(0.001) # start ftrace if(sysvals.usecallgraph or sysvals.usetraceevents): print('START TRACING') os.system('echo 1 > '+tp+'tracing_on') # initiate suspend if(sysvals.usecallgraph or sysvals.usetraceevents): os.system('echo SUSPEND START > '+tp+'trace_marker') if(sysvals.rtcwake): print('SUSPEND START') print('will autoresume in %d seconds' % sysvals.rtcwaketime) sysvals.rtcWakeAlarm() else: print('SUSPEND START (press a key to resume)') pf = open(sysvals.powerfile, 'w') pf.write(sysvals.suspendmode) # execution will pause here pf.close() t0 = time.time()*1000 # return from suspend print('RESUME COMPLETE') if(sysvals.usecallgraph or sysvals.usetraceevents): os.system('echo RESUME COMPLETE > '+tp+'trace_marker') # see if there's firmware timing data to be had t = sysvals.postresumetime if(t > 0): print('Waiting %d seconds for POST-RESUME trace events...' 
% t) time.sleep(t) # stop ftrace if(sysvals.usecallgraph or sysvals.usetraceevents): os.system('echo 0 > '+tp+'tracing_on') print('CAPTURING TRACE') writeDatafileHeader(sysvals.ftracefile) os.system('cat '+tp+'trace >> '+sysvals.ftracefile) os.system('echo "" > '+tp+'trace') # grab a copy of the dmesg output print('CAPTURING DMESG') writeDatafileHeader(sysvals.dmesgfile) os.system('dmesg -c >> '+sysvals.dmesgfile) def writeDatafileHeader(filename): global sysvals fw = getFPDT(False) prt = sysvals.postresumetime fp = open(filename, 'a') fp.write(sysvals.teststamp+'\n') if(fw): fp.write('# fwsuspend %u fwresume %u\n' % (fw[0], fw[1])) if(prt > 0): fp.write('# post resume time %u\n' % prt) fp.close() # Function: executeAndroidSuspend # Description: # Execute system suspend through the sysfs interface # on a remote android device, then transfer the output # dmesg and ftrace files to the local output directory. def executeAndroidSuspend(): global sysvals # check to see if the display is currently off tp = sysvals.tpath out = os.popen(sysvals.adb+\ ' shell dumpsys power | grep mScreenOn').read().strip() # if so we need to turn it on so we can issue a new suspend if(out.endswith('false')): print('Waking the device up for the test...') # send the KEYPAD_POWER keyevent to wake it up os.system(sysvals.adb+' shell input keyevent 26') # wait a few seconds so the user can see the device wake up time.sleep(3) # execute however many s/r runs requested for count in range(1,sysvals.execcount+1): # clear the kernel ring buffer just as we start os.system(sysvals.adb+' shell dmesg -c > /dev/null 2>&1') # start ftrace if(sysvals.usetraceevents): print('START TRACING') os.system(sysvals.adb+" shell 'echo 1 > "+tp+"tracing_on'") # initiate suspend for count in range(1,sysvals.execcount+1): if(sysvals.usetraceevents): os.system(sysvals.adb+\ " shell 'echo SUSPEND START > "+tp+"trace_marker'") print('SUSPEND START (press a key on the device to resume)') os.system(sysvals.adb+" shell 'echo "+sysvals.suspendmode+\ " > "+sysvals.powerfile+"'") # execution will pause here, then adb will exit while(True): check = os.popen(sysvals.adb+\ ' shell pwd 2>/dev/null').read().strip() if(len(check) > 0): break time.sleep(1) if(sysvals.usetraceevents): os.system(sysvals.adb+" shell 'echo RESUME COMPLETE > "+tp+\ "trace_marker'") # return from suspend print('RESUME COMPLETE') # stop ftrace if(sysvals.usetraceevents): os.system(sysvals.adb+" shell 'echo 0 > "+tp+"tracing_on'") print('CAPTURING TRACE') os.system('echo "'+sysvals.teststamp+'" > '+sysvals.ftracefile) os.system(sysvals.adb+' shell cat '+tp+\ 'trace >> '+sysvals.ftracefile) # grab a copy of the dmesg output print('CAPTURING DMESG') os.system('echo "'+sysvals.teststamp+'" > '+sysvals.dmesgfile) os.system(sysvals.adb+' shell dmesg >> '+sysvals.dmesgfile) # Function: setUSBDevicesAuto # Description: # Set the autosuspend control parameter of all USB devices to auto # This can be dangerous, so use at your own risk, most devices are set # to always-on since the kernel cant determine if the device can # properly autosuspend def setUSBDevicesAuto(): global sysvals rootCheck() for dirname, dirnames, filenames in os.walk('/sys/devices'): if(re.match('.*/usb[0-9]*.*', dirname) and 'idVendor' in filenames and 'idProduct' in filenames): os.system('echo auto > %s/power/control' % dirname) name = dirname.split('/')[-1] desc = os.popen('cat %s/product 2>/dev/null' % \ dirname).read().replace('\n', '') ctrl = os.popen('cat %s/power/control 2>/dev/null' % \ 
dirname).read().replace('\n', '') print('control is %s for %6s: %s' % (ctrl, name, desc)) # Function: yesno # Description: # Print out an equivalent Y or N for a set of known parameter values # Output: # 'Y', 'N', or ' ' if the value is unknown def yesno(val): yesvals = ['auto', 'enabled', 'active', '1'] novals = ['on', 'disabled', 'suspended', 'forbidden', 'unsupported'] if val in yesvals: return 'Y' elif val in novals: return 'N' return ' ' # Function: ms2nice # Description: # Print out a very concise time string in minutes and seconds # Output: # The time string, e.g. "1901m16s" def ms2nice(val): ms = 0 try: ms = int(val) except: return 0.0 m = ms / 60000 s = (ms / 1000) - (m * 60) return '%3dm%2ds' % (m, s) # Function: detectUSB # Description: # Detect all the USB hosts and devices currently connected and add # a list of USB device names to sysvals for better timeline readability # Arguments: # output: True to output the info to stdout, False otherwise def detectUSB(output): global sysvals field = {'idVendor':'', 'idProduct':'', 'product':'', 'speed':''} power = {'async':'', 'autosuspend':'', 'autosuspend_delay_ms':'', 'control':'', 'persist':'', 'runtime_enabled':'', 'runtime_status':'', 'runtime_usage':'', 'runtime_active_time':'', 'runtime_suspended_time':'', 'active_duration':'', 'connected_duration':''} if(output): print('LEGEND') print('---------------------------------------------------------------------------------------------') print(' A = async/sync PM queue Y/N D = autosuspend delay (seconds)') print(' S = autosuspend Y/N rACTIVE = runtime active (min/sec)') print(' P = persist across suspend Y/N rSUSPEN = runtime suspend (min/sec)') print(' E = runtime suspend enabled/forbidden Y/N ACTIVE = active duration (min/sec)') print(' R = runtime status active/suspended Y/N CONNECT = connected duration (min/sec)') print(' U = runtime usage count') print('---------------------------------------------------------------------------------------------') print(' NAME ID DESCRIPTION SPEED A S P E R U D rACTIVE rSUSPEN ACTIVE CONNECT') print('---------------------------------------------------------------------------------------------') for dirname, dirnames, filenames in os.walk('/sys/devices'): if(re.match('.*/usb[0-9]*.*', dirname) and 'idVendor' in filenames and 'idProduct' in filenames): for i in field: field[i] = os.popen('cat %s/%s 2>/dev/null' % \ (dirname, i)).read().replace('\n', '') name = dirname.split('/')[-1] if(len(field['product']) > 0): sysvals.altdevname[name] = \ '%s [%s]' % (field['product'], name) else: sysvals.altdevname[name] = \ '%s:%s [%s]' % (field['idVendor'], \ field['idProduct'], name) if(output): for i in power: power[i] = os.popen('cat %s/power/%s 2>/dev/null' % \ (dirname, i)).read().replace('\n', '') if(re.match('usb[0-9]*', name)): first = '%-8s' % name else: first = '%8s' % name print('%s [%s:%s] %-20s %-4s %1s %1s %1s %1s %1s %1s %1s %s %s %s %s' % \ (first, field['idVendor'], field['idProduct'], \ field['product'][0:20], field['speed'], \ yesno(power['async']), \ yesno(power['control']), \ yesno(power['persist']), \ yesno(power['runtime_enabled']), \ yesno(power['runtime_status']), \ power['runtime_usage'], \ power['autosuspend'], \ ms2nice(power['runtime_active_time']), \ ms2nice(power['runtime_suspended_time']), \ ms2nice(power['active_duration']), \ ms2nice(power['connected_duration']))) # Function: getModes # Description: # Determine the supported power modes on this system # Output: # A string list of the available modes def getModes(): global 
sysvals
	modes = ''
	if(not sysvals.android):
		if(os.path.exists(sysvals.powerfile)):
			fp = open(sysvals.powerfile, 'r')
			modes = string.split(fp.read())
			fp.close()
	else:
		line = os.popen(sysvals.adb+' shell cat '+\
			sysvals.powerfile).read().strip()
		modes = string.split(line)
	return modes

# Function: getFPDT
# Description:
#	 Read the acpi bios tables and pull out FPDT, the firmware data
# Arguments:
#	 output: True to output the info to stdout, False otherwise
def getFPDT(output):
	global sysvals

	rectype = {}
	rectype[0] = 'Firmware Basic Boot Performance Record'
	rectype[1] = 'S3 Performance Table Record'
	prectype = {}
	prectype[0] = 'Basic S3 Resume Performance Record'
	prectype[1] = 'Basic S3 Suspend Performance Record'

	rootCheck()
	if(not os.path.exists(sysvals.fpdtpath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.access(sysvals.fpdtpath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.fpdtpath, False)
		return False
	if(not os.path.exists(sysvals.mempath)):
		if(output):
			doError('file doesnt exist: %s' % sysvals.mempath, False)
		return False
	if(not os.access(sysvals.mempath, os.R_OK)):
		if(output):
			doError('file isnt readable: %s' % sysvals.mempath, False)
		return False

	fp = open(sysvals.fpdtpath, 'rb')
	buf = fp.read()
	fp.close()

	if(len(buf) < 36):
		if(output):
			doError('Invalid FPDT table data, should '+\
				'be at least 36 bytes', False)
		return False

	table = struct.unpack('4sIBB6s8sI4sI', buf[0:36])
	if(output):
		print('')
		print('Firmware Performance Data Table (%s)' % table[0])
		print('                  Signature : %s' % table[0])
		print('               Table Length : %u' % table[1])
		print('                   Revision : %u' % table[2])
		print('                   Checksum : 0x%x' % table[3])
		print('                     OEM ID : %s' % table[4])
		print('               OEM Table ID : %s' % table[5])
		print('               OEM Revision : %u' % table[6])
		print('                 Creator ID : %s' % table[7])
		print('           Creator Revision : 0x%x' % table[8])
		print('')

	if(table[0] != 'FPDT'):
		if(output):
			doError('Invalid FPDT table', False)
		return False
	if(len(buf) <= 36):
		return False
	i = 0
	fwData = [0, 0]
	records = buf[36:]
	fp = open(sysvals.mempath, 'rb')
	while(i < len(records)):
		header = struct.unpack('HBB', records[i:i+4])
		if(header[0] not in rectype):
			# skip past unknown records instead of looping forever
			i += header[1]
			continue
		if(header[1] != 16):
			i += header[1]
			continue
		addr = struct.unpack('Q', records[i+8:i+16])[0]
		try:
			fp.seek(addr)
			first = fp.read(8)
		except:
			doError('Bad address 0x%x in %s' % (addr, sysvals.mempath), False)
		rechead = struct.unpack('4sI', first)
		recdata = fp.read(rechead[1]-8)
		if(rechead[0] == 'FBPT'):
			record = struct.unpack('HBBIQQQQQ', recdata)
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
				print('                  Reset END : %u ns' % record[4])
				print('  OS Loader LoadImage Start : %u ns' % record[5])
				print(' OS Loader StartImage Start : %u ns' % record[6])
				print('     ExitBootServices Entry : %u ns' % record[7])
				print('      ExitBootServices Exit : %u ns' % record[8])
		elif(rechead[0] == 'S3PT'):
			if(output):
				print('%s (%s)' % (rectype[header[0]], rechead[0]))
			j = 0
			while(j < len(recdata)):
				prechead = struct.unpack('HBB', recdata[j:j+4])
				if(prechead[0] not in prectype):
					# unknown sub-record type, stop scanning
					break
				if(prechead[0] == 0):
					record = struct.unpack('IIQQ', recdata[j:j+prechead[1]])
					fwData[1] = record[2]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               Resume Count : %u' % \
							record[1])
						print('                 FullResume : %u ns' % \
							record[2])
						print('              AverageResume : %u ns' % \
							record[3])
				elif(prechead[0] == 1):
					record = struct.unpack('QQ', recdata[j+4:j+prechead[1]])
					fwData[0] = record[1] - record[0]
					if(output):
						print('    %s' % prectype[prechead[0]])
						print('               SuspendStart : %u ns' % \
record[0]) print(' SuspendEnd : %u ns' % \ record[1]) print(' SuspendTime : %u ns' % \ fwData[0]) j += prechead[1] if(output): print('') i += header[1] fp.close() return fwData # Function: statusCheck # Description: # Verify that the requested command and options will work, and # print the results to the terminal # Output: # True if the test will work, False if not def statusCheck(): global sysvals status = True if(sysvals.android): print('Checking the android system ...') else: print('Checking this system (%s)...' % platform.node()) # check if adb is connected to a device if(sysvals.android): res = 'NO' out = os.popen(sysvals.adb+' get-state').read().strip() if(out == 'device'): res = 'YES' print(' is android device connected: %s' % res) if(res != 'YES'): print(' Please connect the device before using this tool') return False # check we have root access res = 'NO (No features of this tool will work!)' if(sysvals.android): out = os.popen(sysvals.adb+' shell id').read().strip() if('root' in out): res = 'YES' else: if(os.environ['USER'] == 'root'): res = 'YES' print(' have root access: %s' % res) if(res != 'YES'): if(sysvals.android): print(' Try running "adb root" to restart the daemon as root') else: print(' Try running this script with sudo') return False # check sysfs is mounted res = 'NO (No features of this tool will work!)' if(sysvals.android): out = os.popen(sysvals.adb+' shell ls '+\ sysvals.powerfile).read().strip() if(out == sysvals.powerfile): res = 'YES' else: if(os.path.exists(sysvals.powerfile)): res = 'YES' print(' is sysfs mounted: %s' % res) if(res != 'YES'): return False # check target mode is a valid mode res = 'NO' modes = getModes() if(sysvals.suspendmode in modes): res = 'YES' else: status = False print(' is "%s" a valid power mode: %s' % (sysvals.suspendmode, res)) if(res == 'NO'): print(' valid power modes are: %s' % modes) print(' please choose one with -m') # check if the tool can unlock the device if(sysvals.android): res = 'YES' out1 = os.popen(sysvals.adb+\ ' shell dumpsys power | grep mScreenOn').read().strip() out2 = os.popen(sysvals.adb+\ ' shell input').read().strip() if(not out1.startswith('mScreenOn') or not out2.startswith('usage')): res = 'NO (wake the android device up before running the test)' print(' can I unlock the screen: %s' % res) # check if ftrace is available res = 'NO' ftgood = verifyFtrace() if(ftgood): res = 'YES' elif(sysvals.usecallgraph): status = False print(' is ftrace supported: %s' % res) # what data source are we using res = 'DMESG' if(ftgood): sysvals.usetraceeventsonly = True sysvals.usetraceevents = False for e in sysvals.traceevents: check = False if(sysvals.android): out = os.popen(sysvals.adb+' shell ls -d '+\ sysvals.epath+e).read().strip() if(out == sysvals.epath+e): check = True else: if(os.path.exists(sysvals.epath+e)): check = True if(not check): sysvals.usetraceeventsonly = False if(e == 'suspend_resume' and check): sysvals.usetraceevents = True if(sysvals.usetraceevents and sysvals.usetraceeventsonly): res = 'FTRACE (all trace events found)' elif(sysvals.usetraceevents): res = 'DMESG and FTRACE (suspend_resume trace event found)' print(' timeline data source: %s' % res) # check if rtcwake res = 'NO' if(sysvals.rtcpath != ''): res = 'YES' elif(sysvals.rtcwake): status = False print(' is rtcwake supported: %s' % res) return status # Function: doError # Description: # generic error function for catastrphic failures # Arguments: # msg: the error message to print # help: True if printHelp should be called after, False 
otherwise
def doError(msg, help):
	if(help == True):
		printHelp()
	print('ERROR: %s\n' % msg)
	sys.exit()

# Function: doWarning
# Description:
#	 generic warning function for non-catastrophic anomalies
# Arguments:
#	 msg: the warning message to print
#	 file: If not empty, a filename to request be sent to the owner for debug
def doWarning(msg, file):
	print('/* %s */' % msg)
	if(file):
		print('/* For a fix, please send this'+\
			' %s file to <todd.e.brandt@intel.com> */' % file)

# Function: rootCheck
# Description:
#	 quick check to see if we have root access
def rootCheck():
	if(os.environ['USER'] != 'root'):
		doError('This script must be run as root', False)

# Function: getArgInt
# Description:
#	 pull out an integer argument from the command line with checks
def getArgInt(name, args, min, max):
	try:
		arg = args.next()
	except:
		doError(name+': no argument supplied', True)
	try:
		val = int(arg)
	except:
		doError(name+': non-integer value given', True)
	if(val < min or val > max):
		doError(name+': value should be between %d and %d' % (min, max), True)
	return val

# Function: rerunTest
# Description:
#	 generate an output from an existing set of ftrace/dmesg logs
def rerunTest():
	global sysvals

	if(sysvals.ftracefile != ''):
		doesTraceLogHaveTraceEvents()
	if(sysvals.dmesgfile == '' and not sysvals.usetraceeventsonly):
		doError('recreating this html output '+\
			'requires a dmesg file', False)
	sysvals.setOutputFile()
	vprint('Output file: %s' % sysvals.htmlfile)

	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		testruns = parseTraceLog()
	else:
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.ftracefile != ''):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)

# Function: runTest
# Description:
#	 execute a suspend/resume, gather the logs, and generate the output
def runTest(subdir):
	global sysvals

	# prepare for the test
	if(not sysvals.android):
		initFtrace()
	else:
		initFtraceAndroid()
	sysvals.initTestOutput(subdir)

	vprint('Output files:\n    %s' % sysvals.dmesgfile)
	if(sysvals.usecallgraph or sysvals.usetraceevents or sysvals.usetraceeventsonly):
		vprint('    %s' % sysvals.ftracefile)
	vprint('    %s' % sysvals.htmlfile)

	# execute the test
	if(not sysvals.android):
		executeSuspend()
	else:
		executeAndroidSuspend()

	# analyze the data and create the html output
	print('PROCESSING DATA')
	if(sysvals.usetraceeventsonly):
		# data for kernels 3.15 or newer is entirely in ftrace
		testruns = parseTraceLog()
	else:
		# data for kernels older than 3.15 is primarily in dmesg
		testruns = loadKernelLog()
		for data in testruns:
			parseKernelLog(data)
		if(sysvals.usecallgraph or sysvals.usetraceevents):
			appendIncompleteTraceLog(testruns)
	createHTML(testruns)

# Function: runSummary
# Description:
#	 create a summary of tests in a sub-directory
def runSummary(subdir, output):
	global sysvals

	# get a list of ftrace output files
	files = []
	for dirname, dirnames, filenames in os.walk(subdir):
		for filename in filenames:
			if(re.match('.*_ftrace.txt', filename)):
				files.append("%s/%s" % (dirname, filename))

	# process the files in order and get an array of data objects
	testruns = []
	for file in sorted(files):
		if output:
			print("Test found in %s" % os.path.dirname(file))
		sysvals.ftracefile = file
		sysvals.dmesgfile = file.replace('_ftrace.txt', '_dmesg.txt')
		doesTraceLogHaveTraceEvents()
		sysvals.usecallgraph = False
		if not sysvals.usetraceeventsonly:
			if(not os.path.exists(sysvals.dmesgfile)):
				print("Skipping %s: not a valid test input" % file)
				continue
			else:
				if output:
					f = os.path.basename(sysvals.ftracefile)
					d =
os.path.basename(sysvals.dmesgfile)
					print("\tInput files: %s and %s" % (f, d))
				testdata = loadKernelLog()
				data = testdata[0]
				parseKernelLog(data)
				testdata = [data]
				appendIncompleteTraceLog(testdata)
		else:
			if output:
				print("\tInput file: %s" % os.path.basename(sysvals.ftracefile))
			testdata = parseTraceLog()
			data = testdata[0]
		data.normalizeTime(data.tSuspended)
		link = file.replace(subdir+'/', '').replace('_ftrace.txt', '.html')
		data.outfile = link
		testruns.append(data)

	createHTMLSummarySimple(testruns, subdir+'/summary.html')

# Function: printHelp
# Description:
#	 print out the help text
def printHelp():
	global sysvals
	modes = getModes()

	print('')
	print('AnalyzeSuspend v%.1f' % sysvals.version)
	print('Usage: sudo analyze_suspend.py <options>')
	print('')
	print('Description:')
	print('  This tool is designed to assist kernel and OS developers in optimizing')
	print('  their linux stack\'s suspend/resume time. Using a kernel image built')
	print('  with a few extra options enabled, the tool will execute a suspend and')
	print('  capture dmesg and ftrace data until resume is complete. This data is')
	print('  transformed into a device timeline and an optional callgraph to give')
	print('  a detailed view of which devices/subsystems are taking the most')
	print('  time in suspend/resume.')
	print('')
	print('  Generates output files in subdirectory: suspend-mmddyy-HHMMSS')
	print('   HTML output:           <hostname>_<mode>.html')
	print('   raw dmesg output:      <hostname>_<mode>_dmesg.txt')
	print('   raw ftrace output:     <hostname>_<mode>_ftrace.txt')
	print('')
	print('Options:')
	print('  [general]')
	print('    -h          Print this help text')
	print('    -v          Print the current tool version')
	print('    -verbose    Print extra information during execution and analysis')
	print('    -status     Test to see if the system is enabled to run this tool')
	print('    -modes      List available suspend modes')
	print('    -m mode     Mode to initiate for suspend %s (default: %s)' % (modes, sysvals.suspendmode))
	print('    -rtcwake t  Use rtcwake to autoresume after <t> seconds (default: disabled)')
	print('  [advanced]')
	print('    -f          Use ftrace to create device callgraphs (default: disabled)')
	print('    -filter "d1 d2 ..." Filter out all but this list of dev names')
	print('    -x2         Run two suspend/resumes back to back (default: disabled)')
	print('    -x2delay t  Minimum millisecond delay <t> between the two test runs (default: 0 ms)')
	print('    -postres t  Time after resume completion to wait for post-resume events (default: 0 S)')
	print('    -multi n d  Execute <n> consecutive tests at <d> seconds intervals.
The outputs will') print(' be created in a new subdirectory with a summary page.') print(' [utilities]') print(' -fpdt Print out the contents of the ACPI Firmware Performance Data Table') print(' -usbtopo Print out the current USB topology with power info') print(' -usbauto Enable autosuspend for all connected USB devices') print(' [android testing]') print(' -adb binary Use the given adb binary to run the test on an android device.') print(' The device should already be connected and with root access.') print(' Commands will be executed on the device using "adb shell"') print(' [re-analyze data from previous runs]') print(' -ftrace ftracefile Create HTML output using ftrace input') print(' -dmesg dmesgfile Create HTML output using dmesg (not needed for kernel >= 3.15)') print(' -summary directory Create a summary of all test in this dir') print('') return True # ----------------- MAIN -------------------- # exec start (skipped if script is loaded as library) if __name__ == '__main__': cmd = '' cmdarg = '' multitest = {'run': False, 'count': 0, 'delay': 0} # loop through the command line arguments args = iter(sys.argv[1:]) for arg in args: if(arg == '-m'): try: val = args.next() except: doError('No mode supplied', True) sysvals.suspendmode = val elif(arg == '-adb'): try: val = args.next() except: doError('No adb binary supplied', True) if(not os.path.exists(val)): doError('file doesnt exist: %s' % val, False) if(not os.access(val, os.X_OK)): doError('file isnt executable: %s' % val, False) try: check = os.popen(val+' version').read().strip() except: doError('adb version failed to execute', False) if(not re.match('Android Debug Bridge .*', check)): doError('adb version failed to execute', False) sysvals.adb = val sysvals.android = True elif(arg == '-x2'): if(sysvals.postresumetime > 0): doError('-x2 is not compatible with -postres', False) sysvals.execcount = 2 elif(arg == '-x2delay'): sysvals.x2delay = getArgInt('-x2delay', args, 0, 60000) elif(arg == '-postres'): if(sysvals.execcount != 1): doError('-x2 is not compatible with -postres', False) sysvals.postresumetime = getArgInt('-postres', args, 0, 3600) elif(arg == '-f'): sysvals.usecallgraph = True elif(arg == '-modes'): cmd = 'modes' elif(arg == '-fpdt'): cmd = 'fpdt' elif(arg == '-usbtopo'): cmd = 'usbtopo' elif(arg == '-usbauto'): cmd = 'usbauto' elif(arg == '-status'): cmd = 'status' elif(arg == '-verbose'): sysvals.verbose = True elif(arg == '-v'): print("Version %.1f" % sysvals.version) sys.exit() elif(arg == '-rtcwake'): sysvals.rtcwake = True sysvals.rtcwaketime = getArgInt('-rtcwake', args, 0, 3600) elif(arg == '-multi'): multitest['run'] = True multitest['count'] = getArgInt('-multi n (exec count)', args, 2, 1000000) multitest['delay'] = getArgInt('-multi d (delay between tests)', args, 0, 3600) elif(arg == '-dmesg'): try: val = args.next() except: doError('No dmesg file supplied', True) sysvals.notestrun = True sysvals.dmesgfile = val if(os.path.exists(sysvals.dmesgfile) == False): doError('%s doesnt exist' % sysvals.dmesgfile, False) elif(arg == '-ftrace'): try: val = args.next() except: doError('No ftrace file supplied', True) sysvals.notestrun = True sysvals.usecallgraph = True sysvals.ftracefile = val if(os.path.exists(sysvals.ftracefile) == False): doError('%s doesnt exist' % sysvals.ftracefile, False) elif(arg == '-summary'): try: val = args.next() except: doError('No directory supplied', True) cmd = 'summary' cmdarg = val sysvals.notestrun = True if(os.path.isdir(val) == False): doError('%s isnt accesible' % val, 
False)
		elif(arg == '-filter'):
			try:
				val = args.next()
			except:
				doError('No devnames supplied', True)
			sysvals.setDeviceFilter(val)
		elif(arg == '-h'):
			printHelp()
			sys.exit()
		else:
			doError('Invalid argument: '+arg, True)

	# just run a utility command and exit
	if(cmd != ''):
		if(cmd == 'status'):
			statusCheck()
		elif(cmd == 'fpdt'):
			if(sysvals.android):
				doError('cannot read FPDT on android device', False)
			getFPDT(True)
		elif(cmd == 'usbtopo'):
			if(sysvals.android):
				doError('cannot read USB topology '+\
					'on an android device', False)
			detectUSB(True)
		elif(cmd == 'modes'):
			modes = getModes()
			print(modes)
		elif(cmd == 'usbauto'):
			setUSBDevicesAuto()
		elif(cmd == 'summary'):
			print("Generating a summary of folder \"%s\"" % cmdarg)
			runSummary(cmdarg, True)
		sys.exit()

	# run test on android device
	if(sysvals.android):
		if(sysvals.usecallgraph):
			doError('ftrace (-f) is not yet supported '+\
				'in the android kernel', False)
		if(sysvals.notestrun):
			doError('cannot analyze test files on the '+\
				'android device', False)

	# if instructed, re-analyze existing data files
	if(sysvals.notestrun):
		rerunTest()
		sys.exit()

	# verify that we can run a test
	if(not statusCheck()):
		print('Check FAILED, aborting the test run!')
		sys.exit()

	if multitest['run']:
		# run multiple tests in a separate subdirectory
		s = 'x%d' % multitest['count']
		subdir = datetime.now().strftime('suspend-'+s+'-%m%d%y-%H%M%S')
		os.mkdir(subdir)
		for i in range(multitest['count']):
			if(i != 0):
				print('Waiting %d seconds...' % (multitest['delay']))
				time.sleep(multitest['delay'])
			print('TEST (%d/%d) START' % (i+1, multitest['count']))
			runTest(subdir)
			print('TEST (%d/%d) COMPLETE' % (i+1, multitest['count']))
		runSummary(subdir, False)
	else:
		# run the test in the current directory
		runTest(".")
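# ----------------------------------------------------------------------------
# Illustrative sketch (added for this write-up, not part of the original
# tool): setTimelineRows() packs timeline entries into the smallest number of
# rows it can, such that no two entries in a row overlap in time. The helper
# below exercises that first-fit packing on hand-made interval dicts; the
# entry names are hypothetical and the function is never called by the tool.
def _rowpack_demo():
	entries = {
		'devA': {'start': 0.0, 'end': 0.5, 'row': -1},
		'devB': {'start': 0.4, 'end': 0.9, 'row': -1},	# overlaps devA
		'devC': {'start': 0.6, 'end': 0.8, 'row': -1},	# fits after devA
	}
	order = sorted(entries, key=lambda k: entries[k]['start'])
	# devA and devC share row 0, devB is pushed to row 1, so this returns 2
	return setTimelineRows(entries, order)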
gpl-2.0
mccheung/kbengine
kbe/src/lib/python/Doc/includes/email-unpack.py
43
1551
#!/usr/bin/env python3 """Unpack a MIME message into a directory of files.""" import os import sys import email import errno import mimetypes from argparse import ArgumentParser def main(): parser = ArgumentParser(description="""\ Unpack a MIME message into a directory of files. """) parser.add_argument('-d', '--directory', required=True, help="""Unpack the MIME message into the named directory, which will be created if it doesn't already exist.""") parser.add_argument('msgfile') args = parser.parse_args() with open(args.msgfile) as fp: msg = email.message_from_file(fp) try: os.mkdir(args.directory) except FileExistsError: pass counter = 1 for part in msg.walk(): # multipart/* are just containers if part.get_content_maintype() == 'multipart': continue # Applications should really sanitize the given filename so that an # email message can't be used to overwrite important files filename = part.get_filename() if not filename: ext = mimetypes.guess_extension(part.get_content_type()) if not ext: # Use a generic bag-of-bits extension ext = '.bin' filename = 'part-%03d%s' % (counter, ext) counter += 1 with open(os.path.join(args.directory, filename), 'wb') as fp: fp.write(part.get_payload(decode=True)) if __name__ == '__main__': main()
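# Example invocation (illustrative; assumes a MIME message saved as msg.eml):
#
#   python3 email-unpack.py -d unpacked msg.eml
#
# Each non-multipart part of the message is written into unpacked/, using the
# part's own filename when one is present, otherwise a generated
# part-NNN.<ext> name derived from the part's declared content type.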
lgpl-3.0
a1ezzz/wasp-general
wasp_general/network/web/cookies.py
1
9885
# -*- coding: utf-8 -*-
# wasp_general/network/web/cookies.py
#
# Copyright (C) 2016 the wasp-general authors and contributors
# <see AUTHORS file>
#
# This file is part of wasp-general.
#
# Wasp-general is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wasp-general is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with wasp-general. If not, see <http://www.gnu.org/licenses/>.

# noinspection PyUnresolvedReferences
from wasp_general.version import __author__, __version__, __credits__, __license__, __copyright__, __email__
# noinspection PyUnresolvedReferences
from wasp_general.version import __status__

from http.cookies import SimpleCookie
import re

from wasp_general.verify import verify_type, verify_value
from wasp_general.network.web.re_statements import http_path, http_cookie_expires, http_cookie_max_age
from wasp_general.network.web.re_statements import http_cookie_secure, http_cookie_httponly, http_cookie_domain


class WHTTPCookie:
    """ This class represents a single HTTP cookie as described in RFC 6265.

    Call the :meth:`.WHTTPCookie.ro` method to create an unchangeable cookie copy
    """

    cookie_name_non_compliance_re = re.compile(b'.*[\x00-\x1e\x7f()<>@,;:\\\\"/\[\]?={} \t].*')
    """ Check for a non-acceptable cookie name; see RFC 6265, Section 4.1.1
    """

    cookie_value_non_compliance_re = re.compile(b'.*[\x00-\x1e\x7f \",;\\\].*')
    """ Check for a non-acceptable cookie value; see RFC 6265, Section 4.1.1
    """

    cookie_attr_value_compliance = {
        'path': re.compile(http_path),
        'expires': re.compile(http_cookie_expires),
        'max-age': re.compile(http_cookie_max_age),
        'secure': re.compile(http_cookie_secure),
        'httponly': re.compile(http_cookie_httponly),
        'domain': re.compile(http_cookie_domain)
    }
    """ Dictionary of valid attribute names and the regexps their values must match
    """

    @staticmethod
    def cookie_name_check(cookie_name):
        """ Check a cookie name for validity. Return True if the name is valid

        :param cookie_name: name to check
        :return: bool
        """
        cookie_match = WHTTPCookie.cookie_name_non_compliance_re.match(cookie_name.encode('us-ascii'))
        return len(cookie_name) > 0 and cookie_match is None

    @staticmethod
    def cookie_value_check(cookie_value):
        """ Check a cookie value for validity. Return True if the value is valid

        :param cookie_value: value to check
        :return: bool
        """
        return WHTTPCookie.cookie_value_non_compliance_re.match(cookie_value.encode('us-ascii')) is None

    @staticmethod
    def cookie_attr_value_check(attr_name, attr_value):
        """ Check a cookie attribute value for validity. Return True if the value is valid

        :param attr_name: attribute name to check
        :param attr_value: attribute value to check
        :return: bool
        """
        attr_value.encode('us-ascii')  # raises UnicodeEncodeError for a non-ASCII value
        return WHTTPCookie.cookie_attr_value_compliance[attr_name].match(attr_value) is not None

    @verify_type(name=str, value=str)
    @verify_value(name=lambda x: WHTTPCookie.cookie_name_check(x))
    @verify_value(value=lambda x: WHTTPCookie.cookie_value_check(x))
    def __init__(self, name, value, **attrs):
        """ Construct a new cookie with the given name, value and attributes

        :param name: cookie name
        :param value: cookie value
        :param attrs: cookie attributes with their values. Attribute names must be lowercase. In order
        to set the 'max-age' attribute, the dash character ('-') may be replaced by an underscore
        ('_'); both variants are allowed
        """
        self.__name = name
        self.__value = value
        self.__attrs = {}
        self.__ro_flag = False

        for attr_name in attrs:
            self.attr(attr_name, attrs[attr_name])

    def name(self):
        """ Return the cookie name

        :return: str
        """
        return self.__name

    @verify_type(name=str, new_value=(str, None))
    @verify_value(new_value=lambda x: x is None or WHTTPCookie.cookie_value_check(x))
    def value(self, new_value=None):
        """ Return the cookie value. The value is updated when new_value is not None. The value cannot
        be changed if the cookie is in read-only mode (a RuntimeError exception is raised).

        :param new_value: new value to set
        :return: str
        """
        if new_value is not None:
            if self.__ro_flag:
                raise RuntimeError('Read-only cookie changing attempt')
            self.__value = new_value
        return self.__value

    def attrs_as_dict(self):
        """ Return cookie attributes as a dictionary, where keys are attribute names and values are
        their values

        :return: dict
        """
        return self.__attrs.copy()

    @verify_type(name=str)
    def __attr_name(self, name):
        """ Return a suitable and valid attribute name. This method replaces an underscore ('_') with a
        dash ('-'). If the name is invalid, a ValueError exception is raised

        :param name: cookie attribute name
        :return: str
        """
        if name not in self.cookie_attr_value_compliance.keys():
            suggested_name = name.replace('_', '-').lower()
            if suggested_name not in self.cookie_attr_value_compliance.keys():
                raise ValueError('Invalid attribute name is specified')
            name = suggested_name
        return name

    @verify_type('paranoid', attr_name=str)
    @verify_type(attr_value=(str, None))
    def attr(self, attr_name, attr_value=None):
        """ Return an attribute value. The value can also be updated with this method: in order to
        update it, attr_value must be set. The attribute value cannot be changed if the cookie is in
        read-only mode (a RuntimeError exception is raised).

        :param attr_name: target attribute name
        :param attr_value: new value to set
        :return: str
        """
        name = self.__attr_name(attr_name)
        if attr_value is not None:
            if WHTTPCookie.cookie_attr_value_check(name, attr_value) is not True:
                raise ValueError('Unacceptable value passed')
            if self.__ro_flag:
                raise RuntimeError('Read-only cookie changing attempt')
            self.__attrs[name] = attr_value
            return attr_value
        return self.__attrs[name]

    @verify_type('paranoid', attr_name=str)
    def remove_attr(self, attr_name):
        """ Remove a cookie attribute. The attribute cannot be removed if the cookie is in read-only
        mode (a RuntimeError exception is raised).

        :param attr_name: name of the attribute to remove
        :return: None
        """
        if self.__ro_flag:
            raise RuntimeError('Read-only cookie changing attempt')
        name = self.__attr_name(attr_name)
        if name in self.__attrs.keys():
            # pop by the normalized name, so that variants such as 'max_age' are removed too
            self.__attrs.pop(name)

    def __str__(self):
        """ Return a valid "Set-Cookie" HTTP header for this cookie

        :return: str
        """
        simple_cookie = SimpleCookie()
        simple_cookie[self.__name] = self.__value
        for attr_name in self.__attrs.keys():
            simple_cookie[self.__name][attr_name] = self.__attrs[attr_name]
        return str(simple_cookie)

    def ro(self):
        """ Return a read-only copy

        :return: WHTTPCookie
        """
        ro_cookie = self.copy()
        ro_cookie.__ro_flag = True
        return ro_cookie

    def copy(self):
        """ Return a copy

        :return: WHTTPCookie
        """
        copy_cookie = WHTTPCookie(self.__name, self.__value)
        copy_cookie.__attrs = self.__attrs.copy()
        return copy_cookie


class WHTTPCookieJar:
    """ This class represents a collection of cookies.
Call :meth:`.WHTTPCookieJar.ro` method to create read-only copy (in this state no changes are allowed) """ def __init__(self): """ Construct new collection """ self.__cookies = {} self.__ro_flag = False def cookies(self): """ Return available cookie names :return: tuple of str """ return tuple(self.__cookies.keys()) @verify_type(cookie=WHTTPCookie) def add_cookie(self, cookie): """ Add new cookie (or replace if there is cookie with the same name already) :param cookie: cookie to add :return: None """ if self.__ro_flag: raise RuntimeError('Read-only cookie-jar changing attempt') self.__cookies[cookie.name()] = cookie @verify_type(cookie_name=str) def remove_cookie(self, cookie_name): """ Remove cookie by its name :param cookie_name: cookie name :return: """ if self.__ro_flag: raise RuntimeError('Read-only cookie-jar changing attempt') if cookie_name in self.__cookies.keys(): self.__cookies.pop(cookie_name) @verify_type(item=str) def __getitem__(self, item): """ Get cookie by its name :param item: cookie name :return: """ return self.__cookies[item] def __iter__(self): """ Iterate over cookie collection :return: None """ for cookie in self.__cookies.values(): yield cookie def ro(self): """ Return read-only copy :return: WHTTPCookieJar """ ro_jar = WHTTPCookieJar() for cookie in self.__cookies.values(): ro_jar.add_cookie(cookie.ro()) ro_jar.__ro_flag = True return ro_jar @classmethod @verify_type(simple_cookie=SimpleCookie) def import_simple_cookie(cls, simple_cookie): """ Create cookie jar from SimpleCookie object :param simple_cookie: cookies to import :return: WHTTPCookieJar """ cookie_jar = WHTTPCookieJar() for cookie_name in simple_cookie.keys(): cookie_attrs = {} for attr_name in WHTTPCookie.cookie_attr_value_compliance.keys(): attr_value = simple_cookie[cookie_name][attr_name] if attr_value != '': cookie_attrs[attr_name] = attr_value cookie_jar.add_cookie(WHTTPCookie( cookie_name, simple_cookie[cookie_name].value, **cookie_attrs )) return cookie_jar @classmethod @verify_type(cookie_text=str) def import_header_text(cls, cookie_text): """ Create cookie jar from HTTP Header text like 'Set-Cookie: cookie=value' :param cookie_text: http header code :return: WHTTPCookieJar """ simple_cookie = SimpleCookie() simple_cookie.load(cookie_text) return cls.import_simple_cookie(simple_cookie)
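A brief usage sketch for the two classes above. This is a hypothetical snippet, not part of the module, and it assumes the 'path' value satisfies the http_path regexp from re_statements:

# Hypothetical usage sketch for WHTTPCookie / WHTTPCookieJar (not part of the module).
from wasp_general.network.web.cookies import WHTTPCookie, WHTTPCookieJar

cookie = WHTTPCookie('session', 'abc123', path='/')  # 'path' must match the http_path regexp
print(str(cookie))                # rendered as a "Set-Cookie" header via SimpleCookie
frozen = cookie.ro()              # read-only copy; frozen.value('new') would raise RuntimeError

jar = WHTTPCookieJar()
jar.add_cookie(cookie)
print(jar.cookies())              # ('session',)
for c in jar:                     # iterating yields WHTTPCookie objects
    print(c.name(), c.value())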
lgpl-3.0
minillinim/ParseM
setup.py
1
1973
from distutils.core import setup from distutils.command.install import INSTALL_SCHEMES import sys from subprocess import call from os.path import join, abspath from os import chdir, getcwd xtra_opts = {"--with-libcfu-inc":"libcfu headers at this location", "--with-libcfu-lib":"libcfu library at this location", "--with-libhts-inc":"htslib headers at this location", "--with-libhts-lib":"htslib library at this location"} if '--help' not in sys.argv: if 'sdist' not in sys.argv: # set the location of the compiled c library to the place where the headers will be for scheme in INSTALL_SCHEMES.values(): scheme['data'] = join(scheme['platlib'], 'parsem', 'c', 'bam') # grab extra configuration arguments configure_args = [] for opt in xtra_opts.keys(): try: opt_idx = sys.argv.index(opt) configure_args.append(opt+"="+abspath(sys.argv[opt_idx+1])) del sys.argv[opt_idx+1] sys.argv.remove(opt) except ValueError: pass # configure and make the c portion of the program cur_dir = getcwd() chdir(join(cur_dir, 'c', 'bam')) call([join(getcwd(), "configure")] + configure_args) call(['make','clean']) call(['make']) chdir(cur_dir) else: print print "Embedded C options (for building libPMBam.a) USE: --OPTION<space>PATH" for opt in xtra_opts.keys(): print " %s %s"%(opt,xtra_opts[opt]) print # return to regular viewing setup( name='ParseM', version='0.0.3', author='Michael Imelfort', author_email='mike@mikeimelfort.com', packages=['parsem', 'parsem.test'], url='http://pypi.python.org/pypi/ParseM/', license='GPLv3', description='ParseM', long_description=open('README.md').read(), install_requires=[], data_files=[('', ['c/bam/libPMBam.a'])] )
gpl-3.0
sadig/DC2
frontends/frontend-qooxdoo/dc2-qooxdoo-widgets/generate.py
6
2121
#!/usr/bin/env python ################################################################################ # # qooxdoo - the new era of web development # # http://qooxdoo.org # # Copyright: # 2008 - 2010 1&1 Internet AG, Germany, http://www.1und1.de # # License: # LGPL: http://www.gnu.org/licenses/lgpl.html # EPL: http://www.eclipse.org/org/documents/epl-v10.php # See the LICENSE file in the project's top-level directory for details. # # Authors: # * Thomas Herchenroeder (thron7) # ################################################################################ ## # This is a stub proxy for the real generator.py ## import sys, os, re, subprocess CMD_PYTHON = sys.executable QOOXDOO_PATH = '../../qooxdoo/1.5' def getQxPath(): path = QOOXDOO_PATH # try updating from config file if os.path.exists('config.json'): # "using QOOXDOO_PATH from config.json" qpathr=re.compile(r'"QOOXDOO_PATH"\s*:\s*"([^"]*)"\s*,?') conffile = open('config.json') aconffile = conffile.readlines() for line in aconffile: mo = qpathr.search(line) if mo: path = mo.group(1) break # assume first occurrence is ok path = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), path)) return path os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # switch to skeleton dir qxpath = getQxPath() REAL_GENERATOR = os.path.join(qxpath, 'tool', 'bin', 'generator.py') if not os.path.exists(REAL_GENERATOR): print "Cannot find real generator script under: \"%s\"; aborting" % REAL_GENERATOR sys.exit(1) argList = [] argList.append(CMD_PYTHON) argList.append(REAL_GENERATOR) argList.extend(sys.argv[1:]) if sys.platform == "win32": argList1=[] for arg in argList: if arg.find(' ')>-1: argList1.append('"%s"' % arg) else: argList1.append(arg) argList = argList1 else: argList = ['"%s"' % x for x in argList] # quote argv elements cmd = " ".join(argList) retval = subprocess.call(cmd, shell=True) sys.exit(retval)
gpl-2.0
arenadata/ambari
ambari-agent/src/main/python/ambari_agent/apscheduler/jobstores/redis_store.py
98
2815
""" Stores jobs in a Redis database. """ from uuid import uuid4 from datetime import datetime import logging from apscheduler.jobstores.base import JobStore from apscheduler.job import Job try: import cPickle as pickle except ImportError: # pragma: nocover import pickle try: from redis import StrictRedis except ImportError: # pragma: nocover raise ImportError('RedisJobStore requires redis installed') try: long = long except NameError: long = int logger = logging.getLogger(__name__) class RedisJobStore(JobStore): def __init__(self, db=0, key_prefix='jobs.', pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args): self.jobs = [] self.pickle_protocol = pickle_protocol self.key_prefix = key_prefix if db is None: raise ValueError('The "db" parameter must not be empty') if not key_prefix: raise ValueError('The "key_prefix" parameter must not be empty') self.redis = StrictRedis(db=db, **connect_args) def add_job(self, job): job.id = str(uuid4()) job_state = job.__getstate__() job_dict = { 'job_state': pickle.dumps(job_state, self.pickle_protocol), 'runs': '0', 'next_run_time': job_state.pop('next_run_time').isoformat()} self.redis.hmset(self.key_prefix + job.id, job_dict) self.jobs.append(job) def remove_job(self, job): self.redis.delete(self.key_prefix + job.id) self.jobs.remove(job) def load_jobs(self): jobs = [] keys = self.redis.keys(self.key_prefix + '*') pipeline = self.redis.pipeline() for key in keys: pipeline.hgetall(key) results = pipeline.execute() for job_dict in results: job_state = {} try: job = Job.__new__(Job) job_state = pickle.loads(job_dict['job_state'.encode()]) job_state['runs'] = long(job_dict['runs'.encode()]) dateval = job_dict['next_run_time'.encode()].decode() job_state['next_run_time'] = datetime.strptime( dateval, '%Y-%m-%dT%H:%M:%S') job.__setstate__(job_state) jobs.append(job) except Exception: job_name = job_state.get('name', '(unknown)') logger.exception('Unable to restore job "%s"', job_name) self.jobs = jobs def update_job(self, job): attrs = { 'next_run_time': job.next_run_time.isoformat(), 'runs': job.runs} self.redis.hmset(self.key_prefix + job.id, attrs) def close(self): self.redis.connection_pool.disconnect() def __repr__(self): return '<%s>' % self.__class__.__name__
apache-2.0
bheesham/hyde
tests/test_simple_copy.py
7
4366
# -*- coding: utf-8 -*- """ Tests the simple copy feature. In order to mark some files to simply be copied to the destination without any processing what so ever add this to the config (site.yaml for example): simple_copy: - media/css/*.css - media/js/*.js - **/*.js Matching is done with `fnmatch` module. So any `glob` that fnmatch can process is a valid pattern. Use nose `$ pip install nose` `$ nosetests` """ import yaml from hyde.model import Config from hyde.site import Site from hyde.generator import Generator from fswrap import File from nose.tools import nottest TEST_SITE_ROOT = File(__file__).parent.child_folder('sites/test_jinja') class TestSimpleCopy(object): @classmethod def setup_class(cls): cls.SITE_PATH = File(__file__).parent.child_folder( 'sites/test_jinja_with_config') cls.SITE_PATH.make() TEST_SITE_ROOT.copy_contents_to(cls.SITE_PATH) @classmethod def teardown_class(cls): cls.SITE_PATH.delete() @nottest def setup_config(self, passthru): self.config_file = File(self.SITE_PATH.child('site.yaml')) with open(self.config_file.path) as config: conf = yaml.load(config) conf['simple_copy'] = passthru self.config = Config(sitepath=self.SITE_PATH, config_dict=conf) def test_simple_copy_basic(self): self.setup_config([ 'about.html' ]) s = Site(self.SITE_PATH, config=self.config) s.load() res = s.content.resource_from_relative_path('about.html') assert res assert res.simple_copy def test_simple_copy_directory(self): self.setup_config([ '**/*.html' ]) s = Site(self.SITE_PATH, config=self.config) s.load() res = s.content.resource_from_relative_path('about.html') assert res assert not res.simple_copy res = s.content.resource_from_relative_path( 'blog/2010/december/merry-christmas.html') assert res assert res.simple_copy def test_simple_copy_multiple(self): self.setup_config([ '**/*.html', 'media/css/*.css' ]) s = Site(self.SITE_PATH, config=self.config) s.load() res = s.content.resource_from_relative_path('about.html') assert res assert not res.simple_copy res = s.content.resource_from_relative_path( 'blog/2010/december/merry-christmas.html') assert res assert res.simple_copy res = s.content.resource_from_relative_path('media/css/site.css') assert res assert res.simple_copy def test_generator(self): self.setup_config([ '**/*.html', 'media/css/*.css' ]) s = Site(self.SITE_PATH, self.config) g = Generator(s) g.generate_all() source = s.content.resource_from_relative_path( 'blog/2010/december/merry-christmas.html') target = File( s.config.deploy_root_path.child(source.relative_deploy_path)) left = source.source_file.read_all() right = target.read_all() assert left == right def test_plugins(self): text = """ --- title: Hey author: Me twitter: @me --- {%% extends "base.html" %%} {%% block main %%} Hi! I am a test template to make sure jinja2 generation works well with hyde. 
<span class="title">{{resource.meta.title}}</span> <span class="author">{{resource.meta.author}}</span> <span class="twitter">{{resource.meta.twitter}}</span> {%% endblock %%} """ index = File(self.SITE_PATH.child('content/blog/index.html')) index.write(text) self.setup_config([ '**/*.html', 'media/css/*.css' ]) conf = {'plugins': ['hyde.ext.plugins.meta.MetaPlugin']} conf.update(self.config.to_dict()) s = Site(self.SITE_PATH, Config( sitepath=self.SITE_PATH, config_dict=conf)) g = Generator(s) g.generate_all() source = s.content.resource_from_relative_path('blog/index.html') target = File( s.config.deploy_root_path.child(source.relative_deploy_path)) left = source.source_file.read_all() right = target.read_all() assert left == right
mit
kaplun/invenio
modules/webstat/lib/webstat_templates.py
24
46362
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2010, 2011, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

__revision__ = "$Id$"
__lastupdated__ = "$Date$"

import datetime, cgi, urllib, os
import re
from invenio.config import \
     CFG_WEBDIR, \
     CFG_SITE_URL, \
     CFG_SITE_LANG, \
     CFG_SITE_NAME
from invenio.search_engine import get_coll_sons
from invenio.webstat_engine import get_invenio_error_details


class Template:

    def tmpl_welcome(self, ln=CFG_SITE_LANG):
        """
        Generates a welcome page for the Webstat module.
        """
        return """<p>On these pages, you can review measurements of Invenio
        usage and performance. Output is available in several formats, and its
        raw data can be exported for offline processing. Further on, a general
        overview is presented below under the label Current System Health.</p>"""

    def tmpl_system_health_list(self, recount, ln=CFG_SITE_LANG):
        """
        Generates a box with current information from the system, giving the
        administrator an easy way of keeping an eye on the 'health', i.e. the
        current performance/efficiency, of the system.
        """
        temp_out = """<h3>Current system health</h3>
                <p>Please choose the information you want to review.</p>
                <ul>
                """
        if recount == 0:
            temp_out += """
                <li class="warninggreen">
                <a href="%s/stats/system_health%s">
                Current system health</a> (all records up to date!)</li>
                """ % \
                (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
        else:
            temp_out += """
                <li class="warningred">
                <a href="%s/stats/system_health%s">
                Current system health</a> (around %s records pending)</li>
                """ % \
                (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) \
                 or '', str(recount))
        temp_out += """
                <li><a href="%s/stats/ingestion_health%s">
                Check ingestion health for specific records</a></li>
                </ul>
                """ % \
                (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
        return temp_out

    def tmpl_system_health(self, health_statistics, ln=CFG_SITE_LANG):
        """
        Generates the system health list of parameters.
        """
        temp_out = ""
        for statistic in health_statistics:
            if statistic is None:
                temp_out += '\n'
            elif statistic[1] is None:
                temp_out += statistic[0] + '\n'
            else:
                # regular expression, just to extract the text
                # inside the link and check its length
                aux = re.search('.*>(.*)<.*', str(statistic[1]))
                if aux is None:
                    temp_out += statistic[0] + " " + '.' * \
                        (85 - len(str(statistic[0])) - \
                         len(str(statistic[1]))) + " " + \
                         str(statistic[1]) + '\n'
                else:
                    temp_out += statistic[0] + " " + '.' * \
                        (85 - len(str(statistic[0])) - \
                         len(str(aux.group(1)))) + " " + \
                         str(statistic[1]) + '\n'
        temp_out = "<pre>" + temp_out + "</pre>"
        temp_out += '<a href="%s/stats/ingestion_health%s">Check ingestion \
health for specific records pending</a>' % (CFG_SITE_URL,
                    (CFG_SITE_LANG != ln and '?ln=' + ln) or '')
        return temp_out

    def tmpl_keyevent_list(self, ln=CFG_SITE_LANG):
        """
        Generates a list of available key statistics.
        """
        return """<h3>Key statistics</h3>
                  <p>Please choose a statistic from below to review it in detail.</p>
                  <ul>
                    <li><a href="%(CFG_SITE_URL)s/stats/collection_population%(ln_link)s">Collection population</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/new_records%(ln_link)s">New records</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/search_frequency%(ln_link)s">Search frequency</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/search_type_distribution%(ln_link)s">Search type distribution</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/download_frequency%(ln_link)s">Download frequency</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/comments_frequency%(ln_link)s">Comments frequency</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/number_of_loans%(ln_link)s">Number of circulation loans</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/web_submissions%(ln_link)s">Web submissions</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/loans_stats%(ln_link)s">Circulation loan statistics</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/loans_lists%(ln_link)s">Circulation loan lists</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/renewals_lists%(ln_link)s">Circulation renewals lists</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/returns_table%(ln_link)s">Number of circulation overdue returns</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/returns_graph%(ln_link)s">Percentage of circulation overdue returns</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_stats%(ln_link)s">Circulation ILL Requests statistics</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_lists%(ln_link)s">Circulation ILL Requests list</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/ill_requests_graph%(ln_link)s">Percentage of satisfied circulation ILL requests</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/items_stats%(ln_link)s">Circulation items statistics</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/items_list%(ln_link)s">Circulation items list</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/loans_requests%(ln_link)s">Circulation hold requests statistics</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/loans_request_lists%(ln_link)s">Circulation hold requests lists</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/user_stats%(ln_link)s">Circulation user statistics</a></li>
                    <li><a href="%(CFG_SITE_URL)s/stats/user_lists%(ln_link)s">Circulation user lists</a></li>
                  </ul>""" % {'CFG_SITE_URL': CFG_SITE_URL,
                              'ln_link': (CFG_SITE_LANG != ln and '?ln=' + ln) or ''}

    def tmpl_customevent_list(self, customevents, ln=CFG_SITE_LANG):
        """
        Generates a list of available custom statistics.
        """
        out = """<h3>Custom events</h3>
                 <p>The Webstat module supplies a means for the administrators
                 of Invenio to define their own custom events, more abstract
                 than the Key Statistics above. A technical walk-through on how
                 to create these is available
                 <a href="%s/stats/customevent_help">here</a>.
When a custom event has been made available, it is displayed below.</p> """ % CFG_SITE_URL temp_out = "" for event in customevents: temp_out += """<li><a href="%s/stats/customevent?ids=%s">%s</a></li>""" \ % (CFG_SITE_URL, event[0], (event[1] is None) and event[0] or event[1]) if len(customevents) == 0: out += self.tmpl_error("There are currently no custom events available.", ln=ln) else: out += "<ul>" + temp_out + "</ul>" return out def tmpl_loans_statistics(self, ln=CFG_SITE_LANG): """ Generates the tables with the bibcirculation statistics """ out = """<h3>Bibcirculation stats</h3>""" return out def tmpl_error_log_statistics_list(self, ln=CFG_SITE_LANG): """ Link to error log analyzer """ return """<h3>Error log statistics</h3> <p>Displays statistics about the last errors in the Invenio and Apache logs</p> <ul><li><a href="%s/stats/error_log%s">Error log analyzer</a></li> </ul>""" % (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '') def tmpl_error_log_analyzer(self, invenio_ranking, invenio_last_errors, apache_ranking): """ Generates the statistics of the last errors """ out = """<h4>Invenio error log</h4> <h5>Ranking</h5> <pre>%s</pre> <h5>Last errors</h5> """ % (cgi.escape(invenio_ranking)) lines = invenio_last_errors.splitlines() error_number = len(lines) for line in lines: out += """<div> %(line)s<button id="bt_toggle%(error_number)s">Toggle</button> <pre id="txt_error%(error_number)s">%(error_details)s</pre> </div> <script> $("#txt_error%(error_number)s").slideToggle("fast"); $("#bt_toggle%(error_number)s").click(function () { $("#txt_error%(error_number)s").slideToggle("fast"); }); </script> """ % {'line': cgi.escape(line), 'error_number': error_number, 'error_details': cgi.escape(get_invenio_error_details(error_number))} error_number -= 1 out += """<h4>Apache error log</h4> <pre>%s</pre>""" % apache_ranking return out def tmpl_custom_summary(self, ln=CFG_SITE_LANG): """ Link to custom annual report. """ return """<h3>Library report</h3> <ul><li><a href="%s/stats/custom_summary">Custom query summary</a></li></ul> """ % CFG_SITE_URL def tmpl_yearly_report_list(self, ln=CFG_SITE_LANG): """ Link to yearly report """ return """<h3>Yearly report</h3> <p>Yearly report with the total number of circulation transactions (loans, renewals, returns, ILL requests, hold request).</p> <ul><li><a href="%s/stats/yearly_report">Yearly report</a></li></ul> """ % CFG_SITE_URL def tmpl_yearly_report(self, year_report, ln=CFG_SITE_LANG): """ Display yearly report with the total number of circulation transactions. """ temp_out = "" for info in year_report: if info is None: temp_out += '\n' elif info[1] is None: temp_out += info[0] + '\n' else: temp_out += info[0] + " " + \ '.' * (85 - len(str(info[0])) - \ len(str(info[1]))) + " " + str(info[1]) + '\n' return "<pre>" + temp_out + "</pre>" def tmpl_ingestion_health(self, general, req_ingestion=None, stats=None, ln=CFG_SITE_LANG): """ Display the record status search box and the results of the last request, if done. 
""" # Introduction and search box temp_out = """ <p>Check the ingestion health for records pending or go to <a href="%s/stats/system_health%s">current system health</a>.</p> """ % \ (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '') if general == 0: temp_out += """ <p class="warninggreen">(all records up to date!)</p> """ else: temp_out += """ <p class="warningred">(around %s records pending)</p> """ % str(general) temp_out += """ <form method="get"> <input type="hidden" name="ln" value="%s" /> """ % ln if req_ingestion == None: temp_out += self._tmpl_text_box("pattern", "", ln=ln) else: temp_out += self._tmpl_text_box("pattern", req_ingestion, ln=ln) temp_out += """ <input class="formbutton" type="submit" /> </form> """ if stats==None: return temp_out else: # results of the last request info_stats = self.tmpl_ingestion_list(stats, ln=ln) return temp_out + info_stats def tmpl_ingestion_list(self, ingestion_statistics, ln=CFG_SITE_LANG): """ Generates the system ingestion health. """ temp_out = "" for statistic in ingestion_statistics: if statistic is None: temp_out += '\n' elif statistic[1] is None: temp_out += statistic[0] + '\n' else: # regular expression, just to extract the text # inside the link and view it's length aux = re.search('.*>(.*)<.*', str(statistic[0])) if aux is None: temp_out += statistic[0] + " " + '.' * \ (85 - len(str(statistic[0])) - \ len(str(statistic[1]))) + " " + \ str(statistic[1]) + '\n' else: temp_out += statistic[0] + " " + '.' * \ (85 - len(str(aux.group(1))) - \ len(str(statistic[1]))) + " " + \ str(statistic[1]) + '\n' temp_out = "<pre>" + temp_out + "</pre>" return temp_out def tmpl_collection_stats_main_list(self, ln=CFG_SITE_LANG): """ Generates a list of available collections statistics. """ out = """<h3>Collections stats</h3> <ul>""" for coll in get_coll_sons(CFG_SITE_NAME): out += """<li><a href="%s/stats/collections?%s">%s</a></li>""" \ % (CFG_SITE_URL, urllib.urlencode({'collection': coll}) + ((CFG_SITE_LANG != ln and '&ln=' + ln) or ''), coll) out += """<li><a href="%s/stats/collection_stats%s">Other collections</a></li>""" \ % (CFG_SITE_URL, (CFG_SITE_LANG != ln and '?ln=' + ln) or '') return out + "</ul>" def tmpl_collection_stats_complete_list(self, collections, ln=CFG_SITE_LANG): if len(collections) == 0: return self.tmpl_error("There are currently no collections available.", ln=ln) temp_out = """<h4>Collections stats</h4> <ul>""" for coll in collections: temp_out += """<li><a href="%s/stats/collections?%s">%s</a></li>""" \ % (CFG_SITE_URL, urllib.urlencode({'collection': coll[0]}), coll[1]) return temp_out + "</ul>" def tmpl_customevent_help(self, ln=CFG_SITE_LANG): """ Display help for custom events. """ return """<h3>General overview</h3> <p>A custom event is a measure indicating the frequency of some kind of "action", such as e.g. the number of advanced searches carried out using the Swedish language interface. The custom event functionality is intended to give administrators a mean to log abstract activity, as opposed to trivial measurements like "collection population" and "search frequency". 
Thus, a custom event is fully customizable and defined by an administrator but it is important to understand that the Webstat module merely supplies the mean to register an action and associate it with a predefined custom event, while the actual use case leading up to the very registration of the action is left to the user.</p> <p>After a custom event has been created and the process of collecting data has started, the event is accessible for review through the Webstat webpage.</p> <h3>How to create a new custom event</h3> <ol> <li>Edit <strong>/opt/invenio/etc/webstat/webstat.cfg</strong> adding the definition of the customevent: <pre> [webstat_custom_event_1] name = baskets param1 = action param2 = basket param3 = user</pre> </li> <li>The title must be <em>webstat_custom_event_(num)</em> where <em>(num)</em> is a number. The number can not be repeated in two different customevents. </li> <li>The option <em>name</em> is the name of the customevent.</li> <li>Each param in the customevent must be given as <em>param(num)</em> where <em>(num)</em> is an unique number.</li> </ol>""" def tmpl_error(self, msg, ln=CFG_SITE_LANG): """ Provides a common way of outputting error messages. """ return """<div class="important">%s</div>""" % msg def tmpl_keyevent_box(self, options, order, choosed, ln=CFG_SITE_LANG, list=False): """ Generates a FORM box with dropdowns for keyevents. @param options: { parameter name: [(argument internal, argument full)]} @type options: { str: [(str, str)]} @param order: A permutation of the keys in options, for design purpose. @type order: [str] @param options: The selected parameters, and its values. @type options: { str: str } """ # Create the FORM's header formheader = """<form method="get"> <input type="hidden" name="ln" value="%s" />""" % ln # Create the headers using the options permutation headers = [[options[param][1] for param in order]] headers[0].append("") # Create all SELECT boxes sels = [[]] for param in order: if choosed[param] == 'select date': sels[0].append(self._tmpl_select_box(options[param][2], # SELECT box data " - select " + options[param][1], # first item info param, # name [choosed['s_date'], choosed['f_date']], # selected value (perhaps several) True, # multiple box? ln=ln)) elif options[param][0] == 'combobox': sels[0].append(self._tmpl_select_box(options[param][2], # SELECT box data " - select " + options[param][1], # first item info param, # name choosed[param], # selected value (perhaps several) type(choosed[param]) is list, # multiple box? ln=ln)) elif options[param][0] == 'textbox': sels[0].append(self._tmpl_text_box(param, # name choosed[param], # selected value ln=ln)) # Create button sels[0].append("""<input class="formbutton" type="submit" name="action_gen" value="Generate"/>""") # Export option if list: sels[0].append("""<input class="formbutton" type="submit" name="format" value="Full list"/>""") # Create form footer formfooter = """</form>""" return self._tmpl_box(formheader, formfooter, ["keyevent_table"], headers, sels, [""], ln=ln) def tmpl_customevent_box(self, options, choosed, ln=CFG_SITE_LANG): """ Generates a FORM box with dropdowns for customevents. @param options: { parameter name: (header, [(argument internal, argument full)]) or {param father: [(argument internal, argument full)]}} The dictionary is for options that are dependient of other. 
It's use for 'cols' With "param father"="__header" the headers With "param father"="__none" indicate the arguments by default @type options: { str: (str, [(str, str)])|{str: [(str, str)]}} @param choosed: The selected parameters, and its values. @type choosed: { str: str } """ if choosed['ids'] == []: choosed['ids'] = [""] choosed['cols'] = [[("", "", "")]] num_ids = len(choosed['ids']) operators = [('and', 'AND'), ('or', 'OR'), ('and_not', 'AND NOT')] # Crate the ids of the tables table_id = ["time_format"] table_id.extend(['cols' + str(i) for i in range(num_ids)]) # Create the headers using the options permutation headers = [(options['timespan'][0], options['format'][0])] headers.extend([(options['ids'][0], "", options['cols']['__header'], "value") for event_id in choosed['ids']]) # Create all SELECT boxes sels = [[]] for param in ['timespan', 'format']: if choosed[param] == 'select date': sels[0].append(self._tmpl_select_box(options[param][1], # SELECT box data " - select " + options[param][0], # first item info param, # name [choosed['s_date'], choosed['f_date']], # selected value (perhaps several) True, # multiple box? ln=ln)) else: sels[0].append(self._tmpl_select_box(options[param][1], # SELECT box data " - select " + options[param][0], # first item info param, # name choosed[param], # selected value (perhaps several) type(choosed[param]) is list, # multiple box? ln=ln)) for event_id, i in zip(choosed['ids'], range(num_ids)): select_table = [] select_row = [self._tmpl_select_box(options['ids'][1], " - select " + options['ids'][0], 'ids', event_id, attribute='onChange="javascript: \ changed_customevent(customevent[\'ids\'],%d);"' % i, ln=ln)] is_first_loop = True row = 0 if len(choosed['cols']) <= i: choosed['cols'].append([("", "", "")]) if choosed['cols'][i] == []: choosed['cols'][i] = [("", "", "")] for _, col, value in choosed['cols'][i]: select_row.append("") if not is_first_loop: select_row.append(self._tmpl_select_box(operators, "", "bool%d" % i, bool)) if event_id: select_row.append(self._tmpl_select_box(options['cols'][event_id], " - select " + options['cols']['__header'], 'cols' + str(i), col, ln=ln)) else: select_row.append(self._tmpl_select_box(options['cols']['__none'], "Choose CustomEvent", 'cols' + str(i), "", ln=ln)) if is_first_loop: select_row.append("<input name=\"col_value%d\" value=\"%s\">" % (i, value)) else: select_row.append("""<input name="col_value%d" value="%s"> <a href="javascript:;" onclick="delrow(%d,%d);">Remove row</a>""" \ % (i, value, i, row)) select_table.append(select_row) select_row = [] if is_first_loop: is_first_loop = False row += 1 sels.append(select_table) # javascript for add col selectors sels_col = [] sels_col.append(self._tmpl_select_box(options['ids'][1], " - select " + options['ids'][0], 'ids', "", False, attribute='onChange="javascript: \ changed_customevent(customevent[\\\'ids\\\'],\' + col + \');"', ln=ln)) sels_col.append("") sels_col.append(self._tmpl_select_box(options['cols']['__none'], "Choose CustomEvent", 'cols\' + col + \'', "", False, ln=ln)) sels_col.append("""<input name="col_value' + col + '">""") col_table = self._tmpl_box("", "", ["cols' + col + '"], headers[1:], [sels_col], ["""<a id="add' + col + '" href="javascript:;" onclick="addcol(\\'cols' + col + '\\', ' + col + ');">Add more arguments</a> <a id="del' + col + '" href="javascript:;" onclick="delblock(' + col + ');"> Remove block</a>"""], ln=ln) col_table = col_table.replace('\n', '') formheader = """<script type="text/javascript"> var col = %d; var col_select = 
new Array(%s,0); var block_pos_max = %d; var block_pos = new Array(%s,0); var rows_pos_max = [%s]; var rows_pos = [%s]; function addcol(id, num){ col_select[num]++; var table = document.getElementById(id); var body = table.getElementsByTagName('tbody')[0]; var row = document.createElement('tr'); var cel0 = document.createElement('td'); row.appendChild(cel0); var cel1 = document.createElement('td'); cel1.innerHTML = '<select name="bool' + num + '"> <option value="and">AND</option> <option value="or">OR</option> <option value="and_not">AND NOT</option> </select>'; row.appendChild(cel1); var cel2 = document.createElement('td'); cel2.innerHTML = '%s'; row.appendChild(cel2); var cel3 = document.createElement('td'); cel3.innerHTML = '%s'; row.appendChild(cel3); body.appendChild(row); // Change arguments arguments = document['customevent']['cols' + num] if (col_select[1] == 0) { value = document['customevent']['ids'].value; } else { value = document['customevent']['ids'][block_pos[num]].value; } _change_select_options(arguments[arguments.length -1], get_argument_list(value), ''); rows_pos[num][col_select[num]-1] = rows_pos_max[num]; rows_pos_max[num]++; } """ % (num_ids, ','.join([str(len(choosed['cols'][i])) for i in range(num_ids)]), num_ids, ','.join([str(i) for i in range(num_ids)]), ','.join([str(len(block)) for block in choosed['cols']]), ','.join([str(range(len(block))) for block in choosed['cols']]), sels_col[2].replace("' + col + '", "' + num + '"), sels_col[3].replace("' + col + '", "' + num + '") + \ """ <a href="javascript:;" onclick="delrow(' + num + ',' + (col_select[num]-1) + ');">Remove row</a>""") formheader += """ function addblock() { col_select[col] = 1; var ni = document.getElementById('block'); var newdiv = document.createElement('div'+col); newdiv.innerHTML = '%s'; ni.appendChild(newdiv); block_pos[col] = block_pos_max; block_pos_max++; rows_pos[col] = [0]; rows_pos_max[col] = 1; col++; }""" % col_table formheader += """ function delblock(id) { var block = document.getElementById("cols" + id); var add = document.getElementById("add" + id); var del = document.getElementById("del" + id); block.parentNode.removeChild(block); add.parentNode.removeChild(add); del.parentNode.removeChild(del); for (var i = id+1; i < col_select.length; i++) { block_pos[i]--; } block_pos_max--; } function delrow(table_id,row_num) { var table = document.getElementById('cols' + table_id); table.tBodies[0].deleteRow(rows_pos[table_id][row_num]); col_select[table_id]--; for (var i = row_num+1; i < rows_pos[table_id].length; i++) { rows_pos[table_id][i]--; } rows_pos_max[table_id]--; } """ formheader += """ function change_select_options(selectList, isList, optionArray, chooseDefault) { if (isList) { for (var select = 0; select < selectList.length; select++) { _change_select_options(selectList[select], optionArray, chooseDefault); } } else { _change_select_options(selectList, optionArray, chooseDefault); } } function _change_select_options(select, optionArray, chooseDefault) { select.options.length = 0; for (var option = 0; option*2 < optionArray.length - 1; option++) { if (chooseDefault == optionArray[option*2+1]) { select.options[option] = new Option(optionArray[option*2], optionArray[option*2+1], true, true); } else { select.options[option] = new Option(optionArray[option*2], optionArray[option*2+1]); } } } function changed_customevent(select, num){ if (select.length) { value = select[block_pos[num]].value; } else { value = select.value; } list = get_argument_list(value); select_list = 
(col_select[num] > 1); change_select_options(document['customevent']['cols' + num], select_list, list, ''); } function get_argument_list(value) { if (value == "") { return ['Choose CustomEvent',''];""" for event_id, cols in options['cols'].items(): if event_id not in ['__header', '__none']: str_cols = "[' - select %s', ''," % options['cols']['__header'] for internal, full in cols: str_cols += "'%s','%s'," % (full, internal) str_cols = str_cols[:-1] + ']' formheader += """ } else if (value == "%s") { return %s;""" % (event_id, str_cols) formheader += """ } } </script>""" # Create the FORM's header formheader += """<form method="get" name="customevent"> <input type="hidden" name="ln"value="%s" />""" % ln # Create all footers footers = [] footers.append("") footers.append("""<a href="javascript:;" onclick="addcol('cols0', 0);"> Add more arguments</a>""") for i in range(1, num_ids): footers.append(""" <a id="add%(i)d" href="javascript:;" onclick="addcol('cols%(i)d', %(i)d);">Add more arguments</a> <a id="del%(i)d" href="javascript:;" onclick="delblock(%(i)d);">Remove block</a> """ % {'i': i}) footers[-1] += """<div id="block"> </div>""" # Create formfooter formfooter = """<p><a href="javascript:;" onclick="addblock();">Add more events</a> <input class="formbutton" type="submit" name="action_gen" value="Generate"></p> </form>""" return self._tmpl_box(formheader, formfooter, table_id, headers, sels, footers, ln=ln) def tmpl_display_event_trend_ascii(self, title, filename, ln=CFG_SITE_LANG): """Displays a ASCII graph representing a trend""" try: return self.tmpl_display_trend(title, "<div><pre>%s</pre></div>" % open(filename, 'r').read(), ln=ln) except IOError: return "No data found" def tmpl_display_event_trend_image(self, title, filename, ln=CFG_SITE_LANG): """Displays an image graph representing a trend""" if os.path.isfile(filename): return self.tmpl_display_trend(title, """<div><img src="%s" /></div>""" % filename.replace(CFG_WEBDIR, CFG_SITE_URL), ln=ln) else: return "No data found" def tmpl_display_event_trend_text(self, title, filename, ln=CFG_SITE_LANG): """Displays a text representing a trend""" try: return self.tmpl_display_trend(title, "<div>%s</div>" % open(filename, 'r').read(), ln=ln) except IOError: return "No data found" def tmpl_display_numeric_stats(self, titles, avgs, maxs, mins): """Display average, max and min values""" if titles: out = "" for i in range(len(titles)): out += """<em>%s</em><br /> <b>Average:</b> %d<br /> <b>Max:</b> %d<br /> <b>Min:</b> %d<br />""" % (cgi.escape(titles[i]), avgs[i], maxs[i], mins[i]) return out else: return """<b>Average:</b> %d<br /> <b>Max:</b> %d<br /> <b>Min:</b> %d<br />""" % (avgs, maxs, mins) def tmpl_display_custom_summary(self, tag_name, data, title, query, tag, path, ln=CFG_SITE_LANG): """Display the custom summary (annual report)""" # Create the FORM's header formheader = """<form method="get"> <input type="hidden" name="ln"value="%s" />""" % ln # Create the headers headers = [("Chart title", "Query", "Output tag", "")] # Create the body (text boxes and button) fields = (("""<input type="text" name="title" value="%s" size="20"/>""" % cgi.escape(title), """<input type="text" name="query" value="%s" size="35"/>""" % cgi.escape(query), """<input type="text" name="tag" value="%s" size="10"/>""" % cgi.escape(tag), """<input class="formbutton" type="submit" name="action_gen" value="Generate"/>"""), ) # Create form footer formfooter = """</form>""" out = self._tmpl_box(formheader, formfooter, [("custom_summary_table", )], headers, fields, 
[""], ln=ln) out += """<div> <table border> <tr> <td colspan=2> <b><center> Distribution across %s </center> </td> </tr> <tr> <td align="right"><b>Nb.</b></td> <td><b>%s</b></td> </tr> """ % (cgi.escape(tag_name), cgi.escape(tag_name[0].capitalize() + tag_name[1:])) if len(query) > 0: query += " and " for title, number in data: if title in ('Others', 'TOTAL'): out += """<tr> <td align="right">%d</td> <td>%s</td> </tr> """ % (number, cgi.escape(title)) else: out += """<tr> <td align="right"><a href="%s/search?p=%s&ln=%s">%d</a></td> <td>%s</td> </tr> """ % (CFG_SITE_URL, cgi.escape(urllib.quote(query + " " + tag + ':"' + title + '"')), ln, number, cgi.escape(title)) out += """</table></div> <div><img src="%s" /></div>""" % cgi.escape(path.replace(CFG_WEBDIR, CFG_SITE_URL)) return out # INTERNALS def tmpl_display_trend(self, title, html, ln=CFG_SITE_LANG): """ Generates a generic display box for showing graphs (ASCII and IMGs) alongside to some metainformational boxes. """ return """<table class="narrowsearchbox"> <thead><tr><th colspan="2" class="narrowsearchboxheader" align="left">%s</th></tr></thead> <tbody><tr><td class="narrowsearchboxbody" valign="top">%s</td></tr></tbody> </table> """ % (title, html) def _tmpl_box(self, formheader, formfooter, table_id, headers, selectboxes, footers, ln=CFG_SITE_LANG): """ Aggregates together the parameters in order to generate the corresponding box for customevent. @param formheader: Start tag for the FORM element. @type formheader: str @param formfooter: End tag for the FORM element. @type formfooter: str @param table_id: id for each table @type table_id: list<str> @param headers: Headers for the SELECT boxes @type headers: list<list<str>> @param selectboxes: The actual HTML drop-down boxes, with appropriate content. @type selectboxes: list<list<str>>|list<list<list<str>>> @param footers: footer for each table @type footers: list<str> @return: HTML describing a particular FORM box. @type: str """ out = formheader for table in range(len(table_id)): out += """<table id="%s" class="searchbox"> <thead> <tr>""" % table_id[table] #Append the headers for header in headers[table]: out += """<th class="searchboxheader">%s</th>""" % header out += """</tr> </thead> <tbody>""" # Append the SELECT boxes is_first_loop = True out += """<tr valign="bottom">""" for selectbox in selectboxes[table]: if type(selectbox) is list: if is_first_loop: is_first_loop = False else: out += """</tr> <tr valign="bottom">""" for select in selectbox: out += """<td class="searchboxbody" valign="top">%s</td>""" % select else: out += """<td class="searchboxbody" valign="top">%s</td>""" % selectbox out += """ </tr>""" out += """ </tbody> </table>""" # Append footer out += footers[table] out += formfooter return out def _tmpl_select_box(self, iterable, explaination, name, preselected, multiple=False, attribute="", ln=CFG_SITE_LANG): """ Generates a HTML SELECT drop-down menu. @param iterable: A list of values and tag content to be used in the SELECT list @type iterable: [(str, str)] @param explaination: An explainatory string put as the tag content for the first OPTION. @type explaination: str @param name: The name of the SELECT tag. Important for FORM-parsing. @type name: str @param preselected: The value, or list of values, of the OPTION that should be preselected. Blank or empty list for none. @type preselected: str | [] @param attribute: Optionally add attributes to the select tag @type attribute: str @param multiple: Optionally sets the SELECT box to accept multiple entries. 
@type multiple: bool """ if attribute: sel = """<select name="%s" %s>""" % (name, attribute) else: if name == "timespan": sel = """<script type="text/javascript"> function changeTimeSpanDates(val){ if(val == "select date"){ document.getElementById("selectDateTxt").style.display='block';} else{ document.getElementById("selectDateTxt").style.display='none';} } </script> <select name="timespan" id="timespan" onchange="javascript: changeTimeSpanDates(this.value);">""" else: sel = """<select name="%s">""" % name if multiple is True and name != "timespan": sel = sel.replace("<select ", """<select multiple="multiple" size="5" """) elif explaination: sel += """<option value="">%s</option>""" % explaination for realname, printname in [(x[0], x[1]) for x in iterable]: if printname is None: printname = realname option = """<option value="%s">%s</option>""" % (realname, printname) if realname == preselected or (type(preselected) is list and realname in preselected) or (name == "timespan" and realname == 'select date' and multiple): option = option.replace('">', '" selected="selected">') sel += option sel += "</select>" if name == "timespan": if multiple: s_date = preselected[0] f_date = preselected[1] else: s_date = datetime.datetime.today().date().strftime("%m/%d/%Y %H:%M") f_date = datetime.datetime.now().strftime("%m/%d/%Y %H:%M") sel += """<link rel="stylesheet" href="%(CFG_SITE_URL)s/img/jquery-ui.css" type="text/css" /> <script language="javascript" type="text/javascript" src="%(CFG_SITE_URL)s/js/jquery-ui.min.js"></script> <script type="text/javascript" src="%(CFG_SITE_URL)s/js/jquery-ui-timepicker-addon.js"></script> <div id="selectDateTxt" style="position:relative;display:none"> <table align="center"> <tr align="center"> <td align="right" class="searchboxheader">From: </td> <td align="left"><input type="text" name="s_date" id="s_date" value="%(s_date)s" size="14" /></td> </tr> <tr align="center"> <td align="right" class="searchboxheader">To: </td> <td align="left"><input type="text" name="f_date" id="f_date" value="%(f_date)s" size="14" /></td> </tr> </table> </div> <script type="text/javascript"> $('#s_date').datetimepicker(); $('#f_date').datetimepicker({ hour: 23, minute: 59 }); if(document.getElementById("timespan").value == "select date"){ document.getElementById("selectDateTxt").style.display='block'; } </script>""" % {'CFG_SITE_URL': CFG_SITE_URL, 's_date': cgi.escape(s_date), 'f_date': cgi.escape(f_date)} return sel def _tmpl_text_box(self, name, preselected, ln=CFG_SITE_LANG): """ Generates a HTML text-box menu. @param name: The name of the textbox label. @type name: str @param preselected: The value that should be preselected. Blank or empty list for none. @type preselected: str | [] """ if name == 'min_loans' or name == 'max_loans': return """<script type="text/javascript"> function checkNumber(input){ var num = input.value.replace(/\,/g,''); var newtext = parseInt(num); if(isNaN(newtext)){ alert('You may enter only numbers in this field!'); input.value = 0; } else { input.value = newtext; } } </script> <input type="text" name="%s" onchange="checkNumber(this)" value="%s" />""" % (name, preselected) else: return """<input type="text" name="%s" value="%s" />""" % (name, preselected)
gpl-2.0
pfnet/chainercv
chainercv/datasets/ade20k/ade20k_test_image_dataset.py
3
1666
import glob
import os

from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.datasets.ade20k.ade20k_utils import get_ade20k
from chainercv.utils import read_image

root = 'pfnet/chainercv/ade20k'
url = 'http://data.csail.mit.edu/places/ADEchallenge/release_test.zip'


class ADE20KTestImageDataset(GetterDataset):

    """Image dataset for the test split of the ADE20K dataset.

    This is an image dataset of the test split of the ADE20K dataset
    distributed at the `MIT Scene Parsing Benchmark`_ website.
    It has 3,352 test images.

    .. _`MIT Scene Parsing Benchmark`: http://sceneparsing.csail.mit.edu/

    Args:
        data_dir (string): Path to the dataset directory. The directory should
            contain the :obj:`release_test` dir. If :obj:`auto` is given, the
            dataset is automatically downloaded into
            :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/ade20k`.

    This dataset returns the following data.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
        "RGB, :math:`[0, 255]`"
    """

    def __init__(self, data_dir='auto'):
        super(ADE20KTestImageDataset, self).__init__()
        # 'is' compares object identity; string comparison needs '=='
        if data_dir == 'auto':
            data_dir = get_ade20k(root, url)

        img_dir = os.path.join(data_dir, 'release_test', 'testing')
        self.img_paths = sorted(glob.glob(os.path.join(img_dir, '*.jpg')))

        self.add_getter('img', self._get_image)
        self.keys = 'img'  # do not return a tuple

    def __len__(self):
        return len(self.img_paths)

    def _get_image(self, i):
        return read_image(self.img_paths[i])
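A hedged usage sketch; with data_dir='auto' the archive is fetched on first use, so the snippet assumes network access and an installed chainercv:

# Hypothetical usage; downloads release_test.zip on first run when 'auto' is used.
from chainercv.datasets import ADE20KTestImageDataset

dataset = ADE20KTestImageDataset(data_dir='auto')
print(len(dataset))    # 3352 test images
img = dataset[0]       # a (3, H, W) float32 RGB array, since self.keys == 'img'
print(img.shape, img.dtype)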
mit
andmos/ansible
test/units/modules/network/iosxr/test_iosxr_user.py
48
4023
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type from units.compat.mock import patch from ansible.modules.network.iosxr import iosxr_user from units.modules.utils import set_module_args from .iosxr_module import TestIosxrModule, load_fixture class TestIosxrUserModule(TestIosxrModule): module = iosxr_user def setUp(self): super(TestIosxrUserModule, self).setUp() self.mock_get_config = patch('ansible.modules.network.iosxr.iosxr_user.get_config') self.get_config = self.mock_get_config.start() self.mock_load_config = patch('ansible.modules.network.iosxr.iosxr_user.load_config') self.load_config = self.mock_load_config.start() self.mock_is_cliconf = patch('ansible.modules.network.iosxr.iosxr_user.is_cliconf') self.is_cliconf = self.mock_is_cliconf.start() def tearDown(self): super(TestIosxrUserModule, self).tearDown() self.mock_get_config.stop() self.mock_load_config.stop() self.mock_is_cliconf.stop() def load_fixtures(self, commands=None, transport='cli'): self.get_config.return_value = load_fixture('iosxr_user_config.cfg') self.load_config.return_value = dict(diff=None, session='session') self.is_cliconf.return_value = True def test_iosxr_user_delete(self): set_module_args(dict(name='ansible', state='absent')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['no username ansible']) def test_iosxr_user_password(self): set_module_args(dict(name='ansible', configured_password='test')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['username ansible secret test']) def test_iosxr_user_purge(self): set_module_args(dict(purge=True)) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['no username ansible']) def test_iosxr_user_group(self): set_module_args(dict(name='ansible', group='sysadmin')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['username ansible group sysadmin']) def test_iosxr_user_update_password_changed(self): set_module_args(dict(name='test', configured_password='test', update_password='on_create')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['username test', 'username test secret test']) def test_iosxr_user_update_password_on_create_ok(self): set_module_args(dict(name='ansible', configured_password='test', update_password='on_create')) self.execute_module() def test_iosxr_user_update_password_always(self): set_module_args(dict(name='ansible', configured_password='test', update_password='always')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['username ansible secret test']) def test_iosxr_user_admin_mode(self): set_module_args(dict(name='ansible-2', configured_password='test-2', admin=True)) result = self.execute_module(changed=True) 
self.assertEqual(result['commands'], ['username ansible-2', 'username ansible-2 secret test-2'])
gpl-3.0
beiko-lab/gengis
bin/Lib/site-packages/scipy/weave/tests/test_inline_tools.py
1
1548
from __future__ import absolute_import, print_function from numpy.testing import TestCase, dec, assert_ from scipy.weave import inline_tools class TestInline(TestCase): """ These are long running tests... I'd like to benchmark these things somehow. """ @dec.slow def test_exceptions(self): a = 3 code = """ if (a < 2) throw_error(PyExc_ValueError, "the variable 'a' should not be less than 2"); else return_val = PyInt_FromLong(a+1); """ result = inline_tools.inline(code,['a']) assert_(result == 4) ## Unfortunately, it is not always possible to catch distutils compiler ## errors, since SystemExit is used. Until that is fixed, these tests ## cannot be run in the same process as the test suite. ## try: ## a = 1 ## result = inline_tools.inline(code,['a']) ## assert_(1) # should've thrown a ValueError ## except ValueError: ## pass ## from distutils.errors import DistutilsError, CompileError ## try: ## a = 'string' ## result = inline_tools.inline(code,['a']) ## assert_(1) # should've gotten an error ## except: ## # ?CompileError is the error reported, but catching it doesn't work ## pass if __name__ == "__main__": import nose nose.run(argv=['', __file__])
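For orientation, a minimal sketch of the API the test above exercises; it assumes a Python 2 environment with a working C compiler, since weave compiles the snippet on first call:

# Hypothetical example; 'return_val' is how a weave snippet returns a Python object.
from scipy.weave import inline_tools

a = 10
result = inline_tools.inline("return_val = PyInt_FromLong(a * 2);", ['a'])
print(result)  # 20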
gpl-3.0
yprez/django-post_office
post_office/validators.py
3
1409
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.template import Template, TemplateSyntaxError, TemplateDoesNotExist
from django.utils.encoding import force_text

from .compat import text_type


def validate_email_with_name(value):
    """
    Validate email address.

    Both "Recipient Name <email@example.com>" and "email@example.com" are valid.
    """
    value = force_text(value)

    # note: "'<' and '>' in value" only tested for '>', because the left
    # operand of "and" was the always-truthy literal '<'
    if '<' in value and '>' in value:
        start = value.find('<') + 1
        end = value.find('>')
        if start < end:
            recipient = value[start:end]
        else:
            recipient = value
    else:
        recipient = value

    validate_email(recipient)


def validate_comma_separated_emails(value):
    """
    Validate every email address in a comma separated list of emails.
    """
    if not isinstance(value, (tuple, list)):
        raise ValidationError('Email list must be a list/tuple.')

    for email in value:
        try:
            validate_email_with_name(email)
        except ValidationError:
            raise ValidationError('Invalid email: %s' % email, code='invalid')


def validate_template_syntax(source):
    """
    Basic Django Template syntax validation. This allows for more robust
    template authoring.
    """
    try:
        Template(source)
    except (TemplateSyntaxError, TemplateDoesNotExist) as err:
        raise ValidationError(text_type(err))
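A hedged usage sketch of the validators above, assuming a configured Django environment:

# Hypothetical usage of the validators above.
from post_office.validators import (validate_email_with_name,
                                    validate_comma_separated_emails)
from django.core.exceptions import ValidationError

validate_email_with_name('Jane Doe <jane@example.com>')  # passes
validate_email_with_name('jane@example.com')             # passes

try:
    validate_comma_separated_emails(['jane@example.com', 'not-an-email'])
except ValidationError as e:
    print(e)  # Invalid email: not-an-email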
mit
MiLk/ansible
test/units/modules/network/nxos/test_nxos_bgp_neighbor.py
28
1970
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.compat.tests.mock import patch from ansible.modules.network.nxos import nxos_bgp_neighbor from .nxos_module import TestNxosModule, load_fixture, set_module_args class TestNxosBgpNeighborModule(TestNxosModule): module = nxos_bgp_neighbor def setUp(self): self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.load_config') self.load_config = self.mock_load_config.start() self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.get_config') self.get_config = self.mock_get_config.start() def tearDown(self): self.mock_load_config.stop() self.mock_get_config.stop() def load_fixtures(self, commands=None, device=''): self.get_config.return_value = load_fixture('', 'nxos_bgp_config.cfg') self.load_config.return_value = None def test_nxos_bgp_neighbor(self): set_module_args(dict(asn=65535, neighbor='3.3.3.3', description='some words')) result = self.execute_module(changed=True) self.assertEqual(result['commands'], ['router bgp 65535', 'neighbor 3.3.3.3', 'description some words'])
gpl-3.0
ecular/qemu_seamless
scripts/tracetool.py
111
3993
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Command-line wrapper for the tracetool machinery.
"""

__author__     = "Lluís Vilanova <vilanova@ac.upc.edu>"
__copyright__  = "Copyright 2012, Lluís Vilanova <vilanova@ac.upc.edu>"
__license__    = "GPL version 2 or (at your option) any later version"

__maintainer__ = "Stefan Hajnoczi"
__email__      = "stefanha@linux.vnet.ibm.com"


import sys
import getopt

from tracetool import error_write, out
import tracetool.backend
import tracetool.format


_SCRIPT = ""

def error_opt(msg = None):
    if msg is not None:
        error_write("Error: " + msg + "\n")

    backend_descr = "\n".join([ "    %-15s %s" % (n, d)
                                for n,d in tracetool.backend.get_list() ])
    format_descr = "\n".join([ "    %-15s %s" % (n, d)
                               for n,d in tracetool.format.get_list() ])
    error_write("""\
Usage: %(script)s --format=<format> --backend=<backend> [<options>]

Backends:
%(backends)s

Formats:
%(formats)s

Options:
    --help                   This help message.
    --list-backends          Print list of available backends.
    --check-backend          Check if the given backend is valid.
    --binary <path>          Full path to QEMU binary.
    --target-type <type>     QEMU emulator target type ('system' or 'user').
    --target-arch <arch>     QEMU emulator target arch.
    --probe-prefix <prefix>  Prefix for dtrace probe names
                             (default: qemu-<target-type>-<target-arch>).\
""" % {
        "script" : _SCRIPT,
        "backends" : backend_descr,
        "formats" : format_descr,
    })

    if msg is None:
        sys.exit(0)
    else:
        sys.exit(1)

def main(args):
    global _SCRIPT
    _SCRIPT = args[0]

    long_opts  = [ "backend=", "format=", "help", "list-backends", "check-backend" ]
    long_opts += [ "binary=", "target-type=", "target-arch=", "probe-prefix=" ]

    try:
        opts, args = getopt.getopt(args[1:], "", long_opts)
    except getopt.GetoptError, err:
        error_opt(str(err))

    check_backend = False
    arg_backend = ""
    arg_format = ""
    binary = None
    target_type = None
    target_arch = None
    probe_prefix = None
    for opt, arg in opts:
        if opt == "--help":
            error_opt()

        elif opt == "--backend":
            arg_backend = arg
        elif opt == "--format":
            arg_format = arg

        elif opt == "--list-backends":
            backends = tracetool.backend.get_list()
            out(", ".join([ b for b,_ in backends ]))
            sys.exit(0)
        elif opt == "--check-backend":
            check_backend = True

        elif opt == "--binary":
            binary = arg
        elif opt == '--target-type':
            target_type = arg
        elif opt == '--target-arch':
            target_arch = arg
        elif opt == '--probe-prefix':
            probe_prefix = arg

        else:
            error_opt("unhandled option: %s" % opt)

    # arg_backend is initialized to "", so test for emptiness rather than
    # None (the original `is None` check could never fire)
    if not arg_backend:
        error_opt("backend not set")

    if check_backend:
        if tracetool.backend.exists(arg_backend):
            sys.exit(0)
        else:
            sys.exit(1)

    if arg_format == "stap":
        if binary is None:
            error_opt("--binary is required for SystemTAP tapset generator")
        if probe_prefix is None and target_type is None:
            error_opt("--target-type is required for SystemTAP tapset generator")
        if probe_prefix is None and target_arch is None:
            error_opt("--target-arch is required for SystemTAP tapset generator")

        if probe_prefix is None:
            probe_prefix = ".".join([ "qemu", target_type, target_arch ])

    try:
        tracetool.generate(sys.stdin, arg_format, arg_backend,
                           binary = binary, probe_prefix = probe_prefix)
    except tracetool.TracetoolError, e:
        error_opt(str(e))

if __name__ == "__main__":
    main(sys.argv)
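# Illustrative usage sketch (not part of the original script). The script
# reads trace-events definitions from stdin; the backend/format names below
# ("simple", "h", "dtrace", "stap") and the binary path are examples and may
# differ per QEMU tree:
#
#     tracetool.py --list-backends
#     tracetool.py --format=h --backend=simple < trace-events > trace.h
#     tracetool.py --format=stap --backend=dtrace \
#         --binary=/usr/bin/qemu-system-x86_64 \
#         --target-type=system --target-arch=x86_64 < trace-events > qemu.stp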
gpl-2.0
DUlSine/DUlSine
DUlSine/templatetags/dulsine_extras.py
1
2202
from django import template

from DUlSine.models.DPS import Dimensionnement
from DUlSine.models.team import Inscription, Wish
from DUlSine.models.dulsine_commons import DIPLOME_CI, DIPLOME_PSC1, WISH_ND

register = template.Library()


@register.simple_tag
def badge(dim_id, function_id):
    function_id = int(function_id)
    if function_id < DIPLOME_CI or function_id > DIPLOME_PSC1:
        raise template.TemplateSyntaxError("'badge'(%s) tag second argument should be in [1, 4]" % (function_id))

    # Grab the corresponding Dimensionnement
    dim = Dimensionnement.objects.get(id = dim_id)

    # Compute the answer
    current = dim.nombreIS(function_id)
    required = dim.nombreISMax(function_id)

    # Filter out the badge if the function is not needed
    # (the original tested `required == 0 or required == 0`, a duplicated
    # condition; a single test is equivalent)
    if(required == 0):
        return u''

    # Select the right badge
    if(current == 0):
        badge = 'important'
    elif(current < required):
        badge = 'warning'
    else:
        badge = 'success'

    # If the PSC1 is missing that's not an issue
    if(function_id == DIPLOME_PSC1 and current < required):
        badge = 'info'

    return u"<span class=\"badge badge-%s\" dim-id=\"%d\">%d/%d</span>" % (badge, dim_id, current, required)


@register.simple_tag
def label(dim_id, user_id):
    # Test if the user is registered in a team
    try:
        subscription = Inscription.objects.get(team__dimensionnement = dim_id, benevole = user_id)
    except Inscription.DoesNotExist:
        # Get the wish if any (should be unique)
        try:
            wish = Wish.objects.get(dimensionnement = dim_id, benevole = user_id)
        except Wish.DoesNotExist:
            class_name = 'default'
            message = '--?--'
        else:
            if wish.wish == WISH_ND:
                class_name = 'inverse'
                message = u"N.D."
            else:
                class_name = 'warning'
                message = u"%s" % (wish.get_wish_display())
    else:
        class_name = 'success'
        message = u"%s" % (subscription.get_fonction_display())

    return "<span class=\"label clickable label-%s\" dim-id=\"%s\" function-id=\"1\">%s</span>" % (class_name, dim_id, message)
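# Illustrative sketch (not part of the original file): these simple_tags are
# meant to be called from Django templates. `dim.id` and `user.id` are
# hypothetical context variables:
#
#     {% load dulsine_extras %}
#     {% badge dim.id 1 %}
#     {% label dim.id user.id %}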
agpl-3.0
a-ro/preimage
preimage/tests/inference/test_node_creator.py
1
5368
__author__ = 'amelie' from math import sqrt from mock import Mock import unittest2 import numpy from preimage.inference.bound_calculator import BoundCalculatorMock from preimage.inference.node import MaxNode from preimage.inference.node_creator import NodeCreator class TestNodeCreator(unittest2.TestCase): def setUp(self): self.setup_alphabet() self.setup_node_creator_one_gram() self.setup_node_creator_two_gram() self.setup_nodes() def setup_alphabet(self): self.one_grams = ['a', 'b', 'c'] self.two_grams = ['aa', 'ab', 'ba', 'bb'] def setup_node_creator_one_gram(self): min_bound_calculator_mock = Mock() min_bound_calculator_mock.get_start_node_real_values.return_value = numpy.array([1, 2, 3], dtype=numpy.float64) min_bound_calculator_mock.get_start_node_bounds.return_value = numpy.array([4, 5, 6], dtype=numpy.float64) min_bound_calculator_mock.compute_bound.return_value = {'real_value': 1, 'bound_value': 2} min_bound_calculator = BoundCalculatorMock(min_bound_calculator_mock) max_bound_calculator_mock = Mock() max_bound_calculator_mock.get_start_node_real_values.return_value = numpy.array([7, 8, 9], dtype=numpy.float64) max_bound_calculator_mock.get_start_node_bounds.return_value = numpy.array([10, 11, 12], dtype=numpy.float64) max_bound_calculator_mock.compute_bound.return_value = {'real_value': 3, 'bound_value': 4} max_bound_calculator = BoundCalculatorMock(max_bound_calculator_mock) self.node_creator_one_gram = NodeCreator(min_bound_calculator, max_bound_calculator, self.one_grams) def setup_node_creator_two_gram(self): min_bound_calculator_mock = Mock() min_bound_calculator_mock.get_start_node_real_values.return_value = numpy.array([1, 3, 5, 7], dtype=numpy.float64) min_bound_calculator_mock.get_start_node_bounds.return_value = numpy.array([9, 11, 13, 15], dtype=numpy.float64) min_bound_calculator_mock.compute_bound.return_value = {'real_value': 0.1, 'bound_value': 0.2} min_bound_calculator = BoundCalculatorMock(min_bound_calculator_mock) max_bound_calculator_mock = Mock() max_bound_calculator_mock.get_start_node_real_values.return_value = numpy.array([2, 4, 6, 8], dtype=numpy.float64) max_bound_calculator_mock.get_start_node_bounds.return_value = numpy.array([10, 12, 14, 16], dtype=numpy.float64) max_bound_calculator_mock.compute_bound.return_value = {'real_value': 0.3, 'bound_value': 0.4} max_bound_calculator = BoundCalculatorMock(max_bound_calculator_mock) self.node_creator_two_gram = NodeCreator(min_bound_calculator, max_bound_calculator, self.two_grams) def setup_nodes(self): self.a_b_c_nodes_length_one = [MaxNode('a', 10. / sqrt(4), 1., 7.), MaxNode('b', 11. / sqrt(5), 2., 8.), MaxNode('c', 12. / sqrt(6), 3., 9.)] self.aa_ab_ba_bb = [MaxNode('aa', 10. / sqrt(9), 1, 2), MaxNode('ab', 12 / sqrt(11), 3, 4), MaxNode('ba', 14. / sqrt(13), 5, 6), MaxNode('bb', 16 / sqrt(15), 7, 8)] self.aa_parent_node = MaxNode('a', 5. / sqrt(2), 1, 2) self.aa_node_one_gram = MaxNode('aa', 4. 
/ sqrt(2), 1, 3) self.abc_parent_node = MaxNode('bc', 2., 1, 3) self.abc_node_two_gram = MaxNode('abc', 0.4 / sqrt(0.2), 0.1, 0.3) def test_one_gram_length_one_get_start_nodes_returns_expected_nodes(self): start_nodes = self.node_creator_one_gram.get_start_nodes_python(1) for node_index, start_node in enumerate(start_nodes): with self.subTest(y=start_node.y): self.assertEqual(start_node, self.a_b_c_nodes_length_one[node_index], msg=self.get_message(start_node, self.a_b_c_nodes_length_one[node_index])) def test_two_gram_length_three_get_start_nodes_returns_expected_nodes(self): start_nodes = self.node_creator_two_gram.get_start_nodes_python(3) for node_index, start_node in enumerate(start_nodes): with self.subTest(y=start_node.y): self.assertEqual(start_node, self.aa_ab_ba_bb[node_index], msg=self.get_message(start_node, self.aa_ab_ba_bb[node_index])) def test_one_gram_create_node_returns_expected_node(self): length = 3 node = self.node_creator_one_gram.create_node_python('aa', self.aa_parent_node, length) self.assertEqual(node, self.aa_node_one_gram, msg=self.get_message(node, self.aa_node_one_gram)) def test_two_gram_create_node_returns_expected_node(self): length = 4 node = self.node_creator_two_gram.create_node_python('abc', self.abc_parent_node, length) self.assertEqual(node, self.abc_node_two_gram, msg=self.get_message(node, self.abc_node_two_gram)) def get_message(self, actual, expected): return "{} != {}".format(str(actual), str(expected)) if __name__ == '__main__': unittest2.main()
bsd-2-clause
cmunk/protwis
build/management/commands/build_drugs.py
3
3830
from django.core.management.base import BaseCommand, CommandError from django.conf import settings from common.models import WebResource, WebLink from protein.models import Protein from drugs.models import Drugs from optparse import make_option import logging import csv import os import pandas as pd class Command(BaseCommand): help = 'Build Drug Data' def add_arguments(self, parser): parser.add_argument('--filename', action='append', dest='filename', help='Filename to import. Can be used multiple times') logger = logging.getLogger(__name__) # source file directory drugdata_data_dir = os.sep.join([settings.DATA_DIR, 'drug_data']) def handle(self, *args, **options): if options['filename']: filenames = options['filename'] else: filenames = False try: self.purge_drugs() self.create_drug_data(filenames) except Exception as msg: print(msg) self.logger.error(msg) def purge_drugs(self): try: Drugs.objects.all().delete() except Drugs.DoesNotExist: self.logger.warning('Drugs mod not found: nothing to delete.') def create_drug_data(self, filenames=False): self.logger.info('CREATING DRUGDATA') # read source files if not filenames: filenames = [fn for fn in os.listdir(self.drugdata_data_dir) if fn.endswith('drug_data.csv')] for filename in filenames: filepath = os.sep.join([self.drugdata_data_dir, filename]) data = pd.read_csv(filepath, low_memory=False, encoding = "ISO-8859-1") for index, row in enumerate(data.iterrows()): drugname = data[index:index+1]['Drug Name'].values[0] trialname = data[index:index+1]['Trial name'].values[0] drugalias_raw = data[index:index+1]['DrugAliases'].values[0] drugalias = ['' if str(drugalias_raw) == 'nan' else ', '+str(drugalias_raw)] # trialadd = ['' if str(trialname) == drugname else ' ('+str(trialname)+')'] drugname = drugname + drugalias[0] entry_name = data[index:index+1]['EntryName'].values[0] phase = data[index:index+1]['Phase'].values[0] PhaseDate = data[index:index+1]['PhaseDate'].values[0] ClinicalStatus = data[index:index+1]['ClinicalStatus'].values[0] moa = data[index:index+1]['ModeOfAction'].values[0] targetlevel = data[index:index+1]['TargetCategory'].values[0] drugtype = data[index:index+1]['Drug Class'].values[0] indication = data[index:index+1]['Indication(s)'].values[0] novelty = data[index:index+1]['Target_novelty'].values[0] approval = data[index:index+1]['Approval'].values[0] status = data[index:index+1]['Status'].values[0] references = data[index:index+1]['PMID'].values[0] # fetch protein try: p = Protein.objects.get(entry_name=entry_name) except Protein.DoesNotExist: self.logger.error('Protein not found for entry_name {}'.format(entry_name)) continue drug, created = Drugs.objects.get_or_create(name=drugname, synonym=', '.join(drugalias), drugtype=drugtype, indication=indication, novelty=novelty, approval=approval, phase=phase, phasedate=PhaseDate, clinicalstatus=ClinicalStatus, moa=moa, status=status, targetlevel=targetlevel,references=references) drug.target.add(p) drug.save() # target_list = drug.target.all() self.logger.info('COMPLETED CREATING DRUGDATA')
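# Illustrative sketch (not part of the original command): how a management
# command like this is typically invoked. With no --filename, every
# '*drug_data.csv' file in DATA_DIR/drug_data is imported; the file names
# below are hypothetical:
#
#     python manage.py build_drugs
#     python manage.py build_drugs --filename 2020_drug_data.csv \
#         --filename extra_drug_data.csv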
apache-2.0
ar7z1/ansible
lib/ansible/modules/cloud/google/gcp_compute_http_health_check_facts.py
8
7506
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright (C) 2017 Google # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # ---------------------------------------------------------------------------- # # *** AUTO GENERATED CODE *** AUTO GENERATED CODE *** # # ---------------------------------------------------------------------------- # # This file is automatically generated by Magic Modules and manual # changes will be clobbered when the file is regenerated. # # Please read more about how to change this file at # https://www.github.com/GoogleCloudPlatform/magic-modules # # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function __metaclass__ = type ################################################################################ # Documentation ################################################################################ ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: gcp_compute_http_health_check_facts description: - Gather facts for GCP HttpHealthCheck short_description: Gather facts for GCP HttpHealthCheck version_added: 2.7 author: Google Inc. (@googlecloudplatform) requirements: - python >= 2.6 - requests >= 2.18.4 - google-auth >= 1.3.0 options: filters: description: A list of filter value pairs. Available filters are listed here U(https://cloud.google.com/sdk/gcloud/reference/topic/filters). Each additional filter in the list will act be added as an AND condition (filter1 and filter2) extends_documentation_fragment: gcp ''' EXAMPLES = ''' - name: a http health check facts gcp_compute_http_health_check_facts: filters: - name = test_object project: test_project auth_kind: serviceaccount service_account_file: "/tmp/auth.pem" ''' RETURN = ''' items: description: List of items returned: always type: complex contains: checkIntervalSec: description: - How often (in seconds) to send a health check. The default value is 5 seconds. returned: success type: int creationTimestamp: description: - Creation timestamp in RFC3339 text format. returned: success type: str description: description: - An optional description of this resource. Provide this property when you create the resource. returned: success type: str healthyThreshold: description: - A so-far unhealthy instance will be marked healthy after this many consecutive successes. The default value is 2. returned: success type: int host: description: - The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used. returned: success type: str id: description: - The unique identifier for the resource. This identifier is defined by the server. returned: success type: int name: description: - Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. returned: success type: str port: description: - The TCP port number for the HTTP health check request. - The default value is 80. 
returned: success type: int requestPath: description: - The request path of the HTTP health check request. - The default value is /. returned: success type: str timeoutSec: description: - How long (in seconds) to wait before claiming failure. - The default value is 5 seconds. It is invalid for timeoutSec to have greater value than checkIntervalSec. returned: success type: int unhealthyThreshold: description: - A so-far healthy instance will be marked unhealthy after this many consecutive failures. The default value is 2. returned: success type: int ''' ################################################################################ # Imports ################################################################################ from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest import json ################################################################################ # Main ################################################################################ def main(): module = GcpModule( argument_spec=dict( filters=dict(type='list', elements='str') ) ) if 'scopes' not in module.params: module.params['scopes'] = ['https://www.googleapis.com/auth/compute'] items = fetch_list(module, collection(module), query_options(module.params['filters'])) if items.get('items'): items = items.get('items') else: items = [] return_value = { 'items': items } module.exit_json(**return_value) def collection(module): return "https://www.googleapis.com/compute/v1/projects/{project}/global/httpHealthChecks".format(**module.params) def fetch_list(module, link, query): auth = GcpSession(module, 'compute') response = auth.get(link, params={'filter': query}) return return_if_object(module, response) def query_options(filters): if not filters: return '' if len(filters) == 1: return filters[0] else: queries = [] for f in filters: # For multiple queries, all queries should have () if f[0] != '(' and f[-1] != ')': queries.append("(%s)" % ''.join(f)) else: queries.append(f) return ' '.join(queries) def return_if_object(module, response): # If not found, return nothing. if response.status_code == 404: return None # If no content, return nothing. if response.status_code == 204: return None try: module.raise_for_status(response) result = response.json() except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst: module.fail_json(msg="Invalid JSON response with error: %s" % inst) if navigate_hash(result, ['error', 'errors']): module.fail_json(msg=navigate_hash(result, ['error', 'errors'])) return result if __name__ == "__main__": main()
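# Illustrative sketch (not part of the original module) of what
# query_options() produces for the `filters` option: multiple filters are
# parenthesized and joined with a space, which GCP's filter syntax treats
# as an AND condition.
#
#     query_options(['name = test_object'])
#         -> 'name = test_object'
#     query_options(['name = test_object', 'zone = us-central1-a'])
#         -> '(name = test_object) (zone = us-central1-a)'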
gpl-3.0
jptomo/rpython-lang-scheme
rpython/rtyper/rdict.py
2
6253
from rpython.annotator import model as annmodel from rpython.rtyper import rmodel from rpython.rtyper.lltypesystem import lltype class __extend__(annmodel.SomeDict): def get_dict_repr(self): from rpython.rtyper.lltypesystem.rdict import DictRepr return DictRepr def rtyper_makerepr(self, rtyper): dictkey = self.dictdef.dictkey dictvalue = self.dictdef.dictvalue s_key = dictkey.s_value s_value = dictvalue.s_value force_non_null = self.dictdef.force_non_null if dictkey.custom_eq_hash: custom_eq_hash = lambda: (rtyper.getrepr(dictkey.s_rdict_eqfn), rtyper.getrepr(dictkey.s_rdict_hashfn)) else: custom_eq_hash = None return self.get_dict_repr()(rtyper, lambda: rtyper.getrepr(s_key), lambda: rtyper.getrepr(s_value), dictkey, dictvalue, custom_eq_hash, force_non_null) def rtyper_makekey(self): self.dictdef.dictkey .dont_change_any_more = True self.dictdef.dictvalue.dont_change_any_more = True return (self.__class__, self.dictdef.dictkey, self.dictdef.dictvalue) class __extend__(annmodel.SomeOrderedDict): def get_dict_repr(self): from rpython.rtyper.lltypesystem.rordereddict import OrderedDictRepr return OrderedDictRepr class AbstractDictRepr(rmodel.Repr): def pickrepr(self, item_repr): if self.custom_eq_hash: return item_repr, item_repr else: return self._externalvsinternal(self.rtyper, item_repr) pickkeyrepr = pickrepr def compact_repr(self): return 'DictR %s %s' % (self.key_repr.compact_repr(), self.value_repr.compact_repr()) def recast_value(self, llops, v): return llops.convertvar(v, self.value_repr, self.external_value_repr) def recast_key(self, llops, v): return llops.convertvar(v, self.key_repr, self.external_key_repr) def rtype_newdict(hop): hop.inputargs() # no arguments expected r_dict = hop.r_result cDICT = hop.inputconst(lltype.Void, r_dict.DICT) v_result = hop.gendirectcall(r_dict.ll_newdict, cDICT) return v_result class AbstractDictIteratorRepr(rmodel.IteratorRepr): def newiter(self, hop): v_dict, = hop.inputargs(self.r_dict) citerptr = hop.inputconst(lltype.Void, self.lowleveltype) return hop.gendirectcall(self.ll_dictiter, citerptr, v_dict) def rtype_next(self, hop): v_iter, = hop.inputargs(self) # record that we know about these two possible exceptions hop.has_implicit_exception(StopIteration) hop.has_implicit_exception(RuntimeError) hop.exception_is_here() v_index = hop.gendirectcall(self._ll_dictnext, v_iter) # # read 'iter.dict.entries' DICT = self.lowleveltype.TO.dict c_dict = hop.inputconst(lltype.Void, 'dict') v_dict = hop.genop('getfield', [v_iter, c_dict], resulttype=DICT) ENTRIES = DICT.TO.entries c_entries = hop.inputconst(lltype.Void, 'entries') v_entries = hop.genop('getfield', [v_dict, c_entries], resulttype=ENTRIES) # call the correct variant_*() method method = getattr(self, 'variant_' + self.variant) return method(hop, ENTRIES, v_entries, v_index) def get_tuple_result(self, hop, items_v): # this allocates the tuple for the result, directly in the function # where it will be used (likely). This will let it be removed. 
if hop.r_result.lowleveltype is lltype.Void: return hop.inputconst(lltype.Void, None) c1 = hop.inputconst(lltype.Void, hop.r_result.lowleveltype.TO) cflags = hop.inputconst(lltype.Void, {'flavor': 'gc'}) v_result = hop.genop('malloc', [c1, cflags], resulttype = hop.r_result.lowleveltype) for i, v_item in enumerate(items_v): ITEM = getattr(v_result.concretetype.TO, 'item%d' % i) if ITEM != v_item.concretetype: assert isinstance(ITEM, lltype.Ptr) v_item = hop.genop('cast_pointer', [v_item], resulttype=ITEM) c_item = hop.inputconst(lltype.Void, 'item%d' % i) hop.genop('setfield', [v_result, c_item, v_item]) return v_result def variant_keys(self, hop, ENTRIES, v_entries, v_index): KEY = ENTRIES.TO.OF.key c_key = hop.inputconst(lltype.Void, 'key') v_key = hop.genop('getinteriorfield', [v_entries, v_index, c_key], resulttype=KEY) return self.r_dict.recast_key(hop.llops, v_key) variant_reversed = variant_keys def variant_values(self, hop, ENTRIES, v_entries, v_index): VALUE = ENTRIES.TO.OF.value c_value = hop.inputconst(lltype.Void, 'value') v_value = hop.genop('getinteriorfield', [v_entries,v_index,c_value], resulttype=VALUE) return self.r_dict.recast_value(hop.llops, v_value) def variant_items(self, hop, ENTRIES, v_entries, v_index): v_key = self.variant_keys(hop, ENTRIES, v_entries, v_index) v_value = self.variant_values(hop, ENTRIES, v_entries, v_index) return self.get_tuple_result(hop, (v_key, v_value)) def variant_hashes(self, hop, ENTRIES, v_entries, v_index): # there is not really a variant 'hashes', but this method is # convenient for the following variants return hop.gendirectcall(ENTRIES.TO.hash, v_entries, v_index) def variant_keys_with_hash(self, hop, ENTRIES, v_entries, v_index): v_key = self.variant_keys(hop, ENTRIES, v_entries, v_index) v_hash = self.variant_hashes(hop, ENTRIES, v_entries, v_index) return self.get_tuple_result(hop, (v_key, v_hash)) def variant_items_with_hash(self, hop, ENTRIES, v_entries, v_index): v_key = self.variant_keys(hop, ENTRIES, v_entries, v_index) v_value = self.variant_values(hop, ENTRIES, v_entries, v_index) v_hash = self.variant_hashes(hop, ENTRIES, v_entries, v_index) return self.get_tuple_result(hop, (v_key, v_value, v_hash))
mit
phase/gitfiti
gitfiti.py
1
9262
#!/usr/bin/env python
#
# Copyright (c) 2013 Eric Romano (@gelstudios)
# released under The MIT license (MIT) http://opensource.org/licenses/MIT
#
"""
gitfiti

noun : Carefully crafted graffiti in a github commit history calendar
"""

import datetime
import math
import itertools
import json
from urllib.request import urlopen
from urllib.error import HTTPError, URLError

TITLE = '''
       _ __  _____ __  _
 ____ _(_) /_/ __(_) /_(_)
/ __ `/ / __/ /_/ / __/ /
/ /_/ / / /_/ __/ / /_/ /
\__, /_/\__/_/ /_/\__/_/
/____/
'''

KITTY = [
    [0,0,0,4,0,0,0,0,4,0,0,0],
    [0,0,4,2,4,4,4,4,2,4,0,0],
    [0,0,4,2,2,2,2,2,2,4,0,0],
    [2,2,4,2,4,2,2,4,2,4,2,2],
    [0,0,4,2,2,3,3,2,2,4,0,0],
    [2,2,4,2,2,2,2,2,2,4,2,2],
    [0,0,0,3,4,4,4,4,3,0,0,0]]

ONEUP = [
    [0,4,4,4,4,4,4,4,0],
    [4,3,2,2,1,2,2,3,4],
    [4,2,2,1,1,1,2,2,4],
    [4,3,4,4,4,4,4,3,4],
    [4,4,1,4,1,4,1,4,4],
    [0,4,1,1,1,1,1,4,0],
    [0,0,4,4,4,4,4,0,0]]

ONEUP2 = [
    [0,0,4,4,4,4,4,4,4,0,0],
    [0,4,2,2,1,1,1,2,2,4,0],
    [4,3,2,2,1,1,1,2,2,3,4],
    [4,3,3,4,4,4,4,4,3,3,4],
    [0,4,4,1,4,1,4,1,4,4,0],
    [0,0,4,1,1,1,1,1,4,0,0],
    [0,0,0,4,4,4,4,4,0,0,0]]

HACKERSCHOOL = [
    [4,4,4,4,4,4],
    [4,3,3,3,3,4],
    [4,1,3,3,1,4],
    [4,3,3,3,3,4],
    [4,4,4,4,4,4],
    [0,0,4,4,0,0],
    [4,4,4,4,4,4]]

OCTOCAT = [
    [0,0,0,4,0,0,0,4,0],
    [0,0,4,4,4,4,4,4,4],
    [0,0,4,1,3,3,3,1,4],
    [4,0,3,4,3,3,3,4,3],
    [0,4,0,0,4,4,4,0,0],
    [0,0,4,4,4,4,4,4,4],
    [0,0,4,0,4,0,4,0,4]]

OCTOCAT2 = [
    [0,0,4,0,0,4,0],
    [0,4,4,4,4,4,4],
    [0,4,1,3,3,1,4],
    [0,4,4,4,4,4,4],
    [4,0,0,4,4,0,0],
    [0,4,4,4,4,4,0],
    [0,0,0,4,4,4,0]]

HELLO = [
    [0,1,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,4],
    [0,2,0,0,0,0,0,0,0,2,0,2,0,0,0,0,0,4],
    [0,3,3,3,0,2,3,3,0,3,0,3,0,1,3,1,0,3],
    [0,4,0,4,0,4,0,4,0,4,0,4,0,4,0,4,0,3],
    [0,3,0,3,0,3,3,3,0,3,0,3,0,3,0,3,0,2],
    [0,2,0,2,0,2,0,0,0,2,0,2,0,2,0,2,0,0],
    [0,1,0,1,0,1,1,1,0,1,0,1,0,1,1,1,0,4]]

HIREME = [
    [1,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
    [3,3,3,0,2,0,3,3,3,0,2,3,3,0,0,3,3,0,3,0,0,2,3,3],
    [4,0,4,0,4,0,4,0,0,0,4,0,4,0,0,4,0,4,0,4,0,4,0,4],
    [3,0,3,0,3,0,3,0,0,0,3,3,3,0,0,3,0,3,0,3,0,3,3,3],
    [2,0,2,0,2,0,2,0,0,0,2,0,0,0,0,2,0,2,0,2,0,2,0,0],
    [1,0,1,0,1,0,1,0,0,0,1,1,1,0,0,1,0,1,0,1,0,1,1,1]]

LINES = [
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4],
    [4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4, 3, 2, 1, 2, 3, 4]]

RAINBOW = [
    [0,0,0,0,0,4,4,4,4,4,4,4,4,4,4,0,0,0,0,0],
    [0,0,0,4,4,3,3,3,3,3,3,3,3,3,3,4,4,0,0,0],
    [0,0,4,3,3,2,2,2,2,2,2,2,2,2,2,3,3,4,0,0],
    [0,4,3,2,2,1,1,1,1,1,1,1,1,1,1,2,2,3,4,0],
    [0,4,3,2,1,0,0,0,0,0,0,0,0,0,0,1,2,3,4,0],
    [4,3,2,1,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4],
    [4,3,2,1,0,0,0,0,0,0,0,0,0,0,0,0,1,2,3,4],
]

# Note: the original dict mapped '_' to both 0 and 1, a duplicate key that
# Python silently collapses. '-' is the character ONEUP_STR actually uses for
# value 1, so it is mapped here instead; unknown characters default to 0.
ASCII_TO_NUMBER = {'_': 0,
                   '-': 1,
                   '~': 2,
                   '=': 3,
                   '*': 4}


def str_to_sprite(content):
    # Break out lines and filter any excess
    lines = content.split('\n')

    def is_empty_line(line):
        return len(line) != 0
    lines = filter(is_empty_line, lines)

    # Break up lines into each character; build a list (not a lazy map
    # object) so the result survives the mutation loop below
    split_lines = [list(line) for line in lines]

    # Replace each character with its numeric equivalent
    for line in split_lines:
        for index, char in enumerate(line):
            line[index] = ASCII_TO_NUMBER.get(char, 0)

    # Return the formatted list of rows
    return split_lines


ONEUP_STR = str_to_sprite("""
 *******
*=~~-~~=*
*~~---~~*
*=*****=*
**-*-*-**
 *-----*
  *****
""")

IMAGES = {
    'kitty': KITTY,
    'oneup': ONEUP,
    'oneup2': ONEUP2,
    'hackerschool': HACKERSCHOOL,
    'octocat': OCTOCAT,
    'octocat2': OCTOCAT2,
    'hello': HELLO,
    'hireme': HIREME,
    'oneup_str': ONEUP_STR,
    'lines': LINES,
    'rainbow': RAINBOW,
}


def load_images(img_names):
    """loads user images from given file(s)"""
    if img_names[0] == '':
        return dict()

    # collect images across all files (the original returned after the
    # first file, despite accepting several)
    loaded_imgs = {}
    for image_name in img_names:
        img = open(image_name)
        img_list = ''

        name = img.readline().replace('\n', '')
        name = name[1:]

        while True:
            img_line = img.readline()
            if img_line == '':
                break
            # the original called replace() without assigning the result
            img_line = img_line.replace('\n', '')
            if img_line.startswith(':'):
                loaded_imgs[name] = json.loads(img_list)
                name = img_line[1:]
                img_list = ''
            else:
                img_list += img_line

        loaded_imgs[name] = json.loads(img_list)
    return loaded_imgs


def get_calendar(username, base_url='https://github.com/'):
    """retrieves the github commit calendar data for a username"""
    base_url = base_url + 'users/' + username

    try:
        url = base_url + '/contributions'
        page = urlopen(url)
    # catch urllib.error exceptions; the original referenced the Python 2
    # urllib2 module, which is never imported here
    except (HTTPError, URLError) as e:
        print("There was a problem fetching data from {0}".format(url))
        print(e)
        raise SystemExit

    return page.readlines()


def max_commits(input):
    """finds the highest number of commits in one day"""
    output = set()

    for line in input:
        for day in line.split():
            day = day.decode()
            if "data-count=" in day:
                commit = day.split('=')[1]
                commit = commit.strip('"')
                output.add(int(commit))

    output = list(output)
    output.sort()
    output.reverse()
    return output[0]


def multiplier(max_commits):
    """calculates a multiplier to scale github colors to commit history"""
    m = max_commits/4.0
    if m == 0:
        return 1
    m = math.ceil(m)
    m = int(m)
    return m


def get_start_date():
    """returns a datetime object for the first sunday after one year ago
    today at 12:00 noon"""
    d = datetime.datetime.today()
    date = datetime.datetime(d.year-1, d.month, d.day, 12)
    weekday = datetime.datetime.weekday(date)
    while weekday < 6:
        date = date + datetime.timedelta(1)
        weekday = datetime.datetime.weekday(date)
    return date


def date_gen(start_date, offset=0):
    """generator that returns the next date, requires a datetime object as
    input. The offset is in weeks"""
    start = offset * 7
    for i in itertools.count(start):
        yield start_date + datetime.timedelta(i)


def values_in_date_order(image, multiplier=1):
    height = 7
    width = len(image[0])

    for w in range(width):
        for h in range(height):
            yield image[h][w]*multiplier


def commit(content, commitdate):
    template = ("""echo {0} >> gitfiti\n"""
                """GIT_AUTHOR_DATE={1} GIT_COMMITTER_DATE={2} """
                """git commit -a -m "gitfiti" > /dev/null\n""")
    return template.format(content,
                           commitdate.isoformat(),
                           commitdate.isoformat())


def fake_it(image, start_date, username, repo, offset=0, multiplier=1,
            git_url='https://github.com'):
    template = ('#!/bin/bash\n'
                'REPO={0}\n'
                'git init $REPO\n'
                'cd $REPO\n'
                'touch README.md\n'
                'git add README.md\n'
                'touch gitfiti\n'
                'git add gitfiti\n'
                '{1}\n'
                'git remote add origin {2}/{3}/$REPO.git\n'
                'git pull\n'
                'git push -u origin master\n')

    strings = []
    for value, date in zip(values_in_date_order(image, multiplier),
                           date_gen(start_date, offset)):
        for i in range(value):
            strings.append(commit(i, date))

    return template.format(repo, "".join(strings), git_url, username)


def save(output, filename):
    """Saves the list to a given filename"""
    f = open(filename, "w")
    f.write(output)
    f.close()


def main():
    print(TITLE)

    print("Enter github url")
    ghe = input("Enter nothing for https://github.com/ to be used: ")

    print('Enter your github username:')
    username = input(">")

    if not ghe:
        git_base = "https://github.com/"
        cal = get_calendar(username)
    else:
        cal = get_calendar(username, base_url=ghe)
        git_base = ghe

    m = multiplier(max_commits(cal))

    print('Enter name of the repo to be used by gitfiti:')
    repo = input(">")

    print('Enter the number of weeks to offset the image (from the left):')
    offset = input(">")
    if not offset.strip():
        offset = 0
    else:
        offset = int(offset)

    match = 1

    print('enter file(s) to load images from (blank if not applicable)')
    img_names = input(">").split(' ')

    images = dict(IMAGES, **load_images(img_names))

    print('enter the image name to gitfiti')
    print('images: ' + ", ".join(images.keys()))
    image = input(">")

    if not image:
        image = IMAGES['kitty']
    else:
        try:
            image = images[image]
        except KeyError:
            image = IMAGES['kitty']

    if not ghe:
        output = fake_it(image, get_start_date(), username, repo, offset,
                         m*match)
    else:
        git_url = input("Enter git url like git@site.github.com: ")
        output = fake_it(image, get_start_date(), username, repo, offset,
                         m*match, git_url=git_url)

    save(output, 'gitfiti.sh')
    print('gitfiti.sh saved.')
    print('Create a new(!) repo at: {0}new and run it.'.format(git_base))


if __name__ == '__main__':
    main()
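# Illustrative sketch (not part of the original script): the custom image-file
# format load_images() parses. Each image starts with ':<name>' followed by a
# JSON list of rows with values 0-4; the names and shapes here are
# hypothetical examples.
#
#     :smiley
#     [[0,4,0,4,0],
#      [0,0,0,0,0],
#      [4,0,0,0,4],
#      [0,4,4,4,0]]
#     :dot
#     [[4]]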
mit
collingreen/yaib_ludumdare
modules/persistence/sqlalchemy_persistence.py
1
3467
import logging
from contextlib import contextmanager

from pubsub import pub
from sqlalchemy import create_engine
from sqlalchemy import Column, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr


# declarative base for sqlalchemy schema
Base = declarative_base()


class SqlAlchemyPersistence(object):
    """
    Default persistence module for YAIB.

    Uses sqlalchemy to expose schema creation and ORM (and eventually
    migrations) to plugins.

    Configuration:
        Requires a persistence.connection key in the configuration file
        which will be passed to the sqlalchemy create_engine function.
        http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html#database-urls

    Usage:
    ```
    with persistence.getDbSession() as db_session:
        db_session.query(Thing).update({'x': 5})
    ```
    """
    def __init__(self, configuration):
        """Initialize the module"""
        # set to true when persistence is set up
        # polite code should check if persistence.enabled is True before
        # trying to use sqlalchemy
        # (initialized before _configure, which flips it to True on success;
        # the original set it to False afterwards, clobbering the flag)
        self.enabled = False

        self._configure(configuration)

        # subscribe to events
        self.subscribeToEvents()

    def _configure(self, configuration):
        """Called on init with the bot configuration."""
        # require persistence and persistence.connection
        if (configuration.persistence is None
                or configuration.persistence.connection is None):
            logging.error(
                "Error - could not configure persistence layer."
            )
            return False

        # an Engine, which the Session will use for connection resources
        self.db_engine = create_engine(
            configuration.persistence.connection,
            echo=configuration.persistence.logging
        )

        # create a configured "Session" class
        self.session_class = sessionmaker(bind=self.db_engine)

        # set flag that persistence is active
        self.enabled = True

    def subscribeToEvents(self):
        pub.subscribe(self.pluginsLoaded, 'core:pluginsLoaded')

    def pluginsLoaded(self):
        """
        Called after everything has been initialized. Calls create_all
        on the base class metadata, creating any missing tables in the
        database.
        """
        Base.metadata.create_all(self.db_engine)

    @contextmanager
    def getDbSession(self):
        """Provide a transactional scope around a series of operations."""
        session = self.session_class()

        # give the new session to the caller
        try:
            yield session
            session.commit()

        # if any exceptions are raised, roll back the transaction and reraise
        except:
            session.rollback()
            raise

        # always close the sessions
        finally:
            session.close()


def getModelBase(prefix=''):
    """Returns a custom model base to use for sqlalchemy declarative model
    classes. Ensures the table has an integer primary key named 'id' and
    that the tablename is a lowercased '<prefix>_<ClassName>' to help
    prevent name collisions."""
    class CustomBase(object):
        id = Column(Integer, primary_key=True)

        @declared_attr
        def __tablename__(cls):
            return ('%s_%s' % (prefix, cls.__name__)).lower()
    return CustomBase
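# Illustrative sketch (not part of the original module): defining a plugin
# model on top of getModelBase() and using getDbSession(). `Karma`, its
# columns, and the `persistence` instance are hypothetical.
#
#     from sqlalchemy import Column, String, Integer
#
#     class Karma(getModelBase(prefix='karmaplugin'), Base):
#         nick = Column(String(64))
#         points = Column(Integer, default=0)
#
#     # tablename becomes 'karmaplugin_karma'; after core:pluginsLoaded
#     # fires, the table exists and sessions can be used:
#     with persistence.getDbSession() as db_session:
#         db_session.add(Karma(nick='yaib', points=1))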
mit
SummerLW/Perf-Insight-Report
telemetry/telemetry/util/image_util_unittest.py
32
5341
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import os import tempfile import unittest from telemetry.core import util from telemetry.util import image_util from telemetry.util import rgba_color # This is a simple base64 encoded 2x2 PNG which contains, in order, a single # Red, Yellow, Blue, and Green pixel. test_png = """ iVBORw0KGgoAAAANSUhEUgAAAAIAAAACCAIAAAD91 JpzAAAAFklEQVR4Xg3EAQ0AAABAMP1LY3YI7l8l6A T8tgwbJAAAAABJRU5ErkJggg== """ test_png_path = os.path.join(util.GetUnittestDataDir(), 'test_png.png') test_png_2_path = os.path.join(util.GetUnittestDataDir(), 'test_png_2.png') class ImageUtilTest(unittest.TestCase): def testReadFromBase64Png(self): bmp = image_util.FromBase64Png(test_png) self.assertEquals(2, image_util.Width(bmp)) self.assertEquals(2, image_util.Height(bmp)) image_util.GetPixelColor(bmp, 0, 0).AssertIsRGB(255, 0, 0) image_util.GetPixelColor(bmp, 1, 1).AssertIsRGB(0, 255, 0) image_util.GetPixelColor(bmp, 0, 1).AssertIsRGB(0, 0, 255) image_util.GetPixelColor(bmp, 1, 0).AssertIsRGB(255, 255, 0) def testReadFromPngFile(self): file_bmp = image_util.FromPngFile(test_png_path) self.assertEquals(2, image_util.Width(file_bmp)) self.assertEquals(2, image_util.Height(file_bmp)) image_util.GetPixelColor(file_bmp, 0, 0).AssertIsRGB(255, 0, 0) image_util.GetPixelColor(file_bmp, 1, 1).AssertIsRGB(0, 255, 0) image_util.GetPixelColor(file_bmp, 0, 1).AssertIsRGB(0, 0, 255) image_util.GetPixelColor(file_bmp, 1, 0).AssertIsRGB(255, 255, 0) def testWritePngToPngFile(self): orig = image_util.FromPngFile(test_png_path) temp_file = tempfile.NamedTemporaryFile(suffix='.png').name image_util.WritePngFile(orig, temp_file) new_file = image_util.FromPngFile(temp_file) self.assertTrue(image_util.AreEqual(orig, new_file, likely_equal=True)) def testWritePngWithoutPngSuffixThrows(self): orig = image_util.FromPngFile(test_png_path) temp_file = tempfile.NamedTemporaryFile().name self.assertRaises(AssertionError, image_util.WritePngFile, orig, temp_file) def testWriteCroppedBmpToPngFile(self): pixels = [255, 0, 0, 255, 255, 0, 0, 0, 0, 255, 255, 0, 0, 255, 0, 0, 0, 0] orig = image_util.FromRGBPixels(3, 2, pixels) orig = image_util.Crop(orig, 0, 0, 2, 2) temp_file = tempfile.NamedTemporaryFile(suffix='.png').name image_util.WritePngFile(orig, temp_file) new_file = image_util.FromPngFile(temp_file) self.assertTrue(image_util.AreEqual(orig, new_file, likely_equal=True)) def testIsEqual(self): bmp = image_util.FromBase64Png(test_png) file_bmp = image_util.FromPngFile(test_png_path) self.assertTrue(image_util.AreEqual(bmp, file_bmp, likely_equal=True)) def testDiff(self): file_bmp = image_util.FromPngFile(test_png_path) file_bmp_2 = image_util.FromPngFile(test_png_2_path) diff_bmp = image_util.Diff(file_bmp, file_bmp) self.assertEquals(2, image_util.Width(diff_bmp)) self.assertEquals(2, image_util.Height(diff_bmp)) image_util.GetPixelColor(diff_bmp, 0, 0).AssertIsRGB(0, 0, 0) image_util.GetPixelColor(diff_bmp, 1, 1).AssertIsRGB(0, 0, 0) image_util.GetPixelColor(diff_bmp, 0, 1).AssertIsRGB(0, 0, 0) image_util.GetPixelColor(diff_bmp, 1, 0).AssertIsRGB(0, 0, 0) diff_bmp = image_util.Diff(file_bmp, file_bmp_2) self.assertEquals(3, image_util.Width(diff_bmp)) self.assertEquals(3, image_util.Height(diff_bmp)) image_util.GetPixelColor(diff_bmp, 0, 0).AssertIsRGB(0, 255, 255) image_util.GetPixelColor(diff_bmp, 1, 1).AssertIsRGB(255, 0, 255) image_util.GetPixelColor(diff_bmp, 0, 1).AssertIsRGB(255, 
255, 0) image_util.GetPixelColor(diff_bmp, 1, 0).AssertIsRGB(0, 0, 255) image_util.GetPixelColor(diff_bmp, 0, 2).AssertIsRGB(255, 255, 255) image_util.GetPixelColor(diff_bmp, 1, 2).AssertIsRGB(255, 255, 255) image_util.GetPixelColor(diff_bmp, 2, 0).AssertIsRGB(255, 255, 255) image_util.GetPixelColor(diff_bmp, 2, 1).AssertIsRGB(255, 255, 255) image_util.GetPixelColor(diff_bmp, 2, 2).AssertIsRGB(255, 255, 255) def testGetBoundingBox(self): pixels = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] bmp = image_util.FromRGBPixels(4, 3, pixels) box, count = image_util.GetBoundingBox(bmp, rgba_color.RgbaColor(1, 0, 0)) self.assertEquals(box, (1, 1, 2, 1)) self.assertEquals(count, 2) box, count = image_util.GetBoundingBox(bmp, rgba_color.RgbaColor(0, 1, 0)) self.assertEquals(box, None) self.assertEquals(count, 0) def testCrop(self): pixels = [0, 0, 0, 1, 0, 0, 2, 0, 0, 3, 0, 0, 0, 1, 0, 1, 1, 0, 2, 1, 0, 3, 1, 0, 0, 2, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0] bmp = image_util.FromRGBPixels(4, 3, pixels) bmp = image_util.Crop(bmp, 1, 2, 2, 1) self.assertEquals(2, image_util.Width(bmp)) self.assertEquals(1, image_util.Height(bmp)) image_util.GetPixelColor(bmp, 0, 0).AssertIsRGB(1, 2, 0) image_util.GetPixelColor(bmp, 1, 0).AssertIsRGB(2, 2, 0) self.assertEquals(image_util.Pixels(bmp), bytearray([1, 2, 0, 2, 2, 0]))
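# Illustrative sketch (not part of the original tests) of the pixel layout
# FromRGBPixels() expects, as exercised by testCrop above: a flat, row-major
# list of RGB triples. A hypothetical 2x1 image with one red and one green
# pixel:
#
#     pixels = [255, 0, 0,   0, 255, 0]
#     bmp = image_util.FromRGBPixels(2, 1, pixels)
#     image_util.GetPixelColor(bmp, 0, 0).AssertIsRGB(255, 0, 0)
#     image_util.GetPixelColor(bmp, 1, 0).AssertIsRGB(0, 255, 0)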
bsd-3-clause
andrewor14/spark
python/pyspark/shuffle.py
75
27712
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import platform import shutil import warnings import gc import itertools import operator import random import pyspark.heapq3 as heapq from pyspark.serializers import BatchedSerializer, PickleSerializer, FlattenedValuesSerializer, \ CompressedSerializer, AutoBatchedSerializer try: import psutil process = None def get_used_memory(): """ Return the used memory in MB """ global process if process is None or process._pid != os.getpid(): process = psutil.Process(os.getpid()) if hasattr(process, "memory_info"): info = process.memory_info() else: info = process.get_memory_info() return info.rss >> 20 except ImportError: def get_used_memory(): """ Return the used memory in MB """ if platform.system() == 'Linux': for line in open('/proc/self/status'): if line.startswith('VmRSS:'): return int(line.split()[1]) >> 10 else: warnings.warn("Please install psutil to have better " "support with spilling") if platform.system() == "Darwin": import resource rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss return rss >> 20 # TODO: support windows return 0 def _get_local_dirs(sub): """ Get all the directories """ path = os.environ.get("SPARK_LOCAL_DIRS", "/tmp") dirs = path.split(",") if len(dirs) > 1: # different order in different processes and instances rnd = random.Random(os.getpid() + id(dirs)) random.shuffle(dirs, rnd.random) return [os.path.join(d, "python", str(os.getpid()), sub) for d in dirs] # global stats MemoryBytesSpilled = 0 DiskBytesSpilled = 0 class Aggregator(object): """ Aggregator has tree functions to merge values into combiner. 
createCombiner: (value) -> combiner mergeValue: (combine, value) -> combiner mergeCombiners: (combiner, combiner) -> combiner """ def __init__(self, createCombiner, mergeValue, mergeCombiners): self.createCombiner = createCombiner self.mergeValue = mergeValue self.mergeCombiners = mergeCombiners class SimpleAggregator(Aggregator): """ SimpleAggregator is useful for the cases that combiners have same type with values """ def __init__(self, combiner): Aggregator.__init__(self, lambda x: x, combiner, combiner) class Merger(object): """ Merge shuffled data together by aggregator """ def __init__(self, aggregator): self.agg = aggregator def mergeValues(self, iterator): """ Combine the items by creator and combiner """ raise NotImplementedError def mergeCombiners(self, iterator): """ Merge the combined items by mergeCombiner """ raise NotImplementedError def items(self): """ Return the merged items ad iterator """ raise NotImplementedError def _compressed_serializer(self, serializer=None): # always use PickleSerializer to simplify implementation ser = PickleSerializer() return AutoBatchedSerializer(CompressedSerializer(ser)) class ExternalMerger(Merger): """ External merger will dump the aggregated data into disks when memory usage goes above the limit, then merge them together. This class works as follows: - It repeatedly combine the items and save them in one dict in memory. - When the used memory goes above memory limit, it will split the combined data into partitions by hash code, dump them into disk, one file per partition. - Then it goes through the rest of the iterator, combine items into different dict by hash. Until the used memory goes over memory limit, it dump all the dicts into disks, one file per dict. Repeat this again until combine all the items. - Before return any items, it will load each partition and combine them separately. Yield them before loading next partition. - During loading a partition, if the memory goes over limit, it will partition the loaded data and dump them into disks and load them partition by partition again. `data` and `pdata` are used to hold the merged items in memory. At first, all the data are merged into `data`. Once the used memory goes over limit, the items in `data` are dumped into disks, `data` will be cleared, all rest of items will be merged into `pdata` and then dumped into disks. Before returning, all the items in `pdata` will be dumped into disks. Finally, if any items were spilled into disks, each partition will be merged into `data` and be yielded, then cleared. 
>>> agg = SimpleAggregator(lambda x, y: x + y) >>> merger = ExternalMerger(agg, 10) >>> N = 10000 >>> merger.mergeValues(zip(range(N), range(N))) >>> assert merger.spills > 0 >>> sum(v for k,v in merger.items()) 49995000 >>> merger = ExternalMerger(agg, 10) >>> merger.mergeCombiners(zip(range(N), range(N))) >>> assert merger.spills > 0 >>> sum(v for k,v in merger.items()) 49995000 """ # the max total partitions created recursively MAX_TOTAL_PARTITIONS = 4096 def __init__(self, aggregator, memory_limit=512, serializer=None, localdirs=None, scale=1, partitions=59, batch=1000): Merger.__init__(self, aggregator) self.memory_limit = memory_limit self.serializer = _compressed_serializer(serializer) self.localdirs = localdirs or _get_local_dirs(str(id(self))) # number of partitions when spill data into disks self.partitions = partitions # check the memory after # of items merged self.batch = batch # scale is used to scale down the hash of key for recursive hash map self.scale = scale # un-partitioned merged data self.data = {} # partitioned merged data, list of dicts self.pdata = [] # number of chunks dumped into disks self.spills = 0 # randomize the hash of key, id(o) is the address of o (aligned by 8) self._seed = id(self) + 7 def _get_spill_dir(self, n): """ Choose one directory for spill by number n """ return os.path.join(self.localdirs[n % len(self.localdirs)], str(n)) def _next_limit(self): """ Return the next memory limit. If the memory is not released after spilling, it will dump the data only when the used memory starts to increase. """ return max(self.memory_limit, get_used_memory() * 1.05) def mergeValues(self, iterator): """ Combine the items by creator and combiner """ # speedup attribute lookup creator, comb = self.agg.createCombiner, self.agg.mergeValue c, data, pdata, hfun, batch = 0, self.data, self.pdata, self._partition, self.batch limit = self.memory_limit for k, v in iterator: d = pdata[hfun(k)] if pdata else data d[k] = comb(d[k], v) if k in d else creator(v) c += 1 if c >= batch: if get_used_memory() >= limit: self._spill() limit = self._next_limit() batch /= 2 c = 0 else: batch *= 1.5 if get_used_memory() >= limit: self._spill() def _partition(self, key): """ Return the partition for key """ return hash((key, self._seed)) % self.partitions def _object_size(self, obj): """ How much of memory for this obj, assume that all the objects consume similar bytes of memory """ return 1 def mergeCombiners(self, iterator, limit=None): """ Merge (K,V) pair by mergeCombiner """ if limit is None: limit = self.memory_limit # speedup attribute lookup comb, hfun, objsize = self.agg.mergeCombiners, self._partition, self._object_size c, data, pdata, batch = 0, self.data, self.pdata, self.batch for k, v in iterator: d = pdata[hfun(k)] if pdata else data d[k] = comb(d[k], v) if k in d else v if not limit: continue c += objsize(v) if c > batch: if get_used_memory() > limit: self._spill() limit = self._next_limit() batch /= 2 c = 0 else: batch *= 1.5 if limit and get_used_memory() >= limit: self._spill() def _spill(self): """ dump already partitioned data into disks. It will dump the data in batch for better performance. """ global MemoryBytesSpilled, DiskBytesSpilled path = self._get_spill_dir(self.spills) if not os.path.exists(path): os.makedirs(path) used_memory = get_used_memory() if not self.pdata: # The data has not been partitioned, it will iterator the # dataset once, write them into different files, has no # additional memory. 
It only called when the memory goes # above limit at the first time. # open all the files for writing streams = [open(os.path.join(path, str(i)), 'wb') for i in range(self.partitions)] for k, v in self.data.items(): h = self._partition(k) # put one item in batch, make it compatible with load_stream # it will increase the memory if dump them in batch self.serializer.dump_stream([(k, v)], streams[h]) for s in streams: DiskBytesSpilled += s.tell() s.close() self.data.clear() self.pdata.extend([{} for i in range(self.partitions)]) else: for i in range(self.partitions): p = os.path.join(path, str(i)) with open(p, "wb") as f: # dump items in batch self.serializer.dump_stream(iter(self.pdata[i].items()), f) self.pdata[i].clear() DiskBytesSpilled += os.path.getsize(p) self.spills += 1 gc.collect() # release the memory as much as possible MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20 def items(self): """ Return all merged items as iterator """ if not self.pdata and not self.spills: return iter(self.data.items()) return self._external_items() def _external_items(self): """ Return all partitioned items as iterator """ assert not self.data if any(self.pdata): self._spill() # disable partitioning and spilling when merge combiners from disk self.pdata = [] try: for i in range(self.partitions): for v in self._merged_items(i): yield v self.data.clear() # remove the merged partition for j in range(self.spills): path = self._get_spill_dir(j) os.remove(os.path.join(path, str(i))) finally: self._cleanup() def _merged_items(self, index): self.data = {} limit = self._next_limit() for j in range(self.spills): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) # do not check memory during merging with open(p, "rb") as f: self.mergeCombiners(self.serializer.load_stream(f), 0) # limit the total partitions if (self.scale * self.partitions < self.MAX_TOTAL_PARTITIONS and j < self.spills - 1 and get_used_memory() > limit): self.data.clear() # will read from disk again gc.collect() # release the memory as much as possible return self._recursive_merged_items(index) return self.data.items() def _recursive_merged_items(self, index): """ merge the partitioned items and return the as iterator If one partition can not be fit in memory, then them will be partitioned and merged recursively. """ subdirs = [os.path.join(d, "parts", str(index)) for d in self.localdirs] m = ExternalMerger(self.agg, self.memory_limit, self.serializer, subdirs, self.scale * self.partitions, self.partitions, self.batch) m.pdata = [{} for _ in range(self.partitions)] limit = self._next_limit() for j in range(self.spills): path = self._get_spill_dir(j) p = os.path.join(path, str(index)) with open(p, 'rb') as f: m.mergeCombiners(self.serializer.load_stream(f), 0) if get_used_memory() > limit: m._spill() limit = self._next_limit() return m._external_items() def _cleanup(self): """ Clean up all the files in disks """ for d in self.localdirs: shutil.rmtree(d, True) class ExternalSorter(object): """ ExtenalSorter will divide the elements into chunks, sort them in memory and dump them into disks, finally merge them back. The spilling will only happen when the used memory goes above the limit. 
    >>> sorter = ExternalSorter(1)  # 1M
    >>> import random
    >>> l = list(range(1024))
    >>> random.shuffle(l)
    >>> sorted(l) == list(sorter.sorted(l))
    True
    >>> sorted(l) == list(sorter.sorted(l, key=lambda x: -x, reverse=True))
    True
    """
    def __init__(self, memory_limit, serializer=None):
        self.memory_limit = memory_limit
        self.local_dirs = _get_local_dirs("sort")
        self.serializer = _compressed_serializer(serializer)

    def _get_path(self, n):
        """ Choose one directory for spill by number n """
        d = self.local_dirs[n % len(self.local_dirs)]
        if not os.path.exists(d):
            os.makedirs(d)
        return os.path.join(d, str(n))

    def _next_limit(self):
        """
        Return the next memory limit. If memory is not released after
        spilling, the data is dumped again only once the used memory
        starts to increase.
        """
        return max(self.memory_limit, get_used_memory() * 1.05)

    def sorted(self, iterator, key=None, reverse=False):
        """
        Sort the elements in the iterator, falling back to an external
        sort when memory use goes above the limit.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        batch, limit = 100, self._next_limit()
        chunks, current_chunk = [], []
        iterator = iter(iterator)
        while True:
            # pick elements in batches
            chunk = list(itertools.islice(iterator, batch))
            current_chunk.extend(chunk)
            if len(chunk) < batch:
                break

            used_memory = get_used_memory()
            if used_memory > limit:
                # sorting them in place saves memory
                current_chunk.sort(key=key, reverse=reverse)
                path = self._get_path(len(chunks))
                with open(path, 'wb') as f:
                    self.serializer.dump_stream(current_chunk, f)

                def load(f):
                    for v in self.serializer.load_stream(f):
                        yield v
                    # close the file explicitly once we have consumed all
                    # the items, to avoid ResourceWarning in Python 3
                    f.close()
                chunks.append(load(open(path, 'rb')))
                current_chunk = []
                MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20
                DiskBytesSpilled += os.path.getsize(path)
                os.unlink(path)  # data will be deleted after close

            elif not chunks:
                batch = min(int(batch * 1.5), 10000)

        current_chunk.sort(key=key, reverse=reverse)
        if not chunks:
            return current_chunk

        if current_chunk:
            chunks.append(iter(current_chunk))

        return heapq.merge(chunks, key=key, reverse=reverse)


class ExternalList(object):
    """
    An ExternalList can hold many items that cannot all be held in
    memory at the same time.

    >>> l = ExternalList(list(range(100)))
    >>> len(l)
    100
    >>> l.append(10)
    >>> len(l)
    101
    >>> for i in range(20240):
    ...     l.append(i)
    >>> len(l)
    20341
    >>> import pickle
    >>> l2 = pickle.loads(pickle.dumps(l))
    >>> len(l2)
    20341
    >>> list(l2)[100]
    10
    """
    LIMIT = 10240

    def __init__(self, values):
        self.values = values
        self.count = len(values)
        self._file = None
        self._ser = None

    def __getstate__(self):
        if self._file is not None:
            self._file.flush()
            with os.fdopen(os.dup(self._file.fileno()), "rb") as f:
                f.seek(0)
                serialized = f.read()
        else:
            serialized = b''
        return self.values, self.count, serialized

    def __setstate__(self, item):
        self.values, self.count, serialized = item
        if serialized:
            self._open_file()
            self._file.write(serialized)
        else:
            self._file = None
            self._ser = None

    def __iter__(self):
        if self._file is not None:
            self._file.flush()
            # read all items from disk first
            with os.fdopen(os.dup(self._file.fileno()), 'rb') as f:
                f.seek(0)
                for v in self._ser.load_stream(f):
                    yield v

        for v in self.values:
            yield v

    def __len__(self):
        return self.count

    def append(self, value):
        self.values.append(value)
        self.count += 1
        # spill the values to disk once the in-memory list grows too large
        if len(self.values) >= self.LIMIT:
            self._spill()

    def _open_file(self):
        dirs = _get_local_dirs("objects")
        d = dirs[id(self) % len(dirs)]
        if not os.path.exists(d):
            os.makedirs(d)
        p = os.path.join(d, str(id(self)))
        self._file = open(p, "w+b", 65536)
        self._ser = BatchedSerializer(CompressedSerializer(PickleSerializer()), 1024)
        os.unlink(p)

    def __del__(self):
        if self._file:
            self._file.close()
            self._file = None

    def _spill(self):
        """ dump the in-memory values to disk """
        global MemoryBytesSpilled, DiskBytesSpilled
        if self._file is None:
            self._open_file()

        used_memory = get_used_memory()
        pos = self._file.tell()
        self._ser.dump_stream(self.values, self._file)
        self.values = []
        gc.collect()
        DiskBytesSpilled += self._file.tell() - pos
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20


class ExternalListOfList(ExternalList):
    """
    An external list of lists.

    >>> l = ExternalListOfList([[i, i] for i in range(100)])
    >>> len(l)
    200
    >>> l.append(range(10))
    >>> len(l)
    210
    >>> len(list(l))
    210
    """

    def __init__(self, values):
        ExternalList.__init__(self, values)
        self.count = sum(len(i) for i in values)

    def append(self, value):
        ExternalList.append(self, value)
        # already counted 1 in ExternalList.append
        self.count += len(value) - 1

    def __iter__(self):
        for values in ExternalList.__iter__(self):
            for v in values:
                yield v


class GroupByKey(object):
    """
    Group a sorted iterator as [(k1, it1), (k2, it2), ...]

    >>> k = [i // 3 for i in range(6)]
    >>> v = [[i] for i in range(6)]
    >>> g = GroupByKey(zip(k, v))
    >>> [(k, list(it)) for k, it in g]
    [(0, [0, 1, 2]), (1, [3, 4, 5])]
    """

    def __init__(self, iterator):
        self.iterator = iterator

    def __iter__(self):
        key, values = None, None
        for k, v in self.iterator:
            if values is not None and k == key:
                values.append(v)
            else:
                if values is not None:
                    yield (key, values)
                key = k
                values = ExternalListOfList([v])
        if values is not None:
            yield (key, values)


class ExternalGroupBy(ExternalMerger):

    """
    Group the items by key. If any partition cannot be held in memory,
    it falls back to a sort-based group-by.

    This class works as follows:

    - It repeatedly groups the items by key, saving them in one dict in
      memory.

    - When the used memory goes above the memory limit, it splits the
      combined data into partitions by hash code, dumping them to disk,
      one file per partition. If the number of keys in one partition is
      smaller than 1000, it sorts them by key before dumping to disk.

    - Then it goes through the rest of the iterator, grouping items by
      key into different dicts by hash. Once the used memory goes over
      the memory limit again, it dumps all the dicts to disk, one file
      per dict. This repeats until all the items have been combined. It
      will also try to sort the items by key in each partition before
      dumping to disk.

    - It yields the grouped items partition by partition. If the data
      in one partition can be held in memory, it loads and combines
      them in memory before yielding.

    - If the data in one partition cannot be held in memory, it sorts
      the items first. If all the files are already sorted, it merges
      them with heapq.merge(); otherwise it does an external sort over
      all the files.

    - After sorting, the `GroupByKey` class groups all the consecutive
      items with the same key together, yielding the values as an
      iterator.
    """
    SORT_KEY_LIMIT = 1000

    def flattened_serializer(self):
        assert isinstance(self.serializer, BatchedSerializer)
        ser = self.serializer
        return FlattenedValuesSerializer(ser, 20)

    def _object_size(self, obj):
        return len(obj)

    def _spill(self):
        """
        dump the already partitioned data to disk.
        """
        global MemoryBytesSpilled, DiskBytesSpilled
        path = self._get_spill_dir(self.spills)
        if not os.path.exists(path):
            os.makedirs(path)

        used_memory = get_used_memory()
        if not self.pdata:
            # The data has not been partitioned yet; iterate over it once
            # and write the items into different files, using no additional
            # memory. This branch is only taken the first time memory goes
            # above the limit.

            # open all the files for writing
            streams = [open(os.path.join(path, str(i)), 'wb')
                       for i in range(self.partitions)]

            # If the number of keys is small, the overhead of sorting is
            # small, so sort them before dumping to disk
            self._sorted = len(self.data) < self.SORT_KEY_LIMIT
            if self._sorted:
                self.serializer = self.flattened_serializer()
                for k in sorted(self.data.keys()):
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, self.data[k])], streams[h])
            else:
                for k, v in self.data.items():
                    h = self._partition(k)
                    self.serializer.dump_stream([(k, v)], streams[h])

            for s in streams:
                DiskBytesSpilled += s.tell()
                s.close()

            self.data.clear()
            # self.pdata is cached in `mergeValues` and `mergeCombiners`
            self.pdata.extend([{} for i in range(self.partitions)])

        else:
            for i in range(self.partitions):
                p = os.path.join(path, str(i))
                with open(p, "wb") as f:
                    # dump items in batch
                    if self._sorted:
                        # sort by key only (stable)
                        sorted_items = sorted(self.pdata[i].items(), key=operator.itemgetter(0))
                        self.serializer.dump_stream(sorted_items, f)
                    else:
                        self.serializer.dump_stream(self.pdata[i].items(), f)
                self.pdata[i].clear()
                DiskBytesSpilled += os.path.getsize(p)

        self.spills += 1
        gc.collect()  # release as much memory as possible
        MemoryBytesSpilled += max(used_memory - get_used_memory(), 0) << 20

    def _merged_items(self, index):
        size = sum(os.path.getsize(os.path.join(self._get_spill_dir(j), str(index)))
                   for j in range(self.spills))
        # If memory cannot hold the whole partition, use a sort-based
        # merge. Because of compression, the data on disk will be much
        # smaller than the memory it needs.
        if size >= self.memory_limit << 17:  # * 1M / 8
            return self._merge_sorted_items(index)

        self.data = {}
        for j in range(self.spills):
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            # do not check memory during merging
            with open(p, "rb") as f:
                self.mergeCombiners(self.serializer.load_stream(f), 0)
        return self.data.items()

    def _merge_sorted_items(self, index):
        """ load a partition from disk, then sort and group by key """
        def load_partition(j):
            path = self._get_spill_dir(j)
            p = os.path.join(path, str(index))
            with open(p, 'rb', 65536) as f:
                for v in self.serializer.load_stream(f):
                    yield v

        disk_items = [load_partition(j) for j in range(self.spills)]

        if self._sorted:
            # all the partitions are already sorted
            sorted_items = heapq.merge(disk_items, key=operator.itemgetter(0))

        else:
            # Flatten the combined values, so the merge sort does not
            # consume huge amounts of memory.
            ser = self.flattened_serializer()
            sorter = ExternalSorter(self.memory_limit, ser)
            sorted_items = sorter.sorted(itertools.chain(*disk_items),
                                         key=operator.itemgetter(0))
        return ((k, vs) for k, vs in GroupByKey(sorted_items))


if __name__ == "__main__":
    import doctest
    (failure_count, test_count) = doctest.testmod()
    if failure_count:
        exit(-1)
apache-2.0
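The spill-and-merge loop in ExternalSorter.sorted is the core idea of the file above. As a standalone illustration, here is a minimal sketch using only the standard library; it is not PySpark code: it spills on a fixed chunk size rather than sampled memory use, pickles items one at a time so the runs stream back lazily, and relies on the Python 3.5+ heapq.merge(key=...) signature (the original passes its list of chunks straight to merge, which matches PySpark's bundled heapq3 variant rather than the stdlib).

import heapq
import itertools
import pickle
import tempfile


def external_sorted(iterator, key=None, chunk_size=10000):
    """Sort an arbitrarily large iterator with bounded memory use."""
    iterator = iter(iterator)
    runs = []  # temporary files, each holding one sorted, pickled run
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            break
        chunk.sort(key=key)           # sorting in place keeps memory flat
        f = tempfile.TemporaryFile()
        for item in chunk:            # one pickle per item, so runs stream back
            pickle.dump(item, f)
        f.seek(0)
        runs.append(f)

    def load(run):
        # Stream items back until EOF, then close the run's file.
        try:
            while True:
                yield pickle.load(run)
        except EOFError:
            run.close()

    return heapq.merge(*(load(f) for f in runs), key=key)


if __name__ == "__main__":
    import random
    data = list(range(1000))
    random.shuffle(data)
    assert list(external_sorted(data, chunk_size=100)) == sorted(data)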
nanolearningllc/edx-platform-cypress-2
common/test/acceptance/tests/studio/test_studio_library.py
7
27320
""" Acceptance tests for Content Libraries in Studio """ from ddt import ddt, data from nose.plugins.attrib import attr from flaky import flaky from .base_studio_test import StudioLibraryTest from ...fixtures.course import XBlockFixtureDesc from ...pages.studio.auto_auth import AutoAuthPage from ...pages.studio.utils import add_component from ...pages.studio.library import LibraryEditPage from ...pages.studio.users import LibraryUsersPage @attr('shard_2') @ddt class LibraryEditPageTest(StudioLibraryTest): """ Test the functionality of the library edit page. """ def setUp(self): # pylint: disable=arguments-differ """ Ensure a library exists and navigate to the library edit page. """ super(LibraryEditPageTest, self).setUp() self.lib_page = LibraryEditPage(self.browser, self.library_key) self.lib_page.visit() self.lib_page.wait_until_ready() def test_page_header(self): """ Scenario: Ensure that the library's name is displayed in the header and title. Given I have a library in Studio And I navigate to Library Page in Studio Then I can see library name in page header title And I can see library name in browser page title """ self.assertIn(self.library_info['display_name'], self.lib_page.get_header_title()) self.assertIn(self.library_info['display_name'], self.browser.title) def test_add_duplicate_delete_actions(self): """ Scenario: Ensure that we can add an HTML block, duplicate it, then delete the original. Given I have a library in Studio with no XBlocks And I navigate to Library Page in Studio Then there are no XBlocks displayed When I add Text XBlock Then one XBlock is displayed When I duplicate first XBlock Then two XBlocks are displayed And those XBlocks locators' are different When I delete first XBlock Then one XBlock is displayed And displayed XBlock are second one """ self.assertEqual(len(self.lib_page.xblocks), 0) # Create a new block: add_component(self.lib_page, "html", "Text") self.assertEqual(len(self.lib_page.xblocks), 1) first_block_id = self.lib_page.xblocks[0].locator # Duplicate the block: self.lib_page.click_duplicate_button(first_block_id) self.assertEqual(len(self.lib_page.xblocks), 2) second_block_id = self.lib_page.xblocks[1].locator self.assertNotEqual(first_block_id, second_block_id) # Delete the first block: self.lib_page.click_delete_button(first_block_id, confirm=True) self.assertEqual(len(self.lib_page.xblocks), 1) self.assertEqual(self.lib_page.xblocks[0].locator, second_block_id) def test_no_edit_visibility_button(self): """ Scenario: Ensure that library xblocks do not have 'edit visibility' buttons. Given I have a library in Studio with no XBlocks And I navigate to Library Page in Studio When I add Text XBlock Then one XBlock is displayed And no 'edit visibility' button is shown """ add_component(self.lib_page, "html", "Text") self.assertFalse(self.lib_page.xblocks[0].has_edit_visibility_button) def test_add_edit_xblock(self): """ Scenario: Ensure that we can add an XBlock, edit it, then see the resulting changes. 
        Given I have a library in Studio with no XBlocks
        And I navigate to Library Page in Studio
        Then there are no XBlocks displayed
        When I add Multiple Choice XBlock
        Then one XBlock is displayed
        When I edit first XBlock
        And I go to basic tab
        And set its text to a fairly trivial question about Battlestar Galactica
        And save XBlock
        Then one XBlock is displayed
        And the first XBlock's student content contains at least part of the text I set
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)

        # Create a new problem block:
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]

        # Edit it:
        problem_block.edit()
        problem_block.open_basic_tab()
        problem_block.set_codemirror_text(
            """
            >>Who is "Starbuck"?<<
             (x) Kara Thrace
             ( ) William Adama
             ( ) Laura Roslin
             ( ) Lee Adama
             ( ) Gaius Baltar
            """
        )
        problem_block.save_settings()

        # Check that the save worked:
        self.assertEqual(len(self.lib_page.xblocks), 1)
        problem_block = self.lib_page.xblocks[0]
        self.assertIn("Laura Roslin", problem_block.student_content)

    def test_no_discussion_button(self):
        """
        Ensure the UI is not loaded for adding discussions.
        """
        self.assertFalse(self.browser.find_elements_by_css_selector('span.large-discussion-icon'))

    @flaky  # TODO fix this, see TNL-2322
    def test_library_pagination(self):
        """
        Scenario: Ensure that adding several XBlocks to a library results in pagination.
        Given that I have a library in Studio with no XBlocks
        And I create 10 Multiple Choice XBlocks
        Then 10 are displayed.
        When I add one more Multiple Choice XBlock
        Then 1 XBlock will be displayed
        When I delete that XBlock
        Then 10 are displayed.
        """
        self.assertEqual(len(self.lib_page.xblocks), 0)
        for _ in range(10):
            add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 10)
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 10)

    @data('top', 'bottom')
    def test_nav_present_but_disabled(self, position):
        """
        Scenario: Ensure that the navigation buttons aren't active when there aren't enough XBlocks.
        Given that I have a library in Studio with no XBlocks
        The Navigation buttons should be disabled.
        When I add a multiple choice problem
        The Navigation buttons should be disabled.
""" self.assertEqual(len(self.lib_page.xblocks), 0) self.assertTrue(self.lib_page.nav_disabled(position)) add_component(self.lib_page, "problem", "Multiple Choice") self.assertTrue(self.lib_page.nav_disabled(position)) def test_delete_deletes_only_desired_block(self): """ Scenario: Ensure that when deleting XBlock only desired XBlock is deleted Given that I have a library in Studio with no XBlocks And I create Blank Common Problem XBlock And I create Checkboxes XBlock When I delete Blank Problem XBlock Then Checkboxes XBlock is not deleted And Blank Common Problem XBlock is deleted """ self.assertEqual(len(self.lib_page.xblocks), 0) add_component(self.lib_page, "problem", "Blank Common Problem") add_component(self.lib_page, "problem", "Checkboxes") self.assertEqual(len(self.lib_page.xblocks), 2) self.assertIn("Blank Common Problem", self.lib_page.xblocks[0].name) self.assertIn("Checkboxes", self.lib_page.xblocks[1].name) self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator) self.assertEqual(len(self.lib_page.xblocks), 1) problem_block = self.lib_page.xblocks[0] self.assertIn("Checkboxes", problem_block.name) @attr('shard_5') @ddt class LibraryNavigationTest(StudioLibraryTest): """ Test common Navigation actions """ def setUp(self): # pylint: disable=arguments-differ """ Ensure a library exists and navigate to the library edit page. """ super(LibraryNavigationTest, self).setUp() self.lib_page = LibraryEditPage(self.browser, self.library_key) self.lib_page.visit() self.lib_page.wait_until_ready() def populate_library_fixture(self, library_fixture): """ Create four pages worth of XBlocks, and offset by one so each is named after the number they should be in line by the user's perception. """ # pylint: disable=attribute-defined-outside-init self.blocks = [XBlockFixtureDesc('html', str(i)) for i in xrange(1, 41)] library_fixture.add_children(*self.blocks) def test_arbitrary_page_selection(self): """ Scenario: I can pick a specific page number of a Library at will. 
Given that I have a library in Studio with 40 XBlocks When I go to the 3rd page The first XBlock should be the 21st XBlock When I go to the 4th Page The first XBlock should be the 31st XBlock When I go to the 1st page The first XBlock should be the 1st XBlock When I go to the 2nd page The first XBlock should be the 11th XBlock """ self.lib_page.go_to_page(3) self.assertEqual(self.lib_page.xblocks[0].name, '21') self.lib_page.go_to_page(4) self.assertEqual(self.lib_page.xblocks[0].name, '31') self.lib_page.go_to_page(1) self.assertEqual(self.lib_page.xblocks[0].name, '1') self.lib_page.go_to_page(2) self.assertEqual(self.lib_page.xblocks[0].name, '11') def test_bogus_page_selection(self): """ Scenario: I can't pick a nonsense page number of a Library Given that I have a library in Studio with 40 XBlocks When I attempt to go to the 'a'th page The input field will be cleared and no change of XBlocks will be made When I attempt to visit the 5th page The input field will be cleared and no change of XBlocks will be made When I attempt to visit the -1st page The input field will be cleared and no change of XBlocks will be made When I attempt to visit the 0th page The input field will be cleared and no change of XBlocks will be made """ self.assertEqual(self.lib_page.xblocks[0].name, '1') self.lib_page.go_to_page('a') self.assertTrue(self.lib_page.check_page_unchanged('1')) self.lib_page.go_to_page(-1) self.assertTrue(self.lib_page.check_page_unchanged('1')) self.lib_page.go_to_page(5) self.assertTrue(self.lib_page.check_page_unchanged('1')) self.lib_page.go_to_page(0) self.assertTrue(self.lib_page.check_page_unchanged('1')) @data('top', 'bottom') def test_nav_buttons(self, position): """ Scenario: Ensure that the navigation buttons work. Given that I have a library in Studio with 40 XBlocks The previous button should be disabled. The first XBlock should be the 1st XBlock Then if I hit the next button The first XBlock should be the 11th XBlock Then if I hit the next button The first XBlock should be the 21st XBlock Then if I hit the next button The first XBlock should be the 31st XBlock And the next button should be disabled Then if I hit the previous button The first XBlock should be the 21st XBlock Then if I hit the previous button The first XBlock should be the 11th XBlock Then if I hit the previous button The first XBlock should be the 1st XBlock And the previous button should be disabled """ # Check forward navigation self.assertTrue(self.lib_page.nav_disabled(position, ['previous'])) self.assertEqual(self.lib_page.xblocks[0].name, '1') self.lib_page.move_forward(position) self.assertEqual(self.lib_page.xblocks[0].name, '11') self.lib_page.move_forward(position) self.assertEqual(self.lib_page.xblocks[0].name, '21') self.lib_page.move_forward(position) self.assertEqual(self.lib_page.xblocks[0].name, '31') self.lib_page.nav_disabled(position, ['next']) # Check backward navigation self.lib_page.move_back(position) self.assertEqual(self.lib_page.xblocks[0].name, '21') self.lib_page.move_back(position) self.assertEqual(self.lib_page.xblocks[0].name, '11') self.lib_page.move_back(position) self.assertEqual(self.lib_page.xblocks[0].name, '1') self.assertTrue(self.lib_page.nav_disabled(position, ['previous'])) def test_library_pagination(self): """ Scenario: Ensure that adding several XBlocks to a library results in pagination. 
        Given that I have a library in Studio with 40 XBlocks
        Then 10 are displayed
        And the first XBlock will be the 1st one
        And I'm on the 1st page
        When I add 1 Multiple Choice XBlock
        Then 1 XBlock will be displayed
        And I'm on the 5th page
        The first XBlock will be the newest one
        When I delete that XBlock
        Then 10 are displayed
        And I'm on the 4th page
        And the first XBlock is the 31st one
        And the last XBlock is the 40th one.
        """
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '1')
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        add_component(self.lib_page, "problem", "Multiple Choice")
        self.assertEqual(len(self.lib_page.xblocks), 1)
        self.assertEqual(self.lib_page.get_page_number(), '5')
        self.assertEqual(self.lib_page.xblocks[0].name, "Multiple Choice")
        self.lib_page.click_delete_button(self.lib_page.xblocks[0].locator)
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '4')
        self.assertEqual(self.lib_page.xblocks[0].name, '31')
        self.assertEqual(self.lib_page.xblocks[-1].name, '40')

    def test_delete_shifts_blocks(self):
        """
        Scenario: Ensure that removing an XBlock shifts other blocks back.
        Given that I have a library in Studio with 40 XBlocks
        Then 10 are displayed
        And I will be on the first page
        When I delete the third XBlock
        There will be 10 displayed
        And the first XBlock will be the first one
        And the last XBlock will be the 11th one
        And I will be on the first page
        """
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.get_page_number(), '1')
        self.lib_page.click_delete_button(self.lib_page.xblocks[2].locator, confirm=True)
        self.assertEqual(len(self.lib_page.xblocks), 10)
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.assertEqual(self.lib_page.xblocks[-1].name, '11')
        self.assertEqual(self.lib_page.get_page_number(), '1')

    def test_previews(self):
        """
        Scenario: Ensure the user is able to hide previews of XBlocks.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        Then the previews will not be visible
        And when I click the toggle previews button
        Then the previews are visible
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertFalse(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertTrue(self.lib_page.are_previews_showing())

    def test_previews_navigation(self):
        """
        Scenario: Ensure preview settings persist across navigation.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        And click the next page button
        Then the previews will not be visible
        And the first XBlock will be the 11th one
        And the last XBlock will be the 20th one
        And when I click the toggle previews button
        And I click the previous page button
        Then the previews will be visible
        And the first XBlock will be the first one
        And the last XBlock will be the 10th one
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        # Which set of arrows is used shouldn't matter for this test.
        self.lib_page.move_forward('top')
        self.assertFalse(self.lib_page.are_previews_showing())
        self.assertEqual(self.lib_page.xblocks[0].name, '11')
        self.assertEqual(self.lib_page.xblocks[-1].name, '20')
        self.lib_page.toggle_previews()
        self.lib_page.move_back('top')
        self.assertTrue(self.lib_page.are_previews_showing())
        self.assertEqual(self.lib_page.xblocks[0].name, '1')
        self.assertEqual(self.lib_page.xblocks[-1].name, '10')

    def test_preview_state_persistence(self):
        """
        Scenario: Ensure preview state persists between page loads.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        And I revisit the page
        Then the previews will not be visible
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.lib_page.visit()
        self.lib_page.wait_until_ready()
        self.assertFalse(self.lib_page.are_previews_showing())

    def test_preview_add_xblock(self):
        """
        Scenario: Ensure previews are shown when adding new blocks, regardless of preview setting.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        Then the previews will not be visible
        And when I add an XBlock
        Then I will be on the 5th page
        And the XBlock will have loaded a preview
        And when I revisit the library
        And I go to the 5th page
        Then the top XBlock will be the one I added
        And it will not have a preview
        And when I add an XBlock
        Then the XBlock I added will have a preview
        And the top XBlock will not have one.
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertFalse(self.lib_page.are_previews_showing())
        add_component(self.lib_page, "problem", "Checkboxes")
        self.assertEqual(self.lib_page.get_page_number(), '5')
        first_added = self.lib_page.xblocks[0]
        self.assertIn("Checkboxes", first_added.name)
        self.assertFalse(self.lib_page.xblocks[0].is_placeholder())
        self.lib_page.visit()
        self.lib_page.wait_until_ready()
        self.lib_page.go_to_page(5)
        self.assertTrue(self.lib_page.xblocks[0].is_placeholder())
        add_component(self.lib_page, "problem", "Multiple Choice")
        # The DOM has detached the element since the last assignment
        first_added = self.lib_page.xblocks[0]
        second_added = self.lib_page.xblocks[1]
        self.assertIn("Multiple Choice", second_added.name)
        self.assertFalse(second_added.is_placeholder())
        self.assertTrue(first_added.is_placeholder())

    def test_edit_with_preview(self):
        """
        Scenario: Editing an XBlock should show me a preview even if previews are hidden.
        Given that I have a library in Studio with 40 XBlocks
        Then previews are visible
        And when I click the toggle previews button
        Then the previews will not be visible
        And when I edit the first XBlock
        Then the first XBlock will show a preview
        And the other XBlocks will still be placeholders
        """
        self.assertTrue(self.lib_page.are_previews_showing())
        self.lib_page.toggle_previews()
        self.assertFalse(self.lib_page.are_previews_showing())
        target = self.lib_page.xblocks[0]
        target.edit()
        target.save_settings()
        self.assertFalse(target.is_placeholder())
        self.assertTrue(all([xblock.is_placeholder() for xblock in self.lib_page.xblocks[1:]]))

    def test_duplicate_xblock_pagination(self):
        """
        Scenario: Duplicating an XBlock should not shift the page if the XBlock is not at the end.
Given that I have a library in Studio with 40 XBlocks When I duplicate the third XBlock Then the page should not change And the duplicate XBlock should be there And it should show a preview And there should not be more than 10 XBlocks visible. """ third_block_id = self.lib_page.xblocks[2].locator self.lib_page.click_duplicate_button(third_block_id) self.lib_page.wait_until_ready() target = self.lib_page.xblocks[3] self.assertIn('Duplicate', target.name) self.assertFalse(target.is_placeholder()) self.assertEqual(len(self.lib_page.xblocks), 10) def test_duplicate_xblock_pagination_end(self): """ Scenario: Duplicating an XBlock if it's the last one should bring me to the next page with a preview. Given that I have a library in Studio with 40 XBlocks And when I hide previews And I duplicate the last XBlock The page should change to page 2 And the duplicate XBlock should be the first XBlock And it should not be a placeholder """ self.lib_page.toggle_previews() last_block_id = self.lib_page.xblocks[-1].locator self.lib_page.click_duplicate_button(last_block_id) self.lib_page.wait_until_ready() self.assertEqual(self.lib_page.get_page_number(), '2') target_block = self.lib_page.xblocks[0] self.assertIn('Duplicate', target_block.name) self.assertFalse(target_block.is_placeholder()) class LibraryUsersPageTest(StudioLibraryTest): """ Test the functionality of the library "Instructor Access" page. """ def setUp(self): super(LibraryUsersPageTest, self).setUp() # Create a second user for use in these tests: AutoAuthPage(self.browser, username="second", email="second@example.com", no_login=True).visit() self.page = LibraryUsersPage(self.browser, self.library_key) self.page.visit() def _refresh_page(self): """ Reload the page. """ self.page = LibraryUsersPage(self.browser, self.library_key) self.page.visit() def test_user_management(self): """ Scenario: Ensure that we can edit the permissions of users. Given I have a library in Studio where I am the only admin assigned (which is the default for a newly-created library) And I navigate to Library "Instructor Access" Page in Studio Then there should be one user listed (myself), and I must not be able to remove myself or my instructor privilege. When I click Add Instructor Then I see a form to complete When I complete the form and submit it Then I can see the new user is listed as a "User" of the library When I click to Add Staff permissions to the new user Then I can see the new user has staff permissions and that I am now able to promote them to an Admin or remove their staff permissions. When I click to Add Admin permissions to the new user Then I can see the new user has admin permissions and that I can now remove Admin permissions from either user. """ def check_is_only_admin(user): """ Ensure user is an admin user and cannot be removed. (There must always be at least one admin user.) 
""" self.assertIn("admin", user.role_label.lower()) self.assertFalse(user.can_promote) self.assertFalse(user.can_demote) self.assertFalse(user.can_delete) self.assertTrue(user.has_no_change_warning) self.assertIn("Promote another member to Admin to remove your admin rights", user.no_change_warning_text) self.assertEqual(len(self.page.users), 1) user = self.page.users[0] self.assertTrue(user.is_current_user) check_is_only_admin(user) # Add a new user: self.assertTrue(self.page.has_add_button) self.assertFalse(self.page.new_user_form_visible) self.page.click_add_button() self.assertTrue(self.page.new_user_form_visible) self.page.set_new_user_email('second@example.com') self.page.click_submit_new_user_form() # Check the new user's listing: def get_two_users(): """ Expect two users to be listed, one being me, and another user. Returns me, them """ users = self.page.users self.assertEqual(len(users), 2) self.assertEqual(len([u for u in users if u.is_current_user]), 1) if users[0].is_current_user: return users[0], users[1] else: return users[1], users[0] self._refresh_page() user_me, them = get_two_users() check_is_only_admin(user_me) self.assertIn("user", them.role_label.lower()) self.assertTrue(them.can_promote) self.assertIn("Add Staff Access", them.promote_button_text) self.assertFalse(them.can_demote) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Add Staff permissions to the new user: them.click_promote() self._refresh_page() user_me, them = get_two_users() check_is_only_admin(user_me) self.assertIn("staff", them.role_label.lower()) self.assertTrue(them.can_promote) self.assertIn("Add Admin Access", them.promote_button_text) self.assertTrue(them.can_demote) self.assertIn("Remove Staff Access", them.demote_button_text) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Add Admin permissions to the new user: them.click_promote() self._refresh_page() user_me, them = get_two_users() self.assertIn("admin", user_me.role_label.lower()) self.assertFalse(user_me.can_promote) self.assertTrue(user_me.can_demote) self.assertTrue(user_me.can_delete) self.assertFalse(user_me.has_no_change_warning) self.assertIn("admin", them.role_label.lower()) self.assertFalse(them.can_promote) self.assertTrue(them.can_demote) self.assertIn("Remove Admin Access", them.demote_button_text) self.assertTrue(them.can_delete) self.assertFalse(them.has_no_change_warning) # Delete the new user: them.click_delete() self._refresh_page() self.assertEqual(len(self.page.users), 1) user = self.page.users[0] self.assertTrue(user.is_current_user)
agpl-3.0
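The page arithmetic the pagination scenarios above assert (the 21st XBlock heading page 3, a 41st block landing alone on page 5) all follows from a fixed page size of 10. A tiny helper, mine rather than part of the edx suite, makes those expected numbers checkable:

PAGE_SIZE = 10


def page_of(block_index, page_size=PAGE_SIZE):
    """Return the 1-based page number holding the 1-based block index."""
    return (block_index - 1) // page_size + 1


def first_on_page(page, page_size=PAGE_SIZE):
    """Return the 1-based index of the first block on the given page."""
    return (page - 1) * page_size + 1


assert page_of(21) == 3        # the 21st XBlock heads page 3
assert page_of(41) == 5        # a 41st block creates a 5th page by itself
assert first_on_page(4) == 31  # page 4 starts at the 31st block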
Markyparky56/WasmNoise
removeextraexports.py
1
1513
""" Module to process the wat file output by s2wasm, and remove unneeded exported functions """ def canRemoveExport(export, exports, enabledFlags): """ Cycles through each exported function, comparing its name to those in the enabled set of functions, if it isn't present it returns true """ # Get the function name within the export declaration funcName = export.split('"')[1] if funcName in exports["exports"]["getset"]["funcs"]: return False # Is listed in exports.getset, so we can't remove it else: for enabled in enabledFlags: if funcName in exports["exports"][enabled]["funcs"]: return False return True def removeExtraExports(fileName, enabledFlags, exports): """ Writes out a modified version of the input file, removing functions which aren't enabled """ # Construct the new file name, inserting cleanexports before the file ending fileParts = fileName.split('.') length = len(fileParts) fileParts.insert(length-1, "cleanexports") newfile = '.'.join(fileParts) with open(fileName, "r") as pre: with open(newfile, "w") as post: # Check every line for line in pre: # If it's an export, check if it's one we want if "export" in line: # Check if the exported function is in the exportsfile if canRemoveExport(line, exports, enabledFlags): continue else: post.write(line) # If it's not an export leave it alone else: post.write(line)
mit
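A plausible invocation of the module above, assuming an exports manifest shaped the way canRemoveExport indexes it (exports['exports'][flag]['funcs']); the file name, flag names, and function names here are invented for illustration:

from removeextraexports import removeExtraExports

# Hypothetical manifest: which functions each build flag exports.
exports = {
    "exports": {
        "getset": {"funcs": ["GetSeed", "SetSeed", "GetFrequency", "SetFrequency"]},
        "perlin": {"funcs": ["GetPerlin2", "GetPerlin3"]},
        "simplex": {"funcs": ["GetSimplex2", "GetSimplex3"]},
    }
}

# Build with only the Perlin functions enabled; simplex exports get stripped.
# Writes "WasmNoise.cleanexports.wat" next to the input file.
removeExtraExports("WasmNoise.wat", ["perlin"], exports)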
cloudnull/ansible
lib/ansible/plugins/lookup/dig.py
82
8222
# (c) 2015, Jan-Piet Mens <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase import socket try: import dns.resolver import dns.reversename from dns.rdatatype import * from dns.exception import DNSException HAVE_DNS = True except ImportError: HAVE_DNS = False def make_rdata_dict(rdata): ''' While the 'dig' lookup plugin supports anything which dnspython supports out of the box, the following supported_types list describes which DNS query types we can convert to a dict. Note: adding support for RRSIG is hard work. :) ''' supported_types = { A : ['address'], AAAA : ['address'], CNAME : ['target'], DNAME : ['target'], DLV : ['algorithm', 'digest_type', 'key_tag', 'digest'], DNSKEY : ['flags', 'algorithm', 'protocol', 'key'], DS : ['algorithm', 'digest_type', 'key_tag', 'digest'], HINFO : ['cpu', 'os'], LOC : ['latitude', 'longitude', 'altitude', 'size', 'horizontal_precision', 'vertical_precision'], MX : ['preference', 'exchange'], NAPTR : ['order', 'preference', 'flags', 'service', 'regexp', 'replacement'], NS : ['target'], NSEC3PARAM : ['algorithm', 'flags', 'iterations', 'salt'], PTR : ['target'], RP : ['mbox', 'txt'], # RRSIG : ['algorithm', 'labels', 'original_ttl', 'expiration', 'inception', 'signature'], SOA : ['mname', 'rname', 'serial', 'refresh', 'retry', 'expire', 'minimum'], SPF : ['strings'], SRV : ['priority', 'weight', 'port', 'target'], SSHFP : ['algorithm', 'fp_type', 'fingerprint'], TLSA : ['usage', 'selector', 'mtype', 'cert'], TXT : ['strings'], } rd = {} if rdata.rdtype in supported_types: fields = supported_types[rdata.rdtype] for f in fields: val = rdata.__getattribute__(f) if type(val) == dns.name.Name: val = dns.name.Name.to_text(val) if rdata.rdtype == DLV and f == 'digest': val = dns.rdata._hexify(rdata.digest).replace(' ', '') if rdata.rdtype == DS and f == 'digest': val = dns.rdata._hexify(rdata.digest).replace(' ', '') if rdata.rdtype == DNSKEY and f == 'key': val = dns.rdata._base64ify(rdata.key).replace(' ', '') if rdata.rdtype == NSEC3PARAM and f == 'salt': val = dns.rdata._hexify(rdata.salt).replace(' ', '') if rdata.rdtype == SSHFP and f == 'fingerprint': val = dns.rdata._hexify(rdata.fingerprint).replace(' ', '') if rdata.rdtype == TLSA and f == 'cert': val = dns.rdata._hexify(rdata.cert).replace(' ', '') rd[f] = val return rd # ============================================================== # dig: Lookup DNS records # # -------------------------------------------------------------- class LookupModule(LookupBase): def run(self, terms, variables=None, **kwargs): ''' terms contains a string with things to `dig' for. 
        We support the following formats:

            example.com                          # A record
            example.com qtype=A                  # same
            example.com/TXT                      # specific qtype
            example.com qtype=txt                # same
            192.168.1.2/PTR                      # reverse PTR
              ^^ shortcut for 2.1.168.192.in-addr.arpa/PTR
            example.net/AAAA @nameserver         # query specified server
                              ^^^ can be comma-sep list of names/addresses

            ... flat=0                           # returns a dict; default is 1 == string
        '''

        if not HAVE_DNS:
            raise AnsibleError("Can't LOOKUP(dig): module dns.resolver is not installed")

        # Create Resolver object so that we can set NS if necessary
        myres = dns.resolver.Resolver()
        edns_size = 4096
        myres.use_edns(0, ednsflags=dns.flags.DO, payload=edns_size)

        domain = None
        qtype = 'A'
        flat = True

        for t in terms:
            if t.startswith('@'):       # e.g. "@10.0.1.2,192.168.1.1" is ok.
                nsset = t[1:].split(',')
                nameservers = []
                for ns in nsset:
                    # Check if we have a valid IP address. If so, use that, otherwise
                    # try to resolve name to address using system's resolver. If that
                    # fails we bail out.
                    try:
                        socket.inet_aton(ns)
                        nameservers.append(ns)
                    except Exception:
                        try:
                            nsaddr = dns.resolver.query(ns)[0].address
                            nameservers.append(nsaddr)
                        except Exception as e:
                            raise AnsibleError("dns lookup NS: %s" % str(e))
                myres.nameservers = nameservers
                continue

            if '=' in t:
                try:
                    opt, arg = t.split('=')
                except ValueError:
                    # malformed option; skip it rather than referencing
                    # the unbound opt/arg below
                    continue

                if opt == 'qtype':
                    qtype = arg.upper()
                elif opt == 'flat':
                    flat = int(arg)
                continue

            if '/' in t:
                try:
                    domain, qtype = t.split('/')
                except ValueError:
                    domain = t
            else:
                domain = t

        # print "--- domain = {0} qtype={1}".format(domain, qtype)

        ret = []

        if qtype.upper() == 'PTR':
            try:
                n = dns.reversename.from_address(domain)
                domain = n.to_text()
            except dns.exception.SyntaxError:
                pass
            except Exception as e:
                raise AnsibleError("dns.reversename unhandled exception: %s" % str(e))

        try:
            answers = myres.query(domain, qtype)
            for rdata in answers:
                s = rdata.to_text()
                if qtype.upper() == 'TXT':
                    s = s[1:-1]  # Strip outside quotes on TXT rdata

                if flat:
                    ret.append(s)
                else:
                    try:
                        rd = make_rdata_dict(rdata)
                        rd['owner'] = answers.canonical_name.to_text()
                        rd['type'] = dns.rdatatype.to_text(rdata.rdtype)
                        rd['ttl'] = answers.rrset.ttl
                        ret.append(rd)
                    except Exception as e:
                        ret.append(str(e))

        except dns.resolver.NXDOMAIN:
            ret.append('NXDOMAIN')
        except dns.resolver.NoAnswer:
            ret.append("")
        except dns.resolver.Timeout:
            ret.append('')
        except dns.exception.DNSException as e:
            raise AnsibleError("dns.resolver unhandled exception: %s" % str(e))

        return ret
gpl-3.0
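Outside Ansible, the same record flattening can be reproduced with a few lines of dnspython. A sketch, assuming a recent dnspython where Resolver.resolve() exists (older releases, like the one the plugin above targets, spell it query()):

import dns.rdatatype
import dns.resolver


def dig(domain, qtype='A', nameservers=None):
    """Resolve domain/qtype and return a list of flattened record dicts."""
    resolver = dns.resolver.Resolver()
    if nameservers:
        resolver.nameservers = nameservers
    # On dnspython < 2.0 this call is resolver.query(), as in the plugin above.
    answers = resolver.resolve(domain, qtype)
    return [
        {
            'owner': answers.canonical_name.to_text(),
            'type': dns.rdatatype.to_text(rdata.rdtype),
            'ttl': answers.rrset.ttl,
            'value': rdata.to_text(),
        }
        for rdata in answers
    ]

# e.g. dig('example.org', 'MX', nameservers=['9.9.9.9'])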
joone/chromium-crosswalk
tools/telemetry/telemetry/testing/test_page_test_results.py
15
1600
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from telemetry.internal.results import page_test_results from telemetry.page import page as page_module from telemetry.value import list_of_scalar_values from telemetry.value import scalar class TestPageTestResults( page_test_results.PageTestResults): def __init__(self, test): super(TestPageTestResults, self).__init__() self.test = test page = page_module.Page("http://www.google.com", {}) self.WillRunPage(page) def GetPageSpecificValueNamed(self, name): values = [value for value in self.all_page_specific_values if value.name == name] assert len(values) == 1, 'Could not find value named %s' % name return values[0] def AssertHasPageSpecificScalarValue(self, name, units, expected_value): value = self.GetPageSpecificValueNamed(name) self.test.assertEquals(units, value.units) self.test.assertTrue(isinstance(value, scalar.ScalarValue)) self.test.assertEquals(expected_value, value.value) def AssertHasPageSpecificListOfScalarValues(self, name, units, expected_values): value = self.GetPageSpecificValueNamed(name) self.test.assertEquals(units, value.units) self.test.assertTrue( isinstance(value, list_of_scalar_values.ListOfScalarValues)) self.test.assertItemsEqual(expected_values, value.values) def __str__(self): return '\n'.join([repr(x) for x in self.all_page_specific_values])
bsd-3-clause
boompieman/iim_project
project_python2/lib/python2.7/site-packages/jsonschema/tests/test_cli.py
61
3575
from jsonschema import Draft4Validator, ValidationError, cli from jsonschema.compat import StringIO from jsonschema.tests.compat import mock, unittest def fake_validator(*errors): errors = list(reversed(errors)) class FakeValidator(object): def __init__(self, *args, **kwargs): pass def iter_errors(self, instance): if errors: return errors.pop() return [] return FakeValidator class TestParser(unittest.TestCase): FakeValidator = fake_validator() def setUp(self): mock_open = mock.mock_open() patch_open = mock.patch.object(cli, "open", mock_open, create=True) patch_open.start() self.addCleanup(patch_open.stop) mock_json_load = mock.Mock() mock_json_load.return_value = {} patch_json_load = mock.patch("json.load") patch_json_load.start() self.addCleanup(patch_json_load.stop) def test_find_validator_by_fully_qualified_object_name(self): arguments = cli.parse_args( [ "--validator", "jsonschema.tests.test_cli.TestParser.FakeValidator", "--instance", "foo.json", "schema.json", ] ) self.assertIs(arguments["validator"], self.FakeValidator) def test_find_validator_in_jsonschema(self): arguments = cli.parse_args( [ "--validator", "Draft4Validator", "--instance", "foo.json", "schema.json", ] ) self.assertIs(arguments["validator"], Draft4Validator) class TestCLI(unittest.TestCase): def test_successful_validation(self): stdout, stderr = StringIO(), StringIO() exit_code = cli.run( { "validator": fake_validator(), "schema": {}, "instances": [1], "error_format": "{error.message}", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertFalse(stderr.getvalue()) self.assertEqual(exit_code, 0) def test_unsuccessful_validation(self): error = ValidationError("I am an error!", instance=1) stdout, stderr = StringIO(), StringIO() exit_code = cli.run( { "validator": fake_validator([error]), "schema": {}, "instances": [1], "error_format": "{error.instance} - {error.message}", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertEqual(stderr.getvalue(), "1 - I am an error!") self.assertEqual(exit_code, 1) def test_unsuccessful_validation_multiple_instances(self): first_errors = [ ValidationError("9", instance=1), ValidationError("8", instance=1), ] second_errors = [ValidationError("7", instance=2)] stdout, stderr = StringIO(), StringIO() exit_code = cli.run( { "validator": fake_validator(first_errors, second_errors), "schema": {}, "instances": [1, 2], "error_format": "{error.instance} - {error.message}\t", }, stdout=stdout, stderr=stderr, ) self.assertFalse(stdout.getvalue()) self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t") self.assertEqual(exit_code, 1)
gpl-3.0
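For contrast with the fakes above, the same cli.run entry point can be driven with a real validator. This sketch assumes the same (older) jsonschema layout the tests import, and feeds one passing and one failing instance:

from jsonschema import Draft4Validator, cli
from jsonschema.compat import StringIO

stdout, stderr = StringIO(), StringIO()
exit_code = cli.run(
    {
        "validator": Draft4Validator,
        "schema": {"type": "integer"},
        "instances": [12, "not an integer"],
        "error_format": "{error.instance}: {error.message}\n",
    },
    stdout=stdout,
    stderr=stderr,
)
# exit_code == 1; stderr holds the message for the failing instance only.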
edx/xblock-lti-consumer
lti_consumer/plugin/urls.py
1
1707
""" URL mappings for LTI Consumer plugin. """ from django.conf import settings from django.conf.urls import url, include from rest_framework import routers from lti_consumer.plugin.views import ( public_keyset_endpoint, launch_gate_endpoint, access_token_endpoint, # LTI Advantage URLs LtiAgsLineItemViewset, deep_linking_response_endpoint, deep_linking_content_endpoint, # LTI NRPS URLs LtiNrpsContextMembershipViewSet, ) # LTI 1.3 APIs router router = routers.SimpleRouter(trailing_slash=False) router.register(r'lti-ags', LtiAgsLineItemViewset, basename='lti-ags-view') router.register(r'memberships', LtiNrpsContextMembershipViewSet, basename='lti-nrps-memberships-view') app_name = 'lti_consumer' urlpatterns = [ url( f'lti_consumer/v1/public_keysets/{settings.USAGE_ID_PATTERN}$', public_keyset_endpoint, name='lti_consumer.public_keyset_endpoint' ), url( 'lti_consumer/v1/launch/(?:/(?P<suffix>.*))?$', launch_gate_endpoint, name='lti_consumer.launch_gate' ), url( f'lti_consumer/v1/token/{settings.USAGE_ID_PATTERN}$', access_token_endpoint, name='lti_consumer.access_token' ), url( r'lti_consumer/v1/lti/(?P<lti_config_id>[-\w]+)/lti-dl/response', deep_linking_response_endpoint, name='lti_consumer.deep_linking_response_endpoint' ), url( r'lti_consumer/v1/lti/(?P<lti_config_id>[-\w]+)/lti-dl/content', deep_linking_content_endpoint, name='lti_consumer.deep_linking_content_endpoint' ), url( r'lti_consumer/v1/lti/(?P<lti_config_id>[-\w]+)/', include(router.urls) ), ]
agpl-3.0
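Because the patterns above are registered under the lti_consumer namespace, callers can reverse them rather than hand-building paths. The config id below is a made-up placeholder; the resulting path sits under whatever prefix the host platform mounts these urlpatterns at:

from django.urls import reverse

# Hypothetical config id; only the shape of the lookup matters here.
path = reverse(
    'lti_consumer:lti_consumer.deep_linking_content_endpoint',
    kwargs={'lti_config_id': 'c0ffee00-aaaa-bbbb-cccc-000000000000'},
)
# -> '.../lti_consumer/v1/lti/c0ffee00-.../lti-dl/content'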
yuanyelele/solfege
solfege/instrumentselector.py
1
6075
# GNU Solfege - free ear training software # Copyright (C) 2000, 2001, 2002, 2003, 2004, 2006, 2007, 2008, 2011 Tom Cato Amundsen # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import absolute_import from gi.repository import Gtk from solfege import cfg from solfege import gu from solfege import mpd from solfege import soundcard MAX_VOLUME = 127.0 class MidiInstrumentMenu(Gtk.Menu): def __init__(self, callback): Gtk.Menu.__init__(self) self.m_callback = callback for x in range(len(soundcard.instrument_names)): if x % 8 == 0: menuitem = Gtk.MenuItem(soundcard.instrument_sections[x/8]) submenu = Gtk.Menu() self.append(menuitem) menuitem.set_submenu(submenu) menuitem.show() item = Gtk.MenuItem(soundcard.instrument_names[x]) item.connect('activate', self.on_activate, x) submenu.append(item) item.show() self.show() def on_activate(self, menuitem, instrument): self.m_callback(instrument) class nInstrumentSelector(Gtk.VBox, cfg.ConfigUtils): def __init__(self, exname, name, sizegroup): Gtk.VBox.__init__(self) cfg.ConfigUtils.__dict__['__init__'](self, exname) self.m_name = name hbox = gu.bHBox(self) hbox.set_spacing(gu.PAD_SMALL) self.g_button = Gtk.Button( soundcard.instrument_names[self.get_int(self.m_name)]) self.g_button.connect('clicked', self.on_btnclick) hbox.pack_start(self.g_button, True, True, 0) g = Gtk.VolumeButton() g.props.value = self.get_int('%s_volume' % name) / MAX_VOLUME def ff(volumebutton, value): self.set_int('%s_volume' % name, int(value * MAX_VOLUME)) g.connect('value-changed', ff) hbox.pack_start(g, False, False, 0) self.g_menu = MidiInstrumentMenu(self.on_instrument_selected) self.m_instrument = self.get_int('preferred_instrument') hbox = Gtk.HBox() hbox.set_spacing(6) self.pack_start(hbox, True, True, 0) def on_btnclick(self, *argv): self.g_menu.popup(None, None, None, None, 1, 0) def on_instrument_selected(self, instrument=None): self.set_int(self.m_name, instrument) self.g_button.get_children()[0].set_text(soundcard.instrument_names[instrument]) self.m_instrument = instrument self.play_selected_instrument() def play_selected_instrument(self, _o=None): t = mpd.Track() t.set_patch(self.m_instrument) t.set_volume(self.get_int('%s_volume' % self.m_name)) t.note(4, 60) soundcard.synth.play_track(t) def show(self): self.show_all() def FramedInstrumentSelector(title, exname, varname, sizegroup): box = Gtk.HBox() box.set_spacing(6) label = Gtk.Label(label=title) label.set_alignment(0.0, 0.5) box.pack_start(label, False, False, 0) n = nInstrumentSelector(exname, varname, None) sizegroup.add_widget(label) n.show() box.pack_start(n, True, True, 0) return box class InstrumentConfigurator(Gtk.VBox, cfg.ConfigUtils): def __init__(self, exname, num, labeltext): Gtk.VBox.__init__(self) #cfg.ConfigUtils.__init__(self, exname) cfg.ConfigUtils.__dict__['__init__'](self, exname) assert num in (2, 3) self.m_num = num self.g_override_default_instrument_checkbutton \ = gu.nCheckButton(exname, 
'override_default_instrument', labeltext, callback=self.update_instrument_override) self.pack_start(self.g_override_default_instrument_checkbutton, False, False, 0) hbox = gu.bVBox(self) hbox.set_spacing(gu.PAD_SMALL) sizegroup = Gtk.SizeGroup(Gtk.SizeGroupMode.HORIZONTAL) self.g_instrsel_high = FramedInstrumentSelector(_("Highest:"), exname, 'highest_instrument',sizegroup) hbox.pack_start(self.g_instrsel_high, False, False, 0) if num == 3: self.g_instrsel_middle = FramedInstrumentSelector(_("Middle:"), exname, 'middle_instrument', sizegroup) hbox.pack_start(self.g_instrsel_middle, False, False, 0) else: self.g_instrsel_middle = None self.g_instrsel_low = FramedInstrumentSelector(_("Lowest:"), exname, 'lowest_instrument', sizegroup) hbox.pack_start(self.g_instrsel_low, False, False, 0) self.update_instrument_override() def update(self): self.update_instrument_override() self.g_instrsel_high.update() if self.m_num == 3: self.g_instrsel_middle.update() self.g_instrsel_low.update() def update_instrument_override(self, _o=None): self.g_override_default_instrument_checkbutton.set_active( self.get_bool('override_default_instrument')) self.g_instrsel_high.set_sensitive( self.g_override_default_instrument_checkbutton.get_active()) self.g_instrsel_low.set_sensitive( self.g_override_default_instrument_checkbutton.get_active()) if self.g_instrsel_middle: self.g_instrsel_middle.set_sensitive( self.g_override_default_instrument_checkbutton.get_active()) def show(self): self.show_all()
gpl-3.0
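The VolumeButton wiring above converts between Gtk's 0.0 to 1.0 volume scale and MIDI's 0 to 127 range through MAX_VOLUME; pulled out of the widget code, the mapping is just:

MAX_VOLUME = 127.0


def to_midi(fraction):
    """Map a Gtk volume fraction (0.0-1.0) to a MIDI volume (0-127)."""
    return int(fraction * MAX_VOLUME)


def to_fraction(midi):
    """Map a MIDI volume (0-127) back to a Gtk volume fraction."""
    return midi / MAX_VOLUME


assert to_midi(1.0) == 127
assert to_midi(0.5) == 63  # truncates, matching the int() in the widget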
FujitsuEnablingSoftwareTechnologyGmbH/tempest
tempest/api/baremetal/admin/test_chassis.py
3
3648
# -*- coding: utf-8 -*-
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc

from tempest.api.baremetal.admin import base
from tempest import test


class TestChassis(base.BaseBaremetalTest):
    """Tests for chassis."""

    @classmethod
    def resource_setup(cls):
        super(TestChassis, cls).resource_setup()
        _, cls.chassis = cls.create_chassis()

    def _assertExpected(self, expected, actual):
        # Check that every expected key/value appears in the actual response
        # body, ignoring the server-maintained timestamp fields
        for key, value in expected.iteritems():
            if key not in ('created_at', 'updated_at'):
                self.assertIn(key, actual)
                self.assertEqual(value, actual[key])

    @test.attr(type='smoke')
    @test.idempotent_id('7c5a2e09-699c-44be-89ed-2bc189992d42')
    def test_create_chassis(self):
        descr = data_utils.rand_name('test-chassis')
        _, chassis = self.create_chassis(description=descr)
        self.assertEqual(chassis['description'], descr)

    @test.attr(type='smoke')
    @test.idempotent_id('cabe9c6f-dc16-41a7-b6b9-0a90c212edd5')
    def test_create_chassis_unicode_description(self):
        # Use a unicode string for testing:
        # 'We ♡ OpenStack in Ukraine'
        descr = u'В Україні ♡ OpenStack!'
        _, chassis = self.create_chassis(description=descr)
        self.assertEqual(chassis['description'], descr)

    @test.attr(type='smoke')
    @test.idempotent_id('c84644df-31c4-49db-a307-8942881f41c0')
    def test_show_chassis(self):
        _, chassis = self.client.show_chassis(self.chassis['uuid'])
        self._assertExpected(self.chassis, chassis)

    @test.attr(type="smoke")
    @test.idempotent_id('29c9cd3f-19b5-417b-9864-99512c3b33b3')
    def test_list_chassis(self):
        _, body = self.client.list_chassis()
        self.assertIn(self.chassis['uuid'],
                      [i['uuid'] for i in body['chassis']])

    @test.attr(type='smoke')
    @test.idempotent_id('5ae649ad-22d1-4fe1-bbc6-97227d199fb3')
    def test_delete_chassis(self):
        _, body = self.create_chassis()
        uuid = body['uuid']

        self.delete_chassis(uuid)
        self.assertRaises(lib_exc.NotFound, self.client.show_chassis, uuid)

    @test.attr(type='smoke')
    @test.idempotent_id('cda8a41f-6be2-4cbf-840c-994b00a89b44')
    def test_update_chassis(self):
        _, body = self.create_chassis()
        uuid = body['uuid']

        new_description = data_utils.rand_name('new-description')
        _, body = (self.client.update_chassis(uuid,
                   description=new_description))

        _, chassis = self.client.show_chassis(uuid)
        self.assertEqual(chassis['description'], new_description)

    @test.attr(type='smoke')
    @test.idempotent_id('76305e22-a4e2-4ab3-855c-f4e2368b9335')
    def test_chassis_node_list(self):
        _, node = self.create_node(self.chassis['uuid'])
        _, body = self.client.list_chassis_nodes(self.chassis['uuid'])
        self.assertIn(node['uuid'], [n['uuid'] for n in body['nodes']])
apache-2.0
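_assertExpected above is a subset comparison: every expected key except the server-maintained timestamps must come back unchanged. The same idea as a standalone, framework-free helper (the names are my own):

def assert_subset(expected, actual, ignored=('created_at', 'updated_at')):
    """Assert every non-ignored expected key/value appears in actual."""
    for key, value in expected.items():
        if key in ignored:
            continue  # server-maintained timestamps drift between calls
        assert key in actual, '%s missing from response' % key
        assert actual[key] == value, \
            '%s: %r != %r' % (key, actual[key], value)


assert_subset({'uuid': 'abc', 'created_at': 'then'},
              {'uuid': 'abc', 'created_at': 'now', 'extra': 1})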
opendaylight/archived-net-virt-platform
sdncon/clusterAdmin/models.py
4
5655
#
# Copyright (c) 2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
#      http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#

import datetime
from django.db import models
from django.contrib.auth.models import User

# Create your models here.

class CustomerManager(models.Manager):
    def create_customer(self, name, email):
        """
        Creates and saves a customer with the given name and email
        """
        now = datetime.datetime.now()

        # Normalize the address by lowercasing the domain part of the email address
        try:
            email_name, domain_part = email.strip().split('@', 1)
        except ValueError:
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])

        customer = self.model(customername=name, email=email,
                              is_active=True, date_joined=now)
        customer.save(using=self._db)
        return customer

class ClusterManager(models.Manager):
    def create_cluster(self, name, customer):
        """
        Creates and saves a cluster with the given cluster name and the customer
        """
        now = datetime.datetime.now()
        id = ":".join([customer.customername, name])
        cluster = self.model(id=id, clustername=name, customer=customer,
                             is_active=True, date_joined=now)
        cluster.save(using=self._db)
        return cluster

class CustomerUserManager(models.Manager):
    def create_customerUser(self, user, customer):
        """
        Creates and saves a customer user membership
        """
        id = ":".join([customer.customername, user.username])
        cu = self.model(id=id, user=user, customer=customer)
        cu.save(using=self._db)
        return cu

class Customer(models.Model):
    """
    Customer defines a customer in the cloud server.
    """
    customername = models.CharField('customer name', primary_key=True, max_length=30,
        help_text="30 characters or fewer. Letters, numbers and @/./+/-/_ characters")
    email = models.EmailField('e-mail address', blank=True)
    is_active = models.BooleanField('active', default=True,
        help_text="Designates whether this Customer should be treated as active. Unselect this instead of deleting accounts.")
    date_joined = models.DateTimeField('date joined', default=datetime.datetime.now)

    objects = CustomerManager()

    def __unicode__(self):
        return self.customername

class Cluster(models.Model):
    """
    Cluster defines a cluster of nodes in the cloud server.
    """
    id = models.CharField(
        primary_key=True,
        verbose_name='Cluster ID',
        max_length=75,
        help_text='Unique identifier for the cluster; format is customername:clustername')
    clustername = models.CharField('cluster name', max_length=30,
        help_text="Required. 30 characters or fewer. Letters, numbers and @/./+/-/_ characters")
    is_active = models.BooleanField('active', default=True,
        help_text="Designates whether this cluster should be treated as active. Unselect this instead of deleting the cluster.")
    date_joined = models.DateTimeField('date joined', default=datetime.datetime.now)
    customer = models.ForeignKey(Customer, on_delete=models.CASCADE)

    objects = ClusterManager()

    def __unicode__(self):
        return self.id

class CustomerUser(models.Model):
    """
    This is a temporary model that captures the list of users in a given customer
    """
    id = models.CharField(
        primary_key=True,
        verbose_name='Customer User',
        max_length=75,
        help_text='Unique relationship that shows the customer to which the user belongs; \
            format is customername:username')
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    customer = models.ForeignKey(Customer, on_delete=models.CASCADE)

    objects = CustomerUserManager()

    def __unicode__(self):
        return self.id

class AuthToken(models.Model):
    """
    Store the authentication token as an ascii string
    Associate various credential options: user, cluster, customer.
    At least one of these must be populated to be a valid entry.
    """
    id = models.CharField(
        primary_key=True,
        max_length=64)
    cluster = models.ForeignKey(
        Cluster,
        blank=True,
        null=True)
    user = models.ForeignKey(
        User,
        blank=True,
        null=True)
    customer = models.ForeignKey(
        Customer,
        blank=True,
        null=True)
    expiration_date = models.DateTimeField(
        verbose_name='Expiration Date',
        help_text='Date when the authentication token expires',
        blank=True,
        null=True)
    annotation = models.CharField(
        verbose_name='Annotation',
        help_text='Track creation information such as user',
        max_length=512,
        blank=True,
        null=True)

    def __unicode__(self):
        return str(self.id)
epl-1.0
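A sketch of how the three managers above compose, for example from a Django shell with this app installed; all names are invented:

from django.contrib.auth.models import User
from sdncon.clusterAdmin.models import Customer, Cluster, CustomerUser

customer = Customer.objects.create_customer('acme', 'Ops@ACME.example')
cluster = Cluster.objects.create_cluster('prod', customer)
user = User.objects.create_user('alice', 'alice@acme.example', 's3cret')
membership = CustomerUser.objects.create_customerUser(user, customer)

print(cluster.id)      # 'acme:prod'
print(membership.id)   # 'acme:alice'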
iLoop2/ResInsight
ThirdParty/Ert/devel/python/test/ert_tests/enkf/test_obs_block.py
1
1173
import datetime from ert.util import BoolVector from ert.test import TestAreaContext from ert.test import ExtendedTestCase from ert.enkf import ObsBlock class ObsBlockTest(ExtendedTestCase): def test_create(self): block = ObsBlock("OBS" , 1000) self.assertTrue( isinstance( block , ObsBlock )) self.assertEqual( 1000 , block.totalSize()) self.assertEqual( 0 , block.activeSize()) def test_access(self): obs_size = 10 block = ObsBlock("OBS" , obs_size) with self.assertRaises(IndexError): block[100] = (1,1) with self.assertRaises(IndexError): block[-1] = (1,1) with self.assertRaises(TypeError): block[4] = 10 with self.assertRaises(TypeError): block[4] = (1,1,9) #------ with self.assertRaises(IndexError): v = block[100] with self.assertRaises(IndexError): v = block[-1] block[0] = (10,1) v = block[0] self.assertEqual( v , (10,1)) self.assertEqual( 1 , block.activeSize())
gpl-3.0
fragaria/suds
suds/soaparray.py
205
2262
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )

"""
The I{soaparray} module provides XSD extensions for handling
soap (section 5) encoded arrays.
"""

from suds import *
from logging import getLogger
from suds.xsd.sxbasic import Factory as SXFactory
from suds.xsd.sxbasic import Attribute as SXAttribute


class Attribute(SXAttribute):
    """
    Represents an XSD <attribute/> that handles special
    attributes that are extensions for WSDLs.
    @ivar aty: Array type information.
    @type aty: The value of wsdl:arrayType.
    """

    def __init__(self, schema, root, aty):
        """
        @param aty: Array type information.
        @type aty: The value of wsdl:arrayType.
        """
        SXAttribute.__init__(self, schema, root)
        if aty.endswith('[]'):
            self.aty = aty[:-2]
        else:
            self.aty = aty

    def autoqualified(self):
        aqs = SXAttribute.autoqualified(self)
        aqs.append('aty')
        return aqs

    def description(self):
        d = SXAttribute.description(self)
        d = d + ('aty',)
        return d

#
# Builder function, only builds Attribute when the arrayType
# attribute is defined on root.
#
def __fn(x, y):
    ns = (None, "http://schemas.xmlsoap.org/wsdl/")
    aty = y.get('arrayType', ns=ns)
    if aty is None:
        return SXAttribute(x, y)
    else:
        return Attribute(x, y, aty)

#
# Remap <xs:attribute/> tags to the __fn() builder.
#
SXFactory.maptag('attribute', __fn)
lgpl-3.0
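The maptag remapping at the bottom of this module is a small extension-point pattern: a factory owns a tag-to-constructor table, and the extension swaps in a builder that picks the richer subclass only when the WSDL attribute is present. A generic, dependency-free sketch of the same shape:

class Factory(object):
    """Tag-to-constructor registry, analogous to sxbasic.Factory."""
    tags = {}

    @classmethod
    def maptag(cls, tag, constructor):
        cls.tags[tag] = constructor

    @classmethod
    def create(cls, tag, node):
        return cls.tags[tag](node)


class Attribute(object):
    def __init__(self, node):
        self.node = node


class ArrayAttribute(Attribute):
    def __init__(self, node, aty):
        Attribute.__init__(self, node)
        # strip the trailing [] exactly as the suds subclass does
        self.aty = aty[:-2] if aty.endswith('[]') else aty


def _build_attribute(node):
    # Pick the richer subclass only when the extension attribute exists.
    aty = node.get('arrayType')
    if aty is None:
        return Attribute(node)
    return ArrayAttribute(node, aty)


Factory.maptag('attribute', Attribute)          # default wiring
Factory.maptag('attribute', _build_attribute)   # the extension overrides it

a = Factory.create('attribute', {'arrayType': 'xsd:string[]'})
print(a.aty)  # xsd:string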
hgl888/chromium-crosswalk-efl
net/tools/testserver/testserver.py
26
82574
#!/usr/bin/env python # Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """This is a simple HTTP/FTP/TCP/UDP/BASIC_AUTH_PROXY/WEBSOCKET server used for testing Chrome. It supports several test URLs, as specified by the handlers in TestPageHandler. By default, it listens on an ephemeral port and sends the port number back to the originating process over a pipe. The originating process can specify an explicit port if necessary. It can use https if you specify the flag --https=CERT where CERT is the path to a pem file containing the certificate and private key that should be used. """ import base64 import BaseHTTPServer import cgi import hashlib import logging import minica import os import json import random import re import select import socket import SocketServer import ssl import struct import sys import threading import time import urllib import urlparse import zlib BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(BASE_DIR))) # Temporary hack to deal with tlslite 0.3.8 -> 0.4.6 upgrade. # # TODO(davidben): Remove this when it has cycled through all the bots and # developer checkouts or when http://crbug.com/356276 is resolved. try: os.remove(os.path.join(ROOT_DIR, 'third_party', 'tlslite', 'tlslite', 'utils', 'hmac.pyc')) except Exception: pass # Append at the end of sys.path, it's fine to use the system library. sys.path.append(os.path.join(ROOT_DIR, 'third_party', 'pyftpdlib', 'src')) # Insert at the beginning of the path, we want to use our copies of the library # unconditionally. sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'pywebsocket', 'src')) sys.path.insert(0, os.path.join(ROOT_DIR, 'third_party', 'tlslite')) import mod_pywebsocket.standalone from mod_pywebsocket.standalone import WebSocketServer # import manually mod_pywebsocket.standalone.ssl = ssl import pyftpdlib.ftpserver import tlslite import tlslite.api import echo_message import testserver_base SERVER_HTTP = 0 SERVER_FTP = 1 SERVER_TCP_ECHO = 2 SERVER_UDP_ECHO = 3 SERVER_BASIC_AUTH_PROXY = 4 SERVER_WEBSOCKET = 5 # Default request queue size for WebSocketServer. 
_DEFAULT_REQUEST_QUEUE_SIZE = 128 class WebSocketOptions: """Holds options for WebSocketServer.""" def __init__(self, host, port, data_dir): self.request_queue_size = _DEFAULT_REQUEST_QUEUE_SIZE self.server_host = host self.port = port self.websock_handlers = data_dir self.scan_dir = None self.allow_handlers_outside_root_dir = False self.websock_handlers_map_file = None self.cgi_directories = [] self.is_executable_method = None self.allow_draft75 = False self.strict = True self.use_tls = False self.private_key = None self.certificate = None self.tls_client_auth = False self.tls_client_ca = None self.tls_module = 'ssl' self.use_basic_auth = False self.basic_auth_credential = 'Basic ' + base64.b64encode('test:test') class RecordingSSLSessionCache(object): """RecordingSSLSessionCache acts as a TLS session cache and maintains a log of lookups and inserts in order to test session cache behaviours.""" def __init__(self): self.log = [] def __getitem__(self, sessionID): self.log.append(('lookup', sessionID)) raise KeyError() def __setitem__(self, sessionID, session): self.log.append(('insert', sessionID)) class HTTPServer(testserver_base.ClientRestrictingServerMixIn, testserver_base.BrokenPipeHandlerMixIn, testserver_base.StoppableHTTPServer): """This is a specialization of StoppableHTTPServer that adds client verification.""" pass class OCSPServer(testserver_base.ClientRestrictingServerMixIn, testserver_base.BrokenPipeHandlerMixIn, BaseHTTPServer.HTTPServer): """This is a specialization of HTTPServer that serves an OCSP response""" def serve_forever_on_thread(self): self.thread = threading.Thread(target = self.serve_forever, name = "OCSPServerThread") self.thread.start() def stop_serving(self): self.shutdown() self.thread.join() class HTTPSServer(tlslite.api.TLSSocketServerMixIn, testserver_base.ClientRestrictingServerMixIn, testserver_base.BrokenPipeHandlerMixIn, testserver_base.StoppableHTTPServer): """This is a specialization of StoppableHTTPServer that add https support and client verification.""" def __init__(self, server_address, request_hander_class, pem_cert_and_key, ssl_client_auth, ssl_client_cas, ssl_client_cert_types, ssl_bulk_ciphers, ssl_key_exchanges, enable_npn, record_resume_info, tls_intolerant, tls_intolerance_type, signed_cert_timestamps, fallback_scsv_enabled, ocsp_response, disable_session_cache): self.cert_chain = tlslite.api.X509CertChain() self.cert_chain.parsePemList(pem_cert_and_key) # Force using only python implementation - otherwise behavior is different # depending on whether m2crypto Python module is present (error is thrown # when it is). m2crypto uses a C (based on OpenSSL) implementation under # the hood. 
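    # (Added sketch: the call below parses a combined PEM blob containing
    # both certificate and private key, e.g. the concatenation of cert.pem
    # and key.pem. A standalone equivalent, using the same API as here:
    #
    #   key = tlslite.api.parsePEMKey(open('cert-and-key.pem').read(),
    #                                 private=True,
    #                                 implementations=['python'])
    # )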
self.private_key = tlslite.api.parsePEMKey(pem_cert_and_key, private=True, implementations=['python']) self.ssl_client_auth = ssl_client_auth self.ssl_client_cas = [] self.ssl_client_cert_types = [] if enable_npn: self.next_protos = ['http/1.1'] else: self.next_protos = None self.signed_cert_timestamps = signed_cert_timestamps self.fallback_scsv_enabled = fallback_scsv_enabled self.ocsp_response = ocsp_response if ssl_client_auth: for ca_file in ssl_client_cas: s = open(ca_file).read() x509 = tlslite.api.X509() x509.parse(s) self.ssl_client_cas.append(x509.subject) for cert_type in ssl_client_cert_types: self.ssl_client_cert_types.append({ "rsa_sign": tlslite.api.ClientCertificateType.rsa_sign, "dss_sign": tlslite.api.ClientCertificateType.dss_sign, "ecdsa_sign": tlslite.api.ClientCertificateType.ecdsa_sign, }[cert_type]) self.ssl_handshake_settings = tlslite.api.HandshakeSettings() if ssl_bulk_ciphers is not None: self.ssl_handshake_settings.cipherNames = ssl_bulk_ciphers if ssl_key_exchanges is not None: self.ssl_handshake_settings.keyExchangeNames = ssl_key_exchanges if tls_intolerant != 0: self.ssl_handshake_settings.tlsIntolerant = (3, tls_intolerant) self.ssl_handshake_settings.tlsIntoleranceType = tls_intolerance_type if disable_session_cache: self.session_cache = None elif record_resume_info: # If record_resume_info is true then we'll replace the session cache with # an object that records the lookups and inserts that it sees. self.session_cache = RecordingSSLSessionCache() else: self.session_cache = tlslite.api.SessionCache() testserver_base.StoppableHTTPServer.__init__(self, server_address, request_hander_class) def handshake(self, tlsConnection): """Creates the SSL connection.""" try: self.tlsConnection = tlsConnection tlsConnection.handshakeServer(certChain=self.cert_chain, privateKey=self.private_key, sessionCache=self.session_cache, reqCert=self.ssl_client_auth, settings=self.ssl_handshake_settings, reqCAs=self.ssl_client_cas, reqCertTypes=self.ssl_client_cert_types, nextProtos=self.next_protos, signedCertTimestamps= self.signed_cert_timestamps, fallbackSCSV=self.fallback_scsv_enabled, ocspResponse = self.ocsp_response) tlsConnection.ignoreAbruptClose = True return True except tlslite.api.TLSAbruptCloseError: # Ignore abrupt close. 
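      # (Added note: some test clients tear the connection down with a bare
      # TCP RST instead of a close_notify alert once they have the bytes
      # they need; treating that as a successful handshake keeps such tests
      # from flaking.)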
return True except tlslite.api.TLSError, error: print "Handshake failure:", str(error) return False class FTPServer(testserver_base.ClientRestrictingServerMixIn, pyftpdlib.ftpserver.FTPServer): """This is a specialization of FTPServer that adds client verification.""" pass class TCPEchoServer(testserver_base.ClientRestrictingServerMixIn, SocketServer.TCPServer): """A TCP echo server that echoes back what it has received.""" def server_bind(self): """Override server_bind to store the server name.""" SocketServer.TCPServer.server_bind(self) host, port = self.socket.getsockname()[:2] self.server_name = socket.getfqdn(host) self.server_port = port def serve_forever(self): self.stop = False self.nonce_time = None while not self.stop: self.handle_request() self.socket.close() class UDPEchoServer(testserver_base.ClientRestrictingServerMixIn, SocketServer.UDPServer): """A UDP echo server that echoes back what it has received.""" def server_bind(self): """Override server_bind to store the server name.""" SocketServer.UDPServer.server_bind(self) host, port = self.socket.getsockname()[:2] self.server_name = socket.getfqdn(host) self.server_port = port def serve_forever(self): self.stop = False self.nonce_time = None while not self.stop: self.handle_request() self.socket.close() class TestPageHandler(testserver_base.BasePageHandler): # Class variables to allow for persistence state between page handler # invocations rst_limits = {} fail_precondition = {} def __init__(self, request, client_address, socket_server): connect_handlers = [ self.RedirectConnectHandler, self.ServerAuthConnectHandler, self.DefaultConnectResponseHandler] get_handlers = [ self.NoCacheMaxAgeTimeHandler, self.NoCacheTimeHandler, self.CacheTimeHandler, self.CacheExpiresHandler, self.CacheProxyRevalidateHandler, self.CachePrivateHandler, self.CachePublicHandler, self.CacheSMaxAgeHandler, self.CacheMustRevalidateHandler, self.CacheMustRevalidateMaxAgeHandler, self.CacheNoStoreHandler, self.CacheNoStoreMaxAgeHandler, self.CacheNoTransformHandler, self.DownloadHandler, self.DownloadFinishHandler, self.EchoHeader, self.EchoHeaderCache, self.EchoAllHandler, self.ZipFileHandler, self.FileHandler, self.SetCookieHandler, self.SetManyCookiesHandler, self.ExpectAndSetCookieHandler, self.SetHeaderHandler, self.AuthBasicHandler, self.AuthDigestHandler, self.SlowServerHandler, self.ChunkedServerHandler, self.ContentTypeHandler, self.NoContentHandler, self.ServerRedirectHandler, self.ClientRedirectHandler, self.GetSSLSessionCacheHandler, self.SSLManySmallRecords, self.GetChannelID, self.ClientCipherListHandler, self.CloseSocketHandler, self.RangeResetHandler, self.DefaultResponseHandler] post_handlers = [ self.EchoTitleHandler, self.EchoHandler, self.PostOnlyFileHandler, self.EchoMultipartPostHandler] + get_handlers put_handlers = [ self.EchoTitleHandler, self.EchoHandler] + get_handlers head_handlers = [ self.FileHandler, self.DefaultResponseHandler] self._mime_types = { 'crx' : 'application/x-chrome-extension', 'exe' : 'application/octet-stream', 'gif': 'image/gif', 'jpeg' : 'image/jpeg', 'jpg' : 'image/jpeg', 'json': 'application/json', 'pdf' : 'application/pdf', 'txt' : 'text/plain', 'wav' : 'audio/wav', 'xml' : 'text/xml' } self._default_mime_type = 'text/html' testserver_base.BasePageHandler.__init__(self, request, client_address, socket_server, connect_handlers, get_handlers, head_handlers, post_handlers, put_handlers) def GetMIMETypeFromName(self, file_name): """Returns the mime type for the specified file_name. 
So far it only looks at the file extension.""" (_shortname, extension) = os.path.splitext(file_name.split("?")[0]) if len(extension) == 0: # no extension. return self._default_mime_type # extension starts with a dot, so we need to remove it return self._mime_types.get(extension[1:], self._default_mime_type) def NoCacheMaxAgeTimeHandler(self): """This request handler yields a page with the title set to the current system time, and no caching requested.""" if not self._ShouldHandleRequest("/nocachetime/maxage"): return False self.send_response(200) self.send_header('Cache-Control', 'max-age=0') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def NoCacheTimeHandler(self): """This request handler yields a page with the title set to the current system time, and no caching requested.""" if not self._ShouldHandleRequest("/nocachetime"): return False self.send_response(200) self.send_header('Cache-Control', 'no-cache') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheTimeHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for one minute.""" if not self._ShouldHandleRequest("/cachetime"): return False self.send_response(200) self.send_header('Cache-Control', 'max-age=60') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheExpiresHandler(self): """This request handler yields a page with the title set to the current system time, and set the page to expire on 1 Jan 2099.""" if not self._ShouldHandleRequest("/cache/expires"): return False self.send_response(200) self.send_header('Expires', 'Thu, 1 Jan 2099 00:00:00 GMT') self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheProxyRevalidateHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for 60 seconds""" if not self._ShouldHandleRequest("/cache/proxy-revalidate"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, proxy-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CachePrivateHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for 5 seconds.""" if not self._ShouldHandleRequest("/cache/private"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=3, private') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CachePublicHandler(self): """This request handler yields a page with the title set to the current system time, and allows caching for 5 seconds.""" if not self._ShouldHandleRequest("/cache/public"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=3, public') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheSMaxAgeHandler(self): """This request handler yields a page with the 
title set to the current system time, and does not allow for caching.""" if not self._ShouldHandleRequest("/cache/s-maxage"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'public, s-maxage = 60, max-age = 0') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheMustRevalidateHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow caching.""" if not self._ShouldHandleRequest("/cache/must-revalidate"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'must-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheMustRevalidateMaxAgeHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow caching event though max-age of 60 seconds is specified.""" if not self._ShouldHandleRequest("/cache/must-revalidate/max-age"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, must-revalidate') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoStoreHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the page to be stored.""" if not self._ShouldHandleRequest("/cache/no-store"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'no-store') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoStoreMaxAgeHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the page to be stored even though max-age of 60 seconds is specified.""" if not self._ShouldHandleRequest("/cache/no-store/max-age"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=60, no-store') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def CacheNoTransformHandler(self): """This request handler yields a page with the title set to the current system time, and does not allow the content to transformed during user-agent caching""" if not self._ShouldHandleRequest("/cache/no-transform"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'no-transform') self.end_headers() self.wfile.write('<html><head><title>%s</title></head></html>' % time.time()) return True def EchoHeader(self): """This handler echoes back the value of a specific request header.""" return self.EchoHeaderHelper("/echoheader") def EchoHeaderCache(self): """This function echoes back the value of a specific request header while allowing caching for 16 hours.""" return self.EchoHeaderHelper("/echoheadercache") def EchoHeaderHelper(self, echo_header): """This function echoes back the value of the request header passed in.""" if not self._ShouldHandleRequest(echo_header): return False query_char = self.path.find('?') if query_char != -1: header_name = self.path[query_char+1:] self.send_response(200) self.send_header('Content-Type', 'text/plain') if echo_header == '/echoheadercache': 
self.send_header('Cache-control', 'max-age=60000') else: self.send_header('Cache-control', 'no-cache') # insert a vary header to properly indicate that the cachability of this # request is subject to value of the request header being echoed. if len(header_name) > 0: self.send_header('Vary', header_name) self.end_headers() if len(header_name) > 0: self.wfile.write(self.headers.getheader(header_name)) return True def ReadRequestBody(self): """This function reads the body of the current HTTP request, handling both plain and chunked transfer encoded requests.""" if self.headers.getheader('transfer-encoding') != 'chunked': length = int(self.headers.getheader('content-length')) return self.rfile.read(length) # Read the request body as chunks. body = "" while True: line = self.rfile.readline() length = int(line, 16) if length == 0: self.rfile.readline() break body += self.rfile.read(length) self.rfile.read(2) return body def EchoHandler(self): """This handler just echoes back the payload of the request, for testing form submission.""" if not self._ShouldHandleRequest("/echo"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write(self.ReadRequestBody()) return True def EchoTitleHandler(self): """This handler is like Echo, but sets the page title to the request.""" if not self._ShouldHandleRequest("/echotitle"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() request = self.ReadRequestBody() self.wfile.write('<html><head><title>') self.wfile.write(request) self.wfile.write('</title></head></html>') return True def EchoAllHandler(self): """This handler yields a (more) human-readable page listing information about the request header & contents.""" if not self._ShouldHandleRequest("/echoall"): return False self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head><style>' 'pre { border: 1px solid black; margin: 5px; padding: 5px }' '</style></head><body>' '<div style="float: right">' '<a href="/echo">back to referring page</a></div>' '<h1>Request Body:</h1><pre>') if self.command == 'POST' or self.command == 'PUT': qs = self.ReadRequestBody() params = cgi.parse_qs(qs, keep_blank_values=1) for param in params: self.wfile.write('%s=%s\n' % (param, params[param][0])) self.wfile.write('</pre>') self.wfile.write('<h1>Request Headers:</h1><pre>%s</pre>' % self.headers) self.wfile.write('</body></html>') return True def EchoMultipartPostHandler(self): """This handler echoes received multipart post data as json format.""" if not (self._ShouldHandleRequest("/echomultipartpost") or self._ShouldHandleRequest("/searchbyimage")): return False content_type, parameters = cgi.parse_header( self.headers.getheader('content-type')) if content_type == 'multipart/form-data': post_multipart = cgi.parse_multipart(self.rfile, parameters) elif content_type == 'application/x-www-form-urlencoded': raise Exception('POST by application/x-www-form-urlencoded is ' 'not implemented.') else: post_multipart = {} # Since the data can be binary, we encode them by base64. 
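    # (Added illustration of the round trip, using only the stdlib call
    # below: base64.b64decode(base64.b64encode('\x00\xffbin')) returns
    # '\x00\xffbin', so a test client can recover the exact posted bytes
    # from the JSON reply.)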
post_multipart_base64_encoded = {} for field, values in post_multipart.items(): post_multipart_base64_encoded[field] = [base64.b64encode(value) for value in values] result = {'POST_multipart' : post_multipart_base64_encoded} self.send_response(200) self.send_header("Content-type", "text/plain") self.end_headers() self.wfile.write(json.dumps(result, indent=2, sort_keys=False)) return True def DownloadHandler(self): """This handler sends a downloadable file with or without reporting the size (6K).""" if self.path.startswith("/download-unknown-size"): send_length = False elif self.path.startswith("/download-known-size"): send_length = True else: return False # # The test which uses this functionality is attempting to send # small chunks of data to the client. Use a fairly large buffer # so that we'll fill chrome's IO buffer enough to force it to # actually write the data. # See also the comments in the client-side of this test in # download_uitest.cc # size_chunk1 = 35*1024 size_chunk2 = 10*1024 self.send_response(200) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Cache-Control', 'max-age=0') if send_length: self.send_header('Content-Length', size_chunk1 + size_chunk2) self.end_headers() # First chunk of data: self.wfile.write("*" * size_chunk1) self.wfile.flush() # handle requests until one of them clears this flag. self.server.wait_for_download = True while self.server.wait_for_download: self.server.handle_request() # Second chunk of data: self.wfile.write("*" * size_chunk2) return True def DownloadFinishHandler(self): """This handler just tells the server to finish the current download.""" if not self._ShouldHandleRequest("/download-finish"): return False self.server.wait_for_download = False self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-Control', 'max-age=0') self.end_headers() return True def _ReplaceFileData(self, data, query_parameters): """Replaces matching substrings in a file. If the 'replace_text' URL query parameter is present, it is expected to be of the form old_text:new_text, which indicates that any old_text strings in the file are replaced with new_text. Multiple 'replace_text' parameters may be specified. If the parameters are not present, |data| is returned. """ query_dict = cgi.parse_qs(query_parameters) replace_text_values = query_dict.get('replace_text', []) for replace_text_value in replace_text_values: replace_text_args = replace_text_value.split(':') if len(replace_text_args) != 2: raise ValueError( 'replace_text must be of form old_text:new_text. Actual value: %s' % replace_text_value) old_text_b64, new_text_b64 = replace_text_args old_text = base64.urlsafe_b64decode(old_text_b64) new_text = base64.urlsafe_b64decode(new_text_b64) data = data.replace(old_text, new_text) return data def ZipFileHandler(self): """This handler sends the contents of the requested file in compressed form. Can pass in a parameter that specifies that the content length be C - the compressed size (OK), U - the uncompressed size (Non-standard, but handled), S - less than compressed (OK because we keep going), M - larger than compressed but less than uncompressed (an error), L - larger than uncompressed (an error) Example: compressedfiles/Picture_1.doc?C """ prefix = "/compressedfiles/" if not self.path.startswith(prefix): return False # Consume a request body if present. 
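    # (Added note: on a kept-alive connection an unread body would be
    # misparsed as the start of the next request, so it must be drained
    # even though this handler ignores its contents.)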
if self.command == 'POST' or self.command == 'PUT' : self.ReadRequestBody() _, _, url_path, _, query, _ = urlparse.urlparse(self.path) if not query in ('C', 'U', 'S', 'M', 'L'): return False sub_path = url_path[len(prefix):] entries = sub_path.split('/') file_path = os.path.join(self.server.data_dir, *entries) if os.path.isdir(file_path): file_path = os.path.join(file_path, 'index.html') if not os.path.isfile(file_path): print "File not found " + sub_path + " full path:" + file_path self.send_error(404) return True f = open(file_path, "rb") data = f.read() uncompressed_len = len(data) f.close() # Compress the data. data = zlib.compress(data) compressed_len = len(data) content_length = compressed_len if query == 'U': content_length = uncompressed_len elif query == 'S': content_length = compressed_len / 2 elif query == 'M': content_length = (compressed_len + uncompressed_len) / 2 elif query == 'L': content_length = compressed_len + uncompressed_len self.send_response(200) self.send_header('Content-Type', 'application/msword') self.send_header('Content-encoding', 'deflate') self.send_header('Connection', 'close') self.send_header('Content-Length', content_length) self.send_header('ETag', '\'' + file_path + '\'') self.end_headers() self.wfile.write(data) return True def FileHandler(self): """This handler sends the contents of the requested file. Wow, it's like a real webserver!""" prefix = self.server.file_root_url if not self.path.startswith(prefix): return False return self._FileHandlerHelper(prefix) def PostOnlyFileHandler(self): """This handler sends the contents of the requested file on a POST.""" prefix = urlparse.urljoin(self.server.file_root_url, 'post/') if not self.path.startswith(prefix): return False return self._FileHandlerHelper(prefix) def _FileHandlerHelper(self, prefix): request_body = '' if self.command == 'POST' or self.command == 'PUT': # Consume a request body if present. request_body = self.ReadRequestBody() _, _, url_path, _, query, _ = urlparse.urlparse(self.path) query_dict = cgi.parse_qs(query) expected_body = query_dict.get('expected_body', []) if expected_body and request_body not in expected_body: self.send_response(404) self.end_headers() self.wfile.write('') return True expected_headers = query_dict.get('expected_headers', []) for expected_header in expected_headers: header_name, expected_value = expected_header.split(':') if self.headers.getheader(header_name) != expected_value: self.send_response(404) self.end_headers() self.wfile.write('') return True sub_path = url_path[len(prefix):] entries = sub_path.split('/') file_path = os.path.join(self.server.data_dir, *entries) if os.path.isdir(file_path): file_path = os.path.join(file_path, 'index.html') if not os.path.isfile(file_path): print "File not found " + sub_path + " full path:" + file_path self.send_error(404) return True f = open(file_path, "rb") data = f.read() f.close() data = self._ReplaceFileData(data, query) old_protocol_version = self.protocol_version # If file.mock-http-headers exists, it contains the headers we # should send. Read them in and parse them. 
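    # (Added example of a mock-http-headers file, hypothetical but matching
    # the parsing below:
    #
    #   HTTP/1.1 301 Moved Permanently
    #   Location: /files/index.html
    #   Content-Type: text/html
    #
    # The first line supplies the protocol version and status code; every
    # following "name: value" line is replayed verbatim via send_header().)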
headers_path = file_path + '.mock-http-headers' if os.path.isfile(headers_path): f = open(headers_path, "r") # "HTTP/1.1 200 OK" response = f.readline() http_major, http_minor, status_code = re.findall( 'HTTP/(\d+).(\d+) (\d+)', response)[0] self.protocol_version = "HTTP/%s.%s" % (http_major, http_minor) self.send_response(int(status_code)) for line in f: header_values = re.findall('(\S+):\s*(.*)', line) if len(header_values) > 0: # "name: value" name, value = header_values[0] self.send_header(name, value) f.close() else: # Could be more generic once we support mime-type sniffing, but for # now we need to set it explicitly. range_header = self.headers.get('Range') if range_header and range_header.startswith('bytes='): # Note this doesn't handle all valid byte range_header values (i.e. # left open ended ones), just enough for what we needed so far. range_header = range_header[6:].split('-') start = int(range_header[0]) if range_header[1]: end = int(range_header[1]) else: end = len(data) - 1 self.send_response(206) content_range = ('bytes ' + str(start) + '-' + str(end) + '/' + str(len(data))) self.send_header('Content-Range', content_range) data = data[start: end + 1] else: self.send_response(200) self.send_header('Content-Type', self.GetMIMETypeFromName(file_path)) self.send_header('Accept-Ranges', 'bytes') self.send_header('Content-Length', len(data)) self.send_header('ETag', '\'' + file_path + '\'') self.end_headers() if (self.command != 'HEAD'): self.wfile.write(data) self.protocol_version = old_protocol_version return True def SetCookieHandler(self): """This handler just sets a cookie, for testing cookie handling.""" if not self._ShouldHandleRequest("/set-cookie"): return False query_char = self.path.find('?') if query_char != -1: cookie_values = self.path[query_char + 1:].split('&') else: cookie_values = ("",) self.send_response(200) self.send_header('Content-Type', 'text/html') for cookie_value in cookie_values: self.send_header('Set-Cookie', '%s' % cookie_value) self.end_headers() for cookie_value in cookie_values: self.wfile.write('%s' % cookie_value) return True def SetManyCookiesHandler(self): """This handler just sets a given number of cookies, for testing handling of large numbers of cookies.""" if not self._ShouldHandleRequest("/set-many-cookies"): return False query_char = self.path.find('?') if query_char != -1: num_cookies = int(self.path[query_char + 1:]) else: num_cookies = 0 self.send_response(200) self.send_header('', 'text/html') for _i in range(0, num_cookies): self.send_header('Set-Cookie', 'a=') self.end_headers() self.wfile.write('%d cookies were sent' % num_cookies) return True def ExpectAndSetCookieHandler(self): """Expects some cookies to be sent, and if they are, sets more cookies. The expect parameter specifies a required cookie. May be specified multiple times. The set parameter specifies a cookie to set if all required cookies are preset. May be specified multiple times. 
The data parameter specifies the response body data to be returned.""" if not self._ShouldHandleRequest("/expect-and-set-cookie"): return False _, _, _, _, query, _ = urlparse.urlparse(self.path) query_dict = cgi.parse_qs(query) cookies = set() if 'Cookie' in self.headers: cookie_header = self.headers.getheader('Cookie') cookies.update([s.strip() for s in cookie_header.split(';')]) got_all_expected_cookies = True for expected_cookie in query_dict.get('expect', []): if expected_cookie not in cookies: got_all_expected_cookies = False self.send_response(200) self.send_header('Content-Type', 'text/html') if got_all_expected_cookies: for cookie_value in query_dict.get('set', []): self.send_header('Set-Cookie', '%s' % cookie_value) self.end_headers() for data_value in query_dict.get('data', []): self.wfile.write(data_value) return True def SetHeaderHandler(self): """This handler sets a response header. Parameters are in the key%3A%20value&key2%3A%20value2 format.""" if not self._ShouldHandleRequest("/set-header"): return False query_char = self.path.find('?') if query_char != -1: headers_values = self.path[query_char + 1:].split('&') else: headers_values = ("",) self.send_response(200) self.send_header('Content-Type', 'text/html') for header_value in headers_values: header_value = urllib.unquote(header_value) (key, value) = header_value.split(': ', 1) self.send_header(key, value) self.end_headers() for header_value in headers_values: self.wfile.write('%s' % header_value) return True def AuthBasicHandler(self): """This handler tests 'Basic' authentication. It just sends a page with title 'user/pass' if you succeed.""" if not self._ShouldHandleRequest("/auth-basic"): return False username = userpass = password = b64str = "" expected_password = 'secret' realm = 'testrealm' set_cookie_if_challenged = False _, _, url_path, _, query, _ = urlparse.urlparse(self.path) query_params = cgi.parse_qs(query, True) if 'set-cookie-if-challenged' in query_params: set_cookie_if_challenged = True if 'password' in query_params: expected_password = query_params['password'][0] if 'realm' in query_params: realm = query_params['realm'][0] auth = self.headers.getheader('authorization') try: if not auth: raise Exception('no auth') b64str = re.findall(r'Basic (\S+)', auth)[0] userpass = base64.b64decode(b64str) username, password = re.findall(r'([^:]+):(\S+)', userpass)[0] if password != expected_password: raise Exception('wrong password') except Exception, e: # Authentication failed. self.send_response(401) self.send_header('WWW-Authenticate', 'Basic realm="%s"' % realm) self.send_header('Content-Type', 'text/html') if set_cookie_if_challenged: self.send_header('Set-Cookie', 'got_challenged=true') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>Denied: %s</title>' % e) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('b64str=%s<p>' % b64str) self.wfile.write('username: %s<p>' % username) self.wfile.write('userpass: %s<p>' % userpass) self.wfile.write('password: %s<p>' % password) self.wfile.write('You sent:<br>%s<p>' % self.headers) self.wfile.write('</body></html>') return True # Authentication successful. (Return a cachable response to allow for # testing cached pages that require authentication.) 
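    # (Added sketch: a follow-up request that carries "If-None-Match: abc"
    # is answered with a 304 below, letting tests confirm that a cached,
    # authenticated page is revalidated rather than refetched.)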
old_protocol_version = self.protocol_version self.protocol_version = "HTTP/1.1" if_none_match = self.headers.getheader('if-none-match') if if_none_match == "abc": self.send_response(304) self.end_headers() elif url_path.endswith(".gif"): # Using chrome/test/data/google/logo.gif as the test image test_image_path = ['google', 'logo.gif'] gif_path = os.path.join(self.server.data_dir, *test_image_path) if not os.path.isfile(gif_path): self.send_error(404) self.protocol_version = old_protocol_version return True f = open(gif_path, "rb") data = f.read() f.close() self.send_response(200) self.send_header('Content-Type', 'image/gif') self.send_header('Cache-control', 'max-age=60000') self.send_header('Etag', 'abc') self.end_headers() self.wfile.write(data) else: self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Cache-control', 'max-age=60000') self.send_header('Etag', 'abc') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>%s/%s</title>' % (username, password)) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('You sent:<br>%s<p>' % self.headers) self.wfile.write('</body></html>') self.protocol_version = old_protocol_version return True def GetNonce(self, force_reset=False): """Returns a nonce that's stable per request path for the server's lifetime. This is a fake implementation. A real implementation would only use a given nonce a single time (hence the name n-once). However, for the purposes of unittesting, we don't care about the security of the nonce. Args: force_reset: Iff set, the nonce will be changed. Useful for testing the "stale" response. """ if force_reset or not self.server.nonce_time: self.server.nonce_time = time.time() return hashlib.md5('privatekey%s%d' % (self.path, self.server.nonce_time)).hexdigest() def AuthDigestHandler(self): """This handler tests 'Digest' authentication. It just sends a page with title 'user/pass' if you succeed. A stale response is sent iff "stale" is present in the request path. """ if not self._ShouldHandleRequest("/auth-digest"): return False stale = 'stale' in self.path nonce = self.GetNonce(force_reset=stale) opaque = hashlib.md5('opaque').hexdigest() password = 'secret' realm = 'testrealm' auth = self.headers.getheader('authorization') pairs = {} try: if not auth: raise Exception('no auth') if not auth.startswith('Digest'): raise Exception('not digest') # Pull out all the name="value" pairs as a dictionary. pairs = dict(re.findall(r'(\b[^ ,=]+)="?([^",]+)"?', auth)) # Make sure it's all valid. if pairs['nonce'] != nonce: raise Exception('wrong nonce') if pairs['opaque'] != opaque: raise Exception('wrong opaque') # Check the 'response' value and make sure it matches our magic hash. # See http://www.ietf.org/rfc/rfc2617.txt hash_a1 = hashlib.md5( ':'.join([pairs['username'], realm, password])).hexdigest() hash_a2 = hashlib.md5(':'.join([self.command, pairs['uri']])).hexdigest() if 'qop' in pairs and 'nc' in pairs and 'cnonce' in pairs: response = hashlib.md5(':'.join([hash_a1, nonce, pairs['nc'], pairs['cnonce'], pairs['qop'], hash_a2])).hexdigest() else: response = hashlib.md5(':'.join([hash_a1, nonce, hash_a2])).hexdigest() if pairs['response'] != response: raise Exception('wrong password') except Exception, e: # Authentication failed. 
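      # (Added note: the challenge assembled below looks roughly like
      #   WWW-Authenticate: Digest realm="testrealm", domain="/", qop="auth",
      #       algorithm=MD5, nonce="<md5 hex>", opaque="<md5 hex>"
      # with ', stale="TRUE"' appended when "stale" appears in the path.)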
self.send_response(401) hdr = ('Digest ' 'realm="%s", ' 'domain="/", ' 'qop="auth", ' 'algorithm=MD5, ' 'nonce="%s", ' 'opaque="%s"') % (realm, nonce, opaque) if stale: hdr += ', stale="TRUE"' self.send_header('WWW-Authenticate', hdr) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>Denied: %s</title>' % e) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('pairs=%s<p>' % pairs) self.wfile.write('You sent:<br>%s<p>' % self.headers) self.wfile.write('We are replying:<br>%s<p>' % hdr) self.wfile.write('</body></html>') return True # Authentication successful. self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<title>%s/%s</title>' % (pairs['username'], password)) self.wfile.write('</head><body>') self.wfile.write('auth=%s<p>' % auth) self.wfile.write('pairs=%s<p>' % pairs) self.wfile.write('</body></html>') return True def SlowServerHandler(self): """Wait for the user suggested time before responding. The syntax is /slow?0.5 to wait for half a second.""" if not self._ShouldHandleRequest("/slow"): return False query_char = self.path.find('?') wait_sec = 1.0 if query_char >= 0: try: wait_sec = int(self.path[query_char + 1:]) except ValueError: pass time.sleep(wait_sec) self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() self.wfile.write("waited %d seconds" % wait_sec) return True def ChunkedServerHandler(self): """Send chunked response. Allows to specify chunks parameters: - waitBeforeHeaders - ms to wait before sending headers - waitBetweenChunks - ms to wait between chunks - chunkSize - size of each chunk in bytes - chunksNumber - number of chunks Example: /chunked?waitBeforeHeaders=1000&chunkSize=5&chunksNumber=5 waits one second, then sends headers and five chunks five bytes each.""" if not self._ShouldHandleRequest("/chunked"): return False query_char = self.path.find('?') chunkedSettings = {'waitBeforeHeaders' : 0, 'waitBetweenChunks' : 0, 'chunkSize' : 5, 'chunksNumber' : 5} if query_char >= 0: params = self.path[query_char + 1:].split('&') for param in params: keyValue = param.split('=') if len(keyValue) == 2: try: chunkedSettings[keyValue[0]] = int(keyValue[1]) except ValueError: pass time.sleep(0.001 * chunkedSettings['waitBeforeHeaders']) self.protocol_version = 'HTTP/1.1' # Needed for chunked encoding self.send_response(200) self.send_header('Content-Type', 'text/plain') self.send_header('Connection', 'close') self.send_header('Transfer-Encoding', 'chunked') self.end_headers() # Chunked encoding: sending all chunks, then final zero-length chunk and # then final CRLF. for i in range(0, chunkedSettings['chunksNumber']): if i > 0: time.sleep(0.001 * chunkedSettings['waitBetweenChunks']) self.sendChunkHelp('*' * chunkedSettings['chunkSize']) self.wfile.flush() # Keep in mind that we start flushing only after 1kb. self.sendChunkHelp('') return True def ContentTypeHandler(self): """Returns a string of html with the given content type. 
E.g., /contenttype?text/css returns an html file with the Content-Type header set to text/css.""" if not self._ShouldHandleRequest("/contenttype"): return False query_char = self.path.find('?') content_type = self.path[query_char + 1:].strip() if not content_type: content_type = 'text/html' self.send_response(200) self.send_header('Content-Type', content_type) self.end_headers() self.wfile.write("<html>\n<body>\n<p>HTML text</p>\n</body>\n</html>\n") return True def NoContentHandler(self): """Returns a 204 No Content response.""" if not self._ShouldHandleRequest("/nocontent"): return False self.send_response(204) self.end_headers() return True def ServerRedirectHandler(self): """Sends a server redirect to the given URL. The syntax is '/server-redirect?http://foo.bar/asdf' to redirect to 'http://foo.bar/asdf'""" test_name = "/server-redirect" if not self._ShouldHandleRequest(test_name): return False query_char = self.path.find('?') if query_char < 0 or len(self.path) <= query_char + 1: self.sendRedirectHelp(test_name) return True dest = urllib.unquote(self.path[query_char + 1:]) self.send_response(301) # moved permanently self.send_header('Location', dest) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest) return True def ClientRedirectHandler(self): """Sends a client redirect to the given URL. The syntax is '/client-redirect?http://foo.bar/asdf' to redirect to 'http://foo.bar/asdf'""" test_name = "/client-redirect" if not self._ShouldHandleRequest(test_name): return False query_char = self.path.find('?') if query_char < 0 or len(self.path) <= query_char + 1: self.sendRedirectHelp(test_name) return True dest = urllib.unquote(self.path[query_char + 1:]) self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><head>') self.wfile.write('<meta http-equiv="refresh" content="0;url=%s">' % dest) self.wfile.write('</head><body>Redirecting to %s</body></html>' % dest) return True def GetSSLSessionCacheHandler(self): """Send a reply containing a log of the session cache operations.""" if not self._ShouldHandleRequest('/ssl-session-cache'): return False self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() try: log = self.server.session_cache.log except AttributeError: self.wfile.write('Pass --https-record-resume in order to use' + ' this request') return True for (action, sessionID) in log: self.wfile.write('%s\t%s\n' % (action, bytes(sessionID).encode('hex'))) return True def SSLManySmallRecords(self): """Sends a reply consisting of a variety of small writes. 
These will be translated into a series of small SSL records when used over an HTTPS server.""" if not self._ShouldHandleRequest('/ssl-many-small-records'): return False self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() # Write ~26K of data, in 1350 byte chunks for i in xrange(20): self.wfile.write('*' * 1350) self.wfile.flush() return True def GetChannelID(self): """Send a reply containing the hashed ChannelID that the client provided.""" if not self._ShouldHandleRequest('/channel-id'): return False self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() channel_id = bytes(self.server.tlsConnection.channel_id) self.wfile.write(hashlib.sha256(channel_id).digest().encode('base64')) return True def ClientCipherListHandler(self): """Send a reply containing the cipher suite list that the client provided. Each cipher suite value is serialized in decimal, followed by a newline.""" if not self._ShouldHandleRequest('/client-cipher-list'): return False self.send_response(200) self.send_header('Content-Type', 'text/plain') self.end_headers() for cipher_suite in self.server.tlsConnection.clientHello.cipher_suites: self.wfile.write(str(cipher_suite)) self.wfile.write('\n') return True def CloseSocketHandler(self): """Closes the socket without sending anything.""" if not self._ShouldHandleRequest('/close-socket'): return False self.wfile.close() return True def RangeResetHandler(self): """Send data broken up by connection resets every N (default 4K) bytes. Support range requests. If the data requested doesn't straddle a reset boundary, it will all be sent. Used for testing resuming downloads.""" def DataForRange(start, end): """Data to be provided for a particular range of bytes.""" # Offset and scale to avoid too obvious (and hence potentially # collidable) data. return ''.join([chr(y % 256) for y in range(start * 2 + 15, end * 2 + 15, 2)]) if not self._ShouldHandleRequest('/rangereset'): return False # HTTP/1.1 is required for ETag and range support. self.protocol_version = 'HTTP/1.1' _, _, url_path, _, query, _ = urlparse.urlparse(self.path) # Defaults size = 8000 # Note that the rst is sent just before sending the rst_boundary byte. rst_boundary = 4000 respond_to_range = True hold_for_signal = False rst_limit = -1 token = 'DEFAULT' fail_precondition = 0 send_verifiers = True # Parse the query qdict = urlparse.parse_qs(query, True) if 'size' in qdict: size = int(qdict['size'][0]) if 'rst_boundary' in qdict: rst_boundary = int(qdict['rst_boundary'][0]) if 'token' in qdict: # Identifying token for stateful tests. token = qdict['token'][0] if 'rst_limit' in qdict: # Max number of rsts for a given token. rst_limit = int(qdict['rst_limit'][0]) if 'bounce_range' in qdict: respond_to_range = False if 'hold' in qdict: # Note that hold_for_signal will not work with null range requests; # see TODO below. hold_for_signal = True if 'no_verifiers' in qdict: send_verifiers = False if 'fail_precondition' in qdict: fail_precondition = int(qdict['fail_precondition'][0]) # Record already set information, or set it. rst_limit = TestPageHandler.rst_limits.setdefault(token, rst_limit) if rst_limit != 0: TestPageHandler.rst_limits[token] -= 1 fail_precondition = TestPageHandler.fail_precondition.setdefault( token, fail_precondition) if fail_precondition != 0: TestPageHandler.fail_precondition[token] -= 1 first_byte = 0 last_byte = size - 1 # Does that define what we want to return, or do we need to apply # a range? 
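    # (Added example: a request header of "Range: bytes=1000-3999" makes the
    # match below yield first_byte == 1000 and last_byte == 3999; an
    # open-ended "bytes=1000-" leaves last_byte at size - 1.)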
range_response = False range_header = self.headers.getheader('range') if range_header and respond_to_range: mo = re.match("bytes=(\d*)-(\d*)", range_header) if mo.group(1): first_byte = int(mo.group(1)) if mo.group(2): last_byte = int(mo.group(2)) if last_byte > size - 1: last_byte = size - 1 range_response = True if last_byte < first_byte: return False if (fail_precondition and (self.headers.getheader('If-Modified-Since') or self.headers.getheader('If-Match'))): self.send_response(412) self.end_headers() return True if range_response: self.send_response(206) self.send_header('Content-Range', 'bytes %d-%d/%d' % (first_byte, last_byte, size)) else: self.send_response(200) self.send_header('Content-Type', 'application/octet-stream') self.send_header('Content-Length', last_byte - first_byte + 1) if send_verifiers: # If fail_precondition is non-zero, then the ETag for each request will be # different. etag = "%s%d" % (token, fail_precondition) self.send_header('ETag', etag) self.send_header('Last-Modified', 'Tue, 19 Feb 2013 14:32 EST') self.end_headers() if hold_for_signal: # TODO(rdsmith/phajdan.jr): http://crbug.com/169519: Without writing # a single byte, the self.server.handle_request() below hangs # without processing new incoming requests. self.wfile.write(DataForRange(first_byte, first_byte + 1)) first_byte = first_byte + 1 # handle requests until one of them clears this flag. self.server.wait_for_download = True while self.server.wait_for_download: self.server.handle_request() possible_rst = ((first_byte / rst_boundary) + 1) * rst_boundary if possible_rst >= last_byte or rst_limit == 0: # No RST has been requested in this range, so we don't need to # do anything fancy; just write the data and let the python # infrastructure close the connection. self.wfile.write(DataForRange(first_byte, last_byte + 1)) self.wfile.flush() return True # We're resetting the connection part way in; go to the RST # boundary and then send an RST. # Because socket semantics do not guarantee that all the data will be # sent when using the linger semantics to hard close a socket, # we send the data and then wait for our peer to release us # before sending the reset. data = DataForRange(first_byte, possible_rst) self.wfile.write(data) self.wfile.flush() self.server.wait_for_download = True while self.server.wait_for_download: self.server.handle_request() l_onoff = 1 # Linger is active. l_linger = 0 # Seconds to linger for. self.connection.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, struct.pack('ii', l_onoff, l_linger)) # Close all duplicates of the underlying socket to force the RST. self.wfile.close() self.rfile.close() self.connection.close() return True def DefaultResponseHandler(self): """This is the catch-all response handler for requests that aren't handled by one of the special handlers above. Note that we specify the content-length as without it the https connection is not closed properly (and the browser keeps expecting data).""" contents = "Default response given for path: " + self.path self.send_response(200) self.send_header('Content-Type', 'text/html') self.send_header('Content-Length', len(contents)) self.end_headers() if (self.command != 'HEAD'): self.wfile.write(contents) return True def RedirectConnectHandler(self): """Sends a redirect to the CONNECT request for www.redirect.com. 
This response is not specified by the RFC, so the browser should not follow the redirect.""" if (self.path.find("www.redirect.com") < 0): return False dest = "http://www.destination.com/foo.js" self.send_response(302) # moved temporarily self.send_header('Location', dest) self.send_header('Connection', 'close') self.end_headers() return True def ServerAuthConnectHandler(self): """Sends a 401 to the CONNECT request for www.server-auth.com. This response doesn't make sense because the proxy server cannot request server authentication.""" if (self.path.find("www.server-auth.com") < 0): return False challenge = 'Basic realm="WallyWorld"' self.send_response(401) # unauthorized self.send_header('WWW-Authenticate', challenge) self.send_header('Connection', 'close') self.end_headers() return True def DefaultConnectResponseHandler(self): """This is the catch-all response handler for CONNECT requests that aren't handled by one of the special handlers above. Real Web servers respond with 400 to CONNECT requests.""" contents = "Your client has issued a malformed or illegal request." self.send_response(400) # bad request self.send_header('Content-Type', 'text/html') self.send_header('Content-Length', len(contents)) self.end_headers() self.wfile.write(contents) return True # called by the redirect handling function when there is no parameter def sendRedirectHelp(self, redirect_name): self.send_response(200) self.send_header('Content-Type', 'text/html') self.end_headers() self.wfile.write('<html><body><h1>Error: no redirect destination</h1>') self.wfile.write('Use <pre>%s?http://dest...</pre>' % redirect_name) self.wfile.write('</body></html>') # called by chunked handling function def sendChunkHelp(self, chunk): # Each chunk consists of: chunk size (hex), CRLF, chunk body, CRLF self.wfile.write('%X\r\n' % len(chunk)) self.wfile.write(chunk) self.wfile.write('\r\n') class OCSPHandler(testserver_base.BasePageHandler): def __init__(self, request, client_address, socket_server): handlers = [self.OCSPResponse] self.ocsp_response = socket_server.ocsp_response testserver_base.BasePageHandler.__init__(self, request, client_address, socket_server, [], handlers, [], handlers, []) def OCSPResponse(self): self.send_response(200) self.send_header('Content-Type', 'application/ocsp-response') self.send_header('Content-Length', str(len(self.ocsp_response))) self.end_headers() self.wfile.write(self.ocsp_response) class TCPEchoHandler(SocketServer.BaseRequestHandler): """The RequestHandler class for TCP echo server. It is instantiated once per connection to the server, and overrides the handle() method to implement communication to the client. """ def handle(self): """Handles the request from the client and constructs a response.""" data = self.request.recv(65536).strip() # Verify the "echo request" message received from the client. Send back # "echo response" message if "echo request" message is valid. try: return_data = echo_message.GetEchoResponseData(data) if not return_data: return except ValueError: return self.request.send(return_data) class UDPEchoHandler(SocketServer.BaseRequestHandler): """The RequestHandler class for UDP echo server. It is instantiated once per connection to the server, and overrides the handle() method to implement communication to the client. """ def handle(self): """Handles the request from the client and constructs a response.""" data = self.request[0].strip() request_socket = self.request[1] # Verify the "echo request" message received from the client. 
    # Send back "echo response" message if "echo request" message is valid.
    try:
      return_data = echo_message.GetEchoResponseData(data)
      if not return_data:
        return
    except ValueError:
      return
    request_socket.sendto(return_data, self.client_address)


class BasicAuthProxyRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """A request handler that behaves as a proxy server which requires
  basic authentication.

  Only CONNECT, GET and HEAD are supported for now.
  """

  _AUTH_CREDENTIAL = 'Basic Zm9vOmJhcg=='  # foo:bar

  def parse_request(self):
    """Overrides parse_request to check credential."""
    if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
      return False

    auth = self.headers.getheader('Proxy-Authorization')
    if auth != self._AUTH_CREDENTIAL:
      self.send_response(407)
      self.send_header('Proxy-Authenticate', 'Basic realm="MyRealm1"')
      self.end_headers()
      return False

    return True

  def _start_read_write(self, sock):
    sock.setblocking(0)
    self.request.setblocking(0)
    rlist = [self.request, sock]
    while True:
      ready_sockets, _unused, errors = select.select(rlist, [], [])
      if errors:
        self.send_response(500)
        self.end_headers()
        return
      for s in ready_sockets:
        received = s.recv(1024)
        if len(received) == 0:
          return
        if s == self.request:
          other = sock
        else:
          other = self.request
        other.send(received)

  def _do_common_method(self):
    url = urlparse.urlparse(self.path)
    port = url.port
    if not port:
      if url.scheme == 'http':
        port = 80
      elif url.scheme == 'https':
        port = 443
    if not url.hostname or not port:
      self.send_response(400)
      self.end_headers()
      return

    if len(url.path) == 0:
      path = '/'
    else:
      path = url.path
      if len(url.query) > 0:
        path = '%s?%s' % (url.path, url.query)

    sock = None
    try:
      sock = socket.create_connection((url.hostname, port))
      sock.send('%s %s %s\r\n' % (
          self.command, path, self.protocol_version))
      for header in self.headers.headers:
        header = header.strip()
        if (header.lower().startswith('connection') or
            header.lower().startswith('proxy')):
          continue
        sock.send('%s\r\n' % header)
      sock.send('\r\n')
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      if sock is not None:
        sock.close()

  def do_CONNECT(self):
    try:
      pos = self.path.rfind(':')
      host = self.path[:pos]
      port = int(self.path[pos+1:])
    except Exception:
      self.send_response(400)
      self.end_headers()
      # Bail out here; host and port are undefined if parsing failed.
      return

    # Initialize sock so the finally clause below cannot hit an unbound name
    # if create_connection() itself raises.
    sock = None
    try:
      sock = socket.create_connection((host, port))
      self.send_response(200, 'Connection established')
      self.end_headers()
      self._start_read_write(sock)
    except Exception:
      self.send_response(500)
      self.end_headers()
    finally:
      if sock is not None:
        sock.close()

  def do_GET(self):
    self._do_common_method()

  def do_HEAD(self):
    self._do_common_method()


class ServerRunner(testserver_base.TestServerRunner):
  """TestServerRunner for the net test servers."""

  def __init__(self):
    super(ServerRunner, self).__init__()
    self.__ocsp_server = None

  def __make_data_dir(self):
    if self.options.data_dir:
      if not os.path.isdir(self.options.data_dir):
        raise testserver_base.OptionError('specified data dir not found: ' +
            self.options.data_dir + ' exiting...')
      my_data_dir = self.options.data_dir
    else:
      # Create the default path to our data dir, relative to the exe dir.
my_data_dir = os.path.join(BASE_DIR, "..", "..", "..", "..", "test", "data") #TODO(ibrar): Must use Find* funtion defined in google\tools #i.e my_data_dir = FindUpward(my_data_dir, "test", "data") return my_data_dir def create_server(self, server_data): port = self.options.port host = self.options.host if self.options.server_type == SERVER_HTTP: if self.options.https: pem_cert_and_key = None if self.options.cert_and_key_file: if not os.path.isfile(self.options.cert_and_key_file): raise testserver_base.OptionError( 'specified server cert file not found: ' + self.options.cert_and_key_file + ' exiting...') pem_cert_and_key = file(self.options.cert_and_key_file, 'r').read() else: # generate a new certificate and run an OCSP server for it. self.__ocsp_server = OCSPServer((host, 0), OCSPHandler) print ('OCSP server started on %s:%d...' % (host, self.__ocsp_server.server_port)) ocsp_der = None ocsp_state = None if self.options.ocsp == 'ok': ocsp_state = minica.OCSP_STATE_GOOD elif self.options.ocsp == 'revoked': ocsp_state = minica.OCSP_STATE_REVOKED elif self.options.ocsp == 'invalid': ocsp_state = minica.OCSP_STATE_INVALID elif self.options.ocsp == 'unauthorized': ocsp_state = minica.OCSP_STATE_UNAUTHORIZED elif self.options.ocsp == 'unknown': ocsp_state = minica.OCSP_STATE_UNKNOWN else: raise testserver_base.OptionError('unknown OCSP status: ' + self.options.ocsp_status) (pem_cert_and_key, ocsp_der) = minica.GenerateCertKeyAndOCSP( subject = "127.0.0.1", ocsp_url = ("http://%s:%d/ocsp" % (host, self.__ocsp_server.server_port)), ocsp_state = ocsp_state, serial = self.options.cert_serial) self.__ocsp_server.ocsp_response = ocsp_der for ca_cert in self.options.ssl_client_ca: if not os.path.isfile(ca_cert): raise testserver_base.OptionError( 'specified trusted client CA file not found: ' + ca_cert + ' exiting...') stapled_ocsp_response = None if self.__ocsp_server and self.options.staple_ocsp_response: stapled_ocsp_response = self.__ocsp_server.ocsp_response server = HTTPSServer((host, port), TestPageHandler, pem_cert_and_key, self.options.ssl_client_auth, self.options.ssl_client_ca, self.options.ssl_client_cert_type, self.options.ssl_bulk_cipher, self.options.ssl_key_exchange, self.options.enable_npn, self.options.record_resume, self.options.tls_intolerant, self.options.tls_intolerance_type, self.options.signed_cert_timestamps_tls_ext.decode( "base64"), self.options.fallback_scsv, stapled_ocsp_response, self.options.disable_session_cache) print 'HTTPS server started on https://%s:%d...' % \ (host, server.server_port) else: server = HTTPServer((host, port), TestPageHandler) print 'HTTP server started on http://%s:%d...' % \ (host, server.server_port) server.data_dir = self.__make_data_dir() server.file_root_url = self.options.file_root_url server_data['port'] = server.server_port elif self.options.server_type == SERVER_WEBSOCKET: # Launch pywebsocket via WebSocketServer. logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) # TODO(toyoshim): Remove following os.chdir. Currently this operation # is required to work correctly. It should be fixed from pywebsocket side. 
      os.chdir(self.__make_data_dir())
      websocket_options = WebSocketOptions(host, port, '.')
      scheme = "ws"
      if self.options.cert_and_key_file:
        scheme = "wss"
        websocket_options.use_tls = True
        websocket_options.private_key = self.options.cert_and_key_file
        websocket_options.certificate = self.options.cert_and_key_file

      if self.options.ssl_client_auth:
        websocket_options.tls_client_cert_optional = False
        websocket_options.tls_client_auth = True
        if len(self.options.ssl_client_ca) != 1:
          raise testserver_base.OptionError(
              'one trusted client CA file should be specified')
        if not os.path.isfile(self.options.ssl_client_ca[0]):
          raise testserver_base.OptionError(
              'specified trusted client CA file not found: ' +
              self.options.ssl_client_ca[0] + ' exiting...')
        websocket_options.tls_client_ca = self.options.ssl_client_ca[0]

      # Set before constructing the server so the handshake handler sees
      # the final value.
      websocket_options.use_basic_auth = self.options.ws_basic_auth

      server = WebSocketServer(websocket_options)
      print 'WebSocket server started on %s://%s:%d...' % \
          (scheme, host, server.server_port)
      server_data['port'] = server.server_port
    elif self.options.server_type == SERVER_TCP_ECHO:
      # Used for generating the key (randomly) that encodes the
      # "echo request" message.
      random.seed()
      server = TCPEchoServer((host, port), TCPEchoHandler)
      print 'Echo TCP server started on port %d...' % server.server_port
      server_data['port'] = server.server_port
    elif self.options.server_type == SERVER_UDP_ECHO:
      # Used for generating the key (randomly) that encodes the
      # "echo request" message.
      random.seed()
      server = UDPEchoServer((host, port), UDPEchoHandler)
      print 'Echo UDP server started on port %d...' % server.server_port
      server_data['port'] = server.server_port
    elif self.options.server_type == SERVER_BASIC_AUTH_PROXY:
      server = HTTPServer((host, port), BasicAuthProxyRequestHandler)
      print 'BasicAuthProxy server started on port %d...' % server.server_port
      server_data['port'] = server.server_port
    elif self.options.server_type == SERVER_FTP:
      my_data_dir = self.__make_data_dir()

      # Instantiate a dummy authorizer for managing 'virtual' users
      authorizer = pyftpdlib.ftpserver.DummyAuthorizer()

      # Define a new user having full r/w permissions and a read-only
      # anonymous user
      authorizer.add_user('chrome', 'chrome', my_data_dir, perm='elradfmw')
      authorizer.add_anonymous(my_data_dir)

      # Instantiate FTP handler class
      ftp_handler = pyftpdlib.ftpserver.FTPHandler
      ftp_handler.authorizer = authorizer

      # Define a customized banner (string returned when client connects)
      ftp_handler.banner = ("pyftpdlib %s based ftpd ready." %
                            pyftpdlib.ftpserver.__ver__)

      # Instantiate FTP server class and listen to address:port
      server = pyftpdlib.ftpserver.FTPServer((host, port), ftp_handler)
      server_data['port'] = server.socket.getsockname()[1]
      print 'FTP server started on port %d...' % server_data['port']
    else:
      raise testserver_base.OptionError('unknown server type: ' +
          str(self.options.server_type))

    return server

  def run_server(self):
    if self.__ocsp_server:
      self.__ocsp_server.serve_forever_on_thread()

    testserver_base.TestServerRunner.run_server(self)

    if self.__ocsp_server:
      self.__ocsp_server.stop_serving()

  def add_options(self):
    testserver_base.TestServerRunner.add_options(self)
    self.option_parser.add_option('--disable-session-cache',
                                  action='store_true',
                                  dest='disable_session_cache',
                                  help='tells the server to disable the '
                                  'TLS session cache.')
    self.option_parser.add_option('-f', '--ftp', action='store_const',
                                  const=SERVER_FTP, default=SERVER_HTTP,
                                  dest='server_type',
                                  help='start up an FTP server.')
    self.option_parser.add_option('--tcp-echo', action='store_const',
                                  const=SERVER_TCP_ECHO, default=SERVER_HTTP,
                                  dest='server_type',
                                  help='start up a tcp echo server.')
    self.option_parser.add_option('--udp-echo', action='store_const',
                                  const=SERVER_UDP_ECHO, default=SERVER_HTTP,
                                  dest='server_type',
                                  help='start up a udp echo server.')
    self.option_parser.add_option('--basic-auth-proxy', action='store_const',
                                  const=SERVER_BASIC_AUTH_PROXY,
                                  default=SERVER_HTTP, dest='server_type',
                                  help='start up a proxy server which requires '
                                  'basic authentication.')
    self.option_parser.add_option('--websocket', action='store_const',
                                  const=SERVER_WEBSOCKET, default=SERVER_HTTP,
                                  dest='server_type',
                                  help='start up a WebSocket server.')
    self.option_parser.add_option('--https', action='store_true',
                                  dest='https', help='Specify that https '
                                  'should be used.')
    self.option_parser.add_option('--cert-and-key-file',
                                  dest='cert_and_key_file', help='specify the '
                                  'path to the file containing the certificate '
                                  'and private key for the server in PEM '
                                  'format')
    self.option_parser.add_option('--ocsp', dest='ocsp', default='ok',
                                  help='The type of OCSP response generated '
                                  'for the automatically generated '
                                  'certificate. One of [ok,revoked,invalid,'
                                  'unauthorized,unknown]')
    self.option_parser.add_option('--cert-serial', dest='cert_serial',
                                  default=0, type=int,
                                  help='If non-zero then the generated '
                                  'certificate will have this serial number')
    self.option_parser.add_option('--tls-intolerant', dest='tls_intolerant',
                                  default='0', type='int',
                                  help='If nonzero, certain TLS connections '
                                  'will be aborted in order to test version '
                                  'fallback. 1 means all TLS versions will be '
                                  'aborted. 2 means TLS 1.1 or higher will be '
                                  'aborted. 3 means TLS 1.2 or higher will be '
                                  'aborted.')
    self.option_parser.add_option('--tls-intolerance-type',
                                  dest='tls_intolerance_type',
                                  default="alert",
                                  help='Controls how the server reacts to a '
                                  'TLS version it is intolerant to. Valid '
                                  'values are "alert", "close", and "reset".')
    self.option_parser.add_option('--signed-cert-timestamps-tls-ext',
                                  dest='signed_cert_timestamps_tls_ext',
                                  default='',
                                  help='Base64 encoded SCT list. If set, '
                                  'server will respond with a '
                                  'signed_certificate_timestamp TLS extension '
                                  'whenever the client supports it.')
    self.option_parser.add_option('--fallback-scsv', dest='fallback_scsv',
                                  default=False, const=True,
                                  action='store_const',
                                  help='If given, TLS_FALLBACK_SCSV support '
                                  'will be enabled. This causes the server to '
                                  'reject fallback connections from compatible '
                                  'clients (e.g. Chrome).')
    self.option_parser.add_option('--staple-ocsp-response',
                                  dest='staple_ocsp_response',
                                  default=False, action='store_true',
                                  help='If set, server will staple the OCSP '
                                  'response whenever OCSP is on and the client '
                                  'supports OCSP stapling.')
    self.option_parser.add_option('--https-record-resume',
                                  dest='record_resume', const=True,
                                  default=False, action='store_const',
                                  help='Record resumption cache events rather '
                                  'than resuming as normal. Allows the use of '
                                  'the /ssl-session-cache request')
    self.option_parser.add_option('--ssl-client-auth', action='store_true',
                                  help='Require SSL client auth on every '
                                  'connection.')
    self.option_parser.add_option('--ssl-client-ca', action='append',
                                  default=[], help='Specify that the client '
                                  'certificate request should include the CA '
                                  'named in the subject of the DER-encoded '
                                  'certificate contained in the specified '
                                  'file. This option may appear multiple '
                                  'times, indicating multiple CA names should '
                                  'be sent in the request.')
    self.option_parser.add_option('--ssl-client-cert-type', action='append',
                                  default=[], help='Specify that the client '
                                  'certificate request should include the '
                                  'specified certificate_type value. This '
                                  'option may appear multiple times, '
                                  'indicating multiple values should be sent '
                                  'in the request. Valid values are '
                                  '"rsa_sign", "dss_sign", and "ecdsa_sign". '
                                  'If omitted, "rsa_sign" will be used.')
    self.option_parser.add_option('--ssl-bulk-cipher', action='append',
                                  help='Specify the bulk encryption '
                                  'algorithm(s) that will be accepted by the '
                                  'SSL server. Valid values are "aes256", '
                                  '"aes128", "3des", "rc4". If omitted, all '
                                  'algorithms will be used. This option may '
                                  'appear multiple times, indicating '
                                  'multiple algorithms should be enabled.')
    self.option_parser.add_option('--ssl-key-exchange', action='append',
                                  help='Specify the key exchange algorithm(s) '
                                  'that will be accepted by the SSL server. '
                                  'Valid values are "rsa", "dhe_rsa". If '
                                  'omitted, all algorithms will be used. This '
                                  'option may appear multiple times, '
                                  'indicating multiple algorithms should be '
                                  'enabled.')
    # TODO(davidben): Add ALPN support to tlslite.
    self.option_parser.add_option('--enable-npn', dest='enable_npn',
                                  default=False, const=True,
                                  action='store_const',
                                  help='Enable server support for the NPN '
                                  'extension. The server will advertise '
                                  'support for exactly one protocol, http/1.1')
    self.option_parser.add_option('--file-root-url', default='/files/',
                                  help='Specify a root URL for files served.')
    # TODO(ricea): Generalize this to support basic auth for HTTP too.
    self.option_parser.add_option('--ws-basic-auth', action='store_true',
                                  dest='ws_basic_auth',
                                  help='Enable basic-auth for WebSocket')


if __name__ == '__main__':
  sys.exit(ServerRunner().main())
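
# The helper below is an editorial sketch, not part of the original test
# server: it shows how a client might exercise BasicAuthProxyRequestHandler
# above. The proxy address 127.0.0.1:8080 and the target URL are illustrative
# assumptions; it uses only the Python 2 standard library, matching the rest
# of this file, and is defined but never invoked, so running or importing the
# module is unaffected.
def _example_basic_auth_proxy_client(proxy_host='127.0.0.1', proxy_port=8080):
  import httplib
  conn = httplib.HTTPConnection(proxy_host, proxy_port)
  # Omitting the Proxy-Authorization header would yield a 407 response
  # from parse_request; 'Basic Zm9vOmJhcg==' is the expected foo:bar
  # credential.
  conn.request('GET', 'http://example.com/',
               headers={'Proxy-Authorization': 'Basic Zm9vOmJhcg=='})
  return conn.getresponse()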
bsd-3-clause
susurros/ConsoleVM
console/forms.py
1
1262
from django import forms
from .models import VHost, Snapshot, Datastore


class VHForm(forms.ModelForm):

    class Meta:
        model = VHost
        fields = ('id', 'name', 'ipaddr', 'VType', 'user', 'sshkey',
                  'sshport', 'isopath')
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'VType': forms.Select(attrs={'class': 'form-control'}),
            'ipaddr': forms.TextInput(attrs={'class': 'form-control'}),
            'user': forms.TextInput(attrs={'class': 'form-control'}),
            'sshkey': forms.TextInput(attrs={'class': 'form-control'}),
            'sshport': forms.NumberInput(attrs={'class': 'form-control'}),
            'isopath': forms.TextInput(attrs={'class': 'form-control'}),
        }


class DSForm(forms.ModelForm):

    class Meta:
        model = Datastore
        fields = ('id', 'name', 'dpath', 'VHost')
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'dpath': forms.TextInput(attrs={'class': 'form-control'}),
            'VHost': forms.Select(attrs={'class': 'form-control'}),
        }


class SnapForm(forms.ModelForm):

    class Meta:
        model = Snapshot
        fields = ('id', 'name', 'suuid', 'VMachine')
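
# Editorial usage sketch, not part of the original module: a minimal Django
# view showing how a ModelForm such as VHForm is typically consumed. The
# template path 'console/vhost_form.html' and the success URL '/' are
# illustrative assumptions; the function is defined but never wired to a URL.
def _example_vhost_create(request):
    from django.shortcuts import redirect, render

    form = VHForm(request.POST or None)
    if request.method == 'POST' and form.is_valid():
        form.save()  # persists a new VHost row
        return redirect('/')  # assumed success URL
    return render(request, 'console/vhost_form.html', {'form': form})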
gpl-3.0