repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
asm-products/kanshu-backend | dictionary/node/node_modules/bunyan/node_modules/dtrace-provider/compile.py | 228 | 1769 | {
'conditions': [
['OS=="mac" or OS=="solaris"', {
'variables': {
'escaped_root': '<!(printf %q "<(module_root_dir)")',
},
# If we are on the Mac, or a Solaris derivative, attempt
# to build the DTrace provider extension.
'targets': [
{
'target_name': 'DTraceProviderBindings',
'sources': [
'dtrace_provider.cc',
'dtrace_probe.cc',
'dtrace_argument.cc'
],
'include_dirs': [
'libusdt',
'<!(node -e "require(\'nan\')")',
],
'dependencies': [
'libusdt'
],
'libraries': [
'-L<(escaped_root)/libusdt -l usdt'
]
},
{
'target_name': 'libusdt',
'type': 'none',
'actions': [{
'inputs': [''],
'outputs': [''],
'action_name': 'build_libusdt',
'action': [
'sh', 'libusdt-build.sh'
]
}]
}
]
},
# If we are not on the Mac or Solaris, DTrace is unavailable.
# This target is necessary because GYP requires at least one
# target to exist.
{
'targets': [
{
'target_name': 'DTraceProviderStub',
'type': 'none'
}
]
}]
]
}
| agpl-3.0 |
openstack/octavia | octavia/amphorae/drivers/haproxy/data_models.py | 1 | 3400 | # Copyright 2014 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import octavia.common.data_models as models
class Topology(models.BaseDataModel):
    """Amphora topology report returned over the amphora agent REST API.

    Plain data container; every attribute defaults to ``None``.
    """

    def __init__(self, hostname=None, uuid=None, topology=None, role=None,
                 ip=None, ha_ip=None):
        # Amphora host name as reported by the agent.
        self.hostname = hostname
        # Amphora identifier.
        self.uuid = uuid
        # Topology type (e.g. standalone vs. active/standby) -- exact value
        # set is defined by the caller, not visible here.
        self.topology = topology
        # Role of this amphora within the topology (e.g. master/backup) --
        # presumably; confirm against the agent API.
        self.role = role
        # Management/primary IP address.
        self.ip = ip
        # Highly-available (VIP) address -- presumably; confirm with caller.
        self.ha_ip = ha_ip
class Info(models.BaseDataModel):
    """Basic amphora identification/version info from the agent API.

    Plain data container; every attribute defaults to ``None``.
    """

    def __init__(self, hostname=None, uuid=None, version=None,
                 api_version=None):
        # Amphora host name.
        self.hostname = hostname
        # Amphora identifier.
        self.uuid = uuid
        # Software version of the amphora agent.
        self.version = version
        # Version of the agent REST API it speaks.
        self.api_version = api_version
class Details(models.BaseDataModel):
    """Detailed amphora status/health report from the agent API.

    Scalar attributes default to ``None``; list-valued attributes
    (``load``, ``listeners``, ``packages``) default to fresh empty lists.
    """

    def __init__(self, hostname=None, uuid=None, version=None,
                 api_version=None, network_tx=None, network_rx=None,
                 active=None, haproxy_count=None, cpu=None, memory=None,
                 disk=None, load=None, listeners=None, packages=None):
        self.hostname = hostname
        self.uuid = uuid
        self.version = version
        self.api_version = api_version
        # Network throughput counters -- presumably bytes transmitted and
        # received; confirm units against the agent implementation.
        self.network_tx = network_tx
        self.network_rx = network_rx
        self.active = active
        # Number of haproxy processes running on the amphora.
        self.haproxy_count = haproxy_count
        # Nested resource reports (CPU / Memory / Disk data models).
        self.cpu = cpu
        self.memory = memory
        self.disk = disk
        # `x or []` normalizes None to a new list, avoiding a shared
        # mutable default argument.
        self.load = load or []
        self.listeners = listeners or []
        self.packages = packages or []
class CPU(models.BaseDataModel):
    """CPU utilization breakdown for an amphora.

    Values are presumably percentages or jiffies from the agent's stat
    source -- confirm units against the agent; all default to ``None``.
    """

    def __init__(self, total=None, user=None, system=None, soft_irq=None):
        self.total = total
        self.user = user
        self.system = system
        self.soft_irq = soft_irq
class Memory(models.BaseDataModel):
    """Memory usage report for an amphora.

    Field names mirror ``/proc/meminfo``-style counters -- presumably in
    kilobytes; confirm units against the agent. All default to ``None``.
    """

    def __init__(self, total=None, free=None, available=None, buffers=None,
                 cached=None, swap_used=None, shared=None, slab=None,
                 committed_as=None):
        self.total = total
        self.free = free
        self.available = available
        self.buffers = buffers
        self.cached = cached
        self.swap_used = swap_used
        self.shared = shared
        self.slab = slab
        # Corresponds to the Committed_AS counter -- presumably.
        self.committed_as = committed_as
class Disk(models.BaseDataModel):
    """Disk usage report for an amphora (units defined by the agent)."""

    def __init__(self, used=None, available=None):
        self.used = used
        self.available = available
class ListenerStatus(models.BaseDataModel):
    """Status of a single listener on an amphora."""

    def __init__(self, status=None, uuid=None, provisioning_status=None,
                 type=None, pools=None):
        # Operating status of the listener.
        self.status = status
        self.uuid = uuid
        self.provisioning_status = provisioning_status
        # NOTE: parameter name `type` shadows the builtin; kept as-is for
        # API compatibility with existing callers.
        self.type = type
        # None is normalized to a fresh list (avoids shared mutable default).
        self.pools = pools or []
class Pool(models.BaseDataModel):
    """Status of a load-balancing pool and its members."""

    def __init__(self, uuid=None, status=None, members=None):
        self.uuid = uuid
        self.status = status
        # None is normalized to a fresh list (avoids shared mutable default).
        self.members = members or []
| apache-2.0 |
TNosredna/CouchPotatoServer | libs/tornado/locale.py | 40 | 22014 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Translation methods for generating localized strings.
To load a locale and generate a translated string::
user_locale = tornado.locale.get("es_LA")
print user_locale.translate("Sign out")
`tornado.locale.get()` returns the closest matching locale, not necessarily the
specific locale you requested. You can support pluralization with
additional arguments to `~Locale.translate()`, e.g.::
people = [...]
message = user_locale.translate(
"%(list)s is online", "%(list)s are online", len(people))
print message % {"list": user_locale.list(people)}
The first string is chosen if ``len(people) == 1``, otherwise the second
string is chosen.
Applications should call one of `load_translations` (which uses a simple
CSV format) or `load_gettext_translations` (which uses the ``.mo`` format
supported by `gettext` and related tools). If neither method is called,
the `Locale.translate` method will simply return the original string.
"""
from __future__ import absolute_import, division, print_function, with_statement
import csv
import datetime
import numbers
import os
import re
from tornado import escape
from tornado.log import gen_log
from tornado.util import u
# Locale the source strings are assumed to be written in; changeable via
# set_default_locale().
_default_locale = "en_US"
# Map of locale code -> loaded translations (CSV dicts or gettext objects).
_translations = {}
# Locales we can serve; always includes the default locale.
_supported_locales = frozenset([_default_locale])
# True once load_gettext_translations() has been used instead of CSV files.
_use_gettext = False
def get(*locale_codes):
    """Return the closest supported `Locale` for the given codes.

    Candidate codes are tried in order; either a tight match (``en_US``)
    or a loose one (``en`` for ``en_US``) wins.  When nothing matches we
    fall back to the default locale (normally ``en_US``); see
    `set_default_locale()` to change that default.
    """
    return Locale.get_closest(*locale_codes)
def set_default_locale(code):
    """Set the locale that all source strings are assumed to be in.

    Translation files on disk map from this default locale to each
    destination locale, so no translation file is needed for the default
    locale itself.
    """
    global _default_locale, _supported_locales
    _default_locale = code
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
def load_translations(directory):
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., ``My name is %(name)s``) and their associated translations.

    The directory should have translation files of the form ``LOCALE.csv``,
    e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example ``%(name)s liked this`` may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    The file is read using the `csv` module in the default "excel" dialect.
    In this format there should not be spaces after the commas.

    Example translation ``es_LA.csv``::

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gustó esto","plural"
        "%(name)s liked this","A %(name)s le gustó esto","singular"
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"):
            continue
        # NOTE(review): filenames with extra dots (e.g. "en_US.bak.csv")
        # would raise ValueError here; presumably such names never occur.
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            gen_log.error("Unrecognized locale %r (path: %s)", locale,
                          os.path.join(directory, path))
            continue
        full_path = os.path.join(directory, path)
        try:
            # python 3: csv.reader requires a file open in text mode.
            # Force utf8 to avoid dependence on $LANG environment variable.
            f = open(full_path, "r", encoding="utf-8")
        except TypeError:
            # python 2: files return byte strings, which are decoded below.
            f = open(full_path, "r")
        try:
            _translations[locale] = {}
            for i, row in enumerate(csv.reader(f)):
                if not row or len(row) < 2:
                    continue
                row = [escape.to_unicode(c).strip() for c in row]
                english, translation = row[:2]
                if len(row) > 2:
                    plural = row[2] or "unknown"
                else:
                    plural = "unknown"
                if plural not in ("plural", "singular", "unknown"):
                    gen_log.error("Unrecognized plural indicator %r in %s line %d",
                                  plural, path, i + 1)
                    continue
                _translations[locale].setdefault(plural, {})[english] = translation
        finally:
            # Close the file even when a row fails to parse; the previous
            # implementation leaked the handle on any exception above.
            f.close()
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def load_gettext_translations(directory, domain):
    """Loads translations from `gettext`'s locale tree

    Locale tree is similar to system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have you app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    # Imported lazily so the module works without gettext support loaded.
    import gettext
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        if lang.startswith('.'):
            continue  # skip .svn, etc
        if os.path.isfile(os.path.join(directory, lang)):
            # Only directories can hold an LC_MESSAGES tree.
            continue
        try:
            # stat() first so a missing .mo file produces a clear error
            # before gettext.translation() goes searching.
            os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
            _translations[lang] = gettext.translation(domain, directory,
                                                      languages=[lang])
        except Exception as e:
            # Best-effort: a broken locale directory should not prevent the
            # remaining locales from loading.
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(list(_translations.keys()) + [_default_locale])
    # Switch Locale.get() to produce GettextLocale instances from now on.
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
def get_supported_locales():
    """Returns all supported locale codes (a frozenset, not a list)."""
    return _supported_locales
class Locale(object):
    """Object representing a locale.

    After calling one of `load_translations` or `load_gettext_translations`,
    call `get` or `get_closest` to get a Locale object.
    """
    @classmethod
    def get_closest(cls, *locale_codes):
        """Returns the closest match for the given locale code."""
        for code in locale_codes:
            if not code:
                continue
            # Normalize "en-US" style codes to "en_US".
            code = code.replace("-", "_")
            parts = code.split("_")
            if len(parts) > 2:
                # More than one qualifier (e.g. "zh_Hans_CN") is not
                # supported; try the next candidate code.
                continue
            elif len(parts) == 2:
                code = parts[0].lower() + "_" + parts[1].upper()
            if code in _supported_locales:
                return cls.get(code)
            if parts[0].lower() in _supported_locales:
                # Loose match: fall back to the bare language code.
                return cls.get(parts[0].lower())
        return cls.get(_default_locale)

    @classmethod
    def get(cls, code):
        """Returns the Locale for the given locale code.

        If it is not supported, we raise an exception.
        """
        if not hasattr(cls, "_cache"):
            # Lazily created per-class cache of Locale instances.
            cls._cache = {}
        if code not in cls._cache:
            assert code in _supported_locales
            translations = _translations.get(code, None)
            if translations is None:
                locale = CSVLocale(code, {})
            elif _use_gettext:
                locale = GettextLocale(code, translations)
            else:
                locale = CSVLocale(code, translations)
            cls._cache[code] = locale
        return cls._cache[code]

    def __init__(self, code, translations):
        self.code = code
        # u() is tornado.util's unicode-literal helper (py2/py3 compatible).
        self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown"))
        self.rtl = False
        # Right-to-left scripts: Persian, Arabic, Hebrew.
        for prefix in ["fa", "ar", "he"]:
            if self.code.startswith(prefix):
                self.rtl = True
                break
        self.translations = translations

        # Initialize strings for date formatting
        _ = self.translate
        self._months = [
            _("January"), _("February"), _("March"), _("April"),
            _("May"), _("June"), _("July"), _("August"),
            _("September"), _("October"), _("November"), _("December")]
        self._weekdays = [
            _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"),
            _("Friday"), _("Saturday"), _("Sunday")]

    def translate(self, message, plural_message=None, count=None):
        """Returns the translation for the given message for this locale.

        If ``plural_message`` is given, you must also provide
        ``count``. We return ``plural_message`` when ``count != 1``,
        and we return the singular form for the given message when
        ``count == 1``.
        """
        # Abstract; implemented by CSVLocale and GettextLocale.
        raise NotImplementedError()

    def format_date(self, date, gmt_offset=0, relative=True, shorter=False,
                    full_format=False):
        """Formats the given date (which should be GMT).

        By default, we return a relative time (e.g., "2 minutes ago"). You
        can return an absolute date string with ``relative=False``.

        You can force a full format date ("July 10, 1980") with
        ``full_format=True``.

        This method is primarily intended for dates in the past.
        For dates in the future, we fall back to full format.
        """
        if self.code.startswith("ru"):
            # Russian relative phrasing is not handled; always absolute.
            relative = False
        if isinstance(date, numbers.Real):
            # Numeric input is treated as a Unix timestamp (seconds, GMT).
            date = datetime.datetime.utcfromtimestamp(date)
        now = datetime.datetime.utcnow()
        if date > now:
            # NOTE(review): timedelta.seconds ignores the days component,
            # so a date several days in the future with a small seconds
            # remainder would also be rounded down here -- presumably
            # harmless for clock skew, but confirm.
            if relative and (date - now).seconds < 60:
                # Due to clock skew, some things are slightly in the
                # future. Round timestamps in the immediate future down
                # to now in relative mode.
                date = now
            else:
                # Otherwise, future dates always use the full format.
                full_format = True
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        local_now = now - datetime.timedelta(minutes=gmt_offset)
        local_yesterday = local_now - datetime.timedelta(hours=24)
        difference = now - date
        # `seconds` is only meaningful below when days == 0.
        seconds = difference.seconds
        days = difference.days

        _ = self.translate
        format = None
        if not full_format:
            if relative and days == 0:
                if seconds < 50:
                    return _("1 second ago", "%(seconds)d seconds ago",
                             seconds) % {"seconds": seconds}

                if seconds < 50 * 60:
                    minutes = round(seconds / 60.0)
                    return _("1 minute ago", "%(minutes)d minutes ago",
                             minutes) % {"minutes": minutes}

                hours = round(seconds / (60.0 * 60))
                return _("1 hour ago", "%(hours)d hours ago",
                         hours) % {"hours": hours}

            if days == 0:
                format = _("%(time)s")
            elif days == 1 and local_date.day == local_yesterday.day and \
                    relative:
                format = _("yesterday") if shorter else \
                    _("yesterday at %(time)s")
            elif days < 5:
                format = _("%(weekday)s") if shorter else \
                    _("%(weekday)s at %(time)s")
            elif days < 334:  # 11mo, since confusing for same month last year
                format = _("%(month_name)s %(day)s") if shorter else \
                    _("%(month_name)s %(day)s at %(time)s")

        if format is None:
            format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
                _("%(month_name)s %(day)s, %(year)s at %(time)s")

        # 24-hour clock for every locale except US English and zh_CN
        # (zh_CN gets its own AM/PM-prefixed branch below).
        tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
        if tfhour_clock:
            str_time = "%d:%02d" % (local_date.hour, local_date.minute)
        elif self.code == "zh_CN":
            str_time = "%s%d:%02d" % (
                (u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
                local_date.hour % 12 or 12, local_date.minute)
        else:
            str_time = "%d:%02d %s" % (
                local_date.hour % 12 or 12, local_date.minute,
                ("am", "pm")[local_date.hour >= 12])

        return format % {
            "month_name": self._months[local_date.month - 1],
            "weekday": self._weekdays[local_date.weekday()],
            "day": str(local_date.day),
            "year": str(local_date.year),
            "time": str_time
        }

    def format_day(self, date, gmt_offset=0, dow=True):
        """Formats the given date as a day of week.

        Example: "Monday, January 22". You can remove the day of week with
        ``dow=False``.
        """
        local_date = date - datetime.timedelta(minutes=gmt_offset)
        _ = self.translate
        if dow:
            return _("%(weekday)s, %(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "weekday": self._weekdays[local_date.weekday()],
                "day": str(local_date.day),
            }
        else:
            return _("%(month_name)s %(day)s") % {
                "month_name": self._months[local_date.month - 1],
                "day": str(local_date.day),
            }

    def list(self, parts):
        """Returns a comma-separated list for the given list of parts.

        The format is, e.g., "A, B and C", "A and B" or just "A" for lists
        of size 1.
        """
        _ = self.translate
        if len(parts) == 0:
            return ""
        if len(parts) == 1:
            return parts[0]
        # Persian uses its own conjunction character instead of a comma.
        comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ")
        return _("%(commas)s and %(last)s") % {
            "commas": comma.join(parts[:-1]),
            "last": parts[len(parts) - 1],
        }

    def friendly_number(self, value):
        """Returns a comma-separated number for the given integer."""
        if self.code not in ("en", "en_US"):
            # Digit grouping is only applied for English locales.
            return str(value)
        value = str(value)
        parts = []
        # Split off three digits at a time from the right.
        while value:
            parts.append(value[-3:])
            value = value[:-3]
        return ",".join(reversed(parts))
class CSVLocale(Locale):
    """Locale backed by tornado's CSV translation dictionaries."""

    def translate(self, message, plural_message=None, count=None):
        """Look up *message* (or its plural form) in the CSV tables."""
        if plural_message is None:
            table = self.translations.get("unknown", {})
        else:
            assert count is not None
            if count == 1:
                table = self.translations.get("singular", {})
            else:
                message = plural_message
                table = self.translations.get("plural", {})
        # Fall back to the untranslated message when no entry exists.
        return table.get(message, message)
class GettextLocale(Locale):
    """Locale implementation using the `gettext` module."""

    def __init__(self, code, translations):
        if hasattr(translations, "ungettext"):
            # python 2: use the unicode-returning variants
            self.ngettext = translations.ungettext
            self.gettext = translations.ugettext
        else:
            # python 3: the plain methods already return str
            self.ngettext = translations.ngettext
            self.gettext = translations.gettext
        # self.gettext must be bound before the base constructor runs,
        # because Locale.__init__ calls self.translate() to build the
        # month/weekday name tables.
        super(GettextLocale, self).__init__(code, translations)

    def translate(self, message, plural_message=None, count=None):
        """Translate via gettext, dispatching to ngettext for plurals."""
        if plural_message is None:
            return self.gettext(message)
        assert count is not None
        return self.ngettext(message, plural_message, count)
# Map of locale code -> English name ("name_en") and native name ("name").
# Fix: "uk_UA" name_en was truncated to "Ukraini "; corrected to "Ukrainian".
LOCALE_NAMES = {
    "af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")},
    "am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')},
    "ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")},
    "bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")},
    "bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")},
    "bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")},
    "ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")},
    "cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")},
    "cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")},
    "da_DK": {"name_en": u("Danish"), "name": u("Dansk")},
    "de_DE": {"name_en": u("German"), "name": u("Deutsch")},
    "el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")},
    "en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")},
    "en_US": {"name_en": u("English (US)"), "name": u("English (US)")},
    "es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")},
    "es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")},
    "et_EE": {"name_en": u("Estonian"), "name": u("Eesti")},
    "eu_ES": {"name_en": u("Basque"), "name": u("Euskara")},
    "fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")},
    "fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")},
    "fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")},
    "fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")},
    "ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")},
    "gl_ES": {"name_en": u("Galician"), "name": u("Galego")},
    "he_IL": {"name_en": u("Hebrew"), "name": u("\u05e2\u05d1\u05e8\u05d9\u05ea")},
    "hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")},
    "hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")},
    "hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")},
    "id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")},
    "is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")},
    "it_IT": {"name_en": u("Italian"), "name": u("Italiano")},
    "ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")},
    "ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")},
    "lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")},
    "lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")},
    "mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")},
    "ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")},
    "ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")},
    "nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")},
    "nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")},
    "nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")},
    "pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")},
    "pl_PL": {"name_en": u("Polish"), "name": u("Polski")},
    "pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")},
    "pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")},
    "ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")},
    "ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")},
    "sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")},
    "sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")},
    "sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")},
    "sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")},
    "sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")},
    "sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")},
    "ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")},
    "te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")},
    "th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")},
    "tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")},
    "tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")},
    "uk_UA": {"name_en": u("Ukrainian"), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")},
    "vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")},
    "zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")},
    "zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")},
}
| gpl-3.0 |
zstackio/zstack-woodpecker | integrationtest/vm/multihosts/backup/paths/path65.py | 1 | 1455 | import zstackwoodpecker.test_state as ts_header
TestAction = ts_header.TestAction
def path():
    """Return the test path: initial formation plus the ordered action list."""
    actions = [
        [TestAction.create_volume, "volume1", "=scsi"],
        [TestAction.attach_volume, "vm1", "volume1"],
        [TestAction.create_volume_backup, "volume1", "backup1", "=full"],
        [TestAction.create_volume_snapshot, "volume1", "snapshot62"],
        [TestAction.create_volume_snapshot, "volume1", "snapshot63"],
        [TestAction.create_volume_snapshot, "volume1", "snapshot64"],
        [TestAction.delete_volume_snapshot, "snapshot62"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.use_volume_backup, "backup1"],
        [TestAction.start_vm, "vm1"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.reinit_vm, "vm1"],
        [TestAction.start_vm, "vm1"],
        [TestAction.batch_delete_volume_snapshot, ["snapshot63"]],
        [TestAction.create_vm_backup, "vm1", "backup2", "=full"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.use_vm_backup, "backup2"],
        [TestAction.start_vm, "vm1"],
        [TestAction.create_image_from_volume, "vm1", "image1"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.use_volume_snapshot, "snapshot64"],
        [TestAction.start_vm, "vm1"],
        [TestAction.create_volume_backup, "volume1", "backup3", "=full"],
        [TestAction.stop_vm, "vm1"],
        [TestAction.use_volume_backup, "backup3"],
        [TestAction.start_vm, "vm1"],
    ]
    return dict(initial_formation="template2", path_list=actions)
| apache-2.0 |
formath/mxnet | example/image-classification/symbols/alexnet.py | 57 | 3218 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Reference:
Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton. "Imagenet classification with deep convolutional neural networks." Advances in neural information processing systems. 2012.
"""
import mxnet as mx
import numpy as np
def get_symbol(num_classes, dtype='float32', **kwargs):
    """Build the AlexNet symbol graph.

    :param num_classes: number of output classes for the final classifier
    :param dtype: 'float32' or 'float16'; float16 casts the input and casts
                  the final FC output back to float32 before the softmax
    :returns: the SoftmaxOutput symbol
    """
    net = mx.sym.Variable(name="data")
    if dtype == 'float16':
        net = mx.sym.Cast(data=net, dtype=np.float16)
    # Stage 1: conv -> relu -> local response norm -> max pool
    net = mx.sym.Convolution(name='conv1', data=net, kernel=(11, 11),
                             stride=(4, 4), num_filter=96)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.LRN(data=net, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
    net = mx.sym.Pooling(data=net, pool_type="max", kernel=(3, 3), stride=(2, 2))
    # Stage 2: conv -> relu -> LRN -> max pool
    net = mx.sym.Convolution(name='conv2', data=net, kernel=(5, 5), pad=(2, 2),
                             num_filter=256)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.LRN(data=net, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
    net = mx.sym.Pooling(data=net, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # Stage 3: three stacked conv/relu blocks, then a max pool
    net = mx.sym.Convolution(name='conv3', data=net, kernel=(3, 3), pad=(1, 1),
                             num_filter=384)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.Convolution(name='conv4', data=net, kernel=(3, 3), pad=(1, 1),
                             num_filter=384)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.Convolution(name='conv5', data=net, kernel=(3, 3), pad=(1, 1),
                             num_filter=256)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.Pooling(data=net, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # Stage 4: flatten + first fully connected block with dropout
    net = mx.sym.Flatten(data=net)
    net = mx.sym.FullyConnected(name='fc1', data=net, num_hidden=4096)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.Dropout(data=net, p=0.5)
    # Stage 5: second fully connected block with dropout
    net = mx.sym.FullyConnected(name='fc2', data=net, num_hidden=4096)
    net = mx.sym.Activation(data=net, act_type="relu")
    net = mx.sym.Dropout(data=net, p=0.5)
    # Stage 6: classifier
    net = mx.sym.FullyConnected(name='fc3', data=net, num_hidden=num_classes)
    if dtype == 'float16':
        net = mx.sym.Cast(data=net, dtype=np.float32)
    return mx.sym.SoftmaxOutput(data=net, name='softmax')
| apache-2.0 |
pyatil/jenkins-job-builder | jenkins_jobs/modules/general.py | 14 | 7540 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""These are job parameters that are common to every type of Jenkins job.
Example:
.. literalinclude:: /../../tests/yamlparser/fixtures/general-example-001.yaml
:Job Parameters:
* **project-type**:
Defaults to "freestyle", but "maven" as well as "multijob", "flow" or
"externaljob" can also be specified.
* **defaults**:
Specifies a set of :ref:`defaults` to use for this job, defaults to
''global''. If you have values that are common to all of your jobs,
create a ``global`` :ref:`defaults` object to hold them, and no further
configuration of individual jobs is necessary. If some jobs
should not use the ``global`` defaults, use this field to specify a
different set of defaults.
* **description**:
The description for the job. By default, the description
"!-- Managed by Jenkins Job Builder" is applied.
* **disabled**:
Boolean value to set whether or not this job should be disabled in
Jenkins. Defaults to ``false`` (job will be enabled).
* **display-name**:
Optional name shown for the project throughout the Jenkins web GUI in
place of the actual job name. The jenkins_jobs tool cannot fully remove
this trait once it is set, so use caution when setting it. Setting it to
the same string as the job's name is an effective un-set workaround.
Alternately, the field can be cleared manually using the Jenkins web
interface.
* **concurrent**:
Boolean value to set whether or not Jenkins can run this job
concurrently. Defaults to ``false``.
* **workspace**:
Path for a custom workspace. Defaults to Jenkins default
configuration.
* **child-workspace**:
Path for a child custom workspace. Defaults to Jenkins default
configuration. This parameter is only valid for matrix type jobs.
* **quiet-period**:
Number of seconds to wait between consecutive runs of this job.
Defaults to ``0``.
* **block-downstream**:
Boolean value to set whether or not this job must block while
downstream jobs are running. Downstream jobs are determined
transitively. Defaults to ``false``.
* **block-upstream**:
Boolean value to set whether or not this job must block while
upstream jobs are running. Upstream jobs are determined
transitively. Defaults to ``false``.
* **auth-token**:
Specifies an authentication token that allows new builds to be
triggered by accessing a special predefined URL. Only those who
know the token will be able to trigger builds remotely.
* **retry-count**:
If a build fails to checkout from the repository, Jenkins will
retry the specified number of times before giving up.
* **node**:
Restrict where this job can be run. If there is a group of
machines that the job can be built on, you can specify that
label as the node to tie on, which will cause Jenkins to build the job on
any of the machines with that label. For matrix projects, this parameter
will only restrict where the parent job will run.
* **logrotate**:
The Logrotate section allows you to automatically remove old build
history. It adds the ``logrotate`` attribute to the :ref:`Job`
definition. All logrotate attributes default to "-1" (keep forever).
"""
import xml.etree.ElementTree as XML
import jenkins_jobs.modules.base
from jenkins_jobs.xml_config import remove_ignorable_whitespace
class General(jenkins_jobs.modules.base.Base):
    """Job module emitting the generic, top-level Jenkins job settings.

    Reads the general options (jdk, description, concurrency, workspace,
    blocking, node restriction, logrotate, ...) from the parsed job *data*
    and appends the corresponding subelements to the job's config XML root.
    """
    # Low sequence number: run before the more specialised modules so the
    # general elements appear first in the generated config.xml.
    sequence = 10
    def gen_xml(self, parser, xml, data):
        """Append the general-settings subelements to *xml* from *data*.

        NOTE(review): the order of the SubElement calls below is preserved
        from the original; Jenkins config.xml element order may be
        significant -- confirm before reordering.
        """
        jdk = data.get('jdk', None)
        if jdk:
            XML.SubElement(xml, 'jdk').text = jdk
        XML.SubElement(xml, 'actions')
        desc_text = data.get('description', None)
        if desc_text is not None:
            description = XML.SubElement(xml, 'description')
            description.text = desc_text
        XML.SubElement(xml, 'keepDependencies').text = 'false'
        # 'disabled' is tri-state: None means "omit the element entirely".
        disabled = data.get('disabled', None)
        if disabled is not None:
            if disabled:
                XML.SubElement(xml, 'disabled').text = 'true'
            else:
                XML.SubElement(xml, 'disabled').text = 'false'
        if 'display-name' in data:
            XML.SubElement(xml, 'displayName').text = data['display-name']
        if data.get('block-downstream'):
            XML.SubElement(xml,
                           'blockBuildWhenDownstreamBuilding').text = 'true'
        else:
            XML.SubElement(xml,
                           'blockBuildWhenDownstreamBuilding').text = 'false'
        if data.get('block-upstream'):
            XML.SubElement(xml,
                           'blockBuildWhenUpstreamBuilding').text = 'true'
        else:
            XML.SubElement(xml,
                           'blockBuildWhenUpstreamBuilding').text = 'false'
        if 'auth-token' in data:
            XML.SubElement(xml, 'authToken').text = data['auth-token']
        if data.get('concurrent'):
            XML.SubElement(xml, 'concurrentBuild').text = 'true'
        else:
            XML.SubElement(xml, 'concurrentBuild').text = 'false'
        if 'workspace' in data:
            XML.SubElement(xml, 'customWorkspace').text = \
                str(data['workspace'])
        # child-workspace is only valid for matrix (multi-configuration) jobs.
        if (xml.tag == 'matrix-project') and ('child-workspace' in data):
            XML.SubElement(xml, 'childCustomWorkspace').text = \
                str(data['child-workspace'])
        if 'quiet-period' in data:
            XML.SubElement(xml, 'quietPeriod').text = str(data['quiet-period'])
        node = data.get('node', None)
        if node:
            # A node label pins the job to matching machines, so it must
            # not be allowed to roam.
            XML.SubElement(xml, 'assignedNode').text = node
            XML.SubElement(xml, 'canRoam').text = 'false'
        else:
            XML.SubElement(xml, 'canRoam').text = 'true'
        if 'retry-count' in data:
            XML.SubElement(xml, 'scmCheckoutRetryCount').text = \
                str(data['retry-count'])
        if 'logrotate' in data:
            # Every logrotate attribute defaults to -1, i.e. keep forever.
            lr_xml = XML.SubElement(xml, 'logRotator')
            logrotate = data['logrotate']
            lr_days = XML.SubElement(lr_xml, 'daysToKeep')
            lr_days.text = str(logrotate.get('daysToKeep', -1))
            lr_num = XML.SubElement(lr_xml, 'numToKeep')
            lr_num.text = str(logrotate.get('numToKeep', -1))
            lr_adays = XML.SubElement(lr_xml, 'artifactDaysToKeep')
            lr_adays.text = str(logrotate.get('artifactDaysToKeep', -1))
            lr_anum = XML.SubElement(lr_xml, 'artifactNumToKeep')
            lr_anum.text = str(logrotate.get('artifactNumToKeep', -1))
def raw(parser, xml_parent, data):
    """Append a user-supplied raw XML snippet to the job configuration.

    Documented in definition.rst because the includes/docs tooling does not
    handle cross-cutting helpers like this one well.
    """
    snippet = XML.fromstring(data.get('xml'))
    remove_ignorable_whitespace(snippet)
    xml_parent.append(snippet)
| apache-2.0 |
rosswhitfield/mantid | Framework/PythonInterface/test/python/mantid/geometry/BoundingBoxTest.py | 3 | 1529 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.geometry import BoundingBox
from mantid.kernel import V3D
class BoundingBoxTest(unittest.TestCase):
    """Exercises the BoundingBox Python bindings."""

    def test_default_construction_is_allowed(self):
        empty_box = BoundingBox()
        self.assertIsInstance(empty_box, BoundingBox)
        self.assertTrue(empty_box.isNull())

    def test_construction_with_min_max_values_is_allowed(self):
        made_box = BoundingBox(1.0, 4.0, 5.0, 0.0, 2.0, 3.0)
        self.assertIsInstance(made_box, BoundingBox)

    def test_properties_are_correct(self):
        box = BoundingBox(1.0, 2.0, 3.0, -1.0, -2.0, -3.0)
        self.assertEqual(box.minPoint(), V3D(-1.0, -2.0, -3.0))
        self.assertEqual(box.maxPoint(), V3D(1.0, 2.0, 3.0))
        self.assertEqual(box.centrePoint(), V3D(0.0, 0.0, 0.0))
        self.assertEqual(box.width(), V3D(2.0, 4.0, 6.0))

    def test_point_inside(self):
        box = BoundingBox(1.0, 2.0, 3.0, -1.0, -2.0, -3.0)
        self.assertTrue(box.isPointInside(V3D(0.0, 0.0, 0.0)))

    def test_doesLineIntersect(self):
        box = BoundingBox(4.1, 4.1, 4.1, -4.1, -4.1, -4.1)
        self.assertTrue(
            box.doesLineIntersect(V3D(-6.0, 0.0, 0.0), V3D(1.0, 0.0, 0.0)))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| gpl-3.0 |
TeddyDesTodes/PyRfK | setup.py | 4 | 1807 | import os
from setuptools import setup
def find_packages(dir_):
    """Discover importable packages under ``<dir_>/rfk``.

    Walks the tree rooted at ``os.path.join(dir_, 'rfk')`` and returns the
    dotted package name, relative to *dir_*, for every directory containing
    an ``__init__.py``.

    Bug fix: the original split the walked path on the *first* path
    separator, which silently produced wrong package names whenever *dir_*
    itself contained a separator (e.g. ``'some/lib'``).  Using
    ``os.path.relpath`` is correct for any *dir_* depth and yields identical
    results for the single-component case this setup script uses.
    """
    packages = []
    for walked_dir, _subdirs, files in os.walk(os.path.join(dir_, 'rfk')):
        if '__init__.py' in files:
            fragment = os.path.relpath(walked_dir, dir_)
            packages.append(fragment.replace(os.sep, '.'))
    return packages
# Declarative package metadata.  Each rfk-* console script maps a command
# name to its entry-point function inside the rfk package (found under lib/).
setup(
    name='PyRfK',
    version='0.1',
    long_description=__doc__,
    packages=find_packages('lib'),
    package_dir={'': 'lib'},
    include_package_data=True,
    zip_safe=False,
    entry_points={'console_scripts': ['rfk-werkzeug = rfk.app:main',
                                      'rfk-collectstats = rfk.collectstats:main',
                                      'rfk-geoipdbupdate = rfk.geoipdbupdate:main',
                                      'rfk-eventdispatcher = rfk.eventdispatcher:main',
                                      'rfk-xmpp-bot = rfk.xmpp_bot:main',
                                      'rfk-liquidsoaphandler = rfk.liquidsoaphandler:main',
                                      'rfk-liquidsoap = rfk.liquidsoapdaemon:main',
                                      'rfk-userstats = rfk.userstats:main',
                                      'rfk-setup = rfk.setup:main']},
    # Runtime dependencies installed by pip alongside the package.
    install_requires=['Flask', 'Flask-Login', 'Flask-Babel',
                      'wtforms',
                      'pytz',
                      'passlib',
                      'bcrypt',
                      'pycountry',
                      'geoip2',
                      'postmarkup',
                      'progressbar',
                      'sqlalchemy',
                      'parsedatetime',
                      'icalendar',
                      'humanize',
                      'netaddr',
                      'chardet',
                      'lockfile']
)
| bsd-3-clause |
edocappelli/oasys-crystalpy | diff_pat.py | 1 | 3239 |
def plot_crystal_sketch(v0_h, v0_v, vH_h, vH_v, H_h, H_v):
    """Draw a schematic of the Bragg diffraction geometry.

    All arguments are horizontal (``_h``) / vertical (``_v``) components of
    normalized vectors: the incident wavevector k0 (``v0_*``), the
    diffracted wavevector kH (``vH_*``) and the reciprocal-lattice vector H
    (``H_*``).  The sketch shows the three vectors, the Bragg plane
    (perpendicular to H) and a slab representing the crystal.
    """
    import matplotlib.pyplot as plt
    from matplotlib.path import Path
    import matplotlib.patches as patches

    # Offsets keeping the vector labels clear of the arrow heads.
    hshift = 0.2
    vshift = 0.0

    plt.figure(1, figsize=(6, 6))
    ax = plt.subplot(111)
    # Schematic drawing: hide all ticks and tick labels.
    # Bug fix: the original literal was 'none) -- the missing closing quote
    # made the whole module fail to parse.
    ax.xaxis.set_ticks_position('none')
    ax.yaxis.set_ticks_position('none')
    ax.xaxis.set_ticklabels([])
    ax.yaxis.set_ticklabels([])

    # draw axes
    plt.xlim([-2.2, 2.2])
    plt.ylim([-2.2, 2.2])
    ax.annotate("",
                xy=(0.0, 0.0), xycoords='data',
                xytext=(2.0, 0.0), textcoords='data',
                arrowprops=dict(arrowstyle="<-", connectionstyle="arc3"),)
    plt.text(2, 0, "$x_2$", color='k')
    ax.annotate("",
                xy=(0.0, 0.0), xycoords='data',
                xytext=(0.0, 2.0), textcoords='data',
                arrowprops=dict(arrowstyle="<-", connectionstyle="arc3"),)
    plt.text(0, 2, "$x_3$", color='k')

    # draw the k0, kH and H vectors
    ax.annotate("",
                xy=(-v0_h, -v0_v), xycoords='data',
                xytext=(0.0, 0.0), textcoords='data',
                arrowprops=dict(arrowstyle="<-", connectionstyle="arc3",
                                color='red'),)
    plt.text(-v0_h + hshift, -v0_v + vshift, r"$\vec k_0$", color='r')
    ax.annotate("",
                xy=(0, 0), xycoords='data',
                xytext=(vH_h, vH_v), textcoords='data',
                arrowprops=dict(arrowstyle="<-", connectionstyle="arc3",
                                color='red'),)
    plt.text(vH_h + hshift, vH_v + vshift, r"$\vec k_H$", color='r')
    ax.annotate("",
                xy=(0, 0), xycoords='data',
                xytext=(H_h, H_v), textcoords='data',
                arrowprops=dict(arrowstyle="<-", connectionstyle="arc3",
                                color='blue'),)
    plt.text(H_h + hshift, H_v + vshift, r"$\vec H$", color='b')

    # draw the Bragg plane (perpendicular to H) as two green segments
    ax.annotate("",
                xy=(0, 0), xycoords='data',
                xytext=(-H_v * 1.5, H_h * 1.5), textcoords='data',
                arrowprops=dict(arrowstyle="-", connectionstyle="arc3",
                                color='green'),)
    ax.annotate("",
                xy=(0, 0), xycoords='data',
                xytext=(H_v * 1.5, -H_h * 1.5), textcoords='data',
                arrowprops=dict(arrowstyle="-", connectionstyle="arc3",
                                color='green'),)

    # draw the crystal as a thin rectangular slab
    x1 = -0.8
    y1 = -0.1
    x2 = 0.8
    y2 = 0.0
    verts = [
        (x1, y1),  # left, bottom
        (x2, y1),  # right, bottom
        (x2, y2),  # right, top
        (x1, y2),  # left, top
        (x1, y1),  # ignored (closes the polygon)
    ]
    codes = [Path.MOVETO,
             Path.LINETO,
             Path.LINETO,
             Path.LINETO,
             Path.CLOSEPOLY,
             ]
    path = Path(verts, codes)
    patch = patches.PathPatch(path, facecolor='orange', lw=2)
    ax.add_patch(patch)
    plt.show()
if __name__ == "__main__":
    # Demo invocation with components from a worked diffraction example.
    # All vectors are normalized
    v0_h = 0.92515270745695932
    v0_v = -0.37959513680375029
    vH_h = 0.99394445110430663
    vH_v = 0.10988370269953050
    H_h = 0.13917309988899462
    H_v = 0.99026806889209951
    plot_crystal_sketch(v0_h,v0_v,vH_h ,vH_v ,H_h ,H_v)
| mit |
Mrcl1450/caf | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events that have no dedicated handler.
# autodict comes from perf's Core helper module (Python 2 script).
unhandled = autodict()
# Called once by perf before any events are processed.
def trace_begin():
    print "trace_begin"
    pass
# Called once by perf after the last event; dump the unhandled summary.
def trace_end():
    print_unhandled()
# Handler for irq:softirq_entry events; perf passes the common fields
# followed by the event-specific 'vec' argument.
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
    (symbol_str("irq__softirq_entry", "vec", vec)),
# Handler for kmem:kmalloc events; exercises flag_str() on gfp_flags.
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
    "bytes_alloc=%u, gfp_flags=%s\n" % \
    (call_site, ptr, bytes_req, bytes_alloc,
    flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
# Fallback handler: count every event without a dedicated handler.
def trace_unhandled(event_name, context, event_fields_dict):
    try:
        unhandled[event_name] += 1
    except TypeError:
        # First occurrence: the autodict slot is not yet an int.
        unhandled[event_name] = 1
# Print the common fields shared by every event.
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
    # print trace fields not included in handler args
def print_uncommon(context):
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
    % (common_pc(context), trace_flag_str(common_flags(context)), \
    common_lock_depth(context))
# Summary table of events that had no dedicated handler.
def print_unhandled():
    keys = unhandled.keys()
    if not keys:
        return
    print "\nunhandled events:\n\n",
    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
    "-----------"),
    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
TeamEOS/external_chromium_org_third_party_WebKit | Tools/Scripts/webkitpy/layout_tests/controllers/repaint_overlay.py | 60 | 6449 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
def result_contains_repaint_rects(text):
    """Return True if *text* looks like a layout-test result carrying
    repaint rectangles.

    Matches either a layer-tree dump with a ``"repaintRects": [`` line or
    the ``Minimum repaint:`` marker emitted by text-based-repaint.js.
    Non-string inputs (e.g. None, bytes) yield False.

    Fixes: ``!= None`` replaced with the idiomatic ``is not None``; the
    pattern is now a raw string so ``\[`` is not an invalid string escape.
    """
    if not isinstance(text, str):
        return False
    if re.search(r'"repaintRects": \[$', text, re.MULTILINE) is not None:
        return True
    return 'Minimum repaint:' in text
def extract_layer_tree(input_str):
    """Extract the first brace-delimited layer-tree block from *input_str*.

    Returns the substring from the opening ``{`` line through the closing
    ``}`` line (including the trailing newline), or ``'{}'`` when the input
    is not a string or contains no such block.

    FIXME: There may be multiple layer trees in the result.
    """
    if not isinstance(input_str, str):
        return '{}'
    # The tree either opens the string or begins on its own line.
    start = 0 if input_str.startswith('{\n') else input_str.find('\n{\n')
    if start == -1:
        return '{}'
    end = input_str.find('\n}\n', start)
    return input_str[start:end + 3] if end != -1 else '{}'
def generate_repaint_overlay_html(test_name, actual_text, expected_text):
    """Build a standalone HTML page that overlays expected vs. actual
    repaint rects (and the minimum repaint, if present) over the test page.

    Returns '' when neither result text contains repaint rects.
    """
    if not result_contains_repaint_rects(actual_text) and not result_contains_repaint_rects(expected_text):
        return ''
    expected_layer_tree = extract_layer_tree(expected_text)
    actual_layer_tree = extract_layer_tree(actual_text)
    minimum_repaint = '[]'
    # 'Minimum repaint:' is emitted by fast/repaint/resources/text-based-repaint.js.
    minimum_repaint_match = re.search('Minimum repaint:\n(\[.*\n\])', actual_text, re.DOTALL)
    if minimum_repaint_match:
        minimum_repaint = minimum_repaint_match.group(1)
    # The template embeds the layer trees verbatim as JavaScript objects via
    # the %(...)s placeholders filled in at the end.
    return """<!DOCTYPE HTML>
<html>
<head>
<title>%(title)s</title>
<style>
body {
    margin: 0;
    padding: 0;
}
iframe {
    position: absolute;
    top: 80px;
    left: 0;
    border: 0;
    z-index: -1;
}
canvas {
    position: absolute;
    top: 80px;
    left: 0;
    z-index: 1;
}
#actual, #minimum-repaint {
    display: none;
}
</style>
</head>
<body>
<a href="http://crbug.com/381221">Known issues</a><br>
<label><input id="show-test" type="checkbox" checked onchange="toggle_test(this.checked)">Show test</label>
<label title="See fast/repaint/resources/text-based-repaint.js for how this works">
<input id="show-minimum-repaint" type="checkbox" onchange="toggle_minimum_repaint(this.checked)">Minimum repaint
</label>
<label><input id="use-solid-colors" type="checkbox" onchange="toggle_solid_color(this.checked)">Use solid colors</label>
<br>
<button title="See fast/repaint/resources/text-based-repaint.js for how this works" onclick="highlight_under_repaint()">
Highlight under-repaint
</button>
<br>
<span id='type'>Expected Invalidations</span>
<div id=overlay>
<canvas id='minimum-repaint' width='2000' height='2000'></canvas>
<canvas id='expected' width='2000' height='2000'></canvas>
<canvas id='actual' width='2000' height='2000'></canvas>
</div>
<script>
var overlay_opacity = 0.25;
function toggle_test(show_test) {
    iframe.style.display = show_test ? 'block' : 'none';
}
function toggle_minimum_repaint(show_minimum_repaint) {
    document.getElementById('minimum-repaint').style.display = show_minimum_repaint ? 'block' : 'none';
}
function toggle_solid_color(use_solid_color) {
    overlay_opacity = use_solid_color ? 1 : 0.25;
    draw_repaint_rects();
    draw_minimum_repaint();
}
function highlight_under_repaint() {
    document.getElementById('show-test').checked = false;
    toggle_test(false);
    document.getElementById('show-minimum-repaint').checked = true;
    toggle_minimum_repaint(true);
    document.getElementById('use-solid-colors').checked = true;
    toggle_solid_color(true);
}
var expected = %(expected)s;
var actual = %(actual)s;
var minimum_repaint = %(minimum_repaint)s;
function rectsEqual(rect1, rect2) {
    return rect1[0] == rect2[0] && rect1[1] == rect2[1] && rect1[2] == rect2[2] && rect1[3] == rect2[3];
}
function draw_rects(context, rects) {
    for (var i = 0; i < rects.length; ++i) {
        var rect = rects[i];
        context.fillRect(rect[0], rect[1], rect[2], rect[3]);
    }
}
function draw_layer_rects(context, result) {
    context.save();
    if (result.position)
        context.translate(result.position[0], result.position[1]);
    var t = result.transform;
    if (t) {
        var origin = result.transformOrigin || [result.bounds[0] / 2, result.bounds[1] / 2];
        context.translate(origin[0], origin[1]);
        context.transform(t[0][0], t[0][1], t[1][0], t[1][1], t[3][0], t[3][1]);
        context.translate(-origin[0], -origin[1]);
    }
    if (result.repaintRects)
        draw_rects(context, result.repaintRects);
    if (result.children) {
        for (var i = 0; i < result.children.length; ++i)
            draw_layer_rects(context, result.children[i]);
    }
    context.restore();
}
var expected_canvas = document.getElementById('expected');
var actual_canvas = document.getElementById('actual');
var minimum_repaint_canvas = document.getElementById('minimum-repaint');
function draw_repaint_rects() {
    var expected_ctx = expected_canvas.getContext("2d");
    expected_ctx.clearRect(0, 0, 2000, 2000);
    expected_ctx.fillStyle = 'rgba(255, 0, 0, ' + overlay_opacity + ')';
    draw_layer_rects(expected_ctx, expected);
    var actual_ctx = actual_canvas.getContext("2d");
    actual_ctx.clearRect(0, 0, 2000, 2000);
    actual_ctx.fillStyle = 'rgba(0, 255, 0, ' + overlay_opacity + ')';
    draw_layer_rects(actual_ctx, actual);
}
function draw_minimum_repaint() {
    var context = minimum_repaint_canvas.getContext("2d");
    context.fillStyle = 'rgba(0, 0, 0, 1)';
    draw_rects(context, minimum_repaint);
}
draw_repaint_rects();
draw_minimum_repaint();
var path = decodeURIComponent(location.search).substr(1);
var iframe = document.createElement('iframe');
iframe.id = 'test-frame';
iframe.width = 800;
iframe.height = 600;
iframe.src = path;
var overlay = document.getElementById('overlay');
overlay.appendChild(iframe);
var type = document.getElementById('type');
var expected_showing = true;
function flip() {
    if (expected_showing) {
        type.textContent = 'Actual Invalidations';
        expected_canvas.style.display = 'none';
        actual_canvas.style.display = 'block';
    } else {
        type.textContent = 'Expected Invalidations';
        actual_canvas.style.display = 'none';
        expected_canvas.style.display = 'block';
    }
    expected_showing = !expected_showing
}
setInterval(flip, 3000);
</script>
</body>
</html>
""" % {
        'title': test_name,
        'expected': expected_layer_tree,
        'actual': actual_layer_tree,
        'minimum_repaint': minimum_repaint,
    }
| bsd-3-clause |
maheshp/novatest | nova/tests/test_iptables_network.py | 12 | 12448 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for network code."""
from nova.network import linux_net
from nova import test
class IptablesManagerTestCase(test.TestCase):
    """Exercises IptablesManager's chain wrapping and rule ordering logic."""
    binary_name = linux_net.get_binary_name()
    # Canned `iptables-save` style output for the filter table, already
    # containing the wrapper chains nova manages.
    sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
                     '*filter',
                     ':INPUT ACCEPT [2223527:305688874]',
                     ':FORWARD ACCEPT [0:0]',
                     ':OUTPUT ACCEPT [2172501:140856656]',
                     ':iptables-top-rule - [0:0]',
                     ':iptables-bottom-rule - [0:0]',
                     ':%s-FORWARD - [0:0]' % (binary_name),
                     ':%s-INPUT - [0:0]' % (binary_name),
                     ':%s-local - [0:0]' % (binary_name),
                     ':%s-OUTPUT - [0:0]' % (binary_name),
                     ':nova-filter-top - [0:0]',
                     '[0:0] -A FORWARD -j nova-filter-top',
                     '[0:0] -A OUTPUT -j nova-filter-top',
                     '[0:0] -A nova-filter-top -j %s-local' % (binary_name),
                     '[0:0] -A INPUT -j %s-INPUT' % (binary_name),
                     '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
                     '[0:0] -A FORWARD -j %s-FORWARD' % (binary_name),
                     '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 53 '
                     '-j ACCEPT',
                     '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 53 '
                     '-j ACCEPT',
                     '[0:0] -A INPUT -i virbr0 -p udp -m udp --dport 67 '
                     '-j ACCEPT',
                     '[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 '
                     '-j ACCEPT',
                     '[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 '
                     '-j ACCEPT',
                     '[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT',
                     '[0:0] -A FORWARD -o virbr0 -j REJECT --reject-with '
                     'icmp-port-unreachable',
                     '[0:0] -A FORWARD -i virbr0 -j REJECT --reject-with '
                     'icmp-port-unreachable',
                     'COMMIT',
                     '# Completed on Fri Feb 18 15:17:05 2011']
    # Equivalent canned output for the nat table.
    sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
                  '*nat',
                  ':PREROUTING ACCEPT [3936:762355]',
                  ':INPUT ACCEPT [2447:225266]',
                  ':OUTPUT ACCEPT [63491:4191863]',
                  ':POSTROUTING ACCEPT [63112:4108641]',
                  ':%s-OUTPUT - [0:0]' % (binary_name),
                  ':%s-snat - [0:0]' % (binary_name),
                  ':%s-PREROUTING - [0:0]' % (binary_name),
                  ':%s-float-snat - [0:0]' % (binary_name),
                  ':%s-POSTROUTING - [0:0]' % (binary_name),
                  ':nova-postrouting-bottom - [0:0]',
                  '[0:0] -A PREROUTING -j %s-PREROUTING' % (binary_name),
                  '[0:0] -A OUTPUT -j %s-OUTPUT' % (binary_name),
                  '[0:0] -A POSTROUTING -j %s-POSTROUTING' % (binary_name),
                  '[0:0] -A nova-postrouting-bottom '
                  '-j %s-snat' % (binary_name),
                  '[0:0] -A %s-snat '
                  '-j %s-float-snat' % (binary_name, binary_name),
                  '[0:0] -A POSTROUTING -j nova-postrouting-bottom',
                  'COMMIT',
                  '# Completed on Fri Feb 18 15:17:05 2011']
    def setUp(self):
        super(IptablesManagerTestCase, self).setUp()
        self.manager = linux_net.IptablesManager()
    def test_filter_rules_are_wrapped(self):
        # Rules added via the manager must land in the wrapped chain and
        # disappear again when removed.
        current_lines = self.sample_filter
        table = self.manager.ipv4['filter']
        table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
        new_lines = self.manager._modify_rules(current_lines, table, 'filter')
        self.assertTrue('[0:0] -A %s-FORWARD '
                        '-s 1.2.3.4/5 -j DROP' % self.binary_name in new_lines)
        table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
        new_lines = self.manager._modify_rules(current_lines, table, 'filter')
        self.assertTrue('[0:0] -A %s-FORWARD '
                        '-s 1.2.3.4/5 -j DROP' % self.binary_name
                        not in new_lines)
    def test_remove_rules_regex(self):
        # Bulk removal of SNAT/DNAT rules by an IP-matching regex; four
        # rules reference each floating IP, so each removal drops four.
        current_lines = self.sample_nat
        table = self.manager.ipv4['nat']
        table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
                       ' -d 10.0.0.1')
        table.add_rule('float-snat', '-s 10.0.0.1 -j SNAT --to 10.10.10.10'
                       ' -o eth0')
        table.add_rule('PREROUTING', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
        table.add_rule('OUTPUT', '-d 10.10.10.10 -j DNAT --to 10.0.0.1')
        table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
                       ' -d 10.0.0.10')
        table.add_rule('float-snat', '-s 10.0.0.10 -j SNAT --to 10.10.10.11'
                       ' -o eth0')
        table.add_rule('PREROUTING', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
        table.add_rule('OUTPUT', '-d 10.10.10.11 -j DNAT --to 10.0.0.10')
        new_lines = self.manager._modify_rules(current_lines, table, 'nat')
        self.assertEqual(len(new_lines) - len(current_lines), 8)
        # Matches the IP with or without a /32 suffix.
        regex = '.*\s+%s(/32|\s+|$)'
        num_removed = table.remove_rules_regex(regex % '10.10.10.10')
        self.assertEqual(num_removed, 4)
        new_lines = self.manager._modify_rules(current_lines, table, 'nat')
        self.assertEqual(len(new_lines) - len(current_lines), 4)
        num_removed = table.remove_rules_regex(regex % '10.10.10.11')
        self.assertEqual(num_removed, 4)
        new_lines = self.manager._modify_rules(current_lines, table, 'nat')
        self.assertEqual(new_lines, current_lines)
    def test_nat_rules(self):
        # The wrapped nat chains must exist, contain no duplicates, and
        # POSTROUTING must end by jumping to nova-postrouting-bottom.
        current_lines = self.sample_nat
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['nat'],
                                               'nat')
        for line in [':%s-OUTPUT - [0:0]' % (self.binary_name),
                     ':%s-float-snat - [0:0]' % (self.binary_name),
                     ':%s-snat - [0:0]' % (self.binary_name),
                     ':%s-PREROUTING - [0:0]' % (self.binary_name),
                     ':%s-POSTROUTING - [0:0]' % (self.binary_name)]:
            self.assertTrue(line in new_lines, "One of our chains went"
                            " missing.")
        seen_lines = set()
        for line in new_lines:
            line = line.strip()
            self.assertTrue(line not in seen_lines,
                            "Duplicate line: %s" % line)
            seen_lines.add(line)
        last_postrouting_line = ''
        for line in new_lines:
            if line.startswith('[0:0] -A POSTROUTING'):
                last_postrouting_line = line
        self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
                        "Last POSTROUTING rule does not jump to "
                        "nova-postouting-bottom: %s" % last_postrouting_line)
        for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
            self.assertTrue('[0:0] -A %s -j %s-%s' %
                            (chain, self.binary_name, chain) in new_lines,
                            "Built-in chain %s not wrapped" % (chain,))
    def test_filter_rules(self):
        current_lines = self.sample_filter
        # NOTE(review): passing 'nat' as the table-name argument alongside
        # the *filter* table looks like a copy/paste slip -- confirm
        # against _modify_rules before changing.
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'],
                                               'nat')
        for line in [':%s-FORWARD - [0:0]' % (self.binary_name),
                     ':%s-INPUT - [0:0]' % (self.binary_name),
                     ':%s-local - [0:0]' % (self.binary_name),
                     ':%s-OUTPUT - [0:0]' % (self.binary_name)]:
            self.assertTrue(line in new_lines, "One of our chains went"
                            " missing.")
        seen_lines = set()
        for line in new_lines:
            line = line.strip()
            self.assertTrue(line not in seen_lines,
                            "Duplicate line: %s" % line)
            seen_lines.add(line)
        # The first FORWARD/OUTPUT rule must divert through nova-filter-top.
        for chain in ['FORWARD', 'OUTPUT']:
            for line in new_lines:
                if line.startswith('[0:0] -A %s' % chain):
                    self.assertTrue('-j nova-filter-top' in line,
                                    "First %s rule does not "
                                    "jump to nova-filter-top" % chain)
                    break
        self.assertTrue('[0:0] -A nova-filter-top '
                        '-j %s-local' % self.binary_name in new_lines,
                        "nova-filter-top does not jump to wrapped local chain")
        for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
            self.assertTrue('[0:0] -A %s -j %s-%s' %
                            (chain, self.binary_name, chain) in new_lines,
                            "Built-in chain %s not wrapped" % (chain,))
    def test_missing_table(self):
        # Starting from empty input, a complete table skeleton is emitted.
        current_lines = []
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'],
                                               'filter')
        for line in ['*filter',
                     'COMMIT']:
            self.assertTrue(line in new_lines, "One of iptables key lines"
                            "went missing.")
        self.assertTrue(len(new_lines) > 4, "No iptables rules added")
        self.assertTrue("#Generated by nova" == new_lines[0] and
                        "*filter" == new_lines[1] and
                        "COMMIT" == new_lines[-2] and
                        "#Completed by nova" == new_lines[-1],
                        "iptables rules not generated in the correct order")
    def test_iptables_top_order(self):
        # Test iptables_top_regex
        current_lines = list(self.sample_filter)
        # Index 12 is the first '[0:0] -A FORWARD ...' rule in the sample.
        current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
        self.flags(iptables_top_regex='-j iptables-top-rule')
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'],
                                               'filter')
        self.assertEqual(current_lines, new_lines)
    def test_iptables_bottom_order(self):
        # Test iptables_bottom_regex
        current_lines = list(self.sample_filter)
        current_lines[26:26] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
        self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'],
                                               'filter')
        self.assertEqual(current_lines, new_lines)
    def test_iptables_preserve_order(self):
        # Test both iptables_top_regex and iptables_bottom_regex
        current_lines = list(self.sample_filter)
        current_lines[12:12] = ['[0:0] -A FORWARD -j iptables-top-rule']
        current_lines[27:27] = ['[0:0] -A FORWARD -j iptables-bottom-rule']
        self.flags(iptables_top_regex='-j iptables-top-rule')
        self.flags(iptables_bottom_regex='-j iptables-bottom-rule')
        new_lines = self.manager._modify_rules(current_lines,
                                               self.manager.ipv4['filter'],
                                               'filter')
        self.assertEqual(current_lines, new_lines)
| apache-2.0 |
kingvuplus/ME-TEST2 | lib/python/Plugins/Extensions/PicturePlayer/plugin.py | 81 | 1489 | from Plugins.Plugin import PluginDescriptor
#------------------------------------------------------------------------------------------
def Pic_Thumb(*args, **kwa):
    """Forward to ui.Pic_Thumb, importing the UI module lazily."""
    from ui import Pic_Thumb as thumb_screen
    return thumb_screen(*args, **kwa)
def picshow(*args, **kwa):
    """Forward to ui.picshow, importing the UI module lazily."""
    from ui import picshow as browser_screen
    return browser_screen(*args, **kwa)
def main(session, **kwargs):
    """Plugin-menu entry point: open the picture browser screen."""
    from ui import picshow as browser_screen
    session.open(browser_screen)
def filescan_open(list, session, **kwargs):
    """Open the picture viewer for files found by the file scanner.

    Rebuilds the scanner result as the ((path, isDir), thumb) tuple list
    expected by Pic_Full_View.

    Bug fix: the original read the loop variable ``file`` *after* the list
    comprehension, which only worked through Python 2's comprehension-scope
    leak (and picked the last scanned file); on Python 3 it raises
    NameError.  The last scanned path is now taken explicitly.
    """
    # Recreate List as expected by PicView
    filelist = [((f.path, False), None) for f in list]
    from ui import Pic_Full_View
    # Matches the historical behaviour: start from the last scanned path.
    session.open(Pic_Full_View, filelist, 0, list[-1].path)
def filescan(**kwargs):
    """Build the Scanner that advertises picture formats to the file scanner."""
    from Components.Scanner import Scanner, ScanPath
    import os

    # Overwrite checkFile to only detect local files.
    class LocalScanner(Scanner):
        def checkFile(self, file):
            return os.path.exists(file.path)

    return LocalScanner(
        mimetypes=["image/jpeg", "image/png", "image/gif", "image/bmp"],
        paths_to_scan=[
            ScanPath(path="DCIM", with_subdirs=True),
            ScanPath(path="", with_subdirs=False),
        ],
        name="Pictures",
        description=_("View photos..."),
        openfnc=filescan_open,
    )
def Plugins(**kwargs):
    """Entry point: register the player in the plugin menu and as a file scanner."""
    return \
	[PluginDescriptor(name=_("Picture player"), description=_("fileformats (BMP, PNG, JPG, GIF)"), icon="pictureplayer.png", where = PluginDescriptor.WHERE_PLUGINMENU, needsRestart = False, fnc=main),
	 PluginDescriptor(name=_("Picture player"), where = PluginDescriptor.WHERE_FILESCAN, needsRestart = False, fnc = filescan)]
| gpl-2.0 |
koolfreak/volunteer_planner | scheduler/migrations/0008_blueprint_needcreator.py | 17 | 1714 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Applies on top of the 0007 scheduler migration.
    dependencies = [
        ('scheduler', '0007_auto_20150819_0138'),
    ]
    # Adds the BluePrint and NeedCreator tables (German verbose names are
    # user-facing admin labels and must stay as-is).
    operations = [
        migrations.CreateModel(
            name='BluePrint',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('titel', models.CharField(max_length=255, blank=True)),
                ('day', models.DateField(verbose_name=b'Tag, der als Vorlage dient')),
                ('location', models.ForeignKey(verbose_name=b'Ort', to='scheduler.Location')),
            ],
        ),
        migrations.CreateModel(
            name='NeedCreator',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('time_from', models.CharField(help_text=b'Format: 07:30', max_length=5,
                                               verbose_name=b'Uhrzeit Anfang')),
                ('time_to', models.CharField(help_text=b'Format: 07:30', max_length=5, verbose_name=b'Uhrzeit Ende')),
                ('slots', models.IntegerField(verbose_name=b'Anz. benoetigter Freiwillige', blank=True)),
                ('apply_from', models.DateField(verbose_name=b'anwenden ab dem Tag')),
                ('apply_to', models.DateField(verbose_name=b'anwenden bis dem Tag')),
                ('location', models.ForeignKey(verbose_name=b'Ort', to='scheduler.Location')),
                ('topic', models.ForeignKey(verbose_name=b'Hilfetyp', to='scheduler.Topics')),
            ],
        ),
    ]
| agpl-3.0 |
wunderlins/learning | python/zodb/lib/osx/persistent/tests/utils.py | 2 | 2415 |
class ResettingJar(object):
    """Testing stub standing in for a persistent object's ``_p_jar``.

    Objects added to the jar receive a sequential 8-byte big-endian oid;
    ``setstate`` simply re-runs ``__init__`` rather than restoring real
    pickled state.
    """

    def __init__(self):
        from persistent import PickleCache  # XXX stub it!
        from persistent.interfaces import IPersistentDataManager
        from zope.interface import directlyProvides
        cache = PickleCache(self)
        self.cache = self._cache = cache
        self.oid = 1
        self.registered = {}
        directlyProvides(self, IPersistentDataManager)

    def add(self, obj):
        import struct
        packed_oid = struct.pack(">Q", self.oid)
        self.oid += 1
        obj._p_oid = packed_oid
        obj._p_jar = self
        self.cache[packed_oid] = obj

    def close(self):
        pass

    # The methods below form the minimal data-manager (jar) interface.

    def setklassstate(self):
        # Called by the PickleCache constructor; purpose unknown, no-op here.
        pass

    def register(self, obj):
        self.registered[obj] = 1

    def setstate(self, obj):
        # Trivial setstate(): re-initialize the object.  Not what a real
        # setstate() does, but sufficient for these tests.
        obj.__class__.__init__(obj)
class RememberingJar(object):
    """Testing stub for ``_p_jar`` that snapshots object state.

    Unlike ResettingJar, this jar records the state of the last object
    added so that ``setstate`` can restore it later.
    """

    def __init__(self):
        from persistent import PickleCache  # XXX stub it!
        self.cache = PickleCache(self)
        self.oid = 1
        self.registered = {}

    def add(self, obj):
        import struct
        packed_oid = struct.pack(">Q", self.oid)
        self.oid += 1
        obj._p_oid = packed_oid
        obj._p_jar = self
        self.cache[packed_oid] = obj
        # Remember the object's state for later restoration.
        self.obj = obj
        self.remembered = obj.__getstate__()

    def close(self):
        pass

    def fake_commit(self):
        # Pretend a transaction committed: re-snapshot and clear the flag.
        self.remembered = self.obj.__getstate__()
        self.obj._p_changed = 0

    # The methods below form the minimal data-manager (jar) interface.

    def setklassstate(self):
        # Called by the PickleCache constructor; purpose unknown, no-op here.
        pass

    def register(self, obj):
        self.registered[obj] = 1

    def setstate(self, obj):
        # Restore the state captured when the object was added to the jar.
        # Not what a real setstate() does, but sufficient for these tests.
        obj.__setstate__(self.remembered)
| gpl-2.0 |
dbrattli/RxPY | rx/observable.py | 1 | 1867 | import types
from rx import Lock
from .observer import Observer, AbstractObserver
class Observable(object):
    """Represents a push-style collection."""

    _methods = []

    def __init__(self, subscribe):
        self._subscribe = subscribe
        self.lock = Lock()
        # Deferred method assignment: bind each registered extension
        # method onto this instance.
        for method_name, unbound in self._methods:
            setattr(self, method_name, types.MethodType(unbound, self))

    def subscribe(self, on_next=None, on_error=None, on_completed=None,
                  observer=None):
        """Subscribes an observer to the observable sequence.

        May be invoked as any of:
        1 - source.subscribe()
        2 - source.subscribe(observer)
        3 - source.subscribe(on_next)
        4 - source.subscribe(on_next, on_error)
        5 - source.subscribe(on_next, on_error, on_completed)

        Keyword arguments:
        on_next -- [Optional] Action to invoke for each element in the
            observable sequence.
        on_error -- [Optional] Action to invoke upon exceptional
            termination of the observable sequence.
        on_completed -- [Optional] Action to invoke upon graceful
            termination of the observable sequence.
        observer -- [Optional] The object that is to receive
            notifications.  Subscribe with an observer or callbacks, not
            both.

        Returns {Disposable} the source sequence's subscription."""
        # Be forgiving: accept an un-named observer as the first
        # positional parameter.
        if isinstance(on_next, AbstractObserver):
            target = on_next
        elif not observer:
            target = Observer(on_next, on_error, on_completed)
        else:
            target = observer
        return self._subscribe(target)
| apache-2.0 |
tchernomax/ansible | lib/ansible/modules/network/nso/nso_query.py | 52 | 3295 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: nso_query
extends_documentation_fragment: nso
short_description: Query data from Cisco NSO.
description:
- This module provides support for querying data from Cisco NSO using XPath.
requirements:
- Cisco NSO version 3.4 or higher.
author: "Claes Nästén (@cnasten)"
options:
xpath:
description: XPath selection relative to the root.
required: true
fields:
description: >
List of fields to select from matching nodes.
required: true
version_added: "2.5"
'''
EXAMPLES = '''
- name: Select device name and description
nso_query:
url: http://localhost:8080/jsonrpc
username: username
password: password
xpath: /ncs:devices/device
fields:
- name
- description
'''
RETURN = '''
output:
description: Value of matching nodes
returned: success
type: list
'''
from ansible.module_utils.network.nso.nso import connect, verify_version, nso_argument_spec
from ansible.module_utils.network.nso.nso import ModuleFailException, NsoException
from ansible.module_utils.basic import AnsibleModule
class NsoQuery(object):
    """Run an XPath query against a Cisco NSO instance via a JSON-RPC client.

    In Ansible check mode no query is issued and an empty result is
    reported instead.
    """

    # Minimum NSO versions the query API is known to work with.
    REQUIRED_VERSIONS = [
        (3, 4)
    ]

    def __init__(self, check_mode, client, xpath, fields):
        self._check_mode = check_mode
        self._client = client
        self._xpath = xpath
        self._fields = fields

    def main(self):
        """Execute the query and return the matching rows."""
        # Check mode must not touch the NSO server at all.
        return [] if self._check_mode else self._client.query(self._xpath,
                                                              self._fields)
def main():
    """Ansible module entry point: parse arguments, query NSO, report."""
    spec = dict(
        xpath=dict(required=True, type='str'),
        fields=dict(required=True, type='list')
    )
    spec.update(nso_argument_spec)

    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True
    )
    params = module.params

    client = connect(params)
    query = NsoQuery(
        module.check_mode, client,
        params['xpath'], params['fields'])
    try:
        verify_version(client, NsoQuery.REQUIRED_VERSIONS)
        output = query.main()
        client.logout()
        module.exit_json(changed=False, output=output)
    except (NsoException, ModuleFailException) as ex:
        # Always release the NSO session before reporting the failure.
        client.logout()
        module.fail_json(msg=ex.message)


if __name__ == '__main__':
    main()
| gpl-3.0 |
bwrsandman/OpenUpgrade | addons/crm/report/__init__.py | 313 | 1127 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_lead_report
import crm_opportunity_report
import crm_phonecall_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Zhongqilong/mykbengineer | kbe/src/lib/python/Lib/distutils/tests/test_spawn.py | 146 | 1857 | """Tests for distutils.spawn."""
import unittest
import os
import time
from test.support import captured_stdout, run_unittest
from distutils.spawn import _nt_quote_args
from distutils.spawn import spawn, find_executable
from distutils.errors import DistutilsExecError
from distutils.tests import support
class SpawnTestCase(support.TempdirManager,
                    support.LoggingSilencer,
                    unittest.TestCase):
    """Exercise distutils.spawn helpers (_nt_quote_args and spawn)."""

    def test_nt_quote_args(self):
        cases = (
            (['with space', 'nospace'], ['"with space"', 'nospace']),
            (['nochange', 'nospace'], ['nochange', 'nospace']),
        )
        for args, expected in cases:
            self.assertEqual(_nt_quote_args(args), expected)

    @unittest.skipUnless(os.name in ('nt', 'posix'),
                         'Runs only under posix or nt')
    def test_spawn(self):
        tmpdir = self.mkdtemp()

        def make_script(body):
            # Write a tiny executable script appropriate for the platform.
            if os.name == 'posix':
                path = os.path.join(tmpdir, 'foo.sh')
                self.write_file(path, '#!/bin/sh\n' + body)
            else:
                path = os.path.join(tmpdir, 'foo.bat')
                self.write_file(path, body)
            os.chmod(path, 0o777)
            return path

        # A command exiting non-zero must raise DistutilsExecError.
        exe = make_script('exit 1')
        self.assertRaises(DistutilsExecError, spawn, [exe])

        # A command exiting zero should run without any error.
        exe = make_script('exit 0')
        spawn([exe])
def test_suite():
    """Return the suite of SpawnTestCase tests.

    Uses ``unittest.TestLoader`` directly instead of ``unittest.makeSuite``,
    which is deprecated since Python 3.11 and removed in 3.13; the loader
    call is available on all supported versions and behaves identically.
    """
    return unittest.defaultTestLoader.loadTestsFromTestCase(SpawnTestCase)


if __name__ == "__main__":
    run_unittest(test_suite())
| lgpl-3.0 |
AdaptiveApplications/carnegie | tarc_bus_locator_client/protobuf-2.5.0/python/build/lib.linux-x86_64-2.7/google/protobuf/service.py | 590 | 9131 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
    """Raised when a blocking RPC method call fails."""
class Service(object):
    """Abstract base interface for protocol-buffer-based RPC services.

    Concrete services (server implementations as well as client stubs)
    subclass this interface.  Its methods let a service's RPC methods be
    invoked without knowing the service's exact type at compile time,
    analogous to the Message interface.
    """

    # NOTE: deliberately declared without 'self'; generated service code is
    # expected to override this with a usable implementation.
    def GetDescriptor():
        """Retrieves this service's descriptor."""
        raise NotImplementedError

    def CallMethod(self, method_descriptor, rpc_controller,
                   request, done):
        """Calls the service method described by *method_descriptor*.

        When *done* is None the call blocks and the response message is
        returned directly; RpcException is raised on error.  Otherwise the
        call is asynchronous and *done* is later invoked with the response
        value (None if the RPC failed; details are available from the
        rpc_controller).

        Preconditions: *method_descriptor* belongs to this service's
        descriptor, *request* is an instance of the class returned by
        GetRequestClass(method) and must not be modified once the call has
        started, and *rpc_controller* is of the type expected by the RPC
        implementation in use.
        """
        raise NotImplementedError

    def GetRequestClass(self, method_descriptor):
        """Returns the class of the request message for the given method.

        CallMethod() requires the request to be of a particular Message
        subclass; this returns that required class, e.g.::

            method = service.GetDescriptor().FindMethodByName("Foo")
            request = stub.GetRequestClass(method)()
            request.ParseFromString(input)
            service.CallMethod(method, request, callback)
        """
        raise NotImplementedError

    def GetResponseClass(self, method_descriptor):
        """Returns the class of the response message for the given method.

        Provided for callers that want to know the response type in
        advance; the RpcChannel normally constructs the response itself.
        """
        raise NotImplementedError
class RpcController(object):
    """Mediates a single RPC method call.

    The controller exposes RPC-implementation-specific settings and
    RPC-level error information.  The methods here are a "least common
    denominator" that all implementations are expected to support;
    specific implementations may add more advanced features.
    """

    # ---- Client-side methods ----------------------------------------

    def Reset(self):
        """Resets the controller to its initial state so it can be reused.

        Must not be called while an RPC is in progress.
        """
        raise NotImplementedError

    def Failed(self):
        """Returns true if the finished call failed.

        Must only be called after the call has finished; when it returns
        true the contents of the response message are undefined.
        """
        raise NotImplementedError

    def ErrorText(self):
        """If Failed is true, returns a human-readable description of the error."""
        raise NotImplementedError

    def StartCancel(self):
        """Advises the RPC system that the caller wants the call canceled.

        The RPC system may cancel immediately, later, or not at all.  If
        the call is canceled, the "done" callback is still invoked and the
        controller then reports the call as failed.
        """
        raise NotImplementedError

    # ---- Server-side methods ----------------------------------------

    def SetFailed(self, reason):
        """Marks the call as failed with a human-readable *reason*.

        Causes Failed() to return true on the client side; *reason* is
        incorporated into ErrorText().  Machine-readable failure details
        belong in the response message, not here.
        """
        raise NotImplementedError

    def IsCanceled(self):
        """Returns true if the client canceled the RPC.

        The server may then give up on producing a response, but must
        still invoke the final "done" callback.
        """
        raise NotImplementedError

    def NotifyOnCancel(self, callback):
        """Requests that *callback* be invoked when the RPC is canceled.

        The callback is always invoked exactly once: on cancellation, on
        completion, or immediately if the RPC was already canceled.  May
        be called at most once per request.
        """
        raise NotImplementedError
class RpcChannel(object):
    """Abstract interface for an RPC channel.

    A channel represents a communication line to a (possibly remote)
    service.  Normally it is not used directly; instead a generated stub
    Service wraps it::

        channel = rpcImpl.Channel("remotehost.example.com:1234")
        controller = rpcImpl.Controller()
        service = MyService_Stub(channel)
        service.MyMethod(controller, request, callback)
    """

    def CallMethod(self, method_descriptor, rpc_controller,
                   request, response_class, done):
        """Calls the remote method identified by *method_descriptor*.

        Signature mirrors Service.CallMethod() but is less strict about
        the request: any message whose descriptor equals the method's
        input type is accepted.
        """
        raise NotImplementedError
| mit |
thisispuneet/potato-blog | django/contrib/gis/utils/srs.py | 311 | 3157 | from django.contrib.gis.gdal import SpatialReference
from django.db import connections, DEFAULT_DB_ALIAS
def add_srs_entry(srs, auth_name='EPSG', auth_srid=None, ref_sys_name=None,
                  database=DEFAULT_DB_ALIAS):
    """
    This function takes a GDAL SpatialReference system and adds its information
    to the `spatial_ref_sys` table of the spatial backend.  Doing this enables
    database-level spatial transformations for the backend.  Thus, this utility
    is useful for adding spatial reference systems not included by default with
    the backend -- for example, the so-called "Google Maps Mercator Projection"
    is excluded in PostGIS 1.3 and below, and the following adds it to the
    `spatial_ref_sys` table:

    >>> from django.contrib.gis.utils import add_srs_entry
    >>> add_srs_entry(900913)

    Keyword Arguments:
     auth_name:
       This keyword may be customized with the value of the `auth_name` field.
       Defaults to 'EPSG'.

     auth_srid:
       This keyword may be customized with the value of the `auth_srid` field.
       Defaults to the SRID determined by GDAL.

     ref_sys_name:
       For SpatiaLite users only, sets the value of the `ref_sys_name` field.
       Defaults to the name determined by GDAL.

     database:
      The name of the database connection to use; the default is the value
      of `django.db.DEFAULT_DB_ALIAS` (at the time of this writing, its value
      is 'default').
    """
    connection = connections[database]

    # Only spatial backends carry a spatial_ref_sys table.
    if not hasattr(connection.ops, 'spatial_version'):
        raise Exception('The `add_srs_entry` utility only works '
                        'with spatial backends.')
    if connection.ops.oracle or connection.ops.mysql:
        raise Exception('This utility does not support the '
                        'Oracle or MySQL spatial backends.')
    SpatialRefSys = connection.ops.spatial_ref_sys()

    # If argument is not a `SpatialReference` instance, use it as parameter
    # to construct a `SpatialReference` instance.
    if not isinstance(srs, SpatialReference):
        srs = SpatialReference(srs)

    if srs.srid is None:
        raise Exception('Spatial reference requires an SRID to be '
                        'compatible with the spatial backend.')

    # Initializing the keyword arguments dictionary for both PostGIS
    # and SpatiaLite.
    kwargs = {'srid' : srs.srid,
              'auth_name' : auth_name,
              'auth_srid' : auth_srid or srs.srid,
              'proj4text' : srs.proj4,
              }

    # Backend-specific fields for the SpatialRefSys model.
    if connection.ops.postgis:
        kwargs['srtext'] = srs.wkt
    if connection.ops.spatialite:
        kwargs['ref_sys_name'] = ref_sys_name or srs.name

    # Creating the spatial_ref_sys model.
    try:
        # Try getting via SRID only, because using all kwargs may
        # differ from exact wkt/proj in database.
        sr = SpatialRefSys.objects.get(srid=srs.srid)
    except SpatialRefSys.DoesNotExist:
        sr = SpatialRefSys.objects.create(**kwargs)
# Alias is for backwards-compatibility purposes.
add_postgis_srs = add_srs_entry
| bsd-3-clause |
ligthyear/wokkel | wokkel/compat.py | 2 | 3735 | # -*- test-case-name: wokkel.test.test_compat -*-
#
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.internet import protocol
from twisted.words.protocols.jabber import xmlstream
class BootstrapMixin(object):
    """
    XmlStream factory mixin that manages bootstrap event observers.

    Factories providing
    L{IProtocolFactory<twisted.internet.interfaces.IProtocolFactory>} use
    this mixin to make sure bootstrap event observers are installed on
    protocols before incoming data is processed.  Such protocols typically
    derive from L{utility.EventDispatcher}, like L{XmlStream}.

    Observers are registered with C{addBootstrap}; the C{event} and C{fn}
    parameters correspond with the C{event} and C{observerfn} arguments to
    L{utility.EventDispatcher.addObserver}.

    @since: 8.2.
    @ivar bootstraps: Registered bootstrap event observers as
        C{(event, fn)} pairs.
    @type bootstrap: C{list}
    """

    def __init__(self):
        self.bootstraps = []

    def installBootstraps(self, dispatcher):
        """
        Install every registered bootstrap observer on C{dispatcher}.

        @param dispatcher: Event dispatcher to add the observers to.
        @type dispatcher: L{utility.EventDispatcher}
        """
        for event, observerfn in self.bootstraps:
            dispatcher.addObserver(event, observerfn)

    def addBootstrap(self, event, fn):
        """
        Register a bootstrap event handler.

        @param event: The event to register an observer for.
        @type event: C{str} or L{xpath.XPathQuery}
        @param fn: The observer callable to be registered.
        """
        self.bootstraps.append((event, fn))

    def removeBootstrap(self, event, fn):
        """
        Unregister a previously added bootstrap event handler.

        @param event: The event the observer is registered for.
        @type event: C{str} or L{xpath.XPathQuery}
        @param fn: The registered observer callable.
        """
        self.bootstraps.remove((event, fn))
class XmlStreamServerFactory(BootstrapMixin,
                             protocol.ServerFactory):
    """
    Server-side factory for Jabber XmlStream protocols.

    @since: 8.2.
    @ivar authenticatorFactory: No-argument callable producing a fresh
        authenticator for each new XmlStream.
    """

    protocol = xmlstream.XmlStream

    def __init__(self, authenticatorFactory):
        BootstrapMixin.__init__(self)
        self.authenticatorFactory = authenticatorFactory

    def buildProtocol(self, addr):
        """
        Create an XmlStream with a fresh authenticator.

        Registered bootstrap event observers are installed on the new
        stream before it is returned.
        """
        xs = self.protocol(self.authenticatorFactory())
        xs.factory = self
        self.installBootstraps(xs)
        return xs
class IQ(xmlstream.IQ):
    """Compatibility subclass of L{xmlstream.IQ} that handles a C{reactor}
    keyword across Twisted versions.

    Newer Twisted IQ implementations accept a C{reactor} parameter; older
    ones do not.  This shim supplies the global reactor when none is given
    and falls back gracefully when the parent rejects the keyword.
    """

    def __init__(self, *args, **kwargs):
        # Make sure we have a reactor parameter
        try:
            reactor = kwargs['reactor']
        except KeyError:
            # None supplied: fall back to the global reactor.
            from twisted.internet import reactor
            kwargs['reactor'] = reactor
        # Check if IQ's init accepts the reactor parameter
        try:
            xmlstream.IQ.__init__(self, *args, **kwargs)
        except TypeError:
            # Guess not. Remove the reactor parameter and try again.
            del kwargs['reactor']
            xmlstream.IQ.__init__(self, *args, **kwargs)
            # Patch the XmlStream instance so that it has a _callLater
            # (the local 'reactor' is either the caller-supplied one or
            # the imported global reactor).
            self._xmlstream._callLater = reactor.callLater
| mit |
sdarji/lpthw | Lib/site-packages/setuptools/command/setopt.py | 125 | 5068 | import os
import distutils
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
    """Get the filename of the distutils, local, global, or per-user config.

    `kind` must be one of "local", "global", or "user"; any other value
    raises ValueError.
    """
    if kind == 'local':
        return 'setup.cfg'
    if kind == 'global':
        return os.path.join(
            os.path.dirname(distutils.__file__), 'distutils.cfg'
        )
    if kind == 'user':
        # Hidden dotfile on POSIX (~/.pydistutils.cfg), plain name elsewhere.
        # (Conditional expression replaces the fragile `and/or` idiom.)
        dot = '.' if os.name == 'posix' else ''
        return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
    raise ValueError(
        "config_file() type must be 'local', 'global', or 'user'", kind
    )
def edit_config(filename, settings, dry_run=False):
    """Edit a configuration file to include `settings`

    `settings` is a dictionary of dictionaries or ``None`` values, keyed by
    command/section name.  A ``None`` value means to delete the entire section,
    while a dictionary lists settings to be changed or deleted in that section.
    A setting of ``None`` means to delete that setting.

    With ``dry_run`` true, the edits are computed and logged but the file
    is not rewritten.
    """
    # Imported lazily from the project's py2/py3 compatibility shim.
    from setuptools.compat import ConfigParser

    log.debug("Reading configuration from %s", filename)
    opts = ConfigParser.RawConfigParser()
    opts.read([filename])
    for section, options in settings.items():
        if options is None:
            log.info("Deleting section [%s] from %s", section, filename)
            opts.remove_section(section)
        else:
            if not opts.has_section(section):
                log.debug("Adding new section [%s] to %s", section, filename)
                opts.add_section(section)
            for option, value in options.items():
                if value is None:
                    log.debug(
                        "Deleting %s.%s from %s",
                        section, option, filename
                    )
                    opts.remove_option(section, option)
                    # Drop the section entirely once its last option is gone.
                    if not opts.options(section):
                        log.info("Deleting empty [%s] section from %s",
                                 section, filename)
                        opts.remove_section(section)
                else:
                    log.debug(
                        "Setting %s.%s to %r in %s",
                        section, option, value, filename
                    )
                    opts.set(section, option, value)

    log.info("Writing %s", filename)
    if not dry_run:
        with open(filename, 'w') as f:
            opts.write(f)
class option_base(Command):
    """Abstract base class for commands that mess with config files"""

    user_options = [
        ('global-config', 'g',
         "save options to the site-wide distutils.cfg file"),
        ('user-config', 'u',
         "save options to the current user's pydistutils.cfg file"),
        ('filename=', 'f',
         "configuration file to use (default=setup.cfg)"),
    ]

    boolean_options = [
        'global-config', 'user-config',
    ]

    def initialize_options(self):
        self.global_config = None
        self.user_config = None
        self.filename = None

    def finalize_options(self):
        # Collect every config file selected by the options; exactly one
        # must remain (defaulting to the local setup.cfg).
        targets = []
        if self.global_config:
            targets.append(config_file('global'))
        if self.user_config:
            targets.append(config_file('user'))
        if self.filename is not None:
            targets.append(self.filename)
        if not targets:
            targets.append(config_file('local'))
        if len(targets) > 1:
            raise DistutilsOptionError(
                "Must specify only one configuration file option",
                targets
            )
        self.filename, = targets
class setopt(option_base):
    """Save command-line options to a file"""

    description = "set an option in setup.cfg or another config file"

    user_options = [
        ('command=', 'c', 'command to set an option for'),
        ('option=', 'o', 'option to set'),
        ('set-value=', 's', 'value of the option'),
        ('remove', 'r', 'remove (unset) the value'),
    ] + option_base.user_options

    boolean_options = option_base.boolean_options + ['remove']

    def initialize_options(self):
        option_base.initialize_options(self)
        self.command = None
        self.option = None
        self.set_value = None
        self.remove = None

    def finalize_options(self):
        option_base.finalize_options(self)
        if self.command is None or self.option is None:
            raise DistutilsOptionError("Must specify --command *and* --option")
        if self.set_value is None and not self.remove:
            raise DistutilsOptionError("Must specify --set-value or --remove")

    def run(self):
        # Option names use dashes on the command line but underscores in
        # the config file.
        key = self.option.replace('-', '_')
        edit_config(
            self.filename,
            {self.command: {key: self.set_value}},
            self.dry_run
        )
| unlicense |
juergenhamel/cuon | cuon_client/cuon/Finances/InvoiceBook/SingleInpayment.py | 2 | 1544 | # -*- coding: utf-8 -*-
##Copyright (C) [2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
class SingleInpayment(SingleData):
    """GTK list model wrapper for the `in_payment` table."""

    def __init__(self, allTables):
        SingleData.__init__(self)
        # tables.dbd and address
        self.sNameOfTable = "in_payment"
        self.xmlTableDef = 0
        # self.loadTable()
        # self.saveTable()
        self.loadTable(allTables)
        # Backing store: two display strings plus the record ID column.
        self.setStore( gtk.ListStore(gobject.TYPE_STRING, gobject.TYPE_STRING, gobject.TYPE_UINT) )
        self.listHeader['names'] = ['title', 'designation', 'ID']
        # NOTE(review): 'size' has 5 entries but 'names' only 3 — presumably
        # extra widths are ignored; confirm against SingleData's list setup.
        self.listHeader['size'] = [25,10,25,25,10]
        # Python 2 print statements, apparently debug output.
        print "number of Columns "
        print len(self.table.Columns)
| gpl-3.0 |
mrquim/repository.mrquim | script.module.covenant/lib/resources/lib/modules/utils.py | 6 | 1811 | # -*- coding: utf-8 -*-
"""
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import json, re
def json_load_as_str(file_handle):
    """Deserialize JSON from *file_handle* with all strings as UTF-8 byte strings."""
    decoded = json.load(file_handle, object_hook=byteify)
    return byteify(decoded, ignore_dicts=True)
def json_loads_as_str(json_text):
    """Deserialize the JSON string *json_text* with all strings as UTF-8 byte strings."""
    decoded = json.loads(json_text, object_hook=byteify)
    return byteify(decoded, ignore_dicts=True)
def byteify(data, ignore_dicts=False):
    """Recursively convert unicode strings in *data* to UTF-8 byte strings.

    Dicts are only converted when ignore_dicts is false; the json
    object_hook already passes dicts through once, so recursion sets
    ignore_dicts=True to avoid double conversion.  (Python 2 only.)
    """
    if isinstance(data, unicode):
        return data.encode('utf-8')
    if isinstance(data, list):
        return [byteify(item, ignore_dicts=True) for item in data]
    if isinstance(data, dict) and not ignore_dicts:
        return dict(
            (byteify(k, ignore_dicts=True), byteify(v, ignore_dicts=True))
            for k, v in data.iteritems()
        )
    return data
def title_key(title):
    """Return *title* with a leading English/German article stripped, for sorting.

    Strips 'the', 'a', 'an', 'der', 'die', 'das' (case-insensitively) so
    that e.g. 'The Matrix' sorts under 'M'.  None becomes ''.  On any
    unexpected error the title is returned unchanged.
    """
    try:
        if title is None:
            title = ''
        articles = ['the', 'a', 'an', 'der', 'die', 'das']
        match = re.match(r'^((\w+)\s+)', title.lower())
        # Drop the article plus its trailing whitespace when one matches.
        offset = len(match.group(1)) if match and match.group(2) in articles else 0
        return title[offset:]
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # propagate; sorting still never crashes on malformed input.
        return title
| gpl-2.0 |
snahelou/awx | awx/main/tests/functional/models/test_inventory.py | 1 | 4620 | import pytest
import mock
from django.core.exceptions import ValidationError
# AWX
from awx.main.models import (
Host,
Inventory,
InventorySource,
InventoryUpdate,
)
from awx.main.utils.filters import SmartFilter
@pytest.mark.django_db
class TestSCMUpdateFeatures:
    """Behavior of SCM-backed inventory sources around project updates."""

    def test_automatic_project_update_on_create(self, inventory, project):
        # Saving a new scm source with update_on_project_update=True must
        # trigger exactly one update() call.
        inv_src = InventorySource(
            source_project=project,
            source_path='inventory_file',
            inventory=inventory,
            update_on_project_update=True,
            source='scm')
        with mock.patch.object(inv_src, 'update') as mck_update:
            inv_src.save()
            mck_update.assert_called_once_with()

    def test_reset_scm_revision(self, scm_inventory_source):
        # Changing the source path invalidates the recorded SCM revision.
        starting_rev = scm_inventory_source.scm_last_revision
        assert starting_rev != ''
        scm_inventory_source.source_path = '/newfolder/newfile.ini'
        scm_inventory_source.save()
        assert scm_inventory_source.scm_last_revision == ''

    def test_source_location(self, scm_inventory_source):
        # Combines project directory with the inventory file specified
        inventory_update = InventoryUpdate(
            inventory_source=scm_inventory_source,
            source_path=scm_inventory_source.source_path)
        assert inventory_update.get_actual_source_path().endswith('_92__test_proj/inventory_file')

    def test_no_unwanted_updates(self, scm_inventory_source):
        # Changing the non-sensitive fields should not trigger update
        with mock.patch.object(scm_inventory_source.source_project, 'update') as mck_update:
            scm_inventory_source.name = 'edited_inventory'
            scm_inventory_source.description = "I'm testing this!"
            scm_inventory_source.save()
            assert not mck_update.called
@pytest.mark.django_db
class TestSCMClean:
    """Validation rules for update_on_project_update scm sources."""

    def test_clean_update_on_project_update_multiple(self, inventory):
        # A first scm source with update_on_project_update passes validation...
        inv_src1 = InventorySource(inventory=inventory,
                                   update_on_project_update=True,
                                   source='scm')
        inv_src1.clean_update_on_project_update()
        inv_src1.save()

        # ...and re-validating the same (edited) source is still allowed.
        inv_src1.source_vars = '---\nhello: world'
        inv_src1.clean_update_on_project_update()

        # A second such source on the same inventory must fail validation.
        inv_src2 = InventorySource(inventory=inventory,
                                   update_on_project_update=True,
                                   source='scm')
        with pytest.raises(ValidationError):
            inv_src2.clean_update_on_project_update()
@pytest.fixture
def setup_ec2_gce(organization):
    """Create one ec2-backed inventory (2 hosts) and one gce-backed (1 host)."""
    # ec2 inventory with two hosts attached to its source.
    ec2_inv = Inventory.objects.create(name='test_ec2', organization=organization)
    ec2_source = ec2_inv.inventory_sources.create(name='test_ec2_source', source='ec2')
    for i in range(2):
        ec2_host = ec2_inv.hosts.create(name='test_ec2_{0}'.format(i))
        ec2_host.inventory_sources.add(ec2_source)
    ec2_inv.save()

    # gce inventory with a single host attached to its source.
    gce_inv = Inventory.objects.create(name='test_gce', organization=organization)
    gce_source = gce_inv.inventory_sources.create(name='test_gce_source', source='gce')
    gce_host = gce_inv.hosts.create(name='test_gce_host')
    gce_host.inventory_sources.add(gce_source)
    gce_inv.save()
@pytest.fixture
def setup_inventory_groups(inventory, group_factory):
    """Create one host shared by two groups (for distinct-host queries)."""
    groupA = group_factory('test_groupA')
    groupB = group_factory('test_groupB')

    host = Host.objects.create(name='single_host', inventory=inventory)

    groupA.hosts.add(host)
    groupA.save()
    groupB.hosts.add(host)
    groupB.save()
@pytest.mark.django_db
class TestHostManager:
    """Host queryset behavior for smart filters and smart inventories."""

    def test_host_filter_not_smart(self, setup_ec2_gce, organization):
        # NOTE(review): inventory is created without the smart kind set, so
        # host_filter presumably has no effect — confirm against the
        # Inventory model's hosts manager.
        smart_inventory = Inventory(name='smart',
                                    organization=organization,
                                    host_filter='inventory_sources__source=ec2')
        assert len(smart_inventory.hosts.all()) == 0

    def test_host_distinctness(self, setup_inventory_groups, organization):
        """
        two criteria would both yield the same host, check that we only get 1 copy here
        """
        assert (
            list(SmartFilter.query_from_string('name=single_host or name__startswith=single_')) ==
            [Host.objects.get(name='single_host')]
        )
# Things we can not easily test due to SQLite backend:
# 2 organizations with host of same name only has 1 entry in smart inventory
# smart inventory in 1 organization does not include host from another
# smart inventory correctly returns hosts in filter in same organization
| apache-2.0 |
ska-sa/casperfpga | src/i2c_sn.py | 1 | 1106 | import numpy as np, logging, time, struct
import collections,crcmod
logger = logging.getLogger(__name__)
class DS28CM00:
    """
    DS28CM00 I2C/SMBus Silicon Serial Number

    Reads the chip's 8-byte serial number register block over an I2C
    interface object (`itf`) and verifies its trailing CRC byte.
    """

    # CRC-8 polynomial x^8 + x^5 + x^4 + 1 and init value.
    # NOTE(review): rev/xorOut for crcmod are left at their defaults here —
    # confirm they match the DS28CM00 datasheet's CRC definition.
    crcPoly = 0b100110001
    crcInitVal = 0

    def __init__(self,itf,addr=0x50):
        self.itf = itf
        self.addr = addr
        # switch from SMB mode to I2C mode
        self.write(0x8,0x0)

    def readSN(self):
        """Read the 8-byte serial block; log an error if the CRC byte fails."""
        data = self.read(0x0,8)
        # Bytes 0..6 are covered by the CRC in byte 7.
        _crc = self.crc8(data[0:7],self.crcPoly,self.crcInitVal)
        if _crc != data[7]:
            logger.error('Serial number crc8 failed!')
        # The raw 8 bytes are returned even when the CRC check fails.
        return data

    def read(self,reg=None,length=1):
        # Delegate to the underlying I2C interface.
        return self.itf.read(self.addr,reg,length)

    def write(self,reg=None,data=None):
        self.itf.write(self.addr,reg,data)

    def crc8(self,data,poly=0x131,initVal=0):
        """CRC-8 over an iterable of byte values (or a single int), chained
        through `initVal`."""
        crc = initVal
        if isinstance(data,collections.Iterable):
            # Fold each byte into the running CRC.
            for d in data:
                crc = self.crc8(d,poly,crc)
            return crc
        else:
            # crcmod is handed the running CRC as the init value.
            crc8_func = crcmod.mkCrcFun(poly,crc)
            # NOTE(review): chr(data) is Python 2 only; on Python 3 this
            # would need bytes([data]).
            return crc8_func(chr(data))
| gpl-2.0 |
rkmaddox/mne-python | tutorials/preprocessing/25_background_filtering.py | 3 | 48286 | # -*- coding: utf-8 -*-
r"""
.. _disc-filtering:
===================================
Background information on filtering
===================================
Here we give some background information on filtering in general, and
how it is done in MNE-Python in particular.
Recommended reading for practical applications of digital
filter design can be found in
Parks & Burrus (1987) :footcite:`ParksBurrus1987`
and Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`,
and for filtering in an M/EEG context we recommend reading
Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`.
.. note::
This tutorial goes pretty deep into the mathematics of filtering and the
design decisions that go into choosing a filter. If you just want to know
how to apply the default filters in MNE-Python to your data, skip this
tutorial and read :ref:`tut-filter-resample` instead (but someday, you
should come back and read this one too 🙂).
Problem statement
=================
Practical issues with filtering electrophysiological data are covered
in Widmann *et al.* (2012) :footcite:`WidmannSchroger2012`, where they
conclude with this statement:
Filtering can result in considerable distortions of the time course
(and amplitude) of a signal as demonstrated by VanRullen (2011)
:footcite:`VanRullen2011`.
Thus, filtering should not be used lightly. However, if effects of
filtering are cautiously considered and filter artifacts are minimized,
a valid interpretation of the temporal dynamics of filtered
electrophysiological data is possible and signals missed otherwise
can be detected with filtering.
In other words, filtering can increase signal-to-noise ratio (SNR), but if it
is not used carefully, it can distort data. Here we hope to cover some
filtering basics so users can better understand filtering trade-offs and why
MNE-Python has chosen particular defaults.
.. _tut_filtering_basics:
Filtering basics
================
Let's get some of the basic math down. In the frequency domain, digital
filters have a transfer function that is given by:
.. math::
H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + \ldots + b_M z^{-M}}
{1 + a_1 z^{-1} + a_2 z^{-2} + \ldots + a_N z^{-M}} \\
&= \frac{\sum_{k=0}^Mb_kz^{-k}}{\sum_{k=1}^Na_kz^{-k}}
In the time domain, the numerator coefficients :math:`b_k` and denominator
coefficients :math:`a_k` can be used to obtain our output data
:math:`y(n)` in terms of our input data :math:`x(n)` as:
.. math::
:label: summations
y(n) &= b_0 x(n) + b_1 x(n-1) + \ldots + b_M x(n-M)
- a_1 y(n-1) - a_2 y(n - 2) - \ldots - a_N y(n - N)\\
&= \sum_{k=0}^M b_k x(n-k) - \sum_{k=1}^N a_k y(n-k)
In other words, the output at time :math:`n` is determined by a sum over
1. the numerator coefficients :math:`b_k`, which get multiplied by
the previous input values :math:`x(n-k)`, and
2. the denominator coefficients :math:`a_k`, which get multiplied by
the previous output values :math:`y(n-k)`.
Note that these summations correspond to (1) a weighted `moving average`_ and
(2) an autoregression_.
Filters are broken into two classes: FIR_ (finite impulse response) and
IIR_ (infinite impulse response) based on these coefficients.
FIR filters use a finite number of numerator
coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output
value of :math:`y(n)` depends only on the :math:`M` previous input values.
IIR filters depend on the previous input and output values, and thus can have
effectively infinite impulse responses.
As outlined in Parks & Burrus (1987) :footcite:`ParksBurrus1987`,
FIR and IIR have different trade-offs:
* A causal FIR filter can be linear-phase -- i.e., the same time delay
across all frequencies -- whereas a causal IIR filter cannot. The phase
and group delay characteristics are also usually better for FIR filters.
* IIR filters can generally have a steeper cutoff than an FIR filter of
equivalent order.
* IIR filters are generally less numerically stable, in part due to
accumulating error (due to its recursive calculations).
In MNE-Python we default to using FIR filtering. As noted in Widmann *et al.*
(2015) :footcite:`WidmannEtAl2015`:
Despite IIR filters often being considered as computationally more
efficient, they are recommended only when high throughput and sharp
cutoffs are required
(Ifeachor and Jervis, 2002 :footcite:`IfeachorJervis2002`, p. 321)...
FIR filters are easier to control, are always stable, have a
well-defined passband, can be corrected to zero-phase without
additional computations, and can be converted to minimum-phase.
We therefore recommend FIR filters for most purposes in
electrophysiological data analysis.
When designing a filter (FIR or IIR), there are always trade-offs that
need to be considered, including but not limited to:
1. Ripple in the pass-band
2. Attenuation of the stop-band
3. Steepness of roll-off
4. Filter order (i.e., length for FIR filters)
5. Time-domain ringing
In general, the sharper something is in frequency, the broader it is in time,
and vice-versa. This is a fundamental time-frequency trade-off, and it will
show up below.
FIR Filters
===========
First, we will focus on FIR filters, which are the default filters used by
MNE-Python.
"""
###############################################################################
# Designing FIR filters
# ---------------------
# Here we'll try to design a low-pass filter and look at trade-offs in terms
# of time- and frequency-domain filter characteristics. Later, in
# :ref:`tut_effect_on_signals`, we'll look at how such filters can affect
# signals when they are used.
#
# First let's import some useful tools for filtering, and set some default
# values for our data that are reasonable for M/EEG.
import numpy as np
from numpy.fft import fft, fftfreq
from scipy import signal
import matplotlib.pyplot as plt
from mne.time_frequency.tfr import morlet
from mne.viz import plot_filter, plot_ideal_filter
import mne
sfreq = 1000.  # sampling rate of the simulated data (Hz)
f_p = 40.  # pass-band edge of the low-pass filter used throughout (Hz)
flim = (1., sfreq / 2.)  # limits for plotting
###############################################################################
# Take for example an ideal low-pass filter, which would give a magnitude
# response of 1 in the pass-band (up to frequency :math:`f_p`) and a magnitude
# response of 0 in the stop-band (down to frequency :math:`f_s`) such that
# :math:`f_p=f_s=40` Hz here (shown to a lower limit of -60 dB for simplicity):
nyq = sfreq / 2.  # the Nyquist frequency is half our sample rate
# Ideal brick-wall response: unity gain up to f_p, zero above it.
freq = [0, f_p, f_p, nyq]
gain = [1, 1, 0, 0]
# A figure one third the default height is enough for a single response.
third_height = np.array(plt.rcParams['figure.figsize']) * [1, 1. / 3.]
ax = plt.subplots(1, figsize=third_height)[1]
plot_ideal_filter(freq, gain, ax, title='Ideal %s Hz lowpass' % f_p, flim=flim)
###############################################################################
# This filter hypothetically achieves zero ripple in the frequency domain,
# perfect attenuation, and perfect steepness. However, due to the discontinuity
# in the frequency response, the filter would require infinite ringing in the
# time domain (i.e., infinite order) to be realized. Another way to think of
# this is that a rectangular window in the frequency domain is actually a sinc_
# function in the time domain, which requires an infinite number of samples
# (and thus infinite time) to represent. So although this filter has ideal
# frequency suppression, it has poor time-domain characteristics.
#
# Let's try to naïvely make a brick-wall filter of length 0.1 s, and look
# at the filter itself in the time domain and the frequency domain:
# Truncated-sinc ("naive brick-wall") kernel, 0.1 s long.
n = int(round(0.1 * sfreq))
n -= n % 2 - 1  # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq  # center our sinc
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (0.1 s)', flim=flim, compensate=True)
###############################################################################
# This is not so good! Making the filter 10 times longer (1 s) gets us a
# slightly better stop-band suppression, but still has a lot of ringing in
# the time domain. Note the x-axis is an order of magnitude longer here,
# and the filter has a correspondingly much longer group delay (again equal
# to half the filter length, or 0.5 seconds):
# Same truncated-sinc construction, 10x longer (1 s).
n = int(round(1. * sfreq))
n -= n % 2 - 1  # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (1.0 s)', flim=flim, compensate=True)
###############################################################################
# Let's make the stop-band tighter still with a longer filter (10 s),
# with a resulting larger x-axis:
# Longer still (10 s): sharper in frequency, rings for the full duration.
n = int(round(10. * sfreq))
n -= n % 2 - 1  # make it odd
t = np.arange(-(n // 2), n // 2 + 1) / sfreq
h = np.sinc(2 * f_p * t) / (4 * np.pi)
plot_filter(h, sfreq, freq, gain, 'Sinc (10.0 s)', flim=flim, compensate=True)
###############################################################################
# Now we have very sharp frequency suppression, but our filter rings for the
# entire 10 seconds. So this naïve method is probably not a good way to build
# our low-pass filter.
#
# Fortunately, there are multiple established methods to design FIR filters
# based on desired response characteristics. These include:
#
# 1. The Remez_ algorithm (:func:`scipy.signal.remez`, `MATLAB firpm`_)
# 2. Windowed FIR design (:func:`scipy.signal.firwin2`,
# :func:`scipy.signal.firwin`, and `MATLAB fir2`_)
# 3. Least squares designs (:func:`scipy.signal.firls`, `MATLAB firls`_)
# 4. Frequency-domain design (construct filter in Fourier
# domain and use an :func:`IFFT <numpy.fft.ifft>` to invert it)
#
# .. note:: Remez and least squares designs have advantages when there are
# "do not care" regions in our frequency response. However, we want
# well controlled responses in all frequency regions.
# Frequency-domain construction is good when an arbitrary response
# is desired, but generally less clean (due to sampling issues) than
# a windowed approach for more straightforward filter applications.
# Since our filters (low-pass, high-pass, band-pass, band-stop)
# are fairly simple and we require precise control of all frequency
# regions, we will primarily use and explore windowed FIR design.
#
# If we relax our frequency-domain filter requirements a little bit, we can
# use these functions to construct a lowpass filter that instead has a
# *transition band*, or a region between the pass frequency :math:`f_p`
# and stop frequency :math:`f_s`, e.g.:
trans_bandwidth = 10  # 10 Hz transition band
f_s = f_p + trans_bandwidth  # = 50 Hz
# Relaxed ideal response: gain ramps from 1 to 0 across [f_p, f_s].
freq = [0., f_p, f_s, nyq]
gain = [1., 1., 0., 0.]
ax = plt.subplots(1, figsize=third_height)[1]
title = '%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)
plot_ideal_filter(freq, gain, ax, title=title, flim=flim)
###############################################################################
# Accepting a shallower roll-off of the filter in the frequency domain makes
# our time-domain response potentially much better. We end up with a more
# gradual slope through the transition region, but a *much* cleaner time
# domain signal. Here again for the 1 s filter:
# BUG FIX: ``n`` still held the 10 s kernel length from the previous
# sinc example, so the "1.0 s" filter was actually 10 s long.  Set the
# length explicitly, matching the 0.5 s / 0.2 s cells below.
n = int(round(sfreq * 1.)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (1.0 s)',
            flim=flim, compensate=True)
###############################################################################
# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually
# use a shorter filter (5 cycles at 10 Hz = 0.5 s) and still get acceptable
# stop-band attenuation:
# Shorter filter: 0.5 s = 5 cycles of the 10 Hz transition bandwidth.
n = int(round(sfreq * 0.5)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.5 s)',
            flim=flim, compensate=True)
###############################################################################
# But if we shorten the filter too much (2 cycles of 10 Hz = 0.2 s),
# our effective stop frequency gets pushed out past 60 Hz:
# Too short: 0.2 s = only 2 cycles at 10 Hz; the effective stop edge moves out.
n = int(round(sfreq * 0.2)) + 1
h = signal.firwin2(n, freq, gain, nyq=nyq)
plot_filter(h, sfreq, freq, gain, 'Windowed 10 Hz transition (0.2 s)',
            flim=flim, compensate=True)
###############################################################################
# If we want a filter that is only 0.1 seconds long, we should probably use
# something more like a 25 Hz transition band (0.2 s = 5 cycles @ 25 Hz):
# Widen the transition band to 25 Hz so the 0.2 s length suffices
# (0.2 s = 5 cycles at 25 Hz).
trans_bandwidth = 25
f_s = f_p + trans_bandwidth
freq = [0, f_p, f_s, nyq]
h = signal.firwin2(n, freq, gain, nyq=nyq)
# BUG FIX: the title said "50 Hz transition" but the transition band
# configured above (and described in the narrative) is 25 Hz.
plot_filter(h, sfreq, freq, gain, 'Windowed 25 Hz transition (0.2 s)',
            flim=flim, compensate=True)
###############################################################################
# So far, we have only discussed *non-causal* filtering, which means that each
# sample at each time point :math:`t` is filtered using samples that come
# after (:math:`t + \Delta t`) *and* before (:math:`t - \Delta t`) the current
# time point :math:`t`.
# In this sense, each sample is influenced by samples that come both before
# and after it. This is useful in many cases, especially because it does not
# delay the timing of events.
#
# However, sometimes it can be beneficial to use *causal* filtering,
# whereby each sample :math:`t` is filtered only using time points that came
# after it.
#
# Note that the delay is variable (whereas for linear/zero-phase filters it
# is constant) but small in the pass-band. Unlike zero-phase filters, which
# require time-shifting backward the output of a linear-phase filtering stage
# (and thus becoming non-causal), minimum-phase filters do not require any
# compensation to achieve small delays in the pass-band. Note that as an
# artifact of the minimum phase filter construction step, the filter does
# not end up being as steep as the linear/zero-phase version.
#
# We can construct a minimum-phase filter from our existing linear-phase
# filter with the :func:`scipy.signal.minimum_phase` function, and note
# that the falloff is not as steep:
# Convert the linear-phase kernel to minimum phase (small, causal delay;
# shallower roll-off).  No delay compensation is applied when plotting.
h_min = signal.minimum_phase(h)
plot_filter(h_min, sfreq, freq, gain, 'Minimum-phase', flim=flim)
###############################################################################
# .. _tut_effect_on_signals:
#
# Applying FIR filters
# --------------------
#
# Now lets look at some practical effects of these filters by applying
# them to some data.
#
# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part)
# plus noise (random and line). Note that the original clean signal contains
# frequency content in both the pass band and transition bands of our
# low-pass filter.
dur = 10.  # total signal duration (s)
center = 2.  # time at which the Morlet "blip" is centered (s)
morlet_freq = f_p  # put the blip right at the pass-band edge
tlim = [center - 0.2, center + 0.2]  # zoomed time window for plotting
tticks = [tlim[0], center, tlim[1]]
flim = [20, 70]  # frequency window for plotting (Hz)
x = np.zeros(int(sfreq * dur) + 1)
# Imaginary part of a 7-cycle Morlet wavelet, scaled down, as the "signal".
blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20.
n_onset = int(center * sfreq) - len(blip) // 2
x[n_onset:n_onset + len(blip)] += blip
x_orig = x.copy()  # keep the clean signal for comparison
rng = np.random.RandomState(0)  # fixed seed for reproducibility
x += rng.randn(len(x)) / 1000.  # broadband noise
x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000.  # 60 Hz line noise
###############################################################################
# Filter it with a shallow cutoff, linear-phase FIR (which allows us to
# compensate for the constant filter delay):
transition_band = 0.25 * f_p  # = 10 Hz for f_p = 40
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# MNE's default FIR design (firwin-based) as of 0.16:
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin', verbose=True)
x_v16 = np.convolve(h, x)
# this is the linear->zero phase, causal-to-non-causal conversion / shift
x_v16 = x_v16[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.16 default', flim=flim,
            compensate=True)
###############################################################################
# Filter it with a different design method ``fir_design="firwin2"``, and also
# compensate for the constant filter delay. This method does not produce
# quite as sharp a transition compared to ``fir_design="firwin"``, despite
# being twice as long:
# Same spec, but using the older firwin2 design (MNE 0.14 default).
transition_band = 0.25 * f_p
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent:
# filter_dur = 6.6 / transition_band  # sec
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             fir_design='firwin2', verbose=True)
# Compensate the constant (len(h) // 2) group delay after convolving.
x_v14 = np.convolve(h, x)[len(h) // 2:]
plot_filter(h, sfreq, freq, gain, 'MNE-Python 0.14 default', flim=flim,
            compensate=True)
###############################################################################
# Let's also filter with the MNE-Python 0.13 default, which is a
# long-duration, steep cutoff FIR that gets applied twice:
# MNE 0.13 default: long, steep FIR applied twice (forward then backward).
transition_band = 0.5  # Hz
f_s = f_p + transition_band
filter_dur = 10.  # sec
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
# This would be equivalent
# n = int(sfreq * filter_dur)
# h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.)
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             h_trans_bandwidth=transition_band,
                             filter_length='%ss' % filter_dur,
                             fir_design='firwin2', verbose=True)
# Forward-backward application (zero phase); trim the edge samples.
x_v13 = np.convolve(np.convolve(h, x)[::-1], h)[::-1][len(h) - 1:-len(h) - 1]
# the effective h is one that is applied to the time-reversed version of itself
h_eff = np.convolve(h, h[::-1])
plot_filter(h_eff, sfreq, freq, gain, 'MNE-Python 0.13 default', flim=flim,
            compensate=True)
###############################################################################
# Let's also filter it with the MNE-C default, which is a long-duration
# steep-slope FIR filter designed using frequency-domain techniques:
# MNE-C default: frequency-domain-designed, long, steep-slope FIR.
h = mne.filter.design_mne_c_filter(sfreq, l_freq=None, h_freq=f_p + 2.5)
x_mne_c = np.convolve(h, x)[len(h) // 2:]  # compensate constant delay
transition_band = 5  # Hz (default in MNE-C)
f_s = f_p + transition_band
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'MNE-C default', flim=flim, compensate=True)
###############################################################################
# And now an example of a minimum-phase filter:
# Minimum-phase variant: causal, no delay compensation applied.
h = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                             phase='minimum', fir_design='firwin',
                             verbose=True)
x_min = np.convolve(h, x)
transition_band = 0.25 * f_p
f_s = f_p + transition_band
filter_dur = 6.6 / transition_band  # sec
n = int(sfreq * filter_dur)
freq = [0., f_p, f_s, sfreq / 2.]
gain = [1., 1., 0., 0.]
plot_filter(h, sfreq, freq, gain, 'Minimum-phase filter', flim=flim)
###############################################################################
# Both the MNE-Python 0.13 and MNE-C filters have excellent frequency
# attenuation, but it comes at a cost of potential
# ringing (long-lasting ripples) in the time domain. Ringing can occur with
# steep filters, especially in signals with frequency content around the
# transition band. Our Morlet wavelet signal has power in our transition band,
# and the time-domain ringing is thus more pronounced for the steep-slope,
# long-duration filter than the shorter, shallower-slope filter:
axes = plt.subplots(1, 2)[1]  # [time-domain axis, frequency-domain axis]
def plot_signal(x, offset):
    """Plot ``x`` in time (axes[0]) and as a magnitude spectrum (axes[1]).

    ``offset`` shifts the time-domain trace vertically so several
    signals can be stacked on one axis.  Reads the module-level
    ``axes``, ``sfreq`` and ``flim`` at call time.
    """
    t = np.arange(len(x)) / sfreq
    axes[0].plot(t, x + offset)
    axes[0].set(xlabel='Time (s)', xlim=t[[0, -1]])
    # One-sided magnitude spectrum in dB; the 1e-16 floor avoids log10(0).
    X = fft(x)
    freqs = fftfreq(len(x), 1. / sfreq)
    mask = freqs >= 0
    X = X[mask]
    freqs = freqs[mask]
    axes[1].plot(freqs, 20 * np.log10(np.maximum(np.abs(X), 1e-16)))
    axes[1].set(xlim=flim)
yscale = 30  # vertical spacing between stacked traces
yticklabels = ['Original', 'Noisy', 'FIR-firwin (0.16)', 'FIR-firwin2 (0.14)',
               'FIR-steep (0.13)', 'FIR-steep (MNE-C)', 'Minimum-phase']
yticks = -np.arange(len(yticklabels)) / yscale
# Stack each filtered version below the previous one.
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_v16, offset=yticks[2])
plot_signal(x_v14, offset=yticks[3])
plot_signal(x_v13, offset=yticks[4])
plot_signal(x_mne_c, offset=yticks[5])
plot_signal(x_min, offset=yticks[6])
axes[0].set(xlim=tlim, title='FIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-len(yticks) / yscale, 1. / yscale],
            yticks=yticks, yticklabels=yticklabels)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.tight_layout()
plt.show()
###############################################################################
# IIR filters
# ===========
#
# MNE-Python also offers IIR filtering functionality that is based on the
# methods from :mod:`scipy.signal`. Specifically, we use the general-purpose
# functions :func:`scipy.signal.iirfilter` and :func:`scipy.signal.iirdesign`,
# which provide unified interfaces to IIR filter design.
#
# Designing IIR filters
# ---------------------
#
# Let's continue with our design of a 40 Hz low-pass filter and look at
# some trade-offs of different IIR filters.
#
# Often the default IIR filter is a `Butterworth filter`_, which is designed
# to have a *maximally flat pass-band*. Let's look at a few filter orders,
# i.e., a few different number of coefficients used and therefore steepness
# of the filter:
#
# .. note:: Notice that the group delay (which is related to the phase) of
# the IIR filters below are not constant. In the FIR case, we can
# design so-called linear-phase filters that have a constant group
# delay, and thus compensate for the delay (making the filter
# non-causal) if necessary. This cannot be done with IIR filters, as
# they have a non-linear phase (non-constant group delay). As the
# filter order increases, the phase distortion near and in the
# transition band worsens. However, if non-causal (forward-backward)
# filtering can be used, e.g. with :func:`scipy.signal.filtfilt`,
# these phase issues can theoretically be mitigated.
# Order-2 Butterworth low-pass in second-order-sections (SOS) form,
# applied forward-backward (zero phase) below.
sos = signal.iirfilter(2, f_p / nyq, btype='low', ftype='butter', output='sos')
plot_filter(dict(sos=sos), sfreq, freq, gain, 'Butterworth order=2', flim=flim,
            compensate=True)
x_shallow = signal.sosfiltfilt(sos, x)
del sos
###############################################################################
# The falloff of this filter is not very steep.
#
# .. note:: Here we have made use of second-order sections (SOS)
# by using :func:`scipy.signal.sosfilt` and, under the
# hood, :func:`scipy.signal.zpk2sos` when passing the
# ``output='sos'`` keyword argument to
# :func:`scipy.signal.iirfilter`. The filter definitions
# given :ref:`above <tut_filtering_basics>` use the polynomial
# numerator/denominator (sometimes called "tf") form ``(b, a)``,
# which are theoretically equivalent to the SOS form used here.
# In practice, however, the SOS form can give much better results
# due to issues with numerical precision (see
# :func:`scipy.signal.sosfilt` for an example), so SOS should be
# used whenever possible.
#
# Let's increase the order, and note that now we have better attenuation,
# with a longer impulse response. Let's also switch to using the MNE filter
# design function, which simplifies a few things and gives us some information
# about the resulting filter:
# Higher order (8): better attenuation, longer impulse response.
iir_params = dict(order=8, ftype='butter')
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain, 'Butterworth order=8', flim=flim,
            compensate=True)
x_steep = signal.sosfiltfilt(filt['sos'], x)
###############################################################################
# There are other types of IIR filters that we can use. For a complete list,
# check out the documentation for :func:`scipy.signal.iirdesign`. Let's
# try a Chebychev (type I) filter, which trades off ripple in the pass-band
# to get better attenuation in the stop-band:
# Chebyshev type I: trades pass-band ripple for steeper stop-band roll-off.
iir_params.update(ftype='cheby1',
                  rp=1.,  # dB of acceptable pass-band ripple
                  )
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=1 dB', flim=flim, compensate=True)
###############################################################################
# If we can live with even more ripple, we can get it slightly steeper,
# but the impulse response begins to ring substantially longer (note the
# different x-axis scale):
# More ripple (6 dB) buys a slightly steeper cutoff but longer ringing.
iir_params['rp'] = 6.
filt = mne.filter.create_filter(x, sfreq, l_freq=None, h_freq=f_p,
                                method='iir', iir_params=iir_params,
                                verbose=True)
plot_filter(filt, sfreq, freq, gain,
            'Chebychev-1 order=8, ripple=6 dB', flim=flim,
            compensate=True)
###############################################################################
# Applying IIR filters
# --------------------
#
# Now let's look at how our shallow and steep Butterworth IIR filters
# perform on our Morlet signal from before:
axes = plt.subplots(1, 2)[1]  # NOTE: rebinds the ``axes`` read by plot_signal
yticks = np.arange(4) / -30.
yticklabels = ['Original', 'Noisy', 'Butterworth-2', 'Butterworth-8']
plot_signal(x_orig, offset=yticks[0])
plot_signal(x, offset=yticks[1])
plot_signal(x_shallow, offset=yticks[2])
plot_signal(x_steep, offset=yticks[3])
axes[0].set(xlim=tlim, title='IIR, Lowpass=%d Hz' % f_p, xticks=tticks,
            ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,)
for text in axes[0].get_yticklabels():
    text.set(rotation=45, size=8)
axes[1].set(xlim=flim, ylim=(-60, 10), xlabel='Frequency (Hz)',
            ylabel='Magnitude (dB)')
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Some pitfalls of filtering
# ==========================
#
# Multiple recent papers have noted potential risks of drawing
# errant inferences due to misapplication of filters.
#
# Low-pass problems
# -----------------
#
# Filters in general, especially those that are non-causal (zero-phase), can
# make activity appear to occur earlier or later than it truly did. As
# mentioned in VanRullen (2011) :footcite:`VanRullen2011`,
# investigations of commonly (at the time)
# used low-pass filters created artifacts when they were applied to simulated
# data. However, such deleterious effects were minimal in many real-world
# examples in Rousselet (2012) :footcite:`Rousselet2012`.
#
# Perhaps more revealing, it was noted in Widmann & Schröger (2012)
# :footcite:`WidmannSchroger2012` that the problematic low-pass filters from
# VanRullen (2011) :footcite:`VanRullen2011`:
#
# 1. Used a least-squares design (like :func:`scipy.signal.firls`) that
# included "do-not-care" transition regions, which can lead to
# uncontrolled behavior.
# 2. Had a filter length that was independent of the transition bandwidth,
# which can cause excessive ringing and signal distortion.
#
# .. _tut_filtering_hp_problems:
#
# High-pass problems
# ------------------
#
# When it comes to high-pass filtering, using corner frequencies above 0.1 Hz
# were found in Acunzo *et al.* (2012) :footcite:`AcunzoEtAl2012` to:
#
# "... generate a systematic bias easily leading to misinterpretations of
# neural activity.”
#
# In a related paper, Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`
# also came to suggest a 0.1 Hz highpass. More evidence followed in
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015` of such distortions.
# Using data from language ERP studies of semantic and
# syntactic processing (i.e., N400 and P600), using a high-pass above 0.3 Hz
# caused significant effects to be introduced implausibly early when compared
# to the unfiltered data. From this, the authors suggested the optimal
# high-pass value for language processing to be 0.1 Hz.
#
# We can recreate a problematic simulation from
# Tanner *et al.* (2015) :footcite:`TannerEtAl2015`:
#
# "The simulated component is a single-cycle cosine wave with an amplitude
# of 5µV [sic], onset of 500 ms poststimulus, and duration of 800 ms. The
# simulated component was embedded in 20 s of zero values to avoid
# filtering edge effects... Distortions [were] caused by 2 Hz low-pass
# and high-pass filters... No visible distortion to the original
# waveform [occurred] with 30 Hz low-pass and 0.01 Hz high-pass filters...
# Filter frequencies correspond to the half-amplitude (-6 dB) cutoff
# (12 dB/octave roll-off)."
#
# .. note:: This simulated signal contains energy not just within the
# pass-band, but also within the transition and stop-bands -- perhaps
# most easily understood because the signal has a non-zero DC value,
# but also because it is a shifted cosine that has been
# *windowed* (here multiplied by a rectangular window), which
# makes the cosine and DC frequencies spread to other frequencies
# (multiplication in time is convolution in frequency, so multiplying
# by a rectangular window in the time domain means convolving a sinc
# function with the impulses at DC and the cosine frequency in the
# frequency domain).
#
# Simulated single-cycle raised-cosine "component" (Tanner et al., 2015):
# peak amplitude 5, 0.8 s long, starting at t = 0.5 s (t runs from -0.2 s).
x = np.zeros(int(2 * sfreq))
t = np.arange(0, len(x)) / sfreq - 0.2
onset = np.where(t >= 0.5)[0][0]
cos_t = np.arange(0, int(sfreq * 0.8)) / sfreq
sig = 2.5 - 2.5 * np.cos(2 * np.pi * (1. / 0.8) * cos_t)
x[onset:onset + len(sig)] = sig
# NOTE(review): scipy.signal.iirfilter normalizes Wn by the Nyquist
# frequency (sfreq / 2.), not sfreq -- cf. ``f_p / nyq`` earlier in this
# tutorial -- so these corners are effectively half the stated values.
# Confirm whether that is intended here.
iir_lp_30 = signal.iirfilter(2, 30. / sfreq, btype='lowpass')
iir_hp_p1 = signal.iirfilter(2, 0.1 / sfreq, btype='highpass')
iir_lp_2 = signal.iirfilter(2, 2. / sfreq, btype='lowpass')
iir_hp_2 = signal.iirfilter(2, 2. / sfreq, btype='highpass')
# Zero-phase (forward-backward) filtering; padlen=0 since the signal is
# already embedded in zeros.
x_lp_30 = signal.filtfilt(iir_lp_30[0], iir_lp_30[1], x, padlen=0)
x_hp_p1 = signal.filtfilt(iir_hp_p1[0], iir_hp_p1[1], x, padlen=0)
x_lp_2 = signal.filtfilt(iir_lp_2[0], iir_lp_2[1], x, padlen=0)
x_hp_2 = signal.filtfilt(iir_hp_2[0], iir_hp_2[1], x, padlen=0)
# Axis limits / labels shared by the four panels below.
xlim = t[[0, -1]]
ylim = [-2, 6]
xlabel = 'Time (sec)'
ylabel = r'Amplitude ($\mu$V)'
tticks = [0, 0.5, 1.3, t[-1]]
axes = plt.subplots(2, 2)[1].ravel()
# BUG FIX: the fourth panel plots ``x_hp_p1`` (the 0.1 Hz *high*-pass),
# but was labeled 'LP$_{0.1}$'; label corrected to 'HP$_{0.1}$'.
for ax, x_f, title in zip(axes, [x_lp_2, x_lp_30, x_hp_2, x_hp_p1],
                          ['LP$_2$', 'LP$_{30}$', 'HP$_2$', 'HP$_{0.1}$']):
    ax.plot(t, x, color='0.5')                  # unfiltered reference
    ax.plot(t, x_f, color='k', linestyle='--')  # filtered signal
    ax.set(ylim=ylim, xlim=xlim, xticks=tticks,
           title=title, xlabel=xlabel, ylabel=ylabel)
mne.viz.adjust_axes(axes)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Similarly, in a P300 paradigm reported by
# Kappenman & Luck (2010) :footcite:`KappenmanLuck2010`,
# they found that applying a 1 Hz high-pass decreased the probability of
# finding a significant difference in the N100 response, likely because
# the P300 response was smeared (and inverted) in time by the high-pass
# filter such that it tended to cancel out the increased N100. However,
# they nonetheless note that some high-passing can still be useful to deal
# with drifts in the data.
#
# Even though these papers generally advise a 0.1 Hz or lower frequency for
# a high-pass, it is important to keep in mind (as most authors note) that
# filtering choices should depend on the frequency content of both the
# signal(s) of interest and the noise to be suppressed. For example, in
# some of the MNE-Python examples involving the :ref:`sample-dataset` dataset,
# high-pass values of around 1 Hz are used when looking at auditory
# or visual N100 responses, because we analyze standard (not deviant) trials
# and thus expect that contamination by later or slower components will
# be limited.
#
# Baseline problems (or solutions?)
# ---------------------------------
#
# In an evolving discussion, Tanner *et al.* (2015) :footcite:`TannerEtAl2015`
# suggest using baseline correction to remove slow drifts in data. However,
# Maess *et al.* (2016) :footcite:`MaessEtAl2016`
# suggest that baseline correction, which is a form of high-passing, does
# not offer substantial advantages over standard high-pass filtering.
# Tanner *et al.* (2016) :footcite:`TannerEtAl2016`
# rebutted that baseline correction can correct for problems with filtering.
#
# To see what they mean, consider again our old simulated signal ``x`` from
# before:
def baseline_plot(x):
    """Plot ``x`` high-pass filtered at 0.1/0.3/0.5 Hz, with and without
    baseline correction.

    Left column (``ci == 0``): forward-backward Butterworth high-pass only.
    Right column (``ci == 1``): the same filtered trace additionally
    baseline-corrected by subtracting its pre-stimulus mean.

    Relies on the module-level ``t``, ``sfreq``, ``ylim``, ``xlim``,
    ``tticks``, ``xlabel`` and ``title`` defined earlier in this tutorial.
    """
    all_axes = plt.subplots(3, 2)[1]
    for ri, (axes, freq) in enumerate(zip(all_axes, [0.1, 0.3, 0.5])):
        for ci, ax in enumerate(axes):
            if ci == 0:
                # Zero-phase (filtfilt) 4th-order Butterworth high-pass.
                iir_hp = signal.iirfilter(4, freq / sfreq, btype='highpass',
                                          output='sos')
                x_hp = signal.sosfiltfilt(iir_hp, x, padlen=0)
            else:
                # ci == 1 deliberately reuses ``x_hp`` from the ci == 0 pass
                # and subtracts the pre-stimulus (t < 0) mean, i.e. applies
                # baseline correction on top of the filtering.
                x_hp -= x_hp[t < 0].mean()
            ax.plot(t, x, color='0.5')
            ax.plot(t, x_hp, color='k', linestyle='--')
            if ri == 0:
                ax.set(title=('No ' if ci == 0 else '') +
                       'Baseline Correction')
            ax.set(xticks=tticks, ylim=ylim, xlim=xlim, xlabel=xlabel)
            ax.set_ylabel('%0.1f Hz' % freq, rotation=0,
                          horizontalalignment='right')
    mne.viz.adjust_axes(axes)
    mne.viz.tight_layout()
    plt.suptitle(title)
    plt.show()


# Pure-tone burst with a silent baseline: correction looks harmless here.
baseline_plot(x)
###############################################################################
# In response, Maess *et al.* (2016) :footcite:`MaessEtAl2016a`
# note that these simulations do not
# address cases of pre-stimulus activity that is shared across conditions, as
# applying baseline correction will effectively copy the topology outside the
# baseline period. We can see this if we give our signal ``x`` with some
# consistent pre-stimulus activity, which makes everything look bad.
#
# .. note:: An important thing to keep in mind with these plots is that they
# are for a single simulated sensor. In multi-electrode recordings
# the topology (i.e., spatial pattern) of the pre-stimulus activity
# will leak into the post-stimulus period. This will likely create a
# spatially varying distortion of the time-domain signals, as the
# averaged pre-stimulus spatial pattern gets subtracted from the
# sensor time courses.
#
# Putting some activity in the baseline period:
# Add consistent pre-stimulus activity and re-plot: baseline correction now
# propagates the baseline-period signal into the post-stimulus window.
n_pre = (t < 0).sum()  # number of pre-stimulus samples
sig_pre = 1 - np.cos(2 * np.pi * np.arange(n_pre) / (0.5 * n_pre))
x[:n_pre] += sig_pre  # modifies ``x`` in place
baseline_plot(x)
###############################################################################
# Both groups seem to acknowledge that the choices of filtering cutoffs, and
# perhaps even the application of baseline correction, depend on the
# characteristics of the data being investigated, especially when it comes to:
#
# 1. The frequency content of the underlying evoked activity relative
# to the filtering parameters.
# 2. The validity of the assumption of no consistent evoked activity
# in the baseline period.
#
# We thus recommend carefully applying baseline correction and/or high-pass
# values based on the characteristics of the data to be analyzed.
#
#
# Filtering defaults
# ==================
#
# .. _tut_filtering_in_python:
#
# Defaults in MNE-Python
# ----------------------
#
# Most often, filtering in MNE-Python is done at the :class:`mne.io.Raw` level,
# and thus :func:`mne.io.Raw.filter` is used. This function under the hood
# (among other things) calls :func:`mne.filter.filter_data` to actually
# filter the data, which by default applies a zero-phase FIR filter designed
# using :func:`scipy.signal.firwin`.
# In Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`, they
# suggest a specific set of parameters to use for high-pass filtering,
# including:
#
# "... providing a transition bandwidth of 25% of the lower passband
# edge but, where possible, not lower than 2 Hz and otherwise the
#       distance from the passband edge to the critical frequency."
#
# In practice, this means that for each high-pass value ``l_freq`` or
# low-pass value ``h_freq`` below, you would get this corresponding
# ``l_trans_bandwidth`` or ``h_trans_bandwidth``, respectively,
# if the sample rate were 100 Hz (i.e., Nyquist frequency of 50 Hz):
#
# +------------------+-------------------+-------------------+
# | l_freq or h_freq | l_trans_bandwidth | h_trans_bandwidth |
# +==================+===================+===================+
# | 0.01 | 0.01 | 2.0 |
# +------------------+-------------------+-------------------+
# | 0.1 | 0.1 | 2.0 |
# +------------------+-------------------+-------------------+
# | 1.0 | 1.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 2.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 4.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 8.0 | 2.0 | 2.0 |
# +------------------+-------------------+-------------------+
# | 10.0 | 2.5 | 2.5 |
# +------------------+-------------------+-------------------+
# | 20.0 | 5.0 | 5.0 |
# +------------------+-------------------+-------------------+
# | 40.0 | 10.0 | 10.0 |
# +------------------+-------------------+-------------------+
# | 50.0 | 12.5 | 12.5 |
# +------------------+-------------------+-------------------+
#
# MNE-Python has adopted this definition for its high-pass (and low-pass)
# transition bandwidth choices when using ``l_trans_bandwidth='auto'`` and
# ``h_trans_bandwidth='auto'``.
#
# To choose the filter length automatically with ``filter_length='auto'``,
# the reciprocal of the shortest transition bandwidth is used to ensure
# decent attenuation at the stop frequency. Specifically, the reciprocal
# (in samples) is multiplied by 3.1, 3.3, or 5.0 for the Hann, Hamming,
# or Blackman windows, respectively, as selected by the ``fir_window``
# argument for ``fir_design='firwin'``, and double these for
# ``fir_design='firwin2'`` mode.
#
# .. note:: For ``fir_design='firwin2'``, the multiplicative factors are
# doubled compared to what is given in
# Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`
# (p. 357), as :func:`scipy.signal.firwin2` has a smearing effect
# on the frequency response, which we compensate for by
# increasing the filter length. This is why
#           ``fir_design='firwin'`` is preferred to ``fir_design='firwin2'``.
#
# In 0.14, we default to using a Hamming window in filter design, as it
# provides up to 53 dB of stop-band attenuation with small pass-band ripple.
#
# .. note:: In band-pass applications, often a low-pass filter can operate
# effectively with fewer samples than the high-pass filter, so
# it is advisable to apply the high-pass and low-pass separately
# when using ``fir_design='firwin2'``. For design mode
# ``fir_design='firwin'``, there is no need to separate the
# operations, as the lowpass and highpass elements are constructed
# separately to meet the transition band requirements.
#
# For more information on how to use the
# MNE-Python filtering functions with real data, consult the preprocessing
# tutorial on :ref:`tut-filter-resample`.
#
# Defaults in MNE-C
# -----------------
# MNE-C by default uses:
#
# 1. 5 Hz transition band for low-pass filters.
# 2. 3-sample transition band for high-pass filters.
# 3. Filter length of 8197 samples.
#
# The filter is designed in the frequency domain, creating a linear-phase
# filter such that the delay is compensated for as is done with the MNE-Python
# ``phase='zero'`` filtering option.
#
# Squared-cosine ramps are used in the transition regions. Because these
# are used in place of more gradual (e.g., linear) transitions,
# a given transition width will result in more temporal ringing but also more
# rapid attenuation than the same transition width in windowed FIR designs.
#
# The default filter length will generally have excellent attenuation
# but long ringing for the sample rates typically encountered in M/EEG data
# (e.g. 500-2000 Hz).
#
# Defaults in other software
# --------------------------
# A good but possibly outdated comparison of filtering in various software
# packages is available in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`.
# Briefly:
#
# * EEGLAB
# MNE-Python 0.14 defaults to behavior very similar to that of EEGLAB
# (see the `EEGLAB filtering FAQ`_ for more information).
# * FieldTrip
# By default FieldTrip applies a forward-backward Butterworth IIR filter
# of order 4 (band-pass and band-stop filters) or 2 (for low-pass and
# high-pass filters). Similar filters can be achieved in MNE-Python when
# filtering with :meth:`raw.filter(..., method='iir') <mne.io.Raw.filter>`
# (see also :func:`mne.filter.construct_iir_filter` for options).
# For more information, see e.g. the
# `FieldTrip band-pass documentation <ftbp_>`_.
#
# Reporting Filters
# =================
# On page 45 in Widmann *et al.* (2015) :footcite:`WidmannEtAl2015`,
# there is a convenient list of
# important filter parameters that should be reported with each publication:
#
# 1. Filter type (high-pass, low-pass, band-pass, band-stop, FIR, IIR)
# 2. Cutoff frequency (including definition)
# 3. Filter order (or length)
# 4. Roll-off or transition bandwidth
# 5. Passband ripple and stopband attenuation
# 6. Filter delay (zero-phase, linear-phase, non-linear phase) and causality
# 7. Direction of computation (one-pass forward/reverse, or two-pass forward
# and reverse)
#
# In the following, we will address how to deal with these parameters in MNE:
#
#
# Filter type
# -----------
# Depending on the function or method used, the filter type can be specified.
# To name an example, in :func:`mne.filter.create_filter`, the relevant
# arguments would be ``l_freq``, ``h_freq``, ``method``, and if the method is
# FIR ``fir_window`` and ``fir_design``.
#
#
# Cutoff frequency
# ----------------
# The cutoff of FIR filters in MNE is defined as half-amplitude cutoff in the
# middle of the transition band. That is, if you construct a lowpass FIR filter
# with ``h_freq = 40``, the filter function will provide a transition
# bandwidth that depends on the ``h_trans_bandwidth`` argument. The desired
# half-amplitude cutoff of the lowpass FIR filter is then at
# ``h_freq + transition_bandwidth/2.``.
#
# Filter length (order) and transition bandwidth (roll-off)
# ---------------------------------------------------------
# In the :ref:`tut_filtering_in_python` section, we have already talked about
# the default filter lengths and transition bandwidths that are used when no
# custom values are specified using the respective filter function's arguments.
#
# If you want to find out about the filter length and transition bandwidth that
# were used through the 'auto' setting, you can use
# :func:`mne.filter.create_filter` to print out the settings once more:
# Use the same settings as when calling e.g., `raw.filter()`
# Use the same settings as when calling e.g., `raw.filter()`; with
# verbose=True the chosen transition bandwidth and filter length are logged.
fir_coefs = mne.filter.create_filter(
    data=None,  # data is only used for sanity checking, not strictly needed
    sfreq=1000.,  # sfreq of your data in Hz
    l_freq=None,
    h_freq=40.,  # assuming a lowpass of 40 Hz
    method='fir',
    fir_window='hamming',
    fir_design='firwin',
    verbose=True)

# See the printed log for the transition bandwidth and filter length.
# Alternatively, get the filter length through:
filter_length = fir_coefs.shape[0]  # number of FIR taps
###############################################################################
# .. note:: If you are using an IIR filter, :func:`mne.filter.create_filter`
# will not print a filter length and transition bandwidth to the log.
# Instead, you can specify the roll-off with the ``iir_params``
# argument or stay with the default, which is a fourth order
# (Butterworth) filter.
#
# Passband ripple and stopband attenuation
# ----------------------------------------
#
# When using the standard :func:`scipy.signal.firwin` design (as for FIR filters in
# MNE), the passband ripple and stopband attenuation are dependent upon the
# window used in design. For standard windows the values are listed in this
# table (see Ifeachor & Jervis (2002) :footcite:`IfeachorJervis2002`, p. 357):
#
# +-------------------------+-----------------+----------------------+
# | Name of window function | Passband ripple | Stopband attenuation |
# +=========================+=================+======================+
# | Hann | 0.0545 dB | 44 dB |
# +-------------------------+-----------------+----------------------+
# | Hamming | 0.0194 dB | 53 dB |
# +-------------------------+-----------------+----------------------+
# | Blackman | 0.0017 dB | 74 dB |
# +-------------------------+-----------------+----------------------+
#
#
# Filter delay and direction of computation
# -----------------------------------------
# For reporting this information, it might be sufficient to read the docstring
# of the filter function or method that you apply. For example in the
# docstring of `mne.filter.create_filter`, for the phase parameter it says:
#
# Phase of the filter, only used if ``method='fir'``.
# By default, a symmetric linear-phase FIR filter is constructed.
# If ``phase='zero'`` (default), the delay of this filter
# is compensated for. If ``phase=='zero-double'``, then this filter
# is applied twice, once forward, and once backward. If 'minimum',
# then a minimum-phase, causal filter will be used.
#
#
# Summary
# =======
#
# When filtering, there are always trade-offs that should be considered.
# One important trade-off is between time-domain characteristics (like ringing)
# and frequency-domain attenuation characteristics (like effective transition
# bandwidth). Filters with sharp frequency cutoffs can produce outputs that
# ring for a long time when they operate on signals with frequency content
# in the transition band. In general, therefore, the wider a transition band
# that can be tolerated, the better behaved the filter will be in the time
# domain.
#
# References
# ==========
# .. footbibliography::
#
# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response
# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response
# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function
# .. _moving average: https://en.wikipedia.org/wiki/Moving_average
# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model
# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm
# .. _matlab firpm: https://www.mathworks.com/help/signal/ref/firpm.html
# .. _matlab fir2: https://www.mathworks.com/help/signal/ref/fir2.html
# .. _matlab firls: https://www.mathworks.com/help/signal/ref/firls.html
# .. _Butterworth filter: https://en.wikipedia.org/wiki/Butterworth_filter
# .. _eeglab filtering faq: https://sccn.ucsd.edu/wiki/Firfilt_FAQ
# .. _ftbp: http://www.fieldtriptoolbox.org/reference/ft_preproc_bandpassfilter
| bsd-3-clause |
WillieMaddox/scipy | scipy/io/matlab/tests/test_streams.py | 109 | 5442 | """ Testing
"""
from __future__ import division, print_function, absolute_import
import os
import sys
import zlib
from io import BytesIO
if sys.version_info[0] >= 3:
cStringIO = BytesIO
else:
from cStringIO import StringIO as cStringIO
from tempfile import mkstemp
import numpy as np
from numpy.testing import (assert_, assert_equal, assert_raises,
run_module_suite)
from scipy.io.matlab.streams import make_stream, \
GenericStream, cStringStream, FileStream, ZlibInputStream, \
_read_into, _read_string
# Shared fixtures created by setup() and destroyed by teardown().
fs = None      # file object reading the temporary file
gs = None      # BytesIO stream over the same payload
cs = None      # cStringIO stream (aliased to BytesIO on Python 3)
fname = None   # path of the temporary file backing ``fs``
def setup():
    """Create the on-disk, BytesIO and cStringIO fixtures used by the tests."""
    global fs, gs, cs, fname
    payload = b'a\x00string'
    fd, fname = mkstemp()
    # Write the payload through the descriptor, then reopen for reading.
    with os.fdopen(fd, 'wb') as writer:
        writer.write(payload)
    fs = open(fname, 'rb')
    gs = BytesIO(payload)
    cs = cStringIO(payload)
def teardown():
    """Close and remove the temporary file created by setup()."""
    global fname, fs
    fs.close()
    del fs
    os.unlink(fname)
def test_make_stream():
    """make_stream should wrap each raw stream in its matching Stream class."""
    global fs, gs, cs
    # test stream initialization
    assert_(isinstance(make_stream(gs), GenericStream))
    if sys.version_info[0] < 3:
        # cStringStream is only meaningful on Python 2; on Python 3 the
        # cStringIO fixture is just another BytesIO.
        assert_(isinstance(make_stream(cs), cStringStream))
    assert_(isinstance(make_stream(fs), FileStream))
def test_tell_seek():
    """seek() should return 0 and tell() should track the resulting absolute
    position for absolute (whence=0), relative (1) and end-relative (2) seeks.
    """
    global fs, gs, cs
    # Each entry: (arguments passed to seek, expected tell() afterwards).
    # The payload is 8 bytes, so seek(-2, 2) lands at position 6.
    moves = [((0,), 0), ((5,), 5), ((2, 1), 7), ((-2, 2), 6)]
    for raw in (fs, gs, cs):
        st = make_stream(raw)
        for args, expected_pos in moves:
            ret = st.seek(*args)
            yield assert_equal, ret, 0
            yield assert_equal, st.tell(), expected_pos
def test_read():
    """Exercise read / _read_into / _read_string on each stream type."""
    global fs, gs, cs
    for s in (fs, gs, cs):
        st = make_stream(s)
        st.seek(0)
        res = st.read(-1)
        yield assert_equal, res, b'a\x00string'
        st.seek(0)
        res = st.read(4)
        yield assert_equal, res, b'a\x00st'
        # read into
        st.seek(0)
        res = _read_into(st, 4)
        yield assert_equal, res, b'a\x00st'
        res = _read_into(st, 4)
        yield assert_equal, res, b'ring'
        # payload is only 8 bytes, so a further 2-byte read must fail
        yield assert_raises, IOError, _read_into, st, 2
        # read alloc
        st.seek(0)
        res = _read_string(st, 4)
        yield assert_equal, res, b'a\x00st'
        res = _read_string(st, 4)
        yield assert_equal, res, b'ring'
        yield assert_raises, IOError, _read_string, st, 2
class TestZlibInputStream(object):
def _get_data(self, size):
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
stream = BytesIO(compressed_data)
return stream, len(compressed_data), data
def test_read(self):
block_size = 131072
SIZES = [0, 1, 10, block_size//2, block_size-1,
block_size, block_size+1, 2*block_size-1]
READ_SIZES = [block_size//2, block_size-1,
block_size, block_size+1]
def check(size, read_size):
compressed_stream, compressed_data_len, data = self._get_data(size)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
data2 = b''
so_far = 0
while True:
block = stream.read(min(read_size,
size - so_far))
if not block:
break
so_far += len(block)
data2 += block
assert_equal(data, data2)
for size in SIZES:
for read_size in READ_SIZES:
yield check, size, read_size
def test_read_max_length(self):
size = 1234
data = np.random.randint(0, 256, size).astype(np.uint8).tostring()
compressed_data = zlib.compress(data)
compressed_stream = BytesIO(compressed_data + b"abbacaca")
stream = ZlibInputStream(compressed_stream, len(compressed_data))
stream.read(len(data))
assert_equal(compressed_stream.tell(), len(compressed_data))
assert_raises(IOError, stream.read, 1)
def test_seek(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
stream.seek(123)
p = 123
assert_equal(stream.tell(), p)
d1 = stream.read(11)
assert_equal(d1, data[p:p+11])
stream.seek(321, 1)
p = 123+11+321
assert_equal(stream.tell(), p)
d2 = stream.read(21)
assert_equal(d2, data[p:p+21])
stream.seek(641, 0)
p = 641
assert_equal(stream.tell(), p)
d3 = stream.read(11)
assert_equal(d3, data[p:p+11])
assert_raises(IOError, stream.seek, 10, 2)
assert_raises(IOError, stream.seek, -1, 1)
assert_raises(ValueError, stream.seek, 1, 123)
stream.seek(10000, 1)
assert_raises(IOError, stream.read, 12)
def test_all_data_read(self):
compressed_stream, compressed_data_len, data = self._get_data(1024)
stream = ZlibInputStream(compressed_stream, compressed_data_len)
assert_(not stream.all_data_read())
stream.seek(512)
assert_(not stream.all_data_read())
stream.seek(1024)
assert_(stream.all_data_read())
if __name__ == "__main__":
    # Run with nose's module-level setup/teardown handling.
    run_module_suite()
| bsd-3-clause |
nucoin/nucoin-qt | contrib/pyminer/pyminer.py | 1257 | 6438 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file license.txt or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15          # seconds to back off after a failed/short RPC response
MAX_NONCE = 1000000L    # initial nonce scan range per getwork round

settings = {}           # config-file key/value pairs, filled in __main__
pp = pprint.PrettyPrinter(indent=4)  # kept for ad-hoc debugging
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for a bitcoind node (Python 2, httplib).

    Uses HTTP Basic auth and reuses a single persistent connection for all
    calls.
    """
    OBJID = 1  # JSON-RPC request id counter

    def __init__(self, host, port, username, password):
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Positional args: strict=False, timeout=30 seconds.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """POST one JSON-RPC request; return its 'result' value.

        On RPC-level failure the server's 'error' object is returned;
        on transport/decoding failure a message is printed and None returned.
        """
        # NOTE: += on the class attribute creates a per-instance counter.
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })

        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None

        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None

        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block height."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch work when ``data`` is None, otherwise submit a solution."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate ``x`` to an unsigned 32-bit value (Python 2 long)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Byte-swap a 32-bit word, e.g. 0xAABBCCDD -> 0xDDCCBBAA."""
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
                    (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    """Byte-swap every aligned 32-bit word of ``in_buf`` (a Python 2 str).

    Length is assumed to be a multiple of 4 bytes.
    """
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in ``in_buf``; bytes within each
    word are untouched."""
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """Single-threaded getwork CPU miner scanning nonces over double-SHA256."""

    def __init__(self, id):
        self.id = id
        # Retuned after every pass so one scan takes ~settings['scantime'] s.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces 0..max_nonce-1 against one unit of getwork data.

        Returns ``(hashes_done, nonce_bin)`` where ``nonce_bin`` is the
        4-byte little-endian winning nonce, or None if nothing was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):

            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and send it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # Nonce occupies hex chars [152:160) of the 256-char data field.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round: fetch work, scan, retune max_nonce, submit if solved."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Scale the next scan so it lasts ~settings['scantime'] seconds,
        # capped just below 2**32 (the nonce field width).
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Mine forever against the RPC endpoint from the global settings."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner instance forever."""
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Parse a simple key=value config file.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for any settings the config file omitted.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Config values arrive as strings; coerce the numeric ones.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Despite the names, workers are separate processes, not threads.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
ozburo/youtube-dl | youtube_dl/extractor/bfmtv.py | 4 | 4216 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import extract_attributes
class BFMTVBaseIE(InfoExtractor):
    """Shared URL patterns and Brightcove hand-off for the BFMTV extractors."""
    _VALID_URL_BASE = r'https?://(?:www\.)?bfmtv\.com/'
    _VALID_URL_TMPL = _VALID_URL_BASE + r'(?:[^/]+/)*[^/?&#]+_%s[A-Z]-(?P<id>\d{12})\.html'
    _VIDEO_BLOCK_REGEX = r'(<div[^>]+class="video_block"[^>]*>)'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/%s/%s_default/index.html?videoId=%s'

    def _brightcove_url_result(self, video_id, video_block):
        # Fall back to BFMTV's default Brightcove account/player when the
        # video block does not carry its own.
        account = video_block.get('accountid') or '876450612001'
        player = video_block.get('playerid') or 'I2qBTln4u'
        brightcove_url = self.BRIGHTCOVE_URL_TEMPLATE % (account, player, video_id)
        return self.url_result(brightcove_url, 'BrightcoveNew', video_id)
class BFMTVIE(BFMTVBaseIE):
    # Extractor for regular bfmtv.com video pages (``_V`` marker in the URL).
    IE_NAME = 'bfmtv'
    _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'V'
    _TESTS = [{
        'url': 'https://www.bfmtv.com/politique/emmanuel-macron-l-islam-est-une-religion-qui-vit-une-crise-aujourd-hui-partout-dans-le-monde_VN-202010020146.html',
        'info_dict': {
            'id': '6196747868001',
            'ext': 'mp4',
            'title': 'Emmanuel Macron: "L\'Islam est une religion qui vit une crise aujourd’hui, partout dans le monde"',
            'description': 'Le Président s\'exprime sur la question du séparatisme depuis les Mureaux, dans les Yvelines.',
            'uploader_id': '876450610001',
            'upload_date': '20201002',
            'timestamp': 1601629620,
        },
    }]

    def _real_extract(self, url):
        """Locate the page's single video block and delegate to Brightcove."""
        bfmtv_id = self._match_id(url)
        webpage = self._download_webpage(url, bfmtv_id)
        video_block = extract_attributes(self._search_regex(
            self._VIDEO_BLOCK_REGEX, webpage, 'video block'))
        return self._brightcove_url_result(video_block['videoid'], video_block)
class BFMTVLiveIE(BFMTVIE):
    # Live-stream pages; inherits _real_extract from BFMTVIE — only the URL
    # pattern and test data differ.
    IE_NAME = 'bfmtv:live'
    _VALID_URL = BFMTVBaseIE._VALID_URL_BASE + '(?P<id>(?:[^/]+/)?en-direct)'
    _TESTS = [{
        'url': 'https://www.bfmtv.com/en-direct/',
        'info_dict': {
            'id': '5615950982001',
            'ext': 'mp4',
            'title': r're:^le direct BFMTV WEB \d{4}-\d{2}-\d{2} \d{2}:\d{2}$',
            'uploader_id': '876450610001',
            'upload_date': '20171018',
            'timestamp': 1508329950,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.bfmtv.com/economie/en-direct/',
        'only_matching': True,
    }]
class BFMTVArticleIE(BFMTVBaseIE):
    """Article pages (``_A`` marker) that may embed several videos."""
    IE_NAME = 'bfmtv:article'
    _VALID_URL = BFMTVBaseIE._VALID_URL_TMPL % 'A'
    _TESTS = [{
        'url': 'https://www.bfmtv.com/sante/covid-19-un-responsable-de-l-institut-pasteur-se-demande-quand-la-france-va-se-reconfiner_AV-202101060198.html',
        'info_dict': {
            'id': '202101060198',
            'title': 'Covid-19: un responsable de l\'Institut Pasteur se demande "quand la France va se reconfiner"',
            'description': 'md5:947974089c303d3ac6196670ae262843',
        },
        'playlist_count': 2,
    }, {
        'url': 'https://www.bfmtv.com/international/pour-bolsonaro-le-bresil-est-en-faillite-mais-il-ne-peut-rien-faire_AD-202101060232.html',
        'only_matching': True,
    }, {
        'url': 'https://www.bfmtv.com/sante/covid-19-oui-le-vaccin-de-pfizer-distribue-en-france-a-bien-ete-teste-sur-des-personnes-agees_AN-202101060275.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Collect every embedded video block on the page into a playlist."""
        bfmtv_id = self._match_id(url)
        webpage = self._download_webpage(url, bfmtv_id)
        blocks = (extract_attributes(el)
                  for el in re.findall(self._VIDEO_BLOCK_REGEX, webpage))
        entries = [self._brightcove_url_result(attrs.get('videoid'), attrs)
                   for attrs in blocks if attrs.get('videoid')]
        return self.playlist_result(
            entries, bfmtv_id, self._og_search_title(webpage, fatal=False),
            self._html_search_meta(['og:description', 'description'], webpage))
| unlicense |
mitsei/dlkit | dlkit/abstract_osid/relationship/sessions.py | 1 | 129947 | """Implementations of relationship abstract base class sessions."""
# pylint: disable=invalid-name
# Method names comply with OSID specification.
# pylint: disable=no-init
# Abstract classes do not define __init__.
# pylint: disable=too-few-public-methods
# Some interfaces are specified as 'markers' and include no methods.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
# pylint: disable=too-many-arguments
# Argument signature defined in specification.
# pylint: disable=duplicate-code
# All apparent duplicates have been inspected. They aren't.
import abc
class RelationshipLookupSession:
"""This session defines methods for retrieving relationships.
A ``Relationship`` is mapped to two OSID ``Ids``.
This lookup session defines several views:
* comparative view: elements may be silently omitted or re-ordered
* plenary view: provides a complete result set or is an error
condition
* federated family view: includes relationships in families which
are children of this family in the family hierarchy
* isolated family view: restricts lookups to this family only
* effective relationship view: Relationship methods return only
relationships currently in effect.
* any effective relationship view: Relationship methods return
both effective and ineffective relationships.
Relationships may have an additional records indicated by their
respective record types. The record may not be accessed through a
cast of the ``Relationship``.
"""
__metaclass__ = abc.ABCMeta
    @abc.abstractmethod
    def get_family_id(self):
        """Gets the ``Family`` ``Id`` associated with this session.

        :return: the ``Family Id`` associated with this session
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*

        """
        # Abstract stub: concrete providers override this.  The trailing
        # comment records the OSID-specified return type.
        return  # osid.id.Id

    # Read-only property view of get_family_id().
    family_id = property(fget=get_family_id)

    @abc.abstractmethod
    def get_family(self):
        """Gets the ``Family`` associated with this session.

        :return: the family
        :rtype: ``osid.relationship.Family``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        # Abstract stub: concrete providers override this.
        return  # osid.relationship.Family

    # Read-only property view of get_family().
    family = property(fget=get_family)
    @abc.abstractmethod
    def can_lookup_relationships(self):
        """Tests if this user can perform ``Relationship`` lookups.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may not offer lookup operations
        to unauthorized users.

        :return: ``false`` if lookup methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        # Abstract stub: concrete providers override this.
        return  # boolean
@abc.abstractmethod
def use_comparative_relationship_view(self):
"""The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.
This view is used when greater interoperability is desired at
the expense of precision.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_plenary_relationship_view(self):
"""A complete view of the ``Relationship`` returns is desired.
Methods will return what is requested or result in an error.
This view is used when greater precision is desired at the
expense of interoperability.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_federated_family_view(self):
"""Federates the view for methods in this session.
A federated view will include relationships in families which
are children of this family in the family hierarchy.
*compliance: mandatory -- This method is must be implemented.*
"""
pass
@abc.abstractmethod
def use_isolated_family_view(self):
    """Isolate the view for methods in this session.

    An isolated view restricts retrievals to this family only.

    *compliance: mandatory -- This method must be implemented.*
    """
    pass
@abc.abstractmethod
def use_effective_relationship_view(self):
    """Restrict methods in this session to relationships whose
    effective dates are current.

    *compliance: mandatory -- This method must be implemented.*
    """
    pass
@abc.abstractmethod
def use_any_effective_relationship_view(self):
    """Allow methods in this session to return relationships with any
    effective dates.

    *compliance: mandatory -- This method must be implemented.*
    """
    pass
@abc.abstractmethod
def get_relationship(self, relationship_id):
    """Return the ``Relationship`` identified by the given ``Id``.

    :param relationship_id: the ``Id`` of the ``Relationship`` to retrieve
    :type relationship_id: ``osid.id.Id``
    :return: the matching ``Relationship``
    :rtype: ``osid.relationship.Relationship``
    :raise: ``NotFound`` -- no ``Relationship`` found with the given ``Id``
    :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.Relationship
@abc.abstractmethod
def get_relationships_by_ids(self, relationship_ids):
    """Return a ``RelationshipList`` for the given ``IdList``.

    :param relationship_ids: the list of ``Ids`` to retrieve
    :type relationship_ids: ``osid.id.IdList``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NotFound`` -- an ``Id`` was not found
    :raise: ``NullArgument`` -- ``relationship_ids`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type(self, relationship_genus_type):
    """Return a ``RelationshipList`` of the given genus ``Type``.

    Relationships whose genus is merely derived from the given
    ``Type`` are excluded.

    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``relationship_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_parent_genus_type(self, relationship_genus_type):
    """Return a ``RelationshipList`` of the given genus ``Type``,
    including relationships whose genus is derived from it.

    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``relationship_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_record_type(self, relationship_record_type):
    """Return a ``RelationshipList`` containing the given record ``Type``.

    :param relationship_record_type: a relationship record type
    :type relationship_record_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``relationship_record_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_on_date(self, from_, to):
    """Return relationships effective through the entire given date
    range (inclusive), though not necessarily confined to it.

    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``from_`` or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_source(self, source_id):
    """Return a ``RelationshipList`` for the given source peer ``Id``.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``source_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_source_on_date(self, source_id, from_, to):
    """Return relationships for the given source peer ``Id`` effective
    through the entire given date range (inclusive), though not
    necessarily confined to it.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``source_id``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_source(self, source_id, relationship_genus_type):
    """Return relationships for the given source peer ``Id`` and genus
    ``Type``, including any genus derived from the given genus.

    In plenary mode the returned list contains every matching
    relationship (including duplicates) or an error results if one is
    inaccessible; otherwise inaccessible relationships may be omitted
    and the list may be in any order, possibly de-duplicated.  In
    effective mode only currently effective relationships are
    returned; in any-effective mode expired ones are included as well.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``source_id`` or ``relationship_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_source_on_date(self, source_id, relationship_genus_type, from_, to):
    """Return relationships for the given source peer ``Id`` and genus
    ``Type`` effective through the entire given date range
    (inclusive), though not necessarily confined to it.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``source_id``, ``relationship_genus_type``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_destination(self, destination_id):
    """Return a ``RelationshipList`` for the given destination peer ``Id``.

    :param destination_id: a peer ``Id``
    :type destination_id: ``osid.id.Id``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``destination_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_destination_on_date(self, destination_id, from_, to):
    """Return relationships for the given destination peer ``Id`` whose
    starting effective date falls within the given range (inclusive).

    :param destination_id: a peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``destination_id``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_destination(self, destination_id, relationship_genus_type):
    """Return relationships for the given destination peer ``Id`` and
    genus ``Type``, including any genus derived from the given genus.

    In plenary mode the returned list contains every matching
    relationship (including duplicates) or an error results if one is
    inaccessible; otherwise inaccessible relationships may be omitted
    and the list may be in any order, possibly de-duplicated.  In
    effective mode only currently effective relationships are
    returned; in any-effective mode expired ones are included as well.

    :param destination_id: a peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``destination_id`` or ``relationship_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_destination_on_date(self, destination_id, relationship_genus_type, from_, to):
    """Return relationships for the given destination peer ``Id`` and
    genus ``Type`` effective through the entire given date range
    (inclusive), though not necessarily confined to it.

    :param destination_id: a peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``destination_id``, ``relationship_genus_type``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_peers(self, source_id, destination_id):
    """Return a ``RelationshipList`` connecting the given peer ``Ids``.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param destination_id: a related peer ``Id``
    :type destination_id: ``osid.id.Id``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``source_id`` or ``destination_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_for_peers_on_date(self, source_id, destination_id, from_, to):
    """Return relationships connecting the given peer ``Ids`` effective
    through the entire given date range (inclusive), though not
    necessarily confined to it.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param destination_id: a related peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``source_id``, ``destination_id``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_peers(self, source_id, destination_id, relationship_genus_type):
    """Return relationships connecting the given peer ``Ids`` with the
    given genus ``Type``, including any genus derived from it.

    In plenary mode the returned list contains every matching
    relationship or an error results if one is inaccessible;
    otherwise inaccessible relationships may be omitted.  In
    effective mode only currently effective relationships are
    returned; in any-effective mode expired ones are included as well.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param destination_id: a related peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``NullArgument`` -- ``source_id``, ``destination_id``, or ``relationship_genus_type`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships_by_genus_type_for_peers_on_date(self, source_id, destination_id, relationship_genus_type, from_, to):
    """Return relationships connecting the given peer ``Ids`` with the
    given genus ``Type``, effective through the entire given date
    range (inclusive), though not necessarily confined to it.

    :param source_id: a peer ``Id``
    :type source_id: ``osid.id.Id``
    :param destination_id: a related peer ``Id``
    :type destination_id: ``osid.id.Id``
    :param relationship_genus_type: a relationship genus type
    :type relationship_genus_type: ``osid.type.Type``
    :param from_: starting date
    :type from_: ``osid.calendaring.DateTime``
    :param to: ending date
    :type to: ``osid.calendaring.DateTime``
    :return: the matching relationships
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``InvalidArgument`` -- ``from_`` is greater than ``to``
    :raise: ``NullArgument`` -- ``source_id``, ``destination_id``, ``relationship_genus_type``, ``from_``, or ``to`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList
@abc.abstractmethod
def get_relationships(self):
    """Return all ``Relationships``.

    :return: a list of ``Relationships``
    :rtype: ``osid.relationship.RelationshipList``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipList

relationships = property(fget=get_relationships)
class RelationshipQuerySession:
    """Session for searching among ``Relationship`` objects.

    Search queries are built with a ``RelationshipQuery`` obtained
    from this session.  Relationships may carry a query record,
    indicated by their record types and accessed through the
    ``RelationshipQuery``.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_family_id(self):
        """Return the ``Family`` ``Id`` associated with this session.

        :return: the ``Family Id`` associated with this session
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.id.Id

    family_id = property(fget=get_family_id)

    @abc.abstractmethod
    def get_family(self):
        """Return the ``Family`` associated with this session.

        :return: the family
        :rtype: ``osid.relationship.Family``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.Family

    family = property(fget=get_family)

    @abc.abstractmethod
    def use_federated_family_view(self):
        """Federate the view for methods in this session.

        A federated view also includes relationships from families
        that are children of this family in the family hierarchy.

        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def use_isolated_family_view(self):
        """Isolate the view for methods in this session.

        An isolated view restricts retrievals to this family only.

        *compliance: mandatory -- This method must be implemented.*
        """
        pass

    @abc.abstractmethod
    def can_search_relationships(self):
        """Test whether this user may perform ``Relationship`` searches.

        A ``true`` result does not guarantee that authorization will
        succeed; a ``false`` result means every method in this session
        is known to raise ``PermissionDenied``.  Applications may use
        this hint to hide search operations from unauthorized users.

        :return: ``false`` if search methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # boolean

    @abc.abstractmethod
    def get_relationship_query(self):
        """Return a relationship query.

        :return: the relationship query
        :rtype: ``osid.relationship.RelationshipQuery``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipQuery

    relationship_query = property(fget=get_relationship_query)

    @abc.abstractmethod
    def get_relationships_by_query(self, relationship_query):
        """Return the ``Relationships`` matching the given query.

        :param relationship_query: the relationship query
        :type relationship_query: ``osid.relationship.RelationshipQuery``
        :return: the matching relationships
        :rtype: ``osid.relationship.RelationshipList``
        :raise: ``NullArgument`` -- ``relationship_query`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``relationship_query`` is not of this service

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipList
class RelationshipSearchSession:
    """Session for advanced searching among ``Relationship`` objects.

    ``get_relationships_by_query()`` is the basic search method and
    returns a list of ``Relationships``.  ``get_relationships_by_search()``
    additionally accepts a ``RelationshipSearch`` that applies options
    to the entire search, such as ordering, and returns a
    ``RelationshipSearchResults`` from which the resulting
    ``RelationshipList`` can be read or a search within the result set
    performed.

    Relationships may carry a query record, indicated by their record
    types and accessed through the ``RelationshipQuery``.
    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_relationship_search(self):
        """Return a relationship search.

        :return: the relationship search
        :rtype: ``osid.relationship.RelationshipSearch``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipSearch

    relationship_search = property(fget=get_relationship_search)

    @abc.abstractmethod
    def get_relationship_search_order(self):
        """Return a relationship search order.

        The ``RelationshipSearchOrder`` is supplied to a
        ``RelationshipSearch`` to specify the ordering of results.

        :return: the relationship search order
        :rtype: ``osid.relationship.RelationshipSearchOrder``

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipSearchOrder

    relationship_search_order = property(fget=get_relationship_search_order)

    @abc.abstractmethod
    def get_relationships_by_search(self, relationship_query, relationship_search):
        """Return the results matching the given query using the given search.

        :param relationship_query: the relationship query
        :type relationship_query: ``osid.relationship.RelationshipQuery``
        :param relationship_search: the relationship search
        :type relationship_search: ``osid.relationship.RelationshipSearch``
        :return: the search results
        :rtype: ``osid.relationship.RelationshipSearchResults``
        :raise: ``NullArgument`` -- ``relationship_query`` or ``relationship_search`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``relationship_query`` or ``relationship_search`` is not of this service

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipSearchResults

    @abc.abstractmethod
    def get_relationship_query_from_inspector(self, relationship_query_inspector):
        """Return a relationship query built from an inspector.

        The inspector is available from a ``RelationshipSearchResults``.

        :param relationship_query_inspector: a relationship query inspector
        :type relationship_query_inspector: ``osid.relationship.RelationshipQueryInspector``
        :return: the relationship query
        :rtype: ``osid.relationship.RelationshipQuery``
        :raise: ``NullArgument`` -- ``relationship_query_inspector`` is ``null``
        :raise: ``Unsupported`` -- ``relationship_query_inspector`` is not of this service

        *compliance: mandatory -- This method must be implemented.*
        """
        return None  # osid.relationship.RelationshipQuery
class RelationshipAdminSession:
"""This session creates, updates, and deletes ``Relationships``.
The data for create and update is provided by the consumer via the
form object. ``OsidForms`` are requested for each create or update
and may not be reused.
Create and update operations differ in their usage. To create a
``Relationship,`` a ``RelationshipForm`` is requested using
``get_relationship_form_for_create()`` specifying the desired peers
and record ``Types`` or none if no record ``Types`` are needed. The
returned ``RelationshipForm`` will indicate that it is to be used
with a create operation and can be used to examine metadata or
validate data prior to creation. Once the ``RelationshipForm`` is
submitted to a create operation, it cannot be reused with another
create operation unless the first operation was unsuccessful. Each
``RelationshipForm`` corresponds to an attempted transaction.
For updates, ``RelationshipForms`` are requested to the
``Relationship`` ``Id`` that is to be updated using
``getRelationshipFormForUpdate()``. Similarly, the
``RelationshipForm`` has metadata about the data that can be updated
and it can perform validation before submitting the update. The
``RelationshipForm`` can only be used once for a successful update
and cannot be reused.
The delete operations delete ``Relationships``. To unmap a
``Relationship`` from the current ``Family,`` the
``RelationshipFamilyAssignmentSession`` should be used. These delete
operations attempt to remove the ``Relationship`` itself thus
removing it from all known ``Family`` catalogs.
This session includes an ``Id`` aliasing mechanism to assign an
external ``Id`` to an internally assigned Id.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_family_id(self):
    """Return the ``Family`` ``Id`` associated with this session.

    :return: the ``Family Id`` associated with this session
    :rtype: ``osid.id.Id``

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.id.Id

family_id = property(fget=get_family_id)
@abc.abstractmethod
def get_family(self):
    """Return the ``Family`` associated with this session.

    :return: the family
    :rtype: ``osid.relationship.Family``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.Family

family = property(fget=get_family)
@abc.abstractmethod
def can_create_relationships(self):
    """Test whether this user may create ``Relationships``.

    A ``true`` result does not guarantee that authorization will
    succeed; a ``false`` result means creation is known to raise
    ``PermissionDenied``.  Applications may use this hint to hide
    create operations from unauthorized users.

    :return: ``false`` if ``Relationship`` creation is not authorized, ``true`` otherwise
    :rtype: ``boolean``

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # boolean
@abc.abstractmethod
def can_create_relationship_with_record_types(self, relationship_record_types):
    """Test whether a ``Relationship`` can be created with the desired record types.

    While ``RelationshipManager.getRelationshipRecordTypes()`` lists
    the supported records, this method tests which record(s) are
    required for creating a specific ``Relationship``.  Passing an
    empty array tests whether a ``Relationship`` can be created with
    no records.

    :param relationship_record_types: array of relationship record types
    :type relationship_record_types: ``osid.type.Type[]``
    :return: ``true`` if creation with the specified record ``Types`` is supported, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NullArgument`` -- ``relationship_record_types`` is ``null``

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # boolean
@abc.abstractmethod
def get_relationship_form_for_create(self, source_id, destination_id, relationship_record_types):
    """Return the relationship form for creating new relationships.

    A new form should be requested for each create transaction.

    :param source_id: ``Id`` of a peer
    :type source_id: ``osid.id.Id``
    :param destination_id: ``Id`` of the related peer
    :type destination_id: ``osid.id.Id``
    :param relationship_record_types: array of relationship record types
    :type relationship_record_types: ``osid.type.Type[]``
    :return: the relationship form
    :rtype: ``osid.relationship.RelationshipForm``
    :raise: ``NotFound`` -- ``source_id`` or ``destination_id`` is not found
    :raise: ``NullArgument`` -- ``source_id`` or ``destination_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- unable to get form for requested record types

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipForm
@abc.abstractmethod
def create_relationship(self, relationship_form):
    """Create a new ``Relationship`` from the given form.

    :param relationship_form: the form for this ``Relationship``
    :type relationship_form: ``osid.relationship.RelationshipForm``
    :return: the new ``Relationship``
    :rtype: ``osid.relationship.Relationship``
    :raise: ``IllegalState`` -- ``relationship_form`` already used in a create transaction
    :raise: ``InvalidArgument`` -- one or more of the form elements is invalid
    :raise: ``NullArgument`` -- ``relationship_form`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- ``relationship_form`` did not originate from ``get_relationship_form_for_create()``

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.Relationship
@abc.abstractmethod
def can_update_relationships(self):
    """Test whether this user may update ``Relationships``.

    A ``true`` result does not guarantee that authorization will
    succeed; a ``false`` result means updating is known to raise
    ``PermissionDenied``.  Applications may use this hint to hide
    update operations from unauthorized users.

    :return: ``false`` if ``Relationship`` modification is not authorized, ``true`` otherwise
    :rtype: ``boolean``

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # boolean
@abc.abstractmethod
def get_relationship_form_for_update(self, relationship_id):
    """Return the relationship form for updating an existing relationship.

    A new relationship form should be requested for each update
    transaction.

    :param relationship_id: the ``Id`` of the ``Relationship``
    :type relationship_id: ``osid.id.Id``
    :return: the relationship form
    :rtype: ``osid.relationship.RelationshipForm``
    :raise: ``NotFound`` -- ``relationship_id`` is not found
    :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*
    """
    return None  # osid.relationship.RelationshipForm
@abc.abstractmethod
def update_relationship(self, relationship_form):
    """Update an existing relationship from the given form.

    :param relationship_form: the form containing the elements to be updated
    :type relationship_form: ``osid.relationship.RelationshipForm``
    :raise: ``IllegalState`` -- ``relationship_form`` already used in an update transaction
    :raise: ``InvalidArgument`` -- the form contains an invalid value
    :raise: ``NullArgument`` -- ``relationship_form`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure
    :raise: ``Unsupported`` -- ``relationship_form`` did not originate from ``get_relationship_form_for_update()``

    *compliance: mandatory -- This method must be implemented.*
    """
    pass
@abc.abstractmethod
    def can_delete_relationships(self):
        """Tests if this user can delete ``Relationships``.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known deleting a
        ``Relationship`` will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        delete operations to an unauthorized user.

        :return: ``false`` if ``Relationship`` deletion is not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Abstract stub: implementations return an authorization hint.
        return # boolean
@abc.abstractmethod
    def delete_relationship(self, relationship_id):
        """Deletes a ``Relationship``.

        :param relationship_id: the ``Id`` of the ``Relationship`` to remove
        :type relationship_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``relationship_id`` not found
        :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass
@abc.abstractmethod
    def can_manage_relationship_aliases(self):
        """Tests if this user can manage ``Id`` aliases for ``Relationships``.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known changing an alias
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may opt not to offer alias
        operations to an unauthorized user.

        :return: ``false`` if ``Relationship`` aliasing is not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        # Abstract stub: implementations return an authorization hint.
        return # boolean
@abc.abstractmethod
    def alias_relationship(self, relationship_id, alias_id):
        """Adds an ``Id`` to a ``Relationship`` for the purpose of creating compatibility.

        The primary ``Id`` of the ``Relationship`` is determined by the
        provider. The new ``Id`` performs as an alias to the primary
        ``Id``. If the alias is a pointer to another relationship, it is
        reassigned to the given relationship ``Id``.

        :param relationship_id: the ``Id`` of a ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :param alias_id: the alias ``Id``
        :type alias_id: ``osid.id.Id``
        :raise: ``AlreadyExists`` -- ``alias_id`` is already assigned
        :raise: ``NotFound`` -- ``relationship`` not found
        :raise: ``NullArgument`` -- ``relationship_id`` or ``alias_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class RelationshipNotificationSession:
    """This session defines methods to receive notifications on adds/changes to ``Relationship`` objects in this ``Family``.

    This also includes existing relationships that may appear or
    disappear due to changes in the ``Family`` hierarchy. This session
    is intended for consumers needing to synchronize their state with
    this service without the use of polling. Notifications are cancelled
    when this session is closed.

    The two views defined in this session correspond to the views in the
    ``RelationshipLookupSession``.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_family_id(self):
        """Gets the ``Family`` ``Id`` associated with this session.

        :return: the ``Family Id`` associated with this session
        :rtype: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    family_id = property(fget=get_family_id)

    @abc.abstractmethod
    def get_family(self):
        """Gets the ``Family`` associated with this session.

        :return: the family
        :rtype: ``osid.relationship.Family``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.Family

    family = property(fget=get_family)

    @abc.abstractmethod
    def can_register_for_relationship_notifications(self):
        """Tests if this user can register for ``Relationship`` notifications.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may opt not to offer
        notification operations.

        :return: ``false`` if notification methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    @abc.abstractmethod
    def use_federated_family_view(self):
        """Federates the view for methods in this session.

        A federated view will include relationships in families which
        are children of this family in the family hierarchy.

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    @abc.abstractmethod
    def use_isolated_family_view(self):
        """Isolates the view for methods in this session.

        An isolated view restricts notifications to this family only.

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    # NOTE(review): this trio was previously defined twice in this class;
    # the later, copy-pasted definitions (which referenced
    # ``acknowledge_item_notification()``) silently shadowed these and
    # have been removed.
    @abc.abstractmethod
    def reliable_relationship_notifications(self):
        """Reliable notifications are desired.

        In reliable mode, notifications are to be acknowledged using
        ``acknowledge_relationship_notification()`` .

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    @abc.abstractmethod
    def unreliable_relationship_notifications(self):
        """Unreliable notifications are desired.

        In unreliable mode, notifications do not need to be
        acknowledged.

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    @abc.abstractmethod
    def acknowledge_relationship_notification(self, notification_id):
        """Acknowledge a relationship notification.

        :param notification_id: the ``Id`` of the notification
        :type notification_id: ``osid.id.Id``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_new_relationships(self):
        """Register for notifications of new relationships.

        ``RelationshipReceiver.newRelationships()`` is invoked when a
        new ``Relationship`` appears in this family.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_new_relationships_for_source(self, source_id):
        """Register for notifications of new relationships from the given source.

        ``RelationshipReceiver.newRelationships()`` is invoked when a
        new ``Relationship`` appears for the given peer.

        :param source_id: the ``Id`` of the source to monitor
        :type source_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``source_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_new_relationships_for_destination(self, destination_id):
        """Register for notifications of new relationships to the given destination.

        ``RelationshipReceiver.newRelationships()`` is invoked when a
        new ``Relationship`` appears for the given peer.

        :param destination_id: the ``Id`` of the destination node to monitor
        :type destination_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``destination_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_new_relationships_by_genus_type(self, relationship_genus_type):
        """Register for notifications of new relationships.

        ``RelationshipReceiver.newRelationships()`` is invoked when a
        new ``Relationship`` of the given genus type appears in this
        family.

        :param relationship_genus_type: the genus type of the ``Relationship`` to monitor
        :type relationship_genus_type: ``osid.type.Type``
        :raise: ``NullArgument`` -- ``relationship_genus_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_relationships(self):
        """Registers for notification of updated relationships.

        ``RelationshipReceiver.changedRelationships()`` is invoked when
        a relationship in this family is changed.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_relationships_for_source(self, source_id):
        """Register for notifications of updated relationships from the given source node.

        ``RelationshipReceiver.changedRelationships()`` is invoked when
        a ``Relationship`` is changed for the given peer.

        :param source_id: the ``Id`` of the source node to monitor
        :type source_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``source_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_relationships_for_destination(self, destination_id):
        """Register for notifications of updated relationships to the given destination node.

        ``RelationshipReceiver.changedRelationships()`` is invoked when
        a ``Relationship`` is changed for the given peer.

        :param destination_id: the ``Id`` of the destination node to monitor
        :type destination_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``destination_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_relationships_by_genus_type(self, relationship_genus_type):
        """Register for notifications of updated relationships.

        ``RelationshipReceiver.changedRelationships()`` is invoked when
        a ``Relationship`` of the given genus type is changed.

        :param relationship_genus_type: the genus type of the ``Relationship`` to monitor
        :type relationship_genus_type: ``osid.type.Type``
        :raise: ``NullArgument`` -- ``relationship_genus_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_relationship(self, relationship_id):
        """Registers for notification of an updated relationship.

        ``RelationshipReceiver.changedRelationships()`` is invoked when
        the specified relationship in this family is changed.

        :param relationship_id: the ``Id`` of the ``Relationship`` to monitor
        :type relationship_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_relationships(self):
        """Registers for notification of deleted relationships.

        ``RelationshipReceiver.deletedRelationships()`` is invoked when
        a relationship is deleted or removed from this family.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_relationships_for_source(self, source_id):
        """Register for notifications of deleted relationships from the given source node.

        ``RelationshipReceiver.deletedRelationships()`` is invoked when
        a ``Relationship`` is removed for the given peer.

        :param source_id: the ``Id`` of the source node to monitor
        :type source_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``source_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_relationships_for_destination(self, destination_id):
        """Register for notifications of deleted relationships to the given destination node.

        ``RelationshipReceiver.deletedRelationships()`` is invoked when
        a ``Relationship`` is removed for the given peer.

        :param destination_id: the ``Id`` of the destination node to monitor
        :type destination_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``destination_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_relationships_by_genus_type(self, relationship_genus_type):
        """Register for notifications of deleted relationships.

        ``RelationshipReceiver.deletedRelationships()`` is invoked when
        a ``Relationship`` of the given genus type is removed.

        :param relationship_genus_type: the genus type of the ``Relationship`` to monitor
        :type relationship_genus_type: ``osid.type.Type``
        :raise: ``NullArgument`` -- ``relationship_genus_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_relationship(self, relationship_id):
        """Registers for notification of a deleted relationship.

        ``RelationshipReceiver.deletedRelationships()`` is invoked when
        the specified relationship is deleted or removed from this
        family.

        :param relationship_id: the ``Id`` of the ``Relationship`` to monitor
        :type relationship_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class RelationshipFamilySession:
    """This session provides methods to retrieve ``Relationship`` to ``Family`` mappings.

    A ``Relationship`` may appear in multiple ``Family`` objects. Each
    catalog may have its own authorizations governing who is allowed to
    look at it.

    This lookup session defines several views:

      * comparative view: elements may be silently omitted or re-ordered
      * plenary view: provides a complete result set or is an error
        condition

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_lookup_relationship_family_mappings(self):
        """Tests if this user can perform lookups of relationship/family mappings.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known lookup methods in
        this session will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        lookup operations to unauthorized users.

        :return: ``false`` if looking up mappings is not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    @abc.abstractmethod
    def use_comparative_family_view(self):
        """The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.

        This view is used when greater interoperability is desired at
        the expense of precision.

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    @abc.abstractmethod
    def use_plenary_family_view(self):
        """A complete view of the ``Relationship`` and ``Family`` returns is desired.

        Methods will return what is requested or result in an error.
        This view is used when greater precision is desired at the
        expense of interoperability.

        *compliance: mandatory -- This method is must be implemented.*

        """
        pass

    @abc.abstractmethod
    def get_relationship_ids_by_family(self, family_id):
        """Gets the list of ``Relationship Ids`` associated with a ``Family``.

        :param family_id: ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: list of related relationship ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    @abc.abstractmethod
    def get_relationships_by_family(self, family_id):
        """Gets the list of ``Relationships`` associated with a ``Family``.

        :param family_id: ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: list of related relationships
        :rtype: ``osid.relationship.RelationshipList``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipList

    @abc.abstractmethod
    def get_relationship_ids_by_families(self, family_ids):
        """Gets the list of ``Relationship Ids`` corresponding to a list of ``Family`` objects.

        :param family_ids: list of family ``Ids``
        :type family_ids: ``osid.id.IdList``
        :return: list of relationship ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``family_ids`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    @abc.abstractmethod
    def get_relationships_by_families(self, family_ids):
        """Gets the list of ``Relationships`` corresponding to a list of ``Family`` objects.

        :param family_ids: list of family ``Ids``
        :type family_ids: ``osid.id.IdList``
        :return: list of relationships
        :rtype: ``osid.relationship.RelationshipList``
        :raise: ``NullArgument`` -- ``family_ids`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipList

    @abc.abstractmethod
    def get_family_ids_by_relationship(self, relationship_id):
        """Gets the ``Family`` ``Ids`` mapped to a ``Relationship``.

        :param relationship_id: ``Id`` of a ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :return: list of family ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``NotFound`` -- ``relationship_id`` is not found
        :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    @abc.abstractmethod
    def get_families_by_relationship(self, relationship_id):
        """Gets the ``Family`` objects mapped to a ``Relationship``.

        :param relationship_id: ``Id`` of a ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :return: list of families
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NotFound`` -- ``relationship_id`` is not found
        :raise: ``NullArgument`` -- ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.FamilyList
class RelationshipFamilyAssignmentSession:
    """This session provides methods to re-assign ``Relationships`` to ``Family`` objects A ``Relationship`` may appear in multiple ``Family`` objects and removing the last reference to a ``Relationship`` is the equivalent of deleting it.

    Each ``Family`` may have its own authorizations governing who is
    allowed to operate on it.

    Moving or adding a reference of a ``Relationship`` to another
    ``Family`` is not a copy operation (eg: does not change its ``Id``
    ).

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_assign_relationships(self):
        """Tests if this user can alter relationship/family mappings.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known mapping methods in
        this session will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        assignment operations to unauthorized users.

        :return: ``false`` if mapping is not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    @abc.abstractmethod
    def can_assign_relationships_to_family(self, family_id):
        """Tests if this user can alter relationship/family mappings.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known mapping methods in
        this session will result in a ``PermissionDenied``. This is
        intended as a hint to an application that may opt not to offer
        assignment operations to unauthorized users.

        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: ``false`` if mapping is not authorized, ``true`` otherwise
        :rtype: ``boolean``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    @abc.abstractmethod
    def get_assignable_family_ids(self, family_id):
        """Gets a list of families including and under the given family node in which any relationship can be assigned.

        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: list of assignable family ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    @abc.abstractmethod
    def get_assignable_family_ids_for_relationship(self, family_id, relationship_id):
        """Gets a list of families including and under the given family node in which a specific relationship can be assigned.

        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :param relationship_id: the ``Id`` of the ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :return: list of assignable family ``Ids``
        :rtype: ``osid.id.IdList``
        :raise: ``NullArgument`` -- ``family_id`` or ``relationship_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.IdList

    @abc.abstractmethod
    def assign_relationship_to_family(self, relationship_id, family_id):
        """Adds an existing ``Relationship`` to a ``Family``.

        :param relationship_id: the ``Id`` of the ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :raise: ``AlreadyExists`` -- ``relationship_id`` is already assigned to ``family_id``
        :raise: ``NotFound`` -- ``relationship_id`` or ``family_id`` not found
        :raise: ``NullArgument`` -- ``relationship_id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def unassign_relationship_from_family(self, relationship_id, family_id):
        """Removes a ``Relationship`` from a ``Family``.

        :param relationship_id: the ``Id`` of the ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``relationship_id`` or ``family_id`` not found or ``relationship_id`` not assigned to ``family_id``
        :raise: ``NullArgument`` -- ``relationship_id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def reassign_relationship_to_family(self, relationship_id, from_family_id, to_family_id):
        """Moves a ``Relationship`` from one ``Family`` to another.

        Mappings to other ``Families`` are unaffected.

        :param relationship_id: the ``Id`` of the ``Relationship``
        :type relationship_id: ``osid.id.Id``
        :param from_family_id: the ``Id`` of the current ``Family``
        :type from_family_id: ``osid.id.Id``
        :param to_family_id: the ``Id`` of the destination ``Family``
        :type to_family_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``relationship_id, from_family_id,`` or ``to_family_id`` not found or ``relationship_id`` not mapped to ``from_family_id``
        :raise: ``NullArgument`` -- ``relationship_id, from_family_id,`` or ``to_family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class RelationshipSmartFamilySession:
    """This session manages queries and sequencing to create "smart" dynamic catalogs.

    A ``RelationshipQuery`` can be retrieved from this session and
    mapped to this ``Family`` to create a virtual collection of
    ``Relationships``. The entries may be sequenced using the
    ``RelationshipSearchOrder`` from this session.

    This ``Family`` has a default query that matches any relationship
    and a default search order that specifies no sequencing. The queries
    may be examined using a ``RelationshipQueryInspector``. The query
    may be modified by converting the inspector back to a
    ``RelationshipQuery``.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_family_id(self):
        """Gets the ``Family`` ``Id`` associated with this session.

        :return: the ``Family Id`` associated with this session
        :rtype: ``osid.id.Id``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.id.Id

    family_id = property(fget=get_family_id)

    @abc.abstractmethod
    def get_family(self):
        """Gets the ``Family`` associated with this session.

        :return: the family
        :rtype: ``osid.relationship.Family``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.Family

    family = property(fget=get_family)

    @abc.abstractmethod
    def can_manage_smart_families(self):
        """Tests if this user can manage smart families.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may opt not to offer smart
        operations.

        :return: ``false`` if smart family methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # boolean

    @abc.abstractmethod
    def get_relationship_query(self):
        """Gets a relationship query.

        :return: the relationship query
        :rtype: ``osid.relationship.RelationshipQuery``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipQuery

    relationship_query = property(fget=get_relationship_query)

    @abc.abstractmethod
    def get_relationship_search_order(self):
        """Gets a relationship search order.

        :return: the relationship search order
        :rtype: ``osid.relationship.RelationshipSearchOrder``
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipSearchOrder

    relationship_search_order = property(fget=get_relationship_search_order)

    @abc.abstractmethod
    def apply_relationship_query(self, relationship_query):
        """Applies a relationship query to this family.

        :param relationship_query: the relationship query
        :type relationship_query: ``osid.relationship.RelationshipQuery``
        :raise: ``NullArgument`` -- ``relationship_query`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure occurred
        :raise: ``Unsupported`` -- ``relationship_query`` not of this service
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def inspect_relationship_query(self):
        """Gets a relationship query inspector for this family.

        :return: the relationship query inspector
        :rtype: ``osid.relationship.RelationshipQueryInspector``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure occurred
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipQueryInspector

    @abc.abstractmethod
    def apply_relationship_sequencing(self, relationship_search_order):
        """Applies a relationship search order to this family.

        :param relationship_search_order: the relationship search order
        :type relationship_search_order: ``osid.relationship.RelationshipSearchOrder``
        :raise: ``NullArgument`` -- ``relationship_search_order`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure occurred
        :raise: ``Unsupported`` -- ``relationship_search_order`` not of this service
        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def get_relationship_query_from_inspector(self, relationship_query_inspector):
        """Gets a relationship query from an inspector.

        :param relationship_query_inspector: a relationship query inspector
        :type relationship_query_inspector: ``osid.relationship.RelationshipQueryInspector``
        :return: the relationship query
        :rtype: ``osid.relationship.RelationshipQuery``
        :raise: ``NullArgument`` -- ``relationship_query_inspector`` is ``null``
        :raise: ``Unsupported`` -- ``relationship_query_inspector`` is not of this service
        *compliance: mandatory -- This method must be implemented.*

        """
        return # osid.relationship.RelationshipQuery
class FamilyLookupSession:
    """This session provides methods for retrieving ``Family`` objects.

    The ``Family`` represents a collection of relationships.

    This session defines views that offer differing behaviors when
    retrieving multiple objects.

      * comparative view: elements may be silently omitted or re-ordered
      * plenary view: provides a complete set or is an error condition

    Generally, the comparative view should be used for most applications
    as it permits operation even if there is data that cannot be
    accessed. For example, a browsing application may only need to
    examine the ``Families`` it can access, without breaking execution.
    However, an assessment may only be useful if all ``Families``
    referenced by it are available, and a test-taking application may
    sacrifice some interoperability for the sake of precision.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_lookup_families(self):
        """Tests if this user can perform ``Family`` lookups.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may not offer lookup operations
        to unauthorized users.

        :return: ``false`` if lookup methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def use_comparative_family_view(self):
        """The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error.

        This view is used when greater interoperability is desired at
        the expense of precision.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def use_plenary_family_view(self):
        """A complete view of the ``Family`` returns is desired.

        Methods will return what is requested or result in an error.
        This view is used when greater precision is desired at the
        expense of interoperability.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def get_family(self, family_id):
        """Gets the ``Family`` specified by its ``Id``.

        In plenary mode, the exact ``Id`` is found or a ``NotFound``
        results. Otherwise, the returned ``Family`` may have a different
        ``Id`` than requested, such as the case where a duplicate ``Id``
        was assigned to a ``Family`` and retained for compatibility.

        :param family_id: ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: the family
        :rtype: ``osid.relationship.Family``
        :raise: ``NotFound`` -- ``family_id`` not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.Family

    @abc.abstractmethod
    def get_families_by_ids(self, family_ids):
        """Gets a ``FamilyList`` corresponding to the given ``IdList``.

        In plenary mode, the returned list contains all of the families
        specified in the ``Id`` list, in the order of the list,
        including duplicates, or an error results if an ``Id`` in the
        supplied list is not found or inaccessible. Otherwise,
        inaccessible families may be omitted from the list and may
        present the elements in any order including returning a unique
        set.

        :param family_ids: the list of ``Ids`` to retrieve
        :type family_ids: ``osid.id.IdList``
        :return: the returned ``Family list``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NotFound`` -- an ``Id was`` not found
        :raise: ``NullArgument`` -- ``family_ids`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    @abc.abstractmethod
    def get_families_by_genus_type(self, family_genus_type):
        """Gets a ``FamilyList`` corresponding to the given family genus ``Type`` which does not include families of genus types derived from the specified ``Type``.

        In plenary mode, the returned list contains all known families
        or an error results. Otherwise, the returned list may contain
        only those families that are accessible through this session.

        :param family_genus_type: a family genus type
        :type family_genus_type: ``osid.type.Type``
        :return: the returned ``Family list``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NullArgument`` -- ``family_genus_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    @abc.abstractmethod
    def get_families_by_parent_genus_type(self, family_genus_type):
        """Gets a ``FamilyList`` corresponding to the given family genus ``Type`` and include any additional families with genus types derived from the specified ``Type``.

        In plenary mode, the returned list contains all known families
        or an error results. Otherwise, the returned list may contain
        only those families that are accessible through this session.

        :param family_genus_type: a family genus type
        :type family_genus_type: ``osid.type.Type``
        :return: the returned ``Family list``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NullArgument`` -- ``family_genus_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    @abc.abstractmethod
    def get_families_by_record_type(self, family_record_type):
        """Gets a ``FamilyList`` containing the given family record ``Type``.

        In plenary mode, the returned list contains all known families
        or an error results. Otherwise, the returned list may contain
        only those families that are accessible through this session.

        :param family_record_type: a family record type
        :type family_record_type: ``osid.type.Type``
        :return: the returned ``Family list``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NullArgument`` -- ``family_record_type`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    @abc.abstractmethod
    def get_families_by_provider(self, resource_id):
        """Gets a ``FamilyList`` from the given provider.

        In plenary mode, the returned list contains all known families
        or an error results. Otherwise, the returned list may contain
        only those families that are accessible through this session.

        :param resource_id: a resource ``Id``
        :type resource_id: ``osid.id.Id``
        :return: the returned ``Family list``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NullArgument`` -- ``resource_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    @abc.abstractmethod
    def get_families(self):
        """Gets all families.

        In plenary mode, the returned list contains all known families
        or an error results. Otherwise, the returned list may contain
        only those families that are accessible through this session.

        :return: a list of families
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList

    families = property(fget=get_families)
class FamilyQuerySession:
    """This session provides methods for searching ``Family`` objects.

    The search query is constructed using the ``FamilyQuery``. The
    family record ``Type`` also specifies the record for the family
    query.

    Families may have a query record indicated by their respective
    record types. The query record is accessed via the ``FamilyQuery``.
    The returns in this session may not be cast directly to these
    interfaces.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_search_families(self):
        """Tests if this user can perform ``Family`` searches.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may opt not to offer search
        operations to unauthorized users.

        :return: ``false`` if search methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_family_query(self):
        """Gets a family query.

        :return: the family query
        :rtype: ``osid.relationship.FamilyQuery``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyQuery

    family_query = property(fget=get_family_query)

    @abc.abstractmethod
    def get_families_by_query(self, family_query):
        """Gets a list of ``Family`` objects matching the given family query.

        :param family_query: the family query
        :type family_query: ``osid.relationship.FamilyQuery``
        :return: the returned ``FamilyList``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NullArgument`` -- ``family_query`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``family_query`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList
class FamilySearchSession:
    """This session provides methods for searching ``Family`` objects.

    The search query is constructed using the ``FamilyQuery``. The
    family record ``Type`` also specifies the record for the family
    query.

    ``get_families_by_query()`` is the basic search method and returns a
    list of ``Family`` elements. A more advanced search may be performed
    with ``getFamiliesBySearch()``. It accepts a ``FamilySearch`` in
    addition to the query for the purpose of specifying additional
    options affecting the entire search, such as ordering.
    ``get_families_by_search()`` returns a ``FamilySearchResults`` that
    can be used to access the resulting ``FamilyList`` or be used to
    perform a search within the result set through ``FamilySearch``.

    Families may have a query record indicated by their respective
    record types. The query record is accessed via the ``FamilyQuery``.
    The returns in this session may not be cast directly to these
    interfaces.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_family_search(self):
        """Gets a family search.

        :return: the family search
        :rtype: ``osid.relationship.FamilySearch``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilySearch

    family_search = property(fget=get_family_search)

    @abc.abstractmethod
    def get_family_search_order(self):
        """Gets a family search order.

        The ``FamilySearchOrder`` is supplied to a ``FamilySearch`` to
        specify the ordering of results.

        :return: the family search order
        :rtype: ``osid.relationship.FamilySearchOrder``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilySearchOrder

    family_search_order = property(fget=get_family_search_order)

    @abc.abstractmethod
    def get_families_by_search(self, family_query, family_search):
        """Gets the search results matching the given search.

        :param family_query: the family query
        :type family_query: ``osid.relationship.FamilyQuery``
        :param family_search: the family search
        :type family_search: ``osid.relationship.FamilySearch``
        :return: the search results
        :rtype: ``osid.relationship.FamilySearchResults``
        :raise: ``NullArgument`` -- ``family_query`` or ``family_search`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``family_query`` or ``family_search`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilySearchResults

    @abc.abstractmethod
    def get_family_query_from_inspector(self, family_query_inspector):
        """Gets a family query from an inspector.

        The inspector is available from an ``FamilySearchResults``.

        :param family_query_inspector: a family query inspector
        :type family_query_inspector: ``osid.relationship.FamilyQueryInspector``
        :return: the family query
        :rtype: ``osid.relationship.FamilyQuery``
        :raise: ``NullArgument`` -- ``family_query_inspector`` is ``null``
        :raise: ``Unsupported`` -- ``family_query_inspector`` is not of this service

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyQuery
class FamilyAdminSession:
    """This session creates, updates, and deletes ``Families``.

    The data for create and update is provided by the consumer via the
    form object. ``OsidForms`` are requested for each create or update
    and may not be reused.

    Create and update operations differ in their usage. To create a
    ``Family,`` a ``FamilyForm`` is requested using
    ``get_family_form_for_create()`` specifying the desired record
    ``Types`` or none if no record ``Types`` are needed. The returned
    ``FamilyForm`` will indicate that it is to be used with a create
    operation and can be used to examine metadata or validate data prior
    to creation. Once the ``FamilyForm`` is submitted to a create
    operation, it cannot be reused with another create operation unless
    the first operation was unsuccessful. Each ``FamilyForm``
    corresponds to an attempted transaction.

    For updates, ``FamilyForms`` are requested to the ``Family`` ``Id``
    that is to be updated using ``getFamilyFormForUpdate()``. Similarly,
    the ``FamilyForm`` has metadata about the data that can be updated
    and it can perform validation before submitting the update. The
    ``FamilyForm`` can only be used once for a successful update and
    cannot be reused.

    The delete operations delete ``Families``.

    This session includes an ``Id`` aliasing mechanism to assign an
    external ``Id`` to an internally assigned Id.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_create_families(self):
        """Tests if this user can create families.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known creating a ``Family``
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may not wish to offer create
        operations to unauthorized users.

        :return: ``false`` if ``Family`` creation is not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def can_create_family_with_record_types(self, family_record_types):
        """Tests if this user can create a single ``Family`` using the desired record types.

        While ``RelationshipManager.getFamilyRecordTypes()`` can be used
        to examine which records are supported, this method tests which
        record(s) are required for creating a specific ``Family``.
        Providing an empty array tests if a ``Family`` can be created
        with no records.

        :param family_record_types: array of family record types
        :type family_record_types: ``osid.type.Type[]``
        :return: ``true`` if ``Family`` creation using the specified record ``Types`` is supported, ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NullArgument`` -- ``family_record_types is null``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_family_form_for_create(self, family_record_types):
        """Gets the family form for creating new families.

        A new form should be requested for each create transaction.

        :param family_record_types: array of family record types
        :type family_record_types: ``osid.type.Type[]``
        :return: the family form
        :rtype: ``osid.relationship.FamilyForm``
        :raise: ``NullArgument`` -- ``family_record_types is null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- unable to get form for requested record types

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyForm

    @abc.abstractmethod
    def create_family(self, family_form):
        """Creates a new ``Family``.

        :param family_form: the form for this ``Family``.
        :type family_form: ``osid.relationship.FamilyForm``
        :return: the new ``Family``
        :rtype: ``osid.relationship.Family``
        :raise: ``IllegalState`` -- ``family_form`` already used in a create transaction
        :raise: ``InvalidArgument`` -- one or more of the form elements is invalid
        :raise: ``NullArgument`` -- ``family_form`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``family_form`` did not originate from ``get_family_form_for_create()``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.Family

    @abc.abstractmethod
    def can_update_families(self):
        """Tests if this user can update families.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known updating a ``Family``
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may not wish to offer update
        operations to unauthorized users.

        :return: ``false`` if ``Family`` modification is not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def get_family_form_for_update(self, family_id):
        """Gets the family form for updating an existing family.

        A new family form should be requested for each update
        transaction.

        :param family_id: the ``Id`` of the ``Family``
        :type family_id: ``osid.id.Id``
        :return: the family form
        :rtype: ``osid.relationship.FamilyForm``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyForm

    @abc.abstractmethod
    def update_family(self, family_form):
        """Updates an existing family.

        :param family_form: the form containing the elements to be updated
        :type family_form: ``osid.relationship.FamilyForm``
        :raise: ``IllegalState`` -- ``family_form`` already used in an update transaction
        :raise: ``InvalidArgument`` -- the form contains an invalid value
        :raise: ``NullArgument`` -- ``family_id`` or ``family_form`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure
        :raise: ``Unsupported`` -- ``family_form`` did not originate from ``get_family_form_for_update()``

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def can_delete_families(self):
        """Tests if this user can delete families.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known deleting a ``Family``
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may not wish to offer delete
        operations to unauthorized users.

        :return: ``false`` if ``Family`` deletion is not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def delete_family(self, family_id):
        """Deletes a ``Family``.

        :param family_id: the ``Id`` of the ``Family`` to remove
        :type family_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``family_id`` not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def can_manage_family_aliases(self):
        """Tests if this user can manage ``Id`` aliases for families.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known changing an alias
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may opt not to offer alias
        operations to an unauthorized user.

        :return: ``false`` if ``Family`` aliasing is not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def alias_family(self, family_id, alias_id):
        """Adds an ``Id`` to a ``Family`` for the purpose of creating compatibility.

        The primary ``Id`` of the ``Family`` is determined by the
        provider. The new ``Id`` performs as an alias to the primary
        ``Id``. If the alias is a pointer to another family, it is
        reassigned to the given family ``Id``.

        :param family_id: the ``Id`` of a ``Family``
        :type family_id: ``osid.id.Id``
        :param alias_id: the alias ``Id``
        :type alias_id: ``osid.id.Id``
        :raise: ``AlreadyExists`` -- ``alias_id`` is already assigned
        :raise: ``NotFound`` -- ``family_id`` not found
        :raise: ``NullArgument`` -- ``family_id`` or ``alias_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class FamilyNotificationSession:
    """This session defines methods to receive notifications on adds/changes to ``Family`` objects.

    This session is intended for consumers needing to synchronize their
    state with this service without the use of polling. Notifications
    are cancelled when this session is closed.

    Notifications are triggered with changes to the ``Family`` object
    itself. Adding and removing relationships result in notifications
    available from the notification session for rules.

    """
    # NOTE(review): the original declared reliable_family_notifications,
    # unreliable_family_notifications, and acknowledge_family_notification
    # twice; the redundant trailing copies (which silently shadowed the
    # earlier definitions and carried copy-pasted "item" docstrings) have
    # been removed. The set of abstract methods is unchanged.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def can_register_for_family_notifications(self):
        """Tests if this user can register for ``Family`` notifications.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known all methods in this
        session will result in a ``PermissionDenied``. This is intended
        as a hint to an application that may opt not to offer
        notification operations.

        :return: ``false`` if notification methods are not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def reliable_family_notifications(self):
        """Reliable notifications are desired.

        In reliable mode, notifications are to be acknowledged using
        ``acknowledge_family_notification()`` .

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def unreliable_family_notifications(self):
        """Unreliable notifications are desired.

        In unreliable mode, notifications do not need to be
        acknowledged.

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def acknowledge_family_notification(self, notification_id):
        """Acknowledge a family notification.

        :param notification_id: the ``Id`` of the notification
        :type notification_id: ``osid.id.Id``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_new_families(self):
        """Register for notifications of new families.

        ``FamilyReceiver.newFamilies()`` is invoked when a new
        ``Family`` is created.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_families(self):
        """Registers for notification of updated families.

        ``FamilyReceiver.changedFamilies()`` is invoked when a family is
        changed.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_family(self, family_id):
        """Registers for notification of an updated family.

        ``FamilyReceiver.changedFamilies()`` is invoked when the
        specified family is changed.

        :param family_id: the ``Id`` of the ``Family`` to monitor
        :type family_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_families(self):
        """Registers for notification of deleted families.

        ``FamilyReceiver.deletedFamilies()`` is invoked when a family is
        deleted.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_deleted_family(self, family_id):
        """Registers for notification of a deleted family.

        ``FamilyReceiver.deletedFamilies()`` is invoked when the
        specified family is deleted.

        :param family_id: the ``Id`` of the ``Family`` to monitor
        :type family_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_family_hierarchy(self):
        """Registers for notification of an updated family hierarchy structure.

        ``FamilyReceiver.changedChildOfFamilies()`` is invoked when a
        node experiences a change in its children.

        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_family_hierarchy_for_ancestors(self, family_id):
        """Registers for notification of an updated family hierarchy structure.

        ``FamilyReceiver.changedChildOfFamilies()`` is invoked when the
        specified node or any of its ancestors experiences a change in
        its children.

        :param family_id: the ``Id`` of the ``Family`` node to monitor
        :type family_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def register_for_changed_family_hierarchy_for_descendants(self, family_id):
        """Registers for notification of an updated family hierarchy structure.

        ``FamilyReceiver.changedChildOfFamilies()`` is invoked when the
        specified node or any of its descendants experiences a change in
        its children.

        :param family_id: the ``Id`` of the ``Family`` node to monitor
        :type family_id: ``osid.id.Id``
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
class FamilyHierarchySession:
"""This session defines methods for traversing a hierarchy of ``Family`` objects.
Each node in the hierarchy is a unique ``Family``. The hierarchy may
be traversed recursively to establish the tree structure through
``get_parent_families()`` and ``getChildFamilies()``. To relate
these ``Ids`` to another OSID, ``get_family_nodes()`` can be used
for retrievals that can be used for bulk lookups in other OSIDs. Any
``Family`` available in the Relationship OSID is known to this
hierarchy but does not appear in the hierarchy traversal until added
as a root node or a child of another node.
A user may not be authorized to traverse the entire hierarchy. Parts
of the hierarchy may be made invisible through omission from the
returns of ``get_parent_families()`` or ``get_child_families()`` in
lieu of a ``PermissionDenied`` error that may disrupt the traversal
through authorized pathways.
This session defines views that offer differing behaviors when
retrieving multiple objects.
* comparative view: family elements may be silently omitted or re-
ordered
* plenary view: provides a complete set or is an error condition
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def get_family_hierarchy_id(self):
    """Gets the hierarchy ``Id`` associated with this session.

    :return: the hierarchy ``Id`` associated with this session
    :rtype: ``osid.id.Id``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.id.Id

family_hierarchy_id = property(fget=get_family_hierarchy_id)
@abc.abstractmethod
def get_family_hierarchy(self):
    """Gets the hierarchy associated with this session.

    :return: the hierarchy associated with this session
    :rtype: ``osid.hierarchy.Hierarchy``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.hierarchy.Hierarchy

family_hierarchy = property(fget=get_family_hierarchy)
@abc.abstractmethod
def can_access_family_hierarchy(self):
    """Tests if this user can perform hierarchy queries.

    A return of true does not guarantee successful authorization. A
    return of false indicates that it is known all methods in this
    session will result in a ``PermissionDenied``. This is intended
    as a hint to an application that may not offer hierarchy
    traversal operations to unauthorized users.

    :return: ``false`` if hierarchy traversal methods are not authorized, ``true`` otherwise
    :rtype: ``boolean``

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean
@abc.abstractmethod
def use_comparative_family_view(self):
    """The returns from the family methods may omit or translate elements based on this session, such as authorization, and not result in an error.

    This view is used when greater interoperability is desired at
    the expense of precision.

    *compliance: mandatory -- This method must be implemented.*

    """
    pass
@abc.abstractmethod
def use_plenary_family_view(self):
    """A complete view of the ``Family`` returns is desired.

    Methods will return what is requested or result in an error.
    This view is used when greater precision is desired at the
    expense of interoperability.

    *compliance: mandatory -- This method must be implemented.*

    """
    pass
@abc.abstractmethod
def get_root_family_ids(self):
    """Gets the root family ``Ids`` in this hierarchy.

    :return: the root family ``Ids``
    :rtype: ``osid.id.IdList``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.id.IdList

root_family_ids = property(fget=get_root_family_ids)
@abc.abstractmethod
def get_root_families(self):
    """Gets the root families in the family hierarchy.

    A node with no parents is an orphan. While all family ``Ids``
    are known to the hierarchy, an orphan does not appear in the
    hierarchy unless explicitly added as a root node or child of
    another node.

    :return: the root families
    :rtype: ``osid.relationship.FamilyList``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # osid.relationship.FamilyList

root_families = property(fget=get_root_families)
@abc.abstractmethod
def has_parent_families(self, family_id):
    """Tests if the ``Family`` has any parents.

    :param family_id: the ``Id`` of a family
    :type family_id: ``osid.id.Id``
    :return: ``true`` if the family has parents, ``false`` otherwise
    :rtype: ``boolean``
    :raise: ``NotFound`` -- ``family_id`` is not found
    :raise: ``NullArgument`` -- ``family_id`` is ``null``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``PermissionDenied`` -- authorization failure

    *compliance: mandatory -- This method must be implemented.*

    """
    return  # boolean
    @abc.abstractmethod
    def is_parent_of_family(self, id_, family_id):
        """Tests if an ``Id`` is a direct parent of a family.

        :param id: an ``Id``
        :type id: ``osid.id.Id``
        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: ``true`` if this ``id`` is a parent of ``family_id,`` ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*
        *implementation notes*: If ``id`` not found return ``false``.

        """
        return  # boolean
    @abc.abstractmethod
    def get_parent_family_ids(self, family_id):
        """Gets the parent ``Ids`` of the given family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: the parent ``Ids`` of the family
        :rtype: ``osid.id.IdList``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.id.IdList
    @abc.abstractmethod
    def get_parent_families(self, family_id):
        """Gets the parent families of the given ``id``.

        :param family_id: the ``Id`` of the ``Family`` to query
        :type family_id: ``osid.id.Id``
        :return: the parent families of the ``id``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NotFound`` -- a ``Family`` identified by ``Id is`` not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList
    @abc.abstractmethod
    def is_ancestor_of_family(self, id_, family_id):
        """Tests if an ``Id`` is an ancestor of a family.

        :param id: an ``Id``
        :type id: ``osid.id.Id``
        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: ``true`` if this ``id`` is an ancestor of ``family_id,`` ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*
        *implementation notes*: If ``id`` not found return ``false``.

        """
        return  # boolean
    @abc.abstractmethod
    def has_child_families(self, family_id):
        """Tests if a family has any children.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: ``true`` if the ``family_id`` has children, ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean
    @abc.abstractmethod
    def is_child_of_family(self, id_, family_id):
        """Tests if a family is a direct child of another.

        :param id: an ``Id``
        :type id: ``osid.id.Id``
        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: ``true`` if the ``id`` is a child of ``family_id,`` ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*
        *implementation notes*: If ``id`` not found return ``false``.

        """
        return  # boolean
    @abc.abstractmethod
    def get_child_family_ids(self, family_id):
        """Gets the child ``Ids`` of the given family.

        :param family_id: the ``Id`` to query
        :type family_id: ``osid.id.Id``
        :return: the children of the family
        :rtype: ``osid.id.IdList``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.id.IdList
    @abc.abstractmethod
    def get_child_families(self, family_id):
        """Gets the child families of the given ``id``.

        :param family_id: the ``Id`` of the ``Family`` to query
        :type family_id: ``osid.id.Id``
        :return: the child families of the ``id``
        :rtype: ``osid.relationship.FamilyList``
        :raise: ``NotFound`` -- a ``Family`` identified by ``Id is`` not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyList
    @abc.abstractmethod
    def is_descendant_of_family(self, id_, family_id):
        """Tests if an ``Id`` is a descendant of a family.

        :param id: an ``Id``
        :type id: ``osid.id.Id``
        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :return: ``true`` if the ``id`` is a descendant of the ``family_id,`` ``false`` otherwise
        :rtype: ``boolean``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``id`` or ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*
        *implementation notes*: If ``id`` is not found return ``false``.

        """
        return  # boolean
    @abc.abstractmethod
    def get_family_node_ids(self, family_id, ancestor_levels, descendant_levels, include_siblings):
        """Gets a portion of the hierarchy for the given family.

        :param family_id: the ``Id`` to query
        :type family_id: ``osid.id.Id``
        :param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
        :type ancestor_levels: ``cardinal``
        :param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
        :type descendant_levels: ``cardinal``
        :param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
        :type include_siblings: ``boolean``
        :return: a family node
        :rtype: ``osid.hierarchy.Node``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.hierarchy.Node
    @abc.abstractmethod
    def get_family_nodes(self, family_id, ancestor_levels, descendant_levels, include_siblings):
        """Gets a portion of the hierarchy for the given family.

        :param family_id: the ``Id`` to query
        :type family_id: ``osid.id.Id``
        :param ancestor_levels: the maximum number of ancestor levels to include. A value of 0 returns no parents in the node.
        :type ancestor_levels: ``cardinal``
        :param descendant_levels: the maximum number of descendant levels to include. A value of 0 returns no children in the node.
        :type descendant_levels: ``cardinal``
        :param include_siblings: ``true`` to include the siblings of the given node, ``false`` to omit the siblings
        :type include_siblings: ``boolean``
        :return: a family node
        :rtype: ``osid.relationship.FamilyNode``
        :raise: ``NotFound`` -- ``family_id`` is not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.relationship.FamilyNode
class FamilyHierarchyDesignSession:
    """This session manages a hierarchy of families.

    Families may be organized into a hierarchy for organizing or
    federating. A parent ``Family`` includes all of the relationships of
    its children such that a single root node contains all of the
    relationships of the federation.

    """
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def get_family_hierarchy_id(self):
        """Gets the hierarchy ``Id`` associated with this session.

        :return: the hierarchy ``Id`` associated with this session
        :rtype: ``osid.id.Id``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.id.Id

    family_hierarchy_id = property(fget=get_family_hierarchy_id)

    @abc.abstractmethod
    def get_family_hierarchy(self):
        """Gets the hierarchy associated with this session.

        :return: the hierarchy associated with this session
        :rtype: ``osid.hierarchy.Hierarchy``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # osid.hierarchy.Hierarchy

    family_hierarchy = property(fget=get_family_hierarchy)

    @abc.abstractmethod
    def can_modify_family_hierarchy(self):
        """Tests if this user can change the hierarchy.

        A return of true does not guarantee successful authorization. A
        return of false indicates that it is known performing any update
        will result in a ``PermissionDenied``. This is intended as a
        hint to an application that may opt not to offer these
        operations to an unauthorized user.

        :return: ``false`` if changing this hierarchy is not authorized, ``true`` otherwise
        :rtype: ``boolean``

        *compliance: mandatory -- This method must be implemented.*

        """
        return  # boolean

    @abc.abstractmethod
    def add_root_family(self, family_id):
        """Adds a root family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :raise: ``AlreadyExists`` -- ``family_id`` is already in hierarchy
        :raise: ``NotFound`` -- ``family_id`` not found
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def remove_root_family(self, family_id):
        """Removes a root family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``family_id`` not a root
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def add_child_family(self, family_id, child_id):
        """Adds a child to a family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :param child_id: the ``Id`` of the new child
        :type child_id: ``osid.id.Id``
        :raise: ``AlreadyExists`` -- ``family_id`` is already a parent of ``child_id``
        :raise: ``NotFound`` -- ``family_id`` or ``child_id`` not found
        :raise: ``NullArgument`` -- ``family_id`` or ``child_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def remove_child_family(self, family_id, child_id):
        """Removes a child from a family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :param child_id: the ``Id`` of the new child
        :type child_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``family_id`` not a parent of ``child_id``
        :raise: ``NullArgument`` -- ``family_id`` or ``child_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass

    @abc.abstractmethod
    def remove_child_families(self, family_id):
        """Removes all children from a family.

        :param family_id: the ``Id`` of a family
        :type family_id: ``osid.id.Id``
        :raise: ``NotFound`` -- ``family_id`` not in hierarchy
        :raise: ``NullArgument`` -- ``family_id`` is ``null``
        :raise: ``OperationFailed`` -- unable to complete request
        :raise: ``PermissionDenied`` -- authorization failure

        *compliance: mandatory -- This method must be implemented.*

        """
        pass
| mit |
slevenhagen/odoo | addons/web/controllers/main.py | 9 | 65864 | # -*- coding: utf-8 -*-
import ast
import base64
import csv
import functools
import glob
import itertools
import jinja2
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import sys
import time
import urllib2
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
try:
import xlwt
except ImportError:
xlwt = None
import openerp
import openerp.modules.registry
from openerp.addons.base.ir.ir_qweb import AssetsBundle, QWebTemplateNotFound
from openerp.modules import get_module_resource
from openerp.tools import topological_sort
from openerp.tools.translate import _
from openerp.tools import ustr
from openerp import http
from openerp.http import request, serialize_exception as _serialize_exception
_logger = logging.getLogger(__name__)
if hasattr(sys, 'frozen'):
# When running on compiled windows binary, we don't have access to package loader.
path = os.path.realpath(os.path.join(os.path.dirname(__file__), '..', 'views'))
loader = jinja2.FileSystemLoader(path)
else:
loader = jinja2.PackageLoader('openerp.addons.web', "views")
env = jinja2.Environment(loader=loader, autoescape=True)
env.filters["json"] = simplejson.dumps
# 1 week cache for asset bundles as advised by Google Page Speed
BUNDLE_MAXAGE = 60 * 60 * 24 * 7
#----------------------------------------------------------
# OpenERP Web helpers
#----------------------------------------------------------
db_list = http.db_list
db_monodb = http.db_monodb
def serialize_exception(f):
    """Decorator for http handlers: convert any uncaught exception into an
    HTTP 500 response whose body is the JSON-serialized exception.

    The payload produced by ``http.serialize_exception`` is wrapped in the
    ``{code, message, data}`` envelope the web client expects.
    """
    @functools.wraps(f)
    def wrap(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception, e:
            # log the full traceback server-side before answering the client
            _logger.exception("An exception occured during an http request")
            se = _serialize_exception(e)
            error = {
                'code': 200,
                'message': "Odoo Server Error",
                'data': se
            }
            return werkzeug.exceptions.InternalServerError(simplejson.dumps(error))
    return wrap
def redirect_with_hash(*args, **kw):
    """Compatibility shim kept for old importers.

    .. deprecated:: 8.0
        Use the ``http.redirect_with_hash()`` function instead.
    """
    return http.redirect_with_hash(*args, **kw)
def abort_and_redirect(url):
    """Abort the current request with a 302 redirect to *url*.

    The redirect is rebuilt through the WSGI application with
    ``explicit_session=False`` so no session cookie is (re)emitted.
    """
    httprequest = request.httprequest
    redirect = werkzeug.utils.redirect(url, 302)
    redirect = httprequest.app.get_response(httprequest, redirect, explicit_session=False)
    werkzeug.exceptions.abort(redirect)
def ensure_db(redirect='/web/database/selector'):
    """Bind the current request to a database, or redirect to a selector.

    This helper should be used in web client auth="none" routes
    if those routes needs a db to work with.
    If the heuristics does not find any database, then the users will be
    redirected to db selector or any url specified by `redirect` argument.
    If the db is taken out of a query parameter, it will be checked against
    `http.db_filter()` in order to ensure it's legit and thus avoid db
    forgering that could lead to xss attacks.
    """
    db = request.params.get('db')

    # Ensure db is legit
    if db and db not in http.db_filter([db]):
        db = None

    if db and not request.session.db:
        # User asked a specific database on a new session.
        # That mean the nodb router has been used to find the route
        # Depending on installed module in the database, the rendering of the page
        # may depend on data injected by the database route dispatcher.
        # Thus, we redirect the user to the same page but with the session cookie set.
        # This will force using the database route dispatcher...
        r = request.httprequest
        url_redirect = r.base_url
        if r.query_string:
            # Can't use werkzeug.wrappers.BaseRequest.url with encoded hashes:
            # https://github.com/amigrave/werkzeug/commit/b4a62433f2f7678c234cdcac6247a869f90a7eb7
            url_redirect += '?' + r.query_string
        response = werkzeug.utils.redirect(url_redirect, 302)
        request.session.db = db
        abort_and_redirect(url_redirect)

    # if db not provided, use the session one
    if not db and request.session.db and http.db_filter([request.session.db]):
        db = request.session.db

    # if no database provided and no database in session, use monodb
    if not db:
        db = db_monodb(request.httprequest)

    # if no db can be found til here, send to the database selector
    # the database selector will redirect to database manager if needed
    if not db:
        werkzeug.exceptions.abort(werkzeug.utils.redirect(redirect, 303))

    # always switch the session to the computed db
    if db != request.session.db:
        request.session.logout()
        abort_and_redirect(request.httprequest.url)

    request.session.db = db
def module_installed():
    """Return the installed, web-loadable module names for the current
    session's database, topologically sorted by dependency."""
    # Candidates module the current heuristic is the /static dir
    loadable = http.addons_manifest.keys()
    modules = {}

    # Retrieve database installed modules
    # TODO The following code should move to ir.module.module.list_installed_modules()
    Modules = request.session.model('ir.module.module')
    domain = [('state','=','installed'), ('name','in', loadable)]
    for module in Modules.search_read(domain, ['name', 'dependencies_id']):
        modules[module['name']] = []
        deps = module.get('dependencies_id')
        if deps:
            deps_read = request.session.model('ir.module.module.dependency').read(deps, ['name'])
            dependencies = [i['name'] for i in deps_read]
            modules[module['name']] = dependencies

    sorted_modules = topological_sort(modules)
    return sorted_modules
def module_installed_bypass_session(dbname):
    """Return the installed, web-loadable module names of ``dbname``,
    topologically sorted by dependency, without needing a user session.

    The registry is read directly as the superuser.  On any registry
    error an empty list is returned (best-effort), but the failure is
    logged instead of being silently swallowed as before.

    :param str dbname: database to inspect
    :returns: sorted list of module names
    """
    loadable = http.addons_manifest.keys()
    modules = {}
    try:
        registry = openerp.modules.registry.RegistryManager.get(dbname)
        with registry.cursor() as cr:
            m = registry.get('ir.module.module')
            # TODO The following code should move to ir.module.module.list_installed_modules()
            domain = [('state', '=', 'installed'), ('name', 'in', loadable)]
            # reuse the domain (it used to be rebuilt inline) and use the
            # symbolic superuser id instead of the magic number 1
            ids = m.search(cr, openerp.SUPERUSER_ID, domain)
            for module in m.read(cr, openerp.SUPERUSER_ID, ids, ['name', 'dependencies_id']):
                modules[module['name']] = []
                deps = module.get('dependencies_id')
                if deps:
                    deps_read = registry.get('ir.module.module.dependency').read(
                        cr, openerp.SUPERUSER_ID, deps, ['name'])
                    modules[module['name']] = [i['name'] for i in deps_read]
    except Exception:
        # best-effort: keep returning an empty module list, but leave a
        # trace so broken databases are no longer invisible
        _logger.exception("Cannot retrieve installed modules for db %s", dbname)
    sorted_modules = topological_sort(modules)
    return sorted_modules
def module_boot(db=None):
    """Compute the ordered list of modules to load at boot: server-wide
    modules first, then database-installed ones (deduplicated)."""
    server_wide = openerp.conf.server_wide_modules or ['web']
    serverside = [name for name in server_wide if name in http.addons_manifest]
    dbside = []
    monodb = db or db_monodb()
    if monodb:
        installed = module_installed_bypass_session(monodb)
        dbside = [name for name in installed if name not in serverside]
    return serverside + dbside
def concat_xml(file_list):
    """Concatenate xml files: merge the root children of every file under
    a single root element named after the first file's root tag.

    :param list(str) file_list: list of files to check
    :returns: (concatenation_result, checksum) where the checksum is the
              sha1 of the raw concatenated file contents
    :rtype: (str, str)
    """
    checksum = hashlib.new('sha1')
    if not file_list:
        return '', checksum.hexdigest()

    root = None
    for fname in file_list:
        with open(fname, 'rb') as fp:
            contents = fp.read()
        checksum.update(contents)
        # parse the bytes we already read instead of seeking back and
        # re-reading the file through ElementTree.parse()
        xml = ElementTree.fromstring(contents)

        if root is None:
            root = ElementTree.Element(xml.tag)
        #elif root.tag != xml.tag:
        #    raise ValueError("Root tags missmatch: %r != %r" % (root.tag, xml.tag))

        # iterate the element directly: Element.getchildren() is
        # deprecated and removed in Python 3.9
        for child in xml:
            root.append(child)
    return ElementTree.tostring(root, 'utf-8'), checksum.hexdigest()
def fs2web(path):
    """Convert a filesystem path into a web path (forward slashes)."""
    return path.replace(os.path.sep, '/')
def manifest_glob(extension, addons=None, db=None, include_remotes=False):
    """Resolve the asset globs declared under *extension* ('js', 'css',
    'qweb') in the manifests of the given addons.

    :param addons: comma-separated addon names; ``None`` means the boot
                   module list computed from *db*
    :param bool include_remotes: also keep ``http(s)://`` and ``//`` URLs
    :returns: list of ``(filesystem_path_or_None, web_path_or_url)`` pairs
    """
    if addons is None:
        addons = module_boot(db=db)
    else:
        addons = addons.split(',')
    r = []
    for addon in addons:
        manifest = http.addons_manifest.get(addon, None)
        if not manifest:
            continue
        # ensure does not ends with /
        addons_path = os.path.join(manifest['addons_path'], '')[:-1]
        globlist = manifest.get(extension, [])
        for pattern in globlist:
            if pattern.startswith(('http://', 'https://', '//')):
                # remote assets have no filesystem path
                if include_remotes:
                    r.append((None, pattern))
            else:
                for path in glob.glob(os.path.normpath(os.path.join(addons_path, addon, pattern))):
                    r.append((path, fs2web(path[len(addons_path):])))
    return r
def manifest_list(extension, mods=None, db=None, debug=None):
    """List the web paths of the resources to load, specifying either:

    mods: a comma separated string listing modules
    db: a database name (return all installed modules in that database)
    """
    if debug is not None:
        _logger.warning("openerp.addons.web.main.manifest_list(): debug parameter is deprecated")
    globbed = manifest_glob(extension, addons=mods, db=db, include_remotes=True)
    return [web_path for _fs_path, web_path in globbed]
def get_last_modified(files):
    """Return the modification time of the most recently modified file,
    or the Unix epoch when *files* is empty.

    :param list(str) files: names of files to check
    :rtype: datetime.datetime
    """
    mtimes = [datetime.datetime.fromtimestamp(os.path.getmtime(fname))
              for fname in files]
    if not mtimes:
        return datetime.datetime(1970, 1, 1)
    return max(mtimes)
def make_conditional(response, last_modified=None, etag=None, max_age=0):
    """Make *response* conditional on the current request and mandate
    revalidation from clients.

    Sets ``Last-Modified`` / ``ETag`` when provided, then delegates to
    Werkzeug's own ``ETagResponseMixin.make_conditional``.

    :param response: Werkzeug response to decorate
    :param last_modified: last modification date of the response content
    :param etag: checksum of the content (deep etag)
    :returns: the same response object
    """
    cache = response.cache_control
    cache.must_revalidate = True
    cache.max_age = max_age
    if last_modified:
        response.last_modified = last_modified
    if etag:
        response.set_etag(etag)
    return response.make_conditional(request.httprequest)
def login_and_redirect(db, login, key, redirect_url='/web'):
    """Authenticate the current session then redirect to *redirect_url*."""
    request.session.authenticate(db, login, key)
    return set_cookie_and_redirect(redirect_url)
def set_cookie_and_redirect(redirect_url):
    """Build a 303 redirect whose Location header is kept verbatim
    (Werkzeug's autocorrection is disabled)."""
    response = werkzeug.utils.redirect(redirect_url, 303)
    response.autocorrect_location_header = False
    return response
def login_redirect():
    """Return a tiny HTML page that client-side redirects to the login
    page, preserving both the query string and the location hash (which
    the server never sees)."""
    url = '/web/login?'
    # built the redirect url, keeping all the query parameters of the url
    redirect_url = '%s?%s' % (request.httprequest.base_url, werkzeug.urls.url_encode(request.params))
    return """<html><head><script>
    window.location = '%sredirect=' + encodeURIComponent("%s" + location.hash);
</script></head></html>
""" % (url, redirect_url)
def load_actions_from_ir_values(key, key2, models, meta):
    """Fetch the matching ir.values actions and normalize each action
    descriptor through clean_action()."""
    Values = request.session.model('ir.values')
    raw_actions = Values.get(key, key2, models, meta, request.context)
    return [(action_id, name, clean_action(action))
            for action_id, name, action in raw_actions]
def clean_action(action):
    """Normalize an action descriptor in place: guarantee the 'flags' and
    'type' keys, and fold view_type into view_mode for window actions."""
    action.setdefault('flags', {})
    kind = action.setdefault('type', 'ir.actions.act_window_close')
    if kind == 'ir.actions.act_window':
        return fix_view_modes(action)
    return action
# I think generate_views,fix_view_modes should go into js ActionManager
def generate_views(action):
    """Populate ``action['views']`` for actions built on the fly.

    While the server computes a ``views`` sequence for actions stored in
    the database (``ir.actions.act_window``), dictionaries generated on
    the fly (e.g. returned by buttons) may lack it.  Since the web client
    relies on ``action['views']``, derive it here from ``view_mode`` and
    ``view_id``.

    Handled cases:

    * no view_id, one or several view modes
    * a single view_id combined with a single view mode

    :param dict action: action descriptor, completed in place
    """
    view_id = action.get('view_id') or False
    if isinstance(view_id, (list, tuple)):
        # many2one read() value: [id, display_name]
        view_id = view_id[0]

    # providing at least one view mode is a requirement, not an option
    modes = action['view_mode'].split(',')

    if len(modes) == 1:
        action['views'] = [(view_id, modes[0])]
        return
    if view_id:
        raise ValueError('Non-db action dictionaries should provide '
                         'either multiple view modes or a single view '
                         'mode and an optional view id.\n\n Got view '
                         'modes %r and view id %r for action %r' % (
                             modes, view_id, action))
    action['views'] = [(False, mode) for mode in modes]
def fix_view_modes(action):
    """Fold the legacy ``view_type`` attribute into ``view_mode``.

    Historically the ``tree`` view mode stood for both list views and
    actual tree views; the real kind was selected by ``view_type``
    (``form`` meaning list).  This rewrites ``tree`` to the new ``list``
    mode whenever ``view_type`` is ``form``.

    :param dict action: an action descriptor, modified in place
    :returns: the action
    """
    if not action.get('views'):
        generate_views(action)

    if action.pop('view_type', 'form') != 'form':
        # a genuine tree view: nothing to fold
        return action

    if 'view_mode' in action:
        modes = action['view_mode'].split(',')
        action['view_mode'] = ','.join(
            'list' if mode == 'tree' else mode for mode in modes)
    action['views'] = [
        [view, 'list' if mode == 'tree' else mode]
        for view, mode in action['views']
    ]

    return action
def _local_web_translations(trans_file):
    """Extract the 'openerp-web' messages of a PO file.

    :returns: list of ``{'id': ..., 'string': ...}`` dicts, or ``None``
              when the file cannot be read/parsed.
    """
    try:
        with open(trans_file) as t_file:
            po = babel.messages.pofile.read_po(t_file)
    except Exception:
        return
    return [{'id': entry.id, 'string': entry.string}
            for entry in po
            if entry.id and entry.string and "openerp-web" in entry.auto_comments]
def xml2json_from_elementtree(el, preserve_whitespaces=False):
    """Convert an ElementTree element into the xml2json-direct dict form
    (``tag``/``namespace``/``attrs``/``children``).

    Based on the simple XML-to-JSON converter at
    http://code.google.com/p/xml2json-direct/ (New BSD licensed).
    """
    node = {}
    if el.tag[0] == "{":
        # Clark notation "{namespace}localname"
        namespace, local_name = el.tag.rsplit("}", 1)
        node["tag"] = local_name
        node["namespace"] = namespace[1:]
    else:
        node["tag"] = el.tag
    node["attrs"] = dict(el.items())
    children = []
    if el.text and (preserve_whitespaces or el.text.strip() != ''):
        children.append(el.text)
    for child in el:
        children.append(xml2json_from_elementtree(child, preserve_whitespaces))
        if child.tail and (preserve_whitespaces or child.tail.strip() != ''):
            children.append(child.tail)
    node["children"] = children
    return node
def content_disposition(filename):
    """Build a Content-Disposition attachment header for *filename*,
    working around old-IE and old-Safari encoding quirks."""
    filename = ustr(filename)
    escaped = urllib2.quote(filename.encode('utf8'))
    agent = request.httprequest.user_agent
    browser = agent.browser
    version = int((agent.version or '0').split('.')[0])
    if browser == 'msie' and version < 9:
        return "attachment; filename=%s" % escaped
    if browser == 'safari' and version < 537:
        return u"attachment; filename=%s" % filename.encode('ascii', 'replace')
    return "attachment; filename*=UTF-8''%s" % escaped
#----------------------------------------------------------
# OpenERP Web web Controllers
#----------------------------------------------------------
class Home(http.Controller):
    """Web client entry points: landing page, login and asset bundles."""

    @http.route('/', type='http', auth="none")
    def index(self, s_action=None, db=None, **kw):
        # The root URL forwards to /web, keeping query string and hash.
        return http.local_redirect('/web', query=request.params, keep_hash=True)

    @http.route('/web', type='http', auth="none")
    def web_client(self, s_action=None, **kw):
        """Main web client page; needs a database and a logged-in user."""
        ensure_db()
        if request.session.uid:
            if kw.get('redirect'):
                return werkzeug.utils.redirect(kw.get('redirect'), 303)
            if not request.uid:
                request.uid = request.session.uid
            menu_data = request.registry['ir.ui.menu'].load_menus(request.cr, request.uid, context=request.context)
            return request.render('web.webclient_bootstrap', qcontext={'menu_data': menu_data})
        else:
            return login_redirect()

    @http.route('/web/dbredirect', type='http', auth="none")
    def web_db_redirect(self, redirect='/', **kw):
        # Redirect helper that first makes sure a database is selected.
        ensure_db()
        return werkzeug.utils.redirect(redirect, 303)

    @http.route('/web/login', type='http', auth="none")
    def web_login(self, redirect=None, **kw):
        """Render the login form (GET) or authenticate the user (POST)."""
        ensure_db()

        if request.httprequest.method == 'GET' and redirect and request.session.uid:
            # already logged in: honour the requested redirection directly
            return http.redirect_with_hash(redirect)

        if not request.uid:
            request.uid = openerp.SUPERUSER_ID

        values = request.params.copy()
        if not redirect:
            redirect = '/web?' + request.httprequest.query_string
        values['redirect'] = redirect

        try:
            values['databases'] = http.db_list()
        except openerp.exceptions.AccessDenied:
            values['databases'] = None

        if request.httprequest.method == 'POST':
            old_uid = request.uid
            uid = request.session.authenticate(request.session.db, request.params['login'], request.params['password'])
            if uid is not False:
                return http.redirect_with_hash(redirect)
            # failed authentication: restore the original uid and show an error
            request.uid = old_uid
            values['error'] = "Wrong login/password"
        if request.env.ref('web.login', False):
            return request.render('web.login', values)
        else:
            # probably not an odoo compatible database
            error = 'Unable to login on database %s' % request.session.db
            return werkzeug.utils.redirect('/web/database/selector?error=%s' % error, 303)

    @http.route('/login', type='http', auth="none")
    def login(self, db, login, key, redirect="/web", **kw):
        # Direct login URL; the database must pass the db filter.
        if not http.db_filter([db]):
            return werkzeug.utils.redirect('/', 303)
        return login_and_redirect(db, login, key, redirect_url=redirect)

    @http.route([
        '/web/js/<xmlid>',
        '/web/js/<xmlid>/<version>',
    ], type='http', auth='public')
    def js_bundle(self, xmlid, version=None, **kw):
        """Serve the JS of an asset bundle, cached for BUNDLE_MAXAGE."""
        try:
            bundle = AssetsBundle(xmlid)
        except QWebTemplateNotFound:
            return request.not_found()

        response = request.make_response(bundle.js(), [('Content-Type', 'application/javascript')])
        return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)

    @http.route([
        '/web/css/<xmlid>',
        '/web/css/<xmlid>/<version>',
        '/web/css.<int:page>/<xmlid>/<version>',
    ], type='http', auth='public')
    def css_bundle(self, xmlid, version=None, page=None, **kw):
        """Serve one page of an asset bundle's CSS, cached for BUNDLE_MAXAGE."""
        try:
            bundle = AssetsBundle(xmlid)
        except QWebTemplateNotFound:
            return request.not_found()

        response = request.make_response(bundle.css(page), [('Content-Type', 'text/css')])
        return make_conditional(response, bundle.last_modified, max_age=BUNDLE_MAXAGE)
class WebClient(http.Controller):
    """JSON/HTTP endpoints exposing web client assets and translations."""

    @http.route('/web/webclient/csslist', type='json', auth="none")
    def csslist(self, mods=None):
        # CSS web paths declared by the given modules' manifests.
        return manifest_list('css', mods=mods)

    @http.route('/web/webclient/jslist', type='json', auth="none")
    def jslist(self, mods=None):
        # JS web paths declared by the given modules' manifests.
        return manifest_list('js', mods=mods)

    @http.route('/web/webclient/qweb', type='http', auth="none")
    def qweb(self, mods=None, db=None):
        # Serve the concatenation of all QWeb template files, with
        # Last-Modified / ETag conditional-request support.
        files = [f[0] for f in manifest_glob('qweb', addons=mods, db=db)]
        last_modified = get_last_modified(files)
        if request.httprequest.if_modified_since and request.httprequest.if_modified_since >= last_modified:
            return werkzeug.wrappers.Response(status=304)

        content, checksum = concat_xml(files)
        return make_conditional(
            request.make_response(content, [('Content-Type', 'text/xml')]),
            last_modified, checksum)

    @http.route('/web/webclient/bootstrap_translations', type='json', auth="none")
    def bootstrap_translations(self, mods):
        """ Load local translations from *.po files, as a temporary solution
            until we have established a valid session. This is meant only
            for translating the login page and db management chrome, using
            the browser's language. """
        # For performance reasons we only load a single translation, so for
        # sub-languages (that should only be partially translated) we load the
        # main language PO instead - that should be enough for the login screen.
        lang = request.lang.split('_')[0]

        translations_per_module = {}
        for addon_name in mods:
            if http.addons_manifest[addon_name].get('bootstrap'):
                addons_path = http.addons_manifest[addon_name]['addons_path']
                f_name = os.path.join(addons_path, addon_name, "i18n", lang + ".po")
                if not os.path.exists(f_name):
                    continue
                translations_per_module[addon_name] = {'messages': _local_web_translations(f_name)}

        return {"modules": translations_per_module,
                "lang_parameters": None}

    @http.route('/web/webclient/translations', type='json', auth="none")
    def translations(self, mods=None, lang=None):
        """Return the openerp-web translations and language parameters
        for the requested modules and language."""
        request.disable_db = False
        uid = openerp.SUPERUSER_ID

        if mods is None:
            m = request.registry.get('ir.module.module')
            mods = [x['name'] for x in m.search_read(request.cr, uid,
                [('state','=','installed')], ['name'])]
        if lang is None:
            lang = request.context["lang"]
        res_lang = request.registry.get('res.lang')
        ids = res_lang.search(request.cr, uid, [("code", "=", lang)])
        lang_params = None
        if ids:
            lang_params = res_lang.read(request.cr, uid, ids[0], ["direction", "date_format", "time_format",
                                                "grouping", "decimal_point", "thousands_sep"])

        # Regional languages (ll_CC) must inherit/override their parent lang (ll), but this is
        # done server-side when the language is loaded, so we only need to load the user's lang.
        ir_translation = request.registry.get('ir.translation')
        translations_per_module = {}
        messages = ir_translation.search_read(request.cr, uid, [('module','in',mods),('lang','=',lang),
                                               ('comments','like','openerp-web'),('value','!=',False),
                                               ('value','!=','')],
                                              ['module','src','value','lang'], order='module')
        for mod, msg_group in itertools.groupby(messages, key=operator.itemgetter('module')):
            translations_per_module.setdefault(mod,{'messages':[]})
            translations_per_module[mod]['messages'].extend({'id': m['src'],
                                                             'string': m['value']} \
                for m in msg_group)
        return {"modules": translations_per_module,
                "lang_parameters": lang_params}

    @http.route('/web/webclient/version_info', type='json', auth="none")
    def version_info(self):
        # Expose the server version information (common.exp_version RPC).
        return openerp.service.common.exp_version()

    @http.route('/web/tests', type='http', auth="none")
    def index(self, mod=None, **kwargs):
        # QUnit test-suite page for the web client.
        return request.render('web.qunit_suite')
class Proxy(http.Controller):
    @http.route('/web/proxy/load', type='json', auth="none")
    def load(self, path):
        """Fetch ``path`` from the current WSGI application and return the
        raw response body.

        Proxies an HTTP request through a JSON request. Requesting binary
        files through this endpoint is discouraged, as the result will be
        a binary data blob as well.

        :param path: actual request path
        :return: file content
        """
        from werkzeug.test import Client
        from werkzeug.wrappers import BaseResponse
        test_client = Client(request.httprequest.app, BaseResponse)
        response = test_client.get(path, base_url=request.httprequest.base_url)
        return response.data
class Database(http.Controller):
    """Database-management endpoints: listing, creation, duplication,
    removal, backup/restore and super-admin password change."""
    @http.route('/web/database/selector', type='http', auth="none")
    def selector(self, **kw):
        """Render the database-selection page, redirecting to the manager
        when no database exists yet."""
        try:
            dbs = http.db_list()
            if not dbs:
                return http.local_redirect('/web/database/manager')
        except openerp.exceptions.AccessDenied:
            # Listing may be forbidden by server configuration; render the
            # page without a database list.
            dbs = False
        return env.get_template("database_selector.html").render({
            'databases': dbs,
            'debug': request.debug,
            'error': kw.get('error')
        })
    @http.route('/web/database/manager', type='http', auth="none")
    def manager(self, **kw):
        """Render the database-manager page (logs the current session out
        first)."""
        # TODO: migrate the webclient's database manager to server side views
        request.session.logout()
        return env.get_template("database_manager.html").render({
            'modules': simplejson.dumps(module_boot()),
        })
    @http.route('/web/database/get_list', type='json', auth="none")
    def get_list(self):
        """Return the list of available database names, falling back to the
        single configured database when listing is denied."""
        # TODO change js to avoid calling this method if in monodb mode
        try:
            return http.db_list()
        except openerp.exceptions.AccessDenied:
            monodb = db_monodb()
            if monodb:
                return [monodb]
            raise
    @http.route('/web/database/create', type='json', auth="none")
    def create(self, fields):
        """Create a new database from the manager form's ``fields``
        (name/value pairs) and authenticate as admin on success."""
        params = dict(map(operator.itemgetter('name', 'value'), fields))
        db_created = request.session.proxy("db").create_database(
            params['super_admin_pwd'],
            params['db_name'],
            bool(params.get('demo_data')),
            params['db_lang'],
            params['create_admin_pwd'])
        if db_created:
            request.session.authenticate(params['db_name'], 'admin', params['create_admin_pwd'])
        return db_created
    @http.route('/web/database/duplicate', type='json', auth="none")
    def duplicate(self, fields):
        """Duplicate an existing database under a new name."""
        params = dict(map(operator.itemgetter('name', 'value'), fields))
        duplicate_attrs = (
            params['super_admin_pwd'],
            params['db_original_name'],
            params['db_name'],
        )
        return request.session.proxy("db").duplicate_database(*duplicate_attrs)
    @http.route('/web/database/drop', type='json', auth="none")
    def drop(self, fields):
        """Drop a database. Returns True/False for success/failure, or an
        error mapping when the super-admin password is rejected."""
        password, db = operator.itemgetter(
            'drop_pwd', 'drop_db')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        try:
            if request.session.proxy("db").drop(password, db):
                return True
            else:
                return False
        except openerp.exceptions.AccessDenied:
            return {'error': 'AccessDenied', 'title': 'Drop Database'}
        except Exception:
            return {'error': _('Could not drop database !'), 'title': _('Drop Database')}
    @http.route('/web/database/backup', type='http', auth="none")
    def backup(self, backup_db, backup_pwd, token, backup_format='zip'):
        """Stream a dump of ``backup_db`` as a file download after checking
        the super-admin password; on failure a JSON error payload is
        returned instead."""
        try:
            openerp.service.security.check_super(backup_pwd)
            ts = datetime.datetime.utcnow().strftime("%Y-%m-%d_%H-%M-%S")
            filename = "%s_%s.%s" % (backup_db, ts, backup_format)
            headers = [
                ('Content-Type', 'application/octet-stream; charset=binary'),
                ('Content-Disposition', content_disposition(filename)),
            ]
            dump_stream = openerp.service.db.dump_db(backup_db, None, backup_format)
            response = werkzeug.wrappers.Response(dump_stream, headers=headers, direct_passthrough=True)
            # The cookie lets the client side detect that the download started.
            response.set_cookie('fileToken', token)
            return response
        except Exception, e:
            _logger.exception('Database.backup')
            return simplejson.dumps([[],[{'error': openerp.tools.ustr(e), 'title': _('Backup Database')}]])
    @http.route('/web/database/restore', type='http', auth="none")
    def restore(self, db_file, restore_pwd, new_db, mode):
        """Restore the uploaded dump ``db_file`` into database ``new_db``."""
        try:
            # mode == 'copy' is forwarded to the db service as its copy flag.
            copy = mode == 'copy'
            data = base64.b64encode(db_file.read())
            request.session.proxy("db").restore(restore_pwd, new_db, data, copy)
            return ''
        except openerp.exceptions.AccessDenied, e:
            raise Exception("AccessDenied")
    @http.route('/web/database/change_password', type='json', auth="none")
    def change_password(self, fields):
        """Change the super-admin password from the form ``fields``."""
        old_password, new_password = operator.itemgetter(
            'old_pwd', 'new_pwd')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        try:
            return request.session.proxy("db").change_admin_password(old_password, new_password)
        except openerp.exceptions.AccessDenied:
            return {'error': 'AccessDenied', 'title': _('Change Password')}
        except Exception:
            return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
class Session(http.Controller):
    """Session-lifecycle endpoints: authentication, session description,
    password change, saved actions and logout."""
    def session_info(self):
        """Build the session description dict sent to the web client."""
        request.session.ensure_valid()
        return {
            "session_id": request.session_id,
            "uid": request.session.uid,
            "user_context": request.session.get_context() if request.session.uid else {},
            "db": request.session.db,
            "username": request.session.login,
            "company_id": request.env.user.company_id.id if request.session.uid else None,
        }
    @http.route('/web/session/get_session_info', type='json', auth="none")
    def get_session_info(self):
        """Return the current session's info (auth="none": also works for
        not-yet-authenticated sessions)."""
        request.uid = request.session.uid
        request.disable_db = False
        return self.session_info()
    @http.route('/web/session/authenticate', type='json', auth="none")
    def authenticate(self, db, login, password, base_location=None):
        """Authenticate against ``db`` and return the new session's info."""
        request.session.authenticate(db, login, password)
        return self.session_info()
    @http.route('/web/session/change_password', type='json', auth="user")
    def change_password(self, fields):
        """Change the current user's password from the form ``fields``
        (old_pwd / new_password / confirm_pwd name-value pairs); returns
        either {'new_password': ...} or an error mapping."""
        old_password, new_password,confirm_password = operator.itemgetter('old_pwd', 'new_password','confirm_pwd')(
                dict(map(operator.itemgetter('name', 'value'), fields)))
        if not (old_password.strip() and new_password.strip() and confirm_password.strip()):
            return {'error':_('You cannot leave any password empty.'),'title': _('Change Password')}
        if new_password != confirm_password:
            return {'error': _('The new password and its confirmation must be identical.'),'title': _('Change Password')}
        try:
            if request.session.model('res.users').change_password(
                old_password, new_password):
                return {'new_password':new_password}
        except Exception:
            return {'error': _('The old password you provided is incorrect, your password was not changed.'), 'title': _('Change Password')}
        return {'error': _('Error, password not changed !'), 'title': _('Change Password')}
    @http.route('/web/session/get_lang_list', type='json', auth="none")
    def get_lang_list(self):
        """Return the languages known to the database service."""
        try:
            return request.session.proxy("db").list_lang() or []
        except Exception, e:
            return {"error": e, "title": _("Languages")}
    @http.route('/web/session/modules', type='json', auth="user")
    def modules(self):
        # return all installed modules. Web client is smart enough to not load a module twice
        return module_installed()
    @http.route('/web/session/save_session_action', type='json', auth="user")
    def save_session_action(self, the_action):
        """
        This method stores an action object in the session and returns an
        integer identifying that action. The method get_session_action()
        can be used to get back the action.
        :param the_action: The action to save in the session.
        :type the_action: anything
        :return: A key identifying the saved action.
        :rtype: integer
        """
        return request.httpsession.save_action(the_action)
    @http.route('/web/session/get_session_action', type='json', auth="user")
    def get_session_action(self, key):
        """
        Gets back a previously saved action. This method can return None if the action
        was saved since too much time (this case should be handled in a smart way).
        :param key: The key given by save_session_action()
        :type key: integer
        :return: The saved action or None.
        :rtype: anything
        """
        return request.httpsession.get_action(key)
    @http.route('/web/session/check', type='json', auth="user")
    def check(self):
        """Raise if the current session is invalid, return None otherwise."""
        request.session.assert_valid()
        return None
    @http.route('/web/session/destroy', type='json', auth="user")
    def destroy(self):
        """Log the current session out, dropping its authentication."""
        request.session.logout()
    @http.route('/web/session/logout', type='http', auth="none")
    def logout(self, redirect='/web'):
        """HTTP logout: drop the session (keeping the database choice) and
        redirect, by default to /web."""
        request.session.logout(keep_db=True)
        return werkzeug.utils.redirect(redirect, 303)
class Menu(http.Controller):
    @http.route('/web/menu/load_needaction', type='json', auth="user")
    def load_needaction(self, menu_ids):
        """Load the "needaction" counters for specific menu ids.

        :param menu_ids: ids of the menus to fetch counters for
        :return: needaction data
        :rtype: dict(menu_id: {'needaction_enabled': boolean, 'needaction_counter': int})
        """
        Menus = request.session.model('ir.ui.menu')
        return Menus.get_needaction_data(menu_ids, request.context)
class DataSet(http.Controller):
    """Generic model-access endpoints used by the web client: search,
    read, arbitrary method calls, workflow signals and resequencing."""
    @http.route('/web/dataset/search_read', type='json', auth="user")
    def search_read(self, model, fields=False, offset=0, limit=False, domain=None, sort=None):
        """JSON route wrapper around :meth:`do_search_read`."""
        return self.do_search_read(model, fields, offset, limit, domain, sort)
    def do_search_read(self, model, fields=False, offset=0, limit=False, domain=None
                       , sort=None):
        """ Performs a search() followed by a read() (if needed) using the
        provided search criteria
        :param str model: the name of the model to search on
        :param fields: a list of the fields to return in the result records
        :type fields: [str]
        :param int offset: from which index should the results start being returned
        :param int limit: the maximum number of records to return
        :param list domain: the search domain for the query
        :param list sort: sorting directives
        :returns: A structure (dict) with two keys: ids (all the ids matching
                  the (domain, context) pair) and records (paginated records
                  matching fields selection set)
        :rtype: list
        """
        Model = request.session.model(model)
        records = Model.search_read(domain, fields, offset or 0, limit or False, sort or False,
                                    request.context)
        if not records:
            return {
                'length': 0,
                'records': []
            }
        if limit and len(records) == limit:
            # The page is full: only a count query can tell the real total.
            length = Model.search_count(domain, request.context)
        else:
            length = len(records) + (offset or 0)
        return {
            'length': length,
            'records': records
        }
    @http.route('/web/dataset/load', type='json', auth="user")
    def load(self, model, id, fields):
        """Read one record and return {'value': record_dict} ({} when the
        record could not be read)."""
        m = request.session.model(model)
        value = {}
        r = m.read([id], False, request.context)
        if r:
            value = r[0]
        return {'value': value}
    def call_common(self, model, method, args, domain_id=None, context_id=None):
        """Compatibility shim forwarding to :meth:`_call_kw` with no kwargs."""
        return self._call_kw(model, method, args, {})
    def _call_kw(self, model, method, args, kwargs):
        """Invoke ``method`` of ``model`` with the request's cr/uid.
        Underscore-prefixed (private) methods are rejected."""
        if method.startswith('_'):
            raise Exception("Access Denied: Underscore prefixed methods cannot be remotely called")
        return getattr(request.registry.get(model), method)(request.cr, request.uid, *args, **kwargs)
    @http.route('/web/dataset/call', type='json', auth="user")
    def call(self, model, method, args, domain_id=None, context_id=None):
        """Invoke ``method`` on ``model`` with positional ``args`` only."""
        return self._call_kw(model, method, args, {})
    @http.route(['/web/dataset/call_kw', '/web/dataset/call_kw/<path:path>'], type='json', auth="user")
    def call_kw(self, model, method, args, kwargs, path=None):
        """Invoke ``method`` on ``model`` with positional and keyword args."""
        return self._call_kw(model, method, args, kwargs)
    @http.route('/web/dataset/call_button', type='json', auth="user")
    def call_button(self, model, method, args, domain_id=None, context_id=None):
        """Invoke a button's ``method``; return the resulting action cleaned
        for the client, or False when no forwardable action was returned."""
        action = self._call_kw(model, method, args, {})
        # Only dict results whose 'type' is not the empty string are actions.
        if isinstance(action, dict) and action.get('type') != '':
            return clean_action(action)
        return False
    @http.route('/web/dataset/exec_workflow', type='json', auth="user")
    def exec_workflow(self, model, id, signal):
        """Send workflow ``signal`` to record ``id`` of ``model``."""
        return request.session.exec_workflow(model, id, signal)
    @http.route('/web/dataset/resequence', type='json', auth="user")
    def resequence(self, model, ids, field='sequence', offset=0):
        """ Re-sequences a number of records in the model, by their ids
        The re-sequencing starts at the first model of ``ids``, the sequence
        number is incremented by one after each record and starts at ``offset``
        :param ids: identifiers of the records to resequence, in the new sequence order
        :type ids: list(id)
        :param str field: field used for sequence specification, defaults to
                          "sequence"
        :param int offset: sequence number for first record in ``ids``, allows
                           starting the resequencing from an arbitrary number,
                           defaults to ``0``
        :returns: True on success, False when ``field`` does not exist on the model
        """
        m = request.session.model(model)
        if not m.fields_get([field]):
            return False
        # python 2.6 has no start parameter
        for i, id in enumerate(ids):
            m.write(id, { field: i + offset })
        return True
class View(http.Controller):
    @http.route('/web/view/add_custom', type='json', auth="user")
    def add_custom(self, view_id, arch):
        """Store a personal customization (``arch``) of view ``view_id``
        for the current user."""
        values = {
            'user_id': request.session.uid,
            'ref_id': view_id,
            'arch': arch
        }
        request.session.model('ir.ui.view.custom').create(values, request.context)
        return {'result': True}

    @http.route('/web/view/undo_custom', type='json', auth="user")
    def undo_custom(self, view_id, reset=False):
        """Remove the current user's customizations of view ``view_id``:
        all of them when ``reset`` is true, otherwise only the first one
        returned by the search."""
        Custom = request.session.model('ir.ui.view.custom')
        found = Custom.search([('user_id', '=', request.session.uid), ('ref_id' ,'=', view_id)],
                              0, False, False, request.context)
        if not found:
            return {'result': False}
        targets = found if reset else [found[0]]
        Custom.unlink(targets, request.context)
        return {'result': True}
class TreeView(View):
    @http.route('/web/treeview/action', type='json', auth="user")
    def action(self, model, id):
        """Return the 'tree_but_open' actions bound in ir.values to record
        ``id`` of ``model``."""
        keys = [(model, id)]
        return load_actions_from_ir_values('action', 'tree_but_open', keys, False)
class Binary(http.Controller):
@http.route('/web/binary/image', type='http', auth="public")
def image(self, model, id, field, **kw):
last_update = '__last_update'
Model = request.registry[model]
cr, uid, context = request.cr, request.uid, request.context
headers = [('Content-Type', 'image/png')]
etag = request.httprequest.headers.get('If-None-Match')
hashed_session = hashlib.md5(request.session_id).hexdigest()
retag = hashed_session
id = None if not id else simplejson.loads(id)
if type(id) is list:
id = id[0] # m2o
try:
if etag:
if not id and hashed_session == etag:
return werkzeug.wrappers.Response(status=304)
else:
date = Model.read(cr, uid, [id], [last_update], context)[0].get(last_update)
if hashlib.md5(date).hexdigest() == etag:
return werkzeug.wrappers.Response(status=304)
if not id:
res = Model.default_get(cr, uid, [field], context).get(field)
image_base64 = res
else:
res = Model.read(cr, uid, [id], [last_update, field], context)[0]
retag = hashlib.md5(res.get(last_update)).hexdigest()
image_base64 = res.get(field)
if kw.get('resize'):
resize = kw.get('resize').split(',')
if len(resize) == 2 and int(resize[0]) and int(resize[1]):
width = int(resize[0])
height = int(resize[1])
# resize maximum 500*500
if width > 500: width = 500
if height > 500: height = 500
image_base64 = openerp.tools.image_resize_image(base64_source=image_base64, size=(width, height), encoding='base64', filetype='PNG')
image_data = base64.b64decode(image_base64)
except Exception:
image_data = self.placeholder()
headers.append(('ETag', retag))
headers.append(('Content-Length', len(image_data)))
try:
ncache = int(kw.get('cache'))
headers.append(('Cache-Control', 'no-cache' if ncache == 0 else 'max-age=%s' % (ncache)))
except:
pass
return request.make_response(image_data, headers)
def placeholder(self, image='placeholder.png'):
addons_path = http.addons_manifest['web']['addons_path']
return open(os.path.join(addons_path, 'web', 'static', 'src', 'img', image), 'rb').read()
@http.route('/web/binary/saveas', type='http', auth="public")
@serialize_exception
def saveas(self, model, field, id=None, filename_field=None, **kw):
""" Download link for files stored as binary fields.
If the ``id`` parameter is omitted, fetches the default value for the
binary field (via ``default_get``), otherwise fetches the field for
that precise record.
:param str model: name of the model to fetch the binary from
:param str field: binary field
:param str id: id of the record from which to fetch the binary
:param str filename_field: field holding the file's name, if any
:returns: :class:`werkzeug.wrappers.Response`
"""
Model = request.registry[model]
cr, uid, context = request.cr, request.uid, request.context
fields = [field]
if filename_field:
fields.append(filename_field)
if id:
res = Model.read(cr, uid, [int(id)], fields, context)[0]
else:
res = Model.default_get(cr, uid, fields, context)
filecontent = base64.b64decode(res.get(field) or '')
if not filecontent:
return request.not_found()
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
@http.route('/web/binary/saveas_ajax', type='http', auth="public")
@serialize_exception
def saveas_ajax(self, data, token):
jdata = simplejson.loads(data)
model = jdata['model']
field = jdata['field']
data = jdata['data']
id = jdata.get('id', None)
filename_field = jdata.get('filename_field', None)
context = jdata.get('context', {})
Model = request.session.model(model)
fields = [field]
if filename_field:
fields.append(filename_field)
if data:
res = {field: data, filename_field: jdata.get('filename', None)}
elif id:
res = Model.read([int(id)], fields, context)[0]
else:
res = Model.default_get(fields, context)
filecontent = base64.b64decode(res.get(field) or '')
if not filecontent:
raise ValueError(_("No content found for field '%s' on '%s:%s'") %
(field, model, id))
else:
filename = '%s_%s' % (model.replace('.', '_'), id)
if filename_field:
filename = res.get(filename_field, '') or filename
return request.make_response(filecontent,
headers=[('Content-Type', 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))],
cookies={'fileToken': token})
@http.route('/web/binary/upload', type='http', auth="user")
@serialize_exception
def upload(self, callback, ufile):
# TODO: might be useful to have a configuration flag for max-length file uploads
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
data = ufile.read()
args = [len(data), ufile.filename,
ufile.content_type, base64.b64encode(data)]
except Exception, e:
args = [False, e.message]
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route('/web/binary/upload_attachment', type='http', auth="user")
@serialize_exception
def upload_attachment(self, callback, model, id, ufile):
Model = request.session.model('ir.attachment')
out = """<script language="javascript" type="text/javascript">
var win = window.top.window;
win.jQuery(win).trigger(%s, %s);
</script>"""
try:
attachment_id = Model.create({
'name': ufile.filename,
'datas': base64.encodestring(ufile.read()),
'datas_fname': ufile.filename,
'res_model': model,
'res_id': int(id)
}, request.context)
args = {
'filename': ufile.filename,
'id': attachment_id
}
except Exception:
args = {'error': "Something horrible happened"}
_logger.exception("Fail to upload attachment %s" % ufile.filename)
return out % (simplejson.dumps(callback), simplejson.dumps(args))
@http.route([
'/web/binary/company_logo',
'/logo',
'/logo.png',
], type='http', auth="none", cors="*")
def company_logo(self, dbname=None, **kw):
imgname = 'logo.png'
placeholder = functools.partial(get_module_resource, 'web', 'static', 'src', 'img')
uid = None
if request.session.db:
dbname = request.session.db
uid = request.session.uid
elif dbname is None:
dbname = db_monodb()
if not uid:
uid = openerp.SUPERUSER_ID
if not dbname:
response = http.send_file(placeholder(imgname))
else:
try:
# create an empty registry
registry = openerp.modules.registry.Registry(dbname)
with registry.cursor() as cr:
cr.execute("""SELECT c.logo_web, c.write_date
FROM res_users u
LEFT JOIN res_company c
ON c.id = u.company_id
WHERE u.id = %s
""", (uid,))
row = cr.fetchone()
if row and row[0]:
image_data = StringIO(str(row[0]).decode('base64'))
response = http.send_file(image_data, filename=imgname, mtime=row[1])
else:
response = http.send_file(placeholder('nologo.png'))
except Exception:
response = http.send_file(placeholder(imgname))
return response
class Action(http.Controller):
    """Endpoints loading and running ir.actions records."""
    @http.route('/web/action/load', type='json', auth="user")
    def load(self, action_id, do_not_eval=False, additional_context=None):
        """Read the full definition of action ``action_id`` (numeric id or
        'module.xmlid' external id) and return it cleaned for the client,
        or False when it cannot be resolved."""
        Actions = request.session.model('ir.actions.actions')
        value = False
        try:
            action_id = int(action_id)
        except ValueError:
            try:
                # Fall back to resolving a 'module.xmlid' external id.
                module, xmlid = action_id.split('.', 1)
                model, action_id = request.session.model('ir.model.data').get_object_reference(module, xmlid)
                assert model.startswith('ir.actions.')
            except Exception:
                action_id = 0 # force failed read
        base_action = Actions.read([action_id], ['type'], request.context)
        if base_action:
            ctx = request.context
            action_type = base_action[0]['type']
            if action_type == 'ir.actions.report.xml':
                # Report actions: only fetch binary field sizes, not contents.
                ctx.update({'bin_size': True})
            if additional_context:
                ctx.update(additional_context)
            # Re-read the action through its concrete model to get all fields.
            action = request.session.model(action_type).read([action_id], False, ctx)
            if action:
                value = clean_action(action[0])
        return value
    @http.route('/web/action/run', type='json', auth="user")
    def run(self, action_id):
        """Execute server action ``action_id`` and return the resulting
        client action, or False when the action returns nothing."""
        return_action = request.session.model('ir.actions.server').run(
            [action_id], request.context)
        if return_action:
            return clean_action(return_action)
        else:
            return False
class Export(http.Controller):
    """Endpoints backing the export dialog: available formats and the
    exportable-field tree of a model."""
    @http.route('/web/export/formats', type='json', auth="user")
    def formats(self):
        """ Returns all valid export formats
        :returns: for each export format, a pair of identifier and printable name
        :rtype: [(str, str)]
        """
        return [
            {'tag': 'csv', 'label': 'CSV'},
            {'tag': 'xls', 'label': 'Excel', 'error': None if xlwt else "XLWT required"},
        ]
    def fields_get(self, model):
        """Return ``model``'s fields_get() description for the request
        context."""
        Model = request.session.model(model)
        fields = Model.fields_get(False, request.context)
        return fields
    @http.route('/web/export/get_fields', type='json', auth="user")
    def get_fields(self, model, prefix='', parent_name= '',
                   import_compat=True, parent_field_type=None,
                   exclude=None):
        """Return one level of ``model``'s exportable-field tree for the
        export dialog; relational fields get ``children``/``params`` so
        the client can expand them lazily."""
        if import_compat and parent_field_type == "many2one":
            # m2o fields expose no sub-fields in import-compatible mode.
            fields = {}
        else:
            fields = self.fields_get(model)
        if import_compat:
            fields.pop('id', None)
        else:
            # Expose the database id as the '.id' pseudo-field.
            fields['.id'] = fields.pop('id', {'string': 'ID'})
        # Sort fields by their (unicode) label for display.
        fields_sequence = sorted(fields.iteritems(),
            key=lambda field: openerp.tools.ustr(field[1].get('string', '')))
        records = []
        for field_name, field in fields_sequence:
            if import_compat:
                if exclude and field_name in exclude:
                    continue
                if field.get('readonly'):
                    # If none of the field's states unsets readonly, skip the field
                    if all(dict(attrs).get('readonly', True)
                           for attrs in field.get('states', {}).values()):
                        continue
            if not field.get('exportable', True):
                continue
            id = prefix + (prefix and '/'or '') + field_name
            name = parent_name + (parent_name and '/' or '') + field['string']
            record = {'id': id, 'string': name,
                      'value': id, 'children': False,
                      'field_type': field.get('type'),
                      'required': field.get('required'),
                      'relation_field': field.get('relation_field')}
            records.append(record)
            # Relational fields less than three levels deep are expandable.
            if len(name.split('/')) < 3 and 'relation' in field:
                ref = field.pop('relation')
                record['value'] += '/id'
                record['params'] = {'model': ref, 'prefix': id, 'name': name}
                if not import_compat or field['type'] == 'one2many':
                    # m2m field in import_compat is childless
                    record['children'] = True
        return records
    @http.route('/web/export/namelist', type='json', auth="user")
    def namelist(self, model, export_id):
        """Return [{'name', 'label'}] for each field of the saved export
        list ``export_id`` (an ir.exports record)."""
        # TODO: namelist really has no reason to be in Python (although itertools.groupby helps)
        export = request.session.model("ir.exports").read([export_id])[0]
        export_fields_list = request.session.model("ir.exports.line").read(
            export['export_fields'])
        fields_data = self.fields_info(
            model, map(operator.itemgetter('name'), export_fields_list))
        return [
            {'name': field['name'], 'label': fields_data[field['name']]}
            for field in export_fields_list
        ]
    def fields_info(self, model, export_fields):
        """Map each path in ``export_fields`` to its human-readable label,
        batching the sub-field lookups of each relational field."""
        info = {}
        fields = self.fields_get(model)
        if ".id" in export_fields:
            fields['.id'] = fields.pop('id', {'string': 'ID'})
        # To make fields retrieval more efficient, fetch all sub-fields of a
        # given field at the same time. Because the order in the export list is
        # arbitrary, this requires ordering all sub-fields of a given field
        # together so they can be fetched at the same time
        #
        # Works the following way:
        # * sort the list of fields to export, the default sorting order will
        #   put the field itself (if present, for xmlid) and all of its
        #   sub-fields right after it
        # * then, group on: the first field of the path (which is the same for
        #   a field and for its subfields and the length of splitting on the
        #   first '/', which basically means grouping the field on one side and
        #   all of the subfields on the other. This way, we have the field (for
        #   the xmlid) with length 1, and all of the subfields with the same
        #   base but a length "flag" of 2
        # * if we have a normal field (length 1), just add it to the info
        #   mapping (with its string) as-is
        # * otherwise, recursively call fields_info via graft_subfields.
        #   all graft_subfields does is take the result of fields_info (on the
        #   field's model) and prepend the current base (current field), which
        #   rebuilds the whole sub-tree for the field
        #
        # result: because we're not fetching the fields_get for half the
        # database models, fetching a namelist with a dozen fields (including
        # relational data) falls from ~6s to ~300ms (on the leads model).
        # export lists with no sub-fields (e.g. import_compatible lists with
        # no o2m) are even more efficient (from the same 6s to ~170ms, as
        # there's a single fields_get to execute)
        for (base, length), subfields in itertools.groupby(
                sorted(export_fields),
                lambda field: (field.split('/', 1)[0], len(field.split('/', 1)))):
            subfields = list(subfields)
            if length == 2:
                # subfields is a seq of $base/*rest, and not loaded yet
                info.update(self.graft_subfields(
                    fields[base]['relation'], base, fields[base]['string'],
                    subfields
                ))
            elif base in fields:
                info[base] = fields[base]['string']
        return info
    def graft_subfields(self, model, prefix, prefix_string, fields):
        """Resolve the 'base/rest' paths of ``fields`` on ``model`` and
        yield them re-prefixed with ``prefix`` / ``prefix_string``."""
        export_fields = [field.split('/', 1)[1] for field in fields]
        return (
            (prefix + '/' + k, prefix_string + '/' + v)
            for k, v in self.fields_info(model, export_fields).iteritems())
class ExportFormat(object):
    """Template for export controllers: subclasses provide the content
    type, the filename and the data serialization for one format."""
    # Whether export_data() should return raw python values instead of
    # formatted strings (Excel needs raw data for numbers and dates).
    raw_data = False
    @property
    def content_type(self):
        """ Provides the format's content type """
        raise NotImplementedError()
    def filename(self, base):
        """ Creates a valid filename for the format (with extension) from the
        provided base name (extension-less)
        """
        raise NotImplementedError()
    def from_data(self, fields, rows):
        """ Conversion method from OpenERP's export data to whatever the
        current export class outputs
        :params list fields: a list of fields to export
        :params list rows: a list of records to export
        :returns: the serialized payload
        :rtype: bytes
        """
        raise NotImplementedError()
    def base(self, data, token):
        """Shared export entry point: decode the JSON request ``data``,
        gather the export data and wrap it in a file-download response.
        :param str data: JSON-encoded export parameters (model, fields,
                         ids, domain, import_compat, optional context)
        :param str token: value echoed back in the 'fileToken' cookie so
                          the client can detect download completion
        """
        params = simplejson.loads(data)
        model, fields, ids, domain, import_compat = \
            operator.itemgetter('model', 'fields', 'ids', 'domain',
                                'import_compat')(
                params)
        Model = request.session.model(model)
        context = dict(request.context or {}, **params.get('context', {}))
        ids = ids or Model.search(domain, 0, False, False, context)
        # Models without an ordinary backing table do not export 'id'.
        if not request.env[model]._is_an_ordinary_table():
            fields = [field for field in fields if field['name'] != 'id']
        field_names = map(operator.itemgetter('name'), fields)
        import_data = Model.export_data(ids, field_names, self.raw_data, context=context).get('datas',[])
        if import_compat:
            columns_headers = field_names
        else:
            columns_headers = [val['label'].strip() for val in fields]
        return request.make_response(self.from_data(columns_headers, import_data),
            headers=[('Content-Disposition',
                            content_disposition(self.filename(model))),
                     ('Content-Type', self.content_type)],
            cookies={'fileToken': token})
class CSVExport(ExportFormat, http.Controller):
    """CSV flavour of ExportFormat, exposed at /web/export/csv."""

    @http.route('/web/export/csv', type='http', auth="user")
    @serialize_exception
    def index(self, data, token):
        """HTTP entry point: delegate to the shared ``base`` machinery."""
        return self.base(data, token)

    @property
    def content_type(self):
        return 'text/csv;charset=utf8'

    def filename(self, base):
        return base + '.csv'

    def from_data(self, fields, rows):
        """Serialize ``rows`` into a fully-quoted, UTF-8 encoded CSV
        payload with ``fields`` as the header line."""
        out = StringIO()
        writer = csv.writer(out, quoting=csv.QUOTE_ALL)
        writer.writerow([name.encode('utf-8') for name in fields])
        for record in rows:
            cells = []
            for cell in record:
                if isinstance(cell, basestring):
                    # Newlines and tabs inside a cell are flattened to spaces.
                    cell = cell.replace('\n', ' ').replace('\t', ' ')
                    try:
                        cell = cell.encode('utf-8')
                    except UnicodeError:
                        pass
                # Map False to None so the csv writer emits an empty cell.
                cells.append(None if cell is False else cell)
            writer.writerow(cells)
        out.seek(0)
        payload = out.read()
        out.close()
        return payload
class ExcelExport(ExportFormat, http.Controller):
    """XLS flavour of ExportFormat, exposed at /web/export/xls."""
    # Excel needs raw data to correctly handle numbers and date values
    raw_data = True

    @http.route('/web/export/xls', type='http', auth="user")
    @serialize_exception
    def index(self, data, token):
        """HTTP entry point: delegate to the shared ``base`` machinery."""
        return self.base(data, token)

    @property
    def content_type(self):
        return 'application/vnd.ms-excel'

    def filename(self, base):
        return base + '.xls'

    def from_data(self, fields, rows):
        """Serialize ``rows`` into a single-sheet .xls workbook with
        ``fields`` as the header row."""
        plain_style = xlwt.easyxf('align: wrap yes')
        date_fmt = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD')
        datetime_fmt = xlwt.easyxf('align: wrap yes', num_format_str='YYYY-MM-DD HH:mm:SS')

        book = xlwt.Workbook()
        sheet = book.add_sheet('Sheet 1')
        for col, header in enumerate(fields):
            sheet.write(0, col, header)
            sheet.col(col).width = 8000 # around 220 pixels

        for offset, record in enumerate(rows):
            for col, value in enumerate(record):
                style = plain_style
                if isinstance(value, basestring):
                    # Replace carriage returns with spaces in text cells.
                    value = value.replace('\r', ' ')
                elif isinstance(value, datetime.datetime):
                    # datetime is checked before date: it is a date subclass.
                    style = datetime_fmt
                elif isinstance(value, datetime.date):
                    style = date_fmt
                sheet.write(offset + 1, col, value, style)

        buf = StringIO()
        book.save(buf)
        buf.seek(0)
        payload = buf.read()
        buf.close()
        return payload
class Reports(http.Controller):
    """Endpoint generating reports through the legacy 'report' RPC service
    and streaming the result as a file download."""
    # Delay, in seconds, between two report_get polls while waiting for the
    # report rendering to finish.
    POLLING_DELAY = 0.25
    # Maps report formats to the Content-Type of the HTTP response.
    TYPES_MAPPING = {
        'doc': 'application/vnd.ms-word',
        'html': 'text/html',
        'odt': 'application/vnd.oasis.opendocument.text',
        'pdf': 'application/pdf',
        'sxw': 'application/vnd.sun.xml.writer',
        'xls': 'application/vnd.ms-excel',
    }
    @http.route('/web/report', type='http', auth="user")
    @serialize_exception
    def index(self, action, token):
        """Run the report described by the JSON-encoded ``action`` and
        return it as an attachment; ``token`` is echoed in the 'fileToken'
        cookie so the client can detect download completion."""
        action = simplejson.loads(action)
        report_srv = request.session.proxy("report")
        context = dict(request.context)
        context.update(action["context"])
        report_data = {}
        report_ids = context.get("active_ids", None)
        if 'report_type' in action:
            report_data['report_type'] = action['report_type']
        if 'datas' in action:
            if 'ids' in action['datas']:
                report_ids = action['datas'].pop('ids')
            report_data.update(action['datas'])
        report_id = report_srv.report(
            request.session.db, request.session.uid, request.session.password,
            action["report_name"], report_ids,
            report_data, context)
        report_struct = None
        # Poll the report service until it reports a final state.
        while True:
            report_struct = report_srv.report_get(
                request.session.db, request.session.uid, request.session.password, report_id)
            if report_struct["state"]:
                break
            time.sleep(self.POLLING_DELAY)
        report = base64.b64decode(report_struct['result'])
        if report_struct.get('code') == 'zlib':
            report = zlib.decompress(report)
        report_mimetype = self.TYPES_MAPPING.get(
            report_struct['format'], 'octet-stream')
        file_name = action.get('name', 'report')
        if 'name' not in action:
            # Derive the download name from the report definition when the
            # action does not supply one.
            reports = request.session.model('ir.actions.report.xml')
            res_id = reports.search([('report_name', '=', action['report_name']),],
                                    0, False, False, context)
            if len(res_id) > 0:
                file_name = reports.read(res_id[0], ['name'], context)['name']
            else:
                file_name = action['report_name']
        file_name = '%s.%s' % (file_name, report_struct['format'])
        return request.make_response(report,
             headers=[
                 ('Content-Disposition', content_disposition(file_name)),
                 ('Content-Type', report_mimetype),
                 ('Content-Length', len(report))],
             cookies={'fileToken': token})
class Apps(http.Controller):
    @http.route('/apps/<app>', auth='user')
    def get_app_url(self, req, app):
        """Redirect to the web client with the module browser opened on
        module *app*'s form view."""
        session_model = request.session.model
        act_window_obj = session_model('ir.actions.act_window')
        ir_model_data = session_model('ir.model.data')

        # Load the generic "open module tree" action, if it exists.
        try:
            action_id = ir_model_data.get_object_reference('base', 'open_module_tree')[1]
            action = act_window_obj.read(action_id, ['name', 'type', 'res_model', 'view_mode', 'view_type', 'context', 'views', 'domain'])
            action['target'] = 'current'
        except ValueError:
            action = False

        # Resolve the module record matching the requested app name.
        try:
            app_id = ir_model_data.get_object_reference('base', 'module_%s' % app)[1]
        except ValueError:
            app_id = False

        if action and app_id:
            action['res_id'] = app_id
            action['view_mode'] = 'form'
            action['views'] = [(False, u'form')]

        # NOTE: the action is stashed in the session even when lookup failed
        # (action may be False here), matching the original behaviour.
        sakey = Session().save_session_action(action)
        debug = '?debug' if req.debug else ''
        return werkzeug.utils.redirect('/web{0}#sa={1}'.format(debug, sakey))
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
marcosdiez/ansible-modules-extras | monitoring/stackdriver.py | 61 | 6988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: stackdriver
short_description: Send code deploy and annotation events to stackdriver
description:
- Send code deploy and annotation events to Stackdriver
version_added: "1.6"
author: "Ben Whaley (@bwhaley)"
options:
key:
description:
- API key.
required: true
default: null
event:
description:
- The type of event to send, either annotation or deploy
choices: ['annotation', 'deploy']
required: false
default: null
revision_id:
description:
- The revision of the code that was deployed. Required for deploy events
required: false
default: null
deployed_by:
description:
- The person or robot responsible for deploying the code
required: false
default: "Ansible"
deployed_to:
description:
- "The environment code was deployed to. (ie: development, staging, production)"
required: false
default: null
repository:
description:
- The repository (or project) deployed
required: false
default: null
msg:
description:
- The contents of the annotation message, in plain text. Limited to 256 characters. Required for annotation.
required: false
default: null
annotated_by:
description:
- The person or robot who the annotation should be attributed to.
required: false
default: "Ansible"
level:
description:
- one of INFO/WARN/ERROR, defaults to INFO if not supplied. May affect display.
choices: ['INFO', 'WARN', 'ERROR']
required: false
default: 'INFO'
instance_id:
description:
- id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown
required: false
default: null
event_epoch:
description:
- "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this."
required: false
default: null
'''
EXAMPLES = '''
- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123
- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234
'''
# ===========================================
# Stackdriver module specific support methods.
#
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None):
    """Send a deploy event to Stackdriver"""
    deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent"

    # Build the payload; optional fields are only included when truthy.
    payload = {'revision_id': revision_id, 'deployed_by': deployed_by}
    if deployed_to:
        payload['deployed_to'] = deployed_to
    if repository:
        payload['repository'] = repository

    return do_send_request(module, deploy_api, payload, key)
def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None):
    """Send an annotation event to Stackdriver"""
    annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent"

    payload = {'message': msg}
    # Optional fields are only included when truthy.
    for field, value in (('annotated_by', annotated_by),
                         ('level', level),
                         ('instance_id', instance_id),
                         ('event_epoch', event_epoch)):
        if value:
            payload[field] = value

    return do_send_request(module, annotation_api, payload, key)
def do_send_request(module, url, params, key):
    """POST *params* as JSON to *url*, failing the module on a non-200 reply."""
    headers = {
        'Content-Type': 'application/json',
        'x-stackdriver-apikey': key
    }
    body = json.dumps(params)

    response, info = fetch_url(module, url, headers=headers, data=body, method='POST')
    if info['status'] != 200:
        module.fail_json(msg="Unable to send msg: %s" % info['msg'])
# ===========================================
# Module execution.
#
def main():
    """Ansible entry point: validate parameters and dispatch the event.

    ``deploy`` events require ``revision_id``; ``annotation`` events require
    ``msg``.  Both requirements are enforced manually below because they are
    conditional on ``event``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            key=dict(required=True),
            event=dict(required=True, choices=['deploy', 'annotation']),
            msg=dict(),
            revision_id=dict(),
            annotated_by=dict(default='Ansible'),
            level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']),
            instance_id=dict(),
            event_epoch=dict(),
            deployed_by=dict(default='Ansible'),
            deployed_to=dict(),
            repository=dict(),
        ),
        # NOTE(review): check mode is declared but events are still sent,
        # so running with --check generates real Stackdriver events.
        supports_check_mode=True
    )

    key = module.params["key"]
    event = module.params["event"]

    # Annotation params
    msg = module.params["msg"]
    annotated_by = module.params["annotated_by"]
    level = module.params["level"]
    instance_id = module.params["instance_id"]
    event_epoch = module.params["event_epoch"]

    # Deploy params
    revision_id = module.params["revision_id"]
    deployed_by = module.params["deployed_by"]
    deployed_to = module.params["deployed_to"]
    repository = module.params["repository"]

    ##################################################################
    # deploy requires revision_id
    # annotation requires msg
    # We verify these manually
    ##################################################################

    if event == 'deploy':
        if not revision_id:
            module.fail_json(msg="revision_id required for deploy events")
        try:
            send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository)
        except Exception as e:  # `as` syntax works on py2.6+ and py3
            module.fail_json(msg="unable to send deploy event: %s" % e)

    if event == 'annotation':
        if not msg:
            module.fail_json(msg="msg required for annotation events")
        try:
            send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch)
        except Exception as e:
            module.fail_json(msg="unable to send annotation event: %s" % e)

    changed = True
    module.exit_json(changed=changed, deployed_by=deployed_by)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 |
nthiep/global-ssh-server | lib/python2.7/site-packages/django/contrib/databrowse/datastructures.py | 100 | 9090 | """
These classes are light wrappers around Django's database API that provide
convenience functionality and permalink functions for the databrowse app.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils import formats
from django.utils.text import capfirst
from django.utils.encoding import smart_text, force_str, iri_to_uri
from django.db.models.query import QuerySet
from django.utils.encoding import python_2_unicode_compatible
EMPTY_VALUE = '(None)'
DISPLAY_SIZE = 100
class EasyModel(object):
    """Convenience wrapper around a model registered with a databrowse site."""

    def __init__(self, site, model):
        self.site = site
        self.model = model
        self.model_list = list(site.registry.keys())
        self.verbose_name = model._meta.verbose_name
        self.verbose_name_plural = model._meta.verbose_name_plural

    def __repr__(self):
        return force_str('<EasyModel for %s>' % self.model._meta.object_name)

    def model_databrowse(self):
        "Returns the ModelDatabrowse class for this model."
        return self.site.registry[self.model]

    def url(self):
        meta = self.model._meta
        return '%s%s/%s/' % (self.site.root_url, meta.app_label, meta.module_name)

    def objects(self, **kwargs):
        return self.get_query_set().filter(**kwargs)

    def get_query_set(self):
        # Clone into an EasyQuerySet so iteration yields EasyInstances.
        qs = self.model._default_manager.get_query_set()._clone(klass=EasyQuerySet)
        qs._easymodel = self
        return qs

    def object_by_pk(self, pk):
        return EasyInstance(self, self.model._default_manager.get(pk=pk))

    def sample_objects(self):
        # Expose at most three instances, wrapped as EasyInstance objects.
        for obj in self.model._default_manager.all()[:3]:
            yield EasyInstance(self, obj)

    def field(self, name):
        try:
            f = self.model._meta.get_field(name)
        except models.FieldDoesNotExist:
            return None
        return EasyField(self, f)

    def fields(self):
        meta = self.model._meta
        return [EasyField(self, f) for f in (meta.fields + meta.many_to_many)]
class EasyField(object):
    """Pairs an EasyModel with one of its model fields."""

    def __init__(self, easy_model, field):
        self.model = easy_model
        self.field = field

    def __repr__(self):
        return force_str('<EasyField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))

    def choices(self):
        # Wrap each (value, label) pair of a choices field.
        for value, label in self.field.choices:
            yield EasyChoice(self.model, self, value, label)

    def url(self):
        meta = self.model.model._meta
        root = self.model.site.root_url
        if self.field.choices:
            return '%s%s/%s/%s/' % (root, meta.app_label, meta.module_name, self.field.name)
        elif self.field.rel:
            # Relations link to the related model's listing; other fields
            # implicitly return None.
            return '%s%s/%s/' % (root, meta.app_label, meta.module_name)
class EasyChoice(object):
    """A single (value, label) pair of a choices field."""

    def __init__(self, easy_model, field, value, label):
        self.model = easy_model
        self.field = field
        self.value = value
        self.label = label

    def __repr__(self):
        return force_str('<EasyChoice for %s.%s>' % (self.model.model._meta.object_name, self.field.name))

    def url(self):
        return '%s%s/%s/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.field.name, iri_to_uri(self.value))
@python_2_unicode_compatible
class EasyInstance(object):
    """Wrapper around one model instance of an EasyModel."""

    def __init__(self, easy_model, instance):
        self.model = easy_model
        self.instance = instance

    def __repr__(self):
        return force_str('<EasyInstance for %s (%s)>' % (self.model.model._meta.object_name, self.instance._get_pk_val()))

    def __str__(self):
        # Truncate long textual representations for display.
        val = smart_text(self.instance)
        if len(val) > DISPLAY_SIZE:
            val = val[:DISPLAY_SIZE] + '...'
        return val

    def pk(self):
        return self.instance._get_pk_val()

    def url(self):
        meta = self.model.model._meta
        return '%s%s/%s/objects/%s/' % (self.model.site.root_url, meta.app_label, meta.module_name, iri_to_uri(self.pk()))

    def fields(self):
        """
        Generator that yields EasyInstanceFields for each field in this
        EasyInstance's model.
        """
        meta = self.model.model._meta
        for f in meta.fields + meta.many_to_many:
            yield EasyInstanceField(self.model, self, f)

    def related_objects(self):
        """
        Generator that yields dictionaries of all models that have this
        EasyInstance's model as a ForeignKey or ManyToManyField, along with
        lists of related objects.
        """
        meta = self.model.model._meta
        relations = meta.get_all_related_objects() + meta.get_all_related_many_to_many_objects()
        for rel_object in relations:
            if rel_object.model not in self.model.model_list:
                continue  # Skip models that aren't in the model_list
            em = EasyModel(self.model.site, rel_object.model)
            accessor = getattr(self.instance, rel_object.get_accessor_name())
            yield {
                'model': em,
                'related_field': rel_object.field.verbose_name,
                'object_list': [EasyInstance(em, i) for i in accessor.all()],
            }
class EasyInstanceField(object):
    """One field of one EasyInstance: gives display values and URLs."""

    def __init__(self, easy_model, instance, field):
        # Note argument order differs from attribute order here.
        self.model, self.field, self.instance = easy_model, field, instance
        # Raw attribute value from the wrapped model instance.
        self.raw_value = getattr(instance.instance, field.name)

    def __repr__(self):
        return force_str('<EasyInstanceField for %s.%s>' % (self.model.model._meta.object_name, self.field.name))

    def values(self):
        """
        Returns a list of values for this field for this instance. It's a list
        so we can accomodate many-to-many fields.
        """
        # This import is deliberately inside the function because it causes
        # some settings to be imported, and we don't want to do that at the
        # module level.
        if self.field.rel:
            if isinstance(self.field.rel, models.ManyToOneRel):
                objs = getattr(self.instance.instance, self.field.name)
            elif isinstance(self.field.rel, models.ManyToManyRel): # ManyToManyRel
                return list(getattr(self.instance.instance, self.field.name).all())
            # NOTE(review): any other rel type leaves `objs` unbound and the
            # final `return [objs]` would raise NameError — confirm intended.
        elif self.field.choices:
            # Map the stored value to its human-readable label.
            objs = dict(self.field.choices).get(self.raw_value, EMPTY_VALUE)
        elif isinstance(self.field, models.DateField) or isinstance(self.field, models.TimeField):
            if self.raw_value:
                # DateTimeField must be tested before DateField (subclass).
                if isinstance(self.field, models.DateTimeField):
                    objs = capfirst(formats.date_format(self.raw_value, 'DATETIME_FORMAT'))
                elif isinstance(self.field, models.TimeField):
                    objs = capfirst(formats.time_format(self.raw_value, 'TIME_FORMAT'))
                else:
                    objs = capfirst(formats.date_format(self.raw_value, 'DATE_FORMAT'))
            else:
                objs = EMPTY_VALUE
        elif isinstance(self.field, models.BooleanField) or isinstance(self.field, models.NullBooleanField):
            objs = {True: 'Yes', False: 'No', None: 'Unknown'}[self.raw_value]
        else:
            objs = self.raw_value
        return [objs]

    def urls(self):
        "Returns a list of (value, URL) tuples."
        # First, check the urls() method for each plugin.
        plugin_urls = []
        for plugin_name, plugin in self.model.model_databrowse().plugins.items():
            urls = plugin.urls(plugin_name, self)
            if urls is not None:
                # First plugin that answers wins.
                return zip(self.values(), urls)
        if self.field.rel:
            m = EasyModel(self.model.site, self.field.rel.to)
            if self.field.rel.to in self.model.model_list:
                # Related model is browsable: link each value to its page.
                lst = []
                for value in self.values():
                    if value is None:
                        continue
                    url = '%s%s/%s/objects/%s/' % (self.model.site.root_url, m.model._meta.app_label, m.model._meta.module_name, iri_to_uri(value._get_pk_val()))
                    lst.append((smart_text(value), url))
            else:
                lst = [(value, None) for value in self.values()]
        elif self.field.choices:
            # Choice values link to the per-choice listing page.
            lst = []
            for value in self.values():
                url = '%s%s/%s/fields/%s/%s/' % (self.model.site.root_url, self.model.model._meta.app_label, self.model.model._meta.module_name, self.field.name, iri_to_uri(self.raw_value))
                lst.append((value, url))
        elif isinstance(self.field, models.URLField):
            # URL fields link directly to their target.
            val = list(self.values())[0]
            lst = [(val, iri_to_uri(val))]
        else:
            lst = [(list(self.values())[0], None)]
        return lst
class EasyQuerySet(QuerySet):
    """
    When creating (or cloning to) an `EasyQuerySet`, make sure to set the
    `_easymodel` variable to the related `EasyModel`.
    """
    def iterator(self, *args, **kwargs):
        # Wrap every model instance yielded by the parent iterator.
        parent_iter = super(EasyQuerySet, self).iterator(*args, **kwargs)
        for obj in parent_iter:
            yield EasyInstance(self._easymodel, obj)

    def _clone(self, *args, **kwargs):
        clone = super(EasyQuerySet, self)._clone(*args, **kwargs)
        # Propagate the EasyModel reference to the clone.
        clone._easymodel = self._easymodel
        return clone
| agpl-3.0 |
dmachard/extensive-testing | src/ea/scripts/helps/DocInspect.py | 1 | 5628 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -------------------------------------------------------------------
# Copyright (c) 2010-2019 Denis Machard
# This file is part of the extensive automation project
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA
# -------------------------------------------------------------------
import sys
import operator
import inspect
try:
xrange
except NameError: # support python3
xrange = range
def describeFunc(obj, method=False):
    """
    Describe the function object passed as argument.
    If this is a method object, the second argument
    will be passed as True

    @param obj: function or method object to inspect
    @type obj:

    @param method: True when *obj* is a method (affects the 'type' key)
    @type method:

    @return: dict describing the callable, or None if it is not inspectable
    """
    try:
        # inspect.getargspec() was removed in Python 3.11; prefer
        # getfullargspec() (same first four fields) and fall back on
        # Python 2, where only getargspec() exists.
        try:
            arginfo = inspect.getfullargspec(obj)
        except AttributeError:
            arginfo = inspect.getargspec(obj)
    except TypeError:
        sys.stderr.write("type error\n")
        return

    args = arginfo[0]
    desc = {}
    desc['name'] = obj.__name__
    if obj.__doc__ is not None:
        desc['desc'] = obj.__doc__
    if args:
        # Drop the implicit 'self' parameter of bound/unbound methods.
        if args[0] == 'self':
            args.pop(0)
        desc['args'] = args
        if arginfo[3]:
            # Pair each defaulted argument with its default value.
            dl = len(arginfo[3])
            al = len(args)
            defargs = args[al - dl:al]
            defaults = list(zip(defargs, arginfo[3]))
            # Convert None defaults to the string 'None' for serialization.
            desc['default-args'] = [(k, str(v) if v is None else v)
                                    for k, v in defaults]
    else:
        desc['args'] = []
    if arginfo[1]:
        desc['pos-args'] = arginfo[1]      # *args name
    if arginfo[2]:
        desc['keyword-args'] = arginfo[2]  # **kwargs name
    desc['type'] = 'method' if method else 'function'
    return desc
def describeClass(obj, functions):
    """
    Describe the class object passed as argument including its methods

    @param obj: class object to inspect
    @type obj:

    @param functions: names of the methods to include in the description
    @type functions:
    """
    described = []
    desc = {'name': obj.__name__, 'functions': described, 'type': 'class'}
    if obj.__doc__ is not None:
        desc['desc'] = obj.__doc__

    # Only attributes explicitly listed in *functions* are inspected;
    # the nested loop preserves one append per matching list entry.
    for name in obj.__dict__:
        for wanted in functions:
            if wanted != name:
                continue
            item = getattr(obj, name)
            if sys.version_info < (3,):
                # Python 2: plain functions on a class are method objects.
                if inspect.ismethod(item):
                    described.append(describeFunc(item, True))
            else:
                if inspect.isfunction(item):
                    described.append(describeFunc(item, True))

    described.sort(key=operator.itemgetter('name'))
    return desc
def describeModule(module, classes, descr='', removeVersion=False):
    """
    Describe the module object passed as argument
    including its classes and functions

    @param module: module object to inspect
    @type module:

    @param classes: list of (name, methods) pairs to document
    @type classes:

    @param descr: human-readable module description
    @type descr:
    """
    ret = []
    completeName = module.__name__
    # assumes a dotted name (pkg.module); a top-level module would make
    # moduleName[1] raise IndexError — TODO confirm callers guarantee this
    moduleName = completeName.rsplit('.', 1)

    # remove version in complete name
    if removeVersion:
        # assumes the form <pkg>.<version>.<module> — TODO confirm
        fullName = completeName.split('.')
        fullModuleName = "%s.%s" % (fullName[0], fullName[2])
    else:
        fullModuleName = module.__name__

    desc = {'name': moduleName[1],
            'realname': fullModuleName,
            'classes': ret,
            'type': 'module',
            'desc': descr}
    # Describe only the attributes named in *classes*.
    for name in dir(module):
        for m in classes:
            n, c = m
            if n == name:
                obj = getattr(module, name)
                if inspect.isclass(obj):
                    ret.append(describeClass(obj, c))
                elif (inspect.ismethod(obj) or inspect.isfunction(obj)):
                    ret.append(describeFunc(obj))
    return desc
def describePackage(package, modules, descr=''):
    """
    Describe the python package object passed as argument
    including its classes and functions
    pkg/
        module1/
        module2/
            submod1
                class1
            submod2
                class2

    @param package: package object to inspect
    @type package:

    @param modules: list of (name, children) or (name, children, descr) tuples
    @type modules:

    @param descr: human-readable package description
    @type descr:
    """
    described = []
    desc = {'name': package.__name__,
            'modules': described,
            'type': 'package',
            'desc': descr}

    for name in dir(package):
        for entry in modules:
            # Entries are (name, children) or (name, children, description).
            if len(entry) == 2:
                entry_name, children = entry
                entry_descr = ''
            else:
                entry_name, children, entry_descr = entry
            if entry_name != name:
                continue
            obj = getattr(package, name)
            if inspect.ismodule(obj):
                described.append(describeModule(obj, children, entry_descr))
            if inspect.isclass(obj):
                described.append(describeClass(obj, children))

    return desc
| lgpl-2.1 |
wheeler-microfluidics/opendrop_plugin | __init__.py | 1 | 18899 | """
Copyright 2015 Ryan Fobel
This file is part of opendrop_plugin.
opendrop_plugin is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
opendrop_plugin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with opendrop_plugin. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import math
import re
from copy import deepcopy
import warnings
import tables
from datetime import datetime
from pygtkhelpers.ui.dialogs import info as info_dialog
import yaml
import gtk
import gobject
import numpy as np
from path_helpers import path
from flatland import Integer, Boolean, Float, Form, Enum, String
from flatland.validation import ValueAtLeast, ValueAtMost, Validator
import microdrop_utility as utility
from microdrop_utility.gui import yesno, FormViewDialog
from microdrop.logger import logger
from microdrop.gui.protocol_grid_controller import ProtocolGridController
from microdrop.plugin_helpers import (StepOptionsController, AppDataController,
get_plugin_info)
from microdrop.plugin_manager import (IPlugin, IWaveformGenerator, Plugin,
implements, PluginGlobals,
ScheduleRequest, emit_signal,
get_service_instance,
get_service_instance_by_name)
from microdrop.app_context import get_app
from microdrop.dmf_device import DeviceScaleNotSet
from serial_device import SerialDevice, get_serial_ports
from opendrop_board import OpenDropBoard
# Ignore natural name warnings from PyTables [1].
#
# [1]: https://www.mail-archive.com/pytables-users@lists.sourceforge.net/msg01130.html
warnings.simplefilter('ignore', tables.NaturalNameWarning)
PluginGlobals.push_env('microdrop.managed')
def max_voltage(element, state):
    """Verify that the voltage is below a set maximum.

    flatland validator: returns True when valid; otherwise returns the
    result of element.errors.append() (None, i.e. falsy), which both
    records the message and marks the element invalid.
    """
    service = get_service_instance_by_name(
        get_plugin_info(path(__file__).parent).plugin_name)

    # Only enforce the limit when a control board is actually connected.
    if service.control_board.connected() and \
            element.value > service.control_board.max_waveform_voltage:
        return element.errors.append('Voltage exceeds the maximum value '
                                     '(%d V).' %
                                     service.control_board.max_waveform_voltage)
    else:
        return True
def check_frequency(element, state):
    """Verify that the frequency is within the valid range.

    flatland validator: returns True when valid; otherwise returns the
    result of element.errors.append() (None, i.e. falsy), which both
    records the message and marks the element invalid.
    """
    service = get_service_instance_by_name(
        get_plugin_info(path(__file__).parent).plugin_name)

    # Only enforce the range when a control board is actually connected.
    if service.control_board.connected() and \
            (element.value < service.control_board.min_waveform_frequency or \
             element.value > service.control_board.max_waveform_frequency):
        return element.errors.append('Frequency is outside of the valid range '
                                     '(%.1f - %.1f Hz).' %
                                     (service.control_board.min_waveform_frequency,
                                      service.control_board.max_waveform_frequency)
                                     )
    else:
        return True
class OpenDropPlugin(Plugin, StepOptionsController, AppDataController):
    """
    This class is automatically registered with the PluginManager.
    """
    implements(IPlugin)
    implements(IWaveformGenerator)

    # Serial ports available at import time; the first one (if any) is the
    # default connection port.
    serial_ports_ = [port for port in get_serial_ports()]
    if len(serial_ports_):
        default_port_ = serial_ports_[0]
    else:
        default_port_ = None

    # Application-level (per-install) options shown in the app options dialog.
    AppFields = Form.of(
        Enum.named('serial_port').using(default=default_port_,
                                        optional=True).valued(*serial_ports_),
        Integer.named('baud_rate')
        .using(default=115200, optional=True, validators=[ValueAtLeast(minimum=0),
                                                          ],),
    )

    # Per-protocol-step options (duration in ms, waveform voltage/frequency);
    # voltage and frequency are validated against the connected board's limits.
    StepFields = Form.of(
        Integer.named('duration').using(default=100, optional=True,
                                        validators=
                                        [ValueAtLeast(minimum=0), ]),
        Float.named('voltage').using(default=100, optional=True,
                                     validators=[ValueAtLeast(minimum=0),
                                                 max_voltage]),
        Float.named('frequency').using(default=10e3, optional=True,
                                       validators=[ValueAtLeast(minimum=0),
                                                   check_frequency]),
    )

    # Plugin version from the package metadata.
    version = get_plugin_info(path(__file__).parent).version
    def __init__(self):
        """Initialize driver state; no hardware I/O happens here."""
        # Proxy object for the OpenDrop control board (not yet connected).
        self.control_board = OpenDropBoard()
        # Plugin name as declared in the package metadata.
        self.name = get_plugin_info(path(__file__).parent).plugin_name
        self.connection_status = "Not connected"
        # Last frequency applied to the waveform generator (None = unknown).
        self.current_frequency = None
        # gobject timeout handle for the current step's duration timer.
        self.timeout_id = None
    def on_plugin_enable(self):
        """Lifecycle hook: verify hardware, then refresh step/grid state."""
        super(OpenDropPlugin, self).on_plugin_enable()
        self.check_device_name_and_version()
        # If a protocol is already loaded, re-apply the current step and
        # rebuild the protocol grid columns.
        if get_app().protocol:
            self.on_step_run()
            self._update_protocol_grid()
    def on_plugin_disable(self):
        """Lifecycle hook called when the plugin is disabled."""
        if get_app().protocol:
            self.on_step_run()
            self._update_protocol_grid()
    def on_protocol_swapped(self, old_protocol, protocol):
        # A different protocol was loaded; rebuild the protocol grid.
        self._update_protocol_grid()
    def _update_protocol_grid(self):
        """Refresh the protocol grid widget if any of its fields are enabled."""
        # NOTE(review): `app` and `app_values` are unused locals; left in
        # place in case get_app_values() has caching side effects — confirm.
        app = get_app()
        app_values = self.get_app_values()
        pgc = get_service_instance(ProtocolGridController, env='microdrop')
        if pgc.enabled_fields:
            pgc.update_grid()
    def on_app_options_changed(self, plugin_name):
        """Handler called when a plugin's application options change.

        For this plugin's own options, apply serial settings (reconnecting
        when the port or baud rate changed).  For the application's options,
        turn off all electrodes when leaving realtime/protocol mode.
        """
        app = get_app()
        if plugin_name == self.name:
            app_values = self.get_app_values()
            reconnect = False

            if self.control_board.connected():
                for k, v in app_values.items():
                    if k == 'baud_rate' and self.control_board.baud_rate != v:
                        self.control_board.baud_rate = v
                        reconnect = True
                    if k == 'serial_port' and self.control_board.port != v:
                        reconnect = True

            if reconnect:
                self.connect()

            self._update_protocol_grid()
        elif plugin_name == app.name:
            # Turn off all electrodes if we're not in realtime mode and not
            # running a protocol.
            if (self.control_board.connected() and not app.realtime_mode and
                    not app.running):
                logger.info('Turning off all electrodes.')
                self.control_board.set_state_of_all_channels(
                    np.zeros(self.control_board.number_of_channels())
                )
def connect(self):
'''
Try to connect to the control board at the default serial port selected
in the Microdrop application options.
If unsuccessful, try to connect to the control board on any available
serial port, one-by-one.
'''
self.current_frequency = None
if len(OpenDropPlugin.serial_ports_):
app_values = self.get_app_values()
# try to connect to the last successful port
try:
self.control_board.connect(str(app_values['serial_port']),
app_values['baud_rate'])
except RuntimeError, why:
logger.warning('Could not connect to control board on port %s.'
' Checking other ports... [%s]' %
(app_values['serial_port'], why))
self.control_board.connect(baud_rate=app_values['baud_rate'])
app_values['serial_port'] = self.control_board.port
self.set_app_values(app_values)
else:
raise Exception("No serial ports available.")
def check_device_name_and_version(self):
'''
Check to see if:
a) The connected device is a OpenDrop
b) The device firmware matches the host driver API version
In the case where the device firmware version does not match, display a
dialog offering to flash the device with the firmware version that
matches the host driver API version.
'''
try:
self.connect()
name = self.control_board.name()
if name != "open_drop":
raise Exception("Device is not an OpenDrop")
host_software_version = self.control_board.host_software_version()
remote_software_version = self.control_board.software_version()
# Reflash the firmware if it is not the right version.
if host_software_version != remote_software_version:
response = yesno("The control board firmware version (%s) "
"does not match the driver version (%s). "
"Update firmware?" % (remote_software_version,
host_software_version))
if response == gtk.RESPONSE_YES:
self.on_flash_firmware()
except Exception, why:
logger.warning("%s" % why)
self.update_connection_status()
def on_flash_firmware(self, widget=None, data=None):
app = get_app()
try:
connected = self.control_board.connected()
if not connected:
self.connect()
'''
response = yesno("Save current control board configuration before "
"flashing?")
if response == gtk.RESPONSE_YES:
self.save_config()
'''
hardware_version = utility.Version.fromstring(
self.control_board.hardware_version()
)
if connected:
self.control_board.disconnect()
self.control_board.flash_firmware(hardware_version)
app.main_window_controller.info("Firmware updated successfully.",
"Firmware update")
except Exception, why:
logger.error("Problem flashing firmware. ""%s" % why)
self.check_device_name_and_version()
    def update_connection_status(self):
        """Refresh the cached connection string and the main-window label."""
        self.connection_status = "Not connected"
        app = get_app()
        connected = self.control_board.connected()
        if connected:
            # Summarize board identity, firmware and channel count.
            name = self.control_board.name()
            version = self.control_board.hardware_version()
            firmware = self.control_board.software_version()
            n_channels = self.control_board.number_of_channels()
            serial_number = self.control_board.serial_number
            self.connection_status = ('%s v%s (Firmware: %s, S/N %03d)\n'
                '%d channels' % (name, version, firmware, serial_number,
                                 n_channels))

        # Mirror the status into the main window's control-board label.
        app.main_window_controller.label_control_board_status\
            .set_text(self.connection_status)
    def on_step_run(self):
        """
        Handler called whenever a step is executed.

        Plugins that handle this signal must emit the on_step_complete
        signal once they have completed the step. The protocol controller
        will wait until all plugins have completed the current step before
        proceeding.
        """
        logger.debug('[OpenDropPlugin] on_step_run()')
        # Cancel any duration timer left over from a previous step.
        self._kill_running_step()
        app = get_app()
        options = self.get_step_options()
        dmf_options = app.dmf_device_controller.get_step_options()
        logger.debug('[OpenDropPlugin] options=%s dmf_options=%s' %
                     (options, dmf_options))
        app_values = self.get_app_values()

        if (self.control_board.connected() and (app.realtime_mode or
                                                app.running)):
            state = dmf_options.state_of_channels
            max_channels = self.control_board.number_of_channels()
            # Truncate or zero-pad the channel-state vector to the board size.
            if len(state) > max_channels:
                state = state[0:max_channels]
            elif len(state) < max_channels:
                state = np.concatenate([state, np.zeros(max_channels -
                                                        len(state), int)])
            assert(len(state) == max_channels)

            # Apply waveform settings, then the electrode states.
            emit_signal("set_frequency",
                        options['frequency'],
                        interface=IWaveformGenerator)
            emit_signal("set_voltage", options['voltage'],
                        interface=IWaveformGenerator)
            self.control_board.set_state_of_all_channels(state)

            # if a protocol is running, wait for the specified minimum duration
            if app.running:
                logger.debug('[OpenDropPlugin] on_step_run: '
                             'timeout_add(%d, _callback_step_completed)' %
                             options['duration'])
                self.timeout_id = gobject.timeout_add(
                    options['duration'], self._callback_step_completed)
                return
        else:
            # Board not connected or app idle: report completion immediately.
            self.step_complete()
    def step_complete(self, return_value=None):
        """Notify the protocol controller that this plugin finished the step."""
        app = get_app()
        # Only meaningful while a protocol is running or in realtime mode.
        if app.running or app.realtime_mode:
            emit_signal('on_step_complete', [self.name, return_value])
    def on_step_complete(self, plugin_name, return_value=None):
        # Our own completion signal: the duration timer is no longer pending.
        if plugin_name == self.name:
            self.timeout_id = None
def _kill_running_step(self):
if self.timeout_id:
logger.debug('[OpenDropPlugin] _kill_running_step: removing'
'timeout_id=%d' % self.timeout_id)
gobject.source_remove(self.timeout_id)
    def _callback_step_completed(self):
        """gobject timeout callback fired when the step duration elapses."""
        logger.debug('[OpenDropPlugin] _callback_step_completed')
        self.step_complete()
        return False  # stop the timeout from refiring
    def on_protocol_run(self):
        """
        Handler called when a protocol starts running.

        Warns (without aborting) about common hardware mismatches.
        """
        app = get_app()
        if not self.control_board.connected():
            logger.warning("Warning: no control board connected.")
        elif (self.control_board.number_of_channels() <=
              app.dmf_device.max_channel()):
            logger.warning("Warning: currently connected board does not have "
                           "enough channels for this protocol.")
def on_protocol_pause(self):
"""
Handler called when a protocol is paused.
"""
app = get_app()
self._kill_running_step()
if self.control_board.connected() and not app.realtime_mode:
# Turn off all electrodes
logger.debug('Turning off all electrodes.')
self.control_board.set_state_of_all_channels(
np.zeros(self.control_board.number_of_channels()))
    def on_experiment_log_selection_changed(self, data):
        """
        Handler called whenever the experiment log selection changes.

        Parameters:
            data : dictionary of experiment log data for the selected steps
        """
        # Intentionally a no-op: this plugin records no per-step data that
        # needs refreshing when the log selection changes.
        pass
    def set_voltage(self, voltage):
        """
        Set the waveform voltage.

        Parameters:
            voltage : RMS voltage
        """
        logger.info("[OpenDropPlugin].set_voltage(%.1f)" % voltage)
        # Delegate directly to the connected control board.
        self.control_board.set_waveform_voltage(voltage)
    def set_frequency(self, frequency):
        """
        Set the waveform frequency.

        Parameters:
            frequency : frequency in Hz
        """
        logger.info("[OpenDropPlugin].set_frequency(%.1f)" % frequency)
        self.control_board.set_waveform_frequency(frequency)
        # Remember the last frequency applied to the hardware.
        self.current_frequency = frequency
def on_step_options_changed(self, plugin, step_number):
logger.debug('[OpenDropPlugin] on_step_options_changed(): %s '
'step #%d' % (plugin, step_number))
app = get_app()
app_values = self.get_app_values()
options = self.get_step_options(step_number)
if (app.protocol and not app.running and not app.realtime_mode and
(plugin == 'microdrop.gui.dmf_device_controller' or plugin ==
self.name) and app.protocol.current_step_number == step_number):
self.on_step_run()
def on_step_swapped(self, original_step_number, new_step_number):
logger.debug('[OpenDropPlugin] on_step_swapped():'
'original_step_number=%d, new_step_number=%d' %
(original_step_number, new_step_number))
self.on_step_options_changed(self.name,
get_app().protocol.current_step_number)
def on_experiment_log_changed(self, log):
# Check if the experiment log already has control board meta data, and
# if so, return.
data = log.get("control board name")
for val in data:
if val:
return
# otherwise, add the name, hardware version, serial number,
# and firmware version
data = {}
if self.control_board.connected():
data["control board name"] = self.control_board.name()
data["control board serial number"] = \
self.control_board.serial_number
data["control board hardware version"] = (self.control_board
.hardware_version())
data["control board software version"] = (self.control_board
.software_version())
# add info about the devices on the i2c bus
try:
data["i2c devices"] = (self.control_board._i2c_devices)
except:
pass
log.add_data(data)
def get_schedule_requests(self, function_name):
"""
Returns a list of scheduling requests (i.e., ScheduleRequest
instances) for the function specified by function_name.
"""
if function_name in ['on_step_options_changed']:
return [ScheduleRequest(self.name,
'microdrop.gui.protocol_grid_controller'),
ScheduleRequest(self.name,
'microdrop.gui.protocol_controller'),
]
elif function_name == 'on_app_options_changed':
return [ScheduleRequest('microdrop.app', self.name)]
elif function_name == 'on_protocol_swapped':
return [ScheduleRequest('microdrop.gui.protocol_grid_controller',
self.name)]
return []
# Close the plugin environment — presumably pushed near the top of this
# module (not visible in this chunk); TODO confirm matching push_env().
PluginGlobals.pop_env()
| gpl-3.0 |
shaolinfry/litecoin | qa/rpc-tests/prioritise_transaction.py | 22 | 5961 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
    """Functional test for the `prioritisetransaction` RPC.

    Exercises three behaviours:
      1. a positive fee delta can promote a low-fee transaction into the
         next mined block,
      2. a negative fee delta can keep a high-fee transaction out of
         mined blocks,
      3. a fee delta can make an otherwise-rejected free transaction
         acceptable to the mempool.
    """
    def __init__(self):
        super().__init__()
        self.setup_clean_chain = True
        self.num_nodes = 1
        # Large OP_RETURN outputs used to inflate transaction sizes.
        self.txouts = gen_return_txouts()
    def setup_network(self):
        # Single node; -printpriority exposes priority handling and a small
        # -maxmempool keeps mempool behaviour observable.
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1", "-maxmempool=10"]))
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def run_test(self):
        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []
        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure its not mined due to priority)
        self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
        self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
        self.nodes[0].generate(1)
        mempool = self.nodes[0].getrawmempool()
        print("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)
        # Find a high-fee transaction that was mined in the block above.
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x
        # Something high-fee should have been mined!
        assert(high_fee_tx != None)
        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)
        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)
        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        print("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)
        # Create a free, low priority transaction. Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]
        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        txid = self.nodes[0].sendrawtransaction(tx_hex)
        # A tx that spends an in-mempool tx has 0 priority, so we can use it to
        # test the effect of using prioritise transaction for mempool acceptance
        inputs = []
        inputs.append({"txid": txid, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
        tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
        # The zero-fee child must be rejected with "insufficient fee".
        try:
            self.nodes[0].sendrawtransaction(tx2_hex)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
            assert(tx2_id not in self.nodes[0].getrawmempool())
        else:
            assert(False)
        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
        print("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
        assert(tx2_id in self.nodes[0].getrawmempool())
# Standard entry point for Bitcoin functional tests.
if __name__ == '__main__':
    PrioritiseTransactionTest().main()
| mit |
DomBlack/oh-my-zsh | plugins/git-prompt/gitstatus.py | 492 | 2648 | #!/usr/bin/env python
from __future__ import print_function
import sys
import re
import shlex
from subprocess import Popen, PIPE, check_output
def get_tagname_or_hash():
    """Return the tag name pointing at HEAD if one exists, else the
    abbreviated commit hash, else None.

    Parses the output of ``git log -1 --format="%h%d"``, e.g.
    ``1a2b3c4 (HEAD, tag: v1.0, master)``.
    """
    cmd = 'git log -1 --format="%h%d"'
    output = check_output(shlex.split(cmd)).decode('utf-8').strip()
    hash_, tagname = None, None
    # get hash: everything before the trailing "(...)" ref list.
    # FIX: raw string literals -- '\(' in a plain string is an invalid
    # escape sequence (DeprecationWarning on Python 3.6+).
    m = re.search(r'\(.*\)$', output)
    if m:
        hash_ = output[:m.start()-1]
    # get tagname: first "tag: ..." entry inside the ref list
    m = re.search(r'tag: .*[,\)]', output)
    if m:
        tagname = 'tags/' + output[m.start()+len('tag: '): m.end()-1]
    if tagname:
        return tagname
    elif hash_:
        return hash_
    return None
# `git status --porcelain --branch` can collect all information
# branch, remote_branch, untracked, staged, changed, conflicts, ahead, behind
po = Popen(['git', 'status', '--porcelain', '--branch'], stdout=PIPE, stderr=PIPE)
stdout, sterr = po.communicate()
if po.returncode != 0:
    sys.exit(0) # Not a git repository
# collect git status information
untracked, staged, changed, conflicts = [], [], [], []
ahead, behind = 0, 0
# Split each porcelain line into (index-status, worktree-status, rest).
status = [(line[0], line[1], line[2:]) for line in stdout.decode('utf-8').splitlines()]
for st in status:
    if st[0] == '#' and st[1] == '#':
        # "## ..." branch header line.
        # NOTE(review): `branch` is only bound here; --branch is expected to
        # always emit this header first -- TODO confirm for old git versions.
        if re.search('Initial commit on', st[2]):
            branch = st[2].split(' ')[-1]
        elif re.search('no branch', st[2]): # detached status
            branch = get_tagname_or_hash()
        elif len(st[2].strip().split('...')) == 1:
            # local branch with no upstream configured
            branch = st[2].strip()
        else:
            # current and remote branch info
            branch, rest = st[2].strip().split('...')
            if len(rest.split(' ')) == 1:
                # remote_branch = rest.split(' ')[0]
                pass
            else:
                # ahead or behind, e.g. "[ahead 2, behind 1]"
                divergence = ' '.join(rest.split(' ')[1:])
                divergence = divergence.lstrip('[').rstrip(']')
                for div in divergence.split(', '):
                    if 'ahead' in div:
                        ahead = int(div[len('ahead '):].strip())
                    elif 'behind' in div:
                        behind = int(div[len('behind '):].strip())
    elif st[0] == '?' and st[1] == '?':
        untracked.append(st)
    else:
        if st[1] == 'M':
            changed.append(st)
        if st[0] == 'U':
            conflicts.append(st)
        elif st[0] != ' ':
            staged.append(st)
# Emit the space-separated summary consumed by the zsh prompt theme.
out = ' '.join([
    branch,
    str(ahead),
    str(behind),
    str(len(staged)),
    str(len(conflicts)),
    str(len(changed)),
    str(len(untracked)),
])
print(out, end='')
| mit |
zguangyu/wechat-python-sdk | examples/tutorial_official_1.py | 23 | 1717 | # -*- coding: utf-8 -*-
from wechat_sdk import WechatBasic
# All of the variables below are assumed to have been extracted from the Request
token = 'WECHAT_TOKEN' # your WeChat token
signature = 'f24649c76c3f3d81b23c033da95a7a30cb7629cc' # GET parameter `signature` from the Request
timestamp = '1406799650' # GET parameter `timestamp` from the Request
nonce = '1505845280' # GET parameter `nonce` from the Request
# The user's request content (the body of the Request)
# Edit the content of body_text to exercise the code paths below
body_text = """
<xml>
<ToUserName><![CDATA[touser]]></ToUserName>
<FromUserName><![CDATA[fromuser]]></FromUserName>
<CreateTime>1405994593</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[wechat]]></Content>
<MsgId>6038700799783131222</MsgId>
</xml>
"""
# Instantiate the wechat client
wechat = WechatBasic(token=token)
# Validate the request signature
if wechat.check_signature(signature=signature, timestamp=timestamp, nonce=nonce):
    # Parse the XML data (required; otherwise response_text, response_image,
    # etc. cannot be used)
    wechat.parse_data(body_text)
    # Get the parse result; message is a WechatMessage object (defined in
    # wechat_sdk.messages)
    message = wechat.get_message()
    response = None
    if message.type == 'text':
        if message.content == 'wechat':
            response = wechat.response_text(u'^_^')
        else:
            response = wechat.response_text(u'文字')
    elif message.type == 'image':
        response = wechat.response_text(u'图片')
    else:
        response = wechat.response_text(u'未知')
print response | bsd-2-clause |
halfak/python-mwtypes | mwtypes/page.py | 2 | 1517 | """
.. autoclass:: mwtypes.Page
:members:
"""
import jsonable
from .util import none_or
class Page(jsonable.Type):
    """
    Page metadata

    :Attributes:
        .. autoattribute:: mwtypes.Page.id
            :annotation: = Page ID : int
        .. autoattribute:: mwtypes.Page.title
            :annotation: = Page title: str
        .. autoattribute:: mwtypes.Page.namespace
            :annotation: = Namespace ID: int
        .. autoattribute:: mwtypes.Page.redirect
            :annotation: = Page name that this page redirects to : str | None
        .. autoattribute:: mwtypes.Page.restrictions
            :annotation: = A list of page editing restrictions :
                           list( `str` ) | `None`
    """
    __slots__ = ('id', 'title', 'namespace', 'redirect', 'restrictions')

    def initialize(self, id=None, title=None, namespace=None, redirect=None,
                   restrictions=None):
        # Every field passes through `none_or`, which leaves None untouched
        # and otherwise coerces the value to the given type.
        self.id = none_or(id, int)  # page ID
        self.title = none_or(title, str)  # page title (namespace excluded)
        self.namespace = none_or(namespace, int)  # namespace ID
        self.redirect = none_or(redirect, str)  # redirect target page name
        self.restrictions = none_or(restrictions, list)  # editing restrictions
| mit |
SET001/three.js | utils/converters/ctm/join_ctm.py | 399 | 2521 | """Join multiple binary files into single file and generate JSON snippet with offsets
-------------------------------------
How to use
-------------------------------------
python join_ctm.py -i "part_*.ctm" -o joined.ctm [-j offsets.js]
Will read multiple files following wildcard pattern (ordered lexicographically):
part_000.ctm
part_001.ctm
part_002.ctm
...
part_XXX.ctm
And generate single concatenated files:
joined.ctm
offsets.js (optional, offsets are also dumped to standard output)
"""
import getopt
import glob
import sys
import os
# #####################################################
# Templates
# #####################################################
TEMPLATE_JSON = u"""\
"offsets": [ %(offsets)s ],
"""
# #############################################################################
# Helpers
# #############################################################################
def usage():
    # Print a one-line usage summary (Python 2 print statement).
    print 'Usage: %s -i "filename_*.ctm" -o filename.ctm [-j offsets.js]' % os.path.basename(sys.argv[0])
# #####################################################
# Main
# #####################################################
if __name__ == "__main__":
    # get parameters from the command line
    try:
        opts, args = getopt.getopt(sys.argv[1:], "hi:o:j:", ["help", "input=", "output=", "json="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    inpattern = ""
    outname = ""
    jsonname = ""
    for o, a in opts:
        if o in ("-h", "--help"):
            usage()
            sys.exit()
        elif o in ("-i", "--input"):
            inpattern = a
        elif o in ("-o", "--output"):
            outname = a
        elif o in ("-j", "--json"):
            jsonname = a
    # quit if required parameters are missing
    if inpattern == "" or outname == "":
        usage()
        sys.exit(2)
    outfile = open(outname, "wb")
    # Input parts are processed in lexicographic order.
    matches = glob.glob(inpattern)
    matches.sort()
    total = 0
    offsets = []
    for filename in matches:
        filesize = os.path.getsize(filename)
        # Record the byte offset at which this part STARTS in the joined
        # file (i.e. before appending it).
        offsets.append(total)
        total += filesize
        print filename, filesize
        infile = open(filename, "rb")
        buffer = infile.read()
        outfile.write(buffer)
        infile.close()
    outfile.close()
    # Render the JSON "offsets" snippet and echo it to stdout.
    json_str = TEMPLATE_JSON % {
    "offsets" : ", ".join(["%d" % o for o in offsets])
    }
    print json_str
    if jsonname:
        jsonfile = open(jsonname, "w")
        jsonfile.write(json_str)
jsonfile.close() | mit |
amith01994/intellij-community | python/lib/Lib/tempfile.py | 86 | 15003 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
template - the default prefix for all temporary names.
You may change this to control the default prefix.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import os as _os
import errno as _errno
from random import Random as _Random
if _os.name == 'mac':
import Carbon.Folder as _Folder
import Carbon.Folders as _Folders
# _set_cloexec marks a file descriptor close-on-exec so temporary-file
# descriptors are not inherited by child processes; it degrades to a no-op
# on platforms without fcntl.
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        pass
else:
    def _set_cloexec(fd):
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except IOError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import thread as _thread
except ImportError:
import dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
template = "tmp"
tempdir = None
# Internal routines.
# Lock serialising one-time lazy initialisation (tempdir, name sequence).
_once_lock = _allocate_lock()
# Pick the cheapest available way to probe for a file's existence.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises os.error if the
    # file doesn't exist.
    def _stat(fn):
        try:
            f = open(fn)
        except IOError:
            raise _os.error
        f.close()
def _exists(fn):
    # True if `fn` exists, judged by whether _stat raises os.error.
    try:
        _stat(fn)
    except _os.error:
        return False
    else:
        return True
class _RandomNameSequence:
    """An instance of _RandomNameSequence generates an endless
    sequence of unpredictable strings which can safely be incorporated
    into file names. Each string is six characters long. Multiple
    threads can safely use the same instance at the same time.

    _RandomNameSequence is an iterator."""

    characters = ("abcdefghijklmnopqrstuvwxyz" +
                  "ABCDEFGHIJKLMNOPQRSTUVWXYZ" +
                  "0123456789-_")

    def __init__(self):
        # The lock guards self.rng, which is not safe for concurrent use.
        self.mutex = _allocate_lock()
        self.rng = _Random()
        # normcase makes names safe on case-insensitive filesystems.
        self.normcase = _os.path.normcase
    def __iter__(self):
        return self
    def next(self):
        m = self.mutex
        c = self.characters
        choose = self.rng.choice
        m.acquire()
        try:
            # "123456" is just a 6-element dummy iterable (6 chars per name).
            letters = [choose(c) for dummy in "123456"]
        finally:
            m.release()
        return self.normcase(''.join(letters))
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try, in priority order."""

    dirlist = []

    # First, try the environment.
    for envname in 'TMPDIR', 'TEMP', 'TMP':
        dirname = _os.getenv(envname)
        if dirname: dirlist.append(dirname)

    # Failing that, try OS-specific locations.
    if _os.name == 'mac':
        try:
            fsr = _Folder.FSFindFolder(_Folders.kOnSystemDisk,
                                       _Folders.kTemporaryFolderType, 1)
            dirname = fsr.as_pathname()
            dirlist.append(dirname)
        except _Folder.error:
            pass
    elif _os.name == 'riscos':
        dirname = _os.getenv('Wimp$ScrapDir')
        if dirname: dirlist.append(dirname)
    elif _os.name == 'nt':
        dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
    else:
        dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])

    # As a last resort, the current directory.
    try:
        dirlist.append(_os.getcwd())
    except (AttributeError, _os.error):
        dirlist.append(_os.curdir)

    return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    flags = _text_openflags

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in xrange(100):
            name = namer.next()
            filename = _os.path.join(dir, name)
            try:
                # 0600 (owner read/write only) keeps the probe file private.
                fd = _os.open(filename, flags, 0600)
                fp = _os.fdopen(fd, 'w')
                fp.write('blat')
                fp.close()
                _os.unlink(filename)
                del fp, fd
                return dir
            except (OSError, IOError), e:
                # EEXIST means a name collision: retry with the next random
                # name. Any other error makes this directory unusable.
                if e[0] != _errno.EEXIST:
                    break # no point trying more names in this directory
                pass
    raise IOError, (_errno.ENOENT,
                    ("No usable temporary directory found in %s" % dirlist))
# Lazily-created, module-wide random name generator.
_name_sequence = None

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""

    global _name_sequence
    # Double-checked locking: the flag is re-tested under _once_lock so the
    # sequence is created exactly once even with concurrent callers.
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""

    names = _get_candidate_names()

    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, pre + name + suf)
        try:
            # flags include O_CREAT|O_EXCL, so open fails with EEXIST rather
            # than reusing a file some other process created (race-safe).
            fd = _os.open(file, flags, 0600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise

    raise IOError, (_errno.EEXIST, "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempdir.template."""
    return template

# NOTE(review): `tempdir` was already initialised to None earlier in the
# module; this second assignment is redundant (but harmless at import time).
tempdir = None

def gettempdir():
    """Accessor for tempdir.tempdir."""
    global tempdir
    # Double-checked locking around the one-time default computation.
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """mkstemp([suffix, [prefix, [dir, [text]]]])

    User-callable function to create and return a unique temporary
    file.  The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.

    If 'suffix' is specified, the file name will end with that suffix,
    otherwise there will be no suffix.

    If 'prefix' is specified, the file name will begin with that prefix,
    otherwise a default prefix is used.

    If 'dir' is specified, the file will be created in that directory,
    otherwise a default directory is used.

    If 'text' is specified and true, the file is opened in text
    mode.  Else (the default) the file is opened in binary mode.  On
    some operating systems, this makes no difference.

    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one. The file
    descriptor is not inherited by children of this process.

    Caller is responsible for deleting the file when done with it.
    """

    if dir is None:
        dir = gettempdir()

    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags

    return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """mkdtemp([suffix, [prefix, [dir]]])

    User-callable function to create and return a unique temporary
    directory.  The return value is the pathname of the directory.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    The directory is readable, writable, and searchable only by the
    creating user.

    Caller is responsible for deleting the directory when done with it.
    """

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()

    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # mkdir is atomic: EEXIST means a collision, so try a new name.
            _os.mkdir(file, 0700)
            return file
        except OSError, e:
            if e.errno == _errno.EEXIST:
                continue # try again
            raise

    raise IOError, (_errno.EEXIST, "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """mktemp([suffix, [prefix, [dir]]])

    User-callable function to return a unique temporary file name.  The
    file is not created.

    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.

    This function is unsafe and should not be used.  The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """

##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)

    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()
    for seq in xrange(TMP_MAX):
        name = names.next()
        file = _os.path.join(dir, prefix + name + suffix)
        # Inherently racy: the name may be taken between this check and the
        # caller's use of it (hence the deprecation note above).
        if not _exists(file):
            return file

    raise IOError, (_errno.EEXIST, "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use.  In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name):
        self.file = file          # underlying file object
        self.name = name          # absolute path of the temporary file
        self.close_called = False # guards against double close/unlink

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if type(a) != type(0):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special.  We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out.  Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink

        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
                self.unlink(self.name)

        def __del__(self):
            self.close()

        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="",
                       prefix=template, dir=None):
    """Create and return a temporary file.

    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to os.fdopen (default "w+b").
    'bufsize' -- the buffer size argument to os.fdopen (default -1).
    The file is created as mkstemp() would do it.

    Returns an object with a file-like interface; the name of the file
    is accessible as file.name.  The file will be automatically deleted
    when it is closed.
    """

    if dir is None:
        dir = gettempdir()

    if 'b' in mode:
        flags = _bin_openflags
    else:
        flags = _text_openflags

    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed.  This is only supported by Windows.
    if _os.name == 'nt':
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    file = _os.fdopen(fd, mode, bufsize)
    # The wrapper handles deletion on close (except on NT, where the OS
    # deletes via O_TEMPORARY) and keeps `name` available to the caller.
    return _TemporaryFileWrapper(file, name)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile

else:
    def TemporaryFile(mode='w+b', bufsize=-1, suffix="",
                      prefix=template, dir=None):
        """Create and return a temporary file.

        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to os.fdopen (default "w+b").
        'bufsize' -- the buffer size argument to os.fdopen (default -1).
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """

        if dir is None:
            dir = gettempdir()

        if 'b' in mode:
            flags = _bin_openflags
        else:
            flags = _text_openflags

        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # POSIX trick: unlink immediately; the open fd keeps the file
            # alive, and it vanishes for good when the fd is closed.
            _os.unlink(name)
            return _os.fdopen(fd, mode, bufsize)
        except:
            # Bare except is deliberate here: on ANY failure the fd must be
            # closed before the exception is re-raised.
            _os.close(fd)
            raise
| apache-2.0 |
nishn/pymol-script | pyca.py | 1 | 3908 | #!/usr/bin/env python
import os.path
import sys
from optparse import OptionParser
# check if the pymol module is installed
# NOTE(review): the bare `except` also swallows KeyboardInterrupt/SystemExit
# raised during import; `except ImportError` would be more precise.
try :
    import pymol
except :
    sys.exit( "error : This script needs 'pymol' module. Please install pymol before running." )
######################################## function ###########################################
# make CA model
def ca( mode, with_separate, filename, name ):
    """Display a CA-only backbone model of a PDB structure in PyMOL.

    mode          -- 'stick' or 'cartoon'
    with_separate -- if True, keep the residue numbers from the PDB file
                     (discontinuous CA atoms stay unconnected); otherwise
                     renumber consecutively so all CA atoms are bonded
    filename      -- path to a PDB file, or a PDB-format string
    name          -- PyMOL object name (overridden by the file's basename
                     when `filename` is a path)
    """
    pdbdat = "" # PDB data string
    bonds = [] # a list consists of pairs of connected CA atoms
    last = 0 # residue number of last atom
    resnum = 0 # residue number of focus atom
    # check if the filename is a path or pdb_string
    if os.path.exists( filename ):
        name = os.path.basename( filename )
        pdbstr = open( filename ).read().split('\n')
    else:
        pdbstr = filename.split('\n')
    # open the PDB_FILE and read data, keeping only CA ATOM records
    for line in pdbstr:
        # if not ATOM line or CA atom
        if line[0:4] != 'ATOM' or line[12:16] != " CA ":
            continue
        # set residue number
        if with_separate: # if separate mode : resnum is the very number in PDB_FILE
            resnum = int(line[22:26])
        else: # or if continuous mode : resnum is a count from the first CA
            resnum += 1
        # add pdb line (columns 23-26 rewritten with the chosen resnum)
        pdbdat += line[0:22] + ( '%4d' % resnum ) + line[26:] + "\n"
        # add bonds between consecutive residue numbers only
        if last + 1 == resnum:
            bonds.append( ( str(last) + "/CA", str(resnum) + "/CA" ) )
        # update last residue number
        last = resnum
    # launch pymol
    # NOTE(review): this local `pymol_argv` appears unused -- PyMOL normally
    # reads a module/global pymol_argv before finish_launching; TODO confirm.
    pymol_argv = [ 'pymol', '-q' ]
    pymol.finish_launching()
    pymol.cmd.read_pdbstr( pdbdat, name )
    pymol.cmd.hide( 'everything' )
    # set bond information
    if mode == "stick" :
        for bond in bonds :
            pymol.cmd.bond( bond[0], bond[1] )
    else :
        pymol.cmd.set( "cartoon_trace", 1 )
    # display structure in 'mode'
    pymol.cmd.show( mode )
    # colorize the structure
    # blue : N-terminus
    # red : C-terminus
    pymol.cmd.spectrum()
########################################## main ##############################################
if __name__ == "__main__":
    # OptionParser
    def opt() :
        """Parse command-line options; return (mode, separate, pdb_path, None)
        ready to be splatted into ca()."""
        # set an explanation of this program
        # NOTE(review): the two literals below concatenate without a space
        # between the sentences in the generated --help text.
        usage = "%prog [options] PDB_FILE"
        dscrpt = ( "Show the structure of the PDB_FILE by CA backbone using PyMOL."
                   "PyMOL needs to be installed." )
        parser = OptionParser( usage = usage, description = dscrpt )
        # set options
        # cartoon mode
        parser.add_option( "-c", "--cartoon", dest = "mode", default = "stick",
                           action = "store_const", const = "cartoon",
                           help = "Display in CA trace cartoon mode [ default : stick mode ]" )
        # Connect separate atoms or not
        parser.add_option( "-s", "--separate", default = False, action = "store_true",
                           help = "Do not connect discontinuous CA atoms. If not specified, "
                                  "[ default : force connect distant CA atoms ]" )
        # parse arguments
        ( options, args ) = parser.parse_args()
        # check arguments
        if len( args ) != 1 :
            parser.error( "Incorrect number of arguments. "
                          "Just one PDB_FILE must be given. \n"
                          "To show help message, use '-h or --help' option." )
        # check if "file" exists
        if not os.path.isfile( args[0] ) :
            sys.exit( "error : \"" + args[0] + "\" : no such file" )
        return ( options.mode, options.separate, args[0], None )
    # get options and Display in PyMOL
    ca( *opt() )
| mit |
NeCTAR-RC/nova | nova/cells/filters/availability_zone.py | 1 | 1443 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.cells import filters
LOG = logging.getLogger(__name__)
class AvailabilityZoneFilter(filters.BaseCellFilter):
    """Filters cells by availability zone.

    Works with cell capabilities using the key
    'availability_zones'
    Note: cell can have multiple availability zones
    """

    def cell_passes(self, cell, filter_properties):
        """Return True if `cell` may host the requested instance.

        A cell passes when the request specifies no availability zone, or
        when the requested zone appears in the cell's 'availability_zones'
        capability.
        """
        # Use lazy %-style log arguments (oslo.log convention) so messages
        # are only interpolated when debug logging is actually enabled.
        LOG.debug('Filtering on availability zones for cell %s', cell)
        available_zones = cell.capabilities.get('availability_zones', [])
        LOG.debug('Available zones: %s', available_zones)
        spec = filter_properties.get('request_spec', {})
        props = spec.get('instance_properties', {})
        availability_zone = props.get('availability_zone')
        if availability_zone:
            return availability_zone in available_zones
        # No zone requested: every cell is acceptable.
        return True
| apache-2.0 |
karthik339/Agni | MainDemo/venv/lib/python2.7/site-packages/jinja2/meta.py | 659 | 4190 | # -*- coding: utf-8 -*-
"""
jinja2.meta
~~~~~~~~~~~
This module implements various functions that exposes information about
templates that might be interesting for various kinds of applications.
:copyright: (c) 2010 by the Jinja Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from jinja2 import nodes
from jinja2.compiler import CodeGenerator
from jinja2._compat import string_types
class TrackingCodeGenerator(CodeGenerator):
    """We abuse the code generator for introspection.

    Rather than emitting compiled template code, this subclass records
    every identifier the compiler would resolve from the runtime context,
    which is exactly the set of undeclared variables.
    """
    def __init__(self, environment):
        # Dummy '<introspection>' name/filename: no real module is built.
        CodeGenerator.__init__(self, environment, '<introspection>',
                               '<introspection>')
        # Names that would be looked up in the context at render time.
        self.undeclared_identifiers = set()
    def write(self, x):
        """Don't write."""
        # Intentional no-op: only the identifier-tracking side effects of
        # compilation are of interest, not the generated source text.
    def pull_locals(self, frame):
        """Remember all undeclared identifiers."""
        # Called by the base class per frame; accumulate across frames.
        self.undeclared_identifiers.update(frame.identifiers.undeclared)
def find_undeclared_variables(ast):
    """Return the set of variable names that the template described by
    *ast* would resolve from the render context.

    Since the path taken through a template is only known at runtime,
    every name that *may* be looked up is reported.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
    >>> meta.find_undeclared_variables(ast)
    set(['bar'])

    .. admonition:: Implementation

        This works by compiling the template with a tracking code
        generator, so a :exc:`TemplateAssertionError` raised during
        compilation propagates out of this function as well.
    """
    tracker = TrackingCodeGenerator(ast.environment)
    tracker.visit(ast)
    return tracker.undeclared_identifiers
def find_referenced_templates(ast):
    """Finds all the referenced templates from the AST.  This will return an
    iterator over all the hardcoded template extensions, inclusions and
    imports.  If dynamic inheritance or inclusion is used, `None` will be
    yielded.

    >>> from jinja2 import Environment, meta
    >>> env = Environment()
    >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
    >>> list(meta.find_referenced_templates(ast))
    ['layout.html', None]

    This function is useful for dependency tracking.  For example if you want
    to rebuild parts of the website after a layout template has changed.
    """
    # Walk every node type that can reference another template.
    for node in ast.find_all((nodes.Extends, nodes.FromImport, nodes.Import,
                              nodes.Include)):
        if not isinstance(node.template, nodes.Const):
            # a tuple with some non consts in there
            if isinstance(node.template, (nodes.Tuple, nodes.List)):
                for template_name in node.template.items:
                    # something const, only yield the strings and ignore
                    # non-string consts that really just make no sense
                    if isinstance(template_name, nodes.Const):
                        if isinstance(template_name.value, string_types):
                            yield template_name.value
                    # something dynamic in there
                    else:
                        yield None
            # something dynamic we don't know about here
            else:
                yield None
            continue
        # constant is a basestring, direct template name
        if isinstance(node.template.value, string_types):
            yield node.template.value
        # a tuple or list (latter *should* not happen) made of consts,
        # yield the consts that are strings.  We could warn here for
        # non string values
        elif isinstance(node, nodes.Include) and \
           isinstance(node.template.value, (tuple, list)):
            for template_name in node.template.value:
                if isinstance(template_name, string_types):
                    yield template_name
        # something else we don't care about, we could warn here
        else:
            yield None
| apache-2.0 |
agoravoting/agora-identity | docs/conf.py | 62 | 7233 | # -*- coding: utf-8 -*-
#
# project documentation build configuration file, created by
# sphinx-quickstart on Tue Jan 4 15:11:09 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'a django-based project'
copyright = u'2012, the authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'project.tex', u'project Documentation',
u'Mozilla', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'a-django-app', u"a-django-app's Documentation",
[u'the authors'], 1)
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| agpl-3.0 |
karlito40/servo | tests/wpt/css-tests/css-fonts-3_dev/xhtml1/support/fonts/makegsubfonts.py | 820 | 14309 |
import os
import textwrap
from xml.etree import ElementTree
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.psCharStrings import T2CharString
from fontTools.ttLib.tables.otTables import GSUB,\
ScriptList, ScriptRecord, Script, DefaultLangSys,\
FeatureList, FeatureRecord, Feature,\
LookupList, Lookup, AlternateSubst, SingleSubst
# paths
directory = os.path.dirname(__file__)
shellSourcePath = os.path.join(directory, "gsubtest-shell.ttx")
shellTempPath = os.path.join(directory, "gsubtest-shell.otf")
featureList = os.path.join(directory, "gsubtest-features.txt")
javascriptData = os.path.join(directory, "gsubtest-features.js")
outputPath = os.path.join(os.path.dirname(directory), "gsubtest-lookup%d")
baseCodepoint = 0xe000
# -------
# Features
# -------
f = open(featureList, "rb")
text = f.read()
f.close()
mapping = []
for line in text.splitlines():
line = line.strip()
if not line:
continue
if line.startswith("#"):
continue
# parse
values = line.split("\t")
tag = values.pop(0)
mapping.append(tag);
# --------
# Outlines
# --------
def addGlyphToCFF(glyphName=None, program=None, private=None, globalSubrs=None, charStringsIndex=None, topDict=None, charStrings=None):
    """Register one new glyph in the CFF table.

    Appends a Type 2 charstring built from *program* and records the
    glyph's name/ID in the charset and CharStrings mapping.
    """
    # The next free glyph ID equals the number of glyphs already present.
    newGlyphID = len(topDict.charset)
    charStringsIndex.append(
        T2CharString(program=program, private=private,
                     globalSubrs=globalSubrs))
    charStrings.charStrings[glyphName] = newGlyphID
    topDict.charset.append(glyphName)
def makeLookup1():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup1")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
# bump this up so that the sequence is the same as the lookup 3 font
cp += 3
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 1
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = SingleSubst()
subtable.Format = 2
subtable.LookupType = 1
subtable.mapping = {
"%s.pass" % tag : "%s.fail" % tag,
"%s.fail" % tag : "%s.pass" % tag,
}
lookup.SubTable.append(subtable)
path = outputPath % 1 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeLookup3():
# make a variation of the shell TTX data
f = open(shellSourcePath)
ttxData = f.read()
f.close()
ttxData = ttxData.replace("__familyName__", "gsubtest-lookup3")
tempShellSourcePath = shellSourcePath + ".temp"
f = open(tempShellSourcePath, "wb")
f.write(ttxData)
f.close()
# compile the shell
shell = TTFont(sfntVersion="OTTO")
shell.importXML(tempShellSourcePath)
shell.save(shellTempPath)
os.remove(tempShellSourcePath)
# load the shell
shell = TTFont(shellTempPath)
# grab the PASS and FAIL data
hmtx = shell["hmtx"]
glyphSet = shell.getGlyphSet()
failGlyph = glyphSet["F"]
failGlyph.decompile()
failGlyphProgram = list(failGlyph.program)
failGlyphMetrics = hmtx["F"]
passGlyph = glyphSet["P"]
passGlyph.decompile()
passGlyphProgram = list(passGlyph.program)
passGlyphMetrics = hmtx["P"]
# grab some tables
hmtx = shell["hmtx"]
cmap = shell["cmap"]
# start the glyph order
existingGlyphs = [".notdef", "space", "F", "P"]
glyphOrder = list(existingGlyphs)
# start the CFF
cff = shell["CFF "].cff
globalSubrs = cff.GlobalSubrs
topDict = cff.topDictIndex[0]
topDict.charset = existingGlyphs
private = topDict.Private
charStrings = topDict.CharStrings
charStringsIndex = charStrings.charStringsIndex
features = sorted(mapping)
# build the outline, hmtx and cmap data
cp = baseCodepoint
for index, tag in enumerate(features):
# tag.pass
glyphName = "%s.pass" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
# tag.fail
glyphName = "%s.fail" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
# tag.default
glyphName = "%s.default" % tag
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=passGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = passGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# tag.alt1,2,3
for i in range(1,4):
glyphName = "%s.alt%d" % (tag, i)
glyphOrder.append(glyphName)
addGlyphToCFF(
glyphName=glyphName,
program=failGlyphProgram,
private=private,
globalSubrs=globalSubrs,
charStringsIndex=charStringsIndex,
topDict=topDict,
charStrings=charStrings
)
hmtx[glyphName] = failGlyphMetrics
for table in cmap.tables:
if table.format == 4:
table.cmap[cp] = glyphName
else:
raise NotImplementedError, "Unsupported cmap table format: %d" % table.format
cp += 1
# set the glyph order
shell.setGlyphOrder(glyphOrder)
# start the GSUB
shell["GSUB"] = newTable("GSUB")
gsub = shell["GSUB"].table = GSUB()
gsub.Version = 1.0
# make a list of all the features we will make
featureCount = len(features)
# set up the script list
scriptList = gsub.ScriptList = ScriptList()
scriptList.ScriptCount = 1
scriptList.ScriptRecord = []
scriptRecord = ScriptRecord()
scriptList.ScriptRecord.append(scriptRecord)
scriptRecord.ScriptTag = "DFLT"
script = scriptRecord.Script = Script()
defaultLangSys = script.DefaultLangSys = DefaultLangSys()
defaultLangSys.FeatureCount = featureCount
defaultLangSys.FeatureIndex = range(defaultLangSys.FeatureCount)
defaultLangSys.ReqFeatureIndex = 65535
defaultLangSys.LookupOrder = None
script.LangSysCount = 0
script.LangSysRecord = []
# set up the feature list
featureList = gsub.FeatureList = FeatureList()
featureList.FeatureCount = featureCount
featureList.FeatureRecord = []
for index, tag in enumerate(features):
# feature record
featureRecord = FeatureRecord()
featureRecord.FeatureTag = tag
feature = featureRecord.Feature = Feature()
featureList.FeatureRecord.append(featureRecord)
# feature
feature.FeatureParams = None
feature.LookupCount = 1
feature.LookupListIndex = [index]
# write the lookups
lookupList = gsub.LookupList = LookupList()
lookupList.LookupCount = featureCount
lookupList.Lookup = []
for tag in features:
# lookup
lookup = Lookup()
lookup.LookupType = 3
lookup.LookupFlag = 0
lookup.SubTableCount = 1
lookup.SubTable = []
lookupList.Lookup.append(lookup)
# subtable
subtable = AlternateSubst()
subtable.Format = 1
subtable.LookupType = 3
subtable.alternates = {
"%s.default" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt1" % tag : ["%s.pass" % tag, "%s.fail" % tag, "%s.fail" % tag],
"%s.alt2" % tag : ["%s.fail" % tag, "%s.pass" % tag, "%s.fail" % tag],
"%s.alt3" % tag : ["%s.fail" % tag, "%s.fail" % tag, "%s.pass" % tag]
}
lookup.SubTable.append(subtable)
path = outputPath % 3 + ".otf"
if os.path.exists(path):
os.remove(path)
shell.save(path)
# get rid of the shell
if os.path.exists(shellTempPath):
os.remove(shellTempPath)
def makeJavascriptData():
    """Write gsubtest-features.js, mapping every feature tag to the base
    codepoint of its four-glyph test block in the generated fonts.

    Reads the module-level ``mapping`` (feature tags) and
    ``baseCodepoint``; overwrites ``javascriptData`` on disk.
    """
    features = sorted(mapping)
    outStr = []
    # Banner comment explaining the PASS/FAIL glyph layout per feature.
    outStr.append("")
    outStr.append("/* This file is autogenerated by makegsubfonts.py */")
    outStr.append("")
    outStr.append("/* ")
    outStr.append(" Features defined in gsubtest fonts with associated base")
    outStr.append(" codepoints for each feature:")
    outStr.append("")
    outStr.append(" cp = codepoint for feature featX")
    outStr.append("")
    outStr.append(" cp default PASS")
    outStr.append(" cp featX=1 FAIL")
    outStr.append(" cp featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+1 default FAIL")
    outStr.append(" cp+1 featX=1 PASS")
    outStr.append(" cp+1 featX=2 FAIL")
    outStr.append("")
    outStr.append(" cp+2 default FAIL")
    outStr.append(" cp+2 featX=1 FAIL")
    outStr.append(" cp+2 featX=2 PASS")
    outStr.append("")
    outStr.append("*/")
    outStr.append("")
    outStr.append("var gFeatures = {");
    cp = baseCodepoint
    taglist = []
    for tag in features:
        taglist.append("\"%s\": 0x%x" % (tag, cp))
        # Each feature owns four consecutive codepoints (default, alt1-3),
        # matching the glyph layout produced by makeLookup3.
        cp += 4
    outStr.append(textwrap.fill(", ".join(taglist), initial_indent=" ", subsequent_indent=" "))
    outStr.append("};");
    outStr.append("");
    # Remove any stale copy before rewriting the data file.
    if os.path.exists(javascriptData):
        os.remove(javascriptData)
    f = open(javascriptData, "wb")
    f.write("\n".join(outStr))
    f.close()
# build fonts
print "Making lookup type 1 font..."
makeLookup1()
print "Making lookup type 3 font..."
makeLookup3()
# output javascript data
print "Making javascript data file..."
makeJavascriptData() | mpl-2.0 |
hwmrocker/hysbakstryd | config.py | 1 | 1187 | """
Configuration for Hysbakstryd server.
"""
class AttrDict(dict):
    """A dict whose items double as attributes, and which yields ``None``
    for anything missing — both ``d.missing`` and ``d['missing']``.

    Behaves like a ``defaultdict`` with a ``None`` factory, except that
    missing keys are never inserted. Based on http://stackoverflow.com/a/14620633
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Alias the attribute namespace to the mapping itself so item
        # assignment and attribute assignment stay in sync.
        self.__dict__ = self

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails; fall back to
        # None instead of raising AttributeError.
        try:
            return super().__getattribute__(name)
        except AttributeError:
            return None

    def __getitem__(self, key):
        # dict.get already encodes "value if present, else None".
        return self.get(key)
# Short alias so the configuration literals below read tersely.
d = AttrDict
# Network settings (none yet; missing keys read as None via AttrDict).
network = d({
})
# Core game timing.
game = d({
    'ticks_per_day': 120000,  # 20 real time minutes per game day, each hour is 50 seconds, 5000 ticks
})
# Derived value: ticks per in-game hour (120000 / 24 = 5000).
game.ticks_per_hour = game.ticks_per_day / 24
# settings for plugins: if your plugin has settings, add an entry for your plugin here
plugins = d({
})
plugins.people = d({
    'people_appear_per_elevator_shaft': 0.1,
    'max_people_waiting_per_level': 20,
})
plugins.server_observer = d({
    # 'observe_events': True,
    'with_args': True,
})
| gpl-2.0 |
lociii/googleads-python-lib | examples/adspygoogle/dfa/v1_19/get_available_permissions.py | 3 | 2027 | #!/usr/bin/python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example displays all of the available permissions that a user role or
subnetwork may be endowed with. To get a subnetwork ID, run
get_subnetworks.py.
A user role may not be set with more permissions than the subnetwork it
belongs to. You may enter a subnetwork ID to see the maximum permissions a
user role belonging to it can have, or enter '0' as the subnetwork ID to see
all possible permissions.
Tags: userrole.getAvailablePermissions
"""
__author__ = 'api.jdilallo@gmail.com (Joseph DiLallo)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfaClient
SUBNETWORK_ID = 'INSERT_SUBNETWORK_ID_HERE'
def main(client, subnetwork_id):
  """Print every available permission for the given subnetwork.

  Passing '0' as subnetwork_id lists all possible permissions
  (Python 2 example script for the DFA API).
  """
  # Initialize appropriate service.
  user_role_service = client.GetUserRoleService(
      'https://advertisersapitest.doubleclick.net', 'v1.19')
  # Get available permissions.
  results = user_role_service.GetAvailablePermissions(subnetwork_id)
  # Display permission name and its ID.
  if results:
    for permission in results:
      print ('Permission with name \'%s\' and ID \'%s\' was found.'
             % (permission['name'], permission['id']))
  else:
    print 'No permissions found.'
if __name__ == '__main__':
# Initialize client object.
client = DfaClient(path=os.path.join('..', '..', '..', '..'))
main(client, SUBNETWORK_ID)
| apache-2.0 |
aelkikhia/pyduel_engine | pyduel_engine/model/board.py | 1 | 11091 | from math import fabs
import logging
from pyduel_engine.content import default_boards as boards
from pyduel_engine.content.engine_states import SqState, BoardType
from pyduel_engine.model import position as ps
def _adjacent_square_pos(pos):
    """Return the eight neighbouring positions of ``pos``.

    Order: down, up, left, right, then the four diagonals
    (down-left, down-right, up-left, up-right).
    """
    shifts = (ps.shift_down, ps.shift_up,
              ps.shift_left, ps.shift_right,
              ps.shift_down_left, ps.shift_down_right,
              ps.shift_up_left, ps.shift_up_right)
    return [shift(pos) for shift in shifts]
DEFAULT_MAX_X = 10
DEFAULT_MAX_Y = 7
class Board(object):
"""Board object that maintains the states of each position """
# logging info
logger = logging.getLogger(__name__)
    def __init__(self, kwargs):
        # NOTE: takes a plain options dict positionally, not **kwargs.
        self._name = kwargs.get('name', None)
        self._type = kwargs.get('board_type', None)
        self._max_x = kwargs.get('max_x', DEFAULT_MAX_X)
        self._max_y = kwargs.get('max_y', DEFAULT_MAX_Y)
        # Caller-supplied board wins; otherwise start from an empty grid.
        self._board = kwargs.get('board', self.empty_board())
        # A board_type triggers loading of the predefined layout.
        if self._type:
            self.board_initializer(self._type)
    def __repr__(self):
        # NOTE(review): self.type may be a BoardType enum member or None;
        # __repr__ must return a str — confirm this never raises TypeError.
        return self.type
    def __str__(self):
        """String representation of the board """
        # Header row: board type, then column indices 0..max_x-1.
        board = str(self.type) + '\n '
        for x in range(0, self.max_x):
            board += str(x)
        board += '\n--' + ('-' * (self.max_x + 2)) + '\n'
        # One row per y, with a single letter per square state.
        for y in range(0, self.max_y):
            board += str(y) + ' | '
            for x in range(0, self.max_x):
                if self.board[x][y]['state'] == SqState.empty:
                    board += 'E'
                if self.board[x][y]['state'] == SqState.dark:
                    board += 'D'
                if self.board[x][y]['state'] == SqState.hole:
                    board += 'H'
                if self.board[x][y]['state'] == SqState.light:
                    board += 'L'
                if self.board[x][y]['state'] == SqState.obstacle:
                    board += 'O'
            board += '\n'
        return board
# ############################ Properties #################################
    @property
    def board(self):
        # Nested dict of square states, indexed as board[x][y]['state'].
        return self._board
    @property
    def name(self):
        # Human-readable board name (set when a template board is loaded).
        return self._name
    @property
    def max_x(self):
        # Board width, used by rendering and bounds checks.
        return self._max_x
    @property
    def max_y(self):
        # Board height, used by rendering and bounds checks.
        return self._max_y
    @property
    def type(self):
        # BoardType enum member, or None for a custom/empty board.
        return self._type
# ############################ Modifiers ##################################
    def sqr_state(self, pos):
        """Get square state from board """
        return self.board[pos.x][pos.y]['state']
    def set_sqr_state(self, pos, char_side):
        """Sets square state at pos on board """
        self.board[pos.x][pos.y]['state'] = char_side
    def make_empty(self, pos):
        """Sets position to empty on board"""
        self.board[pos.x][pos.y]['state'] = SqState.empty
    def empty_board(self):
        """Creates an empty board with the given dimensions"""
        # NOTE(review): allocates indices 0..max inclusive (max+1 squares per
        # axis), while out_of_bounds treats max_x-1 / max_y-1 as the last
        # valid index — the extra row/column looks unreachable; confirm.
        return {x: {y: {'state': SqState.empty}
                    for y in range(0, self._max_y + 1)}
                for x in range(0, self._max_x + 1)}
    def find_moves(self, char, num_moves, pos=None, moves=None):
        """Recursively collect every square ``char`` can reach within
        ``num_moves`` orthogonal steps.

        ``pos``/``moves`` are internal recursion parameters; callers pass
        only ``char`` and ``num_moves``. Returns a list of positions.
        """
        # TODO: FIX THIS Method
        # create list
        if moves is None:
            # Root call: start from the character's own square.
            moves = set()
            pos = char.pos
        else:
            # if the new position isn't valid return without adding
            # only specifying side here as an input, can't remember why
            if not self.is_valid_move(char.side, pos):
                return moves
        # add move if square is not occupied
        # NOTE(review): on the root call this indexes the board before any
        # bounds check — presumably char.pos is always in bounds; confirm.
        if self.is_empty(pos):
            moves.add(pos)
        # if no num_moves
        if num_moves == 0:
            return moves
        # decrement the number of moves
        num_moves -= 1
        # check up down left and right and make sure we don't backtrack
        up = ps.shift_up(pos)
        down = ps.shift_down(pos)
        left = ps.shift_left(pos)
        right = ps.shift_right(pos)
        # Recursive calls mutate the shared ``moves`` set; their return
        # values are deliberately ignored.
        # if up not in moves:
        self.find_moves(char, num_moves, up, moves)
        # if down not in moves:
        self.find_moves(char, num_moves, down, moves)
        # if left not in moves:
        self.find_moves(char, num_moves, left, moves)
        # if right not in moves:
        self.find_moves(char, num_moves, right, moves)
        # return list(moves)
        return list(moves)
def get_adj_empty_pos(self, pos):
"""Returns list of all empty positions on the board"""
if not self.out_of_bounds(pos):
adj_pos = _adjacent_square_pos(pos)
return [x for x in adj_pos
if not self.out_of_bounds(x) and self.is_empty(x)]
return []
    def board_initializer(self, board_type):
        """ builds board by adding holes, obstructions, name, type"""
        # Look up the predefined layout for this board type.
        selected_board = boards.BOARDS[board_type]
        self._name = selected_board['name']
        self._type = BoardType(board_type)
        # Json board coordinates
        # For every square-state kind present in the template, stamp its
        # listed {x, y} coordinates onto the board.
        # NOTE: the loop variable ``name`` is unused; only sq_type matters.
        for name, sq_type in SqState.__members__.items():
            if sq_type in selected_board['states']:
                for pos in selected_board['states'][sq_type]:
                    self.board[pos['x']][pos['y']]['state'] = sq_type
# ############################ Discovery ##################################
    def is_empty(self, pos):
        """Return True when the square at ``pos`` is unoccupied."""
        return self.sqr_state(pos) == SqState.empty
    def is_ally(self, side, pos):
        """Return True when the square at ``pos`` is held by ``side``."""
        return self.sqr_state(pos) == side
def _is_parallel_clr_x_axis(self, origin, target):
"""called if the pivot is on the x plane and traverses through all the
squares to check if path is clear"""
pivot = origin.x
if origin.y < target.y:
i = origin.y + 1
end = target.y
else:
i = target.y + 1
end = origin.y
while i != end:
if self.board[pivot][i]['state'] == SqState.empty or \
self.board[pivot][i]['state'] == SqState.hole:
i += 1
else:
return False
return True
def _is_parallel_clr_y_axis(self, origin, target):
"""called if the pivot is on the y plane and traverses through all the
squares to check if path is clear"""
pivot = origin.y
if origin.x < target.x:
i = origin.x + 1
end = target.x
else:
i = target.x + 1
end = origin.x
while i != end:
if self.board[i][pivot]['state'] == SqState.empty or \
self.board[i][pivot]['state'] == SqState.hole:
i += 1
else:
return False
return True
def is_parallel_clr(self, origin, target):
    """Verify there is a clear parallel (row/column) path between two points."""
    # Points that are not on the same row or column have no parallel path.
    if not origin.is_parallel(target):
        return False
    # Adjacent squares have no intermediate squares to inspect.
    if origin.is_adj(target):
        return True
    # Pick the axis checker based on which coordinate is shared.
    checker = (self._is_parallel_clr_x_axis if origin.x == target.x
               else self._is_parallel_clr_y_axis)
    return checker(origin, target)
def is_diagonal_clr(self, origin, target):
    """Verify there is a clear diagonal path between two points.

    Every square strictly between ``origin`` and ``target`` must be
    passable (empty or a hole). A diagonal at Chebyshev distance ``d``
    has ``d - 1`` intermediate squares.

    Bug fix: the original computed ``end = d - 1`` and looped
    ``range(1, end)``, which inspects only ``d - 2`` squares and skips
    the intermediate square next to the target entirely (for ``d == 2``
    nothing was checked at all). The parallel-path checkers inspect all
    ``d - 1`` intermediates; this now does the same.
    """
    # Step direction along each axis.
    step_x = 1 if origin.x < target.x else -1
    step_y = 1 if origin.y < target.y else -1
    distance = abs(origin.x - target.x)
    # Check all distance - 1 squares strictly between origin and target.
    for i in range(1, distance):
        state = self.board[origin.x + step_x * i][origin.y + step_y * i]['state']
        if state != SqState.empty and state != SqState.hole:
            return False
    return True
def is_obstructed(self, char):
    """Verify whether a character is blocked in all four adjacent squares."""
    shifts = (ps.shift_down, ps.shift_up, ps.shift_left, ps.shift_right)
    # Obstructed when no adjacent square can be moved on or through.
    return not any(
        self.can_move_through(char.side, shift(char.pos)) for shift in shifts)
def out_of_bounds(self, pos):
    """Check whether ``pos`` falls outside the board rectangle."""
    inside_x = 0 <= pos.x < self.max_x
    inside_y = 0 <= pos.y < self.max_y
    return not (inside_x and inside_y)
def can_move_through(self, side, new_pos):
    """A square can be moved on or through when it is empty or ally-held."""
    if self.is_empty(new_pos):
        return True
    return self.is_ally(side, new_pos)
def is_valid_move(self, side, new_pos):
    """Check whether a character of ``side`` may move to ``new_pos``.

    The move is valid when the square is inside the board and can be
    moved on or through (empty or ally-held).
    """
    if self.out_of_bounds(new_pos):
        return False
    return self.can_move_through(side, new_pos)
def can_target(self, char, target):
    """Verify whether ``char`` can attack ``target`` (adjacency or range)."""
    # Adjacent targets are always attackable.
    if char.is_adj(target):
        return True
    # Only ranged characters may attack beyond adjacency.
    if not char.is_range:
        return False
    if char.is_diagonal(target) and self.is_diagonal_clr(char.pos, target.pos):
        return True
    # NOTE(review): `char.is_parallel` is read as a plain attribute here
    # while `is_diagonal` above is called with the target -- preserved
    # as-is, but worth confirming against the character class.
    if char.is_parallel and self.is_parallel_clr(char.pos, target.pos):
        return True
    return False
# ######################## Print / Format Methods ############################
def print_board_coordinates(self):
    """Return a printable grid of the board with (x,y) coordinates.

    Debug helper: renders a header row of x indices, a divider and one
    row per y value, with each cell printed as ``(x,y)``. Despite the
    name this returns the string rather than printing it.
    """
    board = str(self.type) + '\n '
    # Header row of x coordinates.
    for x in range(0, self.max_x):
        board += ' ' + str(x) + ' '
    board += '\n' + ('----' + ('-' * self.max_x * 5)) + '\n'
    for y in range(0, self.max_y):
        board += str(y) + ' | '
        for x in range(0, self.max_x):
            # NOTE(review): all five state branches emit the identical
            # "(x,y)" text -- presumably placeholders for distinct
            # per-state symbols; squares in any other state (if one
            # exists) would print nothing. Confirm against SqState.
            if self.board[x][y]['state'] == SqState.empty:
                board += ''.join(['(', str(x), ',', str(y), ')'])
            if self.board[x][y]['state'] == SqState.dark:
                board += ''.join(['(', str(x), ',', str(y), ')'])
            if self.board[x][y]['state'] == SqState.hole:
                board += ''.join(['(', str(x), ',', str(y), ')'])
            if self.board[x][y]['state'] == SqState.light:
                board += ''.join(['(', str(x), ',', str(y), ')'])
            if self.board[x][y]['state'] == SqState.obstacle:
                board += ''.join(['(', str(x), ',', str(y), ')'])
        board += '\n'
    return board
| apache-2.0 |
CroceRossaItaliana/jorvik | posta/tasks.py | 1 | 2602 | from celery import shared_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task(bind=True)
def invia_mail(self, pk):
    """Send the queued e-mail message with primary key ``pk``.

    Skips messages that were already delivered and messages whose
    recorded task id does not match this worker (stale duplicates);
    retries the task on a temporary delivery failure.
    """
    from .models import Messaggio

    msg = Messaggio.objects.get(pk=pk)
    logger.info("messaggio id=%d" % pk)
    # Already delivered: nothing to do.
    if msg.terminato:
        logger.warning("Il messaggio e' gia' stato inviato. Esco.")
        return
    # Make sure we are the task that owns this message (avoids duplicates).
    if msg.task_id != self.request.id:
        logger.warning("Worker ID non corrispondente. Possibile duplicato. Esco.")
        return
    if not msg.invia():
        # Temporary failure: ask celery to reschedule this task.
        logger.error("Errore temporaneo, nuovo tentativo richiesto.")
        raise self.retry()
    # Delivered successfully: clear the task id marker and persist.
    logger.info("Messaggio inviato. Rimuovo task_id e salvo.")
    msg.task_id = None
    msg.save()
@shared_task(bind=True)
def invia_mail_forzato(self, pk_tuple):
    """Forcibly (re)send the messages whose primary keys are in ``pk_tuple``.

    Unlike ``invia_mail``, no check is made on whether a message was
    already sent or is still queued: each message is given a fresh task
    id and sent immediately.

    Fix: drops the redundant ``logger = get_task_logger(__name__)``
    re-initialization that shadowed the identically-built module-level
    logger, and removes dead temporaries.

    Returns:
        The number of messages successfully rescheduled and sent.
    """
    from celery import uuid
    from celery.result import AsyncResult

    from .models import Messaggio

    rescheduled_tasks_id = []
    for msg in Messaggio.objects.filter(pk__in=pk_tuple):
        pk = msg.pk
        logger.info("[forced] Controllo messaggio id=%d" % pk)
        # If the message still carries a task id, forget that task's
        # result so a brand-new task id can be assigned below.
        if msg.task_id is not None:
            AsyncResult(msg.task_id).forget()
            logger.info("[forced] Dimentico task_id %s per il messaggio id=%d" % (msg.task_id, pk))
        # Assign a fresh task id and persist it before sending.
        msg.task_id = uuid()
        msg.save()
        logger.warning("[forced] Nuovo task per l'invio accodato con id=%s." % msg.task_id)
        if not msg.invia(forced=True):
            # NOTE(review): retrying here aborts the remaining messages in
            # pk_tuple as well -- preserved from the original behavior.
            logger.error("[forced] Errore temporaneo, nuovo tentativo richiesto.")
            raise self.retry()
        # Sent successfully: record the task id, then clear the marker.
        logger.info("[forced] Messaggio %s inviato. Rimuovo task_id e salvo." % msg.pk)
        rescheduled_tasks_id.append(msg.task_id)
        msg.task_id = None
        msg.save()
    return len(rescheduled_tasks_id)
| gpl-3.0 |
Cinntax/home-assistant | homeassistant/components/nissan_leaf/sensor.py | 4 | 3155 | """Battery Charge and Range Support for the Nissan Leaf."""
import logging
from homeassistant.const import DEVICE_CLASS_BATTERY
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.distance import LENGTH_KILOMETERS, LENGTH_MILES
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from . import (
DATA_BATTERY,
DATA_CHARGING,
DATA_LEAF,
DATA_RANGE_AC,
DATA_RANGE_AC_OFF,
LeafEntity,
)
_LOGGER = logging.getLogger(__name__)
ICON_RANGE = "mdi:speedometer"
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up battery and range sensors for every discovered Leaf."""
    if discovery_info is None:
        return
    entities = []
    for vin, datastore in hass.data[DATA_LEAF].items():
        _LOGGER.debug("Adding sensors for vin=%s", vin)
        # One battery sensor plus range sensors with and without AC.
        entities.extend([
            LeafBatterySensor(datastore),
            LeafRangeSensor(datastore, True),
            LeafRangeSensor(datastore, False),
        ])
    add_devices(entities, True)
class LeafBatterySensor(LeafEntity):
    """Battery charge sensor for a Nissan Leaf vehicle."""

    @property
    def name(self):
        """Return the display name of the sensor."""
        return self.car.leaf.nickname + " Charge"

    @property
    def device_class(self):
        """Return the device class of the sensor."""
        return DEVICE_CLASS_BATTERY

    @property
    def state(self):
        """Return the battery charge, rounded to a whole number."""
        return round(self.car.data[DATA_BATTERY])

    @property
    def unit_of_measurement(self):
        """Charge is reported as a percentage."""
        return "%"

    @property
    def icon(self):
        """Pick a battery icon reflecting charge level and charging state."""
        is_charging = self.car.data[DATA_CHARGING]
        return icon_for_battery_level(
            battery_level=self.state, charging=is_charging)
class LeafRangeSensor(LeafEntity):
    """Driving range sensor for a Nissan Leaf, with or without AC."""

    def __init__(self, car, ac_on):
        """Set up the range sensor, remembering whether AC is on."""
        self._ac_on = ac_on
        super().__init__(car)

    @property
    def name(self):
        """Return the sensor name, marking the AC variant."""
        base = self.car.leaf.nickname
        if self._ac_on is True:
            return base + " Range (AC)"
        return base + " Range"

    def log_registration(self):
        """Log that the sensor has been registered with HASS."""
        _LOGGER.debug(
            "Registered LeafRangeSensor integration with HASS for VIN %s",
            self.car.leaf.vin,
        )

    @property
    def state(self):
        """Return the remaining range, converted to miles when configured."""
        data_key = DATA_RANGE_AC if self._ac_on else DATA_RANGE_AC_OFF
        value = self.car.data[data_key]
        if not self.car.hass.config.units.is_metric or self.car.force_miles:
            value = IMPERIAL_SYSTEM.length(value, METRIC_SYSTEM.length_unit)
        return round(value)

    @property
    def unit_of_measurement(self):
        """Return miles or kilometers, matching the state conversion."""
        if not self.car.hass.config.units.is_metric or self.car.force_miles:
            return LENGTH_MILES
        return LENGTH_KILOMETERS

    @property
    def icon(self):
        """Speedometer icon for range."""
        return ICON_RANGE
kamcpp/tensorflow | tensorflow/python/ops/math_ops.py | 3 | 68893 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@scalar_mul
@@div
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Fourier Transform Functions
TensorFlow provides several operations that you can use to add discrete
Fourier transform functions to your graph.
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
## Scan
TensorFlow provides several operations that you can use to perform scans
(running totals) across one axis of a tensor.
@@cumsum
@@cumprod
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
  """Computes the absolute value of a tensor.

  Given a tensor `x` of real or complex numbers, returns a tensor with the
  absolute value \\\\(y = |x|\\\\) of each element. For complex input the
  result has the corresponding real dtype (see also `tf.complex_abs()`).

  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size as `x` holding the
    absolute values.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    if isinstance(x, ops.SparseTensor):
      # Operate on the values only and rebuild the sparse tensor with the
      # original layout.
      values = x.values
      if values.dtype in (dtypes.complex64, dtypes.complex128):
        abs_values = gen_math_ops.complex_abs(
            values, Tout=values.dtype.real_dtype, name=name)
      else:
        abs_values = gen_math_ops._abs(values, name=name)
      return ops.SparseTensor(indices=x.indices, values=abs_values,
                              shape=x.shape)
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype in (dtypes.complex64, dtypes.complex128):
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  Args:
    x: Numerator tensor.
    y: Denominator tensor.
    name: A name for the operation (optional).

  Returns:
    The elementwise quotient `x / y`.
  """
  with ops.name_scope(name, "Divide", [x]) as name:
    quotient = x / y
    return quotient
def neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively, of the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops.neg(x, name=name)
    # Sparse: negate the stored values and keep the layout.
    negated = gen_math_ops.neg(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=negated, shape=x.shape)
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively, of the same type as `x`.
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops.sign(x, name=name)
    # Sparse: compute signs of the stored values and keep the layout.
    signs = gen_math_ops.sign(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=signs, shape=x.shape)
def square(x, name=None):
  """Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops.square(x, name=name)
    # Sparse: square the stored values and keep the layout.
    squared = gen_math_ops.square(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=squared, shape=x.shape)
def sqrt(x, name=None):
  """Computes square root of x element-wise.

  I.e., \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively, of the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops.sqrt(x, name=name)
    # Sparse: take the root of the stored values and keep the layout.
    roots = gen_math_ops.sqrt(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=roots, shape=x.shape)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively, of the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops.erf(x, name=name)
    # Sparse: apply erf to the stored values and keep the layout.
    erf_values = gen_math_ops.erf(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=erf_values,
                            shape=x.shape)
def complex_abs(x, name=None):
  r"""Computes the complex absolute value of a tensor.

  Given a tensor `x` of complex numbers of the form \\(a + bj\\), returns a
  tensor of type `float32` or `float64` holding \\( \sqrt{a^2 + b^2}\\) for
  each element.

  For example:

  ```
  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
  tf.complex_abs(x) ==> [5.25594902, 6.60492229]
  ```

  Args:
    x: A `Tensor` of type `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  out_dtype = x.dtype.real_dtype
  return gen_math_ops.complex_abs(x, Tout=out_dtype, name=name)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype,
                                 name="scalar")
  shape = scalar.get_shape()
  # Only true scalars (rank 0) are accepted.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale the values and keep indices / dense shape unchanged.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  """Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, computes \\\\(x^y\\\\) for the
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    result = gen_math_ops._pow(x, y, name=name)
  return result
def complex(real, imag, name=None):
  """Converts two real numbers to a complex number.

  Given a tensor `real` for the real part and a tensor `imag` for the
  imaginary part, returns complex numbers elementwise of the form
  \\(a + bj\\). `real` and `imag` must have the same shape and dtype
  (`float32` or `float64`).

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If the input dtypes are not a matching pair of float types.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # The output dtype is fully determined by the (matching) input dtypes.
    out_types = {
        (dtypes.float32, dtypes.float32): dtypes.complex64,
        (dtypes.float64, dtypes.float64): dtypes.complex128,
    }
    try:
      Tout = out_types[(real.dtype, imag.dtype)]
    except KeyError:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
  """Returns the real part of a complex number.

  Given a tensor `input` of complex numbers of the form \\(a + bj\\),
  returns a tensor of type `float32` or `float64` holding the real part
  *a* of each element.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    target_dtype = input.dtype.real_dtype
    # Real input passes through untouched.
    if input.dtype.base_dtype == target_dtype:
      return input
    return gen_math_ops.real(input, Tout=target_dtype, name=name)
def imag(input, name=None):
  """Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers of the form \\(a + bj\\),
  returns a tensor of type `float32` or `float64` holding the imaginary
  part *b* of each element.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    out_dtype = input.dtype.real_dtype
    return gen_math_ops.imag(input, Tout=out_dtype, name=name)
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  NOTE: the current implementation computes `floor(x + 0.5)`, which rounds
  exact halves towards positive infinity (e.g. 2.5 -> 3.0, -4.5 -> -4.0),
  NOT half-to-even "bankers rounding". The TODO below tracks switching to
  the dedicated Round op.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 3.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`. Integer input is returned
    unchanged.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    # Integers are already rounded.
    return x
  else:
    # TODO(nolivia): Switch to new Round op
    # return gen_math_ops.round(x, name=name)
    return gen_math_ops.floor(x + 0.5, name=name)


# Shape of Round is the same as its input; delegate to the C++ shape fn.
ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  Casts `x` (for a `Tensor`) or `x.values` (for a `SparseTensor`) to
  `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, ops.SparseTensor):
      # Cast the values only; indices and shape are dtype-independent.
      return ops.SparseTensor(x.indices,
                              cast(x.values, base_type, name=name),
                              x.shape)
    # TODO(touts): Handle what Josh said.
    #
    # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
    # allows some conversions that cast() can't do, e.g. casting numbers to
    # strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == base_type:
      # Nothing to do: already the requested type.
      return x
    return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  Casts the input to `dtype` without scaling. If values could over- or
  underflow in the cast, the input is clamped into the representable
  range of `dtype` first.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with a smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      lower = ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min")
      value = gen_math_ops.maximum(value, lower)
    if value.dtype.max > dtype.max:
      upper = ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max")
      value = gen_math_ops.minimum(value, upper)
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor (or `SparseTensor`) to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `x` with the same shape, cast to `float32`.

  Raises:
    TypeError: If `x` cannot be cast to `float32`.
  """
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor (or `SparseTensor`) to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `x` with the same shape, cast to `float64`.

  Raises:
    TypeError: If `x` cannot be cast to `float64`.
  """
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor (or `SparseTensor`) to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `x` with the same shape, cast to `int32`.

  Raises:
    TypeError: If `x` cannot be cast to `int32`.
  """
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor (or `SparseTensor`) to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `x` with the same shape, cast to `int64`.

  Raises:
    TypeError: If `x` cannot be cast to `int64`.
  """
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor (or `SparseTensor`) to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    `x` with the same shape, cast to `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to `bfloat16`.
  """
  return cast(x, dtypes.bfloat16, name=name)
# Wire the unary Python operators on Tensor: `-t` and `abs(t)` dispatch to
# the elementwise negation and absolute-value ops defined above.
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """
  def binary_op_wrapper(x, y):
    # Dense `x <op> y`: coerce the right operand to a tensor of x's dtype
    # (unless it is sparse, which `func` handles itself).
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, ops.SparseTensor):
        y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
      return func(x, y, name=name)

  def binary_op_wrapper_sparse(sp_x, y):
    # Sparse `sp_x <op> y`: `func` consumes the component tensors and
    # returns new values, which are rewrapped with the original layout.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
                                                 sp_x.shape, y, name=name),
                              sp_x.shape)

  def r_binary_op_wrapper(y, x):
    # Reflected variant (`__r<op>__`): here `x` is the non-Tensor left
    # operand, coerced to y's dtype before applying `func`.
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only gets the forward operator; there is no reflected
    # sparse wrapper registered.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer dtypes are promoted to a float dtype wide enough to represent
# quotients exactly: 8/16-bit ints go to float32, 32/64-bit ints to float64
# (matching Numpy's true-division behavior).
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    # Both operands must agree on the dtype before any promotion.
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    # Integer dtypes are promoted to a float dtype (None means no-op).
    common_dtype = _TRUEDIV_TABLE[x_dtype]
    if common_dtype is not None:
      sp_values = cast(sp_values, common_dtype)
      y = cast(y, common_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
                                                 sp_shape, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise, always producing floating point results.

  The same as `tf.div` for floating point arguments, but casts integer
  arguments to floating point before dividing so that the result is always
  floating point. This op is generated by normal `x / y` division in Python 3
  and in Python 2.7 with `from __future__ import division`. If you want
  integer division that rounds down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for
  `int32` and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    numer_dtype = x.dtype.base_dtype
    denom_dtype = y.dtype.base_dtype
    if numer_dtype != denom_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (numer_dtype, denom_dtype))
    if numer_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % numer_dtype)
    target_dtype = _TRUEDIV_TABLE[numer_dtype]
    if target_dtype is not None:
      # Integer inputs: promote to floating point so the result is exact-ish.
      x = cast(x, target_dtype)
      y = cast(y, target_dtype)
    return gen_math_ops.div(x, y, name=name)
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding down for floating point.

  The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
  floating point arguments so that the result is always an integer (though
  possibly an integer represented as floating point). This op is generated by
  `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    if dtype.is_floating:
      # Floating point: divide, then round down to the nearest integer value.
      return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
    if not dtype.is_integer:
      raise TypeError("Expected floating point or integer, got %r" % dtype)
    # Integer division already truncates; note this is C-style (towards zero
    # for negatives), not Python-style floor.
    # TODO(aselle): Switch to math_ops.floor_div() when ready
    # return gen_math_ops.floor_div(x, y, name=name)
    return gen_math_ops.div(x, y, name=name)
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    # Dense * Dense: plain element-wise multiply.
    return gen_math_ops.mul(x, y, name=name)
  assert isinstance(y, ops.SparseTensor)  # Case: Dense * Sparse.
  scaled_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                      y.shape, x, name)
  return ops.SparseTensor(y.indices, scaled_vals, y.shape)
# Register the element-wise binary operators so that Python operator syntax
# (`x + y`, `x * y`, `x / y`, ...) dispatches to the corresponding TF op.
# The SparseTensor registrations use the sparse-aware cwise kernels.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              ops.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              ops.SparseTensor)
# Dense Tensor operator registrations.
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(either,
                                  gen_math_ops.logical_not(both),
                                  name=name)
# Register the logical operators (&, |, ^) on Tensor.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# Register the rich comparison operators (<, <=, >, >=) on Tensor.
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`. The dtype of the
  resulting tensor is inferred from the inputs unless it is provided
  explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  ```python
  tf.range(3, 18, 3) ==> [3, 6, 9, 12, 15]
  tf.range(3, 1, -0.5) ==> [3, 2.5, 2, 1.5]
  tf.range(5) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
      `limit` is not None; otherwise, acts as range limit and first entry
      defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
      exclusive. If None, defaults to the value of `start` while the first
      entry of the range defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments
      `start`. Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.
  """
  if limit is None:
    # Single-argument form: range(n) == range(0, n).
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # Infer the result dtype as the "widest" dtype among the three inputs,
      # using this fixed promotion order.
      hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]
      args = [start, limit, delta]
      assert all(arg.dtype in hierarchy for arg in args)
      common_dtype = max([arg.dtype for arg in args], key=hierarchy.index)
      start, limit, delta = (cast(arg, common_dtype) for arg in args)
    return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
  # Delegate to the C++ shape function; it needs the constant values of
  # inputs 0-2 (start, limit, delta) to compute the output length.
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2])
# Reduction operations
def _ReductionDims(x, reduction_indices):
  """Returns range(0, rank(x)) if reduction_indices is None."""
  if reduction_indices is not None:
    return reduction_indices
  # Fast path: avoid creating Rank and Range ops if ndims is known.
  if isinstance(x, ops.Tensor):
    ndims = x.get_shape().ndims
    if ndims is not None:
      return constant_op.constant(np.arange(ndims), dtype=dtypes.int32)
  if (isinstance(x, ops.SparseTensor) and
      x.shape.get_shape().is_fully_defined()):
    rank = x.shape.get_shape()[0].value  # sparse.shape is an 1-D tensor.
    return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
  # Otherwise, we rely on Range and Rank to do the right thing at run-time.
  return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False,
                  dtype=dtypes.int64, name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Zero of the input's own dtype, for an exact element-wise comparison.
    zero = input_tensor.dtype.as_numpy_dtype()
    is_nonzero = gen_math_ops.not_equal(input_tensor, zero)
    # int64 reduction happens on GPU
    nonzero_count = reduce_sum(to_int64(is_nonzero),
                               reduction_indices=reduction_indices,
                               keep_dims=keep_dims)
    return cast(nonzero_count, dtype=dtype)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
                name=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True,  True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
               name=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True,  True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  dims = _ReductionDims(input_tensor, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False,
                     name=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `reduction_indices`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1. If `reduction_indices` has no entries, all
  dimensions are reduced, and a tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0],
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    reduction_indices: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Subtract the (gradient-stopped) max before exponentiating for numerical
    # stability, then add it back after the log.
    my_max = array_ops.stop_gradient(
        reduce_max(input_tensor, reduction_indices, keep_dims=True))
    shifted_exp = gen_math_ops.exp(input_tensor - my_max)
    summed = reduce_sum(shifted_exp, reduction_indices, keep_dims=True)
    result = gen_math_ops.log(summed) + my_max
    if not keep_dims:
      result = array_ops.squeeze(result, reduction_indices)
    return result
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5

  # 'x' is [[1, 2, 3],
  #         [4, 5, 6],
  #         [7, 8, 9]]
  tf.trace(x) ==> 15

  # 'x' is [[[ 1,  2,  3],
  #          [ 4,  5,  6],
  #          [ 7,  8,  9]],
  #         [[-1, -2, -3],
  #          [-4, -5, -6],
  #          [-7, -8, -9]]]
  tf.trace(x) ==> [15, -15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Trace = sum of the main diagonal of each inner-most matrix.
    diagonals = array_ops.matrix_diag_part(x)
    return reduce_sum(diagonals, [-1], name=name)
def matmul(a, b,
           transpose_a=False, transpose_b=False,
           a_is_sparse=False, b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must be two-dimensional matrices, with matching inner dimensions,
  possibly after transposition. Both matrices must be of the same type. The
  supported types are: `float32`, `float64`, `int32`, `complex64`.

  Either matrix can be transposed on the fly by setting the corresponding flag
  to `True`. This is `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.

  For example:

  ```python
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  c = tf.matmul(a, b)  # [[58, 64], [139, 154]]
  ```

  Args:
    a: `Tensor` of type `float32`, `float64`, `int32` or `complex64`.
    b: `Tensor` with same type as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a`.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    sparse_matmul_types = (dtypes.bfloat16, dtypes.float32)
    dtypes_ok = (a.dtype in sparse_matmul_types and
                 b.dtype in sparse_matmul_types)
    use_sparse_matmul = dtypes_ok and (a_is_sparse or b_is_sparse)
    if a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16:
      # matmul currently doesn't handle bfloat16 inputs, so route any
      # bfloat16 operand through sparse_matmul.
      use_sparse_matmul = True
    if not use_sparse_matmul:
      return gen_math_ops._mat_mul(a, b,
                                   transpose_a=transpose_a,
                                   transpose_b=transpose_b,
                                   name=name)
    return sparse_matmul(a, b,
                         transpose_a=transpose_a,
                         transpose_b=transpose_b,
                         a_is_sparse=a_is_sparse,
                         b_is_sparse=b_is_sparse,
                         name=name)
# Public aliases for the generated sparse/batch matmul kernels.
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
# Shape inference for the matmul ops is delegated to the C++ shape functions.
ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The shared inner dimension k comes from a's rows when a is transposed,
  # otherwise from a's columns.
  k = int(a_shape[0]) if node.attr["transpose_a"].b else int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Each output element costs k multiplies plus k adds.
  return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
  """Calculates the on-disk size of the weights for MatMul."""
  # We assume here that the weights are always in the second input to the op,
  # which is generally true by convention for fully-connected layers, but not
  # enforced or checked.
  weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                             node.input[1])
  weights_shape.assert_is_fully_defined()
  num_params = int(weights_shape[0]) * int(weights_shape[1])
  return ops.OpStats("weight_parameters", num_params)
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    # Already in the target representation.
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  # Every row of the dense tensor becomes a slice: indices are 0..rows-1.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(1 for o in outputs
                  if o.indices.dtype == dtypes.int32)
  if num_int32 == 0 or num_int32 == len(outputs):
    # Index dtypes are already homogeneous; nothing to cast.
    return outputs
  # Mixed int32/int64 indices: widen every int32 index tensor to int64.
  casted_outputs = []
  for o in outputs:
    if o.indices.dtype == dtypes.int32:
      o = ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                            o.dense_shape)
    casted_outputs.append(o)
  return casted_outputs
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if len(inputs) == 1:
    # Single input: pass it through, renamed if a name was requested.
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs depend
  on trainable variables. Please use `tf.add_n` for such cases.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  # Validate that `inputs` is a non-empty list/tuple of Tensors of one dtype.
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  # Merge the caller-provided shape (if any) with the static shapes of all
  # inputs; merge_with raises if they are incompatible.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
    for input_tensor in inputs:
      if isinstance(input_tensor, ops.Tensor):
        shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    # Nothing to accumulate; note the `name` argument is ignored here.
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # Accumulate into a temporary variable: initialize it to zeros, then add
    # each input in place, and destroy the variable to produce the result.
    var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0),
                                            dtype=tensor_dtype)
    with ops.colocate_with(var):
      # _merge picks an available input; zeros_like gives a correctly-shaped
      # zero tensor without depending on all inputs being computed first.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      # use_locking=True serializes the in-place adds against each other.
      update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True)
                    for input_tensor in inputs]
      # The result is only valid after every add has run.
      with ops.control_dependencies(update_ops):
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
# Shape inference for BatchMatMul is delegated to the C++ shape function.
ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    x_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(x_tensor, name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x` if
    `x.dtype != qint32` otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if not isinstance(x, ops.SparseTensor):
      return gen_math_ops._tanh(x, name=name)
    # Sparse input: apply tanh to the stored values only; the sparsity
    # pattern is preserved since tanh(0) == 0.
    mapped_vals = gen_math_ops._tanh(x.values, name=name)
    return ops.SparseTensor(indices=x.indices, values=mapped_vals,
                            shape=x.shape)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction:
  ```prettyprint
  tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: A `bool` (default: False). If `True`, perform an exclusive
       cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the
  first element of the input is identical to the first element of the output:
  ```prettyprint
  tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True) ==> [0, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction:
  ```prettyprint
  tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
  ```
  This is more efficient than using separate `tf.reverse` ops.

  The `reverse` and `exclusive` kwargs can also be combined:
  ```prettyprint
  tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: A `bool` (default: False). If `True`, perform an exclusive
       cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `input` of complex numbers, this returns a tensor whose
  elements are the complex conjugates of the elements of `input`: for an
  element \\(a + bj\\) the result is \\(a - bj\\).

  For example:

      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
      tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

  If `x` is real-valued (floating point or integer), it is returned unchanged.

  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    dtype = tensor.dtype
    if dtype.is_complex:
      return gen_math_ops._conj(tensor, name=name)
    if dtype.is_floating or dtype.is_integer:
      # Conjugation is the identity on real-valued tensors.
      return tensor
    raise TypeError("Expected numeric tensor, got dtype %r" % dtype)
# All of these ops delegate shape inference to the C++ shape function
# registered for the corresponding kernel; registering them in a loop keeps
# the list in one place.
for _op_name in ("Abs", "Acos", "Asin", "Atan", "Ceil", "Conj", "Cos",
                 "Cross", "Exp", "Floor", "Imag", "Inv", "IsFinite", "IsInf",
                 "IsNan", "Log", "LogicalNot", "Neg", "Real", "Rsqrt", "Sign",
                 "Sin", "Sqrt", "Square", "Sigmoid", "Tanh", "Tan", "Lgamma",
                 "Digamma", "Erf", "Erfc", "Cast", "ComplexAbs", "FFT",
                 "IFFT", "FFT2D", "IFFT2D", "FFT3D", "IFFT3D", "TanhGrad",
                 "SigmoidGrad", "InvGrad", "SqrtGrad", "RsqrtGrad", "Cumsum",
                 "Cumprod", "Add", "Complex", "Div", "Equal", "Greater",
                 "GreaterEqual", "Igamma", "Igammac", "Zeta", "Polygamma",
                 "Less", "LessEqual", "LogicalAnd", "LogicalOr", "Maximum",
                 "Minimum", "Mod", "FloorMod", "FloorDiv", "Mul", "NotEqual",
                 "Pow", "Sub", "SquaredDifference"):
  ops.RegisterShape(_op_name)(common_shapes.call_cpp_shape_fn)
del _op_name
def _BroadcastShape(op):
  """Shape function for binary operators: broadcast the two input shapes.

  NOTE(review): not referenced by any registration visible in this chunk
  (the binary ops above use call_cpp_shape_fn) -- possibly kept for callers
  elsewhere; confirm before removing.
  """
  lhs = op.inputs[0].get_shape()
  rhs = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(lhs, rhs)]
# These ops likewise defer shape inference to the C++ shape functions.
for _op_name in ("Betainc", "SparseDenseCwiseMul", "SparseDenseCwiseDiv",
                 "SparseDenseCwiseAdd", "AddN", "Select"):
  ops.RegisterShape(_op_name)(common_shapes.call_cpp_shape_fn)
del _op_name
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
  """Shape function for ArgMax/ArgMin.

  Delegates to the C++ shape function, which additionally needs the
  constant value of input 1 (presumably the dimension index -- confirm
  against the op definition) to compute the output shape.
  """
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
  """Shape function for reduction ops.

  The C++ shape function needs the value of input 1 (presumably the
  reduction indices -- confirm against the op definition).
  """
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
# Segment reduction ops also use the C++ shape functions.
for _op_name in ("SegmentMax", "SegmentMean", "SegmentMin", "SegmentProd",
                 "SegmentSum", "SparseSegmentMean", "SparseSegmentSqrtN",
                 "SparseSegmentSum"):
  ops.RegisterShape(_op_name)(common_shapes.call_cpp_shape_fn)
del _op_name
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
  """Shape function for sparse segment reduction gradients.

  The C++ shape function needs the value of input 3 (presumably the
  `output_dim0` scalar -- confirm against the op definition).
  """
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3])
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
  """Shape function for UnsortedSegmentSum.

  The C++ shape function needs the value of input 2 (presumably
  `num_segments` -- confirm against the op definition).
  """
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
  """Shape function for LinSpace.

  The C++ shape function needs the value of input 2 (presumably the
  element count `num` -- confirm against the op definition).
  """
  return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.
  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Example (comments on each line track the example values):
  # cast needed for SparseTensor reductions
  input_shape = to_int32(input_shape)  # [2, 3, 5, 7]
  axes = to_int32(axes)  # [1, 2]
  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axis indices into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch together the original shape and 1s at the reduced positions.
  # NOTE(review): `range` here appears to resolve to this module's tensor
  # `range` op rather than the Python builtin, since dynamic_stitch expects
  # tensor inputs -- confirm.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [range(input_rank),  # [0, 1, 2, 3]
       axes],  # [1, 2]
      [input_shape,  # [2, 3, 5, 7]
       array_ops.fill(axes_shape, 1)])  # [1, 1]
# QuantizedMatMul also delegates shape inference to the C++ shape function.
ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn)
| apache-2.0 |
xyuanmu/XX-Net | python3.8.2/Lib/encodings/euc_jis_2004.py | 816 | 1051 | #
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <perky@FreeBSD.org>
#
import _codecs_jp, codecs
import _multibytecodec as mbc
# Look up the C-implemented euc_jis_2004 codec from the built-in _codecs_jp.
codec = _codecs_jp.getcodec('euc_jis_2004')
class Codec(codecs.Codec):
    # Stateless encode/decode come straight from the C codec object.
    encode = codec.encode
    decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    # The multibyte codec machinery reads the C codec from this attribute.
    codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    # The multibyte codec machinery reads the C codec from this attribute.
    codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    # Stream variant; decoding state is handled by MultibyteStreamReader.
    codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    # Stream variant; encoding state is handled by MultibyteStreamWriter.
    codec = codec
def getregentry():
    """Return the CodecInfo entry used to register this codec."""
    # A single Codec instance serves both directions; encode/decode are
    # class attributes, so this is equivalent to constructing one per call.
    entry = Codec()
    return codecs.CodecInfo(
        name='euc_jis_2004',
        encode=entry.encode,
        decode=entry.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| bsd-2-clause |
amirm3hdi/donate-clan | account/migrations/0001_initial.py | 2 | 2112 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.10 on 2018-03-09 16:21
from __future__ import unicode_literals
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the custom ``User`` model."""

    initial = True

    # NOTE(review): no dependencies listed -- consistent with a standalone
    # custom user model, but confirm against AUTH_USER_MODEL in settings.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()])),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('member', models.CharField(blank=True, default=None, max_length=50, null=True)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_admin', models.BooleanField(default=False)),
                ('is_whatsapp', models.BooleanField(default=False)),
                ('about', models.CharField(blank=True, default=None, max_length=500, null=True)),
                ('picture', models.URLField(blank=True, default=None, null=True)),
                ('link', models.URLField(blank=True, default=None, null=True)),
                ('nationality', models.CharField(blank=True, default=None, max_length=50, null=True)),
            ],
            options={
                'verbose_name': 'User',
                'verbose_name_plural': 'Users',
                'ordering': ['-date_joined'],
                'get_latest_by': 'date_joined',
            },
        ),
    ]
| gpl-3.0 |
harshita-gupta/Harvard-FRSEM-Catalog-2016-17 | flask/lib/python2.7/site-packages/setuptools/depends.py | 41 | 6474 | import sys
import imp
import marshal
from distutils.version import StrictVersion
from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN
from setuptools.extern import six
__all__ = [
'Require', 'find_module', 'get_module_constant', 'extract_constant'
]
class Require:
    """A prerequisite to building or installing a distribution"""

    def __init__(self, name, requested_version, module, homepage='',
                 attribute=None, format=None):
        if format is None and requested_version is not None:
            # Default to strict version-string comparison.
            format = StrictVersion

        if format is not None:
            requested_version = format(requested_version)
            if attribute is None:
                attribute = '__version__'

        # Bind every constructor argument as an instance attribute in one
        # shot, then drop the spurious 'self' entry locals() captured.
        self.__dict__.update(locals())
        del self.self

    def full_name(self):
        """Return full package/distribution name, w/version"""
        if self.requested_version is not None:
            return '%s-%s' % (self.name, self.requested_version)
        return self.name

    def version_ok(self, version):
        """Is 'version' sufficiently up-to-date?"""
        # True when no version check was requested, or when the extracted
        # version is known and satisfies the requested minimum.
        return self.attribute is None or self.format is None or \
            str(version) != "unknown" and version >= self.requested_version

    def get_version(self, paths=None, default="unknown"):
        """Get version number of installed module, 'None', or 'default'

        Search 'paths' for module.  If not found, return 'None'.  If found,
        return the extracted version attribute, or 'default' if no version
        attribute was specified, or the value cannot be determined without
        importing the module.  The version is formatted according to the
        requirement's version format (if any), unless it is 'None' or the
        supplied 'default'.
        """
        if self.attribute is None:
            # Presence-only requirement: just probe for the module.
            try:
                f, p, i = find_module(self.module, paths)
                if f:
                    f.close()
                return default
            except ImportError:
                return None

        v = get_module_constant(self.module, self.attribute, default, paths)

        if v is not None and v is not default and self.format is not None:
            return self.format(v)

        return v

    def is_present(self, paths=None):
        """Return true if dependency is present on 'paths'"""
        return self.get_version(paths) is not None

    def is_current(self, paths=None):
        """Return true if dependency is present and up-to-date on 'paths'"""
        version = self.get_version(paths)
        if version is None:
            return False
        return self.version_ok(version)
def _iter_code(code):
    """Yield '(op,arg)' pair for each operation in code object 'code'

    NOTE(review): this walks the pre-3.6 CPython bytecode layout (1 opcode
    byte plus 2 argument bytes; `ptr += 3` below) -- it does not understand
    the 3.6+ fixed-width "wordcode" format.  Confirm target interpreter.
    """

    from array import array
    from dis import HAVE_ARGUMENT, EXTENDED_ARG

    bytes = array('b', code.co_code)
    eof = len(code.co_code)

    ptr = 0
    extended_arg = 0

    while ptr < eof:

        op = bytes[ptr]

        if op >= HAVE_ARGUMENT:

            # Little-endian 16-bit argument, plus any pending EXTENDED_ARG.
            arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg
            ptr += 3

            if op == EXTENDED_ARG:
                # Accumulate the high bits for the next instruction.
                long_type = six.integer_types[-1]
                extended_arg = arg * long_type(65536)
                continue

        else:
            arg = None
            ptr += 1

        yield op, arg
def find_module(module, paths=None):
    """Just like 'imp.find_module()', but with package support

    Walks dotted names one component at a time, descending into package
    directories.  NOTE(review): relies on the `imp` module, which is
    deprecated since 3.4 and removed in 3.12.
    """

    parts = module.split('.')

    while parts:
        part = parts.pop(0)
        f, path, (suffix, mode, kind) = info = imp.find_module(part, paths)

        if kind == PKG_DIRECTORY:
            # Descend: remaining parts are looked up inside this package;
            # a bare package name resolves to its __init__.
            parts = parts or ['__init__']
            paths = [path]

        elif parts:
            # A non-package was found but dotted components remain.
            raise ImportError("Can't find %r in %s" % (parts, module))

    return info
def get_module_constant(module, symbol, default=-1, paths=None):
    """Find 'module' by searching 'paths', and extract 'symbol'

    Return 'None' if 'module' does not exist on 'paths', or it does not define
    'symbol'.  If the module defines 'symbol' as a constant, return the
    constant.  Otherwise, return 'default'."""

    try:
        f, path, (suffix, mode, kind) = find_module(module, paths)
    except ImportError:
        # Module doesn't exist
        return None

    try:
        if kind == PY_COMPILED:
            # NOTE(review): an 8-byte pyc header (magic & date) matches old
            # CPython; newer versions use a larger header -- confirm.
            f.read(8)  # skip magic & date
            code = marshal.load(f)
        elif kind == PY_FROZEN:
            code = imp.get_frozen_object(module)
        elif kind == PY_SOURCE:
            code = compile(f.read(), path, 'exec')
        else:
            # Not something we can parse; we'll have to import it.  :(
            if module not in sys.modules:
                imp.load_module(module, f, path, (suffix, mode, kind))
            return getattr(sys.modules[module], symbol, None)
    finally:
        if f:
            f.close()

    return extract_constant(code, symbol, default)
def extract_constant(code, symbol, default=-1):
    """Extract the constant value of 'symbol' from 'code'

    If the name 'symbol' is bound to a constant value by the Python code
    object 'code', return that value.  If 'symbol' is bound to an expression,
    return 'default'.  Otherwise, return 'None'.

    Return value is based on the first assignment to 'symbol'.  'symbol' must
    be a global, or at least a non-"fast" local in the code block.  That is,
    only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
    must be present in 'code.co_names'.
    """

    if symbol not in code.co_names:
        # name's not there, can't possibly be an assignment
        return None

    name_idx = list(code.co_names).index(symbol)

    # NOTE(review): hard-coded CPython opcode numbers; these are stable on
    # CPython 2/3 but are an implementation detail, not a language guarantee.
    STORE_NAME = 90
    STORE_GLOBAL = 97
    LOAD_CONST = 100

    const = default

    for op, arg in _iter_code(code):

        if op == LOAD_CONST:
            # Remember the most recently loaded constant.
            const = code.co_consts[arg]
        elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
            # First store to 'symbol': report whatever preceded it.
            return const
        else:
            # Any other instruction invalidates the pending constant.
            const = default
def _update_globals():
    """
    Patch the globals to remove the objects not available on some platforms.

    XXX it'd be better to test assertions about bytecode instead.
    """

    # Jython/IronPython do not expose CPython bytecode, so the
    # bytecode-scanning helpers cannot work there.
    if not sys.platform.startswith('java') and sys.platform != 'cli':
        return
    incompatible = 'extract_constant', 'get_module_constant'
    for name in incompatible:
        del globals()[name]
        __all__.remove(name)


_update_globals()
| mit |
aspidites/django | tests/template_tests/syntax_tests/test_exceptions.py | 513 | 2099 | from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
from .test_extends import inheritance_templates
class ExceptionsTests(SimpleTestCase):
    """Error cases for template inheritance: each test renders a template
    that must raise TemplateDoesNotExist or TemplateSyntaxError."""

    @setup({'exception01': "{% extends 'nonexistent' %}"})
    def test_exception01(self):
        """
        Raise exception for invalid template name
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.render_to_string('exception01')

    @setup({'exception02': '{% extends nonexistent %}'})
    def test_exception02(self):
        """
        Raise exception for invalid variable template name
        """
        # The raised type depends on how the engine renders an unresolvable
        # variable (string_if_invalid substitutes a bogus template name).
        if self.engine.string_if_invalid:
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.render_to_string('exception02')
        else:
            with self.assertRaises(TemplateSyntaxError):
                self.engine.render_to_string('exception02')

    @setup(
        {'exception03': "{% extends 'inheritance01' %}"
                        "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
        inheritance_templates,
    )
    def test_exception03(self):
        """
        Raise exception for extra {% extends %} tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception03')

    @setup(
        {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
        inheritance_templates,
    )
    def test_exception04(self):
        """
        Raise exception for custom tags used in child with {% load %} tag in parent, not in child
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception04')

    @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
    def test_exception05(self):
        """
        Raise exception for block.super used in base template
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('exception05')
| bsd-3-clause |
matthiascy/three.js | utils/converters/msgpack/msgpack/fallback.py | 641 | 26403 | """Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
# Python 2/3 compatibility shims used throughout this module.
if sys.version_info[0] == 3:
    PY3 = True
    int_types = int
    Unicode = str
    xrange = range

    def dict_iteritems(d):
        return d.items()
else:
    PY3 = False
    int_types = (int, long)
    Unicode = unicode

    def dict_iteritems(d):
        return d.iteritems()


if hasattr(sys, 'pypy_version_info'):
    # cStringIO is slow on PyPy, StringIO is faster.  However: PyPy's own
    # StringBuilder is fastest.
    from __pypy__ import newlist_hint
    from __pypy__.builders import StringBuilder
    USING_STRINGBUILDER = True

    # Minimal write-only StringIO replacement backed by PyPy's StringBuilder.
    class StringIO(object):
        def __init__(self, s=b''):
            if s:
                self.builder = StringBuilder(len(s))
                self.builder.append(s)
            else:
                self.builder = StringBuilder()

        def write(self, s):
            self.builder.append(s)

        def getvalue(self):
            return self.builder.build()
else:
    USING_STRINGBUILDER = False
    from io import BytesIO as StringIO
    # On CPython, list pre-sizing hints are a no-op.
    newlist_hint = lambda size: []
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
# Execution modes for Unpacker._fb_unpack.
EX_SKIP = 0              # decode and discard the value
EX_CONSTRUCT = 1         # decode and build Python objects
EX_READ_ARRAY_HEADER = 2  # expect an array header, return its length
EX_READ_MAP_HEADER = 3    # expect a map header, return its length

# Wire-type categories returned by Unpacker._read_header.
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5

DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
    """
    Unpack one object from the file-like `stream`.

    Raises `ExtraData` when bytes remain after the first complete object.
    See :class:`Unpacker` for the supported keyword options.
    """
    unpacker = Unpacker(stream, **kwargs)
    obj = unpacker._fb_unpack()
    if not unpacker._fb_got_extradata():
        return obj
    raise ExtraData(obj, unpacker._fb_get_extradata())
def unpackb(packed, **kwargs):
    """
    Unpack one object from the bytes object `packed`.

    Raises `UnpackValueError` when `packed` is truncated and `ExtraData`
    when it contains trailing bytes.  See :class:`Unpacker` for options.
    """
    unpacker = Unpacker(None, **kwargs)
    unpacker.feed(packed)
    try:
        obj = unpacker._fb_unpack()
    except OutOfData:
        raise UnpackValueError("Data is not enough.")
    if unpacker._fb_got_extradata():
        raise ExtraData(obj, unpacker._fb_get_extradata())
    return obj
class Unpacker(object):
    """Streaming unpacker.

    `file_like` is a file-like object having a `.read(n)` method.
    When `Unpacker` is initialized with a `file_like`, `.feed()` is not
    usable.

    `read_size` is used for `file_like.read(read_size)`.

    If `use_list` is True (default), msgpack lists are deserialized to Python
    lists.  Otherwise they are deserialized to tuples.

    `object_hook` is the same as in simplejson.  If it is not None, it should
    be callable and Unpacker calls it with a dict argument after deserializing
    a map.

    `object_pairs_hook` is the same as in simplejson.  If it is not None, it
    should be callable and Unpacker calls it with a list of key-value pairs
    after deserializing a map.

    `ext_hook` is a callback for the ext (user defined) type.  It is called
    with two arguments: (code, bytes).  default: `msgpack.ExtType`

    `encoding` is the encoding used for decoding msgpack bytes.  If it is
    None (default), msgpack bytes are deserialized to Python bytes.

    `unicode_errors` is used for decoding bytes.

    `max_buffer_size` limits the buffer size.  0 means INT_MAX (default).
    Raises `BufferFull` exception when it is insufficient.
    You should set this parameter when unpacking data from an untrusted
    source.

    example of streaming deserialization from file-like object::

        unpacker = Unpacker(file_like)
        for o in unpacker:
            do_something(o)

    example of streaming deserialization from socket::

        unpacker = Unpacker()
        while 1:
            buf = sock.recv(1024*2)
            if not buf:
                break
            unpacker.feed(buf)
            for o in unpacker:
                do_something(o)
    """

    def __init__(self, file_like=None, read_size=0, use_list=True,
                 object_hook=None, object_pairs_hook=None, list_hook=None,
                 encoding=None, unicode_errors='strict', max_buffer_size=0,
                 ext_hook=ExtType):
        if file_like is None:
            self._fb_feeding = True
        else:
            if not callable(file_like.read):
                raise TypeError("`file_like.read` must be callable")
            self.file_like = file_like
            self._fb_feeding = False
        # Internal buffer state: list of byte chunks, plus the offset within
        # the current chunk (_fb_buf_o), the current chunk index (_fb_buf_i),
        # and the total buffered byte count (_fb_buf_n).
        self._fb_buffers = []
        self._fb_buf_o = 0
        self._fb_buf_i = 0
        self._fb_buf_n = 0
        self._max_buffer_size = max_buffer_size or 2**31-1
        if read_size > self._max_buffer_size:
            raise ValueError("read_size must be smaller than max_buffer_size")
        self._read_size = read_size or min(self._max_buffer_size, 2048)
        self._encoding = encoding
        self._unicode_errors = unicode_errors
        self._use_list = use_list
        self._list_hook = list_hook
        self._object_hook = object_hook
        self._object_pairs_hook = object_pairs_hook
        self._ext_hook = ext_hook

        if list_hook is not None and not callable(list_hook):
            raise TypeError('`list_hook` is not callable')
        if object_hook is not None and not callable(object_hook):
            raise TypeError('`object_hook` is not callable')
        if object_pairs_hook is not None and not callable(object_pairs_hook):
            raise TypeError('`object_pairs_hook` is not callable')
        if object_hook is not None and object_pairs_hook is not None:
            raise TypeError("object_pairs_hook and object_hook are mutually "
                            "exclusive")
        if not callable(ext_hook):
            raise TypeError("`ext_hook` is not callable")

    def feed(self, next_bytes):
        # Append raw bytes to the internal buffer (feed mode only).
        if isinstance(next_bytes, array.array):
            next_bytes = next_bytes.tostring()
        elif isinstance(next_bytes, bytearray):
            next_bytes = bytes(next_bytes)
        assert self._fb_feeding
        if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
            raise BufferFull
        self._fb_buf_n += len(next_bytes)
        self._fb_buffers.append(next_bytes)

    def _fb_consume(self):
        # Discard fully-consumed chunks and trim the partially-consumed one,
        # resetting the read position to the new buffer start.
        self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
        if self._fb_buffers:
            self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
        self._fb_buf_o = 0
        self._fb_buf_i = 0
        self._fb_buf_n = sum(map(len, self._fb_buffers))

    def _fb_got_extradata(self):
        # True when unread bytes remain, either buffered or in file_like.
        if self._fb_buf_i != len(self._fb_buffers):
            return True
        if self._fb_feeding:
            return False
        if not self.file_like:
            return False
        if self.file_like.read(1):
            # NOTE(review): this probing read consumes one byte of the
            # underlying stream.
            return True
        return False

    def __iter__(self):
        return self

    def read_bytes(self, n):
        return self._fb_read(n)

    def _fb_rollback(self):
        # Rewind the read position to the buffer start (used on OutOfData).
        self._fb_buf_i = 0
        self._fb_buf_o = 0

    def _fb_get_extradata(self):
        # Return all not-yet-consumed bytes as one bytes object.
        bufs = self._fb_buffers[self._fb_buf_i:]
        if bufs:
            bufs[0] = bufs[0][self._fb_buf_o:]
        return b''.join(bufs)

    def _fb_read(self, n, write_bytes=None):
        """Read exactly `n` buffered bytes, refilling from file_like if
        needed; raises OutOfData (after rolling back) when short."""
        buffs = self._fb_buffers
        # Fast path: the request fits inside the current chunk.
        if (write_bytes is None and self._fb_buf_i < len(buffs) and
                self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
            self._fb_buf_o += n
            return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]

        ret = b''
        while len(ret) != n:
            if self._fb_buf_i == len(buffs):
                if self._fb_feeding:
                    break
                tmp = self.file_like.read(self._read_size)
                if not tmp:
                    break
                buffs.append(tmp)
                continue
            sliced = n - len(ret)
            ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
            self._fb_buf_o += sliced

            if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
                self._fb_buf_o = 0
                self._fb_buf_i += 1

        if len(ret) != n:
            self._fb_rollback()
            raise OutOfData

        if write_bytes is not None:
            write_bytes(ret)
        return ret

    def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
        """Decode one msgpack header byte (plus payload/length bytes) and
        return (type-category, length, immediate-object)."""
        typ = TYPE_IMMEDIATE
        n = 0
        obj = None
        c = self._fb_read(1, write_bytes)
        b = ord(c)
        if b & 0b10000000 == 0:  # positive fixint
            obj = b
        elif b & 0b11100000 == 0b11100000:  # negative fixint
            obj = struct.unpack("b", c)[0]
        elif b & 0b11100000 == 0b10100000:  # fixstr
            n = b & 0b00011111
            obj = self._fb_read(n, write_bytes)
            typ = TYPE_RAW
        elif b & 0b11110000 == 0b10010000:  # fixarray
            n = b & 0b00001111
            typ = TYPE_ARRAY
        elif b & 0b11110000 == 0b10000000:  # fixmap
            n = b & 0b00001111
            typ = TYPE_MAP
        elif b == 0xc0:  # nil
            obj = None
        elif b == 0xc2:  # false
            obj = False
        elif b == 0xc3:  # true
            obj = True
        elif b == 0xc4:  # bin 8
            typ = TYPE_BIN
            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc5:  # bin 16
            typ = TYPE_BIN
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc6:  # bin 32
            typ = TYPE_BIN
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc7:  # ext 8
            typ = TYPE_EXT
            L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xc8:  # ext 16
            typ = TYPE_EXT
            L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xc9:  # ext 32
            typ = TYPE_EXT
            L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xca:  # float 32
            obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
        elif b == 0xcb:  # float 64
            obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
        elif b == 0xcc:  # uint 8
            obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
        elif b == 0xcd:  # uint 16
            obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
        elif b == 0xce:  # uint 32
            obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
        elif b == 0xcf:  # uint 64
            obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
        elif b == 0xd0:  # int 8
            obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
        elif b == 0xd1:  # int 16
            obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
        elif b == 0xd2:  # int 32
            obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
        elif b == 0xd3:  # int 64
            obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
        elif b == 0xd4:  # fixext 1
            typ = TYPE_EXT
            n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
        elif b == 0xd5:  # fixext 2
            typ = TYPE_EXT
            n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
        elif b == 0xd6:  # fixext 4
            typ = TYPE_EXT
            n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
        elif b == 0xd7:  # fixext 8
            typ = TYPE_EXT
            n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
        elif b == 0xd8:  # fixext 16
            typ = TYPE_EXT
            n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
        elif b == 0xd9:  # str 8
            typ = TYPE_RAW
            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xda:  # str 16
            typ = TYPE_RAW
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xdb:  # str 32
            typ = TYPE_RAW
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xdc:  # array 16
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            typ = TYPE_ARRAY
        elif b == 0xdd:  # array 32
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            typ = TYPE_ARRAY
        elif b == 0xde:  # map 16
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            typ = TYPE_MAP
        elif b == 0xdf:  # map 32
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            typ = TYPE_MAP
        else:
            raise UnpackValueError("Unknown header: 0x%x" % b)
        return typ, n, obj

    def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
        """Recursively decode one value according to `execute` mode."""
        typ, n, obj = self._read_header(execute, write_bytes)

        if execute == EX_READ_ARRAY_HEADER:
            if typ != TYPE_ARRAY:
                raise UnpackValueError("Expected array")
            return n
        if execute == EX_READ_MAP_HEADER:
            if typ != TYPE_MAP:
                raise UnpackValueError("Expected map")
            return n
        # TODO should we eliminate the recursion?
        if typ == TYPE_ARRAY:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call `list_hook`
                    self._fb_unpack(EX_SKIP, write_bytes)
                return
            ret = newlist_hint(n)
            for i in xrange(n):
                ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
            if self._list_hook is not None:
                ret = self._list_hook(ret)
            # TODO is the interaction between `list_hook` and `use_list` ok?
            return ret if self._use_list else tuple(ret)
        if typ == TYPE_MAP:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call hooks
                    self._fb_unpack(EX_SKIP, write_bytes)
                    self._fb_unpack(EX_SKIP, write_bytes)
                return
            if self._object_pairs_hook is not None:
                ret = self._object_pairs_hook(
                    (self._fb_unpack(EX_CONSTRUCT, write_bytes),
                     self._fb_unpack(EX_CONSTRUCT, write_bytes))
                    for _ in xrange(n))
            else:
                ret = {}
                for _ in xrange(n):
                    key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
                    ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
                if self._object_hook is not None:
                    ret = self._object_hook(ret)
            return ret
        if execute == EX_SKIP:
            return
        if typ == TYPE_RAW:
            # Only decode str payloads when an encoding was configured.
            if self._encoding is not None:
                obj = obj.decode(self._encoding, self._unicode_errors)
            return obj
        if typ == TYPE_EXT:
            return self._ext_hook(n, obj)
        if typ == TYPE_BIN:
            return obj
        assert typ == TYPE_IMMEDIATE
        return obj

    def next(self):
        try:
            ret = self._fb_unpack(EX_CONSTRUCT, None)
            self._fb_consume()
            return ret
        except OutOfData:
            raise StopIteration
    __next__ = next

    def skip(self, write_bytes=None):
        # Decode and discard one value.
        self._fb_unpack(EX_SKIP, write_bytes)
        self._fb_consume()

    def unpack(self, write_bytes=None):
        ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
        self._fb_consume()
        return ret

    def read_array_header(self, write_bytes=None):
        ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
        self._fb_consume()
        return ret

    def read_map_header(self, write_bytes=None):
        ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
        self._fb_consume()
        return ret
class Packer(object):
    """
    MessagePack Packer
    usage:
        packer = Packer()
        astream.write(packer.pack(a))
        astream.write(packer.pack(b))
    Packer's constructor has some keyword arguments:
    :param callable default:
        Convert user type to builtin type that Packer supports.
        See also simplejson's document.
    :param str encoding:
        Convert unicode to bytes with this encoding. (default: 'utf-8')
    :param str unicode_errors:
        Error handler for encoding unicode. (default: 'strict')
    :param bool use_single_float:
        Use single precision float type for float. (default: False)
    :param bool autoreset:
        Reset buffer after each pack and return it's content as `bytes`. (default: True).
        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enable str8 type for unicode.
    """
    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
                 use_single_float=False, autoreset=True, use_bin_type=False):
        self._use_float = use_single_float
        self._autoreset = autoreset
        self._use_bin_type = use_bin_type
        self._encoding = encoding
        self._unicode_errors = unicode_errors
        # Serialized bytes accumulate here between resets.
        self._buffer = StringIO()
        if default is not None:
            if not callable(default):
                raise TypeError("default must be callable")
        self._default = default
    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
        # Serialize one object into self._buffer.  The while-loop exists only
        # so the `default` hook can substitute obj once and retry the type
        # dispatch; every other path returns out of the loop.
        default_used = False
        while True:
            if nest_limit < 0:
                raise PackValueError("recursion limit exceeded")
            # nil
            if obj is None:
                return self._buffer.write(b"\xc0")
            # bool: 0xc3 = true, 0xc2 = false (checked before int since
            # bool is a subclass of int)
            if isinstance(obj, bool):
                if obj:
                    return self._buffer.write(b"\xc3")
                return self._buffer.write(b"\xc2")
            # integers: pick the smallest fixint / uint8-64 / int8-64 form
            if isinstance(obj, int_types):
                if 0 <= obj < 0x80:
                    return self._buffer.write(struct.pack("B", obj))
                if -0x20 <= obj < 0:
                    return self._buffer.write(struct.pack("b", obj))
                if 0x80 <= obj <= 0xff:
                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
                if -0x80 <= obj < 0:
                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
                if 0xff < obj <= 0xffff:
                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
                if -0x8000 <= obj < -0x80:
                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
                if 0xffff < obj <= 0xffffffff:
                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
                if -0x80000000 <= obj < -0x8000:
                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
                if 0xffffffff < obj <= 0xffffffffffffffff:
                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
                if -0x8000000000000000 <= obj < -0x80000000:
                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
                raise PackValueError("Integer value out of range")
            # bin 8/16/32 -- only when spec-2.0 bin types are enabled
            if self._use_bin_type and isinstance(obj, bytes):
                n = len(obj)
                if n <= 0xff:
                    self._buffer.write(struct.pack('>BB', 0xc4, n))
                elif n <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xc5, n))
                elif n <= 0xffffffff:
                    self._buffer.write(struct.pack(">BI", 0xc6, n))
                else:
                    raise PackValueError("Bytes is too large")
                return self._buffer.write(obj)
            # str family (also handles raw bytes when bin types are disabled)
            if isinstance(obj, (Unicode, bytes)):
                if isinstance(obj, Unicode):
                    if self._encoding is None:
                        raise TypeError(
                            "Can't encode unicode string: "
                            "no encoding is specified")
                    obj = obj.encode(self._encoding, self._unicode_errors)
                n = len(obj)
                if n <= 0x1f:
                    self._buffer.write(struct.pack('B', 0xa0 + n))
                elif self._use_bin_type and n <= 0xff:
                    # str8 (0xd9) only exists in spec 2.0, so it is gated on
                    # use_bin_type for backward compatibility
                    self._buffer.write(struct.pack('>BB', 0xd9, n))
                elif n <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xda, n))
                elif n <= 0xffffffff:
                    self._buffer.write(struct.pack(">BI", 0xdb, n))
                else:
                    raise PackValueError("String is too large")
                return self._buffer.write(obj)
            # float32 (opt-in via use_single_float) or float64
            if isinstance(obj, float):
                if self._use_float:
                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
            # ext: fixext 1/2/4/8/16 for exact power-of-two payloads,
            # otherwise ext 8/16/32 with an explicit length
            if isinstance(obj, ExtType):
                code = obj.code
                data = obj.data
                assert isinstance(code, int)
                assert isinstance(data, bytes)
                L = len(data)
                if L == 1:
                    self._buffer.write(b'\xd4')
                elif L == 2:
                    self._buffer.write(b'\xd5')
                elif L == 4:
                    self._buffer.write(b'\xd6')
                elif L == 8:
                    self._buffer.write(b'\xd7')
                elif L == 16:
                    self._buffer.write(b'\xd8')
                elif L <= 0xff:
                    self._buffer.write(struct.pack(">BB", 0xc7, L))
                elif L <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xc8, L))
                else:
                    self._buffer.write(struct.pack(">BI", 0xc9, L))
                # ext type code is a signed byte on the wire
                self._buffer.write(struct.pack("b", code))
                self._buffer.write(data)
                return
            # array: header then each element, recursively
            if isinstance(obj, (list, tuple)):
                n = len(obj)
                self._fb_pack_array_header(n)
                for i in xrange(n):
                    self._pack(obj[i], nest_limit - 1)
                return
            # map
            if isinstance(obj, dict):
                return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
                                               nest_limit - 1)
            # Unknown type: let the user-supplied `default` hook convert obj
            # once, then loop back and re-dispatch on the converted value.
            if not default_used and self._default is not None:
                obj = self._default(obj)
                default_used = 1
                continue
            raise TypeError("Cannot serialize %r" % obj)
    def pack(self, obj):
        # Pack one object; with autoreset the buffer contents are returned
        # and the buffer cleared for the next call.
        self._pack(obj)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            # The StringBuilder-backed buffer is write-only, so re-seed a
            # fresh one with the bytes produced so far.
            self._buffer = StringIO(ret)
        return ret
    def pack_map_pairs(self, pairs):
        # Pack an iterable of (key, value) pairs as a map.
        self._fb_pack_map_pairs(len(pairs), pairs)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret
    def pack_array_header(self, n):
        # Emit only an array header; callers then pack n elements themselves.
        if n >= 2**32:
            raise ValueError
        self._fb_pack_array_header(n)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret
    def pack_map_header(self, n):
        # Emit only a map header; callers then pack n key/value pairs.
        if n >= 2**32:
            raise ValueError
        self._fb_pack_map_header(n)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret
    def pack_ext_type(self, typecode, data):
        # Pack an application-defined ext value without wrapping it in
        # ExtType; typecode must fit an unsigned 7-bit range here.
        if not isinstance(typecode, int):
            raise TypeError("typecode must have int type.")
        if not 0 <= typecode <= 127:
            raise ValueError("typecode should be 0-127")
        if not isinstance(data, bytes):
            raise TypeError("data must have bytes type")
        L = len(data)
        if L > 0xffffffff:
            raise ValueError("Too large data")
        if L == 1:
            self._buffer.write(b'\xd4')
        elif L == 2:
            self._buffer.write(b'\xd5')
        elif L == 4:
            self._buffer.write(b'\xd6')
        elif L == 8:
            self._buffer.write(b'\xd7')
        elif L == 16:
            self._buffer.write(b'\xd8')
        elif L <= 0xff:
            self._buffer.write(b'\xc7' + struct.pack('B', L))
        elif L <= 0xffff:
            self._buffer.write(b'\xc8' + struct.pack('>H', L))
        else:
            self._buffer.write(b'\xc9' + struct.pack('>I', L))
        self._buffer.write(struct.pack('B', typecode))
        self._buffer.write(data)
    def _fb_pack_array_header(self, n):
        # fixarray (<=15) / array16 / array32
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x90 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xdc, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdd, n))
        raise PackValueError("Array is too large")
    def _fb_pack_map_header(self, n):
        # fixmap (<=15) / map16 / map32
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x80 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xde, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdf, n))
        raise PackValueError("Dict is too large")
    def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
        # Map header followed by alternating packed keys and values.
        self._fb_pack_map_header(n)
        for (k, v) in pairs:
            self._pack(k, nest_limit - 1)
            self._pack(v, nest_limit - 1)
    def bytes(self):
        # Current buffer contents (useful with autoreset=False).
        return self._buffer.getvalue()
    def reset(self):
        # Discard any buffered output.
        self._buffer = StringIO()
| mit |
lsk112233/Clone-test-repo | boxes/migrations/0001_initial.py | 10 | 1607 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import markupfield.fields
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
    # Initial schema for the 'boxes' app: a single Box model keyed by a
    # unique slug label, whose content is a markupfield with a
    # machine-maintained rendered companion column.
    dependencies = [
        # Box holds FKs to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Box',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now, blank=True)),
                ('updated', models.DateTimeField(blank=True)),
                ('label', models.SlugField(max_length=100, unique=True)),
                ('content', markupfield.fields.MarkupField(rendered_field=True)),
                ('content_markup_type', models.CharField(max_length=30, choices=[('', '--'), ('html', 'html'), ('plain', 'plain'), ('markdown', 'markdown'), ('restructuredtext', 'restructuredtext')], default='restructuredtext')),
                # Populated automatically by MarkupField; never edited directly.
                ('_content_rendered', models.TextField(editable=False)),
                ('creator', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, related_name='boxes_box_creator', blank=True)),
                ('last_modified_by', models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, related_name='boxes_box_modified', blank=True)),
            ],
            options={
                'verbose_name_plural': 'boxes',
            },
            bases=(models.Model,),
        ),
    ]
| apache-2.0 |
caesar2164/edx-platform | lms/djangoapps/course_api/tests/mixins.py | 58 | 1069 | """
Common mixins for Course API Tests
"""
from datetime import datetime
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import ToyCourseFactory
TEST_PASSWORD = u'edx'
class CourseApiFactoryMixin(object):
    """
    Mixin providing helpers to build the test course and test users
    shared by the Course API test cases.
    """
    @staticmethod
    def create_course(**kwargs):
        """
        Build the standard toy course (fixed end/enrollment window) used
        by these tests; extra factory arguments may be passed through.
        """
        return ToyCourseFactory.create(
            emit_signals=True,
            end=datetime(2015, 9, 19, 18, 0, 0),
            enrollment_start=datetime(2015, 6, 15, 0, 0, 0),
            enrollment_end=datetime(2015, 7, 15, 0, 0, 0),
            **kwargs
        )
    @staticmethod
    def create_user(username, is_staff):
        """
        Build a user with a derived email address and the shared test
        password, optionally marked as staff.
        """
        email = u'{}@example.com'.format(username)
        return UserFactory(
            username=username,
            email=email,
            password=TEST_PASSWORD,
            is_staff=is_staff,
        )
| agpl-3.0 |
wpstudio/blazecoin | contrib/spendfrom/spendfrom.py | 1 | 10054 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    amount = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(amount)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Everything else (Linux, BSDs, ...) uses the dot-directory convention.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 only: ConfigParser was renamed configparser in Python 3.
    from ConfigParser import SafeConfigParser
    # bitcoin.conf has no [section] headers, which SafeConfigParser requires.
    # This shim injects a fake "[all]" header as the first line read, and
    # strips inline '#' comments from every subsequent line.
    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Return the fake header exactly once, then fall through
                # to the real file on later calls.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    # Drop the comment, keep the setting, re-add the newline.
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    # All settings live under the injected "all" section.
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server"""
    # 'testnet' is stored as a 0/1 string in the config file; coerce to bool.
    testnet = int(config.get('testnet', '0')) > 0
    config.setdefault('rpcport', 75413 if testnet else 55413)
    connect = "http://%s:%s@127.0.0.1:%s" % (
        config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so issue one RPC both to surface
        # connection errors and to check the node's testnet setting matches.
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Ensure the wallet is usable, prompting for a passphrase if locked.

    Returns True when the wallet is unencrypted, or unlocked past "now".
    """
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    unlocked_until = int(info['unlocked_until'])
    if unlocked_until <= time.time():
        # Locked (or the unlock window expired): ask the user and unlock
        # briefly (5 seconds) for the upcoming signing call.
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Summarize spendable funds per address.

    Returns {address: {"total": value, "outputs": [unspent...],
    "account": name}} covering every ordinary unspent output.
    """
    account_by_address = dict()
    for entry in bitcoind.listreceivedbyaddress(0):
        account_by_address[entry["address"]] = entry["account"]
    summary = dict()
    for unspent_output in bitcoind.listunspent(0):
        # listunspent doesn't report addresses, so decode the funding tx.
        funding_tx = bitcoind.getrawtransaction(unspent_output['txid'], 1)
        txout = funding_tx["vout"][unspent_output['vout']]
        script = txout["scriptPubKey"]
        # Only ordinary pay-to-bitcoin-address or pay-to-script-hash
        # outputs are handled; anything exotic is skipped.
        if script["type"] not in ("pubkeyhash", "scripthash"):
            continue
        address = script["addresses"][0]
        if address in summary:
            summary[address]["total"] += txout["value"]
            summary[address]["outputs"].append(unspent_output)
        else:
            summary[address] = {
                "total": txout["value"],
                "outputs": [unspent_output],
                "account": account_by_address.get(address, "")
            }
    return summary
def select_coins(needed, inputs):
    """Greedily take inputs, in order, until `needed` is covered.

    Returns (selected_outpoints, change) where change = gathered - needed;
    change is negative when the inputs ran out before covering `needed`.
    """
    # Feel free to improve this, this is good enough for my simple needs:
    selected = []
    gathered = Decimal("0.0")
    for candidate in inputs:
        if gathered >= needed:
            break
        selected.append({"txid": candidate["txid"], "vout": candidate["vout"]})
        gathered += candidate["amount"]
    return (selected, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction.

    Spends `amount` + `fee` gathered from `fromaddresses` to `toaddress`,
    routing any non-trivial change back to the last source address.
    Returns the signed transaction as hex; exits on insufficient funds or
    signing failure.
    """
    all_coins = list_available(bitcoind)
    needed = amount + fee
    potential_inputs = []
    total_available = Decimal("0.0")
    for source in fromaddresses:
        if source in all_coins:
            potential_inputs.extend(all_coins[source]["outputs"])
            total_available += all_coins[source]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed))
        sys.exit(1)
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal
    # numbers.  Instead of wrestling with getting json.dumps() (used by
    # jsonrpc) to encode Decimals, amounts are cast to float before being
    # sent to bitcoind.
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        outputs[change_address] = outputs.get(change_address, 0.0) + float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed = bitcoind.signrawtransaction(rawtx)
    if not signed["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    return signed["hex"]
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the previous outputs spent by txinfo's inputs."""
    total = Decimal("0.0")
    for txin in txinfo['vin']:
        # Look up the funding transaction to find the value being spent.
        prev_tx = bitcoind.getrawtransaction(txin['txid'], 1)
        spent_output = prev_tx['vout'][txin['vout']]
        total += spent_output['value']
    return total
def compute_amount_out(txinfo):
    """Sum the values of all outputs of a decoded transaction."""
    return sum((txout['value'] for txout in txinfo['vout']), Decimal("0.0"))
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Abort (exit 1) if the transaction's implied fee looks unreasonable.

    Decodes txdata_hex via the node, computes fee = inputs - outputs, and
    rejects the transaction when the fee exceeds max_fee, or when it pays
    less than BASE_FEE while being large (> 1000 bytes) or moving a tiny
    total amount.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # BUG FIX: `fee` was referenced below but never assigned, raising
        # NameError for any large or tiny-amount transaction.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2  # two hex characters per byte
        kb = tx_size/1000 # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point: list spendable funds per address, or
    (with --amount) build, sanity-check and optionally broadcast a
    transaction."""
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    # With no --amount, just report spendable funds per address.
    if options.amount is None:
        address_summary = list_available(bitcoind)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Cap the sanity-check fee at 1% of the amount being sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
if __name__ == '__main__':
    main()
| mit |
archf/ansible | lib/ansible/module_utils/facts/compat.py | 31 | 3161 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.namespace import PrefixFactNamespace
from ansible.module_utils.facts import default_collectors
from ansible.module_utils.facts import ansible_collector
def get_all_facts(module):
    '''Compat shim for the ansible 2.2/2.3 module_utils.facts.get_all_facts API.

    ``module`` must be an AnsibleModule instance whose params include
    'gather_subset'.  Returns a dict mapping bare fact names ('default_ipv4'
    with no 'ansible_' namespace) to fact values.'''
    return ansible_facts(module, gather_subset=module.params['gather_subset'])
def ansible_facts(module, gather_subset=None):
    '''Compat shim for ansible 2.0/2.2/2.3 module_utils.facts.ansible_facts.

    2.2/2.3 pass a gather_subset argument while 2.0/2.1 do not, so it is
    optional here and falls back to the module's own 'gather_subset' param
    (defaulting to ['all']).  ``module`` should be an AnsibleModule.
    Returns a dict mapping bare fact names ('default_ipv4' with no
    'ansible_' namespace) to fact values.'''
    subset = gather_subset or module.params.get('gather_subset', ['all'])
    timeout = module.params.get('gather_timeout', 10)
    filter_spec = module.params.get('filter', '*')
    # Facts that are always collected regardless of the requested subset.
    minimal_gather_subset = frozenset(['apparmor', 'caps', 'cmdline', 'date_time',
                                       'distribution', 'dns', 'env', 'fips', 'local', 'lsb',
                                       'pkg_mgr', 'platform', 'python', 'selinux',
                                       'service_mgr', 'ssh_pub_keys', 'user'])
    # Empty prefix: strip the 'ansible_' namespace from collected fact names.
    namespace = PrefixFactNamespace(namespace_name='ansible',
                                    prefix='')
    collector = ansible_collector.get_ansible_collector(
        all_collector_classes=default_collectors.collectors,
        namespace=namespace,
        filter_spec=filter_spec,
        gather_subset=subset,
        gather_timeout=timeout,
        minimal_gather_subset=minimal_gather_subset)
    return collector.collect(module=module)
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/OpenGL/WGL/NV/vertex_array_range.py | 8 | 4786 | '''OpenGL extension NV.vertex_array_range
This module customises the behaviour of the
OpenGL.raw.WGL.NV.vertex_array_range to provide a more
Python-friendly API
Overview (from the spec)
The goal of this extension is to permit extremely high vertex
processing rates via OpenGL vertex arrays even when the CPU lacks
the necessary data movement bandwidth to keep up with the rate
at which the vertex engine can consume vertices. CPUs can keep
up if they can just pass vertex indices to the hardware and
let the hardware "pull" the actual vertex data via Direct Memory
Access (DMA). Unfortunately, the current OpenGL 1.1 vertex array
functionality has semantic constraints that make such an approach
hard. Hence, the vertex array range extension.
This extension provides a mechanism for deferring the pulling of
vertex array elements to facilitate DMAed pulling of vertices for
fast, efficient vertex array transfers. The OpenGL client need only
pass vertex indices to the hardware which can DMA the actual index's
vertex data directly out of the client address space.
The OpenGL 1.1 vertex array functionality specifies a fairly strict
coherency model for when OpenGL extracts vertex data from a vertex
array and when the application can update the in memory
vertex array data. The OpenGL 1.1 specification says "Changes
made to array data between the execution of Begin and the
corresponding execution of End may affect calls to ArrayElement
that are made within the same Begin/End period in non-sequential
ways. That is, a call to ArrayElement that precedes a change to
array data may access the changed data, and a call that follows
a change to array data may access the original data."
This means that by the time End returns (and DrawArrays and
DrawElements return since they have implicit Ends), the actual vertex
array data must be transferred to OpenGL. This strict coherency model
prevents us from simply passing vertex element indices to the hardware
and having the hardware "pull" the vertex data out (which is often
long after the End for the primitive has returned to the application).
Relaxing this coherency model and bounding the range from which
vertex array data can be pulled is key to making OpenGL vertex
array transfers faster and more efficient.
The first task of the vertex array range extension is to relax
the coherency model so that hardware can indeed "pull" vertex
data from the OpenGL client's address space long after the application
has completed sending the geometry primitives requiring the vertex
data.
The second problem with the OpenGL 1.1 vertex array functionality is
the lack of any guidance from the API about what region of memory
vertices can be pulled from. There is no size limit for OpenGL 1.1
vertex arrays. Any vertex index that points to valid data in all
enabled arrays is fair game. This makes it hard for a vertex DMA
engine to pull vertices since they can be potentially pulled from
anywhere in the OpenGL client address space.
The vertex array range extension specifies a range of the OpenGL
client's address space where vertices can be pulled. Vertex indices
that access any array elements outside the vertex array range
are specified to be undefined. This permits hardware to DMA from
finite regions of OpenGL client address space, making DMA engine
implementation tractable.
The extension is specified such that an (error free) OpenGL client
using the vertex array range functionality could no-op its vertex
array range commands and operate equivalently to using (if slower
than) the vertex array range functionality.
Because different memory types (local graphics memory, AGP memory)
have different DMA bandwidths and caching behavior, this extension
includes a window system dependent memory allocator to allocate
cleanly the most appropriate memory for constructing a vertex array
range. The memory allocator provided allows the application to
tradeoff the desired CPU read frequency, CPU write frequency, and
memory priority while still leaving it up to OpenGL implementation
the exact memory type to be allocated.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/vertex_array_range.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.WGL import _types, _glgets
from OpenGL.raw.WGL.NV.vertex_array_range import *
from OpenGL.raw.WGL.NV.vertex_array_range import _EXTENSION_NAME
def glInitVertexArrayRangeNV():
    '''Return True when the NV_vertex_array_range extension is available.'''
    from OpenGL import extensions
    return extensions.hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | gpl-3.0 |
CorySpitzer/ansible | v1/ansible/playbook/play.py | 85 | 42915 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#############################################
from ansible.utils.template import template
from ansible import utils
from ansible import errors
from ansible.playbook.task import Task
from ansible.module_utils.splitter import split_args, unquote
import ansible.constants as C
import pipes
import shlex
import os
import sys
import uuid
class Play(object):
_pb_common = [
'accelerate', 'accelerate_ipv6', 'accelerate_port', 'any_errors_fatal', 'become',
'become_method', 'become_user', 'environment', 'force_handlers', 'gather_facts',
'handlers', 'hosts', 'name', 'no_log', 'remote_user', 'roles', 'serial', 'su',
'su_user', 'sudo', 'sudo_user', 'tags', 'vars', 'vars_files', 'vars_prompt',
'vault_password',
]
__slots__ = _pb_common + [
'_ds', '_handlers', '_play_hosts', '_tasks', 'any_errors_fatal', 'basedir',
'default_vars', 'included_roles', 'max_fail_pct', 'playbook', 'remote_port',
'role_vars', 'transport', 'vars_file_vars',
]
# to catch typos and so forth -- these are userland names
# and don't line up 1:1 with how they are stored
VALID_KEYS = frozenset(_pb_common + [
'connection', 'include', 'max_fail_percentage', 'port', 'post_tasks',
'pre_tasks', 'role_names', 'tasks', 'user',
])
# *************************************************
    def __init__(self, playbook, ds, basedir, vault_password=None):
        ''' constructor loads from a play datastructure '''
        # Reject unknown top-level play keys early (typo protection).
        for x in ds.keys():
            if not x in Play.VALID_KEYS:
                raise errors.AnsibleError("%s is not a legal parameter of an Ansible Play" % x)
        # allow all playbook keys to be set by --extra-vars
        self.vars = ds.get('vars', {})
        self.vars_prompt = ds.get('vars_prompt', {})
        self.playbook = playbook
        self.vars = self._get_vars()
        self.vars_file_vars = dict() # these are vars read in from vars_files:
        self.role_vars = dict() # these are vars read in from vars/main.yml files in roles
        self.basedir = basedir
        self.roles = ds.get('roles', None)
        self.tags = ds.get('tags', None)
        self.vault_password = vault_password
        self.environment = ds.get('environment', {})
        # Normalize tags to a list: None/unknown -> [], "a,b" -> ['a', 'b'].
        if self.tags is None:
            self.tags = []
        elif type(self.tags) in [ str, unicode ]:
            self.tags = self.tags.split(",")
        elif type(self.tags) != list:
            self.tags = []
        # make sure we have some special internal variables set, which
        # we use later when loading tasks and handlers
        load_vars = dict()
        load_vars['playbook_dir'] = os.path.abspath(self.basedir)
        if self.playbook.inventory.basedir() is not None:
            load_vars['inventory_dir'] = self.playbook.inventory.basedir()
        if self.playbook.inventory.src() is not None:
            load_vars['inventory_file'] = self.playbook.inventory.src()
        # We first load the vars files from the datastructure
        # so we have the default variables to pass into the roles
        self.vars_files = ds.get('vars_files', [])
        if not isinstance(self.vars_files, list):
            raise errors.AnsibleError('vars_files must be a list')
        processed_vars_files = self._update_vars_files_for_host(None)
        # now we load the roles into the datastructure
        self.included_roles = []
        ds = self._load_roles(self.roles, ds)
        # and finally re-process the vars files as they may have been updated
        # by the included roles, but exclude any which have been processed
        self.vars_files = utils.list_difference(ds.get('vars_files', []), processed_vars_files)
        if not isinstance(self.vars_files, list):
            raise errors.AnsibleError('vars_files must be a list')
        self._update_vars_files_for_host(None)
        # template everything to be efficient, but do not pre-mature template
        # tasks/handlers as they may have inventory scope overrides. We also
        # create a set of temporary variables for templating, so we don't
        # trample on the existing vars structures
        _tasks = ds.pop('tasks', [])
        _handlers = ds.pop('handlers', [])
        temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
        temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
        try:
            ds = template(basedir, ds, temp_vars)
        except errors.AnsibleError, e:
            utils.warning("non fatal error while trying to template play variables: %s" % (str(e)))
        ds['tasks'] = _tasks
        ds['handlers'] = _handlers
        self._ds = ds
        hosts = ds.get('hosts')
        if hosts is None:
            raise errors.AnsibleError('hosts declaration is required')
        elif isinstance(hosts, list):
            # A list of host patterns is joined into one pattern string.
            try:
                hosts = ';'.join(hosts)
            except TypeError,e:
                raise errors.AnsibleError('improper host declaration: %s' % str(e))
        self.serial = str(ds.get('serial', 0))
        self.hosts = hosts
        self.name = ds.get('name', self.hosts)
        self._tasks = ds.get('tasks', [])
        self._handlers = ds.get('handlers', [])
        self.remote_user = ds.get('remote_user', ds.get('user', self.playbook.remote_user))
        self.remote_port = ds.get('port', self.playbook.remote_port)
        self.transport = ds.get('connection', self.playbook.transport)
        # NOTE(review): self-assignment below appears to be a leftover no-op.
        self.remote_port = self.remote_port
        self.any_errors_fatal = utils.boolean(ds.get('any_errors_fatal', 'false'))
        self.accelerate = utils.boolean(ds.get('accelerate', 'false'))
        self.accelerate_port = ds.get('accelerate_port', None)
        self.accelerate_ipv6 = ds.get('accelerate_ipv6', False)
        self.max_fail_pct = int(ds.get('max_fail_percentage', 100))
        self.no_log = utils.boolean(ds.get('no_log', 'false'))
        self.force_handlers = utils.boolean(ds.get('force_handlers', self.playbook.force_handlers))
        # Fail out if user specifies conflicting privilege escalations
        if (ds.get('become') or ds.get('become_user')) and (ds.get('sudo') or ds.get('sudo_user')):
            raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("sudo", "sudo_user") cannot be used together')
        if (ds.get('become') or ds.get('become_user')) and (ds.get('su') or ds.get('su_user')):
            raise errors.AnsibleError('sudo params ("become", "become_user") and su params ("su", "su_user") cannot be used together')
        if (ds.get('sudo') or ds.get('sudo_user')) and (ds.get('su') or ds.get('su_user')):
            raise errors.AnsibleError('sudo params ("sudo", "sudo_user") and su params ("su", "su_user") cannot be used together')
        # become settings are inherited and updated normally
        self.become = ds.get('become', self.playbook.become)
        self.become_method = ds.get('become_method', self.playbook.become_method)
        self.become_user = ds.get('become_user', self.playbook.become_user)
        # Make sure current play settings are reflected in become fields
        if 'sudo' in ds:
            self.become=ds['sudo']
            self.become_method='sudo'
            if 'sudo_user' in ds:
                self.become_user=ds['sudo_user']
        elif 'su' in ds:
            self.become=True
            self.become=ds['su']
            self.become_method='su'
            if 'su_user' in ds:
                self.become_user=ds['su_user']
        # gather_facts is not a simple boolean, as None means that a 'smart'
        # fact gathering mode will be used, so we need to be careful here as
        # calling utils.boolean(None) returns False
        self.gather_facts = ds.get('gather_facts', None)
        if self.gather_facts is not None:
            self.gather_facts = utils.boolean(self.gather_facts)
        load_vars['role_names'] = ds.get('role_names', [])
        self._tasks = self._load_tasks(self._ds.get('tasks', []), load_vars)
        self._handlers = self._load_tasks(self._ds.get('handlers', []), load_vars)
        # apply any missing tags to role tasks
        self._late_merge_role_tags()
        # place holder for the discovered hosts to be used in this play
        self._play_hosts = None
# *************************************************
def _get_role_path(self, role):
    """
    Returns the path on disk to the directory containing
    the role directories like tasks, templates, etc. Also
    returns any variables that were included with the role.

    role: either a plain role-spec string or a dict of the form
        {'role': <name>, ...extra vars...} as written in a playbook.
    Returns: (role_path, role_vars) where role_vars is {} for the
        plain-string form and the original dict for the dict form.
    Raises errors.AnsibleError if no role directory can be found.
    """
    orig_path = template(self.basedir,role,self.vars)
    role_vars = {}
    if type(orig_path) == dict:
        # what, not a path? Dict form: the role name lives under 'role'
        # and the remaining keys are parameters passed to the role.
        role_name = orig_path.get('role', None)
        if role_name is None:
            raise errors.AnsibleError("expected a role name in dictionary: %s" % orig_path)
        role_vars = orig_path
    else:
        role_name = utils.role_spec_parse(orig_path)["name"]
    role_path = None
    # search order: ./roles/<name>, ./<name>, then each entry of the
    # configured roles path -- first existing directory wins
    possible_paths = [
        utils.path_dwim(self.basedir, os.path.join('roles', role_name)),
        utils.path_dwim(self.basedir, role_name)
    ]
    if C.DEFAULT_ROLES_PATH:
        search_locations = C.DEFAULT_ROLES_PATH.split(os.pathsep)
        for loc in search_locations:
            loc = os.path.expanduser(loc)
            possible_paths.append(utils.path_dwim(loc, role_name))
    for path_option in possible_paths:
        if os.path.isdir(path_option):
            role_path = path_option
            break
    if role_path is None:
        raise errors.AnsibleError("cannot find role in %s" % " or ".join(possible_paths))
    return (role_path, role_vars)
def _build_role_dependencies(self, roles, dep_stack, passed_vars={}, level=0):
    """
    Depth-first expansion of role dependencies declared in meta/main.

    roles: list of role specs (strings or dicts) to expand.
    dep_stack: accumulator of [role, path, vars, params, defaults]
        entries; dependencies are appended before the role that
        requires them so they execute first.
    passed_vars: variables inherited from the requiring role.
        NOTE(review): mutable default -- appears read-only in this
        body, so sharing across calls looks harmless; verify before
        adding any mutation.
    level: recursion depth guard.
    Returns dep_stack.
    """
    # this number is arbitrary, but it seems sane
    if level > 20:
        raise errors.AnsibleError("too many levels of recursion while resolving role dependencies")
    for role in roles:
        role_path,role_vars = self._get_role_path(role)
        # save just the role params for this role, which exclude the special
        # keywords 'role', 'tags', and 'when'.
        role_params = role_vars.copy()
        for item in ('role', 'tags', 'when'):
            if item in role_params:
                del role_params[item]
        role_vars = utils.combine_vars(passed_vars, role_vars)
        # load vars/main for this role (if present) under the role vars
        vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'vars')))
        vars_data = {}
        if os.path.isfile(vars):
            vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
            if vars_data:
                if not isinstance(vars_data, dict):
                    raise errors.AnsibleError("vars from '%s' are not a dict" % vars)
                role_vars = utils.combine_vars(vars_data, role_vars)
        defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults')))
        defaults_data = {}
        if os.path.isfile(defaults):
            defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
        # the meta directory contains the yaml that should
        # hold the list of dependencies (if any)
        meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(role_path, 'meta')))
        if os.path.isfile(meta):
            data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
            if data:
                dependencies = data.get('dependencies',[])
                if dependencies is None:
                    dependencies = []
                for dep in dependencies:
                    allow_dupes = False
                    (dep_path,dep_vars) = self._get_role_path(dep)
                    # save the dep params, just as we did above
                    dep_params = dep_vars.copy()
                    for item in ('role', 'tags', 'when'):
                        if item in dep_params:
                            del dep_params[item]
                    # the dependency's own meta controls whether it may be
                    # included more than once (allow_duplicates)
                    meta = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'meta')))
                    if os.path.isfile(meta):
                        meta_data = utils.parse_yaml_from_file(meta, vault_password=self.vault_password)
                        if meta_data:
                            allow_dupes = utils.boolean(meta_data.get('allow_duplicates',''))
                    # if any tags were specified as role/dep variables, merge
                    # them into the current dep_vars so they're passed on to any
                    # further dependencies too, and so we only have one place
                    # (dep_vars) to look for tags going forward
                    def __merge_tags(var_obj):
                        # union of dep_vars' tags with var_obj's tags,
                        # normalizing bare strings into one-element lists
                        old_tags = dep_vars.get('tags', [])
                        if isinstance(old_tags, basestring):
                            old_tags = [old_tags, ]
                        if isinstance(var_obj, dict):
                            new_tags = var_obj.get('tags', [])
                            if isinstance(new_tags, basestring):
                                new_tags = [new_tags, ]
                        else:
                            new_tags = []
                        return list(set(old_tags).union(set(new_tags)))
                    dep_vars['tags'] = __merge_tags(role_vars)
                    dep_vars['tags'] = __merge_tags(passed_vars)
                    # if tags are set from this role, merge them
                    # into the tags list for the dependent role
                    if "tags" in passed_vars:
                        for included_role_dep in dep_stack:
                            included_dep_name = included_role_dep[0]
                            included_dep_vars = included_role_dep[2]
                            if included_dep_name == dep:
                                if "tags" in included_dep_vars:
                                    included_dep_vars["tags"] = list(set(included_dep_vars["tags"]).union(set(passed_vars["tags"])))
                                else:
                                    included_dep_vars["tags"] = passed_vars["tags"][:]
                    dep_vars = utils.combine_vars(passed_vars, dep_vars)
                    dep_vars = utils.combine_vars(role_vars, dep_vars)
                    vars = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'vars')))
                    vars_data = {}
                    if os.path.isfile(vars):
                        vars_data = utils.parse_yaml_from_file(vars, vault_password=self.vault_password)
                        if vars_data:
                            dep_vars = utils.combine_vars(dep_vars, vars_data)
                            pass
                    defaults = self._resolve_main(utils.path_dwim(self.basedir, os.path.join(dep_path, 'defaults')))
                    dep_defaults_data = {}
                    if os.path.isfile(defaults):
                        dep_defaults_data = utils.parse_yaml_from_file(defaults, vault_password=self.vault_password)
                    if 'role' in dep_vars:
                        del dep_vars['role']
                    if not allow_dupes:
                        if dep in self.included_roles:
                            # skip back to the top, since we don't want to
                            # do anything else with this role
                            continue
                        else:
                            self.included_roles.append(dep)
                    def _merge_conditional(cur_conditionals, new_conditionals):
                        # append a scalar conditional or extend with a list
                        if isinstance(new_conditionals, (basestring, bool)):
                            cur_conditionals.append(new_conditionals)
                        elif isinstance(new_conditionals, list):
                            cur_conditionals.extend(new_conditionals)
                    # pass along conditionals from roles to dep roles
                    passed_when = passed_vars.get('when')
                    role_when = role_vars.get('when')
                    dep_when = dep_vars.get('when')
                    tmpcond = []
                    _merge_conditional(tmpcond, passed_when)
                    _merge_conditional(tmpcond, role_when)
                    _merge_conditional(tmpcond, dep_when)
                    if len(tmpcond) > 0:
                        dep_vars['when'] = tmpcond
                    # recurse into this dependency's own dependencies first,
                    # then record the dependency itself
                    self._build_role_dependencies([dep], dep_stack, passed_vars=dep_vars, level=level+1)
                    dep_stack.append([dep, dep_path, dep_vars, dep_params, dep_defaults_data])
        # only add the current role when we're at the top level,
        # otherwise we'll end up in a recursive loop
        if level == 0:
            self.included_roles.append(role)
            dep_stack.append([role, role_path, role_vars, role_params, defaults_data])
    return dep_stack
def _load_role_vars_files(self, vars_files):
    """Merge variables from each existing vars/main file.

    Files later in the list override earlier ones via
    utils.combine_vars. Missing or empty files are skipped.
    Raises errors.AnsibleError if a file does not parse to a dict.
    """
    merged = {}
    for path in vars_files:
        if not os.path.exists(path):
            continue
        loaded = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
        if not loaded:
            continue
        if type(loaded) != dict:
            raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (path, type(loaded)))
        merged = utils.combine_vars(merged, loaded)
    return merged
def _load_role_defaults(self, defaults_files):
    """Merge default variables from each existing defaults/main file.

    Same contract as _load_role_vars_files: later files win, missing
    or empty files are skipped, non-dict content raises AnsibleError.
    """
    merged = {}
    for path in defaults_files:
        if not os.path.exists(path):
            continue
        loaded = utils.parse_yaml_from_file(path, vault_password=self.vault_password)
        if not loaded:
            continue
        if type(loaded) != dict:
            raise errors.AnsibleError("%s must be stored as dictionary/hash: %s" % (path, type(loaded)))
        merged = utils.combine_vars(merged, loaded)
    return merged
def _load_roles(self, roles, ds):
    """
    Expand the play's 'roles:' section into concrete task/handler
    include statements, mutating and returning the play datastructure.

    a role is a name that auto-includes the following if they exist
        <rolename>/tasks/main.yml
        <rolename>/handlers/main.yml
        <rolename>/vars/main.yml
        <rolename>/library
    and it auto-extends tasks/handlers/vars_files/module paths as
    appropriate if found.

    Side effects: sets ds['tasks'], ds['handlers'], ds['role_names'],
    self.role_vars and self.default_vars, and registers role library
    directories with the module finder.
    """
    if roles is None:
        roles = []
    if type(roles) != list:
        raise errors.AnsibleError("value of 'roles:' must be a list")
    new_tasks = []
    new_handlers = []
    role_vars_files = []
    defaults_files = []
    pre_tasks = ds.get('pre_tasks', None)
    if type(pre_tasks) != list:
        pre_tasks = []
    for x in pre_tasks:
        new_tasks.append(x)
    # flush handlers after pre_tasks
    new_tasks.append(dict(meta='flush_handlers'))
    # expand dependencies so deps come before the roles that need them
    roles = self._build_role_dependencies(roles, [], {})
    # give each role an uuid and
    # make role_path available as variable to the task
    for idx, val in enumerate(roles):
        this_uuid = str(uuid.uuid4())
        roles[idx][-3]['role_uuid'] = this_uuid
        roles[idx][-3]['role_path'] = roles[idx][1]
    role_names = []
    for (role, role_path, role_vars, role_params, default_vars) in roles:
        # special vars must be extracted from the dict to the included tasks
        special_keys = [ "sudo", "sudo_user", "when", "with_items", "su", "su_user", "become", "become_user" ]
        special_vars = {}
        for k in special_keys:
            if k in role_vars:
                special_vars[k] = role_vars[k]
        task_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'tasks'))
        handler_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'handlers'))
        vars_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'vars'))
        meta_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'meta'))
        defaults_basepath = utils.path_dwim(self.basedir, os.path.join(role_path, 'defaults'))
        task = self._resolve_main(task_basepath)
        handler = self._resolve_main(handler_basepath)
        vars_file = self._resolve_main(vars_basepath)
        meta_file = self._resolve_main(meta_basepath)
        defaults_file = self._resolve_main(defaults_basepath)
        library = utils.path_dwim(self.basedir, os.path.join(role_path, 'library'))
        missing = lambda f: not os.path.isfile(f)
        # an entirely empty role directory is almost certainly a mistake
        if missing(task) and missing(handler) and missing(vars_file) and missing(defaults_file) and missing(meta_file) and not os.path.isdir(library):
            raise errors.AnsibleError("found role at %s, but cannot find %s or %s or %s or %s or %s or %s" % (role_path, task, handler, vars_file, defaults_file, meta_file, library))
        if isinstance(role, dict):
            role_name = role['role']
        else:
            role_name = utils.role_spec_parse(role)["name"]
        role_names.append(role_name)
        if os.path.isfile(task):
            nt = dict(include=pipes.quote(task), vars=role_vars, role_params=role_params, default_vars=default_vars, role_name=role_name)
            for k in special_keys:
                if k in special_vars:
                    nt[k] = special_vars[k]
            new_tasks.append(nt)
        if os.path.isfile(handler):
            nt = dict(include=pipes.quote(handler), vars=role_vars, role_params=role_params, role_name=role_name)
            for k in special_keys:
                if k in special_vars:
                    nt[k] = special_vars[k]
            new_handlers.append(nt)
        if os.path.isfile(vars_file):
            role_vars_files.append(vars_file)
        if os.path.isfile(defaults_file):
            defaults_files.append(defaults_file)
        if os.path.isdir(library):
            utils.plugins.module_finder.add_directory(library)
    tasks = ds.get('tasks', None)
    post_tasks = ds.get('post_tasks', None)
    handlers = ds.get('handlers', None)
    vars_files = ds.get('vars_files', None)
    if type(tasks) != list:
        tasks = []
    if type(handlers) != list:
        handlers = []
    if type(vars_files) != list:
        vars_files = []
    if type(post_tasks) != list:
        post_tasks = []
    new_tasks.extend(tasks)
    # flush handlers after tasks + role tasks
    new_tasks.append(dict(meta='flush_handlers'))
    new_tasks.extend(post_tasks)
    # flush handlers after post tasks
    new_tasks.append(dict(meta='flush_handlers'))
    new_handlers.extend(handlers)
    ds['tasks'] = new_tasks
    ds['handlers'] = new_handlers
    ds['role_names'] = role_names
    self.role_vars = self._load_role_vars_files(role_vars_files)
    self.default_vars = self._load_role_defaults(defaults_files)
    return ds
# *************************************************
def _resolve_main(self, basepath):
''' flexibly handle variations in main filenames '''
# these filenames are acceptable:
mains = (
os.path.join(basepath, 'main'),
os.path.join(basepath, 'main.yml'),
os.path.join(basepath, 'main.yaml'),
os.path.join(basepath, 'main.json'),
)
if sum([os.path.isfile(x) for x in mains]) > 1:
raise errors.AnsibleError("found multiple main files at %s, only one allowed" % (basepath))
else:
for m in mains:
if os.path.isfile(m):
return m # exactly one main file
return mains[0] # zero mains (we still need to return something)
# *************************************************
def _load_tasks(self, tasks, vars=None, role_params=None, default_vars=None, become_vars=None,
                additional_conditions=None, original_file=None, role_name=None):
    ''' handle task and handler include statements

    Recursively turns a list of raw task dicts into Task objects,
    expanding 'include:' entries in place.  become/sudo/su settings and
    'when' conditions are propagated from parents to included children.
    Returns the flat list of Task objects.
    '''
    results = []
    if tasks is None:
        # support empty handler files, and the like.
        tasks = []
    if additional_conditions is None:
        additional_conditions = []
    if vars is None:
        vars = {}
    if role_params is None:
        role_params = {}
    if default_vars is None:
        default_vars = {}
    if become_vars is None:
        become_vars = {}
    old_conditions = list(additional_conditions)
    for x in tasks:
        # prevent assigning the same conditions to each task on an include
        included_additional_conditions = list(old_conditions)
        if not isinstance(x, dict):
            raise errors.AnsibleError("expecting dict; got: %s, error in %s" % (x, original_file))
        # evaluate privilege escalation vars for current and child tasks
        included_become_vars = {}
        for k in ["become", "become_user", "become_method", "become_exe", "sudo", "su", "sudo_user", "su_user"]:
            if k in x:
                included_become_vars[k] = x[k]
            elif k in become_vars:
                included_become_vars[k] = become_vars[k]
                x[k] = become_vars[k]
        task_vars = vars.copy()
        if original_file:
            task_vars['_original_file'] = original_file
        if 'meta' in x:
            if x['meta'] == 'flush_handlers':
                # meta tasks pass straight through, tagged with their role
                if role_name and 'role_name' not in x:
                    x['role_name'] = role_name
                results.append(Task(self, x, module_vars=task_vars, role_name=role_name))
                continue
        if 'include' in x:
            tokens = split_args(str(x['include']))
            included_additional_conditions = list(additional_conditions)
            include_vars = {}
            for k in x:
                if k.startswith("with_"):
                    if original_file:
                        offender = " (in %s)" % original_file
                    else:
                        offender = ""
                    utils.deprecated("include + with_items is a removed deprecated feature" + offender, "1.5", removed=True)
                elif k.startswith("when_"):
                    utils.deprecated("\"when_<criteria>:\" is a removed deprecated feature, use the simplified 'when:' conditional directly", None, removed=True)
                elif k == 'when':
                    if isinstance(x[k], (basestring, bool)):
                        included_additional_conditions.append(x[k])
                    elif type(x[k]) is list:
                        included_additional_conditions.extend(x[k])
                elif k in ("include", "vars", "role_params", "default_vars", "sudo", "sudo_user", "role_name", "no_log", "become", "become_user", "su", "su_user"):
                    # handled elsewhere; not passed down as include vars
                    continue
                else:
                    include_vars[k] = x[k]
            # get any role parameters specified
            role_params = x.get('role_params', {})
            # get any role default variables specified
            default_vars = x.get('default_vars', {})
            if not default_vars:
                default_vars = self.default_vars
            else:
                default_vars = utils.combine_vars(self.default_vars, default_vars)
            # append the vars defined with the include (from above)
            # as well as the old-style 'vars' element. The old-style
            # vars are given higher precedence here (just in case)
            task_vars = utils.combine_vars(task_vars, include_vars)
            if 'vars' in x:
                task_vars = utils.combine_vars(task_vars, x['vars'])
            new_role = None
            if 'role_name' in x:
                new_role = x['role_name']
            # k=v pairs on the include line become vars for the included file
            mv = task_vars.copy()
            for t in tokens[1:]:
                (k,v) = t.split("=", 1)
                v = unquote(v)
                mv[k] = template(self.basedir, v, mv)
            # includes are relative to the file that contains them
            dirname = self.basedir
            if original_file:
                dirname = os.path.dirname(original_file)
            # temp vars are used here to avoid trampling on the existing vars structures
            temp_vars = utils.combine_vars(self.vars, self.vars_file_vars)
            temp_vars = utils.combine_vars(temp_vars, mv)
            temp_vars = utils.combine_vars(temp_vars, self.playbook.extra_vars)
            include_file = template(dirname, tokens[0], temp_vars)
            include_filename = utils.path_dwim(dirname, include_file)
            data = utils.parse_yaml_from_file(include_filename, vault_password=self.vault_password)
            if 'role_name' in x and data is not None:
                for y in data:
                    if isinstance(y, dict) and 'include' in y:
                        y['role_name'] = new_role
            loaded = self._load_tasks(data, mv, role_params, default_vars, included_become_vars, list(included_additional_conditions), original_file=include_filename, role_name=new_role)
            results += loaded
        elif type(x) == dict:
            # a plain task entry
            task = Task(
                self, x,
                module_vars=task_vars,
                play_vars=self.vars,
                play_file_vars=self.vars_file_vars,
                role_vars=self.role_vars,
                role_params=role_params,
                default_vars=default_vars,
                additional_conditions=list(additional_conditions),
                role_name=role_name
            )
            results.append(task)
        else:
            raise Exception("unexpected task type")
    # play-level tags apply to every resulting task
    for x in results:
        if self.tags is not None:
            x.tags.extend(self.tags)
    return results
# *************************************************
def tasks(self):
    """Accessor: the compiled Task objects for this play."""
    return self._tasks
def handlers(self):
    """Accessor: the compiled handler Task objects for this play."""
    return self._handlers
# *************************************************
def _get_vars(self):
    ''' load the vars section from a play, accounting for all sorts of variable features
    including loading from yaml files, prompting, and conditional includes of the first
    file found in a list.

    Returns the merged play-level vars dict; extra_vars (when a dict)
    always win over play vars and suppress any prompting for the same
    name.
    '''
    if self.vars is None:
        self.vars = {}
    if type(self.vars) not in [dict, list]:
        raise errors.AnsibleError("'vars' section must contain only key/value pairs")
    vars = {}
    # translate a list of vars into a dict
    if type(self.vars) == list:
        for item in self.vars:
            # each list entry must be a one-key mapping (Python 2:
            # dict.items() returns a list, so [0] is valid here)
            if getattr(item, 'items', None) is None:
                raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
            k, v = item.items()[0]
            vars[k] = v
    else:
        vars.update(self.vars)
    if type(self.vars_prompt) == list:
        # full form: list of dicts with prompt/crypt options
        for var in self.vars_prompt:
            if not 'name' in var:
                raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")
            vname = var['name']
            prompt = var.get("prompt", vname)
            default = var.get("default", None)
            private = var.get("private", True)
            confirm = var.get("confirm", False)
            encrypt = var.get("encrypt", None)
            salt_size = var.get("salt_size", None)
            salt = var.get("salt", None)
            # extra_vars pre-empt prompting
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    vname, private, prompt, encrypt, confirm, salt_size, salt, default
                )
    elif type(self.vars_prompt) == dict:
        # short form: {name: prompt} with non-private defaults
        for (vname, prompt) in self.vars_prompt.iteritems():
            prompt_msg = "%s: " % prompt
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                    varname=vname, private=False, prompt=prompt_msg, default=None
                )
    else:
        raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")
    if type(self.playbook.extra_vars) == dict:
        vars = utils.combine_vars(vars, self.playbook.extra_vars)
    return vars
# *************************************************
def update_vars_files(self, hosts, vault_password=None):
    ''' calculate vars_files, which requires that setup runs first so ansible facts can be mixed in '''
    # delegate per host so each host's inventory/fact data is injected
    for current_host in hosts:
        self._update_vars_files_for_host(current_host, vault_password=vault_password)
# *************************************************
def compare_tags(self, tags):
    ''' given a list of tags that the user has specified, return two lists:
    matched_tags: tags were found within the current play and match those given
    by the user
    unmatched_tags: tags that were found within the current play but do not match
    any provided by the user '''
    # gather every tag on the play's non-meta tasks and its handlers
    # FIXME: isn't this in self.tags already?
    play_tags = []
    for t in self._tasks:
        if not t.meta:
            play_tags.extend(t.tags)
    for h in self._handlers:
        play_tags.extend(h.tags)
    play_tag_set = set(play_tags)
    requested = set(tags)
    matched_tags = play_tag_set & requested
    unmatched_tags = play_tag_set - requested
    always = set(['always'])
    untagged = set(['untagged'])
    # 'always' matches regardless of what was requested
    if 'always' in play_tag_set:
        matched_tags = matched_tags | always
        unmatched_tags = play_tag_set - always
    # the pseudo-tags 'all', 'tagged' and 'untagged' override the
    # straight set comparison above
    if 'all' in requested:
        matched_tags = matched_tags | play_tag_set
        unmatched_tags = set()
    if 'tagged' in requested:
        matched_tags = play_tag_set - untagged
        unmatched_tags = untagged
    if 'untagged' in requested and 'untagged' in play_tag_set:
        matched_tags = matched_tags | untagged
        unmatched_tags = unmatched_tags - untagged
    return matched_tags, unmatched_tags
# *************************************************
def _late_merge_role_tags(self):
    """
    After tasks are compiled, copy tags declared on a role's include
    entries onto every Task that belongs to that role.  Roles are keyed
    by "<role_name>-<role_uuid>" so two inclusions of the same role with
    different params keep separate tag sets.
    """
    # build a local dict of tags for roles
    role_tags = {}
    for task in self._ds['tasks']:
        if 'role_name' in task:
            this_role = task['role_name'] + "-" + task['vars']['role_uuid']
            if this_role not in role_tags:
                role_tags[this_role] = []
            if 'tags' in task['vars']:
                # tags may be a space-separated string or a list
                if isinstance(task['vars']['tags'], basestring):
                    role_tags[this_role] += shlex.split(task['vars']['tags'])
                else:
                    role_tags[this_role] += task['vars']['tags']
    # apply each role's tags to its tasks (deduplicated and sorted)
    for idx, val in enumerate(self._tasks):
        if getattr(val, 'role_name', None) is not None:
            this_role = val.role_name + "-" + val.module_vars['role_uuid']
            if this_role in role_tags:
                self._tasks[idx].tags = sorted(set(self._tasks[idx].tags + role_tags[this_role]))
# *************************************************
def _update_vars_files_for_host(self, host, vault_password=None):
    """
    Load this play's vars_files for one host (or for the play itself
    when host is None) and merge the results into the host VARS_CACHE
    or self.vars_file_vars.  Returns the list of vars_files entries
    that were actually processed.
    """
    def generate_filenames(host, inject, filename):
        """ Render the raw filename into 3 forms """
        # filename2 is the templated version of the filename, which will
        # be fully rendered if any variables contained within it are
        # non-inventory related
        filename2 = template(self.basedir, filename, self.vars)
        # filename3 is the same as filename2, but when the host object is
        # available, inventory variables will be expanded as well since the
        # name is templated with the injected variables
        filename3 = filename2
        if host is not None:
            filename3 = template(self.basedir, filename2, inject)
        # filename4 is the dwim'd path, but may also be mixed-scope, so we use
        # both play scoped vars and host scoped vars to template the filepath
        if utils.contains_vars(filename3) and host is not None:
            inject.update(self.vars)
            filename4 = template(self.basedir, filename3, inject)
            filename4 = utils.path_dwim(self.basedir, filename4)
        else:
            filename4 = utils.path_dwim(self.basedir, filename3)
        return filename2, filename3, filename4
    def update_vars_cache(host, data, target_filename=None):
        """ update a host's varscache with new var data """
        self.playbook.VARS_CACHE[host] = utils.combine_vars(self.playbook.VARS_CACHE.get(host, {}), data)
        if target_filename:
            self.playbook.callbacks.on_import_for_host(host, target_filename)
    def process_files(filename, filename2, filename3, filename4, host=None):
        """ pseudo-algorithm for deciding where new vars should go """
        data = utils.parse_yaml_from_file(filename4, vault_password=self.vault_password)
        if data:
            if type(data) != dict:
                raise errors.AnsibleError("%s must be stored as a dictionary/hash" % filename4)
            if host is not None:
                # host-specific run: vars go into the per-host cache
                target_filename = None
                if utils.contains_vars(filename2):
                    if not utils.contains_vars(filename3):
                        target_filename = filename3
                else:
                    target_filename = filename4
                update_vars_cache(host, data, target_filename=target_filename)
            else:
                # play-level run: vars go into the play's own store
                self.vars_file_vars = utils.combine_vars(self.vars_file_vars, data)
            # we did process this file
            return True
        # we did not process this file
        return False
    # Enforce that vars_files is always a list
    if type(self.vars_files) != list:
        self.vars_files = [ self.vars_files ]
    # Build an inject if this is a host run started by self.update_vars_files
    if host is not None:
        inject = {}
        inject.update(self.playbook.inventory.get_variables(host, vault_password=vault_password))
        inject.update(self.playbook.SETUP_CACHE.get(host, {}))
        inject.update(self.playbook.VARS_CACHE.get(host, {}))
    else:
        inject = None
    processed = []
    for filename in self.vars_files:
        if type(filename) == list:
            # loop over all filenames, loading the first one, and failing if none found
            found = False
            sequence = []
            for real_filename in filename:
                filename2, filename3, filename4 = generate_filenames(host, inject, real_filename)
                sequence.append(filename4)
                if os.path.exists(filename4):
                    found = True
                    if process_files(filename, filename2, filename3, filename4, host=host):
                        processed.append(filename)
                    elif host is not None:
                        self.playbook.callbacks.on_not_import_for_host(host, filename4)
                    if found:
                        break
            if not found and host is not None:
                raise errors.AnsibleError(
                    "%s: FATAL, no files matched for vars_files import sequence: %s" % (host, sequence)
                )
        else:
            # just one filename supplied, load it!
            filename2, filename3, filename4 = generate_filenames(host, inject, filename)
            # still-unresolved variables in the path: defer this file
            if utils.contains_vars(filename4):
                continue
            if process_files(filename, filename2, filename3, filename4, host=host):
                processed.append(filename)
    return processed
| gpl-3.0 |
PTDreamer/dRonin | python/calibration/mag_calibration.py | 4 | 4543 | #!/usr/bin/python
from numpy import *
from matplotlib.pylab import *
def mag_calibration(mag,gyros=None,LH=200,LV=500):
    """ Calibrates the magnetometer data by fitting it to a sphere,
    ideally when constantly turning to spread the data around that
    sphere somewhat evenly (or at least in a horizontal plane)

    Parameters:
        mag   -- record array with 'time', 'x', 'y', 'z' magnetometer columns
        gyros -- optional record array; when given, only samples taken while
                 spinning about the z axis are used for the fit
        LH    -- expected horizontal field magnitude (default 200)
        LV    -- expected vertical field magnitude (default 500)

    Returns:
        (res, cor_x, cor_y, cor_z) -- the scipy optimization result and the
        corrected magnetometer samples.  Also draws two diagnostic plots.
    """
    import numpy
    from scipy.optimize import minimize
    from numpy.core.multiarray import arange
    def find_spinning(mag,gyros):
        """ return the indicies in the magnetometer data when
        the gyro indicates it is spinning on the z axis """
        import scipy.signal
        from matplotlib.mlab import find
        threshold = 40
        spinning = scipy.signal.medfilt(abs(gyros['z'][:,0]),kernel_size=5) > threshold
        # make sure to always find end elements
        spinning = numpy.concatenate((numpy.array([False]),spinning,numpy.array([False])))
        start = find(spinning[1:] & ~spinning[0:-1])
        stop = find(~spinning[1:] & spinning[0:-1])-1
        tstart = gyros['time'][start]
        tstop = gyros['time'][stop]
        idx = numpy.zeros((0),dtype=int)
        for i in arange(tstart.size):
            # map each spinning interval from gyro time onto mag samples
            i1 = abs(mag['time']-tstart[i]).argmin()
            i2 = abs(mag['time']-tstop[i]).argmin()
            idx = numpy.concatenate((idx,arange(i1,i2,dtype=int)))
        return idx
    if gyros is not None:
        idx = find_spinning(mag,gyros)
    else:
        idx = arange(mag['time'].size)
    mag_x = mag['x'][idx,0]
    mag_y = mag['y'][idx,0]
    mag_z = mag['z'][idx,0]
    # (previous revision computed unused range/midpoint values here;
    # the fit below estimates scale and offset itself)
    def distortion(x,mag_x=mag_x,mag_y=mag_y,mag_z=mag_z,LH=LH,LV=LV):
        """ loss function for distortion from spherical data """
        from numpy import sqrt
        # x = [scale_x, scale_y, scale_z, offset_x, offset_y, offset_z]
        cor_x = mag_x * x[0] - x[3]
        cor_y = mag_y * x[1] - x[4]
        cor_z = mag_z * x[2] - x[5]
        l = sqrt(cor_x**2 + cor_y**2 + cor_z**2)
        L0 = sqrt(LH**2 + LV**2)
        spherical_error = numpy.mean((l - L0)**2)
        # note that ideally the horizontal error would be calculated
        # after correcting for attitude but that requires high temporal
        # accuracy from attitude which we don't want to requires. this
        # works well in practice.
        lh = sqrt(cor_x**2 + cor_y**2)
        err = (lh - LH)**2
        horizontal_error = numpy.mean(err)
        # weight both the spherical error and the horizontal error
        # components equally
        return spherical_error+horizontal_error
    # keep the scale factors away from the degenerate all-zero solution
    cons = ({'type': 'ineq', 'fun' : lambda x: numpy.array([x[0] - 0.5])},
            {'type': 'ineq', 'fun' : lambda x: numpy.array([x[1] - 0.5])},
            {'type': 'ineq', 'fun' : lambda x: numpy.array([x[2] - 0.5])})
    opts = {'xtol': 1e-8, 'disp': False, 'maxiter': 10000}
    # method of COBYLA also works well
    x0 = numpy.array([1, 1, 1, numpy.mean(mag_x), numpy.mean(mag_y), numpy.mean(mag_z)])
    res = minimize(distortion, x0, method='COBYLA', options=opts, constraints=cons)
    x = res.x
    cor_x = mag_x * x[0] - x[3]
    cor_y = mag_y * x[1] - x[4]
    cor_z = mag_z * x[2] - x[5]
    # diagnostic plots: corrected samples projected on each plane, and
    # the corrected field magnitude (should be roughly constant)
    import matplotlib
    from numpy import sqrt
    matplotlib.pyplot.subplot(1,2,1)
    matplotlib.pyplot.plot(cor_x,cor_y,'.',cor_x,cor_z,'.',cor_z,cor_y,'.')
    matplotlib.pyplot.subplot(1,2,2)
    matplotlib.pyplot.plot(sqrt(cor_x**2+cor_y**2+cor_z**2))
    return res, cor_x, cor_y, cor_z
def main():
    """Command-line entry point: load telemetry selected by the CLI args,
    run the magnetometer calibration against the gyro data, print the
    result and show the diagnostic plots."""
    import sys, os
    # make the sibling 'dronin' package importable when run from source
    sys.path.insert(1, os.path.dirname(sys.path[0]))
    from dronin import telemetry
    uavo_list = telemetry.get_telemetry_by_args()
    from dronin.uavo import UAVO_Magnetometer, UAVO_Gyros
    print mag_calibration(uavo_list.as_numpy_array(UAVO_Magnetometer), uavo_list.as_numpy_array(UAVO_Gyros))
    # Wait for user to close window.
    # NOTE(review): `matplotlib` is only in scope here via the module-level
    # star import above -- confirm before reorganizing imports.
    matplotlib.pyplot.show()
if __name__ == "__main__":
    main()
| gpl-3.0 |
ychfan/tensorflow | tensorflow/contrib/layers/python/layers/normalization_test.py | 31 | 7237 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for contrib.layers.python.layers.normalization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import normalization
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class InstanceNormTest(test.TestCase):
def testUnknownShape(self):
    """A placeholder with no statically-known rank must be rejected."""
    unranked = array_ops.placeholder(dtypes.float32)
    with self.assertRaisesRegexp(ValueError, 'undefined rank'):
        normalization.instance_norm(unranked)
def testBadDataFormat(self):
    """Only 'NCHW' and 'NHWC' are accepted as data_format."""
    tensor_in = array_ops.placeholder(dtypes.float32, shape=(2, 5, 5))
    with self.assertRaisesRegexp(ValueError,
                                 'data_format has to be either NCHW or NHWC.'):
        normalization.instance_norm(tensor_in, data_format='NHCW')
def testParamsShapeNotFullyDefinedNCHW(self):
    """NCHW input with an unknown channels dimension (dim 1) is rejected."""
    tensor_in = array_ops.placeholder(dtypes.float32, shape=(3, None, 4))
    with self.assertRaisesRegexp(ValueError, 'undefined channels dimension'):
        normalization.instance_norm(tensor_in, data_format='NCHW')
def testParamsShapeNotFullyDefinedNHWC(self):
    """NHWC input with an unknown channels dimension (last dim) is rejected."""
    tensor_in = array_ops.placeholder(dtypes.float32, shape=(3, 4, None))
    with self.assertRaisesRegexp(ValueError, 'undefined channels dimension'):
        normalization.instance_norm(tensor_in, data_format='NHWC')
def testCreateOp(self):
    """instance_norm builds an op under the InstanceNorm scope and
    preserves the input shape.  (Removed a stray debug print that was
    left in this test; the op name is asserted below.)"""
    height, width = 3, 3
    images = random_ops.random_uniform((5, height, width, 3), seed=1)
    output = normalization.instance_norm(images)
    self.assertStartsWith(
        output.op.name, 'InstanceNorm/instancenorm')
    self.assertListEqual([5, height, width, 3], output.shape.as_list())
def testCreateOpFloat64(self):
    """float64 input is supported; scope name and shape are preserved."""
    height, width = 3, 3
    pixels = random_ops.random_uniform(
        (5, height, width, 3), dtype=dtypes.float64, seed=1)
    result = normalization.instance_norm(pixels)
    self.assertStartsWith(result.op.name, 'InstanceNorm/instancenorm')
    self.assertListEqual([5, height, width, 3], result.shape.as_list())
def testCreateOpNoScaleCenter(self):
    """center=False/scale=False creates no beta or gamma variables."""
    height, width = 3, 3
    pixels = random_ops.random_uniform(
        (5, height, width, 3), dtype=dtypes.float64, seed=1)
    result = normalization.instance_norm(pixels, center=False, scale=False)
    self.assertStartsWith(result.op.name, 'InstanceNorm/instancenorm')
    self.assertListEqual([5, height, width, 3], result.shape.as_list())
    self.assertEqual(0, len(contrib_variables.get_variables_by_name('beta')))
    self.assertEqual(0, len(contrib_variables.get_variables_by_name('gamma')))
def testCreateVariables(self):
    """center/scale create beta and gamma under the InstanceNorm scope."""
    height, width = 3, 3
    pixels = random_ops.random_uniform((5, height, width, 3), seed=1)
    normalization.instance_norm(pixels, center=True, scale=True)
    beta_var = contrib_variables.get_variables_by_name('beta')[0]
    gamma_var = contrib_variables.get_variables_by_name('gamma')[0]
    self.assertEqual('InstanceNorm/beta', beta_var.op.name)
    self.assertEqual('InstanceNorm/gamma', gamma_var.op.name)
def testReuseVariables(self):
    """reuse=True shares the existing variables instead of creating more."""
    height, width = 3, 3
    pixels = random_ops.random_uniform((5, height, width, 3), seed=1)
    normalization.instance_norm(pixels, scale=True, scope='IN')
    normalization.instance_norm(pixels, scale=True, scope='IN', reuse=True)
    betas = contrib_variables.get_variables_by_name('beta')
    gammas = contrib_variables.get_variables_by_name('gamma')
    self.assertEqual(1, len(betas))
    self.assertEqual(1, len(gammas))
def testValueCorrectWithReuseVars(self):
    # With reuse=True the second call shares the first call's variables, so
    # both ops must produce numerically identical outputs for equal input.
    height, width = 3, 3
    image_shape = (10, height, width, 3)
    images = random_ops.random_uniform(image_shape, seed=1)
    output_train = normalization.instance_norm(images, scope='IN')
    output_eval = normalization.instance_norm(images, scope='IN', reuse=True)
    with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        # output_train and output_eval should be the same.
        train_np, eval_np = sess.run([output_train, output_eval])
        self.assertAllClose(train_np, eval_np)
def doOutputTest(self, input_shape, data_format, tol=1e-3):
    """Checks that instance_norm output has per-instance, per-channel
    mean ~0 and variance ~1, for a sweep of input offsets and scales.

    Args:
        input_shape: full shape of the random input tensor.
        data_format: 'NHWC' (channels last) or 'NCHW' (channels second).
        tol: rtol/atol for the mean/variance comparison.
    """
    # Channel axis position depends on the data format.
    axis = -1 if data_format == 'NHWC' else 1
    # Sweep offset (mu) and scale (sigma) to verify the normalization is
    # invariant to affine shifts of the input.
    for mu in (0.0, 1e2):
        for sigma in (1.0, 0.1):
            # Determine shape of Tensor after normalization:
            # one statistic per (example, channel) pair.
            reduced_shape = (input_shape[0], input_shape[axis])
            expected_mean = np.zeros(reduced_shape)
            expected_var = np.ones(reduced_shape)
            # Determine axes that will be normalized: everything except
            # the batch axis (0) and the channel axis.
            reduced_axes = list(range(len(input_shape)))
            del reduced_axes[axis]
            del reduced_axes[0]
            reduced_axes = tuple(reduced_axes)
            inputs = random_ops.random_uniform(input_shape, seed=0) * sigma + mu
            output_op = normalization.instance_norm(
                inputs, center=False, scale=False, data_format=data_format)
            with self.test_session() as sess:
                sess.run(variables.global_variables_initializer())
                outputs = sess.run(output_op)
                # Make sure that there are no NaNs
                self.assertFalse(np.isnan(outputs).any())
                mean = np.mean(outputs, axis=reduced_axes)
                var = np.var(outputs, axis=reduced_axes)
                # The mean and variance of each example should be close to 0 and 1
                # respectively.
                self.assertAllClose(expected_mean, mean, rtol=tol, atol=tol)
                self.assertAllClose(expected_var, var, rtol=tol, atol=tol)
def testOutputSmallInput4DNHWC(self):
    # Few samples per (instance, channel): loose tolerance.
    self.doOutputTest((10, 10, 10, 30), 'NHWC', tol=1e-2)
def testOutputSmallInput4DNCHW(self):
    # Channels-first variant of the small 4-D case.
    self.doOutputTest((10, 10, 10, 30), 'NCHW', tol=1e-2)
def testOutputBigInput4DNHWC(self):
    # Many samples per (instance, channel): tight tolerance.
    self.doOutputTest((1, 100, 100, 1), 'NHWC', tol=1e-3)
def testOutputBigInput4DNCHW(self):
    # Channels-first variant of the big 4-D case.
    self.doOutputTest((1, 100, 100, 1), 'NCHW', tol=1e-3)
def testOutputSmallInput5DNHWC(self):
    # 5-D (e.g. volumetric) input, channels last, loose tolerance.
    self.doOutputTest((10, 10, 10, 10, 30), 'NHWC', tol=1e-2)
def testOutputSmallInput5DNCHW(self):
    # 5-D input, channels first, loose tolerance.
    self.doOutputTest((10, 10, 10, 10, 30), 'NCHW', tol=1e-2)
def testOutputBigInput5DNHWC(self):
    # 5-D input with many spatial samples, channels last.
    self.doOutputTest((1, 100, 100, 1, 1), 'NHWC', tol=1e-3)
def testOutputBigInput5DNCHW(self):
    # 5-D input with many spatial samples, channels first.
    self.doOutputTest((1, 100, 100, 1, 1), 'NCHW', tol=1e-3)
if __name__ == '__main__':
test.main()
| apache-2.0 |
fly19890211/edx-platform | openedx/core/djangoapps/call_stack_manager/core.py | 38 | 8963 | """
Call Stack Manager deals with tracking call stacks of functions/methods/classes(Django Model Classes)
Call Stack Manager logs unique call stacks. The call stacks then can be retrieved via Splunk, or log reads.
classes:
CallStackManager - stores all stacks in global dictionary and logs
CallStackMixin - used for Model save(), and delete() method
Decorators:
@donottrack - Decorator that will halt tracking for parameterized entities,
(or halt tracking anything in case of non-parametrized decorator).
@trackit - Decorator that will start tracking decorated entity.
@track_till_now - Will log every unique call stack of parametrized entity/ entities.
TRACKING DJANGO MODEL CLASSES -
Call stacks of Model Class
in three cases-
1. QuerySet API
2. save()
3. delete()
How to use:
1. Import following in the file where class to be tracked resides
from openedx.core.djangoapps.call_stack_manager import CallStackManager, CallStackMixin
2. Override objects of default manager by writing following in any model class which you want to track-
objects = CallStackManager()
3. For tracking Save and Delete events-
Use mixin called "CallStackMixin"
For ex.
class StudentModule(models.Model, CallStackMixin):
TRACKING FUNCTIONS, and METHODS-
1. Import following-
from openedx.core.djangoapps.call_stack_manager import trackit
NOTE - @trackit is non-parameterized decorator.
FOR DISABLING TRACKING-
1. Import following at appropriate location-
from openedx.core.djangoapps.call_stack_manager import donottrack
NOTE - You need to import function/class you do not want to track.
"""
import logging
import traceback
import re
import collections
import wrapt
import types
import inspect
from django.db.models import Manager
log = logging.getLogger(__name__)
# List of regular expressions acting as filters
REGULAR_EXPS = [re.compile(x) for x in ['^.*python2.7.*$', '^.*<exec_function>.*$', '^.*exec_code_object.*$',
'^.*edxapp/src.*$', '^.*call_stack_manager.*$']]
# List keeping track of entities not to be tracked
HALT_TRACKING = []
STACK_BOOK = collections.defaultdict(list)
# Dictionary which stores call logs
# {'EntityName' : ListOf<CallStacks>}
# CallStacks is ListOf<Frame>
# Frame is a tuple ('FilePath','LineNumber','Function Name', 'Context')
# {"<class 'courseware.models.StudentModule'>" : [[(file, line number, function name, context),(---,---,---)],
# [(file, line number, function name, context),(---,---,---)]]}
def capture_call_stack(entity_name):
    """ Logs customised call stacks in global dictionary STACK_BOOK and logs it.
    Arguments:
        entity_name - entity (a class, or a function/method object)
    """
    # Holds temporary callstack
    # List with each element 4-tuple(filename, line number, function name, text)
    # and filtered with respect to regular expressions
    temp_call_stack = [frame for frame in traceback.extract_stack()
                       if not any(reg.match(frame[0]) for reg in REGULAR_EXPS)]

    final_call_stack = "".join(traceback.format_list(temp_call_stack))

    def _should_get_logged(entity_name):  # pylint: disable=redefined-outer-name
        """ Checks if current call stack of current entity should be logged or not.
        Arguments:
            entity_name - Name of the current entity
        Returns:
            True if the current call stack is to logged, False otherwise
        """
        # A bare @donottrack() pushes None, meaning "halt everything".
        # Bug fix: this must be checked *before* dereferencing
        # HALT_TRACKING[-1] below -- previously tuple(None) raised a
        # TypeError whenever the top of the stack was None and the entity
        # was a class.
        if HALT_TRACKING and HALT_TRACKING[-1] is None:
            return False
        # Nothing survived the REGULAR_EXPS filter -- nothing to log.
        if not temp_call_stack:
            return False
        if HALT_TRACKING:
            halted = tuple(HALT_TRACKING[-1])
            # Halted when the entity is a subclass of any halted class...
            is_class_in_halt_tracking = bool(inspect.isclass(entity_name) and
                                             issubclass(entity_name, halted))
            # ...or a function matching a halted one by module-qualified name.
            is_function_in_halt_tracking = bool(not inspect.isclass(entity_name) and
                                                any((entity_name.__name__ == x.__name__ and
                                                     entity_name.__module__ == x.__module__)
                                                    for x in halted))
            if is_class_in_halt_tracking or is_function_in_halt_tracking:
                return False
        # Only log call stacks that have not been seen for this entity yet.
        return temp_call_stack not in STACK_BOOK[entity_name]

    if _should_get_logged(entity_name):
        STACK_BOOK[entity_name].append(temp_call_stack)
        if inspect.isclass(entity_name):
            log.info("Logging new call stack number %s for %s:\n %s", len(STACK_BOOK[entity_name]),
                     entity_name, final_call_stack)
        else:
            log.info("Logging new call stack number %s for %s.%s:\n %s", len(STACK_BOOK[entity_name]),
                     entity_name.__module__, entity_name.__name__, final_call_stack)
class CallStackMixin(object):
    """ Mixin class for getting call stacks when save() and delete() methods are called """

    def save(self, *args, **kwargs):
        """ Logs before save() and overrides respective model API save() """
        # Record the stack keyed by the concrete model class, then delegate
        # to the normal Django save().
        capture_call_stack(type(self))
        return super(CallStackMixin, self).save(*args, **kwargs)

    def delete(self, *args, **kwargs):
        """ Logs before delete() and overrides respective model API delete() """
        capture_call_stack(type(self))
        return super(CallStackMixin, self).delete(*args, **kwargs)
class CallStackManager(Manager):
    """ Manager class which overrides the default Manager class for getting call stacks """

    def get_query_set(self):
        """ Override the default queryset API method """
        # NOTE: get_query_set is the pre-Django-1.6 hook name; kept as-is to
        # match the Django version this codebase targets.
        capture_call_stack(self.model)
        return super(CallStackManager, self).get_query_set()
def donottrack(*entities_not_to_be_tracked):
    """ Decorator which halts tracking for some entities for specific functions
    Arguments:
        entities_not_to_be_tracked: entities which are not to be tracked
    Returns:
        wrapped function
    """
    # Bare @donottrack() means "halt tracking for everything"; that case is
    # signalled downstream by pushing None onto HALT_TRACKING.
    if not entities_not_to_be_tracked:
        entities_not_to_be_tracked = None

    @wrapt.decorator
    def real_donottrack(wrapped, instance, args, kwargs):  # pylint: disable=unused-argument
        """ Takes function to be decorated and returns wrapped function
        Arguments:
            wrapped - The wrapped function which in turns needs to be called by wrapper function
            instance - The object to which the wrapped function was bound when it was called.
            args - The list of positional arguments supplied when the decorated function was called.
            kwargs - The dictionary of keyword arguments supplied when the decorated function was called.
        Returns:
            return of wrapped function
        """
        global HALT_TRACKING  # pylint: disable=global-variable-not-assigned
        if entities_not_to_be_tracked is None:
            HALT_TRACKING.append(None)
        else:
            if HALT_TRACKING:
                if HALT_TRACKING[-1] is None:  # if @donottrack() calls @donottrack('xyz')
                    # NOTE(review): nothing is pushed here, but the pop()
                    # below runs unconditionally -- this looks like it can
                    # unbalance HALT_TRACKING; confirm intended behavior.
                    pass
                else:
                    # Union with what is already halted so nested decorators
                    # accumulate rather than replace.
                    HALT_TRACKING.append(set(HALT_TRACKING[-1].union(set(entities_not_to_be_tracked))))
            else:
                HALT_TRACKING.append(set(entities_not_to_be_tracked))
        return_value = wrapped(*args, **kwargs)
        # check if the returning class is a generator
        if isinstance(return_value, types.GeneratorType):
            def generator_wrapper(wrapped_generator):
                """ Function handling wrapped yielding values.
                Argument:
                    wrapped_generator - wrapped function returning generator function
                Returns:
                    Generator Wrapper
                """
                try:
                    while True:
                        return_value = next(wrapped_generator)
                        yield return_value
                finally:
                    # Pop only once the generator is exhausted or closed, so
                    # the halt stays in effect while it is being consumed.
                    HALT_TRACKING.pop()
            return generator_wrapper(return_value)
        else:
            HALT_TRACKING.pop()
            return return_value
    return real_donottrack
@wrapt.decorator
def trackit(wrapped, instance, args, kwargs):  # pylint: disable=unused-argument
    """ Decorator which tracks logs call stacks
    Arguments:
        wrapped - The wrapped function which in turns needs to be called by wrapper function.
        instance - The object to which the wrapped function was bound when it was called.
        args - The list of positional arguments supplied when the decorated function was called.
        kwargs - The dictionary of keyword arguments supplied when the decorated function was called.
    Returns:
        wrapped function
    """
    # Log the stack for this call site (de-duplicated in STACK_BOOK), then
    # run the wrapped callable unchanged.
    capture_call_stack(wrapped)
    return wrapped(*args, **kwargs)
| agpl-3.0 |
tracierenea/gnuradio | grc/python/epy_block_io.py | 5 | 2648 |
import inspect
import collections
from gnuradio import gr
import pmt
TYPE_MAP = {
'complex64': 'complex', 'complex': 'complex',
'float32': 'float', 'float': 'float',
'int32': 'int', 'uint32': 'int',
'int16': 'short', 'uint16': 'short',
'int8': 'byte', 'uint8': 'byte',
}
BlockIO = collections.namedtuple('BlockIO', 'name cls params sinks sources doc')
def _ports(sigs, msgs):
ports = list()
for i, dtype in enumerate(sigs):
port_type = TYPE_MAP.get(dtype.name, None)
if not port_type:
raise ValueError("Can't map {0:!r} to GRC port type".format(dtype))
ports.append((str(i), port_type))
for msg_key in msgs:
if msg_key == 'system':
continue
ports.append((msg_key, 'message'))
return ports
def _blk_class(source_code):
    """Execute *source_code* and return the first gateway block class found.

    Raises ValueError when the code fails to execute or defines no subclass
    of gr.gateway.gateway_block.
    """
    ns = {}
    try:
        # Python 2 exec statement -- this module targets Python 2.
        exec source_code in ns
    except Exception as e:
        raise ValueError("Can't interpret source code: " + str(e))
    for var in ns.itervalues():
        if inspect.isclass(var)and issubclass(var, gr.gateway.gateway_block):
            return var
    raise ValueError('No python block class found in code')
def extract(cls):
    """Instantiate a python block and describe its GRC interface.

    Arguments:
        cls - a gateway block class, or a source-code string containing one
    Returns:
        BlockIO(name, cls, params, sinks, sources, doc)
    Raises:
        ValueError if __init__ args lack defaults; RuntimeError if the
        block cannot be instantiated with its defaults.
    """
    if not inspect.isclass(cls):
        cls = _blk_class(cls)
    spec = inspect.getargspec(cls.__init__)
    # Under Python 2 map() returns a list, so len() below is valid.
    defaults = map(repr, spec.defaults or ())
    doc = cls.__doc__ or cls.__init__.__doc__ or ''
    cls_name = cls.__name__
    # Every argument except 'self' must have a default so the block can be
    # instantiated without user-supplied values.
    if len(defaults) + 1 != len(spec.args):
        raise ValueError("Need all __init__ arguments to have default values")
    try:
        instance = cls()
    except Exception as e:
        raise RuntimeError("Can't create an instance of your block: " + str(e))
    name = instance.name()
    params = list(zip(spec.args[1:], defaults))
    sinks = _ports(instance.in_sig(),
                   pmt.to_python(instance.message_ports_in()))
    sources = _ports(instance.out_sig(),
                     pmt.to_python(instance.message_ports_out()))
    return BlockIO(name, cls_name, params, sinks, sources, doc)
if __name__ == '__main__':
blk_code = """
import numpy as np
from gnuradio import gr
import pmt
class blk(gr.sync_block):
def __init__(self, param1=None, param2=None):
"Test Docu"
gr.sync_block.__init__(
self,
name='Embedded Python Block',
in_sig = (np.float32,),
out_sig = (np.float32,np.complex64,),
)
self.message_port_register_in(pmt.intern('msg_in'))
self.message_port_register_out(pmt.intern('msg_out'))
def work(self, inputs_items, output_items):
return 10
"""
print extract(blk_code)
| gpl-3.0 |
asterisk/pjproject | tests/pjsua/scripts-recvfrom/230_reg_bad_fail_stale_true.py | 42 | 1625 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
# In this test we simulate broken server, where it always sends
# stale=true with all 401 responses. We should expect pjsip to
# retry the authentication until PJSIP_MAX_STALE_COUNT is
# exceeded. When pjsip retries the authentication, it should
# use the new nonce from server
# Account under test: registers against the local test registrar using
# digest credentials in realm "python".
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
        "--realm=python --user=username --password=password"

# Initial REGISTER carries no credentials; server challenges with nonce "1".
req1 = sip.RecvfromTransaction("Initial request", 401,
                               include=["REGISTER sip"],
                               exclude=["Authorization"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
                               )

# Retry answers nonce "1"; broken server challenges again with stale=true
# and a new nonce.  The exclude pattern asserts there is only a single
# Authorization header in the request.
req2 = sip.RecvfromTransaction("First retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"1\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
                               )

# Second retry must use the *new* nonce "2"; again answered with stale=true.
req3 = sip.RecvfromTransaction("Second retry retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"2\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"3\", stale=true"]
                               )

# Third retry: after one more stale=true challenge pjsip must give up with
# PJSIP_EAUTHSTALECOUNT (PJSIP_MAX_STALE_COUNT exceeded).
req4 = sip.RecvfromTransaction("Third retry", 401,
                               include=["REGISTER sip", "Authorization", "nonce=\"3\""],
                               exclude=["Authorization:[\\s\\S]+Authorization:"],
                               resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"4\", stale=true"],
                               expect="PJSIP_EAUTHSTALECOUNT"
                               )

recvfrom_cfg = sip.RecvfromCfg("Failed registration retry (server rejects with stale=true) ",
                               pjsua, [req1, req2, req3, req4])
| gpl-2.0 |
sctigercat1/panda3d | direct/src/distributed/DistributedObjectOV.py | 11 | 6524 |
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.distributed.DistributedObjectBase import DistributedObjectBase
#from PyDatagram import PyDatagram
#from PyDatagramIterator import PyDatagramIterator
# Values for DistributedObjectOV.activeState
# these should match DistributedObject.ES*
ESNew = 1
ESDeleted = 2
ESDisabling = 3
ESDisabled = 4 # values here and lower are considered "disabled"
ESGenerating = 5 # values here and greater are considered "generated"
ESGenerated = 6
class DistributedObjectOV(DistributedObjectBase):
    """
    Implementation of the 'owner view' (OV) of a distributed object;

    Lifecycle is tracked in self.activeState using the ES* constants
    (New -> Generating -> Generated -> Disabling -> Disabled -> Deleted).
    """
    notify = directNotify.newCategory("DistributedObjectOV")

    def __init__(self, cr):
        assert self.notify.debugStateCall(self)
        # Initialize-once guard: the attribute probe raises AttributeError
        # on the first call, so repeated __init__ calls (e.g. via multiple
        # inheritance) do not reset state.
        try:
            self.DistributedObjectOV_initialized
        except:
            self.DistributedObjectOV_initialized = 1
            DistributedObjectBase.__init__(self, cr)
            # Keep track of our state as a distributed object. This
            # is only trustworthy if the inheriting class properly
            # calls up the chain for disable() and generate().
            self.activeState = ESNew

    if __debug__:
        def status(self, indent=0):
            """
            print out "doId(parentId, zoneId) className"
            and conditionally show generated, disabled
            """
            spaces=' '*(indent+2)
            try:
                # Python 2 print statements; trailing commas suppress the
                # newline so the flags print on the same line.
                print "%s%s:"%(
                    ' '*indent, self.__class__.__name__)
                print "%sfrom DistributedObjectOV doId:%s, parent:%s, zone:%s"%(
                    spaces,
                    self.doId, self.parentId, self.zoneId),
                flags=[]
                if self.activeState == ESGenerated:
                    flags.append("generated")
                if self.activeState < ESGenerating:
                    flags.append("disabled")
                if len(flags):
                    print "(%s)"%(" ".join(flags),),
                print
            except Exception, e: print "%serror printing status"%(spaces,), e

    def getDelayDeleteCount(self):
        # OV objects cannot be delayDeleted
        return 0

    def deleteOrDelay(self):
        # No delay-delete support for owner views; tear down immediately.
        self.disableAnnounceAndDelete()

    def disableAnnounceAndDelete(self):
        self.disableAndAnnounce()
        self.delete()

    def disableAndAnnounce(self):
        # We must send the disable announce message *before* we
        # actually disable the object. That way, the various cleanup
        # tasks can run first and take care of restoring the object to
        # a normal, nondisabled state; and *then* the disable function
        # can properly disable it (for instance, by parenting it to
        # hidden).
        if self.activeState != ESDisabled:
            self.activeState = ESDisabling
            # 'messenger' is presumably the Panda3D global event broadcaster
            # installed as a builtin by direct.showbase -- confirm.
            messenger.send(self.uniqueName("disable"))
            self.disable()

    def announceGenerate(self):
        """
        Sends a message to the world after the object has been
        generated and all of its required fields filled in.
        """
        assert self.notify.debug('announceGenerate(): %s' % (self.doId))

    def disable(self):
        """
        Inheritors should redefine this to take appropriate action on disable
        """
        assert self.notify.debug('disable(): %s' % (self.doId))
        if self.activeState != ESDisabled:
            self.activeState = ESDisabled

    def isDisabled(self):
        """
        Returns true if the object has been disabled and/or deleted,
        or if it is brand new and hasn't yet been generated.
        """
        return (self.activeState < ESGenerating)

    def isGenerated(self):
        """
        Returns true if the object has been fully generated by now,
        and not yet disabled.
        """
        assert self.notify.debugStateCall(self)
        return (self.activeState == ESGenerated)

    def delete(self):
        """
        Inheritors should redefine this to take appropriate action on delete
        """
        assert self.notify.debug('delete(): %s' % (self.doId))
        # Delete-once guard, same pattern as __init__.
        try:
            self.DistributedObjectOV_deleted
        except:
            self.DistributedObjectOV_deleted = 1
            # Drop references so the repository and dclass can be collected.
            self.cr = None
            self.dclass = None

    def generate(self):
        """
        Inheritors should redefine this to take appropriate action on generate
        """
        assert self.notify.debugStateCall(self)
        self.activeState = ESGenerating
        # this has already been set at this point
        #self.cr.storeObjectLocation(self, self.parentId, self.zoneId)

    def generateInit(self):
        """
        This method is called when the DistributedObjectOV is first introduced
        to the world... Not when it is pulled from the cache.
        """
        self.activeState = ESGenerating

    def getDoId(self):
        """
        Return the distributed object id
        """
        return self.doId

    def postGenerateMessage(self):
        # Broadcast "generate" exactly once per generate cycle.
        if self.activeState != ESGenerated:
            self.activeState = ESGenerated
            messenger.send(self.uniqueName("generate"), [self])

    def updateRequiredFields(self, dclass, di):
        dclass.receiveUpdateBroadcastRequired(self, di)
        self.announceGenerate()
        self.postGenerateMessage()

    def updateAllRequiredFields(self, dclass, di):
        dclass.receiveUpdateAllRequired(self, di)
        self.announceGenerate()
        self.postGenerateMessage()

    def updateRequiredOtherFields(self, dclass, di):
        # First, update the required fields
        dclass.receiveUpdateBroadcastRequiredOwner(self, di)
        # Announce generate after updating all the required fields,
        # but before we update the non-required fields.
        self.announceGenerate()
        self.postGenerateMessage()
        dclass.receiveUpdateOther(self, di)

    def getCacheable(self):
        # Owner views are never cached.
        return False

    def sendUpdate(self, fieldName, args = [], sendToId = None):
        if self.cr:
            dg = self.dclass.clientFormatUpdate(
                fieldName, sendToId or self.doId, args)
            self.cr.send(dg)
        else:
            self.notify.warning("sendUpdate failed, because self.cr is not set")

    def taskName(self, taskString):
        # Unique per-object task name, suffixed to distinguish the OV.
        return ('%s-%s-OV' % (taskString, self.getDoId()))

    def uniqueName(self, idString):
        return ('%s-%s-OV' % (idString, self.getDoId()))
| bsd-3-clause |
jamesthechamp/zamboni | mkt/ratings/tests/test_cron.py | 18 | 2091 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.core import mail
from django.utils.encoding import smart_str
import mock
from nose.tools import eq_
import mkt.site.tests
from mkt.ratings.cron import email_daily_ratings
from mkt.ratings.models import Review
from mkt.site.fixtures import fixture
from mkt.webapps.models import AddonUser
from mkt.users.models import UserProfile
@mock.patch.object(settings, 'SEND_REAL_EMAIL', True)
class TestEmailDailyRatings(mkt.site.tests.TestCase):
    """Tests for the email_daily_ratings cron job."""
    # Canned user accounts used below.
    fixtures = fixture('users')

    def setUp(self):
        # Two apps owned by the same developer, so batching across an
        # author's apps can be exercised.
        self.app = mkt.site.tests.app_factory(name='test')
        self.app2 = mkt.site.tests.app_factory(name='test2')
        self.user = UserProfile.objects.get(email='regular@mozilla.com')
        AddonUser.objects.create(addon=self.app, user=self.user)
        AddonUser.objects.create(addon=self.app2, user=self.user)

    def test_emails_goes_out(self):
        # A review created one day ago must appear in today's digest email.
        self.app1_review = Review.objects.create(
            addon=self.app, user=self.user, rating=1,
            body='sux, I want my money back.')
        self.app1_review.update(created=self.days_ago(1))
        email_daily_ratings()
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [self.user.email])
        eq_(str(self.app1_review.body) in smart_str(mail.outbox[0].body), True)

    def test_one_email_for_multiple_reviews(self):
        # Multiple reviews of one app must be batched into a single email
        # containing all review bodies, not one message per review.
        self.app2_review = Review.objects.create(
            addon=self.app2, user=self.user, rating=4,
            body='waste of two seconds of my life.')
        self.app2_review.update(created=self.days_ago(1))
        self.app2_review2 = Review.objects.create(
            addon=self.app2, user=self.user, rating=5,
            body='a++ would play again')
        self.app2_review2.update(created=self.days_ago(1))
        email_daily_ratings()
        eq_(len(mail.outbox), 1)
        eq_(mail.outbox[0].to, [self.user.email])
        eq_(str(self.app2_review.body) in smart_str(mail.outbox[0].body), True)
        eq_(str(self.app2_review2.body) in smart_str(mail.outbox[0].body),
            True)
| bsd-3-clause |
Depado/pexpect | tests/qa.py | 22 | 1079 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import commands
import signal

# Ignore SIGCHLD -- presumably so automatically-reaped children do not
# interfere with commands.getoutput(); confirm against the test harness.
signal.signal(signal.SIGCHLD, signal.SIG_IGN)
print(commands.getoutput('/bin/ls -l'))
| isc |
petrleocompel/gnome15 | src/pylibg19/g19/receivers.py | 8 | 10382 | # Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2010 Brett Smith <tanktarta@blueyonder.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from keys import (Data, Key)
from runnable import Runnable
import threading
import time
import logging
logger = logging.getLogger(__name__)
class InputProcessor(object):
    '''Base class for objects that want to be offered key events.'''

    def process_input(self, inputEvent):
        '''Handle inputEvent.

        Implementations should return as quickly as possible; any expensive
        work belongs on a separate thread.

        @param inputEvent Event to process.
        @return True when the event was consumed, False when it was ignored.
        '''
        # Default implementation ignores every event.
        return False
class InputEvent(object):
    '''Snapshot of a single key press/release transition.'''

    def __init__(self, oldState, newState, keysDown, keysUp):
        '''Create an InputEvent.

        @param oldState State before the event happened.
        @param newState State after the event happened.
        @param keysDown Keys newly pressed by this event.
        @param keysUp   Keys released by this event.
        '''
        self.oldState, self.newState = oldState, newState
        self.keysDown, self.keysUp = keysDown, keysUp
class State(object):
    '''Current state of keyboard.

    Raw USB packets are fed in through the packet_received_* methods; each
    call mutates the held-key set and returns an InputEvent describing the
    transition (or None when the packet was ignored).
    '''

    def __init__(self):
        # Set of Key constants currently held down.
        self.__keysDown = set()

    def _data_to_keys_g_and_m(self, data):
        '''Converts a G/M keys data package to a set of keys defined as
        pressed by it.
        '''
        # NOTE(review): the message says "multimedia" although this parses
        # G/M packets; kept verbatim in case callers/tests match on it.
        if len(data) != 4 or data[0] != 2:
            raise ValueError("not a multimedia key packet: " + str(data))
        # 0x400000 is the bit pattern reported when no G/M key is pressed.
        empty = 0x400000
        curVal = data[3] << 16 | data[2] << 8 | data[1]
        keys = []
        while curVal != empty:
            foundAKey = False
            for val in Data.gmKeys.keys():
                if val & curVal == val:
                    curVal ^= val
                    keys.append(Data.gmKeys[val])
                    foundAKey = True
            if not foundAKey:
                raise ValueError("incorrect g/m key packet: " +
                                 str(data))
        return set(keys)
        # Bug fix: removed an unreachable 'self.__keysDown = set()' that
        # followed the return (dead code duplicated from __init__).

    def _data_to_keys_d(self, data):
        '''Converts a D data package to a set of keys defined as
        pressed by it.
        '''
        if len(data) != 2 or data[1] != 0x80:
            raise ValueError("not a D key packet: " + str(data))
        curVal = data[0]
        keys = []
        # A value of zero means every D key was released.
        if curVal != 0:
            foundAKey = False
            for val in Data.displayKeys.keys():
                if val & curVal == val:
                    curVal ^= val
                    keys.append(Data.displayKeys[val])
                    foundAKey = True
            if not foundAKey:
                raise ValueError("incorrect D key packet: " +
                                 str(data))
        return set(keys)

    def _data_to_keys_mm(self, data):
        '''Converts a multimedia keys data package to a set of keys defined as
        pressed by it.
        '''
        if len(data) != 2 or data[0] not in [1, 3]:
            raise ValueError("not a multimedia key packet: " + str(data))
        if data[0] == 1:
            # Bitmask of multimedia keys currently down.
            curVal = data[1]
            keys = []
            while curVal:
                foundAKey = False
                for val in Data.mmKeys.keys():
                    if val & curVal == val:
                        curVal ^= val
                        keys.append(Data.mmKeys[val])
                        foundAKey = True
                if not foundAKey:
                    raise ValueError("incorrect multimedia key packet: " +
                                     str(data))
        elif data == [3, 1]:
            # Winkey switch pressed.
            keys = [Key.WINKEY_SWITCH]
        elif data == [3, 0]:
            # Winkey switch released.
            keys = []
        else:
            raise ValueError("incorrect multimedia key packet: " + str(data))
        return set(keys)

    def _update_keys_down(self, possibleKeys, keys):
        '''Updates internal keysDown set.

        Updates the current state of all keys in 'possibleKeys' with state
        given in 'keys'.

        Example:
            Currently set as pressed in self.__keysDown: [A, B]
            possibleKeys: [B, C, D]
            keys: [C]
            This would set self.__keysDown to [A, C] and return ([C], [B])

        @param possibleKeys Keys whose state could be given as 'pressed' at the
        same time by 'keys'.
        @param keys Current state of all keys in possibleKeys.
        @return A pair of sets listing newly pressed and newly released keys.
        '''
        keysDown = set()
        keysUp = set()
        for key in possibleKeys:
            if key in keys:
                if key not in self.__keysDown:
                    self.__keysDown.add(key)
                    keysDown.add(key)
            else:
                if key in self.__keysDown:
                    self.__keysDown.remove(key)
                    keysUp.add(key)
        return (keysDown, keysUp)

    def clone(self):
        '''Returns an exact copy of this state.'''
        state = State()
        state._State__keysDown = set(self.__keysDown)
        return state

    def packet_received_g_and_m(self, data):
        '''Mutates the state by given data packet from G- and M- keys.

        @param data Data packet received.
        @return InputEvent for data packet, or None if data packet was ignored.
        '''
        oldState = self.clone()
        evt = None
        logger.debug("G key of %d", len(data))
        if len(data) == 4:
            keys = self._data_to_keys_g_and_m(data)
            keysDown, keysUp = self._update_keys_down(Key.gmKeys, keys)
            newState = self.clone()
            evt = InputEvent(oldState, newState, keysDown, keysUp)
        return evt

    def packet_received_d(self, data):
        '''Mutates the state by given data packet from D- keys.

        @param data Data packet received.
        @return InputEvent for data packet, or None if data packet was ignored.
        '''
        oldState = self.clone()
        evt = None
        logger.debug("D key of %d", len(data))
        if len(data) == 2:
            keys = self._data_to_keys_d(data)
            keysDown, keysUp = self._update_keys_down(Key.displayKeys, keys)
            newState = self.clone()
            evt = InputEvent(oldState, newState, keysDown, keysUp)
        return evt

    def packet_received_mm(self, data):
        '''Mutates the state by given data packet from multimedia keys.

        @param data Data packet received.
        @return InputEvent for data packet.
        '''
        oldState = self.clone()
        if len(data) != 2:
            raise ValueError("incorrect multimedia key packet: " + str(data))
        logger.debug("MM or Win key of %d", len(data))
        keys = self._data_to_keys_mm(data)
        winKeySet = set([Key.WINKEY_SWITCH])
        if data[0] == 1:
            # update state of all mm keys
            logger.debug("MM key %d", len(data))
            possibleKeys = Key.mmKeys.difference(winKeySet)
            keysDown, keysUp = self._update_keys_down(possibleKeys, keys)
        else:
            # update winkey state
            logger.debug("Win key")
            keysDown, keysUp = self._update_keys_down(winKeySet, keys)
        newState = self.clone()
        return InputEvent(oldState, newState, keysDown, keysUp)
class G19Receiver(Runnable):
    '''This receiver consumes all data sent by special keys.'''

    def __init__(self, g19):
        Runnable.__init__(self)
        self.__g19 = g19
        self.__ips = []                    # registered InputProcessor objects
        self.__mutex = threading.Lock()    # guards self.__ips
        self.__state = State()

    def add_input_processor(self, processor):
        '''Adds an input processor.'''
        # Bug fix: the lock was taken with explicit acquire()/release(),
        # which leaks the lock if the guarded code raises; 'with' releases
        # it unconditionally.  (Also removed a stray trailing 'pass'.)
        with self.__mutex:
            self.__ips.append(processor)

    def execute(self):
        '''Polls the keyboard endpoints once and dispatches any events.

        Called repeatedly by the Runnable main loop; sleeps briefly when no
        endpoint produced data to avoid busy-waiting.
        '''
        gotData = False
        processors = self.list_all_input_processors()

        if self.__g19.enable_mm_keys:
            data = self.__g19.read_multimedia_keys()
            if data:
                logger.debug('MM keys data %s', len(data))
                evt = self.__state.packet_received_mm(data)
                if evt:
                    # First processor that consumes the event wins.
                    for proc in processors:
                        if proc.process_input(evt):
                            break
                else:
                    logger.info('MM keys ignored')
                gotData = True

        data = self.__g19.read_g_and_m_keys()
        if data:
            logger.debug('G/M keys data %s', len(data))
            evt = self.__state.packet_received_g_and_m(data)
            if evt:
                for proc in processors:
                    if proc.process_input(evt):
                        break
            else:
                logger.info('G/M keys ignored')
            gotData = True

        data = self.__g19.read_display_menu_keys()
        if data:
            logger.debug('Menu keys Data %s', len(data))
            evt = self.__state.packet_received_d(data)
            if evt:
                for proc in processors:
                    if proc.process_input(evt):
                        break
            else:
                logger.info('Menu keys ignored')
            gotData = True

        if not gotData:
            time.sleep(0.05)

    def list_all_input_processors(self):
        '''Returns a list of all input processors currently registered to this
        receiver.

        @return All registered processors. This list is a copy of the internal
        one.
        '''
        with self.__mutex:
            return list(self.__ips)
| gpl-3.0 |
securionpay/securionpay-python | securionpay/resource.py | 2 | 1099 | import securionpay as api
import requests
class Resource(object):
    """Base class for SecurionPay REST API resources."""

    def name(self):
        """Resource name derived from the class name (lower-cased)."""
        return type(self).__name__.lower()

    def _get(self, path, params=None):
        return self.request('GET', path, params)

    def _post(self, path, params=None):
        return self.request('POST', path, params)

    def _delete(self, path, params=None):
        return self.request('DELETE', path, params)

    @staticmethod
    def request(method, path, params=None):
        """Issue an authenticated API request and return the decoded JSON.

        Raises api.SecurionPayException for any non-200 response.
        """
        url = api.url.rstrip('/') + path
        # GET/DELETE send params in the query string; everything else as JSON.
        if method in ('GET', 'DELETE'):
            kwargs = {'params': params}
        else:
            kwargs = {'json': params}
        resp = requests.request(method, url, auth=(api.private_key, ''), **kwargs)
        body = resp.json()
        if resp.status_code == 200:
            return body
        error = body.get('error')
        if error is None:
            raise api.SecurionPayException('Internal error', None, body, None, None)
        raise api.SecurionPayException(error.get('type'), error.get('code'),
                                       error.get('message'), error.get('charge_id'),
                                       error.get('blacklist_rule_id'))
| mit |
jeroenj/CouchPotatoServer | libs/CodernityDB/storage.py | 81 | 3736 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import struct
import shutil
import marshal
import io
try:
from CodernityDB import __version__
except ImportError:
from __init__ import __version__
class StorageException(Exception):
    """Base exception type for the storage layer."""
    pass
class DummyStorage(object):
    """No-op storage stand-in.

    Mimics the real storage interface but stores nothing; every write
    reports position ``(0, 0)`` and every read yields ``None``.
    """

    def create(self, *args, **kwargs):
        return None

    def open(self, *args, **kwargs):
        return None

    def close(self, *args, **kwargs):
        return None

    def data_from(self, *args, **kwargs):
        return None

    def data_to(self, *args, **kwargs):
        return None

    def save(self, *args, **kwargs):
        # Fake (start, size) location.
        return 0, 0

    def insert(self, *args, **kwargs):
        return self.save(*args, **kwargs)

    def update(self, *args, **kwargs):
        return 0, 0

    def get(self, *args, **kwargs):
        return None

    def fsync(self, *args, **kwargs):
        return None

    def flush(self, *args, **kwargs):
        return None
class IU_Storage(object):
    """Append-only record storage backed by a single ``<name>_stor`` file.

    Records are serialized with ``marshal`` and addressed by the
    ``(start, size)`` pair returned from :meth:`save` / :meth:`insert`.

    .. warning:: ``marshal`` offers no protection against malicious
       input; storage files must come from a trusted source.
    """

    __version__ = __version__

    def __init__(self, db_path, name='main'):
        """
        :param db_path: directory holding the storage file
        :param name: basename of the storage file (``<name>_stor``)
        """
        self.db_path = db_path
        self.name = name
        # Bytes reserved at the start of the file for version metadata.
        self._header_size = 100

    def create(self):
        """Create a fresh storage file; raise IOError if it already exists."""
        if os.path.exists(os.path.join(self.db_path, self.name + "_stor")):
            raise IOError("Storage already exists!")
        with io.open(os.path.join(self.db_path, self.name + "_stor"), 'wb') as f:
            # 100-byte header: 10 bytes of version string + 90 bytes padding.
            # (The redundant f.close() inside the 'with' block was removed;
            # the context manager already closes the file.)
            f.write(struct.pack("10s90s", self.__version__, '|||||'))
        self._f = io.open(os.path.join(
            self.db_path, self.name + "_stor"), 'r+b', buffering=0)
        self.flush()
        # Position at end of file so new records are appended.
        self._f.seek(0, 2)

    def open(self):
        """Open an existing storage file; raise IOError if it is missing."""
        if not os.path.exists(os.path.join(self.db_path, self.name + "_stor")):
            raise IOError("Storage doesn't exists!")
        self._f = io.open(os.path.join(
            self.db_path, self.name + "_stor"), 'r+b', buffering=0)
        self.flush()
        self._f.seek(0, 2)

    def destroy(self):
        """Delete the storage file from disk."""
        os.unlink(os.path.join(self.db_path, self.name + '_stor'))

    def close(self):
        self._f.close()

    def data_from(self, data):
        """Deserialize raw bytes read from the file."""
        return marshal.loads(data)

    def data_to(self, data):
        """Serialize a record to raw bytes."""
        return marshal.dumps(data)

    def save(self, data):
        """Append *data* to the file and return its (start, size) location."""
        s_data = self.data_to(data)
        self._f.seek(0, 2)
        start = self._f.tell()
        size = len(s_data)
        self._f.write(s_data)
        self.flush()
        return start, size

    def insert(self, data):
        return self.save(data)

    def update(self, data):
        # Updates are append-only too; the index tracks the new location.
        return self.save(data)

    def get(self, start, size, status='c'):
        """Read the record at (start, size); status 'd' (deleted) yields None."""
        if status == 'd':
            return None
        self._f.seek(start)
        return self.data_from(self._f.read(size))

    def flush(self):
        self._f.flush()

    def fsync(self):
        os.fsync(self._f.fileno())
# classes for public use, done in this way because of
# generation static files with indexes (_index directory)
class Storage(IU_Storage):
    # Public alias of IU_Storage; kept as a separate class because of the
    # static index-file generation (_index directory) mentioned above.
    pass
| gpl-3.0 |
misham/etaluma-kernel | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
	""" This class provides a tree representation of the functions
	call stack. If a function has no parent in the kernel (interrupt,
	syscall, kernel thread...) then it is attached to a virtual parent
	called ROOT.
	"""
	ROOT = None

	def __init__(self, func, time = None, parent = None):
		self._func = func
		self._time = time
		# Orphan nodes hang off the shared virtual ROOT.
		self._parent = CallTree.ROOT if parent is None else parent
		self._children = []

	def calls(self, func, calltime):
		""" If a function calls another one, call this method to insert it
		into the tree at the appropriate place.
		@return: A reference to the newly created child node.
		"""
		node = CallTree(func, calltime, self)
		self._children.append(node)
		return node

	def getParent(self, func):
		""" Retrieve the last parent of the current node that
		has the name given by func. If this function is not
		on a parent, then create it as new child of root
		@return: A reference to the parent.
		"""
		node = self
		while node != CallTree.ROOT and node._func != func:
			node = node._parent
		if node == CallTree.ROOT:
			# Caller not found on the ancestor chain: attach under ROOT.
			return CallTree.ROOT.calls(func, None)
		return node

	def __repr__(self):
		return self.__toString("", True)

	def __toString(self, branch, lastChild):
		# One line for this node, then recurse into the children with a
		# branch prefix that draws the tree's vertical guides.
		if self._time is None:
			s = "%s----%s\n" % (branch, self._func)
		else:
			s = "%s----%s (%s)\n" % (branch, self._func, self._time)
		if lastChild:
			# The last child of a node no longer needs its parent's guide.
			branch = branch[:-1] + " "
		last = len(self._children) - 1
		for i, child in enumerate(self._children):
			s += child.__toString(branch + " |", i == last)
		return s
class BrokenLineException(Exception):
	"""If the last line is not complete because of the pipe breakage,
	we want to stop the processing and ignore this line.
	"""
	pass
class CommentLineException(Exception):
	""" If the line is a comment (as in the beginning of the trace file),
	just ignore it.
	"""
	pass
def parseLine(line):
	"""Parse one ftrace line into a (calltime, callee, caller) tuple.

	Raises CommentLineException for '#' comment lines, and
	BrokenLineException for lines that do not match the expected
	"...] <time>: <callee> <-<caller>" format (e.g. a line cut in half
	when the pipe was broken).
	"""
	line = line.strip()
	if line.startswith("#"):
		raise CommentLineException
	# Raw string avoids double-escaping the regex metacharacters.
	m = re.match(r"[^]]+?\] +([0-9.]+): (\w+) <-(\w+)", line)
	if m is None:
		raise BrokenLineException
	return (m.group(1), m.group(2), m.group(3))
def main():
	# Read a raw function trace from stdin and print the resulting tree.
	CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
	tree = CallTree.ROOT

	for line in sys.stdin:
		try:
			calltime, callee, caller = parseLine(line)
		except BrokenLineException:
			# Truncated final line (broken pipe): stop processing.
			break
		except CommentLineException:
			continue
		# Climb back to the caller's frame, then record the callee there.
		tree = tree.getParent(caller)
		tree = tree.calls(callee, calltime)

	print CallTree.ROOT
main()
| gpl-2.0 |
mcocdawc/chemcoord | src/chemcoord/configuration.py | 1 | 3385 | # -*- coding: utf-8 -*-
try:
import configparser
except ImportError:
# Due to PY27 compatibility
import ConfigParser as configparser
import os
from warnings import warn
def _give_default_file_path():
HOME = os.path.expanduser('~')
filepath = os.path.join(HOME, '.chemcoordrc')
return filepath
def provide_default_settings():
    """Return the default settings dictionary."""
    defaults = {
        # Cartesian().get_bonds() may cache bond lookups. Greatly increases
        # performance if True, but could introduce bugs if the
        # Cartesian().xyz_frame is changed manually.
        'use_lookup': False,
        'atomic_radius_data': 'atomic_radius_cc',
        # Alternative viewers: 'avogadro', 'molden', 'jmol'.
        'viewer': 'gv.exe',
    }
    return {'defaults': defaults}
def write_configuration_file(filepath=_give_default_file_path(),
                             overwrite=False):
    """Create a configuration file.

    Writes the current state of settings into a configuration file.

    .. note:: Since a file is permamently written, this function
        is strictly speaking not sideeffect free.

    Args:
        filepath (str): Where to write the file.
            The default is under both UNIX and Windows ``~/.chemcoordrc``.
        overwrite (bool):

    Returns:
        None:
    """
    config = configparser.ConfigParser()
    # Seed the parser from the module-level ``settings`` dict.
    config.read_dict(settings)
    if os.path.isfile(filepath) and not overwrite:
        try:
            raise FileExistsError
        except NameError:  # because of python2
            # NOTE(review): on Python 3 FileExistsError exists, so the raise
            # propagates instead of warning; only Python 2 reaches this
            # handler. Confirm the asymmetric behaviour is intended.
            warn('File exists already and overwrite is False (default).')
    else:
        with open(filepath, 'w') as configfile:
            config.write(configfile)
def read_configuration_file(filepath=_give_default_file_path()):
    """Read the configuration file.

    .. note:: This function changes ``cc.settings`` inplace and is
        therefore not sideeffect free.

    Args:
        filepath (str): Where to read the file.
            The default is under both UNIX and Windows ``~/.chemcoordrc``.

    Returns:
        None:
    """
    config = configparser.ConfigParser()
    config.read(filepath)

    def get_correct_type(section, key, config):
        """Gives e.g. the boolean True for the string 'True'"""
        # configparser stores everything as strings; these helpers convert
        # the few keys that need a real type.
        def getstring(section, key, config):
            return config[section][key]

        def getinteger(section, key, config):  # pylint:disable=unused-variable
            return config[section].getint(key)

        def getboolean(section, key, config):
            return config[section].getboolean(key)

        def getfloat(section, key, config):  # pylint:disable=unused-variable
            return config[section].getfloat(key)
        # Something different than a string is expected for these keys.
        special_actions = {}
        special_actions['defaults'] = {}
        special_actions['defaults']['use_lookup'] = getboolean
        try:
            return special_actions[section][key](section, key, config)
        except KeyError:
            # No special converter registered: keep the raw string.
            return getstring(section, key, config)

    # NOTE(review): unknown sections in the file raise KeyError because
    # ``settings`` only contains the default sections — confirm acceptable.
    for section in config.sections():
        for key in config[section]:
            settings[section][key] = get_correct_type(section, key, config)
    return settings
settings = provide_default_settings()
read_configuration_file()
| lgpl-3.0 |
courtarro/gnuradio | gr-utils/python/modtool/__init__.py | 40 | 1424 | #
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from cmakefile_editor import CMakeFileEditor
from code_generator import GRMTemplate
from grc_xml_generator import GRCXMLGenerator
from modtool_base import ModTool, ModToolException, get_class_dict
from modtool_add import ModToolAdd
from modtool_disable import ModToolDisable
from modtool_info import ModToolInfo
from modtool_makexml import ModToolMakeXML
from modtool_newmod import ModToolNewModule
from modtool_rm import ModToolRemove
from modtool_rename import ModToolRename
from templates import Templates
# Leave this at the end
from modtool_help import ModToolHelp
from parser_cc_block import ParserCCBlock
from util_functions import *
| gpl-3.0 |
dwadler/QGIS | tests/src/python/test_qgsrasterbandcombobox.py | 23 | 3809 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsRasterBandComboBox.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '09/05/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import qgis # NOQA
import os
from qgis.core import QgsRasterLayer
from qgis.gui import QgsRasterBandComboBox
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QFileInfo
from qgis.PyQt.QtTest import QSignalSpy
from utilities import unitTestDataPath
start_app()
class TestQgsRasterBandComboBox(unittest.TestCase):
    """Tests for QgsRasterBandComboBox band selection, 'not set' option
    handling and change signals."""

    def testNoLayer(self):
        """
        Test widget with no layer
        """
        combo = QgsRasterBandComboBox()
        self.assertFalse(combo.layer())
        self.assertEqual(combo.currentBand(), -1)
        combo.setShowNotSetOption(True)
        self.assertEqual(combo.currentBand(), -1)
        # Out-of-range band numbers must be ignored when no layer is set.
        combo.setBand(11111)
        self.assertEqual(combo.currentBand(), -1)
        combo.setBand(-11111)
        self.assertEqual(combo.currentBand(), -1)

    def testOneBandRaster(self):
        # A one-band raster exposes a single entry, plus an optional
        # "not set" entry toggled by setShowNotSetOption().
        path = os.path.join(unitTestDataPath('raster'),
                            'band1_float32_noct_epsg4326.tif')
        info = QFileInfo(path)
        base_name = info.baseName()
        layer = QgsRasterLayer(path, base_name)
        self.assertTrue(layer)
        combo = QgsRasterBandComboBox()
        combo.setLayer(layer)
        self.assertEqual(combo.layer(), layer)
        self.assertEqual(combo.currentBand(), 1)
        self.assertEqual(combo.count(), 1)
        combo.setShowNotSetOption(True)
        self.assertEqual(combo.currentBand(), 1)
        self.assertEqual(combo.count(), 2)
        combo.setBand(-1)
        self.assertEqual(combo.currentBand(), -1)
        combo.setBand(1)
        self.assertEqual(combo.currentBand(), 1)
        # Disabling the "not set" option removes that entry again.
        combo.setShowNotSetOption(False)
        self.assertEqual(combo.currentBand(), 1)
        self.assertEqual(combo.count(), 1)

    def testMultiBandRaster(self):
        # A three-band raster exposes three entries; the current band must
        # survive toggling the "not set" option.
        path = os.path.join(unitTestDataPath('raster'),
                            'band3_float32_noct_epsg4326.tif')
        info = QFileInfo(path)
        base_name = info.baseName()
        layer = QgsRasterLayer(path, base_name)
        self.assertTrue(layer)
        combo = QgsRasterBandComboBox()
        combo.setLayer(layer)
        self.assertEqual(combo.layer(), layer)
        self.assertEqual(combo.currentBand(), 1)
        self.assertEqual(combo.count(), 3)
        combo.setBand(2)
        self.assertEqual(combo.currentBand(), 2)
        combo.setShowNotSetOption(True)
        self.assertEqual(combo.currentBand(), 2)
        self.assertEqual(combo.count(), 4)
        combo.setShowNotSetOption(False)
        self.assertEqual(combo.currentBand(), 2)
        self.assertEqual(combo.count(), 3)

    def testSignals(self):
        # bandChanged must fire once per setBand() with the new band number.
        path = os.path.join(unitTestDataPath('raster'),
                            'band3_float32_noct_epsg4326.tif')
        info = QFileInfo(path)
        base_name = info.baseName()
        layer = QgsRasterLayer(path, base_name)
        self.assertTrue(layer)
        combo = QgsRasterBandComboBox()
        combo.setLayer(layer)
        signal_spy = QSignalSpy(combo.bandChanged)
        combo.setBand(2)
        self.assertEqual(len(signal_spy), 1)
        self.assertEqual(signal_spy[0][0], 2)
        combo.setBand(3)
        self.assertEqual(len(signal_spy), 2)
        self.assertEqual(signal_spy[1][0], 3)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
sertac/django | tests/template_tests/filter_tests/test_cut.py | 521 | 2269 | from django.template.defaultfilters import cut
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class CutTests(SimpleTestCase):
    """Template-level tests for the |cut filter, covering autoescape
    interaction with plain and mark_safe inputs."""

    @setup({'cut01': '{% autoescape off %}{{ a|cut:"x" }} {{ b|cut:"x" }}{% endautoescape %}'})
    def test_cut01(self):
        output = self.engine.render_to_string('cut01', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "&y &amp;y")

    @setup({'cut02': '{{ a|cut:"x" }} {{ b|cut:"x" }}'})
    def test_cut02(self):
        output = self.engine.render_to_string('cut02', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "&amp;y &amp;y")

    @setup({'cut03': '{% autoescape off %}{{ a|cut:"&" }} {{ b|cut:"&" }}{% endautoescape %}'})
    def test_cut03(self):
        output = self.engine.render_to_string('cut03', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "xy xamp;y")

    @setup({'cut04': '{{ a|cut:"&" }} {{ b|cut:"&" }}'})
    def test_cut04(self):
        output = self.engine.render_to_string('cut04', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "xy xamp;y")

    # Passing ';' to cut can break existing HTML entities, so those strings
    # are auto-escaped.
    @setup({'cut05': '{% autoescape off %}{{ a|cut:";" }} {{ b|cut:";" }}{% endautoescape %}'})
    def test_cut05(self):
        output = self.engine.render_to_string('cut05', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "x&y x&ampy")

    @setup({'cut06': '{{ a|cut:";" }} {{ b|cut:";" }}'})
    def test_cut06(self):
        output = self.engine.render_to_string('cut06', {"a": "x&y", "b": mark_safe("x&amp;y")})
        self.assertEqual(output, "x&amp;y x&amp;ampy")
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the cut() filter function."""

    def test_character(self):
        self.assertEqual(cut('a string to be mangled', 'a'), ' string to be mngled')

    def test_characters(self):
        self.assertEqual(cut('a string to be mangled', 'ng'), 'a stri to be maled')

    def test_non_matching_string(self):
        # Removing a substring that does not occur leaves input unchanged.
        self.assertEqual(cut('a string to be mangled', 'strings'), 'a string to be mangled')

    def test_non_string_input(self):
        # Non-string input is coerced to str before cutting.
        self.assertEqual(cut(123, '2'), '13')
| bsd-3-clause |
NicovincX2/Python-3.5 | Algèbre/Algèbre linéaire/Algèbre multilinéaire/slice3.py | 1 | 7017 | # -*- coding: utf-8 -*-
"""
slice3.py - plot 3D data on a uniform tensor-product grid as a set of
three adjustable xy, yz, and xz plots
Copyright (c) 2013 Greg von Winckel
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Created on Wed Dec 4 11:24:14 MST 2013
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
def meshgrid3(x, y, z):
    """Create a three-dimensional, 'ij'-indexed meshgrid.

    Each returned array has shape (len(x), len(y), len(z)) with
    xx[i, j, k] == x[i], yy[i, j, k] == y[j], zz[i, j, k] == z[k] —
    exactly what the original hand-rolled tile/reshape/swapaxes
    construction produced, now via the standard numpy call.
    """
    xx, yy, zz = np.meshgrid(x, y, z, indexing='ij')
    return xx, yy, zz
class DiscreteSlider(Slider):
    """A matplotlib slider widget with discrete steps.

    Created by Joe Kington and submitted to StackOverflow on Dec 1 2012
    http://stackoverflow.com/questions/13656387/can-i-make-matplotlib-sliders-more-discrete
    """

    def __init__(self, *args, **kwargs):
        """Identical to Slider.__init__, except for the "increment" kwarg.

        "increment" specifies the step size that the slider will be
        discritized to."""
        self.inc = kwargs.pop('increment', 1)
        Slider.__init__(self, *args, **kwargs)

    def set_val(self, val):
        """Move the slider handle to *val* and notify observers."""
        xy = self.poly.xy
        xy[2] = val, 1
        xy[3] = val, 0
        self.poly.xy = xy
        # Suppress slider label
        self.valtext.set_text('')
        if self.drawon:
            self.ax.figure.canvas.draw()
        self.val = val
        if not self.eventson:
            return
        # dict.iteritems() is Python-2-only; items() works on both 2 and 3.
        for cid, func in self.observers.items():
            func(val)
class slice3(object):
    """Interactive viewer showing three orthogonal slices (yz, xz, xy)
    of a 3D scalar field ``u`` sampled on the tensor-product grid
    (xx, yy, zz), each slice selectable with a discrete slider."""

    def __init__(self, xx, yy, zz, u):
        # Recover the 1D coordinate axes from the 'ij'-indexed meshgrids.
        self.x = xx[:, 0, 0]
        self.y = yy[0, :, 0]
        self.z = zz[0, 0, :]
        self.data = u
        self.fig = plt.figure(1, (20, 7))
        # ax1: x-slice (z-y plane), ax2: y-slice (z-x), ax3: z-slice (x-y).
        self.ax1 = self.fig.add_subplot(131, aspect='equal')
        self.ax2 = self.fig.add_subplot(132, aspect='equal')
        self.ax3 = self.fig.add_subplot(133, aspect='equal')
        # Dashed magenta cross-hairs marking the other two slice positions.
        self.xplot_zline = self.ax1.axvline(color='m', linestyle='--', lw=2)
        self.xplot_zline.set_xdata(self.z[0])
        self.xplot_yline = self.ax1.axhline(color='m', linestyle='--', lw=2)
        self.xplot_yline.set_ydata(self.y[0])
        self.yplot_xline = self.ax2.axhline(color='m', linestyle='--', lw=2)
        self.yplot_xline.set_ydata(self.x[0])
        self.yplot_zline = self.ax2.axvline(color='m', linestyle='--', lw=2)
        self.yplot_zline.set_xdata(self.z[0])
        self.zplot_xline = self.ax3.axvline(color='m', linestyle='--', lw=2)
        self.zplot_xline.set_xdata(self.x[0])
        self.zplot_yline = self.ax3.axhline(color='m', linestyle='--', lw=2)
        self.zplot_yline.set_ydata(self.y[0])
        # Initial images: first slice along each axis.
        self.xslice = self.ax1.imshow(u[0, :, :], extent=(
            self.z[0], self.z[-1], self.y[0], self.y[-1]))
        self.yslice = self.ax2.imshow(u[:, 0, :], extent=(
            self.z[0], self.z[-1], self.x[0], self.x[-1]))
        self.zslice = self.ax3.imshow(u[:, :, 0], extent=(
            self.x[0], self.x[-1], self.y[0], self.y[-1]))
        # Create and initialize x-slider
        self.sliderax1 = self.fig.add_axes([0.125, 0.08, 0.225, 0.03])
        self.sliderx = DiscreteSlider(
            self.sliderax1, '', 0, len(self.x) - 1, increment=1, valinit=0)
        self.sliderx.on_changed(self.update_x)
        self.sliderx.set_val(0)
        # Create and initialize y-slider
        self.sliderax2 = self.fig.add_axes([0.4, 0.08, 0.225, 0.03])
        self.slidery = DiscreteSlider(
            self.sliderax2, '', 0, len(self.y) - 1, increment=1, valinit=0)
        self.slidery.on_changed(self.update_y)
        self.slidery.set_val(0)
        # Create and initialize z-slider
        self.sliderax3 = self.fig.add_axes([0.675, 0.08, 0.225, 0.03])
        self.sliderz = DiscreteSlider(
            self.sliderax3, '', 0, len(self.z) - 1, increment=1, valinit=0)
        self.sliderz.on_changed(self.update_z)
        self.sliderz.set_val(0)
        # Scale each panel so data units display isotropically.
        z0, z1 = self.ax1.get_xlim()
        x0, x1 = self.ax2.get_ylim()
        y0, y1 = self.ax1.get_ylim()
        self.ax1.set_aspect((z1 - z0) / (y1 - y0))
        self.ax2.set_aspect((z1 - z0) / (x1 - x0))
        self.ax3.set_aspect((x1 - x0) / (y1 - y0))

    def xlabel(self, *args, **kwargs):
        # The x axis appears on the y-slice (vertical) and z-slice panels.
        self.ax2.set_ylabel(*args, **kwargs)
        self.ax3.set_xlabel(*args, **kwargs)

    def ylabel(self, *args, **kwargs):
        self.ax1.set_ylabel(*args, **kwargs)
        self.ax3.set_ylabel(*args, **kwargs)

    def zlabel(self, *args, **kwargs):
        self.ax1.set_xlabel(*args, **kwargs)
        self.ax2.set_xlabel(*args, **kwargs)

    def update_x(self, value):
        # Redraw the x-slice and move its cross-hair in the other panels.
        self.xslice.set_data(self.data[value, :, :])
        self.yplot_xline.set_ydata(self.x[value])
        self.zplot_xline.set_xdata(self.x[value])

    def update_y(self, value):
        self.yslice.set_data(self.data[:, value, :])
        self.xplot_yline.set_ydata(self.y[value])
        self.zplot_yline.set_ydata(self.y[value])

    def update_z(self, value):
        self.zslice.set_data(self.data[:, :, value])
        self.xplot_zline.set_xdata(self.z[value])
        self.yplot_zline.set_xdata(self.z[value])

    def show(self):
        plt.show()
if __name__ == '__main__':
    # Number of x-grid points
    nx = 100
    # Number of y- and z-grid points
    ny = 100
    nz = 200
    x = np.linspace(-4, 4, nx)
    y = np.linspace(-4, 4, ny)
    z = np.linspace(0, 8, nz)
    xx, yy, zz = meshgrid3(x, y, z)
    # Display three cross sections of a Gaussian Beam/Paraxial wave
    u = np.real(np.exp(-(2 * xx**2 + yy**2) / (.2 + 2j * zz)) /
                np.sqrt(.2 + 2j * zz))
    s3 = slice3(xx, yy, zz, u)
    s3.xlabel('x', fontsize=18)
    s3.ylabel('y', fontsize=18)
    s3.zlabel('z', fontsize=18)
    s3.show()
    # NOTE(review): "pause" is a Windows shell command; this is a no-op
    # (with an error message) on other platforms — confirm intended.
    os.system("pause")
| gpl-3.0 |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pyrsistent/_transformations.py | 2 | 3800 | import re
try:
from inspect import Parameter, signature
except ImportError:
signature = None
from inspect import getfullargspec
_EMPTY_SENTINEL = object()
def inc(x):
    """Transformer command: increment the current value by one."""
    return 1 + x
def dec(x):
    """Transformer command: decrement the current value by one."""
    return x - 1
def discard(evolver, key):
    """Transformer command: drop *key* from the evolver.

    Missing keys are silently ignored, mirroring set.discard semantics.
    """
    try:
        del evolver[key]
    except KeyError:
        pass
# Matchers
def rex(expr):
    """Regular expression matcher to use together with transform functions."""
    pattern = re.compile(expr)

    def _matches(key):
        # Only string keys can be regex-matched; everything else fails.
        return isinstance(key, str) and pattern.match(key)
    return _matches
def ny(_):
    """Wildcard matcher: matches any key in a transform path."""
    return True
# Support functions
def _chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
def transform(structure, transformations):
    """Apply a flat sequence of (path, command) pairs to *structure*.

    *transformations* alternates paths and commands; each pair is applied
    to the result of the previous one.
    """
    result = structure
    for path, command in _chunks(transformations, 2):
        result = _do_to_path(result, path, command)
    return result
def _do_to_path(structure, path, command):
    # Empty path: we've reached the target. A callable command transforms
    # the current value; anything else replaces it verbatim.
    if not path:
        if callable(command):
            return command(structure)
        return command
    kvs = _get_keys_and_values(structure, path[0])
    return _update_structure(structure, kvs, path[1:], command)
def _items(structure):
try:
return structure.items()
except AttributeError:
# Support wider range of structures by adding a transform_items() or similar?
return list(enumerate(structure))
def _get(structure, key, default):
try:
if hasattr(structure, '__getitem__'):
return structure[key]
return getattr(structure, key)
except (IndexError, KeyError):
return default
def _get_keys_and_values(structure, key_spec):
    """Resolve one path element to a list of matching (key, value) pairs."""
    if callable(key_spec):
        # Support predicates as callable objects in the path
        arity = _get_arity(key_spec)
        if arity == 1:
            # Unary predicates are called with the "key" of the path
            # - eg a key in a mapping, an index in a sequence.
            return [(k, v) for k, v in _items(structure) if key_spec(k)]
        elif arity == 2:
            # Binary predicates are called with the key and the corresponding
            # value.
            return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
        else:
            # Other arities are an error.
            raise ValueError(
                "callable in transform path must take 1 or 2 arguments"
            )

    # Non-callables are used as-is as a key.
    return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
if signature is None:
    # Fallback for interpreters without inspect.signature: count the
    # positional arguments that have no default.
    def _get_arity(f):
        argspec = getfullargspec(f)
        return len(argspec.args) - len(argspec.defaults or ())
else:
    def _get_arity(f):
        # Number of parameters that must be supplied positionally
        # (no default, positional kind).
        return sum(
            1
            for p
            in signature(f).parameters.values()
            if p.default is Parameter.empty
            and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
        )
def _update_structure(structure, kvs, path, command):
    """Apply *command* at *path* below each (k, v) pair, via an evolver."""
    from pyrsistent._pmap import pmap
    e = structure.evolver()
    if not path and command is discard:
        # Do this in reverse to avoid index problems with vectors. See #92.
        for k, v in reversed(kvs):
            discard(e, k)
    else:
        for k, v in kvs:
            is_empty = False
            if v is _EMPTY_SENTINEL:
                # Allow expansion of structure but make sure to cover the case
                # when an empty pmap is added as leaf node. See #154.
                is_empty = True
                v = pmap()

            result = _do_to_path(v, path, command)
            # Only write back when something actually changed (or a new
            # leaf was created), to keep persistent sharing intact.
            if result is not v or is_empty:
                e[k] = result

    return e.persistent()
| gpl-2.0 |
dhruvvyas90/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Record the delimiter used between flag names for this event field.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one bit value of a flag field to its symbolic name.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact value of a symbolic field to its name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate a flag-field *value* into its delimited symbolic form."""
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of keys()/list.sort(): dict.keys() is not a
        # sortable list on Python 3, and sorted() works on both 2 and 3.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # Zero value maps directly to the zero-flag name.
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear handled bits so leftovers are simply ignored.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Translate a symbolic-field *value* into its registered name."""
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() instead of keys()/list.sort(): dict.keys() is not a
        # sortable list on Python 3, and sorted() works on both 2 and 3.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Bit values of the trace 'flags' field and their symbolic names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}


def trace_flag_str(value):
    """Decode *value* into a ' | '-separated list of trace flag names."""
    names = []
    for bit in trace_flags.keys():
        if not value and not bit:
            # Zero value maps to the special NONE entry.
            names.append("NONE")
            break
        if bit and (value & bit) == bit:
            names.append(trace_flags[bit])
            value &= ~bit
    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state value to its single-letter code."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by all trace events."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm, common_callchain):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
        self.callchain = common_callchain

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Timestamp rendered as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
402231466/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/unittest/test/testmock/testmagicmethods.py | 737 | 12145 | import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
def test_magicmock_del(self):
mock = MagicMock()
# before using getitem
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
mock = MagicMock()
# this time use it first
mock['foo']
del mock.__getitem__
self.assertRaises(TypeError, lambda: mock['foo'])
def test_magic_method_wrapping(self):
mock = Mock()
def f(self, name):
return self, 'fish'
mock.__getitem__ = f
self.assertFalse(mock.__getitem__ is f)
self.assertEqual(mock['foo'], (mock, 'fish'))
self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))
mock.__getitem__ = mock
self.assertTrue(mock.__getitem__ is mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
def test_dict_methods(self):
mock = Mock()
self.assertRaises(TypeError, lambda: mock['foo'])
def _del():
del mock['foo']
def _set():
mock['foo'] = 3
self.assertRaises(TypeError, _del)
self.assertRaises(TypeError, _set)
_dict = {}
def getitem(s, name):
return _dict[name]
def setitem(s, name, value):
_dict[name] = value
def delitem(s, name):
del _dict[name]
mock.__setitem__ = setitem
mock.__getitem__ = getitem
mock.__delitem__ = delitem
self.assertRaises(KeyError, lambda: mock['foo'])
mock['foo'] = 'bar'
self.assertEqual(_dict, {'foo': 'bar'})
self.assertEqual(mock['foo'], 'bar')
del mock['foo']
self.assertEqual(_dict, {})
def test_numeric(self):
original = mock = Mock()
mock.value = 0
self.assertRaises(TypeError, lambda: mock + 3)
def add(self, other):
mock.value += other
return self
mock.__add__ = add
self.assertEqual(mock + 3, mock)
self.assertEqual(mock.value, 3)
del mock.__add__
def iadd(mock):
mock += 3
self.assertRaises(TypeError, iadd, mock)
mock.__iadd__ = add
mock += 6
self.assertEqual(mock, original)
self.assertEqual(mock.value, 9)
self.assertRaises(TypeError, lambda: 3 + mock)
mock.__radd__ = add
self.assertEqual(7 + mock, mock)
self.assertEqual(mock.value, 16)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
m.__bool__ = lambda s: False
self.assertFalse(bool(m))
def test_comparison(self):
mock = Mock()
def comp(s, o):
return True
mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
self. assertTrue(mock < 3)
self. assertTrue(mock > 3)
self. assertTrue(mock <= 3)
self. assertTrue(mock >= 3)
self.assertRaises(TypeError, lambda: MagicMock() < object())
self.assertRaises(TypeError, lambda: object() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > object())
self.assertRaises(TypeError, lambda: object() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= object())
self.assertRaises(TypeError, lambda: object() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= object())
self.assertRaises(TypeError, lambda: object() >= MagicMock())
self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
def test_equality(self):
for mock in Mock(), MagicMock():
self.assertEqual(mock == mock, True)
self.assertIsInstance(mock == mock, bool)
self.assertEqual(mock != mock, False)
self.assertIsInstance(mock != mock, bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
def eq(self, other):
return other == 3
mock.__eq__ = eq
self.assertTrue(mock == 3)
self.assertFalse(mock == 4)
def ne(self, other):
return other == 3
mock.__ne__ = ne
self.assertTrue(mock != 3)
self.assertFalse(mock != 4)
mock = MagicMock()
mock.__eq__.return_value = True
self.assertIsInstance(mock == 3, bool)
self.assertEqual(mock == 3, True)
mock.__ne__.return_value = False
self.assertIsInstance(mock != 3, bool)
self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
    def test_magicmock(self):
        """MagicMock preconfigures every supported magic method."""
        mock = MagicMock()
        # __iter__ is already a child mock; its return value drives iteration.
        mock.__iter__.return_value = iter([1, 2, 3])
        self.assertEqual(list(mock), [1, 2, 3])
        # Python 3 uses __bool__ only; __nonzero__ must not exist.
        getattr(mock, '__bool__').return_value = False
        self.assertFalse(hasattr(mock, '__nonzero__'))
        self.assertFalse(bool(mock))
        # _magics is the module-level set of supported magic-method names.
        for entry in _magics:
            self.assertTrue(hasattr(mock, entry))
        # A made-up dunder is not auto-created.
        self.assertFalse(hasattr(mock, '__imaginery__'))
    def test_magic_mock_equality(self):
        """MagicMock equality defaults to identity and returns real bools."""
        mock = MagicMock()
        self.assertIsInstance(mock == object(), bool)
        self.assertIsInstance(mock != object(), bool)
        # Equal only to itself; unequal to everything else.
        self.assertEqual(mock == object(), False)
        self.assertEqual(mock != object(), True)
        self.assertEqual(mock == mock, True)
        self.assertEqual(mock != mock, False)
def test_magicmock_defaults(self):
mock = MagicMock()
self.assertEqual(int(mock), 1)
self.assertEqual(complex(mock), 1j)
self.assertEqual(float(mock), 1.0)
self.assertNotIn(object(), mock)
self.assertEqual(len(mock), 0)
self.assertEqual(list(mock), [])
self.assertEqual(hash(mock), object.__hash__(mock))
self.assertEqual(str(mock), object.__str__(mock))
self.assertTrue(bool(mock))
# in Python 3 oct and hex use __index__
# so these tests are for __index__ in py3k
self.assertEqual(oct(mock), '0o1')
self.assertEqual(hex(mock), '0x1')
# how to test __sizeof__ ?
def test_magic_methods_and_spec(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_magic_methods_and_spec_set(self):
class Iterable(object):
def __iter__(self):
pass
mock = Mock(spec_set=Iterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
mock.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock), [])
class NonIterable(object):
pass
mock = Mock(spec_set=NonIterable)
self.assertRaises(AttributeError, lambda: mock.__iter__)
def set_int():
mock.__int__ = Mock(return_value=iter([]))
self.assertRaises(AttributeError, set_int)
mock = MagicMock(spec_set=Iterable)
self.assertEqual(list(mock), [])
self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
    def test_magic_methods_are_magic_mocks(self):
        """Magic methods on a MagicMock are themselves configurable MagicMocks."""
        mock = MagicMock()
        self.assertIsInstance(mock.__getitem__, MagicMock)
        # Item access chains through auto-created child mocks.
        mock[1][2].__getitem__.return_value = 3
        self.assertEqual(mock[1][2][3], 3)
    def test_magic_method_reset_mock(self):
        """reset_mock() clears call records on magic methods too."""
        mock = MagicMock()
        str(mock)
        self.assertTrue(mock.__str__.called)
        mock.reset_mock()
        self.assertFalse(mock.__str__.called)
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
    @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        """Assigning another object's bound method as __iter__ still leaves
        iter() failing on the mock (as the assertion below shows)."""
        m = Mock()
        # XXXX should this be an expected failure instead?
        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
    def test_iterable_as_iter_return_value(self):
        """A non-iterator iterable as __iter__.return_value can be iterated
        repeatedly; a plain iterator is exhausted after one pass."""
        m = MagicMock()
        m.__iter__.return_value = [1, 2, 3]
        self.assertEqual(list(m), [1, 2, 3])
        self.assertEqual(list(m), [1, 2, 3])
        # An iterator yields nothing on the second pass.
        m.__iter__.return_value = iter([4, 5, 6])
        self.assertEqual(list(m), [4, 5, 6])
        self.assertEqual(list(m), [])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
kidburglar/youtube-dl | youtube_dl/extractor/trutv.py | 20 | 2532 | # coding: utf-8
from __future__ import unicode_literals
import re
from .turner import TurnerBaseIE
from ..utils import (
int_or_none,
parse_iso8601,
)
class TruTVIE(TurnerBaseIE):
    """Extractor for truTV clips and episodes.

    Matches either a clip page (/shows/<series>/videos/<clip-slug>)
    or an episode page (/shows|full-episodes/<series>/<numeric id>).
    """
    _VALID_URL = r'https?://(?:www\.)?trutv\.com/(?:shows|full-episodes)/(?P<series_slug>[0-9A-Za-z-]+)/(?:videos/(?P<clip_slug>[0-9A-Za-z-]+)|(?P<id>\d+))'
    _TEST = {
        'url': 'https://www.trutv.com/shows/the-carbonaro-effect/videos/sunlight-activated-flower.html',
        'info_dict': {
            'id': 'f16c03beec1e84cd7d1a51f11d8fcc29124cc7f1',
            'ext': 'mp4',
            'title': 'Sunlight-Activated Flower',
            'description': "A customer is stunned when he sees Michael's sunlight-activated flower.",
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }
    def _real_extract(self, url):
        """Fetch metadata from the truTV API and the stream info via NGTV."""
        series_slug, clip_slug, video_id = re.match(self._VALID_URL, url).groups()
        # Episode URLs carry a numeric id; clip URLs only have a slug.
        if video_id:
            path = 'episode'
            display_id = video_id
        else:
            path = 'series/clip'
            display_id = clip_slug
        data = self._download_json(
            'https://api.trutv.com/v2/web/%s/%s/%s' % (path, series_slug, display_id),
            display_id)
        # The API nests the payload differently for episodes and clips.
        video_data = data['episode'] if video_id else data['info']
        media_id = video_data['mediaId']
        title = video_data['title'].strip()
        # Formats/subtitles come from Turner's NGTV service (base class helper).
        info = self._extract_ngtv_info(
            media_id, {}, {
                'url': url,
                'site_name': 'truTV',
                'auth_required': video_data.get('isAuthRequired'),
            })
        thumbnails = []
        for image in video_data.get('images', []):
            image_url = image.get('srcUrl')
            if not image_url:
                continue
            thumbnails.append({
                'url': image_url,
                'width': int_or_none(image.get('width')),
                'height': int_or_none(image.get('height')),
            })
        info.update({
            'id': media_id,
            'display_id': display_id,
            'title': title,
            'description': video_data.get('description'),
            'thumbnails': thumbnails,
            'timestamp': parse_iso8601(video_data.get('publicationDate')),
            'series': video_data.get('showTitle'),
            'season_number': int_or_none(video_data.get('seasonNum')),
            'episode_number': int_or_none(video_data.get('episodeNum')),
        })
        return info
| unlicense |
user-none/calibre | setup/vcvars.py | 11 | 2188 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, sys, subprocess
from distutils.msvc9compiler import find_vcvarsall, get_build_version
plat = 'amd64' if sys.maxsize > 2**32 else 'x86'
def remove_dups(variable):
    """Return *variable* (an os.pathsep separated list) with duplicate
    entries dropped, keeping the first occurrence of each entry."""
    seen = []
    for entry in variable.split(os.pathsep):
        if entry not in seen:
            seen.append(entry)
    return os.pathsep.join(seen)
def query_process(cmd):
    """Run *cmd* in a subprocess and parse its stdout as ``KEY=VALUE`` lines.

    The command is expected to end with ``& set`` so that the resulting
    environment is dumped.  Keys are lower-cased; the ``path`` value gets a
    trailing separator stripped and duplicate entries removed.
    Raises RuntimeError with the decoded stderr if the command fails.
    """
    result = {}
    popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    try:
        stdout, stderr = popen.communicate()
        if popen.wait() != 0:
            raise RuntimeError(stderr.decode("mbcs"))
        # "mbcs" is the Windows ANSI codepage; this script is Windows-only.
        stdout = stdout.decode("mbcs")
        for line in stdout.splitlines():
            if '=' not in line:
                continue
            line = line.strip()
            key, value = line.split('=', 1)
            key = key.lower()
            if key == 'path':
                # Normalise PATH: no trailing separator, no duplicates.
                if value.endswith(os.pathsep):
                    value = value[:-1]
                value = remove_dups(value)
            result[key] = value
    finally:
        popen.stdout.close()
        popen.stderr.close()
    return result
def query_vcvarsall():
    """Return the environment dict produced by vcvarsall.bat for this arch."""
    vcvarsall = find_vcvarsall(get_build_version())
    # '& set' dumps the resulting environment for query_process to parse.
    return query_process('"%s" %s & set' % (vcvarsall, plat))
# Query the MSVC environment once at import time for the current platform.
env = query_vcvarsall()
# Values of interest; PATH entries are ';' separated on Windows.
paths = env['path'].split(';')
lib = env['lib']
include = env['include']
libpath = env['libpath']
def unix(paths):
    """Convert a list of Windows paths into a single ':' separated string
    of cygwin-style /cygdrive/<letter>/... paths."""
    converted = []
    for path in paths:
        # Split the drive prefix (e.g. 'C:') from the rest of the path.
        drive, tail = path.replace(os.sep, '/').partition('/')[0::2]
        converted.append('/cygdrive/%s/%s' % (drive[0].lower(), tail))
    return ':'.join(converted)
# Shell script exporting the captured MSVC environment for POSIX shells.
raw = '''\
#!/bin/sh
export PATH="%s:$PATH"
export LIB="%s"
export INCLUDE="%s"
export LIBPATH="%s"
'''%(unix(paths), lib, include, libpath)
# Binary mode so the file keeps Unix line endings on Windows.
with open(os.path.expanduser('~/.vcvars'), 'wb') as f:
    f.write(raw.encode('utf-8'))
| gpl-3.0 |
ahojjati/grr | gui/api_plugins/reflection.py | 3 | 1393 | #!/usr/bin/env python
"""API renderer for rendering descriptors of GRR data structures."""
from grr.gui import api_call_renderers
from grr.gui import api_value_renderers
from grr.lib import rdfvalue
from grr.lib.rdfvalues import structs as rdf_structs
from grr.proto import api_pb2
class ApiRDFValueReflectionRendererArgs(rdf_structs.RDFProtoStruct):
  """Protobuf-backed arguments for ApiRDFValueReflectionRenderer."""
  protobuf = api_pb2.ApiRDFValueReflectionRendererArgs
class ApiRDFValueReflectionRenderer(api_call_renderers.ApiCallRenderer):
  """Renders descriptor of a given RDFValue type."""
  args_type = ApiRDFValueReflectionRendererArgs
  def Render(self, args, token=None):
    """Return type metadata for args.type, or for all known types.

    When args_type is None (see ApiAllRDFValuesReflectionRenderer),
    metadata for every registered type is returned instead.
    """
    _ = token
    # We have to provide info for python primitive types as well, as sometimes
    # they may be used within FlowState objects.
    all_types = dict(rdfvalue.RDFValue.classes.items())
    for cls in [bool, int, float, long, basestring, str, unicode, list, tuple]:
      all_types[cls.__name__] = cls
    if self.args_type:
      # Single-type mode: look up the requested class by name.
      rdfvalue_class = all_types[args.type]
      return api_value_renderers.RenderTypeMetadata(rdfvalue_class)
    else:
      # Render-all mode: metadata keyed by class name.
      results = {}
      for cls in all_types.values():
        results[cls.__name__] = api_value_renderers.RenderTypeMetadata(cls)
      return results
class ApiAllRDFValuesReflectionRenderer(ApiRDFValueReflectionRenderer):
  """Renders descriptors of all available RDFValues."""
  # args_type = None switches the parent's Render() into render-all mode.
  args_type = None
| apache-2.0 |
duyet-website/api.duyet.net | lib/numpy/distutils/__config__.py | 14 | 1313 | # This file is generated by /io/numpy/setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blis_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info={}
blas_mkl_info={}
def get_info(name):
    """Return the recorded system_info dict for *name*, trying the bare
    name first and then ``name + "_info"``; {} when neither exists."""
    table = globals()
    fallback = table.get(name + "_info", {})
    return table.get(name, fallback)
def show():
    """Print every recorded *_info dictionary found in this module."""
    for name, info in globals().items():
        # Only public module-level plain dicts are info records.
        if name.startswith("_") or type(info) is not dict:
            continue
        print(name + ":")
        if not info:
            print(" NOT AVAILABLE")
        for key, value in info.items():
            value = str(value)
            # Abbreviate very long source lists for readability.
            if key == "sources" and len(value) > 200:
                value = value[:60] + " ...\n... " + value[-60:]
            print(" %s = %s" % (key, value))
| mit |
coberger/DIRAC | TransformationSystem/Agent/TransformationAgent.py | 4 | 29976 | """ TransformationAgent processes transformations found in the transformation database.
"""
import time, Queue, os, datetime, pickle
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.Core.Utilities.ThreadPool import ThreadPool
from DIRAC.Core.Utilities.ThreadSafe import Synchronizer
from DIRAC.Core.Utilities.List import breakListIntoChunks, randomize
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.TransformationSystem.Agent.TransformationAgentsUtilities import TransformationAgentsUtilities
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/TransformationAgent'
gSynchro = Synchronizer()
class TransformationAgent( AgentModule, TransformationAgentsUtilities ):
""" Usually subclass of AgentModule
"""
  def __init__( self, *args, **kwargs ):
    """ c'tor

        Only declares the attributes; real configuration happens in initialize().
    """
    AgentModule.__init__( self, *args, **kwargs )
    TransformationAgentsUtilities.__init__( self )
    # few parameters
    self.pluginLocation = ''
    self.transformationStatus = []
    self.maxFiles = 0
    self.transformationTypes = []
    # clients (out of the threads)
    self.transfClient = None
    # parameters for the threading
    # transQueue holds transformation dicts consumed by the worker threads;
    # transInQueue mirrors the IDs currently queued or being processed
    self.transQueue = Queue.Queue()
    self.transInQueue = []
    # parameters for caching
    self.workDirectory = ''
    self.cacheFile = ''
    self.controlDirectory = ''
    self.lastFileOffset = {}
    # Validity of the cache
    self.replicaCache = None
    self.replicaCacheValidity = None
    self.writingCache = False
    self.removedFromCache = 0
    self.noUnusedDelay = 0
    self.unusedFiles = {}
    self.unusedTimeStamp = {}
    self.debug = False
    # transID -> log prefix for the thread currently handling it
    self.transInThread = {}
    self.pluginTimeout = {}
  def initialize( self ):
    """ standard initialize

        Reads agent options, creates the TransformationClient and spawns the
        worker thread pool (each thread runs _execute forever).
    """
    # few parameters
    self.pluginLocation = self.am_getOption( 'PluginLocation',
                                             'DIRAC.TransformationSystem.Agent.TransformationPlugin' )
    self.transformationStatus = self.am_getOption( 'transformationStatus', ['Active', 'Completing', 'Flush'] )
    self.maxFiles = self.am_getOption( 'MaxFiles', 5000 )
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = sorted( agentTSTypes )
    else:
      # Fall back to the CS-defined data-processing and data-manipulation types
      dataProc = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
      dataManip = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] )
      self.transformationTypes = sorted( dataProc + dataManip )
    # clients
    self.transfClient = TransformationClient()
    # for caching using a pickle file
    self.workDirectory = self.am_getWorkDirectory()
    self.cacheFile = os.path.join( self.workDirectory, 'ReplicaCache.pkl' )
    self.controlDirectory = self.am_getControlDirectory()
    # remember the offset if any in TS
    self.lastFileOffset = {}
    # Validity of the cache (in days)
    self.replicaCache = {}
    self.replicaCacheValidity = self.am_getOption( 'ReplicaCacheValidity', 2 )
    self.noUnusedDelay = self.am_getOption( 'NoUnusedDelay', 6 )
    # Get it threaded
    maxNumberOfThreads = self.am_getOption( 'maxThreadsInPool', 1 )
    threadPool = ThreadPool( maxNumberOfThreads, maxNumberOfThreads )
    self.log.info( "Multithreaded with %d threads" % maxNumberOfThreads )
    for i in xrange( maxNumberOfThreads ):
      threadPool.generateJobAndQueueIt( self._execute, [i] )
    self.log.info( "Will treat the following transformation types: %s" % str( self.transformationTypes ) )
    return S_OK()
  def finalize( self ):
    """ graceful finalization

        Empties the queue marker list (which makes worker threads break out),
        waits for in-flight transformations, then persists the replica cache.
    """
    method = 'finalize'
    if self.transInQueue:
      # Clearing transInQueue signals _execute threads to stop picking work
      self.transInQueue = []
      self._logInfo( "Wait for threads to get empty before terminating the agent (%d tasks)" % len( self.transInThread ), method = method )
      self._logInfo( 'Remaining transformations: ' + ','.join( [str( transID ) for transID in self.transInThread] ), method = method )
      while self.transInThread:
        time.sleep( 2 )
      self._logInfo( "Threads are empty, terminating the agent..." , method = method )
    self.__writeCache()
    return S_OK()
def execute( self ):
""" Just puts transformations in the queue
"""
# Get the transformations to process
res = self.getTransformations()
if not res['OK']:
self._logError( "Failed to obtain transformations:", res['Message'] )
return S_OK()
# Process the transformations
count = 0
for transDict in res['Value']:
transID = long( transDict['TransformationID'] )
if transDict.get( 'InheritedFrom' ):
# Try and move datasets from the ancestor production
res = self.transfClient.moveFilesToDerivedTransformation( transDict )
if not res['OK']:
self._logError( "Error moving files from an inherited transformation", res['Message'], transID = transID )
else:
parentProd, movedFiles = res['Value']
if movedFiles:
self._logInfo( "Successfully moved files from %d to %d:" % ( parentProd, transID ), transID = transID )
for status, val in movedFiles.items():
self._logInfo( "\t%d files to status %s" % ( val, status ), transID = transID )
if transID not in self.transInQueue:
count += 1
self.transInQueue.append( transID )
self.transQueue.put( transDict )
self._logInfo( "Out of %d transformations, %d put in thread queue" % ( len( res['Value'] ), count ) )
return S_OK()
  def getTransformations( self ):
    """ Obtain the transformations to be executed - this is executed at the start of every loop (it's really the
        only real thing in the execute()

        :return: S_OK( list of transformation dicts ) or S_ERROR
    """
    transName = self.am_getOption( 'Transformation', 'All' )
    method = 'getTransformations'
    if transName == 'All':
      self._logInfo( "Getting all transformations%s, status %s." %
                     ( ' of type %s' % str( self.transformationTypes ) if self.transformationTypes else '',
                       str( self.transformationStatus ) ),
                     method = method )
      transfDict = {'Status': self.transformationStatus }
      if self.transformationTypes:
        transfDict['Type'] = self.transformationTypes
      res = self.transfClient.getTransformations( transfDict, extraParams = True )
      if not res['OK']:
        return res
      transformations = res['Value']
      self._logInfo( "Obtained %d transformations to process" % len( transformations ), method = method )
    else:
      # A single transformation was requested by name in the configuration
      self._logInfo( "Getting transformation %s." % transName, method = method )
      res = self.transfClient.getTransformation( transName, extraParams = True )
      if not res['OK']:
        self._logError( "Failed to get transformation:", res['Message'], method = method )
        return res
      transformations = [res['Value']]
    return S_OK( transformations )
def _getClients( self ):
""" returns the clients used in the threads
"""
threadTransformationClient = TransformationClient()
threadDataManager = DataManager()
return {'TransformationClient': threadTransformationClient,
'DataManager': threadDataManager}
def _execute( self, threadID ):
""" thread - does the real job: processing the transformations to be processed
"""
# Each thread will have its own clients
clients = self._getClients()
while True:
transDict = self.transQueue.get()
try:
transID = long( transDict['TransformationID'] )
if transID not in self.transInQueue:
break
self.transInThread[transID] = ' [Thread%d] [%s] ' % ( threadID, str( transID ) )
self._logInfo( "Processing transformation %s." % transID, transID = transID )
startTime = time.time()
res = self.processTransformation( transDict, clients )
if not res['OK']:
self._logInfo( "Failed to process transformation:", res['Message'], transID = transID )
except Exception, x:
self._logException( '%s' % x, transID = transID )
finally:
if not transID:
transID = 'None'
self._logInfo( "Processed transformation in %.1f seconds" % ( time.time() - startTime ), transID = transID )
if transID in self.transInQueue:
self.transInQueue.remove( transID )
self.transInThread.pop( transID, None )
self._logVerbose( "%d transformations still in queue" % len( self.transInQueue ) )
return S_OK()
  def processTransformation( self, transDict, clients, active = True ):
    """ process a single transformation (in transDict)

        Gets the files to treat, their replicas, runs the transformation
        plug-in and creates the resulting tasks in the TS.

        :param transDict: transformation description dict (needs at least
            TransformationID, Type, Status, Body, optionally Plugin)
        :param clients: dict of per-thread clients from _getClients()
        :param active: NOTE(review) this parameter is never used in the body
            (the 'active' flag passed to __getDataReplicas is derived from
            the transformation type) — confirm before removing
    """
    transID = transDict['TransformationID']
    replicateOrRemove = transDict['Type'].lower() in ( 'replication', 'removal' )
    # First get the LFNs associated to the transformation
    transFiles = self._getTransformationFiles( transDict, clients, replicateOrRemove = replicateOrRemove )
    if not transFiles['OK']:
      return transFiles
    if not transFiles['Value']:
      return S_OK()
    # Load the pickled replica cache for this transformation if not in memory
    if transID not in self.replicaCache:
      self.__readCache( transID )
    transFiles = transFiles['Value']
    lfns = [ f['LFN'] for f in transFiles ]
    unusedFiles = len( lfns )
    # Limit the number of LFNs to be considered for replication or removal as they are treated individually
    if replicateOrRemove:
      totLfns = len( lfns )
      lfns = self.__applyReduction( lfns )
      if len( lfns ) != totLfns:
        self._logInfo( "Reduced number of files from %d to %d" % ( totLfns, len( lfns ) ),
                       method = "processTransformation", transID = transID )
    # Check the data is available with replicas
    res = self.__getDataReplicas( transDict, lfns, clients, active = not replicateOrRemove )
    if not res['OK']:
      self._logError( "Failed to get data replicas:", res['Message'],
                      method = "processTransformation", transID = transID )
      return res
    dataReplicas = res['Value']
    # Get the plug-in type and create the plug-in object
    plugin = transDict.get( 'Plugin', 'Standard' )
    self._logInfo( "Processing transformation with '%s' plug-in." % plugin,
                   method = "processTransformation", transID = transID )
    res = self.__generatePluginObject( plugin, clients )
    if not res['OK']:
      return res
    oPlugin = res['Value']
    # Get the plug-in and set the required params
    oPlugin.setParameters( transDict )
    oPlugin.setInputData( dataReplicas )
    oPlugin.setTransformationFiles( transFiles )
    res = oPlugin.run()
    if not res['OK']:
      self._logError( "Failed to generate tasks for transformation:", res['Message'],
                      method = "processTransformation", transID = transID )
      return res
    tasks = res['Value']
    # A plug-in may flag a timeout; this disables the NoUnusedDelay throttle
    self.pluginTimeout[transID] = res.get( 'Timeout', False )
    # Create the tasks
    allCreated = True
    created = 0
    lfnsInTasks = []
    for se, lfns in tasks:
      res = clients['TransformationClient'].addTaskForTransformation( transID, lfns, se )
      if not res['OK']:
        self._logError( "Failed to add task generated by plug-in:", res['Message'],
                        method = "processTransformation", transID = transID )
        allCreated = False
      else:
        created += 1
        lfnsInTasks += lfns
    if created:
      self._logInfo( "Successfully created %d tasks for transformation." % created,
                     method = "processTransformation", transID = transID )
    else:
      self._logInfo( "No new tasks created for transformation.",
                     method = "processTransformation", transID = transID )
    self.unusedFiles[transID] = unusedFiles - len( lfnsInTasks )
    # If not all files were obtained, move the offset
    lastOffset = self.lastFileOffset.get( transID )
    if lastOffset:
      self.lastFileOffset[transID] = max( 0, lastOffset - len( lfnsInTasks ) )
    self.__removeFilesFromCache( transID, lfnsInTasks )
    # If this production is to Flush
    if transDict['Status'] == 'Flush' and allCreated:
      res = clients['TransformationClient'].setTransformationParameter( transID, 'Status', 'Active' )
      if not res['OK']:
        self._logError( "Failed to update transformation status to 'Active':" , res['Message'],
                        method = "processTransformation", transID = transID )
      else:
        self._logInfo( "Updated transformation status to 'Active'.",
                       method = "processTransformation", transID = transID )
    return S_OK()
######################################################################
#
# Internal methods used by the agent
#
  def _getTransformationFiles( self, transDict, clients, statusList = None, replicateOrRemove = False ):
    """ Get the transformation files to be processed (by default those in
        Unused/ProbInFC status, plus MissingInFC for Removal transformations),
        honouring per-plugin sorting, paging and throttling options.

        :param statusList: file statuses to select; defaults to
            ['Unused', 'ProbInFC'].  NOTE(review): the += below mutates a
            caller-provided list in place — confirm callers never reuse it.
    """
    transID = transDict['TransformationID']
    plugin = transDict.get( 'Plugin', 'Standard' )
    # Check if files should be sorted and limited in number
    operations = Operations()
    sortedBy = operations.getValue( 'TransformationPlugins/%s/SortedBy' % plugin, None )
    maxFiles = operations.getValue( 'TransformationPlugins/%s/MaxFiles' % plugin, 0 )
    # A plug-in timeout on the previous pass disables the throttle below
    noUnusedDelay = 0 if self.pluginTimeout.get( transID, False ) else operations.getValue( 'TransformationPlugins/%s/NoUnusedDelay' % plugin, self.noUnusedDelay )
    method = '_getTransformationFiles'
    lastOffset = self.lastFileOffset.setdefault( transID, 0 )
    # Files that were problematic (either explicit or because SE was banned) may be recovered,
    # and always removing the missing ones
    if not statusList:
      statusList = ['Unused', 'ProbInFC']
    statusList += ['MissingInFC'] if transDict['Type'] == 'Removal' else []
    transClient = clients['TransformationClient']
    res = transClient.getTransformationFiles( condDict = {'TransformationID':transID,
                                                          'Status':statusList},
                                              orderAttribute = sortedBy,
                                              offset = lastOffset, maxfiles = maxFiles )
    if not res['OK']:
      self._logError( "Failed to obtain input data:", res['Message'],
                      method = method, transID = transID )
      return res
    transFiles = res['Value']
    # Advance the paging offset when a full page was returned, else reset it
    if maxFiles and len( transFiles ) == maxFiles:
      self.lastFileOffset[transID] += maxFiles
    else:
      del self.lastFileOffset[transID]
    if not transFiles:
      self._logInfo( "No '%s' files found for transformation." % ','.join( statusList ),
                     method = method, transID = transID )
      if transDict['Status'] == 'Flush':
        res = transClient.setTransformationParameter( transID, 'Status', 'Active' )
        if not res['OK']:
          self._logError( "Failed to update transformation status to 'Active':", res['Message'],
                          method = method, transID = transID )
        else:
          self._logInfo( "Updated transformation status to 'Active'.",
                         method = method, transID = transID )
      return S_OK()
    # Check if transformation is kicked (operator-created marker file)
    kickFile = os.path.join( self.controlDirectory, 'KickTransformation_%s' % str( transID ) )
    try:
      kickTrans = os.path.exists( kickFile )
      if kickTrans:
        os.remove( kickFile )
    # NOTE(review): bare except also hides unexpected errors; an OSError
    # filter would be safer — confirm before narrowing
    except:
      pass
    # Check if something new happened
    now = datetime.datetime.utcnow()
    if not kickTrans and not replicateOrRemove and noUnusedDelay:
      # Skip the transformation if the same set of Unused files was already
      # seen less than noUnusedDelay hours ago
      nextStamp = self.unusedTimeStamp.setdefault( transID, now ) + datetime.timedelta( hours = noUnusedDelay )
      skip = now < nextStamp
      if len( transFiles ) == self.unusedFiles.get( transID, 0 ) and transDict['Status'] != 'Flush' and skip:
        self._logInfo( "No new '%s' files found for transformation." % ','.join( statusList ),
                       method = method, transID = transID )
        return S_OK()
    self.unusedTimeStamp[transID] = now
    # If files are not Unused, set them Unused
    notUnused = [trFile['LFN'] for trFile in transFiles if trFile['Status'] != 'Unused']
    otherStatuses = sorted( set( [trFile['Status'] for trFile in transFiles] ) - set( ['Unused'] ) )
    if notUnused:
      res = transClient.setFileStatusForTransformation( transID, 'Unused', notUnused, force = True )
      if not res['OK']:
        self._logError( "Error setting %d files Unused:" % len( notUnused ), res['Message'],
                        method = method, transID = transID )
      else:
        self._logInfo( "Set %d files from %s to Unused" % ( len( notUnused ), ','.join( otherStatuses ) ) )
        self.__removeFilesFromCache( transID, notUnused )
    return S_OK( transFiles )
def __applyReduction( self, lfns ):
""" eventually remove the number of files to be considered
"""
if len( lfns ) <= self.maxFiles:
return lfns
return randomize( lfns )[:self.maxFiles]
  def __getDataReplicas( self, transDict, lfns, clients, active = True ):
    """ Get the replicas for the LFNs and check their statuses. It first looks within the cache.

        Serves replicas from the in-memory/pickled cache when possible and
        only queries the catalog for LFNs not yet cached.

        :param active: if True, only active (non-banned) replicas are returned
    """
    method = '__getDataReplicas'
    transID = transDict['TransformationID']
    if 'RemoveFile' in transDict['Body']:
      # When removing files, we don't care about their replicas
      return S_OK( dict.fromkeys( lfns, ['None'] ) )
    # Operator marker file requesting a cache flush for this transformation
    clearCacheFile = os.path.join( self.controlDirectory, 'ClearCache_%s' % str( transID ) )
    try:
      clearCache = os.path.exists( clearCacheFile )
      if clearCache:
        os.remove( clearCacheFile )
    # NOTE(review): if os.path.exists itself raises, 'clearCache' stays
    # unbound and the next statement raises NameError — confirm and guard
    except:
      pass
    if clearCache or transDict['Status'] == 'Flush':
      self._logInfo( "Replica cache cleared", method = method, transID = transID )
      # We may need to get new replicas
      self.__clearCacheForTrans( transID )
    else:
      # If the cache needs to be cleaned
      self.__cleanCache( transID )
    startTime = time.time()
    dataReplicas = {}
    nLfns = len( lfns )
    self._logVerbose( "Getting replicas for %d files" % nLfns, method = method, transID = transID )
    cachedReplicaSets = self.replicaCache.get( transID, {} )
    cachedReplicas = {}
    # Merge all sets of replicas (cache entries are keyed by retrieval time)
    for replicas in cachedReplicaSets.values():
      cachedReplicas.update( replicas )
    self._logInfo( "Number of cached replicas: %d" % len( cachedReplicas ), method = method, transID = transID )
    setCached = set( cachedReplicas )
    setLfns = set( lfns )
    for lfn in setLfns & setCached:
      dataReplicas[lfn] = cachedReplicas[lfn]
    newLFNs = setLfns - setCached
    self._logInfo( "ReplicaCache hit for %d out of %d LFNs" % ( len( dataReplicas ), nLfns ),
                   method = method, transID = transID )
    if newLFNs:
      startTime = time.time()
      self._logInfo( "Getting replicas for %d files from catalog" % len( newLFNs ),
                     method = method, transID = transID )
      newReplicas = {}
      # Chunked catalog queries to bound the request size
      for chunk in breakListIntoChunks( newLFNs, 10000 ):
        res = self._getDataReplicasDM( transID, chunk, clients, active = active )
        if res['OK']:
          # Keep only LFNs that actually have replicas
          reps = dict( [( lfn, ses ) for lfn, ses in res['Value'].items() if ses] )
          newReplicas.update( reps )
          self.__updateCache( transID, reps )
        else:
          self._logWarn( "Failed to get replicas for %d files" % len( chunk ), res['Message'],
                         method = method, transID = transID )
      self._logInfo( "Obtained %d replicas from catalog in %.1f seconds" \
                     % ( len( newReplicas ), time.time() - startTime ),
                     method = method, transID = transID )
      dataReplicas.update( newReplicas )
      noReplicas = newLFNs - set( dataReplicas )
      self.__writeCache( transID )
      if noReplicas:
        self._logWarn( "Found %d files without replicas (or only in Failover)" % len( noReplicas ),
                       method = method, transID = transID )
    return S_OK( dataReplicas )
  def _getDataReplicasDM( self, transID, lfns, clients, active = True, ignoreMissing = False ):
    """ Get the replicas for the LFNs and check their statuses, using the replica manager

        Side effects on the TS: files neither Successful nor Failed are set
        ProbInFC; files missing from the catalog are set MissingInFC (unless
        ignoreMissing is True, in which case they get an empty replica list).
    """
    method = '_getDataReplicasDM'
    startTime = time.time()
    self._logVerbose( "Getting replicas for %d files from catalog" % len( lfns ),
                      method = method, transID = transID )
    if active:
      res = clients['DataManager'].getActiveReplicas( lfns )
    else:
      res = clients['DataManager'].getReplicas( lfns )
    if not res['OK']:
      return res
    replicas = res['Value']
    # Prepare a dictionary for all LFNs
    dataReplicas = {}
    self._logVerbose( "Replica results for %d files obtained in %.2f seconds" % ( len( lfns ), time.time() - startTime ),
                      method = method, transID = transID )
    # If files are neither Successful nor Failed, they are set problematic in the FC
    problematicLfns = [lfn for lfn in lfns if lfn not in replicas['Successful'] and lfn not in replicas['Failed']]
    if problematicLfns:
      self._logInfo( "%d files found problematic in the catalog, set ProbInFC" % len( problematicLfns ) )
      res = clients['TransformationClient'].setFileStatusForTransformation( transID, 'ProbInFC', problematicLfns )
      if not res['OK']:
        self._logError( "Failed to update status of problematic files:", res['Message'],
                        method = method, transID = transID )
    # Create a dictionary containing all the file replicas
    failoverLfns = []
    for lfn, replicaDict in replicas['Successful'].items():
      for se in replicaDict:
        #### This should definitely be included in the SE definition (i.e. not used for transformations)
        if active and 'failover' in se.lower():
          self._logVerbose( "Ignoring failover replica for %s." % lfn, method = method, transID = transID )
        else:
          dataReplicas.setdefault( lfn, [] ).append( se )
      # LFNs with only failover replicas end up with no usable replica
      if not dataReplicas.get( lfn ):
        failoverLfns.append( lfn )
    if failoverLfns:
      self._logVerbose( "%d files have no replica but possibly in Failover SE" % len( failoverLfns ) )
    # Make sure that file missing from the catalog are marked in the transformation DB.
    missingLfns = []
    for lfn, reason in replicas['Failed'].items():
      if "No such file or directory" in reason:
        self._logVerbose( "%s not found in the catalog." % lfn, method = method, transID = transID )
        missingLfns.append( lfn )
    if missingLfns:
      self._logInfo( "%d files not found in the catalog" % len( missingLfns ) )
      if ignoreMissing:
        dataReplicas.update( dict.fromkeys( missingLfns, [] ) )
      else:
        res = clients['TransformationClient'].setFileStatusForTransformation( transID, 'MissingInFC', missingLfns )
        if not res['OK']:
          self._logError( "Failed to update status of missing files:", res['Message'],
                          method = method, transID = transID )
    return S_OK( dataReplicas )
def __updateCache( self, transID, newReplicas ):
""" Add replicas to the cache
"""
self.replicaCache.setdefault( transID, {} )[datetime.datetime.utcnow()] = newReplicas
# if len( newReplicas ) > 5000:
# self.__writeCache( transID )
def __clearCacheForTrans( self, transID ):
""" Remove all replicas for a transformation
"""
self.replicaCache.pop( transID , None )
def __cleanReplicas( self, transID, lfns ):
""" Remove cached replicas that are not in a list
"""
cachedReplicas = set()
for replicas in self.replicaCache.get( transID, {} ).values():
cachedReplicas.update( replicas )
toRemove = cachedReplicas - set( lfns )
if toRemove:
self._logInfo( "Remove %d files from cache" % len( toRemove ), method = '__cleanReplicas', transID = transID )
self.__removeFromCache( transID, toRemove )
def __cleanCache( self, transID ):
""" Cleans the cache
"""
try:
if transID in self.replicaCache:
timeLimit = datetime.datetime.utcnow() - datetime.timedelta( days = self.replicaCacheValidity )
for updateTime in set( self.replicaCache[transID] ):
nCache = len( self.replicaCache[transID][updateTime] )
if updateTime < timeLimit or not nCache:
self._logInfo( "Clear %s replicas for transformation %s, time %s" %
( '%d cached' % nCache if nCache else 'empty cache' , str( transID ), str( updateTime ) ),
transID = transID, method = '__cleanCache' )
del self.replicaCache[transID][updateTime]
# Remove empty transformations
if not self.replicaCache[transID]:
del self.replicaCache[transID]
except Exception:
self._logException( "Exception when cleaning replica cache:" )
def __removeFilesFromCache( self, transID, lfns ):
removed = self.__removeFromCache( transID, lfns )
if removed:
self._logInfo( "Removed %d replicas from cache" % removed, method = '__removeFilesFromCache', transID = transID )
self.__writeCache( transID )
def __removeFromCache( self, transID, lfns ):
if transID not in self.replicaCache:
return
removed = 0
if self.replicaCache[transID] and lfns:
for lfn in lfns:
for timeKey in self.replicaCache[transID]:
if self.replicaCache[transID][timeKey].pop( lfn, None ):
removed += 1
return removed
def __cacheFile( self, transID ):
return self.cacheFile.replace( '.pkl', '_%s.pkl' % str( transID ) )
@gSynchro
def __readCache( self, transID ):
""" Reads from the cache
"""
if transID in self.replicaCache:
return
try:
method = '__readCache'
fileName = self.__cacheFile( transID )
if not os.path.exists( fileName ):
# This is as a transitory measure for migrating from single to multiple cache files
fileName = self.cacheFile
cacheFile = open( fileName, 'r' )
cache = pickle.load( cacheFile )
for t_id in [t_id for t_id in cache if t_id not in self.replicaCache]:
self.replicaCache[t_id] = cache[t_id]
self.replicaCache[transID] = cache.get( transID, {} )
else:
cacheFile = open( fileName, 'r' )
self.replicaCache[transID] = pickle.load( cacheFile )
cacheFile.close()
self._logInfo( "Successfully loaded replica cache from file %s (%d files)" %
( fileName, self.__filesInCache( transID ) ),
method = method, transID = transID )
except Exception:
self._logException( "Failed to load replica cache from file %s" % fileName,
method = method, transID = transID )
self.replicaCache[transID] = {}
def __filesInCache( self, transID ):
cache = self.replicaCache.get( transID, {} )
return sum( [len( lfns ) for lfns in cache.values()] )
@gSynchro
def __writeCache( self, transID = None ):
""" Writes the cache
"""
method = '__writeCache'
try:
startTime = time.time()
transList = [transID] if transID else set( self.replicaCache )
filesInCache = 0
nCache = 0
for t_id in transList:
# Protect the copy of the cache
filesInCache += self.__filesInCache( t_id )
# write to a temporary file in order to avoid corrupted files
cacheFile = self.__cacheFile( t_id )
tmpFile = cacheFile + '.tmp'
f = open( tmpFile, 'w' )
pickle.dump( self.replicaCache.get( t_id, {} ), f )
f.close()
# Now rename the file as it shold
os.rename( tmpFile, cacheFile )
nCache += 1
self._logInfo( "Successfully wrote %d replica cache file(s) (%d files) in %.1f seconds" \
% ( nCache, filesInCache, time.time() - startTime ),
method = method, transID = transID if transID else None )
except Exception:
self._logException( "Could not write replica cache file %s" % cacheFile,
method = method, transID = t_id )
def __generatePluginObject( self, plugin, clients ):
""" This simply instantiates the TransformationPlugin class with the relevant plugin name
"""
try:
plugModule = __import__( self.pluginLocation, globals(), locals(), ['TransformationPlugin'] )
except ImportError, e:
self._logException( "Failed to import 'TransformationPlugin' %s: %s" % ( plugin, e ),
method = "__generatePluginObject" )
return S_ERROR()
try:
plugin_o = getattr( plugModule, 'TransformationPlugin' )( '%s' % plugin,
transClient = clients['TransformationClient'],
dataManager = clients['DataManager'] )
return S_OK( plugin_o )
except AttributeError, e:
self._logException( "Failed to create %s(): %s." % ( plugin, e ), method = "__generatePluginObject" )
return S_ERROR()
plugin_o.setDirectory( self.workDirectory )
plugin_o.setCallback( self.pluginCallback )
def pluginCallback( self, transID, invalidateCache = False ):
""" Standard plugin callback
"""
if invalidateCache:
try:
if transID in self.replicaCache:
self._logInfo( "Removed cached replicas for transformation" , method = 'pluginCallBack', transID = transID )
self.replicaCache.pop( transID )
self.__writeCache( transID )
except:
pass
| gpl-3.0 |
tommo/gii | template/host/waf-tools/emxx.py | 2 | 3207 | ###############################################################################
#
# Oak game engine
# Copyright (c) 2013 Remi Papillie
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
###############################################################################
#! /usr/bin/env python
# encoding: utf-8
import os,sys
from waflib import Configure,Options,Utils
from waflib.Tools import ccroot
from waflib.Configure import conf
@conf
def find_emxx(conf):
    """Locate the em++ cross-compiler and register it as the C++ compiler."""
    compiler = conf.find_program(['em++'], var="CXX")
    compiler = conf.cmd_to_list(compiler)
    conf.env.CXX_NAME = 'emcc'
    conf.env.CXX = compiler
@conf
def emxx_common_flags(conf):
    """Set the generic compile/link flag patterns for the emscripten C++ toolchain."""
    env = conf.env

    # Compile step
    env['CXX_SRC_F'] = []
    env['CXX_TGT_F'] = ['-c', '-o']

    # Link step - the linker defaults to the compiler driver itself
    if not env['LINK_CXX']:
        env['LINK_CXX'] = env['CXX']
    env['CXXLNK_SRC_F'] = []
    env['CXXLNK_TGT_F'] = ['-o']

    # Flag formatting patterns
    env['CPPPATH_ST'] = '-I%s'
    env['DEFINES_ST'] = '-D%s'
    env['LIB_ST'] = '-l%s'
    env['LIBPATH_ST'] = '-L%s'
    env['STLIB_ST'] = '-l%s'
    env['STLIBPATH_ST'] = '-L%s'
    env['RPATH_ST'] = '-Wl,-rpath,%s'
    env['SONAME_ST'] = '-Wl,-h,%s'
    env['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    env['STLIB_MARKER'] = '-Wl,-Bstatic'

    # Output name patterns and per-target-kind flags
    env['cxxprogram_PATTERN'] = '%s'
    env['CXXFLAGS_cxxshlib'] = ['-fPIC']
    env['LINKFLAGS_cxxshlib'] = ['-shared']
    env['cxxshlib_PATTERN'] = 'lib%s.js'
    env['LINKFLAGS_cxxstlib'] = ['-Wl,-Bstatic']
    env['cxxstlib_PATTERN'] = 'lib%s.a'
    env['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    env['CXXFLAGS_MACBUNDLE'] = ['-fPIC']
    env['macbundle_PATTERN'] = '%s.bundle'
@conf
def emxx_modifier_browser(conf):
    """Adjust output patterns when targeting the browser (HTML/JS artifacts)."""
    env = conf.env
    env['cxxprogram_PATTERN'] = '%s.html'
    env['cxxshlib_PATTERN'] = '%s.js'
    env['implib_PATTERN'] = 'lib%s.js.a'
    env['IMPLIB_ST'] = '-Wl,--out-implib,%s'
    env['CXXFLAGS_cxxshlib'] = []
    env.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def emxx_modifier_platform(conf):
    """Invoke the per-OS modifier (emxx_modifier_<TARGET_OS>) when one is defined."""
    modifier = getattr(conf, 'emxx_modifier_' + conf.env.TARGET_OS, None)
    if modifier:
        modifier()
def configure(conf):
    """Waf entry point: detect em++ and configure the emscripten C++ toolchain.

    Loads the companion emcc/emar tools from the local tool directory, then
    applies the common flags and the platform-specific modifier before the
    standard waf C++ tool setup.
    """
    conf.find_emxx()
    conf.load('emcc', tooldir="waf-tools")
    conf.load('emar', tooldir="waf-tools")
    conf.emxx_common_flags()
    conf.emxx_modifier_platform()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
| mit |
iansf/engine | sky/tools/webkitpy/common/checkout/scm/scm_mock.py | 48 | 4096 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive
class MockSCM(object):
    """In-memory stand-in for an SCM checkout, used by the unit tests."""

    executable_name = "MockSCM"

    def __init__(self, filesystem=None, executive=None):
        self.checkout_root = "/mock-checkout/third_party/WebKit"
        self.added_paths = set()
        self._filesystem = filesystem or MockFileSystem()
        self._executive = executive or MockExecutive()

    def add(self, destination_path, return_exit_code=False):
        # Single-path convenience wrapper around add_list().
        self.add_list([destination_path], return_exit_code)

    def add_list(self, destination_paths, return_exit_code=False):
        self.added_paths.update(destination_paths)
        if return_exit_code:
            return 0

    def has_working_directory_changes(self):
        return False

    def ensure_cleanly_tracking_remote_master(self):
        pass

    def current_branch(self):
        return "mock-branch-name"

    def checkout_branch(self, name):
        pass

    def create_clean_branch(self, name):
        pass

    def delete_branch(self, name):
        pass

    def supports_local_commits(self):
        return True

    def exists(self, path):
        # TestRealMain.test_real_main (and several other rebaseline tests) are sensitive to this return value.
        # We should make those tests more robust, but for now we just return True always (since no test needs otherwise).
        return True

    def absolute_path(self, *comps):
        return self._filesystem.join(self.checkout_root, *comps)

    def svn_revision(self, path):
        return '5678'

    def svn_revision_from_git_commit(self, git_commit):
        # Fixed git->svn mapping expected by the rebaseline tests.
        known_commits = {
            '6469e754a1': 1234,
            '624c3081c0': 5678,
            '624caaaaaa': 10000,
        }
        return known_commits.get(git_commit)

    def timestamp_of_revision(self, path, revision):
        return '2013-02-01 08:48:05 +0000'

    def commit_locally_with_message(self, message, commit_all_working_directory_changes=True):
        pass

    def delete(self, path):
        return self.delete_list([path])

    def delete_list(self, paths):
        if not self._filesystem:
            return
        for candidate in paths:
            if self._filesystem.exists(candidate):
                self._filesystem.remove(candidate)

    def move(self, origin, destination):
        if self._filesystem:
            self._filesystem.move(self.absolute_path(origin),
                                  self.absolute_path(destination))

    def changed_files(self):
        return []
| bsd-3-clause |
jonashaag/django-nonrel-nohistory | tests/regressiontests/test_utils/tests_25.py | 48 | 1281 | from __future__ import with_statement
from django.test import TestCase
from models import Person
class AssertNumQueriesContextManagerTests(TestCase):
    """Regression tests for ``TestCase.assertNumQueries`` used as a context manager."""

    def test_simple(self):
        # An empty block must execute no queries.
        with self.assertNumQueries(0):
            pass
        # Each Person.objects.count() issues exactly one query.
        with self.assertNumQueries(1):
            Person.objects.count()
        with self.assertNumQueries(2):
            Person.objects.count()
            Person.objects.count()

    def test_failure(self):
        # When fewer queries run than expected, the assertion message must
        # report both the executed and the expected counts.
        with self.assertRaises(AssertionError) as exc_info:
            with self.assertNumQueries(2):
                Person.objects.count()
        self.assertIn("1 queries executed, 2 expected", str(exc_info.exception))
        # An exception raised inside the block must propagate unchanged, even
        # though the expected query count (4000) was never reached.
        with self.assertRaises(TypeError):
            with self.assertNumQueries(4000):
                raise TypeError

    def test_with_client(self):
        # assertNumQueries also counts queries made through the test client.
        person = Person.objects.create(name="test")
        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        with self.assertNumQueries(1):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
        with self.assertNumQueries(2):
            self.client.get("/test_utils/get_person/%s/" % person.pk)
            self.client.get("/test_utils/get_person/%s/" % person.pk)
| bsd-3-clause |
awduda/awduda.github.io | venv/lib/python2.7/site-packages/pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """
    Uninstall packages.

    pip is able to uninstall most installed packages. Known exceptions are:

    - Pure distutils packages installed with ``python setup.py install``, which
      leave behind no metadata to determine what files were installed.
    - Script wrappers installed by ``python setup.py develop``.
    """
    name = 'uninstall'
    usage = """
      %prog [options] <package> ...
      %prog [options] -r <requirements file> ..."""
    summary = 'Uninstall packages.'

    def __init__(self, *args, **kw):
        super(UninstallCommand, self).__init__(*args, **kw)
        # -r/--requirement may be given several times; each file contributes
        # additional requirements to uninstall.
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements '
                 'file.  This option can be used multiple times.',
        )
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")

        self.parser.insert_option_group(0, self.cmd_opts)

    def run(self, options, args):
        # A session is still needed to parse requirements files (which may
        # reference remote index options), even though nothing is downloaded.
        with self._build_session(options) as session:
            # Wheel formats are irrelevant for uninstalling; an empty
            # FormatControl satisfies the WheelCache constructor.
            format_control = pip.index.FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            # No build/src/download dirs: we only collect names to remove.
            requirement_set = RequirementSet(
                build_dir=None,
                src_dir=None,
                download_dir=None,
                isolated=options.isolated_mode,
                session=session,
                wheel_cache=wheel_cache,
            )
            # Requirements given directly on the command line ...
            for name in args:
                requirement_set.add_requirement(
                    InstallRequirement.from_line(
                        name, isolated=options.isolated_mode,
                        wheel_cache=wheel_cache
                    )
                )
            # ... plus those from any -r/--requirement files.
            for filename in options.requirements:
                for req in parse_requirements(
                        filename,
                        options=options,
                        session=session,
                        wheel_cache=wheel_cache):
                    requirement_set.add_requirement(req)
            if not requirement_set.has_requirements:
                raise InstallationError(
                    'You must give at least one requirement to %(name)s (see '
                    '"pip help %(name)s")' % dict(name=self.name)
                )
            requirement_set.uninstall(auto_confirm=options.yes)
| mit |
sonaht/ansible | lib/ansible/modules/monitoring/sensu_subscription.py | 17 | 5330 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Anders Ingemann <aim@secoya.dk>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sensu_subscription
short_description: Manage Sensu subscriptions
version_added: 2.2
description:
- Manage which I(sensu channels) a machine should subscribe to
options:
name:
description:
- The name of the channel
required: true
state:
description:
- Whether the machine should subscribe or unsubscribe from the channel
choices: [ 'present', 'absent' ]
required: false
default: present
path:
description:
- Path to the subscriptions json file
required: false
default: /etc/sensu/conf.d/subscriptions.json
backup:
description:
- Create a backup file (if yes), including the timestamp information so you
- can get the original file back if you somehow clobbered it incorrectly.
choices: [ 'yes', 'no' ]
required: false
default: no
requirements: [ ]
author: Anders Ingemann
'''
RETURN = '''
reasons:
    description: the reasons why the module changed or did not change something
returned: success
type: list
sample: ["channel subscription was absent and state is `present'"]
'''
EXAMPLES = '''
# Subscribe to the nginx channel
- name: subscribe to nginx checks
sensu_subscription: name=nginx
# Unsubscribe from the common checks channel
- name: unsubscribe from common checks
sensu_subscription: name=common state=absent
'''
def sensu_subscription(module, path, name, state='present', backup=False):
changed = False
reasons = []
try:
import json
except ImportError:
import simplejson as json
try:
config = json.load(open(path))
except IOError:
e = get_exception()
if e.errno is 2: # File not found, non-fatal
if state == 'absent':
reasons.append('file did not exist and state is `absent\'')
return changed, reasons
config = {}
else:
module.fail_json(msg=str(e))
except ValueError:
msg = '{path} contains invalid JSON'.format(path=path)
module.fail_json(msg=msg)
if 'client' not in config:
if state == 'absent':
reasons.append('`client\' did not exist and state is `absent\'')
return changed, reasons
config['client'] = {}
changed = True
reasons.append('`client\' did not exist')
if 'subscriptions' not in config['client']:
if state == 'absent':
reasons.append('`client.subscriptions\' did not exist and state is `absent\'')
return changed, reasons
config['client']['subscriptions'] = []
changed = True
reasons.append('`client.subscriptions\' did not exist')
if name not in config['client']['subscriptions']:
if state == 'absent':
reasons.append('channel subscription was absent')
return changed, reasons
config['client']['subscriptions'].append(name)
changed = True
reasons.append('channel subscription was absent and state is `present\'')
else:
if state == 'absent':
config['client']['subscriptions'].remove(name)
changed = True
reasons.append('channel subscription was present and state is `absent\'')
if changed and not module.check_mode:
if backup:
module.backup_local(path)
try:
open(path, 'w').write(json.dumps(config, indent=2) + '\n')
except IOError:
e = get_exception()
module.fail_json(msg='Failed to write to file %s: %s' % (path, str(e)))
return changed, reasons
def main():
    """Entry point: parse the module arguments and apply the subscription change."""
    arg_spec = {'name': {'type': 'str', 'required': True},
                'path': {'type': 'str', 'default': '/etc/sensu/conf.d/subscriptions.json'},
                'state': {'type': 'str', 'default': 'present', 'choices': ['present', 'absent']},
                # the original spec declared 'type' twice ('str' then 'bool')
                # in one dict literal; only the bool is meaningful for a
                # yes/no flag, the duplicate key silently shadowed the first
                'backup': {'type': 'bool', 'default': 'no'},
                }
    module = AnsibleModule(argument_spec=arg_spec,
                           supports_check_mode=True)

    path = module.params['path']
    name = module.params['name']
    state = module.params['state']
    backup = module.params['backup']

    changed, reasons = sensu_subscription(module, path, name, state, backup)
    module.exit_json(path=path, name=name, changed=changed, msg='OK', reasons=reasons)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
navotsil/Open-Knesset | notify/management/commands/notify.py | 8 | 12644 | from __future__ import absolute_import
from django.core.management.base import NoArgsCommand
from django.contrib.auth.models import User,Group
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.utils.translation import ugettext as _
from django.utils import translation
from django.template.loader import render_to_string
from django.template import TemplateDoesNotExist
from django.conf import settings
from django.core.cache import cache
import datetime
from optparse import make_option
import logging
logger = logging.getLogger("open-knesset.notify")
from actstream.models import Follow, Action
from mailer import send_html_mail
from mks.models import Member
from laws.models import Bill, get_debated_bills
from agendas.models import Agenda
from notify.models import LastSent
from user.models import UserProfile
from committees.models import Topic
class Command(NoArgsCommand):
help = "Send e-mail notification to users that requested it."
requires_model_validation = False
update_models = [Member, Bill, Agenda, Topic, None]
from_email = getattr(settings, 'DEFAULT_FROM_EMAIL', 'email@example.com')
days_back = getattr(settings, 'DEFAULT_NOTIFICATION_DAYS_BACK', 10)
lang = getattr(settings, 'LANGUAGE_CODE', 'he')
@property
def domain(self):
if not hasattr(self, '_domain'):
self._domain = Site.objects.get_current().domain
return self._domain
option_list = NoArgsCommand.option_list + (
make_option('--daily', action='store_true', dest='daily',
help="send notifications to users that requested a daily update"),
make_option('--weekly', action='store_true', dest='weekly',
help="send notifications to users that requested a weekly update"))
def agenda_update(self, agenda):
''' generate the general update email for this agenda.
this will be called, and its output added to the email,
if and only if there has been some update in it's data.
'''
mks = agenda.selected_instances(Member)
template_name = 'notify/agenda_update'
update_txt = render_to_string(template_name + '.txt',
{'mks':mks,
'domain':self.domain})
update_html = render_to_string(template_name + '.html',
{'mks':mks,
'domain':self.domain})
return (update_txt,update_html)
@classmethod
def get_model_headers(cls, model):
''' for a given model this function returns a tuple with
(model, text_header, html_header)
'''
try:
template_name = 'notify/%s_section' % model.__name__.lower()
return (model, render_to_string(template_name + '.txt'), render_to_string(template_name + '.html'))
except TemplateDoesNotExist:
return (model, model._meta.verbose_name_plural, '<h2>%s</h2>' % model._meta.verbose_name_plural.format())
except AttributeError:
return (model, _('Other Updates'), '<h2>%s</h2>' % _('Other Updates'))
def get_email_for_user(self, user):
''' return the body text and html for a user's email '''
updates = dict(zip(self.update_models, ([] for x in self.update_models))) # will contain the updates to be sent
updates_html = dict(zip(self.update_models, ([] for x in self.update_models)))
follows = Follow.objects.filter(user=user) # everything this user is following
# sometime a user follows something several times. we want to filter that out:
follows = set([f.actor for f in follows])
for f in follows:
if not f:
logger.warning('Follow object with None actor. ignoring')
continue
model_class = f.__class__
model_template = f.__class__.__name__.lower()
try:
model_name = f.__class__._meta.verbose_name
except AttributeError:
logger.warning('follows %d has no __class__?' % f.id)
model_name = ""
content_type = ContentType.objects.get_for_model(f)
if model_class in updates:
key = model_class
else:
key = None # put all updates for 'other' classes at the 'None' group
try: # get actions that happened since last update
last_sent = LastSent.objects.get(user=user, content_type=content_type, object_pk=f.id)
last_sent_time = last_sent.time
stream = Action.objects.filter(actor_content_type = content_type,
actor_object_id = f.id,
timestamp__gt=last_sent_time,
).order_by('-timestamp')
if stream: # if there are updates to be sent,
last_sent.save() # update timestamp of last sent
except LastSent.DoesNotExist: # never updated about this actor, send some updates
stream = Action.objects.filter(actor_content_type = content_type,
actor_object_id = f.id,
timestamp__gt=datetime.datetime.now()-datetime.timedelta(self.days_back),
).order_by('-timestamp')
last_sent = LastSent.objects.create(user=user,content_type=content_type, object_pk=f.id)
if stream: # this actor has some updates
try: # genereate the appropriate header for this actor class
header = render_to_string(('notify/%(model)s_header.txt' % {'model': model_template}),{'model':model_name,'object':f})
except TemplateDoesNotExist:
header = render_to_string(('notify/model_header.txt'),{'model':model_name,'object':f})
try:
header_html = render_to_string(('notify/%(model)s_header.html' % {'model': model_template}),{'model':model_name,'object':f,'domain':self.domain})
except TemplateDoesNotExist:
header_html = render_to_string(('notify/model_header.html'),{'model':model_name,'object':f,'domain':self.domain})
updates[key].append(header)
updates_html[key].append(header_html)
for action_instance in stream: # now generate the updates themselves
try:
action_output = render_to_string(('activity/%(verb)s/action_email.txt' % { 'verb':action_instance.verb.replace(' ','_') }),{ 'action':action_instance },None)
except TemplateDoesNotExist: # fallback to the generic template
action_output = render_to_string(('activity/action_email.txt'),{ 'action':action_instance },None)
try:
action_output_html = render_to_string(('activity/%(verb)s/action_email.html' % { 'verb':action_instance.verb.replace(' ','_') }),{ 'action':action_instance,'domain':self.domain },None)
except TemplateDoesNotExist: # fallback to the generic template
action_output_html = render_to_string(('activity/action_email.html'),{ 'action':action_instance,'domain':self.domain },None)
updates[key].append(action_output)
updates_html[key].append(action_output_html)
if model_class == Agenda:
txt,html = self.agenda_update(f)
updates[key].append(txt)
updates_html[key].append(html)
email_body = []
email_body_html = []
# Add the updates for followed models
for (model_class,title,title_html) in map(self.get_model_headers, self.update_models):
if updates[model_class]: # this model has some updates, add it to the email
email_body.append(title.format())
email_body.append('\n'.join(updates[model_class]))
email_body_html.append(title_html.format())
email_body_html.append(''.join(updates_html[model_class]))
if email_body or email_body_html:
# Generate party membership section if needed
up = UserProfile.objects.filter(user=user).select_related('party')
if up:
up = up[0]
party = up.party
if party:
num_members = cache.get('party_num_members_%d' % party.id,
None)
if not num_members:
num_members = party.userprofile_set.count()
cache.set('party_num_members_%d' % party.id,
num_members,
settings.LONG_CACHE_TIME)
else:
num_members = None
debated_bills = get_debated_bills() or []
template_name = 'notify/party_membership'
party_membership_txt = render_to_string(template_name + '.txt',
{'user':user,
'userprofile':up,
'num_members':num_members,
'bills':debated_bills,
'domain':self.domain})
party_membership_html = render_to_string(template_name + '.html',
{'user':user,
'userprofile':up,
'num_members':num_members,
'bills':debated_bills,
'domain':self.domain})
else:
logger.warning('Can\'t find user profile')
if email_body:
email_body.insert(0, party_membership_txt)
if email_body_html:
email_body_html.insert(0, party_membership_html)
return (email_body, email_body_html)
def handle_noargs(self, **options):
daily = options.get('daily', False)
weekly = options.get('weekly', False)
if not daily and not weekly:
print "use --daily or --weekly"
return
translation.activate(self.lang)
email_notification = []
if daily:
email_notification.append('D')
if weekly:
email_notification.append('W')
queued = 0
g = Group.objects.get(name='Valid Email')
for user in User.objects.filter(groups=g,
profiles__isnull=False)\
.exclude(email=''):
try:
user_profile = user.get_profile()
except UserProfile.DoesNotExist:
logger.warn('can\'t access user %d userprofile' % user.id)
continue
if (user_profile.email_notification in email_notification):
# if this user has requested emails in the frequency we are
# handling now
email_body, email_body_html = self.get_email_for_user(user)
if email_body: # there are some updates. generate email
header = render_to_string(('notify/header.txt'),{ 'user':user })
footer = render_to_string(('notify/footer.txt'),{ 'user':user,'domain':self.domain })
header_html = render_to_string(('notify/header.html'),{ 'user':user })
footer_html = render_to_string(('notify/footer.html'),{ 'user':user,'domain':self.domain })
send_html_mail(_('Open Knesset Updates'), "%s\n%s\n%s" % (header, '\n'.join(email_body), footer),
"%s\n%s\n%s" % (header_html, ''.join(email_body_html), footer_html),
self.from_email,
[user.email],
)
queued += 1
logger.info("%d email notifications queued for sending" % queued)
translation.deactivate()
| bsd-3-clause |
tkaitchuck/nupic | external/linux64/lib/python2.6/site-packages/numpy/lib/arraysetops.py | 6 | 11527 | """
Set operations for 1D numeric arrays based on sorting.
:Contains:
ediff1d,
unique,
intersect1d,
setxor1d,
in1d,
union1d,
setdiff1d
:Notes:
For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
Speed could be gained in some operations by an implementation of
sort(), that can provide directly the permutation vectors, avoiding
thus calls to argsort().
To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
__all__ = ['ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d',
'unique', 'in1d']
import numpy as np
from numpy.lib.utils import deprecate
def ediff1d(ary, to_end=None, to_begin=None):
    """
    The differences between consecutive elements of an array.

    Parameters
    ----------
    ary : array_like
        If necessary, will be flattened before the differences are taken.
    to_end : array_like, optional
        Number(s) to append at the end of the returned differences.
    to_begin : array_like, optional
        Number(s) to prepend at the beginning of the returned differences.

    Returns
    -------
    ed : ndarray
        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.

    See Also
    --------
    diff, gradient

    Notes
    -----
    When applied to masked arrays, this function drops the mask information
    if the `to_begin` and/or `to_end` parameters are used.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.ediff1d(x)
    array([ 1,  2,  3, -7])

    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99,   1,   2,   3,  -7,  88,  99])

    The returned array is always 1D.

    >>> y = [[1, 2, 4], [1, 6, 24]]
    >>> np.ediff1d(y)
    array([ 1,  2, -3,  5, 18])

    """
    flat = np.asanyarray(ary).flat
    diffs = flat[1:] - flat[:-1]

    # Collect the optional prefix/suffix around the raw differences and only
    # hstack when something was actually added; in the common case (neither
    # to_begin nor to_end given) this avoids copying a potentially large array.
    pieces = [diffs]
    if to_begin is not None:
        pieces.insert(0, to_begin)
    if to_end is not None:
        pieces.append(to_end)
    if len(pieces) > 1:
        diffs = np.hstack(pieces)
    return diffs
def unique(ar, return_index=False, return_inverse=False):
    """
    Find the unique elements of an array.

    Returns the sorted unique elements of an array.  There are two optional
    outputs in addition to the unique elements: the indices of the input
    array that give the unique values, and the indices of the unique array
    that reconstruct the input array.

    Parameters
    ----------
    ar : array_like
        Input array. This will be flattened if it is not already 1-D.
    return_index : bool, optional
        If True, also return the indices of `ar` that result in the unique
        array.
    return_inverse : bool, optional
        If True, also return the indices of the unique array that can be
        used to reconstruct `ar`.

    Returns
    -------
    unique : ndarray
        The sorted unique values.
    unique_indices : ndarray, optional
        The indices of the first occurrences of the unique values in the
        (flattened) original array. Only provided if `return_index` is True.
    unique_inverse : ndarray, optional
        The indices to reconstruct the (flattened) original array from the
        unique array. Only provided if `return_inverse` is True.

    Examples
    --------
    >>> np.unique([1, 1, 2, 2, 3, 3])
    array([1, 2, 3])
    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
    >>> u, indices = np.unique(a, return_inverse=True)
    >>> u[indices]
    array([1, 2, 6, 4, 2, 3, 2])
    """
    try:
        ar = ar.flatten()
    except AttributeError:
        if not return_inverse and not return_index:
            # Generic (non-ndarray) sequence with no index output needed:
            # a plain Python set does the job.
            items = sorted(set(ar))
            return np.asarray(items)
        else:
            ar = np.asanyarray(ar).flatten()

    if ar.size == 0:
        # Degenerate case: keep the tuple arity the flags promise.
        # ``np.bool`` was a deprecated alias for the builtin and was removed
        # in NumPy 1.24 -- use plain ``bool`` instead.
        if return_inverse and return_index:
            return ar, np.empty(0, bool), np.empty(0, bool)
        elif return_inverse or return_index:
            return ar, np.empty(0, bool)
        else:
            return ar

    if return_inverse or return_index:
        if return_index:
            # Stable sort so the *first* occurrence of each value wins.
            perm = ar.argsort(kind='mergesort')
        else:
            perm = ar.argsort()
        aux = ar[perm]
        # True at the start of every run of equal values.
        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
        if return_inverse:
            iflag = np.cumsum(flag) - 1
            iperm = perm.argsort()
            if return_index:
                return aux[flag], perm[flag], iflag[iperm]
            else:
                return aux[flag], iflag[iperm]
        else:
            return aux[flag], perm[flag]
    else:
        ar.sort()
        flag = np.concatenate(([True], ar[1:] != ar[:-1]))
        return ar[flag]
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Find the intersection of two arrays.

    Return the sorted, unique values that are in both of the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    out : ndarray
        Sorted 1D array of common and unique elements.

    Examples
    --------
    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
    array([1, 3])
    """
    if not assume_unique:
        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    aux = np.concatenate((ar1, ar2))
    aux.sort()
    # Each input is duplicate-free, so a value common to both appears
    # exactly twice (adjacently) in the sorted concatenation.  NOTE: the
    # previous ``aux[aux[1:] == aux[:-1]]`` indexed with a boolean mask one
    # element shorter than the array; that implicit padding was deprecated
    # and modern NumPy raises IndexError.  Indexing the trimmed view
    # selects the same values with a shape-correct mask.
    return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Find the set exclusive-or of two arrays.

    Return the sorted, unique values that are in only one (not both) of
    the input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    xor : ndarray
        Sorted 1D array of unique values that are in only one of the input
        arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4])
    >>> b = np.array([2, 3, 5, 7, 5])
    >>> np.setxor1d(a,b)
    array([1, 4, 5, 7])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    merged = np.concatenate((ar1, ar2))
    if merged.size == 0:
        return merged
    merged = np.sort(merged)
    # run_start[i] is True where a new run of equal values begins; the
    # sentinel True at each end gives every element a flag on both sides.
    run_start = np.concatenate(([True], merged[1:] != merged[:-1], [True]))
    # An element belongs to exactly one input when its run has length one,
    # i.e. when the flags on its two sides agree.
    keep = run_start[1:] == run_start[:-1]
    return merged[keep]
def in1d(ar1, ar2, assume_unique=False):
    """
    Test whether each element of a 1D array is also present in a second array.

    Returns a boolean array the same length as `ar1` that is True
    where an element of `ar1` is in `ar2` and False otherwise.

    Parameters
    ----------
    ar1 : array_like, shape (M,)
        Input array.
    ar2 : array_like
        The values against which to test each value of `ar1`.
    assume_unique : bool, optional
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    mask : ndarray of bools, shape(M,)
        The values `ar1[mask]` are in `ar2`.

    Notes
    -----
    `in1d` can be considered as an element-wise function version of the
    python keyword `in`, for 1D sequences. ``in1d(a, b)`` is roughly
    equivalent to ``np.array([item in b for item in a])``.

    .. versionadded:: 1.4.0

    Examples
    --------
    >>> test = np.array([0, 1, 2, 5, 0])
    >>> np.in1d(test, [0, 2])
    array([ True, False,  True, False,  True], dtype=bool)
    """
    if not assume_unique:
        # Deduplicate both inputs; ``rev_idx`` later expands the answer for
        # the unique values back onto the original (possibly repeated) ones.
        ar1, rev_idx = np.unique(ar1, return_inverse=True)
        ar2 = np.unique(ar2)

    combined = np.concatenate((ar1, ar2))
    # A *stable* sort keeps an ar1 value ahead of an equal ar2 value, so an
    # ar1 element with a match in ar2 is immediately followed by that match.
    order = combined.argsort(kind='mergesort')
    sorted_vals = combined[order]
    followed_by_equal = np.concatenate((sorted_vals[1:] == sorted_vals[:-1],
                                        [False]))
    # Undo the sort and keep only the slots that originated from ar1.
    undo = order.argsort(kind='mergesort')[:len(ar1)]
    result = followed_by_equal[undo]
    return result if assume_unique else result[rev_idx]
def union1d(ar1, ar2):
    """
    Find the union of two arrays.

    Return the unique, sorted array of values that are in either of the
    two input arrays.

    Parameters
    ----------
    ar1, ar2 : array_like
        Input arrays. They are flattened if they are not already 1D.

    Returns
    -------
    union : ndarray
        Unique, sorted union of the input arrays.

    Examples
    --------
    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
    array([-2, -1,  0,  1,  2])
    """
    # Concatenating first and deduplicating once is cheaper than
    # deduplicating each input separately and merging the results.
    combined = np.concatenate((ar1, ar2))
    return unique(combined)
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation. Default is False.

    Returns
    -------
    difference : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])
    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    # Boolean mask over ar1: True where the element also occurs in ar2.
    mask = in1d(ar1, ar2, assume_unique=True)
    # Indexing with the inverted mask also handles empty input correctly:
    # the previous ``if aux.size == 0: return aux`` branch leaked the
    # *bool*-typed mask itself instead of an empty array with ar1's dtype.
    return np.asarray(ar1)[~mask]
| gpl-3.0 |
sodafree/backend | django/contrib/localflavor/ie/ie_counties.py | 503 | 1127 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
# (value, display-name) pairs covering all 32 counties on the island of
# Ireland (both the Republic and Northern Ireland), suitable for use as the
# ``choices`` of a Django form/model field.  Display names are wrapped in
# ugettext_lazy so they are translated at render time.
IE_COUNTY_CHOICES = (
    ('antrim', _('Antrim')),
    ('armagh', _('Armagh')),
    ('carlow', _('Carlow')),
    ('cavan', _('Cavan')),
    ('clare', _('Clare')),
    ('cork', _('Cork')),
    ('derry', _('Derry')),
    ('donegal', _('Donegal')),
    ('down', _('Down')),
    ('dublin', _('Dublin')),
    ('fermanagh', _('Fermanagh')),
    ('galway', _('Galway')),
    ('kerry', _('Kerry')),
    ('kildare', _('Kildare')),
    ('kilkenny', _('Kilkenny')),
    ('laois', _('Laois')),
    ('leitrim', _('Leitrim')),
    ('limerick', _('Limerick')),
    ('longford', _('Longford')),
    ('louth', _('Louth')),
    ('mayo', _('Mayo')),
    ('meath', _('Meath')),
    ('monaghan', _('Monaghan')),
    ('offaly', _('Offaly')),
    ('roscommon', _('Roscommon')),
    ('sligo', _('Sligo')),
    ('tipperary', _('Tipperary')),
    ('tyrone', _('Tyrone')),
    ('waterford', _('Waterford')),
    ('westmeath', _('Westmeath')),
    ('wexford', _('Wexford')),
    ('wicklow', _('Wicklow')),
)
| bsd-3-clause |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/pip/_internal/commands/help.py | 30 | 1090 | from __future__ import absolute_import
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import CommandError
class HelpCommand(Command):
    """Show help for commands"""
    name = 'help'
    usage = """
      %prog <command>"""
    summary = 'Show help for commands.'
    # `pip help` must keep working even outside a required virtualenv.
    ignore_require_venv = True

    def run(self, options, args):
        # Imported lazily to avoid a circular import with pip's command
        # registry (the registry imports this module).
        from pip._internal.commands import commands_dict, get_similar_commands

        try:
            # 'pip help' with no args is handled by pip.__init__.parseopt()
            cmd_name = args[0]  # the command we need help for
        except IndexError:
            return SUCCESS

        if cmd_name not in commands_dict:
            # Unknown command: suggest the closest known command name, if
            # any, before failing.
            guess = get_similar_commands(cmd_name)

            msg = ['unknown command "%s"' % cmd_name]
            if guess:
                msg.append('maybe you meant "%s"' % guess)

            raise CommandError(' - '.join(msg))

        # Instantiate the requested command and print its option parser's
        # help text.
        command = commands_dict[cmd_name]()
        command.parser.print_help()

        return SUCCESS
| mit |
mavit/ansible | test/units/module_utils/urls/test_fetch_url.py | 28 | 7987 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves.http_cookiejar import Cookie
from ansible.module_utils.six.moves.http_client import HTTPMessage
from ansible.module_utils.urls import fetch_url, urllib_error, ConnectionError, NoSSLError, httplib
import pytest
from mock import MagicMock
class AnsibleModuleExit(Exception):
    """Raised by the fake module's exit hooks to capture how they were called."""

    def __init__(self, *args, **kwargs):
        # Record both positional and keyword arguments so tests can assert
        # on exactly what was passed to exit_json()/fail_json().
        self.args, self.kwargs = args, kwargs
class ExitJson(AnsibleModuleExit):
    """Raised in place of AnsibleModule.exit_json() terminating the process."""
    pass
class FailJson(AnsibleModuleExit):
    """Raised in place of AnsibleModule.fail_json() terminating the process."""
    pass
@pytest.fixture
def open_url_mock(mocker):
    # Patch the open_url call that fetch_url delegates to, so tests can
    # inspect how it was invoked and control what it returns or raises.
    return mocker.patch('ansible.module_utils.urls.open_url')
@pytest.fixture
def fake_ansible_module():
    # A fresh fake module per test, so per-test param tweaks don't leak.
    return FakeAnsibleModule()
class FakeAnsibleModule:
    """Minimal AnsibleModule stand-in.

    Its exit hooks raise exceptions instead of terminating the process, so
    tests can intercept them and inspect the arguments.
    """

    def __init__(self):
        # fetch_url reads its options from .params; tests overwrite this
        # dict as needed.  tmpdir mirrors the real module attribute.
        self.params = {}
        self.tmpdir = None

    def exit_json(self, *args, **kwargs):
        raise ExitJson(*args, **kwargs)

    def fail_json(self, *args, **kwargs):
        raise FailJson(*args, **kwargs)
def test_fetch_url_no_urlparse(mocker, fake_ansible_module):
    # With urlparse reported unavailable, fetch_url must fail the module
    # (fail_json -> FailJson) rather than raise an unhandled error.
    mocker.patch('ansible.module_utils.urls.HAS_URLPARSE', new=False)

    with pytest.raises(FailJson):
        fetch_url(fake_ansible_module, 'http://ansible.com/')
def test_fetch_url(open_url_mock, fake_ansible_module):
    # With no module params set, fetch_url must forward its documented
    # defaults to open_url.
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    # The cookies jar is created internally, so grab the actual value from
    # the recorded call before asserting on the full argument list.
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
                                          follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
                                          http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
                                          use_proxy=True, validate_certs=True)
def test_fetch_url_params(open_url_mock, fake_ansible_module):
    # Values supplied through module params must override the defaults that
    # fetch_url would otherwise pass to open_url.
    fake_ansible_module.params = {
        'validate_certs': False,
        'url_username': 'user',
        'url_password': 'passwd',
        'http_agent': 'ansible-test',
        'force_basic_auth': True,
        'follow_redirects': 'all',
        'client_cert': 'client.pem',
        'client_key': 'client.key',
    }

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    # The cookies jar is created internally; read it from the recorded call.
    dummy, kwargs = open_url_mock.call_args

    open_url_mock.assert_called_once_with('http://ansible.com/', client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
                                          follow_redirects='all', force=False, force_basic_auth=True, headers=None,
                                          http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
                                          use_proxy=True, validate_certs=False)
def test_fetch_url_cookies(mocker, fake_ansible_module):
    # Build a fake response carrying two Set-Cookie headers, then check that
    # fetch_url exposes them as a dict, a cookie string, and the raw header.
    def make_cookies(*args, **kwargs):
        cookies = kwargs['cookies']
        r = MagicMock()
        try:
            r.headers = HTTPMessage()
            add_header = r.headers.add_header
        except TypeError:
            # PY2: HTTPMessage requires a file-like argument and spells the
            # method addheader.
            r.headers = HTTPMessage(StringIO())
            add_header = r.headers.addheader
        r.info.return_value = r.headers
        for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
            cookie = Cookie(
                version=0,
                name=name,
                value=value,
                port=None,
                port_specified=False,
                domain="ansible.com",
                domain_specified=True,
                domain_initial_dot=False,
                path="/",
                path_specified=True,
                secure=False,
                expires=None,
                discard=False,
                comment=None,
                comment_url=None,
                rest=None
            )
            cookies.set_cookie(cookie)
            add_header('Set-Cookie', '%s=%s' % (name, value))

        return r

    mocker = mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
    # Python sorts cookies in order of most specific (ie. longest) path first
    # items with the same path are reversed from response order
    assert info['cookies_string'] == 'Baz=qux; Foo=bar'
    # The key here has a `-` as opposed to what we see in the `uri` module that converts to `_`
    # Note: this is response order, which differs from cookies_string
    assert info['set-cookie'] == 'Foo=bar, Baz=qux'
def test_fetch_url_nossl(open_url_mock, fake_ansible_module, mocker):
    # The "install python-ssl" hint is distribution-specific: only Red Hat
    # platforms should mention the package by name in the failure message.
    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='notredhat')
    open_url_mock.side_effect = NoSSLError
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert 'python-ssl' not in excinfo.value.kwargs['msg']

    mocker.patch('ansible.module_utils.urls.get_distribution', return_value='redhat')
    open_url_mock.side_effect = NoSSLError
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert 'python-ssl' in excinfo.value.kwargs['msg']
def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
    # Both ConnectionError and ValueError from open_url must surface their
    # message through fail_json.
    open_url_mock.side_effect = ConnectionError('TESTS')
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert excinfo.value.kwargs['msg'] == 'TESTS'

    open_url_mock.side_effect = ValueError('TESTS')
    with pytest.raises(FailJson) as excinfo:
        fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert excinfo.value.kwargs['msg'] == 'TESTS'
def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
    # An HTTPError is not fatal: fetch_url must return its status, body and
    # headers in the info dict instead of failing the module.
    open_url_mock.side_effect = urllib_error.HTTPError(
        'http://ansible.com/',
        500,
        'Internal Server Error',
        {'Content-Type': 'application/json'},
        StringIO('TESTS')
    )

    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')

    assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
                    'status': 500, 'url': 'http://ansible.com/', 'content-type': 'application/json'}
def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
    """A URLError from open_url is reported as a failed request with status -1."""
    open_url_mock.side_effect = urllib_error.URLError('TESTS')
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {
        'msg': 'Request failed: <urlopen error TESTS>',
        'status': -1,
        'url': 'http://ansible.com/',
    }
    assert info == expected
def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
    """A socket.error from open_url is reported as a connection failure."""
    open_url_mock.side_effect = socket.error('TESTS')
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {
        'msg': 'Connection failure: TESTS',
        'status': -1,
        'url': 'http://ansible.com/',
    }
    assert info == expected
def test_fetch_url_exception(open_url_mock, fake_ansible_module):
    """An arbitrary exception is caught, reported, and its traceback exposed."""
    open_url_mock.side_effect = Exception('TESTS')
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    exception = info.pop('exception')
    expected = {
        'msg': 'An unknown error occurred: TESTS',
        'status': -1,
        'url': 'http://ansible.com/',
    }
    assert info == expected
    assert "Exception: TESTS" in exception
def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
    """A BadStatusLine from the HTTP layer is reported as a closed connection."""
    open_url_mock.side_effect = httplib.BadStatusLine('TESTS')
    r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
    expected = {
        'msg': 'Connection failure: connection was closed before a valid response was received: TESTS',
        'status': -1,
        'url': 'http://ansible.com/',
    }
    assert info == expected
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.